hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M) |
---|---|---|---|
79b3c5ecefa71590248d5df7a5959042d4100f1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_add_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h"
#include "paddle/fluid/platform/complex128.h"
#include "paddle/fluid/platform/complex64.h"
#include "paddle/fluid/platform/float16.h"
namespace ops = paddle::operators;
namespace plat = paddle::platform;
namespace paddle {
namespace operators {
template <typename T>
struct SameDimsElemwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
framework::Tensor* z) {
AddRangeFunctor<T> functor(x->data<T>(), y->data<T>(), z->data<T>());
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx,
x->numel());
for_range(functor);
}
};
template <>
struct SameDimsElemwiseAdd<platform::CUDADeviceContext, platform::float16> {
void operator()(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
framework::Tensor* z) {
auto size = x->numel();
dim3 grid_size = dim3(((size + 1) / 2 + PADDLE_CUDA_THREAD_SIZE - 1) /
PADDLE_CUDA_THREAD_SIZE,
1);
dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1);
const half* x2 =
reinterpret_cast<const half*>(x->data<platform::float16>());
const half* y2 =
reinterpret_cast<const half*>(y->data<platform::float16>());
half* z2 = reinterpret_cast<half*>(z->data<platform::float16>());
hipLaunchKernelGGL(( SameDimsElemwiseAddCUDAKernel),
dim3(grid_size), dim3(block_size), 0,
ctx.template device_context<platform::CUDADeviceContext>().stream(),
x2, y2, z2, size);
}
};
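/* Editor's note (illustrative sketch, not part of the original operator):
   The (size + 1) / 2 in the grid computation above implies that
   SameDimsElemwiseAddCUDAKernel (declared in the included headers) lets
   each thread add two fp16 values at once, e.g. through the packed half2
   type. A minimal kernel of that shape, written here only to explain the
   grid math and using a hypothetical name, could look like:

     __global__ void ExampleHalf2AddKernel(const half* x, const half* y,
                                           half* z, int64_t size) {
       int64_t i = blockIdx.x * blockDim.x + threadIdx.x;  // one thread per pair
       const half2* x2 = reinterpret_cast<const half2*>(x);
       const half2* y2 = reinterpret_cast<const half2*>(y);
       half2* z2 = reinterpret_cast<half2*>(z);
       if (2 * i + 1 < size) {
         z2[i] = __hadd2(x2[i], y2[i]);           // full pair
       } else if (2 * i < size) {
         z[2 * i] = __hadd(x[2 * i], y[2 * i]);   // odd trailing element
       }
     }

   The real kernel and its tail handling live elsewhere in the operator
   headers; this sketch only motivates why the grid covers (size + 1) / 2
   work items rather than size. */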
template <typename T>
static __global__ void SimpleElemwiseAddGradCUDAKernel(const T* dout,
int64_t size, T* dx,
T* dy) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
dx[col] = dout[col];
dy[col] = dout[col];
col += blockDim.x * gridDim.x;
}
}
template <typename DeviceContext, typename T>
typename std::enable_if<
std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type
elementwise_add_grad(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
const framework::Tensor* out,
const framework::Tensor* dout, framework::Tensor* dx,
framework::Tensor* dy) {
dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1);
auto size = x->numel();
dim3 grid_size =
dim3((size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1);
hipLaunchKernelGGL(( SimpleElemwiseAddGradCUDAKernel<
T>), dim3(grid_size), dim3(block_size), 0,
ctx.template device_context<plat::CUDADeviceContext>().stream(),
dout->data<T>(), size, dx->mutable_data<T>(ctx.GetPlace()),
dy->mutable_data<T>(ctx.GetPlace()));
}
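/* Editor's note (illustrative sketch, not part of the original operator):
   The std::enable_if return type above is a SFINAE dispatch: this overload
   of elementwise_add_grad only exists when DeviceContext is the CUDA
   device context, so a CPU overload guarded by the negated condition can
   coexist in the shared header. The bare pattern, with hypothetical names
   GpuCtx/add_grad, is:

     template <typename Ctx>
     typename std::enable_if<std::is_same<Ctx, GpuCtx>::value>::type
     add_grad();   // selected only when Ctx is the GPU context

     template <typename Ctx>
     typename std::enable_if<!std::is_same<Ctx, GpuCtx>::value>::type
     add_grad();   // selected otherwise (CPU path)
*/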
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
elementwise_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex64>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex128>);
REGISTER_OP_CUDA_KERNEL(
elementwise_add_grad,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::complex64>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::complex128>);
REGISTER_OP_CUDA_KERNEL(
elementwise_add_grad_grad,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext,
plat::complex64>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext,
plat::complex128>);
REGISTER_OP_CUDA_KERNEL(
grad_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex64>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex128>);
| 79b3c5ecefa71590248d5df7a5959042d4100f1c.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_add_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h"
#include "paddle/fluid/platform/complex128.h"
#include "paddle/fluid/platform/complex64.h"
#include "paddle/fluid/platform/float16.h"
namespace ops = paddle::operators;
namespace plat = paddle::platform;
namespace paddle {
namespace operators {
template <typename T>
struct SameDimsElemwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
framework::Tensor* z) {
AddRangeFunctor<T> functor(x->data<T>(), y->data<T>(), z->data<T>());
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx,
x->numel());
for_range(functor);
}
};
template <>
struct SameDimsElemwiseAdd<platform::CUDADeviceContext, platform::float16> {
void operator()(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
framework::Tensor* z) {
auto size = x->numel();
dim3 grid_size = dim3(((size + 1) / 2 + PADDLE_CUDA_THREAD_SIZE - 1) /
PADDLE_CUDA_THREAD_SIZE,
1);
dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1);
const half* x2 =
reinterpret_cast<const half*>(x->data<platform::float16>());
const half* y2 =
reinterpret_cast<const half*>(y->data<platform::float16>());
half* z2 = reinterpret_cast<half*>(z->data<platform::float16>());
SameDimsElemwiseAddCUDAKernel<<<
grid_size, block_size, 0,
ctx.template device_context<platform::CUDADeviceContext>().stream()>>>(
x2, y2, z2, size);
}
};
template <typename T>
static __global__ void SimpleElemwiseAddGradCUDAKernel(const T* dout,
int64_t size, T* dx,
T* dy) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
dx[col] = dout[col];
dy[col] = dout[col];
col += blockDim.x * gridDim.x;
}
}
template <typename DeviceContext, typename T>
typename std::enable_if<
std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type
elementwise_add_grad(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
const framework::Tensor* out,
const framework::Tensor* dout, framework::Tensor* dx,
framework::Tensor* dy) {
dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1);
auto size = x->numel();
dim3 grid_size =
dim3((size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1);
SimpleElemwiseAddGradCUDAKernel<
T><<<grid_size, block_size, 0,
ctx.template device_context<plat::CUDADeviceContext>().stream()>>>(
dout->data<T>(), size, dx->mutable_data<T>(ctx.GetPlace()),
dy->mutable_data<T>(ctx.GetPlace()));
}
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
elementwise_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex64>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex128>);
REGISTER_OP_CUDA_KERNEL(
elementwise_add_grad,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::complex64>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::complex128>);
REGISTER_OP_CUDA_KERNEL(
elementwise_add_grad_grad,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext,
plat::complex64>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext,
plat::complex128>);
REGISTER_OP_CUDA_KERNEL(
grad_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex64>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex128>);
|
a93885eab66daa704e64e3c5f8e22707e3301f65.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <conio.h>
#include <ctime>
#include "FindCloestGPU.h"
using namespace std;
int main()
{
srand(time(NULL));
// number of points
const int count = 10000;
// array of points
int *indexOfCloest = new int[count];
float3d *points = new float3d[count];
int *d_indexOfCloest = new int[count];
float3d *d_points = new float3d[count];
// create a list of random points
for (int i = 0; i < count; i++)
{
points[i].x = (float)(rand() % 10000 - 5000);
points[i].y = (float)(rand() % 10000 - 5000);
points[i].z = (float)(rand() % 10000 - 5000);
}
// allocate GPU memory
hipMalloc(&d_points, sizeof(float3d)*count);
hipMalloc(&d_indexOfCloest, sizeof(int)*count);
//copy from CPU -> GPU
hipMemcpy(d_points, points, sizeof(float3d) * count, hipMemcpyHostToDevice);
//hipMemcpy(d_indexOfCloest, indexOfCloest, sizeof(int)*count, hipMemcpyHostToDevice);
// track the fast time so far
long fastest = 1000000;
// run the algorithm 10 times
for (int q = 0; q < 10; q++)
{
long startTime = clock();
// Run the algorithm
//FindCloestCPU(points, indexOfCloest, count);
hipLaunchKernelGGL(( FindCloestGPU2), dim3((count / 1024) + 1), dim3(1024) , 0, 0, d_points, d_indexOfCloest, count);
hipMemcpy(indexOfCloest, d_indexOfCloest, sizeof(int)*count, hipMemcpyDeviceToHost);
long finishTime = clock();
cout << "Run " << q << " tooks " << (finishTime - startTime) << " millis " << endl;
// if that run was faster, update the fastest time
if ((finishTime - startTime) < fastest)
{
fastest = finishTime - startTime;
}
}
cout << "Fastest time: " << fastest << endl;
cout << "Final results: " << endl;
for (int i = 0; i < 10; i++)
{
cout << i << "." << indexOfCloest[i] << endl;
}
delete[] indexOfCloest;
delete[] points;
hipFree(d_points);
hipFree(d_indexOfCloest);
hipDeviceReset();
//_getch();
return 0;
}
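/* Editor's note (illustrative sketch, not part of the original program):
   clock() above measures host time around the launch plus the
   device-to-host copy. To time just the kernel, HIP events could be used;
   a minimal sketch with error checking omitted:

     hipEvent_t beg, end;
     hipEventCreate(&beg);
     hipEventCreate(&end);
     hipEventRecord(beg, 0);
     hipLaunchKernelGGL(( FindCloestGPU2), dim3((count / 1024) + 1), dim3(1024), 0, 0,
                        d_points, d_indexOfCloest, count);
     hipEventRecord(end, 0);
     hipEventSynchronize(end);
     float ms = 0.0f;
     hipEventElapsedTime(&ms, beg, end);   // kernel-only time in milliseconds
     hipEventDestroy(beg);
     hipEventDestroy(end);
*/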
/*
Run 0 tooks 172 millis
Run 1 tooks 156 millis
Run 2 tooks 156 millis
Run 3 tooks 156 millis
Run 4 tooks 156 millis
Run 5 tooks 165 millis
Run 6 tooks 171 millis
Run 7 tooks 157 millis
Run 8 tooks 172 millis
Run 9 tooks 156 millis
Fastest time: 156
Final results:
0.6634
1.5760
2.4348
3.8022
4.3039
5.5750
6.3481
7.5505
8.2954
9.7554
*/
/*
set q 60-70
Run 60 tooks 94 millis
Run 61 tooks 109 millis
Run 62 tooks 94 millis
Run 63 tooks 78 millis
Run 64 tooks 78 millis
Run 65 tooks 78 millis
Run 66 tooks 78 millis
Run 67 tooks 78 millis
Run 68 tooks 78 millis
Run 69 tooks 78 millis
Fastest time: 78
Final results:
0.4742
1.8046
2.2075
3.2417
4.4452
5.1367
6.8928
7.7888
8.337
9.1859
*/
/*
set thread 1024
Run 0 tooks 63 millis
Run 1 tooks 62 millis
Run 2 tooks 63 millis
Run 3 tooks 46 millis
Run 4 tooks 47 millis
Run 5 tooks 62 millis
Run 6 tooks 62 millis
Run 7 tooks 47 millis
Run 8 tooks 63 millis
Run 9 tooks 47 millis
Fastest time: 46
Final results:
0.1388
1.6690
2.5918
3.1731
4.1459
5.6047
6.3757
7.4032
8.5540
9.9065
*/
/*
set compute_20 -> compute_20
Run 0 tooks 47 millis
Run 1 tooks 47 millis
Run 2 tooks 62 millis
Run 3 tooks 63 millis
Run 4 tooks 46 millis
Run 5 tooks 63 millis
Run 6 tooks 47 millis
Run 7 tooks 62 millis
Run 8 tooks 47 millis
Run 9 tooks 67 millis
Fastest time: 46
Final results:
0.4752
1.594
2.7661
3.9382
4.8911
5.3446
6.6047
7.3418
8.1654
9.4205
*/
/*
use FindCloestGPU2
which use 1024 float3 shared memory
Run 0 tooks 31 millis
Run 1 tooks 47 millis
Run 2 tooks 31 millis
Run 3 tooks 47 millis
Run 4 tooks 47 millis
Run 5 tooks 31 millis
Run 6 tooks 32 millis
Run 7 tooks 31 millis
Run 8 tooks 32 millis
Run 9 tooks 41 millis
Fastest time: 31
Final results:
0.2876
1.6022
2.5693
3.4147
4.6879
5.5743
6.9816
7.5201
8.6942
9.4043
*/
| a93885eab66daa704e64e3c5f8e22707e3301f65.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <conio.h>
#include <ctime>
#include "FindCloestGPU.h"
using namespace std;
int main()
{
srand(time(NULL));
// number of points
const int count = 10000;
// array of points
int *indexOfCloest = new int[count];
float3d *points = new float3d[count];
int *d_indexOfCloest = new int[count];
float3d *d_points = new float3d[count];
// create a list of random points
for (int i = 0; i < count; i++)
{
points[i].x = (float)(rand() % 10000 - 5000);
points[i].y = (float)(rand() % 10000 - 5000);
points[i].z = (float)(rand() % 10000 - 5000);
}
// allocate GPU memory
cudaMalloc(&d_points, sizeof(float3d)*count);
cudaMalloc(&d_indexOfCloest, sizeof(int)*count);
//copy from CPU -> GPU
cudaMemcpy(d_points, points, sizeof(float3d) * count, cudaMemcpyHostToDevice);
//cudaMemcpy(d_indexOfCloest, indexOfCloest, sizeof(int)*count, cudaMemcpyHostToDevice);
// track the fast time so far
long fastest = 1000000;
// run the algorithm 10 times
for (int q = 0; q < 10; q++)
{
long startTime = clock();
// Run the algorithm
//FindCloestCPU(points, indexOfCloest, count);
FindCloestGPU2<<<(count / 1024) + 1, 1024 >>>(d_points, d_indexOfCloest, count);
cudaMemcpy(indexOfCloest, d_indexOfCloest, sizeof(int)*count, cudaMemcpyDeviceToHost);
long finishTime = clock();
cout << "Run " << q << " tooks " << (finishTime - startTime) << " millis " << endl;
// if that run was faster, update the fastest time
if ((finishTime - startTime) < fastest)
{
fastest = finishTime - startTime;
}
}
cout << "Fastest time: " << fastest << endl;
cout << "Final results: " << endl;
for (int i = 0; i < 10; i++)
{
cout << i << "." << indexOfCloest[i] << endl;
}
delete[] indexOfCloest;
delete[] points;
cudaFree(d_points);
cudaFree(d_indexOfCloest);
cudaDeviceReset();
//_getch();
return 0;
}
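/* Editor's note (illustrative sketch, not part of the original program):
   None of the cudaMalloc/cudaMemcpy calls above check their return codes.
   A small checking macro is a common way to surface failures early
   (fprintf requires <cstdio>):

     #define CUDA_CHECK(call)                                           \
       do {                                                             \
         cudaError_t err_ = (call);                                     \
         if (err_ != cudaSuccess) {                                     \
           fprintf(stderr, "CUDA error %s at %s:%d\n",                  \
                   cudaGetErrorString(err_), __FILE__, __LINE__);       \
           exit(EXIT_FAILURE);                                          \
         }                                                              \
       } while (0)

     // usage: CUDA_CHECK(cudaMalloc(&d_points, sizeof(float3d) * count));
*/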
/*
Run 0 tooks 172 millis
Run 1 tooks 156 millis
Run 2 tooks 156 millis
Run 3 tooks 156 millis
Run 4 tooks 156 millis
Run 5 tooks 165 millis
Run 6 tooks 171 millis
Run 7 tooks 157 millis
Run 8 tooks 172 millis
Run 9 tooks 156 millis
Fastest time: 156
Final results:
0.6634
1.5760
2.4348
3.8022
4.3039
5.5750
6.3481
7.5505
8.2954
9.7554
*/
/*
set q 60-70
Run 60 tooks 94 millis
Run 61 tooks 109 millis
Run 62 tooks 94 millis
Run 63 tooks 78 millis
Run 64 tooks 78 millis
Run 65 tooks 78 millis
Run 66 tooks 78 millis
Run 67 tooks 78 millis
Run 68 tooks 78 millis
Run 69 tooks 78 millis
Fastest time: 78
Final results:
0.4742
1.8046
2.2075
3.2417
4.4452
5.1367
6.8928
7.7888
8.337
9.1859
*/
/*
set thread 1024
Run 0 tooks 63 millis
Run 1 tooks 62 millis
Run 2 tooks 63 millis
Run 3 tooks 46 millis
Run 4 tooks 47 millis
Run 5 tooks 62 millis
Run 6 tooks 62 millis
Run 7 tooks 47 millis
Run 8 tooks 63 millis
Run 9 tooks 47 millis
Fastest time: 46
Final results:
0.1388
1.6690
2.5918
3.1731
4.1459
5.6047
6.3757
7.4032
8.5540
9.9065
*/
/*
set compute_20 -> compute_20
Run 0 tooks 47 millis
Run 1 tooks 47 millis
Run 2 tooks 62 millis
Run 3 tooks 63 millis
Run 4 tooks 46 millis
Run 5 tooks 63 millis
Run 6 tooks 47 millis
Run 7 tooks 62 millis
Run 8 tooks 47 millis
Run 9 tooks 67 millis
Fastest time: 46
Final results:
0.4752
1.594
2.7661
3.9382
4.8911
5.3446
6.6047
7.3418
8.1654
9.4205
*/
/*
use FindCloestGPU2
which use 1024 float3 shared memory
Run 0 tooks 31 millis
Run 1 tooks 47 millis
Run 2 tooks 31 millis
Run 3 tooks 47 millis
Run 4 tooks 47 millis
Run 5 tooks 31 millis
Run 6 tooks 32 millis
Run 7 tooks 31 millis
Run 8 tooks 32 millis
Run 9 tooks 41 millis
Fastest time: 31
Final results:
0.2876
1.6022
2.5693
3.4147
4.6879
5.5743
6.9816
7.5201
8.6942
9.4043
*/
|
07f66e990058966514e8757061b44cae4d7796b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<ctime>
using namespace std;
template<unsigned int DIMX, unsigned int DIMY, typename T>
__global__ void transpose(T *in_data, T *out_data, unsigned int nx, unsigned int ny) {
//padding = 2
__shared__ T tile[DIMY][DIMX*2 + 2];
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x * 2;
unsigned int idy = threadIdx.y + blockDim.y * blockIdx.y;
if(idx + blockDim.x < nx && idy < ny) {
tile[threadIdx.y][threadIdx.x] = in_data[idy * nx + idx];
tile[threadIdx.y][threadIdx.x + blockDim.x] = in_data[idy * nx + idx + blockDim.x];
__syncthreads();
unsigned int posB = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int column = posB / blockDim.y;
unsigned int row = posB % blockDim.y;
idx = column + blockDim.x * blockIdx.x * 2;
idy = row + blockDim.y * blockIdx.y;
out_data[idx * ny + idy] = tile[row][column];
out_data[(idx + blockDim.x) * ny + idy] = tile[row][column + blockDim.x];
}
}
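/* Editor's note (worked arithmetic, not part of the original program):
   The "+ 2" padding widens each shared row from DIMX*2 = 64 to 66 ints.
   A warp reads a tile column (tile[row][column] with row varying across
   the 32 threads), so consecutive accesses are one row stride apart. With
   an unpadded stride of 64 words every access lands in the same one of
   the 32 shared-memory banks (gcd(64, 32) = 32), a 32-way conflict; with
   66 (gcd(66, 32) = 2) they spread over 16 banks, a 2-way conflict. An
   odd stride such as DIMX*2 + 1 = 65 (gcd = 1) would avoid the conflict
   entirely. This assumes the 4-byte int element type used in main(). */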
template<typename T>
void transposeHost(T *in, T* out, unsigned int nx, unsigned int ny) {
for(int i = 0;i < nx;++i) {
for(int j = 0;j < ny;++j) {
out[i * ny + j] = in[j * nx + i];
}
}
}
int main(int argc, char *argv[]) {
unsigned int nx = 1 << 9;
unsigned int ny = 1 << 9;
constexpr unsigned int blockx = 32;
constexpr unsigned int blocky = 32;
clock_t start, end;
int in[nx * ny], out[nx * ny], *in_dev, *out_dev;
auto init = [](auto*in ,unsigned int size)->void {
for(int i = 0;i < size;++i) {
in[i] = random()%1000;
}
};
init(in, nx * ny);
hipMalloc((void**)&in_dev, sizeof(in));
hipMalloc((void**)&out_dev, sizeof(in));
hipMemcpy(in_dev, in ,sizeof(in), hipMemcpyHostToDevice);
hipDeviceSynchronize();
transposeHost(in, out, nx, ny);
dim3 block(blockx, blocky);
dim3 grid((nx + blockx - 1) / blockx / 2, (ny + blocky - 1) / blocky);
start = clock();
hipLaunchKernelGGL(( transpose<blockx, blocky>), dim3(grid), dim3(block), 0, 0, in_dev, out_dev, nx,ny);
hipDeviceSynchronize();
end = clock();
cout <<" gpu time: " << end - start<<endl;
hipMemcpy(in, out_dev,sizeof(in), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(in_dev);
hipFree(out_dev);
int n = 0;
for (int i = 0;i < nx * ny;++i) {
if(out[i] != in[i]) {
n++;
}
}
cout << n << endl;
return 0;
}
| 07f66e990058966514e8757061b44cae4d7796b7.cu | #include<iostream>
#include<ctime>
using namespace std;
template<unsigned int DIMX, unsigned int DIMY, typename T>
__global__ void transpose(T *in_data, T *out_data, unsigned int nx, unsigned int ny) {
//padding = 2
__shared__ T tile[DIMY][DIMX*2 + 2];
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x * 2;
unsigned int idy = threadIdx.y + blockDim.y * blockIdx.y;
if(idx + blockDim.x < nx && idy < ny) {
tile[threadIdx.y][threadIdx.x] = in_data[idy * nx + idx];
tile[threadIdx.y][threadIdx.x + blockDim.x] = in_data[idy * nx + idx + blockDim.x];
__syncthreads();
unsigned int posB = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int column = posB / blockDim.y;
unsigned int row = posB % blockDim.y;
idx = column + blockDim.x * blockIdx.x * 2;
idy = row + blockDim.y * blockIdx.y;
out_data[idx * ny + idy] = tile[row][column];
out_data[(idx + blockDim.x) * ny + idy] = tile[row][column + blockDim.x];
}
}
template<typename T>
void transposeHost(T *in, T* out, unsigned int nx, unsigned int ny) {
for(int i = 0;i < nx;++i) {
for(int j = 0;j < ny;++j) {
out[i * ny + j] = in[j * nx + i];
}
}
}
int main(int argc, char *argv[]) {
unsigned int nx = 1 << 9;
unsigned int ny = 1 << 9;
constexpr unsigned int blockx = 32;
constexpr unsigned int blocky = 32;
clock_t start, end;
int in[nx * ny], out[nx * ny], *in_dev, *out_dev;
auto init = [](auto*in ,unsigned int size)->void {
for(int i = 0;i < size;++i) {
in[i] = random()%1000;
}
};
init(in, nx * ny);
cudaMalloc((void**)&in_dev, sizeof(in));
cudaMalloc((void**)&out_dev, sizeof(in));
cudaMemcpy(in_dev, in ,sizeof(in), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
transposeHost(in, out, nx, ny);
dim3 block(blockx, blocky);
dim3 grid((nx + blockx - 1) / blockx / 2, (ny + blocky - 1) / blocky);
start = clock();
transpose<blockx, blocky><<<grid, block>>>(in_dev, out_dev, nx,ny);
cudaDeviceSynchronize();
end = clock();
cout <<" gpu time: " << end - start<<endl;
cudaMemcpy(in, out_dev,sizeof(in), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(in_dev);
cudaFree(out_dev);
int n = 0;
for (int i = 0;i < nx * ny;++i) {
if(out[i] != in[i]) {
n++;
}
}
cout << n << endl;
return 0;
}
|
0fbddafba8c04bd9c56e8a61a96be9c09f5096be.hip | // !!! This is a file automatically generated by hipify!!!
#include "cu_matrix.cuh"
std::set<FeatType *> CuMatrix::MemoryPool;
CuMatrix::CuMatrix(Matrix M, const hipblasHandle_t &handle_)
: Matrix(M.getRows(), M.getCols(), M.getData()) {
cudaStat = hipError_t();
handle = handle_;
nnz = 0;
csrVal = NULL;
csrColInd = NULL;
isSparse = 0;
deviceMalloc();
if (getData() != NULL) deviceSetMatrix();
}
CuMatrix::CuMatrix(){
cudaStat = hipError_t();
nnz = 0;
csrVal = NULL;
csrColInd = NULL;
csrRowInd = NULL;
isSparse = 0;
setData(NULL);
};
Matrix CuMatrix::getMatrix() {
updateMatrixFromGPU();
return Matrix(getRows(), getCols(), getData());
}
void CuMatrix::freeGPU() {
for (auto ptr : MemoryPool) hipFree(ptr);
}
CuMatrix CuMatrix::extractRow(unsigned row) {
FeatType *data = getData() ? (getData() + row * getCols()) : NULL;
CuMatrix rowVec;
rowVec.handle = handle;
rowVec.setData(data);
rowVec.setRows(1);
rowVec.setCols(getCols());
rowVec.devPtr = devPtr + row * getCols();
return rowVec;
}
void CuMatrix::deviceMalloc() {
unsigned rows = this->getRows();
unsigned cols = this->getCols();
cudaStat = hipMalloc((void **)&devPtr, rows * cols * sizeof(FeatType));
if (cudaStat != hipSuccess) {
printf("device memory allocation failed %u\n", cudaStat);
exit(EXIT_FAILURE);
}
MemoryPool.insert(devPtr);
}
void CuMatrix::deviceSetMatrix() {
unsigned rows = this->getRows();
unsigned cols = this->getCols();
FeatType *data = this->getData();
stat = hipblasSetMatrix(rows, cols, sizeof(float), data, rows, devPtr, rows);
if (stat != HIPBLAS_STATUS_SUCCESS) {
switch (stat) {
case HIPBLAS_STATUS_NOT_INITIALIZED:
printf("HIPBLAS_STATUS_NOT_INITIALIZED\n");
break;
case HIPBLAS_STATUS_INVALID_VALUE:
printf("HIPBLAS_STATUS_INVALID_VALUE\n");
break;
case HIPBLAS_STATUS_MAPPING_ERROR:
printf("HIPBLAS_STATUS_MAPPING_ERROR\n");
break;
}
hipFree(devPtr);
hipblasDestroy(handle);
exit(EXIT_FAILURE);
}
}
void CuMatrix::updateMatrixFromGPU() {
unsigned rows = this->getRows();
unsigned cols = this->getCols();
if (getData() == NULL) setData(new FeatType[getNumElemts()]);
FeatType *data = this->getData();
stat = hipblasGetMatrix(rows, cols, sizeof(float), devPtr, rows, data, rows);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf("data upload failed\n");
switch (stat) {
case HIPBLAS_STATUS_NOT_INITIALIZED:
printf("HIPBLAS_STATUS_NOT_INITIALIZED\n");
break;
case HIPBLAS_STATUS_INVALID_VALUE:
printf("HIPBLAS_STATUS_INVALID_VALUE\n");
break;
case HIPBLAS_STATUS_MAPPING_ERROR:
printf("HIPBLAS_STATUS_MAPPING_ERROR\n");
break;
}
hipFree(devPtr);
hipblasDestroy(handle);
exit(EXIT_FAILURE);
}
}
CuMatrix::~CuMatrix() {
}
void CuMatrix::scale(const float &alpha) {
hipblasSscal(handle, getNumElemts(), &alpha, devPtr, 1);
}
CuMatrix CuMatrix::dot(CuMatrix &B, bool A_trans, bool B_trans, float alpha,
float beta) {
if (handle != B.handle) {
std::cout << "Handle don't match\n";
exit(EXIT_FAILURE);
}
hipblasOperation_t ATrans = A_trans ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t BTrans = B_trans ? HIPBLAS_OP_T : HIPBLAS_OP_N;
// 1. cublas is using col-major
// 2. when cpy into/out device memory, it will do Transpose
// 3. C=AB and C^T= (B^T*A^T)
// This means just swap the order of multiplicaiton
// Guide: https://peterwittek.com/cublas-matrix-c-style.html
Matrix AT = Matrix(getCols(), getRows(), getData());
Matrix BT = Matrix(B.getCols(), B.getRows(), B.getData());
unsigned CRow = A_trans ? AT.getRows() : getRows();
unsigned CCol = B_trans ? BT.getCols() : B.getCols();
Matrix mat_C(CRow, CCol, (char *)NULL); // real C
unsigned k = A_trans ? getRows() : getCols();
CuMatrix C(mat_C, handle);
stat = hipblasSgemm(handle, BTrans, ATrans, C.getCols(), C.getRows(), k,
&alpha, B.devPtr, B.getCols(), devPtr, getCols(), &beta,
C.devPtr, C.getCols());
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf("SGEMM ERROR\n");
hipFree(devPtr);
hipblasDestroy(handle);
exit(EXIT_FAILURE);
}
return C;
}
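/* Editor's note (illustrative mapping, not part of the original file):
   The comment above uses the identity C^T = B^T * A^T. cuBLAS assumes
   column-major storage, so a row-major m x k matrix handed to it reads as
   its own transpose (k x m) without any copy. To obtain the row-major
   product C(m x n) = A(m x k) * B(k x n), the call therefore asks cuBLAS
   for B^T * A^T, whose column-major n x m result is byte-for-byte the
   row-major C. For the no-transpose case the hipblasSgemm arguments above
   map as:

     m' = C.getCols() = n      n' = C.getRows() = m      k' = k
     A' = B.devPtr, lda' = B.getCols() = n
     B' = devPtr,   ldb' = getCols()   = k
     C' = C.devPtr, ldc' = C.getCols() = n
*/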
CuMatrix CuMatrix::transpose() {
// CuMatrix res(Matrix(getCols(), getRows(),
// (char *)malloc(getNumElemts() * sizeof(FeatType))),
// handle);
CuMatrix res(Matrix(getCols(), getRows(), (char *)NULL), handle);
float alpha = 1.0;
float beta = 0.;
stat = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, getRows(), getCols(),
&alpha, devPtr, getCols(), &beta, devPtr, getRows(),
res.devPtr, getRows());
if (stat != HIPBLAS_STATUS_SUCCESS) {
hipblasDestroy(handle);
exit(EXIT_FAILURE);
}
return res;
}
| 0fbddafba8c04bd9c56e8a61a96be9c09f5096be.cu | #include "cu_matrix.cuh"
std::set<FeatType *> CuMatrix::MemoryPool;
CuMatrix::CuMatrix(Matrix M, const cublasHandle_t &handle_)
: Matrix(M.getRows(), M.getCols(), M.getData()) {
cudaStat = cudaError_t();
handle = handle_;
nnz = 0;
csrVal = NULL;
csrColInd = NULL;
isSparse = 0;
deviceMalloc();
if (getData() != NULL) deviceSetMatrix();
}
CuMatrix::CuMatrix(){
cudaStat = cudaError_t();
nnz = 0;
csrVal = NULL;
csrColInd = NULL;
csrRowInd = NULL;
isSparse = 0;
setData(NULL);
};
Matrix CuMatrix::getMatrix() {
updateMatrixFromGPU();
return Matrix(getRows(), getCols(), getData());
}
void CuMatrix::freeGPU() {
for (auto ptr : MemoryPool) cudaFree(ptr);
}
CuMatrix CuMatrix::extractRow(unsigned row) {
FeatType *data = getData() ? (getData() + row * getCols()) : NULL;
CuMatrix rowVec;
rowVec.handle = handle;
rowVec.setData(data);
rowVec.setRows(1);
rowVec.setCols(getCols());
rowVec.devPtr = devPtr + row * getCols();
return rowVec;
}
void CuMatrix::deviceMalloc() {
unsigned rows = this->getRows();
unsigned cols = this->getCols();
cudaStat = cudaMalloc((void **)&devPtr, rows * cols * sizeof(FeatType));
if (cudaStat != cudaSuccess) {
printf("device memory allocation failed %u\n", cudaStat);
exit(EXIT_FAILURE);
}
MemoryPool.insert(devPtr);
}
void CuMatrix::deviceSetMatrix() {
unsigned rows = this->getRows();
unsigned cols = this->getCols();
FeatType *data = this->getData();
stat = cublasSetMatrix(rows, cols, sizeof(float), data, rows, devPtr, rows);
if (stat != CUBLAS_STATUS_SUCCESS) {
switch (stat) {
case CUBLAS_STATUS_NOT_INITIALIZED:
printf("CUBLAS_STATUS_NOT_INITIALIZED\n");
break;
case CUBLAS_STATUS_INVALID_VALUE:
printf("CUBLAS_STATUS_INVALID_VALUE\n");
break;
case CUBLAS_STATUS_MAPPING_ERROR:
printf("CUBLAS_STATUS_MAPPING_ERROR\n");
break;
}
cudaFree(devPtr);
cublasDestroy(handle);
exit(EXIT_FAILURE);
}
}
void CuMatrix::updateMatrixFromGPU() {
unsigned rows = this->getRows();
unsigned cols = this->getCols();
if (getData() == NULL) setData(new FeatType[getNumElemts()]);
FeatType *data = this->getData();
stat = cublasGetMatrix(rows, cols, sizeof(float), devPtr, rows, data, rows);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf("data upload failed\n");
switch (stat) {
case CUBLAS_STATUS_NOT_INITIALIZED:
printf("CUBLAS_STATUS_NOT_INITIALIZED\n");
break;
case CUBLAS_STATUS_INVALID_VALUE:
printf("CUBLAS_STATUS_INVALID_VALUE\n");
break;
case CUBLAS_STATUS_MAPPING_ERROR:
printf("CUBLAS_STATUS_MAPPING_ERROR\n");
break;
}
cudaFree(devPtr);
cublasDestroy(handle);
exit(EXIT_FAILURE);
}
}
CuMatrix::~CuMatrix() {
}
void CuMatrix::scale(const float &alpha) {
cublasSscal(handle, getNumElemts(), &alpha, devPtr, 1);
}
CuMatrix CuMatrix::dot(CuMatrix &B, bool A_trans, bool B_trans, float alpha,
float beta) {
if (handle != B.handle) {
std::cout << "Handle don't match\n";
exit(EXIT_FAILURE);
}
cublasOperation_t ATrans = A_trans ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t BTrans = B_trans ? CUBLAS_OP_T : CUBLAS_OP_N;
// 1. cublas is using col-major
// 2. when cpy into/out device memory, it will do Transpose
// 3. C=AB and C^T= (B^T*A^T)
// This means just swap the order of multiplicaiton
// Guide: https://peterwittek.com/cublas-matrix-c-style.html
Matrix AT = Matrix(getCols(), getRows(), getData());
Matrix BT = Matrix(B.getCols(), B.getRows(), B.getData());
unsigned CRow = A_trans ? AT.getRows() : getRows();
unsigned CCol = B_trans ? BT.getCols() : B.getCols();
Matrix mat_C(CRow, CCol, (char *)NULL); // real C
unsigned k = A_trans ? getRows() : getCols();
CuMatrix C(mat_C, handle);
stat = cublasSgemm(handle, BTrans, ATrans, C.getCols(), C.getRows(), k,
&alpha, B.devPtr, B.getCols(), devPtr, getCols(), &beta,
C.devPtr, C.getCols());
if (stat != CUBLAS_STATUS_SUCCESS) {
printf("SGEMM ERROR\n");
cudaFree(devPtr);
cublasDestroy(handle);
exit(EXIT_FAILURE);
}
return C;
}
CuMatrix CuMatrix::transpose() {
// CuMatrix res(Matrix(getCols(), getRows(),
// (char *)malloc(getNumElemts() * sizeof(FeatType))),
// handle);
CuMatrix res(Matrix(getCols(), getRows(), (char *)NULL), handle);
float alpha = 1.0;
float beta = 0.;
stat = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, getRows(), getCols(),
&alpha, devPtr, getCols(), &beta, devPtr, getRows(),
res.devPtr, getRows());
if (stat != CUBLAS_STATUS_SUCCESS) {
cublasDestroy(handle);
exit(EXIT_FAILURE);
}
return res;
}
|
317387d487170d3b5bf4d0cf7fbb98a89a7e541a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
// GPU version of im2col
template <typename Dtype>
__global__ void im2col_gpu_kernel(
const int n, // total number of threads
const Dtype* data_im, // input image data
const int height, const int width, // image height and width
const int kernel_h, const int kernel_w, // kernel height and width
const int pad_h, const int pad_w, // padding height and width
const int stride_h, const int stride_w, // stride height and width
const int dilation_h, const int dilation_w, // dilation height and width
const int height_col, const int width_col, // output col height and width
Dtype* data_col // output col data
) {
CUDA_KERNEL_LOOP(index, n) //cudanindexblock
{
//blockh
const int h_index = index / width_col;
//block col
const int h_col = h_index % height_col;
//colw
const int w_col = index % width_col;
//imblock
const int c_im = h_index / height_col;
//
const int c_col = c_im * kernel_h * kernel_w;
//
const int h_offset = h_col * stride_h - pad_h;
//
const int w_offset = w_col * stride_w - pad_w;
//
Dtype* data_col_ptr = data_col;
//block
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
//im
const Dtype* data_im_ptr = data_im;
//blockim
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
//
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
//h
int h_im = h_offset + i * dilation_h;
//w
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
//
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
//im2col gpu
void im2col_gpu(
const Dtype* data_im,//
const int channels,//
const int height, const int width,//
const int kernel_h, const int kernel_w,//
const int pad_h, const int pad_w,//
const int stride_h, const int stride_w,//
const int dilation_h, const int dilation_w,//
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
//channel * height_col * width_col
//col
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
//col
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
//kernel
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
//im2col_gpu_kernel
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
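/* Editor's note (worked example, not part of the original file):
   For a 5x5 input with kernel_h = kernel_w = 3, pad = 1, stride = 1 and
   dilation = 1, the effective kernel extent is dilation * (kernel - 1) + 1
   = 3, so height_col = (5 + 2*1 - 3) / 1 + 1 = 5 and width_col = 5. With
   C input channels the launch above then uses num_kernels = C * 5 * 5
   threads, and data_col holds C * 3 * 3 rows of 5 * 5 = 25 columns. */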
// Specializations for single and double precision
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
//im2colcol2im
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(
const int n,//kernel
const Dtype* data_im,//im
const int* im_shape,//imshape
const int* col_shape,//col shape
const int* kernel_shape,//kernel
const int* pad,//
const int* stride,//
const int* dilation,//
Dtype* data_col//
) {
//
int d_temp[num_axes]; // NOLINT(runtime/arrays)
//
int d_iter[num_axes]; // NOLINT(runtime/arrays)
//hostdevice
//
__shared__ int shared_dilation[num_axes];
//kernel shape
__shared__ int shared_kernel_shape[num_axes];
//shape pad
__shared__ int shared_pad[num_axes];
//
__shared__ int shared_stride[num_axes];
//col
__shared__ int shared_col_shape[num_axes + 1];
//im
__shared__ int shared_im_shape[num_axes + 1];
//
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
//
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
//
__syncthreads();
//
int i;
//kernel
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
//channel_in
int channel_in = index;
//channel
int channel_out = 1;
//
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
//=*
channel_out *= channel_in;
//col1
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
//
channel_out *= shared_col_shape[i + 1];
//
channel_out += d_temp[i];
//temptemp
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
//
channel_in *= shared_im_shape[i + 1];
//
channel_in += d_temp[i];
//
data_col_inc *= shared_col_shape[i + 1];
//
d_iter[i] = 0;
}
//blockdata
Dtype* data_col_ptr = data_col + channel_out;
//im
const Dtype* data_im_ptr = data_im + channel_in;
//
bool incremented;
//
do {
//
bool in_range = true;
//im
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
//
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
//
if (in_range) {
//
int data_im_offset = d_iter[0] * shared_dilation[0];
//im
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
//col
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
//0
*data_col_ptr = 0;
}
//data_col_inc
data_col_ptr += data_col_inc;
//
incremented = false;
//
for (i = num_axes - 1; i >= 0; --i) {
//
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
//0
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
//
++d_iter[i];
//
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
// The real im2col entry point: wraps the kernel launch and takes the kernel (thread) count as a parameter
template <typename Dtype>
void im2col_nd_gpu(
const Dtype* data_im,
const int num_spatial_axes,
const int num_kernels,
const int* im_shape,
const int* col_shape,
const int* kernel_shape,
const int* pad, const int* stride,
const int* dilation,
Dtype* data_col
) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 1>),
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 2>),
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 3>),
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 4>),
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 5>),
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 6>),
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 7>),
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 8>),
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 9>),
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 10>),
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
//0
Dtype val = 0;
const int w_im = index % width + pad_w;
//bolckim
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
//
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
//colim
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
//
int h_k = (h_im - h_col * stride_h);
//
int w_k = (w_im - w_col * stride_w);
//
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
//h
h_k /= dilation_h;
//w
w_k /= dilation_w;
//col
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
//index
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
//
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
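/* Editor's note (worked example, not part of the original file):
   The start/end bounds inside col2im_gpu_kernel select every column-buffer
   patch that read a given image pixel. For kernel_w = 3, pad_w = 1,
   stride_w = 1, dilation_w = 1 and width = width_col = 5, take the pixel
   in input column 2, i.e. w_im = 2 + pad_w = 3: kernel_extent_w = 3, so
   w_col_start = (3 - 3) / 1 + 1 = 1 and w_col_end = min(3 / 1 + 1, 5) = 4.
   Output columns 1, 2 and 3 each read that pixel, so its gradient is the
   sum of those three entries of data_col. */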
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
//col2imim2col
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
const int im_size, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 1>),
dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 2>),
dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 3:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 3>),
dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 4:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 4>),
dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 5:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 5>),
dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 6:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 6>),
dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 7:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 7>),
dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 8:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 8>),
dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 9:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 9>),
dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 10:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 10>),
dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
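/* Editor's note (illustrative usage sketch, not part of the original file):
   Assuming a device image buffer d_im of shape C x H x W and a device
   buffer d_col of C * kernel_h * kernel_w * height_col * width_col floats,
   the 2-D path instantiated above could be driven directly as:

     const int C = 3, H = 224, W = 224;
     const int k = 3, pad = 1, stride = 1, dilation = 1;
     // height_col = width_col = (224 + 2*1 - 3) / 1 + 1 = 224 here
     caffe::im2col_gpu<float>(d_im, C, H, W, k, k, pad, pad,
                              stride, stride, dilation, dilation, d_col);

   d_im and d_col are hypothetical buffers; in Caffe itself this routine is
   called from the convolution layers with blob-backed pointers. */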
| 317387d487170d3b5bf4d0cf7fbb98a89a7e541a.cu | #include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
// GPU version of im2col
template <typename Dtype>
__global__ void im2col_gpu_kernel(
const int n, // total number of threads
const Dtype* data_im, // input image data
const int height, const int width, // image height and width
const int kernel_h, const int kernel_w, // kernel height and width
const int pad_h, const int pad_w, // padding height and width
const int stride_h, const int stride_w, // stride height and width
const int dilation_h, const int dilation_w, // dilation height and width
const int height_col, const int width_col, // output col height and width
Dtype* data_col // output col data
) {
CUDA_KERNEL_LOOP(index, n) //cuda网格流执行程序,n表示总线程数,index表示block的索引
{
//计算每个block中的h高度索引
const int h_index = index / width_col;
//计算每个block 中的col高度偏移
const int h_col = h_index % height_col;
//计算col中的w偏移
const int w_col = index % width_col;
//计算im的相对偏移索引,最终得到每个block中的块的索引
const int c_im = h_index / height_col;
//计算最终的输出大小
const int c_col = c_im * kernel_h * kernel_w;
//计算高度偏移
const int h_offset = h_col * stride_h - pad_h;
//计算宽度偏移
const int w_offset = w_col * stride_w - pad_w;
//定义输出数据指针
Dtype* data_col_ptr = data_col;
//设置当前block中的数据偏移
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
//im数据指针
const Dtype* data_im_ptr = data_im;
//当前block的im数据偏移和最终数据指向
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
//遍历卷积核
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
//获取h索引
int h_im = h_offset + i * dilation_h;
//获取w索引
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
//移动更新指针
data_col_ptr += height_col * width_col;
}
}
}
}
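/* Editor's note (illustrative sketch, not part of the original file):
   CUDA_KERNEL_LOOP comes from Caffe's common headers, not this file. It is
   essentially a grid-stride loop of the form

     for (int index = blockIdx.x * blockDim.x + threadIdx.x;
          index < (n);
          index += blockDim.x * gridDim.x)

   so every thread handles index, index + gridDim.x * blockDim.x, ... and
   the kernel stays correct even if CAFFE_GET_BLOCKS(n) launches fewer than
   one thread per element. */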
template <typename Dtype>
//将im2col gpu 版本
void im2col_gpu(
const Dtype* data_im,//数据
const int channels,//通道
const int height, const int width,//高宽
const int kernel_h, const int kernel_w,//卷积核高宽
const int pad_h, const int pad_w,//扩充高宽
const int stride_h, const int stride_w,//步长高宽
const int dilation_h, const int dilation_w,//缩放高宽
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
//我们将启动channel * height_col * width_col内核,每个内核负责复制单通道网格。
//计算col高度
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
//计算col宽度
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
//计算kernel数量
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
//使用im2col_gpu_kernel进行计算
im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Specializations for single and double precision
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
//im2col和col2im
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(
const int n,//kernel数量
const Dtype* data_im,//im数据
const int* im_shape,//imshape
const int* col_shape,//col shape
const int* kernel_shape,//kernel维度数据
const int* pad,//扩充
const int* stride,//步长
const int* dilation,//缩放
Dtype* data_col//输出数据
) {
//初始化临时行长度
int d_temp[num_axes]; // NOLINT(runtime/arrays)
//初始化行内偏移
int d_iter[num_axes]; // NOLINT(runtime/arrays)
//host和device的共享数据:
//缩放
__shared__ int shared_dilation[num_axes];
//kernel shape
__shared__ int shared_kernel_shape[num_axes];
//shape pad
__shared__ int shared_pad[num_axes];
//步长
__shared__ int shared_stride[num_axes];
//col形状
__shared__ int shared_col_shape[num_axes + 1];
//im形状
__shared__ int shared_im_shape[num_axes + 1];
//实现数据之间的同步
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
//实现数据共享和同步
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
//同步线程数据
__syncthreads();
//
int i;
//设置kernel进行工作
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
    int channel_in = index;
    //kernel volume (number of output channels per input channel)
    int channel_out = 1;
    //walk the axes backwards: decompose the index into spatial positions
    //and accumulate the kernel volume
    for (i = num_axes - 1; i >= 0; --i) {
      d_temp[i] = channel_in % shared_col_shape[i + 1];
      channel_in /= shared_col_shape[i + 1];
      channel_out *= shared_kernel_shape[i];
    }
    //fold in the input channel to get the base output channel
    channel_out *= channel_in;
    //stride (in elements of data_col) between consecutive kernel offsets
    int data_col_inc = 1;
    for (i = 0; i < num_axes; ++i) {
      //accumulate the col dimension
      channel_out *= shared_col_shape[i + 1];
      //add the spatial offset in the col buffer
      channel_out += d_temp[i];
      //convert the col position to the (padded) image position
      d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
      //accumulate the image dimension
      channel_in *= shared_im_shape[i + 1];
      //add the image offset for this axis
      channel_in += d_temp[i];
      //update the output increment
      data_col_inc *= shared_col_shape[i + 1];
      //reset the kernel-offset counter
      d_iter[i] = 0;
    }
    //pointer to this thread's first element in data_col
    Dtype* data_col_ptr = data_col + channel_out;
    //pointer to this thread's base element in data_im
    const Dtype* data_im_ptr = data_im + channel_in;
    //whether the kernel-offset counter was advanced (odometer-style)
bool incremented;
    //loop over every offset of the kernel window
    do {
      //whether the current position lies inside the image
      bool in_range = true;
      //walk the axes and compute the image coordinate for this kernel offset
      for (i = 0; i < num_axes; ++i) {
        const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
        //check whether the coordinate is within bounds
        in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
        if (!in_range) { break; }
      }
      //inside the image: copy the value
      if (in_range) {
        //offset along the first axis, scaled by dilation
        int data_im_offset = d_iter[0] * shared_dilation[0];
        //fold in the remaining axes to get the flat image offset
        for (i = 1; i < num_axes; ++i) {
          data_im_offset *= shared_im_shape[i + 1];
          data_im_offset += d_iter[i] * shared_dilation[i];
        }
        //write the image value into the col buffer
        *data_col_ptr = data_im_ptr[data_im_offset];
      } else {
        //out of range (padding): write zero
        *data_col_ptr = 0;
      }
      //advance the output pointer by the precomputed increment
      data_col_ptr += data_col_inc;
      //odometer-style increment: stop only after all kernel offsets are visited
      incremented = false;
      //walk the axes from the innermost one
      for (i = num_axes - 1; i >= 0; --i) {
        //kernel extent along this axis
        const int d_max = shared_kernel_shape[i];
        if (d_iter[i] == d_max - 1) {
          //wrap this axis back to zero and carry to the next one
          d_iter[i] = 0;
        } else {  // d_iter[i] < d_max - 1
          //advance this axis
          ++d_iter[i];
          //a position was advanced, keep looping
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
//host-side wrapper for n-D im2col: takes the number of kernels as a parameter and dispatches on the number of spatial axes
template <typename Dtype>
void im2col_nd_gpu(
const Dtype* data_im,
const int num_spatial_axes,
const int num_kernels,
const int* im_shape,
const int* col_shape,
const int* kernel_shape,
const int* pad, const int* stride,
const int* dilation,
Dtype* data_col
) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
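// (the kernel fills its shared shape arrays with threads 0..num_axes, so the
// block must contain more threads than spatial axes)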
switch (num_spatial_axes) {
case 1:
im2col_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
im2col_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
im2col_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
im2col_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
im2col_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
im2col_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
im2col_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
im2col_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
im2col_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
im2col_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
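// col2im is the adjoint of im2col: each thread handles one image element and
// accumulates every entry of data_col that im2col would have copied from it,
// so overlapping kernel windows sum their contributions without atomics.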
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
    //accumulator for this image element
    Dtype val = 0;
    const int w_im = index % width + pad_w;
    //image row (in padded coordinates) handled by this thread
    const int h_im = (index / width) % height + pad_h;
    const int c_im = index / (width * height);
    int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
    int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
    // compute the start and end of the output
    //range of output (col) positions whose kernel window covers this pixel
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
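    // For example, with stride_h = 2, dilation_h = 1, kernel_h = 3 and a padded
    // row h_im = 7, the covering output rows satisfy h_col*2 <= 7 <= h_col*2 + 2,
    // i.e. only h_col = 3, which matches h_col_start = 3 and h_col_end = 4 above
    // (assuming height_col > 3).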
// TODO: use LCM of stride and dilation to avoid unnecessary loops
    //loop over the covering col positions and gather their values
    for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
      for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
        //row offset of this pixel inside the kernel window
        int h_k = (h_im - h_col * stride_h);
        //column offset of this pixel inside the kernel window
        int w_k = (w_im - w_col * stride_w);
        //only dilated positions actually sample this pixel
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          //recover the kernel row index
          h_k /= dilation_h;
          //recover the kernel column index
          w_k /= dilation_w;
          //flat index into data_col
          int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
                                height_col + h_col) * width_col + w_col;
          //accumulate the contribution
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
  //launch one thread per image element
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
//the n-D col2im kernel mirrors the n-D im2col kernel; only the final gather/accumulate step differs
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
const int im_size, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
col2im_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 2:
col2im_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 3:
col2im_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 4:
col2im_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 5:
col2im_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 6:
col2im_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 7:
col2im_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 8:
col2im_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 9:
col2im_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 10:
col2im_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
|
8aea4e952cc499397bc5976d278f53d2056444bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#ifdef __HIPCC__
#define CUDA_CALLABLE_MEMBER __host__ __device__
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define CUDA_CALLABLE_MEMBER
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
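// Each thread adds one offset from b to the matching character of a; with the
// values defined in main() this turns "Hello " into "World!", so the program
// prints "Hello World!".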
__global__ void hello(char* a, int* b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = { 15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
char* ad;
int* bd;
const int csize = N * sizeof(char);
const int isize = N * sizeof(int);
printf("%s", a);
hipMalloc((void**)& ad, csize);
hipMalloc((void**)& bd, isize);
hipMemcpy(ad, a, csize, hipMemcpyHostToDevice);
hipMemcpy(bd, b, isize, hipMemcpyHostToDevice);
dim3 dimBlock(blocksize, 1);
dim3 dimGrid(1, 1);
hello KERNEL_ARGS2(dimGrid, dimBlock) (ad, bd);
hipMemcpy(a, ad, csize, hipMemcpyDeviceToHost);
hipFree(ad);
hipFree(bd);
printf("%s\n", a);
return EXIT_SUCCESS;
} | 8aea4e952cc499397bc5976d278f53d2056444bc.cu |
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#ifdef __CUDACC__
#define CUDA_CALLABLE_MEMBER __host__ __device__
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define CUDA_CALLABLE_MEMBER
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
__global__ void hello(char* a, int* b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = { 15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
char* ad;
int* bd;
const int csize = N * sizeof(char);
const int isize = N * sizeof(int);
printf("%s", a);
cudaMalloc((void**)& ad, csize);
cudaMalloc((void**)& bd, isize);
cudaMemcpy(ad, a, csize, cudaMemcpyHostToDevice);
cudaMemcpy(bd, b, isize, cudaMemcpyHostToDevice);
dim3 dimBlock(blocksize, 1);
dim3 dimGrid(1, 1);
hello KERNEL_ARGS2(dimGrid, dimBlock) (ad, bd);
cudaMemcpy(a, ad, csize, cudaMemcpyDeviceToHost);
cudaFree(ad);
cudaFree(bd);
printf("%s\n", a);
return EXIT_SUCCESS;
} |
4ca797cbeb0e46a986d2b844ad71f8602ba3abc4.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace domainwall4d {
#undef GPU_STAGGERED_DIRAC
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_DOMAIN_WALL_DIRAC
#include <dw_dslash4_def.h> // Dslash4 Domain Wall kernels
#include <dw_dslash5_def.h> // Dslash5 Domain Wall kernels
#include <dw_dslash5inv_def.h> // Dslash5inv Domain Wall kernels
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
}
// declare the dslash events
#include <dslash_events.cuh>
using namespace domainwall4d;
#ifdef GPU_DOMAIN_WALL_DIRAC
template <typename sFloat, typename gFloat>
class DomainWallDslash4DPCCuda : public DslashCuda {
private:
const gFloat *gauge0, *gauge1;
const double mferm;
const double a;
const int DS_type;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z,
param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
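  // Autotuning strategy: grow block.x in warp-size steps first, then block.y
  // (bounded by Ls = X(4)), rejecting configurations that exceed the 16 KB
  // shared-memory cap or the maximum grid size.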
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.y
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
unsigned int sharedBytesPerThread() const { return 0; }
public:
DomainWallDslash4DPCCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const int dagger, const int DS_type)
: DslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1),
mferm(mferm), a(a), DS_type(DS_type)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~DomainWallDslash4DPCCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(DS_type){
case 0:
strcat(key.aux,",Dslash4");
break;
case 1:
strcat(key.aux,",Dslash5");
break;
case 2:
strcat(key.aux,",Dslash5inv");
break;
}
return key;
}
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(DS_type){
case 0:
DSLASH(domainWallDslash4, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 1:
DSLASH(domainWallDslash5, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 2:
DSLASH(domainWallDslash5inv, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
default:
errorQuda("invalid Dslash type");
}
}
long long flops() const { // FIXME for multi-GPU
long long Ls = in->X(4);
long long vol4d = in->VolumeCB() / Ls;
long long bulk = (Ls-2)*vol4d;
long long wall = 2*vol4d;
long long flops_Tmp;
switch(DS_type){
case 0:
flops_Tmp = (x ? 1368ll : 1320ll)*in->VolumeCB();
break;
case 1:
flops_Tmp = (x ? 48ll : 0 ) * in->VolumeCB() + 96ll*bulk + 120ll*wall;
break;
case 2:
flops_Tmp = 144ll*in->VolumeCB()*Ls + 3ll*Ls*(Ls-1ll);
break;
default:
errorQuda("invalid Dslash type");
}
return flops_Tmp;
}
};
#endif // GPU_DOMAIN_WALL_DIRAC
#include <dslash_policy.cuh>
//-----------------------------------------------------
// Modification for 4D preconditioned DWF operator
// Additional Arg. is added to give a function name.
//
// pre-defined DS_type list
// 0 = dslash4
// 1 = dslash5
// 2 = dslash5inv
//-----------------------------------------------------
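// (DS_type also selects the tuning-key suffix and the flops model inside the
// DomainWallDslash4DPCCuda class above.)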
void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &k2,
const int *commOverride, const int DS_type, TimeProfile &profile, const QudaDslashPolicy &dslashPolicy)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
dslashParam.parity = parity;
#ifdef GPU_DOMAIN_WALL_DIRAC
//currently splitting in space-time is implemented:
int dirs = 4;
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i = 0;i < dirs; i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new DomainWallDslash4DPCCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
DslashPolicyImp* dslashImp = NULL;
if (DS_type != 0) {
dslashImp = DslashFactory::create(QUDA_DSLASH_NC);
} else {
#ifndef GPU_COMMS
dslashImp = DslashFactory::create(dslashPolicy);
#else
dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
#endif
}
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume()/in->X(4), ghostFace, profile);
delete dslashImp;
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("4D preconditioned Domain wall dslash has not been built");
#endif
}
}
| 4ca797cbeb0e46a986d2b844ad71f8602ba3abc4.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace domainwall4d {
#undef GPU_STAGGERED_DIRAC
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_DOMAIN_WALL_DIRAC
#include <dw_dslash4_def.h> // Dslash4 Domain Wall kernels
#include <dw_dslash5_def.h> // Dslash5 Domain Wall kernels
#include <dw_dslash5inv_def.h> // Dslash5inv Domain Wall kernels
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
}
// declare the dslash events
#include <dslash_events.cuh>
using namespace domainwall4d;
#ifdef GPU_DOMAIN_WALL_DIRAC
template <typename sFloat, typename gFloat>
class DomainWallDslash4DPCCuda : public DslashCuda {
private:
const gFloat *gauge0, *gauge1;
const double mferm;
const double a;
const int DS_type;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z,
param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.y
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
unsigned int sharedBytesPerThread() const { return 0; }
public:
DomainWallDslash4DPCCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const int dagger, const int DS_type)
: DslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1),
mferm(mferm), a(a), DS_type(DS_type)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~DomainWallDslash4DPCCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(DS_type){
case 0:
strcat(key.aux,",Dslash4");
break;
case 1:
strcat(key.aux,",Dslash5");
break;
case 2:
strcat(key.aux,",Dslash5inv");
break;
}
return key;
}
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(DS_type){
case 0:
DSLASH(domainWallDslash4, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 1:
DSLASH(domainWallDslash5, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
case 2:
DSLASH(domainWallDslash5inv, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(),
(float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
break;
default:
errorQuda("invalid Dslash type");
}
}
long long flops() const { // FIXME for multi-GPU
long long Ls = in->X(4);
long long vol4d = in->VolumeCB() / Ls;
long long bulk = (Ls-2)*vol4d;
long long wall = 2*vol4d;
long long flops_Tmp;
switch(DS_type){
case 0:
flops_Tmp = (x ? 1368ll : 1320ll)*in->VolumeCB();
break;
case 1:
flops_Tmp = (x ? 48ll : 0 ) * in->VolumeCB() + 96ll*bulk + 120ll*wall;
break;
case 2:
flops_Tmp = 144ll*in->VolumeCB()*Ls + 3ll*Ls*(Ls-1ll);
break;
default:
errorQuda("invalid Dslash type");
}
return flops_Tmp;
}
};
#endif // GPU_DOMAIN_WALL_DIRAC
#include <dslash_policy.cuh>
//-----------------------------------------------------
// Modification for 4D preconditioned DWF operator
// Additional Arg. is added to give a function name.
//
// pre-defined DS_type list
// 0 = dslash4
// 1 = dslash5
// 2 = dslash5inv
//-----------------------------------------------------
void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &k2,
const int *commOverride, const int DS_type, TimeProfile &profile, const QudaDslashPolicy &dslashPolicy)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
dslashParam.parity = parity;
#ifdef GPU_DOMAIN_WALL_DIRAC
//currently splitting in space-time is implemented:
int dirs = 4;
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i = 0;i < dirs; i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new DomainWallDslash4DPCCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
DslashPolicyImp* dslashImp = NULL;
if (DS_type != 0) {
dslashImp = DslashFactory::create(QUDA_DSLASH_NC);
} else {
#ifndef GPU_COMMS
dslashImp = DslashFactory::create(dslashPolicy);
#else
dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
#endif
}
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume()/in->X(4), ghostFace, profile);
delete dslashImp;
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("4D preconditioned Domain wall dslash has not been built");
#endif
}
}
|
f69b68fe92057e11facd21f05fa9dd8f7d07aaaf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MLP_basic.h"
#include "my_device_function.cuh"
#define IDX2C(i,j,Id) (((j)*(Id))+(i)) // j -> row, i -> column
using namespace std;
MLP_basic:: MLP_basic()
{
total_layers = 0;
max_batch = 0;
alpha = 0;
ramda = 0;
d_target = NULL;
d_temp = NULL;
d_temp1 = NULL;
d_one_vector = NULL;
d_train_input = NULL;
d_train_target = NULL;
d_validation_input = NULL;
d_validation_target = NULL;
d_test_input = NULL;
d_test_target = NULL;
for(long i = 0 ; i < MAXIMUM_LAYERS ; i++)
{
neural[i] = 0;
W[i] = NULL;
b[i] = NULL;
d_W[i] = NULL;
d_b[i] = NULL;
d_a[i] = NULL;
d_z[i] = NULL;
d_delta[i] = NULL;
d_delta_W[i] = NULL;
d_delta_b[i] = NULL;
}
CURAND_CALL(hiprandCreateGenerator(&rand_gen,HIPRAND_RNG_PSEUDO_DEFAULT));
CUBLAS_CALL(hipblasCreate(&handle));
}
MLP_basic :: ~MLP_basic()
{
if(d_target != NULL) CUDA_CALL(hipFree(d_target));
if(d_temp != NULL) CUDA_CALL(hipFree(d_temp));
if(d_temp1 != NULL) CUDA_CALL(hipFree(d_temp1));
if(d_one_vector != NULL) CUDA_CALL(hipFree(d_one_vector));
if(d_train_input != NULL) CUDA_CALL(hipFree(d_train_input));
if(d_train_target != NULL) CUDA_CALL(hipFree(d_train_target));
if(d_validation_input != NULL) CUDA_CALL(hipFree(d_validation_input));
if(d_validation_target != NULL) CUDA_CALL(hipFree(d_validation_target));
if(d_test_input != NULL) CUDA_CALL(hipFree(d_test_input));
if(d_test_target != NULL) CUDA_CALL(hipFree(d_test_target));
for(long i = 0 ; i < MAXIMUM_LAYERS ; i++)
{
if(W[i] != NULL) free(W[i]);
if(b[i] != NULL) free(b[i]);
if(d_W[i] != NULL) CUDA_CALL(hipFree(d_W[i]));
if(d_b[i] != NULL) CUDA_CALL(hipFree(d_b[i]));
if(d_a[i] != NULL) CUDA_CALL(hipFree(d_a[i]));
if(d_z[i] != NULL) CUDA_CALL(hipFree(d_z[i]));
if(d_delta[i] != NULL) CUDA_CALL(hipFree(d_delta[i]));
if(d_delta_W[i] != NULL) CUDA_CALL(hipFree(d_delta_W[i]));
if(d_delta_b[i] != NULL) CUDA_CALL(hipFree(d_delta_b[i]));
}
CUBLAS_CALL(hipblasDestroy(handle));
CURAND_CALL(hiprandDestroyGenerator(rand_gen));
}
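// init() records the layer sizes and allocates all device buffers up front,
// sized for the maximum mini-batch and the largest layer width, so no device
// allocation is needed during training.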
void MLP_basic :: init(long *neurals,long layers,long max_batch_size,float alpha, float ramda)
{
this->total_layers = layers;
this->max_batch = max_batch_size;
this->alpha = alpha;
this->ramda = ramda;
for(long i = 0 ; i < this->total_layers ; i++)
{
this->neural[i] = neurals[i];
}
CUDA_CALL(hipMalloc(&d_target,sizeof(float)*neural[total_layers-1]*max_batch));
CUDA_CALL(hipMalloc(&d_a[0],sizeof(float)*neural[0]*max_batch));
CUDA_CALL(hipMalloc(&d_train_input,sizeof(float)*neural[0]*max_batch));
CUDA_CALL(hipMalloc(&d_train_target,sizeof(float)*neural[total_layers - 1]*max_batch));
CUDA_CALL(hipMalloc(&d_validation_input,sizeof(float)*neural[0]*max_batch));
CUDA_CALL(hipMalloc(&d_validation_target,sizeof(float)*neural[total_layers - 1]*max_batch));
CUDA_CALL(hipMalloc(&d_test_input,sizeof(float)*neural[0]*max_batch));
CUDA_CALL(hipMalloc(&d_test_target,sizeof(float)*neural[total_layers - 1]*max_batch));
long maximum = 0;
for(long i = 0 ; i < total_layers-1 ; i++)
{
W[i] = (float*)calloc(neural[i]*neural[i+1],sizeof(float));
b[i] = (float*)calloc(neural[i+1],sizeof(float));
CUDA_CALL(hipMalloc(&d_W[i],sizeof(float)*neural[i]*neural[i+1]));
CUDA_CALL(hipMalloc(&d_b[i],sizeof(float)*neural[i+1]));
CUDA_CALL(hipMalloc(&d_a[i+1],sizeof(float)*neural[i+1]*max_batch));
CUDA_CALL(hipMalloc(&d_z[i+1],sizeof(float)*neural[i+1]*max_batch));
CUDA_CALL(hipMalloc(&d_delta[i+1],sizeof(float)*neural[i+1]*max_batch));
CUDA_CALL(hipMalloc(&d_delta_W[i],sizeof(float)*neural[i+1]*neural[i]));
CUDA_CALL(hipMalloc(&d_delta_b[i],sizeof(float)*neural[i+1]));
if(neural[i] > maximum) maximum = neural[i];
}
CUDA_CALL(hipMalloc(&d_temp,sizeof(float)*maximum*max_batch)); //temp alloc
CUDA_CALL(hipMalloc(&d_temp1,sizeof(float)*maximum*max_batch));
float *one_vector;
one_vector = (float*)calloc(max_batch*maximum,sizeof(float));
for(long i = 0 ; i < max_batch*maximum ; i++) one_vector[i] = 1.0;
CUDA_CALL(hipMalloc(&d_one_vector,sizeof(float)*max_batch*maximum));
CUBLAS_CALL(hipblasSetMatrix(1,max_batch*maximum,sizeof(float),one_vector,1,d_one_vector,1));
free(one_vector);
}
void MLP_basic :: first_parameters_host_device()
{
for(long i = 0 ; i < total_layers -1 ; i++)
{
CUBLAS_CALL(hipblasSetMatrix(neural[i+1],neural[i],sizeof(float),W[i],neural[i+1],d_W[i],neural[i+1]));
CUBLAS_CALL(hipblasSetVector(neural[i+1],sizeof(float),b[i],1,d_b[i],1));
}
}
void MLP_basic :: first_random_parameter()
{
for(long i = 0 ; i < total_layers -1 ; i++)
{
CURAND_CALL(hiprandGenerateNormal(rand_gen,d_W[i],neural[i+1]*neural[i],PARA_MEAN,PARA_STD));
CURAND_CALL(hiprandGenerateNormal(rand_gen,d_b[i],neural[i+1],PARA_MEAN,PARA_STD));
}
}
void MLP_basic :: second_validation_test_set_host_device(float *validation_input, float* validation_target,
long validation_batch_size, float *test_input, float *test_target,long test_batch_size)
{
CUBLAS_CALL(hipblasSetMatrix(neural[0],validation_batch_size,sizeof(float),validation_input,neural[0],
d_validation_input,neural[0]));
CUBLAS_CALL(hipblasSetMatrix(neural[total_layers-1],validation_batch_size,sizeof(float),validation_target,
neural[total_layers-1],d_validation_target,neural[total_layers-1]));
CUBLAS_CALL(hipblasSetMatrix(neural[0],test_batch_size,sizeof(float),test_input,neural[0],
d_test_input,neural[0]));
CUBLAS_CALL(hipblasSetMatrix(neural[total_layers-1],test_batch_size,sizeof(float),test_target,
neural[total_layers-1],d_test_target,neural[total_layers-1]));
}
void MLP_basic :: third_train_set_host_device(float *train_input, float *train_target, long train_batch_size)
{
long threadsPerBolck = 1024;
long blocksPerGride = 0;
CUBLAS_CALL(hipblasSetMatrix(neural[0],train_batch_size,sizeof(float),train_input,neural[0],d_train_input,neural[0]));
CUBLAS_CALL(hipblasSetMatrix(neural[total_layers-1],train_batch_size,sizeof(float),train_target,neural[total_layers-1],d_train_target,neural[total_layers-1]));
blocksPerGride = (neural[0]*train_batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( deliver_front_to_rear), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_train_input,d_a[0],neural[0]*train_batch_size);
blocksPerGride = (neural[total_layers-1]*train_batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( deliver_front_to_rear), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_train_target,d_target,neural[total_layers-1]*train_batch_size);
}
void MLP_basic :: validataion_setting(long batch_size)
{
long threadsPerBolck = 1024;
long blocksPerGride = 0;
blocksPerGride = (neural[0]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( deliver_front_to_rear), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_validation_input,d_a[0],neural[0]*batch_size);
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( deliver_front_to_rear), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_validation_target,d_target,neural[total_layers-1]*batch_size);
}
void MLP_basic :: test_setting(long batch_size)
{
long threadsPerBolck = 1024;
long blocksPerGride = 0;
blocksPerGride = (neural[0]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( deliver_front_to_rear), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_test_input,d_a[0],neural[0]*batch_size);
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( deliver_front_to_rear), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_test_target,d_target,neural[total_layers-1]*batch_size);
}
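// Forward pass: for each hidden layer, z(i+1) = W(i)*a(i) + b(i) followed by
// a(i+1) = relu(z(i+1)); the output layer applies a sigmoid instead. Each GEMM
// processes the whole mini-batch at once, one column per sample.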
void MLP_basic :: forward_propagation(long batch_size)
{
float one = 1.0;
float zero = 0.0;
long threadsPerBolck = 1024;
long blocksPerGride = 0;
for(int i = 0 ; i < total_layers-2; i++)
{
//z(i+1) = w(i)*a(i)
CUBLAS_CALL(hipblasSgemm(handle, HIPBLAS_OP_N,HIPBLAS_OP_N,neural[i+1],batch_size,neural[i], &one,
d_W[i],neural[i+1], d_a[i],neural[i], &zero, d_z[i+1],neural[i+1]));
//z(i+1) = z(i+1) + b(i);
blocksPerGride = (neural[i+1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( add_bias), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_z[i+1],d_b[i],neural[i+1],neural[i+1]*batch_size);
// z(i+1) -> batch_normalization -> z(i+1)
//a(i+1) = F(z(i+1))
blocksPerGride = (neural[i+1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( relu), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_a[i+1],d_z[i+1],neural[i+1]*batch_size);
// a(i+1) -> drop out -> a(i+1)
}
//last layer
//z(last) = w(last-1)*a(last-1)
CUBLAS_CALL(hipblasSgemm(handle, HIPBLAS_OP_N,HIPBLAS_OP_N,neural[total_layers-1],batch_size,neural[total_layers-2],
&one, d_W[total_layers-2],neural[total_layers-1], d_a[total_layers-2],neural[total_layers-2],
&zero, d_z[total_layers-1],neural[total_layers-1]));
//z(last) = z(last) + b(last-1);
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( add_bias), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_z[total_layers-1],d_b[total_layers-2],
neural[total_layers-1],neural[total_layers-1]*batch_size);
//a(last) = F(z(last))
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( sigmoid), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_a[total_layers-1],
d_z[total_layers-1],neural[total_layers-1]*batch_size);
}
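// Backward pass (delta rule): the output-layer delta is built from (y - T) by
// the last_delta_before_transpose kernel and stored transposed as
// (batch x neurons); each hidden delta is then
// delta(i) = (delta(i+1) * W(i)) .* relu'(z(i)), with relu' computed by relu_inv.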
void MLP_basic :: delta_rule(long batch_size)
{
float one = 1.0;
float zero = 0.0;
long threadsPerBolck = 1024;
long blocksPerGride = 0;
// temp = (y-T)*(2*batch_size)
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( last_delta_before_transpose), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_temp,d_a[total_layers-1],
d_target,batch_size,neural[total_layers-1]*batch_size);
//delta4 = transpose(temp)
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transpose), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_delta[total_layers-1],d_temp,neural[total_layers-1],batch_size);
for(int i = total_layers - 2 ; i > 0 ; i--)
{
//delta(i) = delta(i+1)*W(i)
CUBLAS_CALL(hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,batch_size,neural[i],neural[i+1], &one,
d_delta[i+1],batch_size, d_W[i],neural[i+1], &zero, d_delta[i],batch_size));
//temp = f_inv(z(i+1))
blocksPerGride = (neural[i]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( relu_inv), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_temp,d_z[i],neural[i]*batch_size);
//temp1 = transpose(temp)
blocksPerGride = (neural[i]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transpose), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_temp1,d_temp,neural[i],batch_size);
//delta2 = delta(i+1).*temp1
blocksPerGride = (neural[i]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( basic_multi), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_delta[i],d_temp1,d_delta[i],neural[i]*batch_size);
}
}
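// Loss: loss_cross_entropy fills d_temp with per-element cross-entropy terms;
// two GEMMs against the all-ones vector reduce them over the outputs and then
// over the batch, scaled by 1/(outputs * batch_size).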
float MLP_basic :: get_loss_error(long batch_size)
{
float result;
float one = 1.0;
float zero = 0.0;
float number = 1.0/(neural[total_layers-1]*batch_size);
long threadsPerBolck = 1024;
long blocksPerGride = 0;
//temp = -0.5*(T*log(y) + (1-T)*log(1-y))
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( loss_cross_entropy), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_target,d_a[total_layers-1],d_temp,
neural[total_layers-1],batch_size);
//temp1(y,1) = temp(y,batch_size)*one_vector(batch_size,1) //
CUBLAS_CALL(hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,neural[total_layers-1],1, batch_size,
&one, d_temp,neural[total_layers-1], d_one_vector, batch_size, &zero, d_temp1,neural[total_layers-1]));
//d_temp(1,1) = one_vector(1,y) * temp(y,1)
CUBLAS_CALL(hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,1,1,neural[total_layers-1],
&number, d_one_vector,1, d_temp1,neural[total_layers-1], &zero, d_temp,1));
CUBLAS_CALL(hipblasGetMatrix(1,1,sizeof(float),d_temp,1,&result,1));
return result;
}
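// Accuracy: the matching kernel appears to flag each sample as correct (1) or
// not (0); a GEMM with the all-ones vector sums these flags, and the sum is
// divided by the batch size.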
float MLP_basic :: get_accuracy(long batch_size)
{
float result;
float one = 1.0;
float zero = 0.0;
long threadsPerBolck = 1024;
long blocksPerGride = 0;
// 1 if the prediction matches the target, 0 otherwise
blocksPerGride = (batch_size + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( matching), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_target,d_a[total_layers-1],d_temp1,
neural[total_layers-1],batch_size);
//d_temp() = temp1*one_vector //
CUBLAS_CALL(hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,1,1,batch_size,
&one, d_temp1,1, d_one_vector, batch_size, &zero, d_temp,1));
CUBLAS_CALL(hipblasGetMatrix(1,1,sizeof(float),d_temp,1,&result,1));
return result/batch_size;
}
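// L2 regularization term: sums W.*W over every weight matrix, reducing each
// matrix with two all-ones GEMMs; presumably combined with ramda by the caller
// as the weight-decay penalty.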
float MLP_basic :: get_sum_square_weight()
{
float result = 0.0;
float result1;
float one = 1.0;
float zero = 0.0;
long threadsPerBolck = 1024;
long blocksPerGride = 0;
for(long i = 0 ; i < total_layers-1 ; i++)
{
//temp = W.*W
blocksPerGride = (neural[i+1]*neural[i] + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( basic_multi), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_W[i],d_W[i],d_temp,neural[i+1]*neural[i]);
//temp1 = (one_vector)^T*temp //
CUBLAS_CALL(hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,1,neural[i], neural[i+1],
&one, d_one_vector,1, d_temp, neural[i+1], &zero, d_temp1,1));
//d_temp = temp1*one_vector //
CUBLAS_CALL(hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,1,1,neural[i],
&one, d_temp1,1, d_one_vector,neural[i], &zero, d_temp,1));
CUBLAS_CALL(hipblasGetMatrix(1,1,sizeof(float),d_temp,1,&result1,1));
result += result1;
}
return result;
}
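// Parameter update (mini-batch gradient descent with weight decay):
//   W(i) <- W(i) - alpha * (delta_W(i) + ramda * W(i))
//   b(i) <- b(i) - alpha * delta_b(i)
// where delta_W(i) = transpose(a(i) * delta(i+1)) and delta_b(i) sums
// delta(i+1) over the batch via the all-ones vector.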
void MLP_basic :: update(long batch_size)
{
float one = 1.0;
float zero = 0.0;
long threadsPerBolck = 1024;
long blocksPerGride = 0;
for(int i = 0 ; i < total_layers-1 ; i++)
{
//temp = a(i)*delta(i+1)
CUBLAS_CALL(hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,neural[i],neural[i+1],batch_size, &one,
d_a[i],neural[i], d_delta[i+1],batch_size, &zero, d_temp,neural[i]));
//delta_W(i) = transpose(temp)
blocksPerGride = (neural[i]*neural[i+1] + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transpose), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_delta_W[i],d_temp,neural[i],neural[i+1]);
//W(i) = W(i) - alpha*(delta_W(i) + ramda*W(i))
blocksPerGride = (neural[i+1]*neural[i] + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( weight_update), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_W[i],d_delta_W[i],alpha,ramda,neural[i+1]*neural[i]);
//delta_b(i) = one_vector^T * delta(i+1)
CUBLAS_CALL(hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,1,neural[i+1],batch_size, &one,
d_one_vector,1, d_delta[i+1],batch_size, &zero, d_delta_b[i],1));
//b(i) = b(i) - alpha*transpose(delta_b(i))
blocksPerGride = (neural[i+1] + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( bias_update), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_b[i],d_delta_b[i],alpha,neural[i+1]);
}
}
void MLP_basic :: temp_print()
{
float aaa[1000000];
hipblasStatus_t stat;
int mini_batch = 10;
stat = hipblasGetMatrix(neural[total_layers-1],mini_batch,sizeof(float),d_a[total_layers-1],neural[total_layers-1],aaa,neural[total_layers-1]);
cout<<stat<<endl;
for(int y = 0 ; y < neural[total_layers-1] ; y++)
{
for(int x = 0 ; x < mini_batch ;x++)
{
printf("%1.4f ",aaa[IDX2C(y,x,neural[total_layers-1])]);
}
cout<<endl;
}
cout<<endl;
stat = hipblasGetMatrix(neural[total_layers-1],mini_batch,sizeof(float),d_target,neural[total_layers-1],aaa,neural[total_layers-1]);
cout<<stat<<endl;
for(int y = 0 ; y < neural[total_layers-1] ; y++)
{
for(int x = 0 ; x < mini_batch ;x++)
{
printf("%1.4f ",aaa[IDX2C(y,x,neural[total_layers-1])]);
}
cout<<endl;
}
cout<<endl;
/*
float aaa[1000000];
hipblasStatus_t stat;
int idx = 0;
stat = hipblasGetMatrix(neural[idx],mini_batch,sizeof(float),d_a[idx],neural[idx],aaa,neural[idx]);
cout<<stat<<endl;
for(int y = 0 ; y < neural[idx] ; y++)
{
for(int x = 0 ; x < mini_batch ;x++)
{
cout<<aaa[IDX2C(y,x,neural[idx])]<<" ";
}
cout<<endl;
}
cout<<endl;
*/
/*
float aaa[1000000];
hipblasStatus_t stat;
// hiprandStatus_t
int idx = 2;
stat = hipblasGetMatrix(neural[idx+1],neural[idx],sizeof(float),d_W[idx],neural[idx+1],aaa,neural[idx+1]);
cout<<stat<<endl;
for(int y = 0 ; y < neural[idx+1] ; y++)
{
for(int x = 0 ; x < neural[idx] ;x++)
{
cout<<aaa[IDX2C(y,x,neural[idx+1])]<<" ";
}
cout<<endl;
}
cout<<endl;
*/
}
| f69b68fe92057e11facd21f05fa9dd8f7d07aaaf.cu | #include "MLP_basic.h"
#include "my_device_function.cuh"
#define IDX2C(i,j,Id) (((j)*(Id))+(i)) // j -> row, i -> column
using namespace std;
MLP_basic:: MLP_basic()
{
total_layers = 0;
max_batch = 0;
alpha = 0;
ramda = 0;
d_target = NULL;
d_temp = NULL;
d_temp1 = NULL;
d_one_vector = NULL;
d_train_input = NULL;
d_train_target = NULL;
d_validation_input = NULL;
d_validation_target = NULL;
d_test_input = NULL;
d_test_target = NULL;
for(long i = 0 ; i < MAXIMUM_LAYERS ; i++)
{
neural[i] = 0;
W[i] = NULL;
b[i] = NULL;
d_W[i] = NULL;
d_b[i] = NULL;
d_a[i] = NULL;
d_z[i] = NULL;
d_delta[i] = NULL;
d_delta_W[i] = NULL;
d_delta_b[i] = NULL;
}
CURAND_CALL(curandCreateGenerator(&rand_gen,CURAND_RNG_PSEUDO_DEFAULT));
CUBLAS_CALL(cublasCreate(&handle));
}
MLP_basic :: ~MLP_basic()
{
if(d_target != NULL) CUDA_CALL(cudaFree(d_target));
if(d_temp != NULL) CUDA_CALL(cudaFree(d_temp));
if(d_temp1 != NULL) CUDA_CALL(cudaFree(d_temp1));
if(d_one_vector != NULL) CUDA_CALL(cudaFree(d_one_vector));
if(d_train_input != NULL) CUDA_CALL(cudaFree(d_train_input));
if(d_train_target != NULL) CUDA_CALL(cudaFree(d_train_target));
if(d_validation_input != NULL) CUDA_CALL(cudaFree(d_validation_input));
if(d_validation_target != NULL) CUDA_CALL(cudaFree(d_validation_target));
if(d_test_input != NULL) CUDA_CALL(cudaFree(d_test_input));
if(d_test_target != NULL) CUDA_CALL(cudaFree(d_test_target));
for(long i = 0 ; i < MAXIMUM_LAYERS ; i++)
{
if(W[i] != NULL) free(W[i]);
if(b[i] != NULL) free(b[i]);
if(d_W[i] != NULL) CUDA_CALL(cudaFree(d_W[i]));
if(d_b[i] != NULL) CUDA_CALL(cudaFree(d_b[i]));
if(d_a[i] != NULL) CUDA_CALL(cudaFree(d_a[i]));
if(d_z[i] != NULL) CUDA_CALL(cudaFree(d_z[i]));
if(d_delta[i] != NULL) CUDA_CALL(cudaFree(d_delta[i]));
if(d_delta_W[i] != NULL) CUDA_CALL(cudaFree(d_delta_W[i]));
if(d_delta_b[i] != NULL) CUDA_CALL(cudaFree(d_delta_b[i]));
}
CUBLAS_CALL(cublasDestroy(handle));
CURAND_CALL(curandDestroyGenerator(rand_gen));
}
void MLP_basic :: init(long *neurals,long layers,long max_batch_size,float alpha, float ramda)
{
this->total_layers = layers;
this->max_batch = max_batch_size;
this->alpha = alpha;
this->ramda = ramda;
for(long i = 0 ; i < this->total_layers ; i++)
{
this->neural[i] = neurals[i];
}
CUDA_CALL(cudaMalloc(&d_target,sizeof(float)*neural[total_layers-1]*max_batch));
CUDA_CALL(cudaMalloc(&d_a[0],sizeof(float)*neural[0]*max_batch));
CUDA_CALL(cudaMalloc(&d_train_input,sizeof(float)*neural[0]*max_batch));
CUDA_CALL(cudaMalloc(&d_train_target,sizeof(float)*neural[total_layers - 1]*max_batch));
CUDA_CALL(cudaMalloc(&d_validation_input,sizeof(float)*neural[0]*max_batch));
CUDA_CALL(cudaMalloc(&d_validation_target,sizeof(float)*neural[total_layers - 1]*max_batch));
CUDA_CALL(cudaMalloc(&d_test_input,sizeof(float)*neural[0]*max_batch));
CUDA_CALL(cudaMalloc(&d_test_target,sizeof(float)*neural[total_layers - 1]*max_batch));
long maximum = 0;
for(long i = 0 ; i < total_layers-1 ; i++)
{
W[i] = (float*)calloc(neural[i]*neural[i+1],sizeof(float));
b[i] = (float*)calloc(neural[i+1],sizeof(float));
CUDA_CALL(cudaMalloc(&d_W[i],sizeof(float)*neural[i]*neural[i+1]));
CUDA_CALL(cudaMalloc(&d_b[i],sizeof(float)*neural[i+1]));
CUDA_CALL(cudaMalloc(&d_a[i+1],sizeof(float)*neural[i+1]*max_batch));
CUDA_CALL(cudaMalloc(&d_z[i+1],sizeof(float)*neural[i+1]*max_batch));
CUDA_CALL(cudaMalloc(&d_delta[i+1],sizeof(float)*neural[i+1]*max_batch));
CUDA_CALL(cudaMalloc(&d_delta_W[i],sizeof(float)*neural[i+1]*neural[i]));
CUDA_CALL(cudaMalloc(&d_delta_b[i],sizeof(float)*neural[i+1]));
if(neural[i] > maximum) maximum = neural[i];
}
CUDA_CALL(cudaMalloc(&d_temp,sizeof(float)*maximum*max_batch)); //temp alloc
CUDA_CALL(cudaMalloc(&d_temp1,sizeof(float)*maximum*max_batch));
float *one_vector;
one_vector = (float*)calloc(max_batch*maximum,sizeof(float));
for(long i = 0 ; i < max_batch*maximum ; i++) one_vector[i] = 1.0;
CUDA_CALL(cudaMalloc(&d_one_vector,sizeof(float)*max_batch*maximum));
CUBLAS_CALL(cublasSetMatrix(1,max_batch*maximum,sizeof(float),one_vector,1,d_one_vector,1));
free(one_vector);
}
void MLP_basic :: first_parameters_host_device()
{
for(long i = 0 ; i < total_layers -1 ; i++)
{
CUBLAS_CALL(cublasSetMatrix(neural[i+1],neural[i],sizeof(float),W[i],neural[i+1],d_W[i],neural[i+1]));
CUBLAS_CALL(cublasSetVector(neural[i+1],sizeof(float),b[i],1,d_b[i],1));
}
}
void MLP_basic :: first_random_parameter()
{
for(long i = 0 ; i < total_layers -1 ; i++)
{
CURAND_CALL(curandGenerateNormal(rand_gen,d_W[i],neural[i+1]*neural[i],PARA_MEAN,PARA_STD));
CURAND_CALL(curandGenerateNormal(rand_gen,d_b[i],neural[i+1],PARA_MEAN,PARA_STD));
}
}
void MLP_basic :: second_validation_test_set_host_device(float *validation_input, float* validation_target,
long validation_batch_size, float *test_input, float *test_target,long test_batch_size)
{
CUBLAS_CALL(cublasSetMatrix(neural[0],validation_batch_size,sizeof(float),validation_input,neural[0],
d_validation_input,neural[0]));
CUBLAS_CALL(cublasSetMatrix(neural[total_layers-1],validation_batch_size,sizeof(float),validation_target,
neural[total_layers-1],d_validation_target,neural[total_layers-1]));
CUBLAS_CALL(cublasSetMatrix(neural[0],test_batch_size,sizeof(float),test_input,neural[0],
d_test_input,neural[0]));
CUBLAS_CALL(cublasSetMatrix(neural[total_layers-1],test_batch_size,sizeof(float),test_target,
neural[total_layers-1],d_test_target,neural[total_layers-1]));
}
void MLP_basic :: third_train_set_host_device(float *train_input, float *train_target, long train_batch_size)
{
long threadsPerBolck = 1024;
long blocksPerGride = 0;
CUBLAS_CALL(cublasSetMatrix(neural[0],train_batch_size,sizeof(float),train_input,neural[0],d_train_input,neural[0]));
CUBLAS_CALL(cublasSetMatrix(neural[total_layers-1],train_batch_size,sizeof(float),train_target,neural[total_layers-1],d_train_target,neural[total_layers-1]));
blocksPerGride = (neural[0]*train_batch_size + threadsPerBolck -1)/threadsPerBolck;
deliver_front_to_rear<<<blocksPerGride, threadsPerBolck>>>(d_train_input,d_a[0],neural[0]*train_batch_size);
blocksPerGride = (neural[total_layers-1]*train_batch_size + threadsPerBolck -1)/threadsPerBolck;
deliver_front_to_rear<<<blocksPerGride, threadsPerBolck>>>(d_train_target,d_target,neural[total_layers-1]*train_batch_size);
}
void MLP_basic :: validataion_setting(long batch_size)
{
long threadsPerBolck = 1024;
long blocksPerGride = 0;
blocksPerGride = (neural[0]*batch_size + threadsPerBolck -1)/threadsPerBolck;
deliver_front_to_rear<<<blocksPerGride, threadsPerBolck>>>(d_validation_input,d_a[0],neural[0]*batch_size);
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
deliver_front_to_rear<<<blocksPerGride, threadsPerBolck>>>(d_validation_target,d_target,neural[total_layers-1]*batch_size);
}
void MLP_basic :: test_setting(long batch_size)
{
long threadsPerBolck = 1024;
long blocksPerGride = 0;
blocksPerGride = (neural[0]*batch_size + threadsPerBolck -1)/threadsPerBolck;
deliver_front_to_rear<<<blocksPerGride, threadsPerBolck>>>(d_test_input,d_a[0],neural[0]*batch_size);
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
deliver_front_to_rear<<<blocksPerGride, threadsPerBolck>>>(d_test_target,d_target,neural[total_layers-1]*batch_size);
}
void MLP_basic :: forward_propagation(long batch_size)
{
float one = 1.0;
float zero = 0.0;
long threadsPerBolck = 1024;
long blocksPerGride = 0;
for(int i = 0 ; i < total_layers-2; i++)
{
//z(i+1) = w(i)*a(i)
CUBLAS_CALL(cublasSgemm(handle, CUBLAS_OP_N,CUBLAS_OP_N,neural[i+1],batch_size,neural[i], &one,
d_W[i],neural[i+1], d_a[i],neural[i], &zero, d_z[i+1],neural[i+1]));
//z(i+1) = z(i+1) + b(i);
blocksPerGride = (neural[i+1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
add_bias<<<blocksPerGride, threadsPerBolck>>>(d_z[i+1],d_b[i],neural[i+1],neural[i+1]*batch_size);
// z(i+1) -> batch_normalizaion -> z(i+1)
//a(i+1) = F(z(i+1))
blocksPerGride = (neural[i+1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
relu<<<blocksPerGride, threadsPerBolck>>>(d_a[i+1],d_z[i+1],neural[i+1]*batch_size);
// a(i+1) -> drop out -> a(i+1)
}
//last layer
//z(last) = w(last-1)*a(last-1)
CUBLAS_CALL(cublasSgemm(handle, CUBLAS_OP_N,CUBLAS_OP_N,neural[total_layers-1],batch_size,neural[total_layers-2],
&one, d_W[total_layers-2],neural[total_layers-1], d_a[total_layers-2],neural[total_layers-2],
&zero, d_z[total_layers-1],neural[total_layers-1]));
//z(last) = z(last) + b(last-1);
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
add_bias<<<blocksPerGride, threadsPerBolck>>>(d_z[total_layers-1],d_b[total_layers-2],
neural[total_layers-1],neural[total_layers-1]*batch_size);
//a(last) = F(z(last))
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
sigmoid<<<blocksPerGride, threadsPerBolck>>>(d_a[total_layers-1],
d_z[total_layers-1],neural[total_layers-1]*batch_size);
}
void MLP_basic :: delta_rule(long batch_size)
{
float one = 1.0;
float zero = 0.0;
long threadsPerBolck = 1024;
long blocksPerGride = 0;
// temp = (y-T)*(2*batch_size)
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
last_delta_before_transpose<<<blocksPerGride, threadsPerBolck>>>(d_temp,d_a[total_layers-1],
d_target,batch_size,neural[total_layers-1]*batch_size);
//delta4 = transpose(temp)
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
transpose<<<blocksPerGride, threadsPerBolck>>>(d_delta[total_layers-1],d_temp,neural[total_layers-1],batch_size);
for(int i = total_layers - 2 ; i > 0 ; i--)
{
//delta(i) = delta(i+1)*W(i)
CUBLAS_CALL(cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,batch_size,neural[i],neural[i+1], &one,
d_delta[i+1],batch_size, d_W[i],neural[i+1], &zero, d_delta[i],batch_size));
//temp = f_inv(z(i+1))
blocksPerGride = (neural[i]*batch_size + threadsPerBolck -1)/threadsPerBolck;
relu_inv<<<blocksPerGride, threadsPerBolck>>>(d_temp,d_z[i],neural[i]*batch_size);
//temp1 = transpose(temp)
blocksPerGride = (neural[i]*batch_size + threadsPerBolck -1)/threadsPerBolck;
transpose<<<blocksPerGride, threadsPerBolck>>>(d_temp1,d_temp,neural[i],batch_size);
//delta2 = delta(i+1).*temp1
blocksPerGride = (neural[i]*batch_size + threadsPerBolck -1)/threadsPerBolck;
basic_multi<<<blocksPerGride, threadsPerBolck>>>(d_delta[i],d_temp1,d_delta[i],neural[i]*batch_size);
}
}
float MLP_basic :: get_loss_error(long batch_size)
{
float result;
float one = 1.0;
float zero = 0.0;
float number = 1.0/(neural[total_layers-1]*batch_size);
long threadsPerBolck = 1024;
long blocksPerGride = 0;
//temp = -0.5*(T*log(y) + (1-T)*log(1-y))
blocksPerGride = (neural[total_layers-1]*batch_size + threadsPerBolck -1)/threadsPerBolck;
loss_cross_entropy<<<blocksPerGride, threadsPerBolck>>>(d_target,d_a[total_layers-1],d_temp,
neural[total_layers-1],batch_size);
//temp1(y,1) = temp(y,batch_size)*one_vector(batch_size,1) // sum over the batch columns
CUBLAS_CALL(cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,neural[total_layers-1],1, batch_size,
&one, d_temp,neural[total_layers-1], d_one_vector, batch_size, &zero, d_temp1,neural[total_layers-1]));
//d_temp(1,1) = one_vector(1,y) * temp(y,1)
CUBLAS_CALL(cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,1,1,neural[total_layers-1],
&number, d_one_vector,1, d_temp1,neural[total_layers-1], &zero, d_temp,1));
CUBLAS_CALL(cublasGetMatrix(1,1,sizeof(float),d_temp,1,&result,1));
return result;
}
float MLP_basic :: get_accuracy(long batch_size)
{
float result;
float one = 1.0;
float zero = 0.0;
long threadsPerBolck = 1024;
long blocksPerGride = 0;
// In parallel over the batch, compare the argmax of the output with the target index; return 1 on a match, 0 otherwise
blocksPerGride = (batch_size + threadsPerBolck -1)/threadsPerBolck;
matching<<<blocksPerGride, threadsPerBolck>>>(d_target,d_a[total_layers-1],d_temp1,
neural[total_layers-1],batch_size);
//d_temp (scalar) = temp1*one_vector // sum along the row
CUBLAS_CALL(cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,1,1,batch_size,
&one, d_temp1,1, d_one_vector, batch_size, &zero, d_temp,1));
CUBLAS_CALL(cublasGetMatrix(1,1,sizeof(float),d_temp,1,&result,1));
return result/batch_size;
}
float MLP_basic :: get_sum_square_weight()
{
float result = 0.0;
float result1;
float one = 1.0;
float zero = 0.0;
long threadsPerBolck = 1024;
long blocksPerGride = 0;
for(long i = 0 ; i < total_layers-1 ; i++)
{
//temp = W.*W
blocksPerGride = (neural[i+1]*neural[i] + threadsPerBolck -1)/threadsPerBolck;
basic_multi<<<blocksPerGride, threadsPerBolck>>>(d_W[i],d_W[i],d_temp,neural[i+1]*neural[i]);
//temp1 = (one_vector)^T*temp // sum over the columns
CUBLAS_CALL(cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,1,neural[i], neural[i+1],
&one, d_one_vector,1, d_temp, neural[i+1], &zero, d_temp1,1));
//d_temp = temp1*one_vector // sum along the row
CUBLAS_CALL(cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,1,1,neural[i],
&one, d_temp1,1, d_one_vector,neural[i], &zero, d_temp,1));
CUBLAS_CALL(cublasGetMatrix(1,1,sizeof(float),d_temp,1,&result1,1));
result += result1;
}
return result;
}
void MLP_basic :: update(long batch_size)
{
float one = 1.0;
float zero = 0.0;
long threadsPerBolck = 1024;
long blocksPerGride = 0;
for(int i = 0 ; i < total_layers-1 ; i++)
{
//temp = a(i)*delta(i+1)
CUBLAS_CALL(cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,neural[i],neural[i+1],batch_size, &one,
d_a[i],neural[i], d_delta[i+1],batch_size, &zero, d_temp,neural[i]));
//delta_W(i) = transpose(temp)
blocksPerGride = (neural[i]*neural[i+1] + threadsPerBolck -1)/threadsPerBolck;
transpose<<<blocksPerGride, threadsPerBolck>>>(d_delta_W[i],d_temp,neural[i],neural[i+1]);
//W(i) = W(i) - alpha*(delta_W(i) + ramda*W(i))
blocksPerGride = (neural[i+1]*neural[i] + threadsPerBolck -1)/threadsPerBolck;
weight_update<<<blocksPerGride, threadsPerBolck>>>(d_W[i],d_delta_W[i],alpha,ramda,neural[i+1]*neural[i]);
//delta_b(i) = one_vector^T * delta(i+1)
CUBLAS_CALL(cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,1,neural[i+1],batch_size, &one,
d_one_vector,1, d_delta[i+1],batch_size, &zero, d_delta_b[i],1));
//b(i) = b(i) - alpha*transpose(delta_b(i))
blocksPerGride = (neural[i+1] + threadsPerBolck -1)/threadsPerBolck;
bias_update<<<blocksPerGride, threadsPerBolck>>>(d_b[i],d_delta_b[i],alpha,neural[i+1]);
}
}
void MLP_basic :: temp_print()
{
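// Debugging helper: dumps the first 10 columns of the output and target matrices (the temporary buffer assumes ~4 MB of stack)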
float aaa[1000000];
cublasStatus_t stat;
int mini_batch = 10;
stat = cublasGetMatrix(neural[total_layers-1],mini_batch,sizeof(float),d_a[total_layers-1],neural[total_layers-1],aaa,neural[total_layers-1]);
cout<<stat<<endl;
for(int y = 0 ; y < neural[total_layers-1] ; y++)
{
for(int x = 0 ; x < mini_batch ;x++)
{
printf("%1.4f ",aaa[IDX2C(y,x,neural[total_layers-1])]);
}
cout<<endl;
}
cout<<endl;
stat = cublasGetMatrix(neural[total_layers-1],mini_batch,sizeof(float),d_target,neural[total_layers-1],aaa,neural[total_layers-1]);
cout<<stat<<endl;
for(int y = 0 ; y < neural[total_layers-1] ; y++)
{
for(int x = 0 ; x < mini_batch ;x++)
{
printf("%1.4f ",aaa[IDX2C(y,x,neural[total_layers-1])]);
}
cout<<endl;
}
cout<<endl;
/*
float aaa[1000000];
cublasStatus_t stat;
int idx = 0;
stat = cublasGetMatrix(neural[idx],mini_batch,sizeof(float),d_a[idx],neural[idx],aaa,neural[idx]);
cout<<stat<<endl;
for(int y = 0 ; y < neural[idx] ; y++)
{
for(int x = 0 ; x < mini_batch ;x++)
{
cout<<aaa[IDX2C(y,x,neural[idx])]<<" ";
}
cout<<endl;
}
cout<<endl;
*/
/*
float aaa[1000000];
cublasStatus_t stat;
// curandStatus_t
int idx = 2;
stat = cublasGetMatrix(neural[idx+1],neural[idx],sizeof(float),d_W[idx],neural[idx+1],aaa,neural[idx+1]);
cout<<stat<<endl;
for(int y = 0 ; y < neural[idx+1] ; y++)
{
for(int x = 0 ; x < neural[idx] ;x++)
{
cout<<aaa[IDX2C(y,x,neural[idx+1])]<<" ";
}
cout<<endl;
}
cout<<endl;
*/
}
|
e843c5a0c863fd568b5b98e2e314d216c5f43635.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// test cuda programming
// nvcc myAddVec.cu -o myAddVec
#include <iostream>
#include <vector>
#include <assert.h>
using namespace std;
__global__ void addVec(int* da, int* db, int* dc, int n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < n) {
dc[tid] = da[tid] + db[tid];
}
}
int main() {
cout << "Demo: CUDA add vector" << endl;
const int n = 1000;
size_t bytes = n*sizeof(int);
vector<int> a = vector<int>(n, 1);
vector<int> b = vector<int>(n, 2);
vector<int> c = vector<int>(n, 0);
int* da;
int* db;
int* dc;
hipMalloc(&da, bytes);
hipMalloc(&db, bytes);
hipMalloc(&dc, bytes);
hipError_t err = hipSuccess;
err = hipMemcpy(da, a.data(), bytes, hipMemcpyHostToDevice);
err = hipMemcpy(db, b.data(), bytes, hipMemcpyHostToDevice);
int BlockSize = 256;
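// Round the grid size up so every element is covered by a thread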
int GridSize = (n + BlockSize - 1)/BlockSize;
cout << "GridSize=" << GridSize << endl;
cout << "BlockSize=" << BlockSize << endl;
hipLaunchKernelGGL(( addVec), dim3(GridSize), dim3(BlockSize), 0, 0, da, db, dc, n);
hipDeviceSynchronize();
err = hipMemcpy(c.data(), dc, bytes, hipMemcpyDeviceToHost);
if(err == hipSuccess)
cout << "hipMemcpyDeviceToHost ok." << endl;
else
cout << err << " hipMemcpyDeviceToHost failed." << endl;
hipFree(da);
hipFree(db);
hipFree(dc);
cout << "c[0]:" << c[0] << endl;
cout << "c[100]:" << c[100] << endl;
assert(c[0] == 3);
assert(c[500] == 3);
cout << "CUDA add vector successfully!" << endl;
}
| e843c5a0c863fd568b5b98e2e314d216c5f43635.cu | // test cuda programming
// nvcc myAddVec.cu -o myAddVec
#include <iostream>
#include <vector>
#include <assert.h>
using namespace std;
__global__ void addVec(int* da, int* db, int* dc, int n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < n) {
dc[tid] = da[tid] + db[tid];
}
}
int main() {
cout << "Demo: CUDA add vector" << endl;
const int n = 1000;
size_t bytes = n*sizeof(int);
vector<int> a = vector<int>(n, 1);
vector<int> b = vector<int>(n, 2);
vector<int> c = vector<int>(n, 0);
int* da;
int* db;
int* dc;
cudaMalloc(&da, bytes);
cudaMalloc(&db, bytes);
cudaMalloc(&dc, bytes);
cudaError_t err = cudaSuccess;
err = cudaMemcpy(da, a.data(), bytes, cudaMemcpyHostToDevice);
err = cudaMemcpy(db, b.data(), bytes, cudaMemcpyHostToDevice);
int BlockSize = 256;
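// Round the grid size up so every element is covered by a thread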
int GridSize = (n + BlockSize - 1)/BlockSize;
cout << "GridSize=" << GridSize << endl;
cout << "BlockSize=" << BlockSize << endl;
addVec<<<GridSize, BlockSize>>>(da, db, dc, n);
cudaDeviceSynchronize();
err = cudaMemcpy(c.data(), dc, bytes, cudaMemcpyDeviceToHost);
if(err == cudaSuccess)
cout << "cudaMemcpyDeviceToHost ok." << endl;
else
cout << err << " cudaMemcpyDeviceToHost failed." << endl;
cudaFree(da);
cudaFree(db);
cudaFree(dc);
cout << "c[0]:" << c[0] << endl;
cout << "c[100]:" << c[100] << endl;
assert(c[0] == 3);
assert(c[500] == 3);
cout << "CUDA add vector successfully!" << endl;
}
|
1506af463ecf3f38dd7c0853996ecc6f2634e29e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "absolute_upd_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float4 __restrict *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float4 __restrict *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int elem_count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
absolute_upd_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,elem_count);
hipDeviceSynchronize();
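// Warm-up launches so one-time initialization cost is excluded from the timed loop below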
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
absolute_upd_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,elem_count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
absolute_upd_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,elem_count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1506af463ecf3f38dd7c0853996ecc6f2634e29e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "absolute_upd_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float4 __restrict *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float4 __restrict *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int elem_count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
absolute_upd_kernel<<<gridBlock,threadBlock>>>(input,output,elem_count);
cudaDeviceSynchronize();
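// Warm-up launches so one-time initialization cost is excluded from the timed loop below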
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
absolute_upd_kernel<<<gridBlock,threadBlock>>>(input,output,elem_count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
absolute_upd_kernel<<<gridBlock,threadBlock>>>(input,output,elem_count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
37fd650c13447a38182eecf10ba7a52cb45ad184.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <iostream>
__global__ void square_array(double *a, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx] * a[idx];
printf("idx = %d, a = %f\n", idx, a[idx]);
}
extern "C" void someOperation() {
const int localN = 10;
double * localFloat = new double[localN];
for (int i = 0; i < localN; i++)
localFloat[i] = i;
double *a_d = NULL; // device pointer; the array of N doubles is allocated with hipMalloc below
size_t size = localN * sizeof(double);
hipMalloc((void **) &a_d, size);
hipMemcpy(a_d, localFloat, size, hipMemcpyHostToDevice);
int block_size = 4;
int n_blocks = localN/block_size + (localN%block_size == 0 ? 0:1);
hipLaunchKernelGGL(( square_array) , dim3(n_blocks), dim3(block_size), 0, 0, a_d, localN);
hipMemcpy(localFloat, a_d, size, hipMemcpyDeviceToHost);
hipFree(a_d);
std::cout << "Host side output: " << std::endl;
for (int i = 0; i < localN; i++)
std::cout << localFloat[i] << std::endl;
}
extern "C" void DeviceInfo(void) {
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %u bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n",
8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %u bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %u bytes\n",
deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
printf("\nTest PASSED\n");
}
| 37fd650c13447a38182eecf10ba7a52cb45ad184.cu | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <iostream>
__global__ void square_array(double *a, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx] * a[idx];
printf("idx = %d, a = %f\n", idx, a[idx]);
}
extern "C" void someOperation() {
const int localN = 10;
double * localFloat = new double[localN];
for (int i = 0; i < localN; i++)
localFloat[i] = i;
double *a_d = NULL; // device pointer; the array of N doubles is allocated with cudaMalloc below
size_t size = localN * sizeof(double);
cudaMalloc((void **) &a_d, size);
cudaMemcpy(a_d, localFloat, size, cudaMemcpyHostToDevice);
int block_size = 4;
int n_blocks = localN/block_size + (localN%block_size == 0 ? 0:1);
square_array <<<n_blocks, block_size>>> (a_d, localN);
cudaMemcpy(localFloat, a_d, size, cudaMemcpyDeviceToHost);
cudaFree(a_d);
std::cout << "Host side output: " << std::endl;
for (int i = 0; i < localN; i++)
std::cout << localFloat[i] << std::endl;
}
extern "C" void DeviceInfo(void) {
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %u bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n",
8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %u bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %u bytes\n",
deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
printf("\nTest PASSED\n");
}
|
9bfaaa0a98452034725808cba00ac32eacefab3e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <cstdlib>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for (int i = 0; i < N; i++)
{
if (vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
const int N = 2 << 24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
hipMemPrefetchAsync(a, size, deviceId);
hipMemPrefetchAsync(b, size, deviceId);
hipMemPrefetchAsync(c, size, deviceId);
int threadsPerBlock;
int numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
hipError_t addVectorsErr;
hipError_t asyncErr;
initWith << <numberOfBlocks, threadsPerBlock >> > (3, a, N);
initWith << <numberOfBlocks, threadsPerBlock >> > (4, b, N);
initWith << <numberOfBlocks, threadsPerBlock >> > (0, c, N);
addVectorsInto << <numberOfBlocks, threadsPerBlock >> > (c, a, b, N);
addVectorsErr = hipGetLastError();
if (addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if (asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| 9bfaaa0a98452034725808cba00ac32eacefab3e.cu | #include <stdio.h>
#include <cstdlib>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for (int i = 0; i < N; i++)
{
if (vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
const int N = 2 << 24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
cudaMemPrefetchAsync(a, size, deviceId);
cudaMemPrefetchAsync(b, size, deviceId);
cudaMemPrefetchAsync(c, size, deviceId);
int threadsPerBlock;
int numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
initWith << <numberOfBlocks, threadsPerBlock >> > (3, a, N);
initWith << <numberOfBlocks, threadsPerBlock >> > (4, b, N);
initWith << <numberOfBlocks, threadsPerBlock >> > (0, c, N);
addVectorsInto << <numberOfBlocks, threadsPerBlock >> > (c, a, b, N);
addVectorsErr = cudaGetLastError();
if (addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if (asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
1a147d7ac40a7f78c8bb3cbc4ba7847aacd0cddf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2009, Jiri Matela
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sys/time.h>
#include <getopt.h>
#include "common.h"
#include "components.h"
#include "dwt.h"
struct dwt {
char * srcFilename;
char * outFilename;
unsigned char *srcImg;
int pixWidth;
int pixHeight;
int components;
int dwtLvls;
};
int getImg(char * srcFilename, unsigned char *srcImg, int inputSize)
{
// printf("Loading ipnput: %s\n", srcFilename);
char *path = "";
char *newSrc = NULL;
if((newSrc = (char *)malloc(strlen(srcFilename)+strlen(path)+1)) != NULL)
{
newSrc[0] = '\0';
strcat(newSrc, path);
strcat(newSrc, srcFilename);
srcFilename= newSrc;
}
printf("Loading ipnput: %s\n", srcFilename);
//srcFilename = strcat("../../data/dwt2d/",srcFilename);
//read image
int i = open(srcFilename, O_RDONLY, 0644);
if (i == -1) {
error(0,errno,"cannot access %s", srcFilename);
return -1;
}
int ret = read(i, srcImg, inputSize);
printf("precteno %d, inputsize %d\n", ret, inputSize);
close(i);
return 0;
}
void usage() {
printf("dwt [otpions] src_img.rgb <out_img.dwt>\n\
-d, --dimension\t\tdimensions of src img, e.g. 1920x1080\n\
-c, --components\t\tnumber of color components, default 3\n\
-b, --depth\t\t\tbit depth, default 8\n\
-l, --level\t\t\tDWT level, default 3\n\
-D, --device\t\t\tcuda device\n\
-f, --forward\t\t\tforward transform\n\
-r, --reverse\t\t\treverse transform\n\
-9, --97\t\t\t9/7 transform\n\
-5, --53\t\t\t5/3 transform\n\
-w --write-visual\t\twrite output in visual (tiled) fashion instead of the linear\n");
}
template <typename T>
void processDWT(struct dwt *d, int forward, int writeVisual)
{
int componentSize = d->pixWidth*d->pixHeight*sizeof(T);
T *c_r_out, *backup ;
hipMalloc((void**)&c_r_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_r_out, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&backup, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(backup, 0, componentSize);
cudaCheckError("Memset device memory");
if (d->components == 3) {
/* Alloc two more buffers for G and B */
T *c_g_out, *c_b_out;
hipMalloc((void**)&c_g_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_g_out, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&c_b_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_b_out, 0, componentSize);
cudaCheckError("Memset device memory");
/* Load components */
T *c_r, *c_g, *c_b;
hipMalloc((void**)&c_r, componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&c_g, componentSize); //< G, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_g, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&c_b, componentSize); //< B, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_b, 0, componentSize);
cudaCheckError("Memset device memory");
rgbToComponents(c_r, c_g, c_b, d->srcImg, d->pixWidth, d->pixHeight);
/* Compute DWT and always store into file */
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_g, c_g_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_b, c_b_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// -------test----------
// T *h_r_out=(T*)malloc(componentSize);
// hipMemcpy(h_r_out, c_g_out, componentSize, hipMemcpyDeviceToHost);
// int ii;
// for(ii=0;ii<componentSize/sizeof(T);ii++) {
// fprintf(stderr, "%d ", h_r_out[ii]);
// if((ii+1) % (d->pixWidth) == 0) fprintf(stderr, "\n");
// }
// -------test----------
/* Store DWT to file */
#ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".r");
writeNStage2DDWT(c_g_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".g");
writeNStage2DDWT(c_b_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".b");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".r");
writeLinear(c_g_out, d->pixWidth, d->pixHeight, d->outFilename, ".g");
writeLinear(c_b_out, d->pixWidth, d->pixHeight, d->outFilename, ".b");
}
#endif
hipFree(c_r);
cudaCheckError("Cuda free");
hipFree(c_g);
cudaCheckError("Cuda free");
hipFree(c_b);
cudaCheckError("Cuda free");
hipFree(c_g_out);
cudaCheckError("Cuda free");
hipFree(c_b_out);
cudaCheckError("Cuda free");
}
else if (d->components == 1) {
//Load component
T *c_r;
hipMalloc((void**)&(c_r), componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
bwToComponent(c_r, d->srcImg, d->pixWidth, d->pixHeight);
// Compute DWT
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// Store DWT to file
// #ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".out");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".lin.out");
}
// #endif
hipFree(c_r);
cudaCheckError("Cuda free");
}
hipFree(c_r_out);
cudaCheckError("Cuda free device");
hipFree(backup);
cudaCheckError("Cuda free device");
}
int main(int argc, char **argv)
{
int optindex = 0;
char ch;
struct option longopts[] = {
{"dimension", required_argument, 0, 'd'}, //dimensions of src img
{"components", required_argument, 0, 'c'}, //numger of components of src img
{"depth", required_argument, 0, 'b'}, //bit depth of src img
{"level", required_argument, 0, 'l'}, //level of dwt
{"device", required_argument, 0, 'D'}, //cuda device
{"forward", no_argument, 0, 'f'}, //forward transform
{"reverse", no_argument, 0, 'r'}, //reverse transform
{"97", no_argument, 0, '9'}, //9/7 transform
{"53", no_argument, 0, '5' }, //5/3transform
{"write-visual",no_argument, 0, 'w' }, //write output (subbands) in visual (tiled) order instead of linear
{"help", no_argument, 0, 'h'}
};
int pixWidth = 0; //<real pixWidth
int pixHeight = 0; //<real pixHeight
int compCount = 3; //number of components; 3 for RGB or YUV, 4 for RGBA
int bitDepth = 8;
int dwtLvls = 3; //default number of DWT levels
int device = 0;
int forward = 1; //forward transform
int dwt97 = 1; //1=dwt9/7, 0=dwt5/3 transform
int writeVisual = 0; //write output (subbands) in visual (tiled) order instead of linear
char * pos;
while ((ch = getopt_long(argc, argv, "d:c:b:l:D:fr95wh", longopts, &optindex)) != -1) {
switch (ch) {
case 'd':
pixWidth = atoi(optarg);
pos = strstr(optarg, "x");
if (pos == NULL || pixWidth == 0 || (strlen(pos) >= strlen(optarg))) {
usage();
return -1;
}
pixHeight = atoi(pos+1);
break;
case 'c':
compCount = atoi(optarg);
break;
case 'b':
bitDepth = atoi(optarg);
break;
case 'l':
dwtLvls = atoi(optarg);
break;
case 'D':
device = atoi(optarg);
break;
case 'f':
forward = 1;
break;
case 'r':
forward = 0;
break;
case '9':
dwt97 = 1;
break;
case '5':
dwt97 = 0;
break;
case 'w':
writeVisual = 1;
break;
case 'h':
usage();
return 0;
case '?':
return -1;
default :
usage();
return -1;
}
}
argc -= optind;
argv += optind;
if (argc == 0) { // at least one filename is expected
printf("Please supply src file name\n");
usage();
return -1;
}
if (pixWidth <= 0 || pixHeight <=0) {
printf("Wrong or missing dimensions\n");
usage();
return -1;
}
if (forward == 0) {
writeVisual = 0; //do not write visual when RDWT
}
// device init
int devCount;
hipGetDeviceCount(&devCount);
cudaCheckError("Get device count");
if (devCount == 0) {
printf("No CUDA enabled device\n");
return -1;
}
if (device < 0 || device > devCount -1) {
printf("Selected device %d is out of bound. Devices on your system are in range %d - %d\n",
device, 0, devCount -1);
return -1;
}
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, device);
cudaCheckError("Get device properties");
if (devProp.major < 1) {
printf("Device %d does not support CUDA\n", device);
return -1;
}
printf("Using device %d: %s\n", device, devProp.name);
hipSetDevice(device);
cudaCheckError("Set selected device");
struct dwt *d;
d = (struct dwt *)malloc(sizeof(struct dwt));
d->srcImg = NULL;
d->pixWidth = pixWidth;
d->pixHeight = pixHeight;
d->components = compCount;
d->dwtLvls = dwtLvls;
// file names
d->srcFilename = (char *)malloc(strlen(argv[0]) + 1);
strcpy(d->srcFilename, argv[0]);
if (argc == 1) { // only one filename supplied
d->outFilename = (char *)malloc(strlen(d->srcFilename) + 5);
strcpy(d->outFilename, d->srcFilename);
strcpy(d->outFilename+strlen(d->srcFilename), ".dwt");
} else {
d->outFilename = strdup(argv[1]);
}
//Input review
printf("Source file:\t\t%s\n", d->srcFilename);
printf(" Dimensions:\t\t%dx%d\n", pixWidth, pixHeight);
printf(" Components count:\t%d\n", compCount);
printf(" Bit depth:\t\t%d\n", bitDepth);
printf(" DWT levels:\t\t%d\n", dwtLvls);
printf(" Forward transform:\t%d\n", forward);
printf(" 9/7 transform:\t\t%d\n", dwt97);
//data sizes
int inputSize = pixWidth*pixHeight*compCount; //<amount of data (in bytes) to process
//load img source image
hipHostMalloc((void **)&d->srcImg, inputSize);
cudaCheckError("Alloc host memory");
if (getImg(d->srcFilename, d->srcImg, inputSize) == -1)
return -1;
/* DWT */
if (forward == 1) {
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
else { // reverse
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
//writeComponent(r_cuda, pixWidth, pixHeight, srcFilename, ".g");
//writeComponent(g_wave_cuda, 512000, ".g");
//writeComponent(g_cuda, componentSize, ".g");
//writeComponent(b_wave_cuda, componentSize, ".b");
hipHostFree(d->srcImg);
cudaCheckError("Cuda free host");
return 0;
}
| 1a147d7ac40a7f78c8bb3cbc4ba7847aacd0cddf.cu | /*
* Copyright (c) 2009, Jiri Matela
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sys/time.h>
#include <getopt.h>
#include "common.h"
#include "components.h"
#include "dwt.h"
struct dwt {
char * srcFilename;
char * outFilename;
unsigned char *srcImg;
int pixWidth;
int pixHeight;
int components;
int dwtLvls;
};
int getImg(char * srcFilename, unsigned char *srcImg, int inputSize)
{
// printf("Loading ipnput: %s\n", srcFilename);
char *path = "";
char *newSrc = NULL;
if((newSrc = (char *)malloc(strlen(srcFilename)+strlen(path)+1)) != NULL)
{
newSrc[0] = '\0';
strcat(newSrc, path);
strcat(newSrc, srcFilename);
srcFilename= newSrc;
}
printf("Loading ipnput: %s\n", srcFilename);
//srcFilename = strcat("../../data/dwt2d/",srcFilename);
//read image
int i = open(srcFilename, O_RDONLY, 0644);
if (i == -1) {
error(0,errno,"cannot access %s", srcFilename);
return -1;
}
int ret = read(i, srcImg, inputSize);
printf("precteno %d, inputsize %d\n", ret, inputSize);
close(i);
return 0;
}
void usage() {
printf("dwt [otpions] src_img.rgb <out_img.dwt>\n\
-d, --dimension\t\tdimensions of src img, e.g. 1920x1080\n\
-c, --components\t\tnumber of color components, default 3\n\
-b, --depth\t\t\tbit depth, default 8\n\
-l, --level\t\t\tDWT level, default 3\n\
-D, --device\t\t\tcuda device\n\
-f, --forward\t\t\tforward transform\n\
-r, --reverse\t\t\treverse transform\n\
-9, --97\t\t\t9/7 transform\n\
-5, --53\t\t\t5/3 transform\n\
-w --write-visual\t\twrite output in visual (tiled) fashion instead of the linear\n");
}
template <typename T>
void processDWT(struct dwt *d, int forward, int writeVisual)
{
int componentSize = d->pixWidth*d->pixHeight*sizeof(T);
T *c_r_out, *backup ;
cudaMalloc((void**)&c_r_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_r_out, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&backup, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(backup, 0, componentSize);
cudaCheckError("Memset device memory");
if (d->components == 3) {
/* Alloc two more buffers for G and B */
T *c_g_out, *c_b_out;
cudaMalloc((void**)&c_g_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_g_out, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&c_b_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_b_out, 0, componentSize);
cudaCheckError("Memset device memory");
/* Load components */
T *c_r, *c_g, *c_b;
cudaMalloc((void**)&c_r, componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&c_g, componentSize); //< G, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_g, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&c_b, componentSize); //< B, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_b, 0, componentSize);
cudaCheckError("Memset device memory");
rgbToComponents(c_r, c_g, c_b, d->srcImg, d->pixWidth, d->pixHeight);
/* Compute DWT and always store into file */
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_g, c_g_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_b, c_b_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// -------test----------
// T *h_r_out=(T*)malloc(componentSize);
// cudaMemcpy(h_r_out, c_g_out, componentSize, cudaMemcpyDeviceToHost);
// int ii;
// for(ii=0;ii<componentSize/sizeof(T);ii++) {
// fprintf(stderr, "%d ", h_r_out[ii]);
// if((ii+1) % (d->pixWidth) == 0) fprintf(stderr, "\n");
// }
// -------test----------
/* Store DWT to file */
#ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".r");
writeNStage2DDWT(c_g_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".g");
writeNStage2DDWT(c_b_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".b");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".r");
writeLinear(c_g_out, d->pixWidth, d->pixHeight, d->outFilename, ".g");
writeLinear(c_b_out, d->pixWidth, d->pixHeight, d->outFilename, ".b");
}
#endif
cudaFree(c_r);
cudaCheckError("Cuda free");
cudaFree(c_g);
cudaCheckError("Cuda free");
cudaFree(c_b);
cudaCheckError("Cuda free");
cudaFree(c_g_out);
cudaCheckError("Cuda free");
cudaFree(c_b_out);
cudaCheckError("Cuda free");
}
else if (d->components == 1) {
//Load component
T *c_r;
cudaMalloc((void**)&(c_r), componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
bwToComponent(c_r, d->srcImg, d->pixWidth, d->pixHeight);
// Compute DWT
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// Store DWT to file
// #ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".out");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".lin.out");
}
// #endif
cudaFree(c_r);
cudaCheckError("Cuda free");
}
cudaFree(c_r_out);
cudaCheckError("Cuda free device");
cudaFree(backup);
cudaCheckError("Cuda free device");
}
int main(int argc, char **argv)
{
int optindex = 0;
char ch;
struct option longopts[] = {
{"dimension", required_argument, 0, 'd'}, //dimensions of src img
{"components", required_argument, 0, 'c'}, //numger of components of src img
{"depth", required_argument, 0, 'b'}, //bit depth of src img
{"level", required_argument, 0, 'l'}, //level of dwt
{"device", required_argument, 0, 'D'}, //cuda device
{"forward", no_argument, 0, 'f'}, //forward transform
{"reverse", no_argument, 0, 'r'}, //reverse transform
{"97", no_argument, 0, '9'}, //9/7 transform
{"53", no_argument, 0, '5' }, //5/3transform
{"write-visual",no_argument, 0, 'w' }, //write output (subbands) in visual (tiled) order instead of linear
{"help", no_argument, 0, 'h'}
};
int pixWidth = 0; //<real pixWidth
int pixHeight = 0; //<real pixHeight
int compCount = 3; //number of components; 3 for RGB or YUV, 4 for RGBA
int bitDepth = 8;
int dwtLvls = 3; //default number of DWT levels
int device = 0;
int forward = 1; //forward transform
int dwt97 = 1; //1=dwt9/7, 0=dwt5/3 transform
int writeVisual = 0; //write output (subbands) in visual (tiled) order instead of linear
char * pos;
while ((ch = getopt_long(argc, argv, "d:c:b:l:D:fr95wh", longopts, &optindex)) != -1) {
switch (ch) {
case 'd':
pixWidth = atoi(optarg);
pos = strstr(optarg, "x");
if (pos == NULL || pixWidth == 0 || (strlen(pos) >= strlen(optarg))) {
usage();
return -1;
}
pixHeight = atoi(pos+1);
break;
case 'c':
compCount = atoi(optarg);
break;
case 'b':
bitDepth = atoi(optarg);
break;
case 'l':
dwtLvls = atoi(optarg);
break;
case 'D':
device = atoi(optarg);
break;
case 'f':
forward = 1;
break;
case 'r':
forward = 0;
break;
case '9':
dwt97 = 1;
break;
case '5':
dwt97 = 0;
break;
case 'w':
writeVisual = 1;
break;
case 'h':
usage();
return 0;
case '?':
return -1;
default :
usage();
return -1;
}
}
argc -= optind;
argv += optind;
if (argc == 0) { // at least one filename is expected
printf("Please supply src file name\n");
usage();
return -1;
}
if (pixWidth <= 0 || pixHeight <=0) {
printf("Wrong or missing dimensions\n");
usage();
return -1;
}
if (forward == 0) {
writeVisual = 0; //do not write visual when RDWT
}
// device init
int devCount;
cudaGetDeviceCount(&devCount);
cudaCheckError("Get device count");
if (devCount == 0) {
printf("No CUDA enabled device\n");
return -1;
}
if (device < 0 || device > devCount -1) {
printf("Selected device %d is out of bound. Devices on your system are in range %d - %d\n",
device, 0, devCount -1);
return -1;
}
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, device);
cudaCheckError("Get device properties");
if (devProp.major < 1) {
printf("Device %d does not support CUDA\n", device);
return -1;
}
printf("Using device %d: %s\n", device, devProp.name);
cudaSetDevice(device);
cudaCheckError("Set selected device");
struct dwt *d;
d = (struct dwt *)malloc(sizeof(struct dwt));
d->srcImg = NULL;
d->pixWidth = pixWidth;
d->pixHeight = pixHeight;
d->components = compCount;
d->dwtLvls = dwtLvls;
// file names
d->srcFilename = (char *)malloc(strlen(argv[0]) + 1);
strcpy(d->srcFilename, argv[0]);
if (argc == 1) { // only one filename supplied
d->outFilename = (char *)malloc(strlen(d->srcFilename) + 5);
strcpy(d->outFilename, d->srcFilename);
strcpy(d->outFilename+strlen(d->srcFilename), ".dwt");
} else {
d->outFilename = strdup(argv[1]);
}
//Input review
printf("Source file:\t\t%s\n", d->srcFilename);
printf(" Dimensions:\t\t%dx%d\n", pixWidth, pixHeight);
printf(" Components count:\t%d\n", compCount);
printf(" Bit depth:\t\t%d\n", bitDepth);
printf(" DWT levels:\t\t%d\n", dwtLvls);
printf(" Forward transform:\t%d\n", forward);
printf(" 9/7 transform:\t\t%d\n", dwt97);
//data sizes
int inputSize = pixWidth*pixHeight*compCount; //<amount of data (in bytes) to process
//load img source image
cudaMallocHost((void **)&d->srcImg, inputSize);
cudaCheckError("Alloc host memory");
if (getImg(d->srcFilename, d->srcImg, inputSize) == -1)
return -1;
/* DWT */
if (forward == 1) {
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
else { // reverse
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
//writeComponent(r_cuda, pixWidth, pixHeight, srcFilename, ".g");
//writeComponent(g_wave_cuda, 512000, ".g");
//writeComponent(g_cuda, componentSize, ".g");
//writeComponent(b_wave_cuda, componentSize, ".b");
cudaFreeHost(d->srcImg);
cudaCheckError("Cuda free host");
return 0;
}
|
de1de39b326e05e43b17ed40e82d2315d53d194c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sigmoid_activation.hh"
#include "../nn_utils/nn_exception.hh"
#include <iostream>
__device__ float sigmoid(float x) {
// logistic sigmoid, consistent with the sigmoid(x)*(1 - sigmoid(x)) derivative used in backprop
return 1.0f / (1.0f + exp(-x));
}
__global__ void sigmoidActivationForward(float* Z, float* A,
int Z_x_dim, int Z_y_dim) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
A[index] = sigmoid(Z[index]);
}
}
__global__ void sigmoidActivationBackprop(float* Z, float* dA, float* dZ,
int Z_x_dim, int Z_y_dim) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
dZ[index] = dA[index] * sigmoid(Z[index]) * (1 - sigmoid(Z[index]));
}
}
SigmoidActivation::SigmoidActivation(std::string name) {
this->name = name;
}
SigmoidActivation::~SigmoidActivation()
{ }
Matrix& SigmoidActivation::forward(Matrix& Z) {
this->Z = Z;
A.allocateMemoryIfNotAllocated(Z.shape);
dim3 block_size(256);
dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( sigmoidActivationForward), dim3(num_of_blocks), dim3(block_size), 0, 0, Z.data_device.get(), A.data_device.get(),
Z.shape.x, Z.shape.y);
NNException::throwIfDeviceErrorsOccurred("Cannot perform sigmoid forward propagation.");
return A;
}
Matrix& SigmoidActivation::backprop(Matrix& dA, float learning_rate) {
dZ.allocateMemoryIfNotAllocated(Z.shape);
dim3 block_size(256);
dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( sigmoidActivationBackprop), dim3(num_of_blocks), dim3(block_size), 0, 0, Z.data_device.get(), dA.data_device.get(),
dZ.data_device.get(),
Z.shape.x, Z.shape.y);
NNException::throwIfDeviceErrorsOccurred("Cannot perform sigmoid back propagation");
return dZ;
}
void SigmoidActivation::initializeWeight(Matrix W_input) {
return;
}
void SigmoidActivation::initializeBias(Matrix b_input){
return;
} | de1de39b326e05e43b17ed40e82d2315d53d194c.cu | #include "sigmoid_activation.hh"
#include "../nn_utils/nn_exception.hh"
#include <iostream>
__device__ float sigmoid(float x) {
// logistic sigmoid, consistent with the sigmoid(x)*(1 - sigmoid(x)) derivative used in backprop
return 1.0f / (1.0f + exp(-x));
}
__global__ void sigmoidActivationForward(float* Z, float* A,
int Z_x_dim, int Z_y_dim) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
A[index] = sigmoid(Z[index]);
}
}
__global__ void sigmoidActivationBackprop(float* Z, float* dA, float* dZ,
int Z_x_dim, int Z_y_dim) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
dZ[index] = dA[index] * sigmoid(Z[index]) * (1 - sigmoid(Z[index]));
}
}
SigmoidActivation::SigmoidActivation(std::string name) {
this->name = name;
}
SigmoidActivation::~SigmoidActivation()
{ }
Matrix& SigmoidActivation::forward(Matrix& Z) {
this->Z = Z;
A.allocateMemoryIfNotAllocated(Z.shape);
dim3 block_size(256);
dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x);
sigmoidActivationForward<<<num_of_blocks, block_size>>>(Z.data_device.get(), A.data_device.get(),
Z.shape.x, Z.shape.y);
NNException::throwIfDeviceErrorsOccurred("Cannot perform sigmoid forward propagation.");
return A;
}
Matrix& SigmoidActivation::backprop(Matrix& dA, float learning_rate) {
dZ.allocateMemoryIfNotAllocated(Z.shape);
dim3 block_size(256);
dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x);
sigmoidActivationBackprop<<<num_of_blocks, block_size>>>(Z.data_device.get(), dA.data_device.get(),
dZ.data_device.get(),
Z.shape.x, Z.shape.y);
NNException::throwIfDeviceErrorsOccurred("Cannot perform sigmoid back propagation");
return dZ;
}
void SigmoidActivation::initializeWeight(Matrix W_input) {
return;
}
void SigmoidActivation::initializeBias(Matrix b_input){
return;
} |
267cd5bdf1760536e5f98fea0495e0ebddaa7f09.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include "allgatherv.h"
/**
* @brief Call MPI_Allgatherv() with host to host.
*
* @param
* sendbuf_d: Head address of sending device buffer of MPI_Allgatherv()
* sendcount: Number of elements in the sendbuf_d.
* recvbuf_d: Head address of receiving device buffer of MPI_Allgatherv()
* recvcounts: Array of number of elements that are to be received from
* each process
* displs: Entry i specifies the displacement at which to place the
* incoming data from process i.
* dtype: Datatype of MPI_Allgatherv() MUST be MPI_DOUBLE.
* comm: MPI communicator of MPI_Bcast()
*
*/
int allgatherv_h2h(double *sendbuf_d, int sendcount, double *recvbuf_d,
int *recvcounts, int *displs, MPI_Datatype dtype,
MPI_Comm comm) {
double *sendbuf_h = NULL;
double *recvbuf_h = NULL;
int rc = MPI_SUCCESS;
int rank = 0;
int size = 1;
int total_recvcount = 0;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
for (int i=0; i<size; i++)
total_recvcount += recvcounts[i];
sendbuf_h = (double *)malloc(sizeof(double) * sendcount);
recvbuf_h = (double *)malloc(sizeof(double) * total_recvcount);
CUDACHECK( hipMemcpy(sendbuf_h, sendbuf_d, sizeof(double) * sendcount,
hipMemcpyDeviceToHost) );
rc = MPI_Allgatherv(sendbuf_h, sendcount, dtype, recvbuf_h, recvcounts,
displs, dtype, comm);
CUDACHECK( hipMemcpy(recvbuf_d, recvbuf_h, sizeof(double) * total_recvcount,
hipMemcpyHostToDevice) );
free(recvbuf_h);
free(sendbuf_h);
return rc;
}
/**
* @brief Call MPI_Allgatherv() with host to device.
*
* @param
* sendbuf_d: Head address of sending device buffer of MPI_Allgatherv()
* sendcount: Number of elements in the sendbuf_d.
* recvbuf_d: Head address of receiving device buffer of MPI_Allgatherv()
* recvcounts: Array of number of elements that are to be received from
* each process
* displs: Entry i specifies the displacement at which to place the
* incoming data from process i.
* dtype: Datatype of MPI_Allgatherv() MUST be MPI_DOUBLE.
* comm: MPI communicator of MPI_Allgatherv()
*
*/
int allgatherv_h2d(double *sendbuf_d, int sendcount, double *recvbuf_d,
int *recvcounts, int *displs, MPI_Datatype dtype,
MPI_Comm comm) {
/* NOT IMPLEMENTED ! */
return 0;
}
/**
* @brief Call MPI_Allgatherv() with device to host.
*
* @param
* sendbuf_d: Head address of sending device buffer of MPI_Allgatherv()
* sendcount: Number of elements in the sendbuf_d.
* recvbuf_d: Head address of receiving device buffer of MPI_Allgatherv()
* recvcounts: Array of number of elements that are to be received from
* each process
* displs: Entry i specifies the displacement at which to place the
* incoming data from process i.
* dtype: Datatype of MPI_Allgatherv() MUST be MPI_DOUBLE.
* comm: MPI communicator of MPI_Allgatherv()
*
*/
int allgatherv_d2h(double *sendbuf_d, int sendcount, double *recvbuf_d,
int *recvcounts, int *displs, MPI_Datatype dtype,
MPI_Comm comm) {
/* NOT IMPLEMENTED ! */
return 0;
}
/**
* @brief Call MPI_Allgatherv() with device to device.
*
* @param
* sendbuf_d: Head address of sending device buffer of MPI_Allgatherv()
* sendcount: Number of elements in the sendbuf_d.
* recvbuf_d: Head address of receiving device buffer of MPI_Allgatherv()
* recvcounts: Array of number of elements that are to be received from
* each process
* displs: Entry i specifies the displacement at which to place the
* incoming data from process i.
* dtype: Datatype of MPI_Allgatherv() MUST be MPI_DOUBLE.
* comm: MPI communicator of MPI_Allgatherv()
*
*/
int allgatherv_d2d(double *sendbuf_d, int sendcount, double *recvbuf_d,
int *recvcounts, int *displs, MPI_Datatype dtype,
MPI_Comm comm) {
return MPI_Allgatherv(sendbuf_d, sendcount, dtype, recvbuf_d, recvcounts,
displs, dtype, comm);
}
/**
* @brief Initializations for calling MPI_Allgatherv()
*
* @param
* info: Process information.
* sendbuf_h: Head address of sending host buffer (output)
* recvbuf_h: Head address of receiving host buffer (output)
* sendbuf_d: Head address of sending device buffer (output)
* recvbuf_d: Head address of receiving device buffer (output)
* recvcounts: Head address of recvcounts array (output)
* displs: Head of displs array (output)
* count: Number of elements in the broadcasted vector. The datatype would
* be double.
*
*/
void allgatherv_init(const info_t info, double **sendbuf_h, double **recvbuf_h,
double **sendbuf_d, double **recvbuf_d, int **recvcounts,
int **displs, const int count) {
/* Calculate the sendcount and recvcount */
int recvcounts_sum = 0;
*recvcounts = (int *)malloc(sizeof(int) * info.size);
*displs = (int *)malloc(sizeof(int) * info.size);
for (int i=0; i<info.size; i++) {
(*displs)[i] = recvcounts_sum;
(*recvcounts)[i] = count / info.size;
recvcounts_sum += (*recvcounts)[i];
}
(*recvcounts)[info.size - 1] += count - recvcounts_sum;
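/* Example of the split above (illustrative numbers only): with count = 10 and
* 4 ranks, every rank first gets 10 / 4 = 2 elements and displs = {0, 2, 4, 6};
* the last rank then absorbs the remainder, giving recvcounts = {2, 2, 2, 4},
* so the per-rank counts sum back to count. */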
/* Allocation and initialization of buffers */
*sendbuf_h = (double *)calloc((*recvcounts)[info.rank], sizeof(double));
*recvbuf_h = (double *)calloc(count, sizeof(double));
for (int i=0; i<(*recvcounts)[info.rank]; i++) {
(*sendbuf_h)[i] = i;
}
CUDACHECK( hipMalloc((void**)sendbuf_d,
sizeof(double) * (*recvcounts)[info.rank]) );
CUDACHECK( hipMalloc((void**)recvbuf_d,
sizeof(double) * count) );
/* Host to device */
CUDACHECK( hipMemcpy(*sendbuf_d, *sendbuf_h,
sizeof(double) * (*recvcounts)[info.rank],
hipMemcpyHostToDevice) );
/* Print and barrier */
for (int i=0; i<info.size; i++) {
if (info.rank == i) {
printf("[%d/%d: %s]: B (%.2f, %.2f, %.2f, %.2f, ...)\n",
info.rank, info.size, info.hostname,
(*sendbuf_h)[0], (*sendbuf_h)[1], (*sendbuf_h)[2], (*sendbuf_h)[3]);
}
MPI_Barrier(MPI_COMM_WORLD);
}
if (info.rank == info.size - 1) {
printf("Starting MPI_Allgatherv()...\n");
}
MPI_Barrier(MPI_COMM_WORLD);
}
/**
* @brief Finalization for calling MPI_Allgatherv()
*
* @param
* info: Process information.
* sendbuf_h: Head address of sending host buffer
* recvbuf_h: Head address of receiving host buffer
* sendbuf_d: Head address of sending device buffer
* recvbuf_d: Head address of receiving device buffer
* recvcounts: Head address of recvcounts array (output)
* displs: Head of displs array (output)
* count: Number of elements in the broadcasted vector. The datatype would
* be double.
*
*/
void allgatherv_finalize(const info_t info, double *sendbuf_h,
double *recvbuf_h, double *sendbuf_d,
double *recvbuf_d, int *recvcounts, int *displs,
const int count) {
/* Device to host */
CUDACHECK( hipMemcpy(recvbuf_h, recvbuf_d, sizeof(double) * count,
hipMemcpyDeviceToHost) );
/* Print and barrier */
for (int i=0; i<info.size; i++) {
if (info.rank == i) {
printf("[%d/%d: %s]: A (%.2f, %.2f, %.2f, %.2f, ...)\n",
info.rank, info.size, info.hostname,
recvbuf_h[0], recvbuf_h[1], recvbuf_h[2], recvbuf_h[3]);
}
MPI_Barrier(MPI_COMM_WORLD);
}
if (info.rank == info.size - 1) {
printf("MPI_Allgatherv() done.\n");
}
MPI_Barrier(MPI_COMM_WORLD);
/* Free buffers */
CUDACHECK( hipFree(recvbuf_d) );
CUDACHECK( hipFree(sendbuf_d) );
free(displs);
free(recvcounts);
free(recvbuf_h);
free(sendbuf_h);
}
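/* Minimal end-to-end sketch of how the helpers above fit together. Illustrative
* only: the function name, the hard-coded MPI_COMM_WORLD and the caller-chosen
* count are assumptions, not part of the original benchmark driver. Any of the
* allgatherv_*() variants above could be substituted for allgatherv_h2h(). */
static inline int allgatherv_h2h_example(const info_t info, const int count) {
double *sendbuf_h = NULL;
double *recvbuf_h = NULL;
double *sendbuf_d = NULL;
double *recvbuf_d = NULL;
int *recvcounts = NULL;
int *displs = NULL;
allgatherv_init(info, &sendbuf_h, &recvbuf_h, &sendbuf_d, &recvbuf_d,
&recvcounts, &displs, count);
int rc = allgatherv_h2h(sendbuf_d, recvcounts[info.rank], recvbuf_d,
recvcounts, displs, MPI_DOUBLE, MPI_COMM_WORLD);
allgatherv_finalize(info, sendbuf_h, recvbuf_h, sendbuf_d, recvbuf_d,
recvcounts, displs, count);
return rc;
}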
| 267cd5bdf1760536e5f98fea0495e0ebddaa7f09.cu | #include "common.h"
#include "allgatherv.h"
/**
* @brief Call MPI_Allgatherv() with host to host.
*
* @param
* sendbuf_d: Head address of sending device buffer of MPI_Allgatherv()
* sendcount: Number of elements in the sendbuf_d.
* recvbuf_d: Head address of receiving device buffer of MPI_Allgatherv()
* recvcounts: Array of number of elements that are to be received from
* each process
* displs: Entry i specifies the displacement at which to place the
* incoming data from process i.
* dtype: Datatype of MPI_Allgatherv() MUST be MPI_DOUBLE.
* comm: MPI communicator of MPI_Allgatherv()
*
*/
int allgatherv_h2h(double *sendbuf_d, int sendcount, double *recvbuf_d,
int *recvcounts, int *displs, MPI_Datatype dtype,
MPI_Comm comm) {
double *sendbuf_h = NULL;
double *recvbuf_h = NULL;
int rc = MPI_SUCCESS;
int rank = 0;
int size = 1;
int total_recvcount = 0;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
for (int i=0; i<size; i++)
total_recvcount += recvcounts[i];
sendbuf_h = (double *)malloc(sizeof(double) * sendcount);
recvbuf_h = (double *)malloc(sizeof(double) * total_recvcount);
CUDACHECK( cudaMemcpy(sendbuf_h, sendbuf_d, sizeof(double) * sendcount,
cudaMemcpyDeviceToHost) );
rc = MPI_Allgatherv(sendbuf_h, sendcount, dtype, recvbuf_h, recvcounts,
displs, dtype, comm);
CUDACHECK( cudaMemcpy(recvbuf_d, recvbuf_h, sizeof(double) * total_recvcount,
cudaMemcpyHostToDevice) );
free(recvbuf_h);
free(sendbuf_h);
return rc;
}
/**
* @brief Call MPI_Allgatherv() with host to device.
*
* @param
* sendbuf_d: Head address of sending device buffer of MPI_Allgatherv()
* sendcount: Number of elements in the sendbuf_d.
* recvbuf_d: Head address of receiving device buffer of MPI_Allgatherv()
* recvcounts: Array of number of elements that are to be received from
* each process
* displs: Entry i specifies the displacement at which to place the
* incoming data from process i.
* dtype: Datatype of MPI_Allgatherv() MUST be MPI_DOUBLE.
* comm: MPI communicator of MPI_Allgatherv()
*
*/
int allgatherv_h2d(double *sendbuf_d, int sendcount, double *recvbuf_d,
int *recvcounts, int *displs, MPI_Datatype dtype,
MPI_Comm comm) {
/* NOT IMPLEMENTED ! */
return 0;
}
/**
* @brief Call MPI_Allgatherv() with device to host.
*
* @param
* sendbuf_d: Head address of sending device buffer of MPI_Allgatherv()
* sendcount: Number of elements in the sendbuf_d.
* recvbuf_d: Head address of receiving device buffer of MPI_Allgatherv()
* recvcounts: Array of number of elements that are to be received from
* each process
* displs: Entry i specifies the displacement at which to place the
* incoming data from process i.
* dtype: Datatype of MPI_Allgatherv() MUST be MPI_DOUBLE.
* comm: MPI communicator of MPI_Allgatherv()
*
*/
int allgatherv_d2h(double *sendbuf_d, int sendcount, double *recvbuf_d,
int *recvcounts, int *displs, MPI_Datatype dtype,
MPI_Comm comm) {
/* NOT IMPLEMENTED ! */
return 0;
}
/**
* @brief Call MPI_Allgatherv() with device to device.
*
* @param
* sendbuf_d: Head address of sending device buffer of MPI_Allgatherv()
* sendcount: Number of elements in the sendbuf_d.
* recvbuf_d: Head address of receiving device buffer of MPI_Allgatherv()
* recvcounts: Array of number of elements that are to be received from
* each process
* displs: Entry i specifies the displacement at which to place the
* incoming data from process i.
* dtype: Datatype of MPI_Allgatherv() MUST be MPI_DOUBLE.
* comm: MPI communicator of MPI_Allgatherv()
*
*/
int allgatherv_d2d(double *sendbuf_d, int sendcount, double *recvbuf_d,
int *recvcounts, int *displs, MPI_Datatype dtype,
MPI_Comm comm) {
return MPI_Allgatherv(sendbuf_d, sendcount, dtype, recvbuf_d, recvcounts,
displs, dtype, comm);
}
/**
* @brief Initializations for calling MPI_Allgatherv()
*
* @param
* info: Process information.
* sendbuf_h: Head address of sending host buffer (output)
* recvbuf_h: Head address of receiving host buffer (output)
* sendbuf_d: Head address of sending device buffer (output)
* recvbuf_d: Head address of receiving device buffer (output)
* recvcounts: Head address of recvcounts array (output)
* displs: Head of displs array (output)
* count: Number of elements in the broadcasted vector. The datatype would
* be double.
*
*/
void allgatherv_init(const info_t info, double **sendbuf_h, double **recvbuf_h,
double **sendbuf_d, double **recvbuf_d, int **recvcounts,
int **displs, const int count) {
/* Calculate the sendcount and recvcount */
int recvcounts_sum = 0;
*recvcounts = (int *)malloc(sizeof(int) * info.size);
*displs = (int *)malloc(sizeof(int) * info.size);
for (int i=0; i<info.size; i++) {
(*displs)[i] = recvcounts_sum;
(*recvcounts)[i] = count / info.size;
recvcounts_sum += (*recvcounts)[i];
}
(*recvcounts)[info.size - 1] += count - recvcounts_sum;
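/* Example of the split above (illustrative numbers only): with count = 10 and
* 4 ranks, every rank first gets 10 / 4 = 2 elements and displs = {0, 2, 4, 6};
* the last rank then absorbs the remainder, giving recvcounts = {2, 2, 2, 4},
* so the per-rank counts sum back to count. */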
/* Allocation and initialization of buffers */
*sendbuf_h = (double *)calloc((*recvcounts)[info.rank], sizeof(double));
*recvbuf_h = (double *)calloc(count, sizeof(double));
for (int i=0; i<(*recvcounts)[info.rank]; i++) {
(*sendbuf_h)[i] = i;
}
CUDACHECK( cudaMalloc((void**)sendbuf_d,
sizeof(double) * (*recvcounts)[info.rank]) );
CUDACHECK( cudaMalloc((void**)recvbuf_d,
sizeof(double) * count) );
/* Host to device */
CUDACHECK( cudaMemcpy(*sendbuf_d, *sendbuf_h,
sizeof(double) * (*recvcounts)[info.rank],
cudaMemcpyHostToDevice) );
/* Print and barrier */
for (int i=0; i<info.size; i++) {
if (info.rank == i) {
printf("[%d/%d: %s]: B (%.2f, %.2f, %.2f, %.2f, ...)\n",
info.rank, info.size, info.hostname,
(*sendbuf_h)[0], (*sendbuf_h)[1], (*sendbuf_h)[2], (*sendbuf_h)[3]);
}
MPI_Barrier(MPI_COMM_WORLD);
}
if (info.rank == info.size - 1) {
printf("Starting MPI_Allgatherv()...\n");
}
MPI_Barrier(MPI_COMM_WORLD);
}
/**
* @brief Finalization for calling MPI_Allgatherv()
*
* @param
* info: Process information.
* sendbuf_h: Head address of sending host buffer
* recvbuf_h: Head address of receiving host buffer
* sendbuf_d: Head address of sending device buffer
* recvbuf_d: Head address of receiving device buffer
* recvcounts: Head address of recvcounts array (output)
* displs: Head of displs array (output)
* count: Number of elements in the broadcasted vector. The datatype would
* be double.
*
*/
void allgatherv_finalize(const info_t info, double *sendbuf_h,
double *recvbuf_h, double *sendbuf_d,
double *recvbuf_d, int *recvcounts, int *displs,
const int count) {
/* Device to host */
CUDACHECK( cudaMemcpy(recvbuf_h, recvbuf_d, sizeof(double) * count,
cudaMemcpyDeviceToHost) );
/* Print and barrier */
for (int i=0; i<info.size; i++) {
if (info.rank == i) {
printf("[%d/%d: %s]: A (%.2f, %.2f, %.2f, %.2f, ...)\n",
info.rank, info.size, info.hostname,
recvbuf_h[0], recvbuf_h[1], recvbuf_h[2], recvbuf_h[3]);
}
MPI_Barrier(MPI_COMM_WORLD);
}
if (info.rank == info.size - 1) {
printf("MPI_Allgatherv() done.\n");
}
MPI_Barrier(MPI_COMM_WORLD);
/* Free buffers */
CUDACHECK( cudaFree(recvbuf_d) );
CUDACHECK( cudaFree(sendbuf_d) );
free(displs);
free(recvcounts);
free(recvbuf_h);
free(sendbuf_h);
}
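/* Minimal end-to-end sketch of how the helpers above fit together. Illustrative
* only: the function name, the hard-coded MPI_COMM_WORLD and the caller-chosen
* count are assumptions, not part of the original benchmark driver. Any of the
* allgatherv_*() variants above could be substituted for allgatherv_h2h(). */
static inline int allgatherv_h2h_example(const info_t info, const int count) {
double *sendbuf_h = NULL;
double *recvbuf_h = NULL;
double *sendbuf_d = NULL;
double *recvbuf_d = NULL;
int *recvcounts = NULL;
int *displs = NULL;
allgatherv_init(info, &sendbuf_h, &recvbuf_h, &sendbuf_d, &recvbuf_d,
&recvcounts, &displs, count);
int rc = allgatherv_h2h(sendbuf_d, recvcounts[info.rank], recvbuf_d,
recvcounts, displs, MPI_DOUBLE, MPI_COMM_WORLD);
allgatherv_finalize(info, sendbuf_h, recvbuf_h, sendbuf_d, recvbuf_d,
recvcounts, displs, count);
return rc;
}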
|
590f371965341081a90b7e58a0aa25f317a06992.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <errno.h>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
printf("%d %d %d %d\n", bx, by, tx, ty);
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
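// Note on the loop above: each thread starts at its threadIdx.x and advances by
// blockDim.x, so a single block of 256 threads still covers all n elements.
// blockIdx/gridDim are not used, so launching more than one block would only
// repeat the same additions.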
int main(void)
{
int N = 1<<20;
float *x = NULL, *y = NULL;
// Allocate Unified Memory accessible from CPU or GPU
int rc = hipMalloc(&x, N*sizeof(float)) ;
if (hipSuccess != rc) {
printf("failed %s\n", hipGetErrorString(hipGetLastError()));
exit(1);
}
//rc = hipMallocManaged(&y, N*sizeof(float));
rc = hipMalloc(&y, N*sizeof(float)) ;
if (hipSuccess != rc) {
printf("failed %s\n", hipGetErrorString(hipGetLastError()));
exit(1);
}
if (x == NULL) {
printf("x == NULL\n");
exit (1);
}
float *h_x = (float *) malloc(sizeof(float)*N);
float *h_y = (float *) malloc(sizeof(float)*N);
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
h_y[i] = 2.0f;
h_x[i] = 1.0f;
}
// copy host memory to device
hipError_t error = hipMemcpy(x, h_x, N*sizeof(float), hipMemcpyHostToDevice);
if (error == hipSuccess)
error = hipMemcpy(y, h_y, N*sizeof(float), hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_x) returned error %s %d %d" , hipGetErrorString(error) , error , __LINE__);
exit(1);
}
printf("Hi there\n");
//exit(1);
// Run kernel on 1M elements on the GPU
hipLaunchKernelGGL(( add), dim3(1), dim3(256), 0, 0, N, x, y);
printf("%d\n", __LINE__);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
//printf("%d\n", __LINE__);
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
// copy device memory back to host
error = hipMemcpy(h_y, y, N*sizeof(float), hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_x) returned error %s %d %d" , hipGetErrorString(error) , error , __LINE__);
exit(1);
}
for (int j = 0; j < N; j++)
maxError = fmax(maxError, fabs(h_y[j]-3.0f));
printf("Max error: %f\n" , maxError);
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 590f371965341081a90b7e58a0aa25f317a06992.cu | #include <stdio.h>
#include <errno.h>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
printf("%d %d %d %d\n", bx, by, tx, ty);
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
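// Note on the loop above: each thread starts at its threadIdx.x and advances by
// blockDim.x, so a single block of 256 threads still covers all n elements.
// blockIdx/gridDim are not used, so launching more than one block would only
// repeat the same additions.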
int main(void)
{
int N = 1<<20;
float *x = NULL, *y = NULL;
// Allocate Unified Memory – accessible from CPU or GPU
int rc = cudaMalloc(&x, N*sizeof(float)) ;
if (cudaSuccess != rc) {
printf("failed %s\n", cudaGetErrorString(cudaGetLastError()));
exit(1);
}
//rc = cudaMallocManaged(&y, N*sizeof(float));
rc = cudaMalloc(&y, N*sizeof(float)) ;
if (cudaSuccess != rc) {
printf("failed %s\n", cudaGetErrorString(cudaGetLastError()));
exit(1);
}
if (x == NULL) {
printf("x == NULL\n");
exit (1);
}
float *h_x = (float *) malloc(sizeof(float)*N);
float *h_y = (float *) malloc(sizeof(float)*N);
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
h_y[i] = 2.0f;
h_x[i] = 1.0f;
}
// copy host memory to device
cudaError_t error = cudaMemcpy(x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
if (error == cudaSuccess)
error = cudaMemcpy(y, h_y, N*sizeof(float), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_x) returned error %s %d %d" , cudaGetErrorString(error) , error , __LINE__);
exit(1);
}
printf("Hi there\n");
//exit(1);
// Run kernel on 1M elements on the GPU
add<<<1, 256>>>(N, x, y);
printf("%d\n", __LINE__);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
//printf("%d\n", __LINE__);
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
// copy device memory back to host
error = cudaMemcpy(h_y, y, N*sizeof(float), cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_x) returned error %s %d %d" , cudaGetErrorString(error) , error , __LINE__);
exit(1);
}
for (int j = 0; j < N; j++)
maxError = fmax(maxError, fabs(h_y[j]-3.0f));
printf("Max error: %f\n" , maxError);
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
c2606bcaf3b367c15a9fc2cdd27329b5e4bb98a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"convolution.h"
// note the eigen stores its matrix in column-major !!!!!!
// but it seems that we only need to change the convolution part -- and with in the convolution part, only the encode and decode part need to be changed
/* todo list : a convolution function which takes in input images, kernels and the reference to the output image
* a batch function which takes in the vector containing bunch of images and kernels and call the convolution kernel inside */
__global__
void convolution( double ** d_dataBuffer,
double ** d_outputBuffer,
double ** d_kernels,
size_t dataBufferSize,
size_t outputBufferSize,
size_t kernelSize){ // note the kernelSize here means the total number of kernels
// note that all the threads within the block are in use
// indexing
// the arrangement is each block will take care of one output image
size_t mydataIdx = blockIdx.x / kernelSize;
size_t mykernelIdx = blockIdx.x % kernelSize;
size_t myoutputIdx = blockIdx.x;
// load the corresponding kernel to the shared memory
// would like to load the image into shared memory as well, but we are limited by the shared memory size
__shared__ double s_kernel[KERNEL_SIZE];
if ( threadIdx.x < KERNEL_SIZE)
s_kernel[threadIdx.x] = d_kernels[mykernelIdx][threadIdx.x];
__syncthreads();
//calculate all the pixels in 75 loops -- 320 x 240 / 1024 = 75 -- hard coded here
// NOTE THE EIGEN IS COLUMN_MAJOR!!!!!!
// encode method: myIdx = x * IMG_HEIGHT + y
// decode method: x = myIdx / IMG_HEIGHT
// y = myIdx % IMG_HEIGHT
// for the edges, use zero
// we may encounter with negative index, so all int here
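// Worked example (assuming the 320 x 240 images mentioned above, i.e. IMG_HEIGHT == 240):
// pixel (x, y) == (3, 7) encodes to myIdx == 3 * 240 + 7 == 727, and decoding gives
// 727 / 240 == 3 and 727 % 240 == 7 back.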
int myIdx = 0; // the pixel index within the single image in the 1d array
// the pixel's location in the image
int myX = 0;
int myY = 0;
// the relative coordinates for each pixel
//int relativeX = 0;
//int relativeY = 0;
// by adding to the relative coordinate the offset, we get the absolute coordinate
// and note the top left pixel is considered as the origin.
int offsetX = 0;
int offsetY = 0;
// pixel index -- the absolute coordinates for the pixel
int pixelX = 0;
int pixelY = 0;
/* value containers used in the loop */
double sum = 0;
// note you may assume we are using 1024 threads per block here but the grid size is a different story
for ( size_t iLoops = 0; iLoops < 75 ; ++ iLoops){
myIdx = iLoops * blockDim.x + threadIdx.x;
myX = myIdx / IMG_HEIGHT;
myY = myIdx % IMG_HEIGHT;
offsetX = myX - (KERNEL_WIDTH - 1) / 2;
offsetY = myY - (KERNEL_HEIGHT - 1) / 2;
sum = 0;
for ( size_t iPixels = 0; iPixels < KERNEL_SIZE; ++iPixels){
//relativeX = iPixels % KERNEL_WIDTH;
//relativeY = iPixels / KERNEL_WIDTH;
//pixelX = relativeX + offsetX;
//pixelY = relativeY + offsetY;
pixelX = iPixels % KERNEL_WIDTH + offsetX;
pixelY = iPixels / KERNEL_WIDTH + offsetY;
if ( pixelX >= 0 && pixelY >= 0) // only update the sum when the pixel coordinate is valid -- considering the out-bound pixels as zero
sum += d_dataBuffer[mydataIdx][pixelX * IMG_HEIGHT + pixelY] * s_kernel[iPixels];
}
// write the result to the output array
d_outputBuffer[myoutputIdx][myIdx] = sum / KERNEL_SIZE;
}
}
void batch(vector< vector <MatrixXd> > dataset, vector< MatrixXd > kernels, vector < vector < MatrixXd > > output ){
/* Some notes :
* the total number of pictures to deal with for each batch is imgNum * channelNum * kernelNum
* and each image contains 320 * 240 pixels
* note 320 * 240 / 1024 = 75, which means if we assign each block an image, the process will finish in 75 loops, and this is a cute coincidence
* so we are going to find a way to arrange this imgNum * channelNum * kernelNum of images to as much processors as possible
* the good thing is that the process for each image will take roughly the same amount of time
* so for each kernel call, we send to the gpu m images (where m is the total number of SMs on the gpu) and each thread will figure out which kernel it should use. and due to limited resources, of course only the kernels are preloaded to the gpu
* the communication between the cpu and gpu happens once a loop, and apparently this is a bottleneck which needs careful optimization!!!!!
* and the indexing problem on CPU should also be taken good care of
* So the mechanism we are using here is to first figure out which images are going to be transmitted in this kernel call, and then store the corresponding address in the h_dataBuffer array. After convolution, the output will be written to the place specified by the h_outputStorage,
* the good point of this arrangement is that we don't have to copy the data in CPU over and over
*/
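/* Worked example of the buffer sizes below (BLKNUM depends on the GPU, so these
* numbers are an assumption for illustration only): with BLKNUM == 62 and
* kernels.size() == 4, dataBufferSize == 62 / 4 == 15 input channels go out per
* kernel call and outputBufferSize == 15 * 4 == 60 convolved images come back. */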
// note the basic components in the vector are eigen matrices, and if A is a matrix object, A.data() returns the pointer to the first element of the matrix
/* STEP 0 : Memory Allocation */
// CPU
size_t inputDatasetSize = dataset.size() * dataset[0].size() ; // the total number of pictures in the input dataset
size_t outputDatasetSize = dataset.size() * dataset[0].size() * kernels.size() ; // the total number of pictures in the output dataset
size_t kernelSize = kernels.size();
size_t dataBufferSize = BLKNUM / kernelSize; // the number of images we deal with in each kernel call (each channel is considered as an image here and this arrangement may make some blocks idle, but the number of idle blocks is less than the number of kernels;
size_t outputBufferSize = dataBufferSize * kernelSize;
//double * h_dataArray = new double [inputDatasetSize * IMG_HEIGHT * IMG_WIDTH ]; // each pixel takes a double number's space
//double * h_outputArray = new double [outputDatasetSize* IMG_HEIGHT * IMG_WIDTH];
//double * h_kernels = new double[kernels.size() * KERNEL_HEIGHT * KERNEL_WIDTH];
// note BLKNUM is the total number of blocks available on the card , and it's defined in the .h file
double** h_dataBuffer = new double*[dataBufferSize]; // this array stores the address of the leading element for each matrix that we are going to send to the GPU in each kernel call
double** h_outputStorage = new double*[outputBufferSize]; // similar as above, storing the address of the leading entry of the output container
// GPU
// we apply similar memory management as what we did to the host
double ** d_kernels = new double*[kernelSize];
double ** d_dataBuffer = new double*[dataBufferSize];
double ** d_outputBuffer = new double*[outputBufferSize];
// allocating and preloading the kernels to the gpu memory
for ( size_t i = 0; i < kernelSize; ++i){
checkCudaErrors(hipMalloc(&d_kernels[i], sizeof(double) * KERNEL_WIDTH * KERNEL_HEIGHT));
checkCudaErrors(hipMemcpy(d_kernels[i], kernels[i].data(), sizeof(double) * KERNEL_WIDTH * KERNEL_HEIGHT, hipMemcpyHostToDevice));
}
for (size_t i = 0 ; i < dataBufferSize; ++i){
checkCudaErrors(hipMalloc(&d_dataBuffer[i], sizeof(double) * IMG_WIDTH * IMG_HEIGHT));
}
for ( size_t i = 0; i < outputBufferSize; ++i){
checkCudaErrors(hipMalloc(&d_outputBuffer[i], sizeof(double) * IMG_HEIGHT * IMG_WIDTH));
}
/* STEP 2: Convolving */
size_t cursor = 0; // the index of the NEXT image to be sent to the GPU. Assuming that the inner vector sizes are all the same -- the number of channels for each data image are the same
size_t NumChannelsPerImg = dataset[0].size();
//size_t outervectorIdx = 0;
//size_t innervectorIdx = 0;
/* the transformation formulas are :
* cursor = outervectorIdx * NumChannelsPerImg + innervectorIdx
* outervectorIdx = cursor / NumChannelsPerImg
* innervectorIdx = cursor % NumChannelsPerImg */
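/* e.g., with NumChannelsPerImg == 3, cursor == 7 decodes to outervectorIdx == 2
* (the third data image) and innervectorIdx == 1 (its second channel), and
* encoding them back gives 2 * 3 + 1 == 7 again. */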
size_t inputStartingIdx = 0;
size_t totalNumIteration = inputDatasetSize / dataBufferSize; // then in this case totalNumIteration * dataBufferSize should be smaller than the inputDatasetSize, we will deal with the left overs at the end
for (size_t i = 0; i < totalNumIteration; ++i){
// first copy the data to GPU
inputStartingIdx = cursor; // the index of the leading input image for this loop
// then the index of the last image is inputStartingIdx + dataBufferSize - 1
for (size_t j = 0; j < dataBufferSize; ++j){
checkCudaErrors(hipMemcpy(d_dataBuffer[j], dataset[cursor/NumChannelsPerImg][cursor % NumChannelsPerImg].data(), sizeof(double) * IMG_HEIGHT * IMG_WIDTH, hipMemcpyHostToDevice));
++cursor;
}
// then launching kernel to do the computation
hipLaunchKernelGGL(( convolution), dim3(BLKNUM), dim3(1024), 0, 0, d_dataBuffer, d_outputBuffer, d_kernels, dataBufferSize, dataBufferSize * kernelSize, kernelSize);
// finally copy the result out
// in the gpu, we store the convolved pictures derived from the same image close to each other. eg, if the input image sequence is ABC and there are three kernels in total, then the output sequence is A1A2A3B1B2B3C1C2C3, and we need to convert the order to A1B1C1A2B2C2....
for ( size_t j = 0; j < outputBufferSize; ++j){
// need to check : the function call to write into eigen
// the second class image index is j/kernelSize + inputStartingIdx and the kernel index is j % kernelSize
// then the outervectorIdx =( j/kernelSize + inputStartingIdx) /NumChannelsPerImg
// and the innervectorIdx = j % kernelSize * NumChannelsPerImg + ( j / kernelSize + inputStartingIdx) % NumChannelsPerImg
checkCudaErrors(hipMemcpy( output[ (j/kernelSize + inputStartingIdx) /NumChannelsPerImg][j % kernelSize * NumChannelsPerImg + ( j / kernelSize + inputStartingIdx) % NumChannelsPerImg].data(), d_outputBuffer[j], sizeof(double) * IMG_HEIGHT * IMG_WIDTH, hipMemcpyDeviceToHost)) ;
}
}
// deal with the leftovers
// the number of leftovers is less than kernelSize
// copy in
size_t lastInputSize = inputDatasetSize - cursor;
for (size_t i = 0; i < lastInputSize; ++i){
checkCudaErrors(hipMemcpy(d_dataBuffer[i], dataset[(cursor + i)/NumChannelsPerImg][(cursor + i ) % NumChannelsPerImg].data(), sizeof(double) * IMG_HEIGHT * IMG_WIDTH, hipMemcpyHostToDevice));
}
// launch kernel
hipLaunchKernelGGL(( convolution), dim3(lastInputSize), dim3(1024), 0, 0, d_dataBuffer, d_outputBuffer, d_kernels, lastInputSize, lastInputSize * kernelSize, kernelSize);
// copy out
for (size_t i = 0; i < lastInputSize * kernelSize; ++i){
checkCudaErrors(hipMemcpy( output[ (i/kernelSize + cursor) /NumChannelsPerImg][i % kernelSize * NumChannelsPerImg + ( i / kernelSize + cursor) % NumChannelsPerImg].data(), d_outputBuffer[i], sizeof(double) * IMG_HEIGHT * IMG_WIDTH, hipMemcpyDeviceToHost)) ;
}
return;
}
| c2606bcaf3b367c15a9fc2cdd27329b5e4bb98a9.cu | #include"convolution.h"
// note the eigen stores its matrix in column-major !!!!!!
// but it seems that we only need to change the convolution part -- and with in the convolution part, only the encode and decode part need to be changed
/* todo list : a convolution function which takes in input images, kernels and the reference to the output image
* a batch function which takes in the vector containing bunch of images and kernels and call the convolution kernel inside */
__global__
void convolution( double ** d_dataBuffer,
double ** d_outputBuffer,
double ** d_kernels,
size_t dataBufferSize,
size_t outputBufferSize,
size_t kernelSize){ // note the kernelSize here means the total number of kernels
// note that all the threads within the block are in use
// indexing
// the arrangement is each block will take care of one output image
size_t mydataIdx = blockIdx.x / kernelSize;
size_t mykernelIdx = blockIdx.x % kernelSize;
size_t myoutputIdx = blockIdx.x;
// load the corresponding kernel to the shared memory
// would like to load the image into shared memory as well, but we are limited by the shared memory size
__shared__ double s_kernel[KERNEL_SIZE];
if ( threadIdx.x < KERNEL_SIZE)
s_kernel[threadIdx.x] = d_kernels[mykernelIdx][threadIdx.x];
__syncthreads();
//calculate all the pixels in 75 loops -- 320 x 240 / 1024 = 75 -- hard coded here
// NOTE THE EIGEN IS COLUMN_MAJOR!!!!!!
// encode method: myIdx = x * IMG_HEIGHT + y
// decode method: x = myIdx / IMG_HEIGHT
// y = myIdx % IMG_HEIGHT
// for the edges, use zero
// we may encounter with negative index, so all int here
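// Worked example (assuming the 320 x 240 images mentioned above, i.e. IMG_HEIGHT == 240):
// pixel (x, y) == (3, 7) encodes to myIdx == 3 * 240 + 7 == 727, and decoding gives
// 727 / 240 == 3 and 727 % 240 == 7 back.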
int myIdx = 0; // the pixel index within the single image in the 1d array
// the pixel's location in the image
int myX = 0;
int myY = 0;
// the relative coordinates for each pixel
//int relativeX = 0;
//int relativeY = 0;
// by adding to the relative coordinate the offset, we get the absolute coordinate
// and note the top left pixel is considered as the origin.
int offsetX = 0;
int offsetY = 0;
// pixel index -- the absolute coordinates for the pixel
int pixelX = 0;
int pixelY = 0;
/* value containers used in the loop */
double sum = 0;
// note you may assume we are using 1024 threads per block here but the grid size is a different story
for ( size_t iLoops = 0; iLoops < 75 ; ++ iLoops){
myIdx = iLoops * blockDim.x + threadIdx.x;
myX = myIdx / IMG_HEIGHT;
myY = myIdx % IMG_HEIGHT;
offsetX = myX - (KERNEL_WIDTH - 1) / 2;
offsetY = myY - (KERNEL_HEIGHT - 1) / 2;
sum = 0;
for ( size_t iPixels = 0; iPixels < KERNEL_SIZE; ++iPixels){
//relativeX = iPixels % KERNEL_WIDTH;
//relativeY = iPixels / KERNEL_WIDTH;
//pixelX = relativeX + offsetX;
//pixelY = relativeY + offsetY;
pixelX = iPixels % KERNEL_WIDTH + offsetX;
pixelY = iPixels / KERNEL_WIDTH + offsetY;
if ( pixelX >= 0 && pixelY >= 0) // only update the sum when the pixel coordinate is valid -- considering the out-bound pixels as zero
sum += d_dataBuffer[mydataIdx][pixelX * IMG_HEIGHT + pixelY] * s_kernel[iPixels];
}
// write the result to the output array
d_outputBuffer[myoutputIdx][myIdx] = sum / KERNEL_SIZE;
}
}
void batch(vector< vector <MatrixXd> > dataset, vector< MatrixXd > kernels, vector < vector < MatrixXd > > output ){
/* Some notes :
* the total number of pictures to deal with for each batch is imgNum * channelNum * kernelNum
* and each image contains 320 * 240 pixels
* note 320 * 240 / 1024 = 75, which means if we assign each block an image, the process will finish in 75 loops, and this is a cute coincidence
* so we are going to find a way to arrange this imgNum * channelNum * kernelNum of images to as much processors as possible
* the good thing is that the process for each image will take roughly the same amount of time
* so for each kernel call, we send to the gpu m images (where m is the total number of SMs on the gpu) and each thread will figure out which kernel it should use. and due to limited resources, of course only the kernels are preloaded to the gpu
* the communication between the cpu and gpu happens once a loop, and apparently this is a bottleneck which needs careful optimization!!!!!
* and the indexing problem on CPU should also be taken good care of
* So the mechanism we are using here is to first figure out which images are going to be transmitted in this kernel call, and then store the corresponding address in the h_dataBuffer array. After convolution, the output will be written to the place specified by the h_outputStorage,
* the good point of this arrangement is that we don't have to copy the data in CPU over and over
*/
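/* Worked example of the buffer sizes below (BLKNUM depends on the GPU, so these
* numbers are an assumption for illustration only): with BLKNUM == 62 and
* kernels.size() == 4, dataBufferSize == 62 / 4 == 15 input channels go out per
* kernel call and outputBufferSize == 15 * 4 == 60 convolved images come back. */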
// note the basic components in the vector are eigen matrices, and if A is a matrix object, A.data() returns the pointer to the first element of the matrix
/* STEP 0 : Memory Allocation */
// CPU
size_t inputDatasetSize = dataset.size() * dataset[0].size() ; // the total number of pictures in the input dataset
size_t outputDatasetSize = dataset.size() * dataset[0].size() * kernels.size() ; // the total number of pictures in the output dataset
size_t kernelSize = kernels.size();
size_t dataBufferSize = BLKNUM / kernelSize; // the number of images we deal with in each kernel call (each channel is considered as an image here and this arrangement may make some blocks idle, but the number of idle blocks is less than the number of kernels;
size_t outputBufferSize = dataBufferSize * kernelSize;
//double * h_dataArray = new double [inputDatasetSize * IMG_HEIGHT * IMG_WIDTH ]; // each pixel takes a double number's space
//double * h_outputArray = new double [outputDatasetSize* IMG_HEIGHT * IMG_WIDTH];
//double * h_kernels = new double[kernels.size() * KERNEL_HEIGHT * KERNEL_WIDTH];
// note BLKNUM is the total number of blocks available on the card , and it's defined in the .h file
double** h_dataBuffer = new double*[dataBufferSize]; // this array stores the address of the leading element for each matrix that we are going to send to the GPU in each kernel call
double** h_outputStorage = new double*[outputBufferSize]; // similar as above, storing the address of the leading entry of the output container
// GPU
// we apply similar memory management as what we did to the host
double ** d_kernels = new double*[kernelSize];
double ** d_dataBuffer = new double*[dataBufferSize];
double ** d_outputBuffer = new double*[outputBufferSize];
// allocating and preloading the kernels to the gpu memory
for ( size_t i = 0; i < kernelSize; ++i){
checkCudaErrors(cudaMalloc(&d_kernels[i], sizeof(double) * KERNEL_WIDTH * KERNEL_HEIGHT));
checkCudaErrors(cudaMemcpy(d_kernels[i], kernels[i].data(), sizeof(double) * KERNEL_WIDTH * KERNEL_HEIGHT, cudaMemcpyHostToDevice));
}
for (size_t i = 0 ; i < dataBufferSize; ++i){
checkCudaErrors(cudaMalloc(&d_dataBuffer[i], sizeof(double) * IMG_WIDTH * IMG_HEIGHT));
}
for ( size_t i = 0; i < outputBufferSize; ++i){
checkCudaErrors(cudaMalloc(&d_outputBuffer[i], sizeof(double) * IMG_HEIGHT * IMG_WIDTH));
}
/* STEP 2: Convolving */
size_t cursor = 0; // the index of the NEXT image to be sent to the GPU. Assuming that the inner vector sizes are all the same -- the number of channels for each data image are the same
size_t NumChannelsPerImg = dataset[0].size();
//size_t outervectorIdx = 0;
//size_t innervectorIdx = 0;
/* the transformation formulas are :
* cursor = outervectorIdx * NumChannelsPerImg + innervectorIdx
* outervectorIdx = cursor / NumChannelsPerImg
* innervectorIdx = cursor % NumChannelsPerImg */
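/* e.g., with NumChannelsPerImg == 3, cursor == 7 decodes to outervectorIdx == 2
* (the third data image) and innervectorIdx == 1 (its second channel), and
* encoding them back gives 2 * 3 + 1 == 7 again. */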
size_t inputStartingIdx = 0;
size_t totalNumIteration = inputDatasetSize / dataBufferSize; // then in this case totalNumIteration * dataBufferSize should be smaller than the inputDatasetSize, we will deal with the left overs at the end
for (size_t i = 0; i < totalNumIteration; ++i){
// first copy the data to GPU
inputStartingIdx = cursor; // the index of the leading input image for this loop
// then the index of the last image is inputStartingIdx + dataBufferSize - 1
for (size_t j = 0; j < dataBufferSize; ++j){
checkCudaErrors(cudaMemcpy(d_dataBuffer[j], dataset[cursor/NumChannelsPerImg][cursor % NumChannelsPerImg].data(), sizeof(double) * IMG_HEIGHT * IMG_WIDTH, cudaMemcpyHostToDevice));
++cursor;
}
// then launching kernel to do the computation
convolution<<<BLKNUM, 1024>>>(d_dataBuffer, d_outputBuffer, d_kernels, dataBufferSize, dataBufferSize * kernelSize, kernelSize);
// finally copy the result out
// in the gpu, we store the convolved pictures derived from the same image close to each other. eg, if the input image sequence is ABC and there are three kernels in total, then the output sequence is A1A2A3B1B2B3C1C2C3, and we need to convert the order to A1B1C1A2B2C2....
for ( size_t j = 0; j < outputBufferSize; ++j){
// need to check : the function call to write into eigen
// the second class image index is j/kernelSize + inputStartingIdx and the kernel index is j % kernelSize
// then the outervectorIdx =( j/kernelSize + inputStartingIdx) /NumChannelsPerImg
// and the innervectorIdx = j % kernelSize * NumChannelsPerImg + ( j / kernelSize + inputStartingIdx) % NumChannelsPerImg
checkCudaErrors(cudaMemcpy( output[ (j/kernelSize + inputStartingIdx) /NumChannelsPerImg][j % kernelSize * NumChannelsPerImg + ( j / kernelSize + inputStartingIdx) % NumChannelsPerImg].data(), d_outputBuffer[j], sizeof(double) * IMG_HEIGHT * IMG_WIDTH, cudaMemcpyDeviceToHost)) ;
}
}
// deal with the leftovers
// the number of leftovers is less than kernelSize
// copy in
size_t lastInputSize = inputDatasetSize - cursor;
for (size_t i = 0; i < lastInputSize; ++i){
checkCudaErrors(cudaMemcpy(d_dataBuffer[i], dataset[(cursor + i)/NumChannelsPerImg][(cursor + i ) % NumChannelsPerImg].data(), sizeof(double) * IMG_HEIGHT * IMG_WIDTH, cudaMemcpyHostToDevice));
}
// launch kernel
convolution<<<lastInputSize, 1024>>>(d_dataBuffer, d_outputBuffer, d_kernels, lastInputSize, lastInputSize * kernelSize, kernelSize);
// copy out
for (size_t i = 0; i < lastInputSize * kernelSize; ++i){
checkCudaErrors(cudaMemcpy( output[ (i/kernelSize + cursor) /NumChannelsPerImg][i % kernelSize * NumChannelsPerImg + ( i / kernelSize + cursor) % NumChannelsPerImg].data(), d_outputBuffer[i], sizeof(double) * IMG_HEIGHT * IMG_WIDTH, cudaMemcpyDeviceToHost)) ;
}
return;
}
|
547487282d60d65b6755dea4762ad17d48e5cabb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/remove.h>
#include <kernel_segsort.hxx>
#include "utils.hpp"
#include "update.hpp"
using namespace mgpu;
__global__ void calcEdgelistLengths(BatchUpdateData *bud, length_t* const __restrict__ ell){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
length_t batchSize = *(bud->getBatchSize());
if (tid < batchSize) {
vertexId_t src = bud->getSrc()[tid];
atomicAdd(ell+src, 1);
}
}
__global__ void copyIndices(BatchUpdateData *bud, vertexId_t* const __restrict__ ind,
vertexId_t* const __restrict__ seg, length_t* const __restrict__ off,
length_t* const __restrict__ ell){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
length_t batchSize = *(bud->getBatchSize());
if (tid < batchSize)
{
vertexId_t src = bud->getSrc()[tid];
// Start filling up from the end of the edge list like so:
// ind = ...___|_,_,_,_,_,_,_,3,8,6|_,_,_,_...
// el_mark = ^
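// e.g., if off[src] == 10 and src has three edges in the batch, the three threads
// handling them obtain el_mark == 2, 1 and 0 (in whatever order the atomics
// resolve) and write their destinations into ind[12], ind[11] and ind[10].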
length_t el_mark = atomicSub(ell + src, 1) - 1;
ind[off[src]+el_mark] = bud->getDst()[tid];
seg[off[src]+el_mark] = src;
}
}
template <typename T>
__global__ void initDeviceArray(T* mem, int32_t size, T value)
{
int32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size)
{
mem[idx] = value;
}
}
__device__ void isort(vertexId_t* const __restrict__ u, length_t ell) {
vertexId_t *v;
vertexId_t w;
for (int i = 0; i < ell; ++i) {
v = u+i;
while (v != u && *v < *(v-1)) {
w = *v;
*v = *(v-1);
*(v-1) = w;
v--;
}
}
}
__global__ void iSortAll(vertexId_t* const __restrict__ ind,
length_t* const __restrict__ off, length_t nv) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < nv) {
isort( &ind[ off[tid] ], off[tid+1] - off[tid]);
}
}
void testSort(length_t nv, BatchUpdate& bu, const int blockdim){
hipEvent_t ce_start,ce_stop;
length_t batchsize = *(bu.getHostBUD()->getBatchSize());
dim3 numBlocks(1, 1);
// iSort approach =============================================
start_clock(ce_start, ce_stop);
vertexId_t* d_bind = (vertexId_t*) allocDeviceArray(batchsize, sizeof(vertexId_t));
vertexId_t* d_bseg = (vertexId_t*) allocDeviceArray(batchsize, sizeof(vertexId_t));
length_t* d_boff = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
length_t* d_ell = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
numBlocks.x = ceil((float)nv/(float)blockdim);
hipLaunchKernelGGL(( initDeviceArray), dim3(numBlocks),dim3(blockdim), 0, 0, d_ell, nv, 0);
numBlocks.x = ceil((float)batchsize/(float)blockdim);
hipLaunchKernelGGL(( calcEdgelistLengths), dim3(numBlocks),dim3(blockdim), 0, 0, bu.getDeviceBUD()->devicePtr(), d_ell);
thrust::device_ptr<vertexId_t> dp_ell(d_ell);
thrust::device_ptr<vertexId_t> dp_boff(d_boff);
thrust::exclusive_scan(dp_ell, dp_ell+nv+1, dp_boff);
hipLaunchKernelGGL(( copyIndices), dim3(numBlocks),dim3(blockdim), 0, 0, bu.getDeviceBUD()->devicePtr(), d_bind, d_bseg, d_boff, d_ell);
numBlocks.x = ceil((float)nv/(float)blockdim);
hipLaunchKernelGGL(( iSortAll), dim3(numBlocks),dim3(blockdim), 0, 0, d_bind, d_boff, nv);
printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
// MGPU segsort approach ========================================
start_clock(ce_start, ce_stop);
// mgpu::segmented_sort(d_bind, batchsize, d_boff+1, nv-2, mgpu::less_t<int>(), context);
printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
// Thrust approach =============================================
start_clock(ce_start, ce_stop);
thrust::device_ptr<vertexId_t> dp_bind(bu.getDeviceBUD()->getDst());
thrust::device_ptr<vertexId_t> dp_bseg(bu.getDeviceBUD()->getSrc());
thrust::stable_sort_by_key(dp_bind, dp_bind + batchsize, dp_bseg);
thrust::stable_sort_by_key(dp_bseg, dp_bseg + batchsize, dp_bind);
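// The two stable sorts above order the batch lexicographically by (src, dst):
// sorting by dst first and then by src works because stable_sort_by_key keeps
// the dst order of entries that share the same src.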
length_t* d_tell = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
length_t* d_tboff = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
numBlocks.x = ceil((float)nv/(float)blockdim);
hipLaunchKernelGGL(( initDeviceArray), dim3(numBlocks),dim3(blockdim), 0, 0, d_tell, nv, 0);
numBlocks.x = ceil((float)batchsize/(float)blockdim);
hipLaunchKernelGGL(( calcEdgelistLengths), dim3(numBlocks),dim3(blockdim), 0, 0, bu.getDeviceBUD()->devicePtr(), d_tell);
thrust::device_ptr<vertexId_t> dp_tell(d_tell);
thrust::device_ptr<vertexId_t> dp_tboff(d_tboff);
thrust::exclusive_scan(dp_tell, dp_tell+nv+1, dp_tboff);
printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
// Correctness ==============================================
// From iSort
vertexId_t* h_bind = (vertexId_t*) allocHostArray(batchsize, sizeof(vertexId_t));
vertexId_t* h_bseg = (vertexId_t*) allocHostArray(batchsize, sizeof(vertexId_t));
length_t* h_boff = (length_t*) allocHostArray(nv+1, sizeof(length_t));
copyArrayDeviceToHost(d_bind, h_bind, batchsize, sizeof(vertexId_t));
copyArrayDeviceToHost(d_bseg, h_bseg, batchsize, sizeof(vertexId_t));
copyArrayDeviceToHost(d_boff, h_boff, nv, sizeof(length_t));
// From Thrust
vertexId_t* h_tbind = (vertexId_t*) allocHostArray(batchsize, sizeof(vertexId_t));
vertexId_t* h_tbseg = (vertexId_t*) allocHostArray(batchsize, sizeof(vertexId_t));
length_t* h_tboff = (length_t*) allocHostArray(nv+1, sizeof(length_t));
copyArrayDeviceToHost(bu.getDeviceBUD()->getDst(), h_tbind, batchsize, sizeof(vertexId_t));
copyArrayDeviceToHost(bu.getDeviceBUD()->getSrc(), h_tbseg, batchsize, sizeof(vertexId_t));
copyArrayDeviceToHost(d_tboff, h_tboff, nv, sizeof(length_t));
// Compare
for (int i = 0; i < nv; ++i)
{
if (h_tboff[i] != h_boff[i])
{
printf("h_tboff = %d\t h_boff = %d\n", h_tboff[i], h_boff[i]);
}
}
for (int i = 0; i < batchsize; ++i)
{
if (h_tbseg[i] != h_bseg[i])
{
printf("h_tbseg = %d\t h_bseg = %d\n", h_tbseg[i], h_bseg[i]);
}
if (h_tbind[i] != h_bind[i])
{
printf("h_tbind = %d\t h_bind = %d\n", h_tbind[i], h_bind[i]);
}
}
}
void testmgpusort(){
mgpu::standard_context_t context;
int count = 1000;
int num_segments = div_up(count, 100);
mem_t<int> segs = fill_random(0, count - 1, num_segments, true, context);
std::vector<int> segs_host = from_mem(segs);
mem_t<int> data = fill_random(0, 100000, count, false, context);
mem_t<int> values(count, context);
std::vector<int> host_data = from_mem(data);
segmented_sort(data.data(), count, segs.data(), num_segments,
less_t<int>(), context);
}
// TODO: change this into a CUDA mem copy operation.
__global__ void copyCSRToBUD(BatchUpdateData *bud, vertexId_t* const __restrict__ ind,
vertexId_t* const __restrict__ seg)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
length_t batchSize = *(bud->getBatchSize());
if (tid < batchSize)
{
bud->getSrc()[tid] = seg[tid];
bud->getDst()[tid] = ind[tid];
}
}
__global__ void copyOffCSRToBUD(BatchUpdateData *bud, length_t* const __restrict__ off)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
length_t nv = *(bud->getNumVertices());
if (tid < nv+1)
{
bud->getOffsets()[tid] = off[tid];
}
}
void BatchUpdate::sortDeviceBUD(const int blockdim)
{
length_t batchsize = *(getHostBUD()->getBatchSize());
length_t nv = *(getHostBUD()->getNumVertices());
printf("batchsize %d\n", batchsize);
dim3 numBlocks(1, 1);
vertexId_t* d_bind = (vertexId_t*) allocDeviceArray(batchsize, sizeof(vertexId_t));
length_t* d_boff = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
vertexId_t* d_bseg = (vertexId_t*) allocDeviceArray(batchsize, sizeof(vertexId_t));
length_t* d_ell = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
numBlocks.x = ceil((float)nv/(float)blockdim);
// TODO: use memset instead of this hack
hipLaunchKernelGGL(( initDeviceArray), dim3(numBlocks),dim3(blockdim), 0, 0, d_ell, nv, 0);
hipLaunchKernelGGL(( initDeviceArray), dim3(numBlocks),dim3(blockdim), 0, 0, d_boff, nv, 0);
// TODO: find a home for this poor statement
hipLaunchKernelGGL(( initDeviceArray), dim3(numBlocks),dim3(blockdim), 0, 0, getDeviceBUD()->getvNumDuplicates(), nv, 0);
numBlocks.x = ceil((float)batchsize/(float)blockdim);
hipLaunchKernelGGL(( calcEdgelistLengths), dim3(numBlocks),dim3(blockdim), 0, 0, getDeviceBUD()->devicePtr(), d_ell);
thrust::device_ptr<vertexId_t> dp_ell(d_ell);
thrust::device_ptr<vertexId_t> dp_boff(d_boff);
thrust::exclusive_scan(dp_ell, dp_ell+nv+1, dp_boff);
hipLaunchKernelGGL(( copyIndices), dim3(numBlocks),dim3(blockdim), 0, 0, getDeviceBUD()->devicePtr(), d_bind, d_bseg, d_boff, d_ell);
numBlocks.x = ceil((float)nv/(float)blockdim);
hipLaunchKernelGGL(( iSortAll), dim3(numBlocks),dim3(blockdim), 0, 0, d_bind, d_boff, nv);
// Put the sorted csr back into bud
numBlocks.x = ceil((float)batchsize/(float)blockdim);
hipLaunchKernelGGL(( copyCSRToBUD), dim3(numBlocks),dim3(blockdim), 0, 0, getDeviceBUD()->devicePtr(), d_bind, d_bseg);
numBlocks.x = ceil((float)(nv+1)/(float)blockdim);
hipLaunchKernelGGL(( copyOffCSRToBUD), dim3(numBlocks),dim3(blockdim), 0, 0, getDeviceBUD()->devicePtr(), d_boff);
freeDeviceArray(d_bind);
freeDeviceArray(d_boff);
freeDeviceArray(d_bseg);
freeDeviceArray(d_ell);
}
| 547487282d60d65b6755dea4762ad17d48e5cabb.cu | #include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/remove.h>
#include <kernel_segsort.hxx>
#include "utils.hpp"
#include "update.hpp"
using namespace mgpu;
__global__ void calcEdgelistLengths(BatchUpdateData *bud, length_t* const __restrict__ ell){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
length_t batchSize = *(bud->getBatchSize());
if (tid < batchSize) {
vertexId_t src = bud->getSrc()[tid];
atomicAdd(ell+src, 1);
}
}
__global__ void copyIndices(BatchUpdateData *bud, vertexId_t* const __restrict__ ind,
vertexId_t* const __restrict__ seg, length_t* const __restrict__ off,
length_t* const __restrict__ ell){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
length_t batchSize = *(bud->getBatchSize());
if (tid < batchSize)
{
vertexId_t src = bud->getSrc()[tid];
// Start filling up from the end of the edge list like so:
// ind = ...___|_,_,_,_,_,_,_,3,8,6|_,_,_,_...
// el_mark = ^
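// e.g., if off[src] == 10 and src has three edges in the batch, the three threads
// handling them obtain el_mark == 2, 1 and 0 (in whatever order the atomics
// resolve) and write their destinations into ind[12], ind[11] and ind[10].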
length_t el_mark = atomicSub(ell + src, 1) - 1;
ind[off[src]+el_mark] = bud->getDst()[tid];
seg[off[src]+el_mark] = src;
}
}
template <typename T>
__global__ void initDeviceArray(T* mem, int32_t size, T value)
{
int32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size)
{
mem[idx] = value;
}
}
__device__ void isort(vertexId_t* const __restrict__ u, length_t ell) {
vertexId_t *v;
vertexId_t w;
for (int i = 0; i < ell; ++i) {
v = u+i;
while (v != u && *v < *(v-1)) {
w = *v;
*v = *(v-1);
*(v-1) = w;
v--;
}
}
}
__global__ void iSortAll(vertexId_t* const __restrict__ ind,
length_t* const __restrict__ off, length_t nv) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < nv) {
isort( &ind[ off[tid] ], off[tid+1] - off[tid]);
}
}
void testSort(length_t nv, BatchUpdate& bu, const int blockdim){
cudaEvent_t ce_start,ce_stop;
length_t batchsize = *(bu.getHostBUD()->getBatchSize());
dim3 numBlocks(1, 1);
// iSort approach =============================================
start_clock(ce_start, ce_stop);
vertexId_t* d_bind = (vertexId_t*) allocDeviceArray(batchsize, sizeof(vertexId_t));
vertexId_t* d_bseg = (vertexId_t*) allocDeviceArray(batchsize, sizeof(vertexId_t));
length_t* d_boff = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
length_t* d_ell = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
numBlocks.x = ceil((float)nv/(float)blockdim);
initDeviceArray<<<numBlocks,blockdim>>>(d_ell, nv, 0);
numBlocks.x = ceil((float)batchsize/(float)blockdim);
calcEdgelistLengths<<<numBlocks,blockdim>>>(bu.getDeviceBUD()->devicePtr(), d_ell);
thrust::device_ptr<vertexId_t> dp_ell(d_ell);
thrust::device_ptr<vertexId_t> dp_boff(d_boff);
thrust::exclusive_scan(dp_ell, dp_ell+nv+1, dp_boff);
copyIndices<<<numBlocks,blockdim>>>(bu.getDeviceBUD()->devicePtr(), d_bind, d_bseg, d_boff, d_ell);
numBlocks.x = ceil((float)nv/(float)blockdim);
iSortAll<<<numBlocks,blockdim>>>(d_bind, d_boff, nv);
printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
// MGPU segsort approach ========================================
start_clock(ce_start, ce_stop);
// mgpu::segmented_sort(d_bind, batchsize, d_boff+1, nv-2, mgpu::less_t<int>(), context);
printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
// Thrust approach =============================================
start_clock(ce_start, ce_stop);
thrust::device_ptr<vertexId_t> dp_bind(bu.getDeviceBUD()->getDst());
thrust::device_ptr<vertexId_t> dp_bseg(bu.getDeviceBUD()->getSrc());
thrust::stable_sort_by_key(dp_bind, dp_bind + batchsize, dp_bseg);
thrust::stable_sort_by_key(dp_bseg, dp_bseg + batchsize, dp_bind);
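// The two stable sorts above order the batch lexicographically by (src, dst):
// sorting by dst first and then by src works because stable_sort_by_key keeps
// the dst order of entries that share the same src.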
length_t* d_tell = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
length_t* d_tboff = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
numBlocks.x = ceil((float)nv/(float)blockdim);
initDeviceArray<<<numBlocks,blockdim>>>(d_tell, nv, 0);
numBlocks.x = ceil((float)batchsize/(float)blockdim);
calcEdgelistLengths<<<numBlocks,blockdim>>>(bu.getDeviceBUD()->devicePtr(), d_tell);
thrust::device_ptr<vertexId_t> dp_tell(d_tell);
thrust::device_ptr<vertexId_t> dp_tboff(d_tboff);
thrust::exclusive_scan(dp_tell, dp_tell+nv+1, dp_tboff);
printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
// Correctness ==============================================
// From iSort
vertexId_t* h_bind = (vertexId_t*) allocHostArray(batchsize, sizeof(vertexId_t));
vertexId_t* h_bseg = (vertexId_t*) allocHostArray(batchsize, sizeof(vertexId_t));
length_t* h_boff = (length_t*) allocHostArray(nv+1, sizeof(length_t));
copyArrayDeviceToHost(d_bind, h_bind, batchsize, sizeof(vertexId_t));
copyArrayDeviceToHost(d_bseg, h_bseg, batchsize, sizeof(vertexId_t));
copyArrayDeviceToHost(d_boff, h_boff, nv, sizeof(length_t));
// From Thrust
vertexId_t* h_tbind = (vertexId_t*) allocHostArray(batchsize, sizeof(vertexId_t));
vertexId_t* h_tbseg = (vertexId_t*) allocHostArray(batchsize, sizeof(vertexId_t));
length_t* h_tboff = (length_t*) allocHostArray(nv+1, sizeof(length_t));
copyArrayDeviceToHost(bu.getDeviceBUD()->getDst(), h_tbind, batchsize, sizeof(vertexId_t));
copyArrayDeviceToHost(bu.getDeviceBUD()->getSrc(), h_tbseg, batchsize, sizeof(vertexId_t));
copyArrayDeviceToHost(d_tboff, h_tboff, nv, sizeof(length_t));
// Compare
for (int i = 0; i < nv; ++i)
{
if (h_tboff[i] != h_boff[i])
{
printf("h_tboff = %d\t h_boff = %d\n", h_tboff[i], h_boff[i]);
}
}
for (int i = 0; i < batchsize; ++i)
{
if (h_tbseg[i] != h_bseg[i])
{
printf("h_tbseg = %d\t h_bseg = %d\n", h_tbseg[i], h_bseg[i]);
}
if (h_tbind[i] != h_bind[i])
{
printf("h_tbind = %d\t h_bind = %d\n", h_tbind[i], h_bind[i]);
}
}
}
void testmgpusort(){
mgpu::standard_context_t context;
int count = 1000;
int num_segments = div_up(count, 100);
mem_t<int> segs = fill_random(0, count - 1, num_segments, true, context);
std::vector<int> segs_host = from_mem(segs);
mem_t<int> data = fill_random(0, 100000, count, false, context);
mem_t<int> values(count, context);
std::vector<int> host_data = from_mem(data);
segmented_sort(data.data(), count, segs.data(), num_segments,
less_t<int>(), context);
}
// TODO: change this into a CUDA mem copy operation.
__global__ void copyCSRToBUD(BatchUpdateData *bud, vertexId_t* const __restrict__ ind,
vertexId_t* const __restrict__ seg)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
length_t batchSize = *(bud->getBatchSize());
if (tid < batchSize)
{
bud->getSrc()[tid] = seg[tid];
bud->getDst()[tid] = ind[tid];
}
}
__global__ void copyOffCSRToBUD(BatchUpdateData *bud, length_t* const __restrict__ off)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
length_t nv = *(bud->getNumVertices());
if (tid < nv+1)
{
bud->getOffsets()[tid] = off[tid];
}
}
void BatchUpdate::sortDeviceBUD(const int blockdim)
{
length_t batchsize = *(getHostBUD()->getBatchSize());
length_t nv = *(getHostBUD()->getNumVertices());
printf("batchsize %d\n", batchsize);
dim3 numBlocks(1, 1);
vertexId_t* d_bind = (vertexId_t*) allocDeviceArray(batchsize, sizeof(vertexId_t));
length_t* d_boff = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
vertexId_t* d_bseg = (vertexId_t*) allocDeviceArray(batchsize, sizeof(vertexId_t));
length_t* d_ell = (length_t*) allocDeviceArray(nv+1, sizeof(length_t));
numBlocks.x = ceil((float)nv/(float)blockdim);
// TODO: use memset instead of this hack
initDeviceArray<<<numBlocks,blockdim>>>(d_ell, nv, 0);
initDeviceArray<<<numBlocks,blockdim>>>(d_boff, nv, 0);
// TODO: find a home for this poor statement
initDeviceArray<<<numBlocks,blockdim>>>(getDeviceBUD()->getvNumDuplicates(), nv, 0);
numBlocks.x = ceil((float)batchsize/(float)blockdim);
calcEdgelistLengths<<<numBlocks,blockdim>>>(getDeviceBUD()->devicePtr(), d_ell);
thrust::device_ptr<vertexId_t> dp_ell(d_ell);
thrust::device_ptr<vertexId_t> dp_boff(d_boff);
thrust::exclusive_scan(dp_ell, dp_ell+nv+1, dp_boff);
copyIndices<<<numBlocks,blockdim>>>(getDeviceBUD()->devicePtr(), d_bind, d_bseg, d_boff, d_ell);
numBlocks.x = ceil((float)nv/(float)blockdim);
iSortAll<<<numBlocks,blockdim>>>(d_bind, d_boff, nv);
// Put the sorted csr back into bud
numBlocks.x = ceil((float)batchsize/(float)blockdim);
copyCSRToBUD<<<numBlocks,blockdim>>>(getDeviceBUD()->devicePtr(), d_bind, d_bseg);
numBlocks.x = ceil((float)(nv+1)/(float)blockdim);
copyOffCSRToBUD<<<numBlocks,blockdim>>>(getDeviceBUD()->devicePtr(), d_boff);
freeDeviceArray(d_bind);
freeDeviceArray(d_boff);
freeDeviceArray(d_bseg);
freeDeviceArray(d_ell);
}
|
e6ad58925eedad7fd3dfd2703c77dc78bb318698.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
for(int r = 0; r < numRows; ++r){
for(int c = 0; c < numCols; ++c){
uchar4 rgba = rgbaImage[r * numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
int numDevices, ThreadsPerBlock;
//Stored the number of devices on GPU side
hipDeviceProp_t prop;
hipGetDeviceCount(&numDevices);
/*Looped through each device and determined the minimum number of threads per block
assuming both devices would be used. */
for(int i = 0; i < numDevices; i++){
hipGetDeviceProperties(&prop, i);
if (i == 0 || prop.maxThreadsPerBlock < ThreadsPerBlock){
ThreadsPerBlock = prop.maxThreadsPerBlock;
}
}
printf("Num devices: %d\n", numDevices);
printf("Min Threads per block: %d\n", ThreadsPerBlock);
//Called the kernel function
const dim3 blockSize(ThreadsPerBlock);
const dim3 gridSize(numRows * numCols / ThreadsPerBlock);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| e6ad58925eedad7fd3dfd2703c77dc78bb318698.cu | #include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
for(int r = 0; r < numRows; ++r){
for(int c = 0; c < numCols; ++c){
uchar4 rgba = rgbaImage[r * numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  int numDevices;
  int ThreadsPerBlock = 1024; // start at the architectural maximum (1024 threads per block) so the minimum search below is well-defined
//Stored the number of devices on GPU side
cudaDeviceProp prop;
cudaGetDeviceCount(&numDevices);
  /*Looped through each device and determined the minimum number of threads per block
assuming both devices would be used. */
for(int i = 0; i < numDevices; i++){
cudaGetDeviceProperties(&prop, i);
if (prop.maxThreadsPerBlock < ThreadsPerBlock){
ThreadsPerBlock = prop.maxThreadsPerBlock;
}
}
printf("Num devices: %d\n", numDevices);
printf("Min Threads per block: %d\n", ThreadsPerBlock);
//Called the kernel function
const dim3 blockSize(ThreadsPerBlock);
const dim3 gridSize(numRows * numCols / ThreadsPerBlock);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
47118ebae4a24beb9452e54b5dc975b6a41f4ac5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2023, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/factorization/par_ict_kernels.hpp"
#include <ginkgo/core/base/array.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/matrix/coo.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/dense.hpp>
#include "core/components/prefix_sum_kernels.hpp"
#include "core/matrix/coo_builder.hpp"
#include "core/matrix/csr_builder.hpp"
#include "core/matrix/csr_kernels.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/math.hpp"
#include "cuda/components/intrinsics.cuh"
#include "cuda/components/merging.cuh"
#include "cuda/components/prefix_sum.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/searching.cuh"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The parallel ICT factorization namespace.
*
* @ingroup factor
*/
namespace par_ict_factorization {
constexpr int default_block_size = 512;
// subwarp sizes for all warp-parallel kernels (filter, add_candidates)
using compiled_kernels =
syn::value_list<int, 1, 2, 4, 8, 16, 32, config::warp_size>;
#include "common/cuda_hip/factorization/par_ict_spgeam_kernels.hpp.inc"
#include "common/cuda_hip/factorization/par_ict_sweep_kernels.hpp.inc"
namespace {
template <int subwarp_size, typename ValueType, typename IndexType>
void add_candidates(syn::value_list<int, subwarp_size>,
std::shared_ptr<const DefaultExecutor> exec,
const matrix::Csr<ValueType, IndexType>* llh,
const matrix::Csr<ValueType, IndexType>* a,
const matrix::Csr<ValueType, IndexType>* l,
matrix::Csr<ValueType, IndexType>* l_new)
{
auto num_rows = static_cast<IndexType>(llh->get_size()[0]);
auto subwarps_per_block = default_block_size / subwarp_size;
auto num_blocks = ceildiv(num_rows, subwarps_per_block);
matrix::CsrBuilder<ValueType, IndexType> l_new_builder(l_new);
auto llh_row_ptrs = llh->get_const_row_ptrs();
auto llh_col_idxs = llh->get_const_col_idxs();
auto llh_vals = llh->get_const_values();
auto a_row_ptrs = a->get_const_row_ptrs();
auto a_col_idxs = a->get_const_col_idxs();
auto a_vals = a->get_const_values();
auto l_row_ptrs = l->get_const_row_ptrs();
auto l_col_idxs = l->get_const_col_idxs();
auto l_vals = l->get_const_values();
auto l_new_row_ptrs = l_new->get_row_ptrs();
// count non-zeros per row
if (num_blocks > 0) {
hipLaunchKernelGGL(( kernel::ict_tri_spgeam_nnz<subwarp_size>)
, dim3(num_blocks), dim3(default_block_size), 0, exec->get_stream(),
llh_row_ptrs, llh_col_idxs, a_row_ptrs, a_col_idxs,
l_new_row_ptrs, num_rows);
}
// build row ptrs
components::prefix_sum_nonnegative(exec, l_new_row_ptrs, num_rows + 1);
// resize output arrays
auto l_new_nnz = exec->copy_val_to_host(l_new_row_ptrs + num_rows);
l_new_builder.get_col_idx_array().resize_and_reset(l_new_nnz);
l_new_builder.get_value_array().resize_and_reset(l_new_nnz);
auto l_new_col_idxs = l_new->get_col_idxs();
auto l_new_vals = l_new->get_values();
// fill columns and values
if (num_blocks > 0) {
hipLaunchKernelGGL(( kernel::ict_tri_spgeam_init<subwarp_size>)
, dim3(num_blocks), dim3(default_block_size), 0, exec->get_stream(),
llh_row_ptrs, llh_col_idxs, as_device_type(llh_vals),
a_row_ptrs, a_col_idxs, as_device_type(a_vals), l_row_ptrs,
l_col_idxs, as_device_type(l_vals), l_new_row_ptrs,
l_new_col_idxs, as_device_type(l_new_vals), num_rows);
}
}
GKO_ENABLE_IMPLEMENTATION_SELECTION(select_add_candidates, add_candidates);
template <int subwarp_size, typename ValueType, typename IndexType>
void compute_factor(syn::value_list<int, subwarp_size>,
std::shared_ptr<const DefaultExecutor> exec,
const matrix::Csr<ValueType, IndexType>* a,
matrix::Csr<ValueType, IndexType>* l,
const matrix::Coo<ValueType, IndexType>* l_coo)
{
auto total_nnz = static_cast<IndexType>(l->get_num_stored_elements());
auto block_size = default_block_size / subwarp_size;
auto num_blocks = ceildiv(total_nnz, block_size);
if (num_blocks > 0) {
hipLaunchKernelGGL(( kernel::ict_sweep<subwarp_size>)
, dim3(num_blocks), dim3(default_block_size), 0, exec->get_stream(),
a->get_const_row_ptrs(), a->get_const_col_idxs(),
as_device_type(a->get_const_values()), l->get_const_row_ptrs(),
l_coo->get_const_row_idxs(), l->get_const_col_idxs(),
as_device_type(l->get_values()),
static_cast<IndexType>(l->get_num_stored_elements()));
}
}
GKO_ENABLE_IMPLEMENTATION_SELECTION(select_compute_factor, compute_factor);
} // namespace
template <typename ValueType, typename IndexType>
void add_candidates(std::shared_ptr<const DefaultExecutor> exec,
const matrix::Csr<ValueType, IndexType>* llh,
const matrix::Csr<ValueType, IndexType>* a,
const matrix::Csr<ValueType, IndexType>* l,
matrix::Csr<ValueType, IndexType>* l_new)
{
auto num_rows = a->get_size()[0];
auto total_nnz =
llh->get_num_stored_elements() + a->get_num_stored_elements();
auto total_nnz_per_row = total_nnz / num_rows;
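    // Dispatch heuristic: choose a compiled subwarp size large enough to cover the average
    // number of nonzeros per row, falling back to a full warp for denser rows (e.g. an
    // average of ~5 would pick the 8-wide kernel, assuming the selector walks
    // compiled_kernels in ascending order; an illustrative reading, not Ginkgo documentation).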
select_add_candidates(
compiled_kernels(),
[&](int compiled_subwarp_size) {
return total_nnz_per_row <= compiled_subwarp_size ||
compiled_subwarp_size == config::warp_size;
},
syn::value_list<int>(), syn::type_list<>(), exec, llh, a, l, l_new);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_PAR_ICT_ADD_CANDIDATES_KERNEL);
template <typename ValueType, typename IndexType>
void compute_factor(std::shared_ptr<const DefaultExecutor> exec,
const matrix::Csr<ValueType, IndexType>* a,
matrix::Csr<ValueType, IndexType>* l,
const matrix::Coo<ValueType, IndexType>* l_coo)
{
auto num_rows = a->get_size()[0];
auto total_nnz = 2 * l->get_num_stored_elements();
auto total_nnz_per_row = total_nnz / num_rows;
select_compute_factor(
compiled_kernels(),
[&](int compiled_subwarp_size) {
return total_nnz_per_row <= compiled_subwarp_size ||
compiled_subwarp_size == config::warp_size;
},
syn::value_list<int>(), syn::type_list<>(), exec, a, l, l_coo);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_PAR_ICT_COMPUTE_FACTOR_KERNEL);
} // namespace par_ict_factorization
} // namespace cuda
} // namespace kernels
} // namespace gko
| 47118ebae4a24beb9452e54b5dc975b6a41f4ac5.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2023, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/factorization/par_ict_kernels.hpp"
#include <ginkgo/core/base/array.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/matrix/coo.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/dense.hpp>
#include "core/components/prefix_sum_kernels.hpp"
#include "core/matrix/coo_builder.hpp"
#include "core/matrix/csr_builder.hpp"
#include "core/matrix/csr_kernels.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/math.hpp"
#include "cuda/components/intrinsics.cuh"
#include "cuda/components/merging.cuh"
#include "cuda/components/prefix_sum.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/searching.cuh"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The parallel ICT factorization namespace.
*
* @ingroup factor
*/
namespace par_ict_factorization {
constexpr int default_block_size = 512;
// subwarp sizes for all warp-parallel kernels (filter, add_candidates)
using compiled_kernels =
syn::value_list<int, 1, 2, 4, 8, 16, 32, config::warp_size>;
#include "common/cuda_hip/factorization/par_ict_spgeam_kernels.hpp.inc"
#include "common/cuda_hip/factorization/par_ict_sweep_kernels.hpp.inc"
namespace {
template <int subwarp_size, typename ValueType, typename IndexType>
void add_candidates(syn::value_list<int, subwarp_size>,
std::shared_ptr<const DefaultExecutor> exec,
const matrix::Csr<ValueType, IndexType>* llh,
const matrix::Csr<ValueType, IndexType>* a,
const matrix::Csr<ValueType, IndexType>* l,
matrix::Csr<ValueType, IndexType>* l_new)
{
auto num_rows = static_cast<IndexType>(llh->get_size()[0]);
auto subwarps_per_block = default_block_size / subwarp_size;
auto num_blocks = ceildiv(num_rows, subwarps_per_block);
matrix::CsrBuilder<ValueType, IndexType> l_new_builder(l_new);
auto llh_row_ptrs = llh->get_const_row_ptrs();
auto llh_col_idxs = llh->get_const_col_idxs();
auto llh_vals = llh->get_const_values();
auto a_row_ptrs = a->get_const_row_ptrs();
auto a_col_idxs = a->get_const_col_idxs();
auto a_vals = a->get_const_values();
auto l_row_ptrs = l->get_const_row_ptrs();
auto l_col_idxs = l->get_const_col_idxs();
auto l_vals = l->get_const_values();
auto l_new_row_ptrs = l_new->get_row_ptrs();
// count non-zeros per row
if (num_blocks > 0) {
kernel::ict_tri_spgeam_nnz<subwarp_size>
<<<num_blocks, default_block_size, 0, exec->get_stream()>>>(
llh_row_ptrs, llh_col_idxs, a_row_ptrs, a_col_idxs,
l_new_row_ptrs, num_rows);
}
// build row ptrs
components::prefix_sum_nonnegative(exec, l_new_row_ptrs, num_rows + 1);
// resize output arrays
auto l_new_nnz = exec->copy_val_to_host(l_new_row_ptrs + num_rows);
l_new_builder.get_col_idx_array().resize_and_reset(l_new_nnz);
l_new_builder.get_value_array().resize_and_reset(l_new_nnz);
auto l_new_col_idxs = l_new->get_col_idxs();
auto l_new_vals = l_new->get_values();
// fill columns and values
if (num_blocks > 0) {
kernel::ict_tri_spgeam_init<subwarp_size>
<<<num_blocks, default_block_size, 0, exec->get_stream()>>>(
llh_row_ptrs, llh_col_idxs, as_device_type(llh_vals),
a_row_ptrs, a_col_idxs, as_device_type(a_vals), l_row_ptrs,
l_col_idxs, as_device_type(l_vals), l_new_row_ptrs,
l_new_col_idxs, as_device_type(l_new_vals), num_rows);
}
}
GKO_ENABLE_IMPLEMENTATION_SELECTION(select_add_candidates, add_candidates);
template <int subwarp_size, typename ValueType, typename IndexType>
void compute_factor(syn::value_list<int, subwarp_size>,
std::shared_ptr<const DefaultExecutor> exec,
const matrix::Csr<ValueType, IndexType>* a,
matrix::Csr<ValueType, IndexType>* l,
const matrix::Coo<ValueType, IndexType>* l_coo)
{
auto total_nnz = static_cast<IndexType>(l->get_num_stored_elements());
auto block_size = default_block_size / subwarp_size;
auto num_blocks = ceildiv(total_nnz, block_size);
if (num_blocks > 0) {
kernel::ict_sweep<subwarp_size>
<<<num_blocks, default_block_size, 0, exec->get_stream()>>>(
a->get_const_row_ptrs(), a->get_const_col_idxs(),
as_device_type(a->get_const_values()), l->get_const_row_ptrs(),
l_coo->get_const_row_idxs(), l->get_const_col_idxs(),
as_device_type(l->get_values()),
static_cast<IndexType>(l->get_num_stored_elements()));
}
}
GKO_ENABLE_IMPLEMENTATION_SELECTION(select_compute_factor, compute_factor);
} // namespace
template <typename ValueType, typename IndexType>
void add_candidates(std::shared_ptr<const DefaultExecutor> exec,
const matrix::Csr<ValueType, IndexType>* llh,
const matrix::Csr<ValueType, IndexType>* a,
const matrix::Csr<ValueType, IndexType>* l,
matrix::Csr<ValueType, IndexType>* l_new)
{
auto num_rows = a->get_size()[0];
auto total_nnz =
llh->get_num_stored_elements() + a->get_num_stored_elements();
auto total_nnz_per_row = total_nnz / num_rows;
select_add_candidates(
compiled_kernels(),
[&](int compiled_subwarp_size) {
return total_nnz_per_row <= compiled_subwarp_size ||
compiled_subwarp_size == config::warp_size;
},
syn::value_list<int>(), syn::type_list<>(), exec, llh, a, l, l_new);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_PAR_ICT_ADD_CANDIDATES_KERNEL);
template <typename ValueType, typename IndexType>
void compute_factor(std::shared_ptr<const DefaultExecutor> exec,
const matrix::Csr<ValueType, IndexType>* a,
matrix::Csr<ValueType, IndexType>* l,
const matrix::Coo<ValueType, IndexType>* l_coo)
{
auto num_rows = a->get_size()[0];
auto total_nnz = 2 * l->get_num_stored_elements();
auto total_nnz_per_row = total_nnz / num_rows;
select_compute_factor(
compiled_kernels(),
[&](int compiled_subwarp_size) {
return total_nnz_per_row <= compiled_subwarp_size ||
compiled_subwarp_size == config::warp_size;
},
syn::value_list<int>(), syn::type_list<>(), exec, a, l, l_coo);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_PAR_ICT_COMPUTE_FACTOR_KERNEL);
} // namespace par_ict_factorization
} // namespace cuda
} // namespace kernels
} // namespace gko
|
a9e830c0aeaac7ca1463bffdcf73c18a010899ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
__global__ void kernel(void) {
}
int main(void) {
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, );
printf("Hello, World!\n");
return 0;
}
| a9e830c0aeaac7ca1463bffdcf73c18a010899ad.cu | #include <iostream>
__global__ void kernel(void) {
}
int main(void) {
kernel<<<1, 1>>>();
printf("Hello, World!\n");
return 0;
}
|
db82f0769b704fec2ad878651f80ff1fd1c0a5f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
int pitch_a, int pitch_b, int pitch_c,
int n, int m, int p )
{
int tx = threadIdx.x;
int i = blockIdx.x*64 + tx;
int j = blockIdx.y*2;
__shared__ float cb0[32], cb1[32];
float sum0 = 0.0, sum1 = 0., sum2 = 0.0, sum3 = 0.0;
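    // Each thread accumulates four elements of a: (i, j), (i, j+1), (i+32, j) and (i+32, j+1).
    // cb0/cb1 stage a 32-element strip of columns j and j+1 of c in shared memory so the
    // inner loop reuses them instead of re-reading global memory.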
for( int ks = 0; ks < p; ks += 32 ){
cb0[tx] = c[ks+tx+pitch_c*j];
cb1[tx] = c[ks+tx+pitch_c*(j+1)];
__syncthreads();
for( int k = ks; k < ks+32; ++k ){
float rb0 = b[i+pitch_b*k];
float rb1 = b[i+32+pitch_b*k];
sum0 += rb0 * cb0[k - ks];
sum1 += rb0 * cb1[k - ks];
sum2 += rb1*cb0[k - ks];
sum3 += rb1*cb1[k - ks];
}
__syncthreads();
}
a[i+pitch_a*j] = sum0;
a[i+pitch_a*(j+1)] = sum1;
a[i+32+pitch_a*j] = sum2;
a[i+32+pitch_a*(j+1)] = sum3;
}
| db82f0769b704fec2ad878651f80ff1fd1c0a5f1.cu | extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
int pitch_a, int pitch_b, int pitch_c,
int n, int m, int p )
{
int tx = threadIdx.x;
int i = blockIdx.x*64 + tx;
int j = blockIdx.y*2;
__shared__ float cb0[32], cb1[32];
float sum0 = 0.0, sum1 = 0., sum2 = 0.0, sum3 = 0.0;
for( int ks = 0; ks < p; ks += 32 ){
cb0[tx] = c[ks+tx+pitch_c*j];
cb1[tx] = c[ks+tx+pitch_c*(j+1)];
__syncthreads();
for( int k = ks; k < ks+32; ++k ){
float rb0 = b[i+pitch_b*k];
float rb1 = b[i+32+pitch_b*k];
sum0 += rb0 * cb0[k - ks];
sum1 += rb0 * cb1[k - ks];
sum2 += rb1*cb0[k - ks];
sum3 += rb1*cb1[k - ks];
}
__syncthreads();
}
a[i+pitch_a*j] = sum0;
a[i+pitch_a*(j+1)] = sum1;
a[i+32+pitch_a*j] = sum2;
a[i+32+pitch_a*(j+1)] = sum3;
}
|
5dc5803f06066aee977202c0fe7bc4c02e5461b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <iostream>
#include <cstdio>
#include <fstream>
#include "utils.h"
#include "timer.h"
#include <cstdio>
#include <sys/time.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/normal_distribution.h>
#include <thrust/random/uniform_int_distribution.h>
void computeHistogram(const unsigned int* d_vals,
unsigned int* const d_histo,
const unsigned int numBins,
const unsigned int numElems);
int main(int argc, char **argv) {
if (argc != 2) {
std::cerr << "You must supply an output file" << std::endl;
exit(1);
}
const unsigned int numBins = 1024;
const unsigned int numElems = 10000 * numBins;
const float stddev = 100.f;
unsigned int *vals = new unsigned int[numElems];
unsigned int *histo = new unsigned int[numBins];
timeval tv;
gettimeofday(&tv, NULL);
srand(tv.tv_usec);
//make the mean unpredictable, but close enough to the middle
//so that timings are unaffected
unsigned int mean = rand() % 100 + 462;
//Output mean so that grading can happen with the same inputs
std::cout << mean << std::endl;
thrust::minstd_rand rng;
thrust::random::experimental::normal_distribution<float> normalDist((float)mean, stddev);
for (size_t i = 0; i < numElems; ++i) {
vals[i] = min(max((int)normalDist(rng), 0), numBins - 1);
}
unsigned int *d_vals, *d_histo;
GpuTimer timer;
checkCudaErrors(hipMalloc(&d_vals, sizeof(unsigned int) * numElems));
checkCudaErrors(hipMalloc(&d_histo, sizeof(unsigned int) * numBins));
checkCudaErrors(hipMemset(d_histo, 0, sizeof(unsigned int) * numBins));
checkCudaErrors(hipMemcpy(d_vals, vals, sizeof(unsigned int) * numElems, hipMemcpyHostToDevice));
timer.Start();
computeHistogram(d_vals, d_histo, numBins, numElems);
timer.Stop();
int err = printf("%f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably the student closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
unsigned int *h_gpu = new unsigned int[numBins];
checkCudaErrors(hipMemcpy(h_gpu, d_histo, sizeof(unsigned int) * numBins, hipMemcpyDeviceToHost));
std::ofstream ofs(argv[1], std::ios::out | std::iostream::binary);
ofs.write(reinterpret_cast<char *>(h_gpu), numBins * sizeof(unsigned int));
ofs.close();
delete[] h_gpu;
delete[] vals;
delete[] histo;
hipFree(d_vals);
hipFree(d_histo);
return 0;
}
| 5dc5803f06066aee977202c0fe7bc4c02e5461b3.cu | #include <cstdlib>
#include <iostream>
#include <cstdio>
#include <fstream>
#include "utils.h"
#include "timer.h"
#include <cstdio>
#include <sys/time.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/normal_distribution.h>
#include <thrust/random/uniform_int_distribution.h>
void computeHistogram(const unsigned int* d_vals,
unsigned int* const d_histo,
const unsigned int numBins,
const unsigned int numElems);
int main(int argc, char **argv) {
if (argc != 2) {
std::cerr << "You must supply an output file" << std::endl;
exit(1);
}
const unsigned int numBins = 1024;
const unsigned int numElems = 10000 * numBins;
const float stddev = 100.f;
unsigned int *vals = new unsigned int[numElems];
unsigned int *histo = new unsigned int[numBins];
timeval tv;
gettimeofday(&tv, NULL);
srand(tv.tv_usec);
//make the mean unpredictable, but close enough to the middle
//so that timings are unaffected
unsigned int mean = rand() % 100 + 462;
//Output mean so that grading can happen with the same inputs
std::cout << mean << std::endl;
thrust::minstd_rand rng;
thrust::random::experimental::normal_distribution<float> normalDist((float)mean, stddev);
for (size_t i = 0; i < numElems; ++i) {
vals[i] = min(max((int)normalDist(rng), 0), numBins - 1);
}
unsigned int *d_vals, *d_histo;
GpuTimer timer;
checkCudaErrors(cudaMalloc(&d_vals, sizeof(unsigned int) * numElems));
checkCudaErrors(cudaMalloc(&d_histo, sizeof(unsigned int) * numBins));
checkCudaErrors(cudaMemset(d_histo, 0, sizeof(unsigned int) * numBins));
checkCudaErrors(cudaMemcpy(d_vals, vals, sizeof(unsigned int) * numElems, cudaMemcpyHostToDevice));
timer.Start();
computeHistogram(d_vals, d_histo, numBins, numElems);
timer.Stop();
int err = printf("%f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably the student closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
unsigned int *h_gpu = new unsigned int[numBins];
checkCudaErrors(cudaMemcpy(h_gpu, d_histo, sizeof(unsigned int) * numBins, cudaMemcpyDeviceToHost));
std::ofstream ofs(argv[1], std::ios::out | std::iostream::binary);
ofs.write(reinterpret_cast<char *>(h_gpu), numBins * sizeof(unsigned int));
ofs.close();
delete[] h_gpu;
delete[] vals;
delete[] histo;
cudaFree(d_vals);
cudaFree(d_histo);
return 0;
}
|
bf4bcc61e817818acbb6ba677179c0d561a2c8e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/// Tile size used by the OptimizedMMKernel
#define TILE_SIZE 32
/// Naive matrix multiplication CUDA Kernel
/// Tiled 1D Shared Memory No Unrolling
/// Tiled 2D Shared Memory No Unrolling
/// Tiled 2D Shared Memory With Unrolling (4x4 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (8x8 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (16x16 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (32x32 Tile Size)
/// Prints a matrix out to the stderr stream
__global__ void OptimizedMMKernel_2_8(float *a, float *b, float *c, int size)
{
// Create shared matrices for rows of A and columns of B
__shared__ float sharedA[8][8];
__shared__ float sharedB[8][8];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * blockDim.x + tx;
int y = blockIdx.y * blockDim.y + ty;
float sum = 0;
// Divide the matrix up into tiles based on the tile size so each thread
// Can perform its partial sum of the dot product from the shared matrix
int tilesPerGrid = size / blockDim.x;
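    // NOTE: this assumes size is an exact multiple of the 8x8 tile edge (blockDim.x ==
    // blockDim.y == 8); otherwise the integer division truncates and the trailing rows and
    // columns of the product are never computed.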
for (int i = 0; i < tilesPerGrid; i++)
{
// Each thread loads element into A and B
sharedA[ty][tx] = a[(y * size) + (i * 8) + tx];
sharedB[ty][tx] = b[(i * 8 * size) + (ty * size) + x];
// Wait for all threads to load each section of the shared matrix
__syncthreads();
sum += sharedA[ty][0] * sharedB[0][tx];
sum += sharedA[ty][1] * sharedB[1][tx];
sum += sharedA[ty][2] * sharedB[2][tx];
sum += sharedA[ty][3] * sharedB[3][tx];
sum += sharedA[ty][4] * sharedB[4][tx];
sum += sharedA[ty][5] * sharedB[5][tx];
sum += sharedA[ty][6] * sharedB[6][tx];
sum += sharedA[ty][7] * sharedB[7][tx];
// Wait for all threads to compute their partial sum from the shared matrices before loading the next
__syncthreads();
}
// Store the full sum as the result
c[y * size + x] = sum;
} | bf4bcc61e817818acbb6ba677179c0d561a2c8e4.cu | #include "includes.h"
/// Tile size used by the OptimizedMMKernel
#define TILE_SIZE 32
/// Naive matrix multiplication CUDA Kernel
/// Tiled 1D Shared Memory No Unrolling
/// Tiled 2D Shared Memory No Unrolling
/// Tiled 2D Shared Memory With Unrolling (4x4 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (8x8 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (16x16 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (32x32 Tile Size)
/// Prints a matrix out to the stderr stream
__global__ void OptimizedMMKernel_2_8(float *a, float *b, float *c, int size)
{
// Create shared matrices for rows of A and columns of B
__shared__ float sharedA[8][8];
__shared__ float sharedB[8][8];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * blockDim.x + tx;
int y = blockIdx.y * blockDim.y + ty;
float sum = 0;
// Divide the matrix up into tiles based on the tile size so each thread
// Can perform its partial sum of the dot product from the shared matrix
int tilesPerGrid = size / blockDim.x;
for (int i = 0; i < tilesPerGrid; i++)
{
// Each thread loads element into A and B
sharedA[ty][tx] = a[(y * size) + (i * 8) + tx];
sharedB[ty][tx] = b[(i * 8 * size) + (ty * size) + x];
// Wait for all threads to load each section of the shared matrix
__syncthreads();
sum += sharedA[ty][0] * sharedB[0][tx];
sum += sharedA[ty][1] * sharedB[1][tx];
sum += sharedA[ty][2] * sharedB[2][tx];
sum += sharedA[ty][3] * sharedB[3][tx];
sum += sharedA[ty][4] * sharedB[4][tx];
sum += sharedA[ty][5] * sharedB[5][tx];
sum += sharedA[ty][6] * sharedB[6][tx];
sum += sharedA[ty][7] * sharedB[7][tx];
// Wait for all threads to compute their partial sum from the shared matrices before loading the next
__syncthreads();
}
// Store the full sum as the result
c[y * size + x] = sum;
} |
a658194099bb40f4da868ad189ca07f898633fb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/emulation.hpp"
#include "opencv2/gpu/device/transform.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace hist
{
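    // Each block builds a private 256-bin histogram in shared memory, reading its rows as
    // packed 32-bit words (four 8-bit pixels per load) and counting them with shared-memory
    // atomics, then merges its non-zero bins into the global histogram with one global
    // atomicAdd per bin.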
__global__ void histogram256Kernel(const uchar* src, int cols, int rows, size_t step, int* hist)
{
__shared__ int shist[256];
const int y = blockIdx.x * blockDim.y + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
shist[tid] = 0;
__syncthreads();
if (y < rows)
{
const unsigned int* rowPtr = (const unsigned int*) (src + y * step);
const int cols_4 = cols / 4;
for (int x = threadIdx.x; x < cols_4; x += blockDim.x)
{
unsigned int data = rowPtr[x];
Emulation::smem::atomicAdd(&shist[(data >> 0) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 8) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 16) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 24) & 0xFFU], 1);
}
if (cols % 4 != 0 && threadIdx.x == 0)
{
for (int x = cols_4 * 4; x < cols; ++x)
{
unsigned int data = ((const uchar*)rowPtr)[x];
Emulation::smem::atomicAdd(&shist[data], 1);
}
}
}
__syncthreads();
const int histVal = shist[tid];
if (histVal > 0)
::atomicAdd(hist + tid, histVal);
}
void histogram256(PtrStepSzb src, int* hist, hipStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(src.rows, block.y));
hipLaunchKernelGGL(( histogram256Kernel), dim3(grid), dim3(block), 0, stream, src.data, src.cols, src.rows, src.step, hist);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
}
/////////////////////////////////////////////////////////////////////////
namespace hist
{
__constant__ int c_lut[256];
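    // c_lut is expected to hold the cumulative histogram (CDF) of the source image;
    // EqualizeHist rescales each entry by 255 / (cols * rows) (see equalizeHist below)
    // to map it onto the [0, 255] intensity range.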
struct EqualizeHist : unary_function<uchar, uchar>
{
float scale;
__host__ EqualizeHist(float _scale) : scale(_scale) {}
__device__ __forceinline__ uchar operator ()(uchar val) const
{
const int lut = c_lut[val];
return __float2int_rn(scale * lut);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits<hist::EqualizeHist> : DefaultTransformFunctorTraits<hist::EqualizeHist>
{
enum { smart_shift = 4 };
};
}}}
namespace hist
{
void equalizeHist(PtrStepSzb src, PtrStepSzb dst, const int* lut, hipStream_t stream)
{
if (stream == 0)
cudaSafeCall( hipMemcpyToSymbol(c_lut, lut, 256 * sizeof(int), 0, hipMemcpyDeviceToDevice) );
else
cudaSafeCall( hipMemcpyToSymbolAsync(c_lut, lut, 256 * sizeof(int), 0, hipMemcpyDeviceToDevice, stream) );
const float scale = 255.0f / (src.cols * src.rows);
transform(src, dst, EqualizeHist(scale), WithOutMask(), stream);
}
}
#endif /* CUDA_DISABLER */
| a658194099bb40f4da868ad189ca07f898633fb8.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/emulation.hpp"
#include "opencv2/gpu/device/transform.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace hist
{
__global__ void histogram256Kernel(const uchar* src, int cols, int rows, size_t step, int* hist)
{
__shared__ int shist[256];
const int y = blockIdx.x * blockDim.y + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
shist[tid] = 0;
__syncthreads();
if (y < rows)
{
const unsigned int* rowPtr = (const unsigned int*) (src + y * step);
const int cols_4 = cols / 4;
for (int x = threadIdx.x; x < cols_4; x += blockDim.x)
{
unsigned int data = rowPtr[x];
Emulation::smem::atomicAdd(&shist[(data >> 0) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 8) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 16) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 24) & 0xFFU], 1);
}
if (cols % 4 != 0 && threadIdx.x == 0)
{
for (int x = cols_4 * 4; x < cols; ++x)
{
unsigned int data = ((const uchar*)rowPtr)[x];
Emulation::smem::atomicAdd(&shist[data], 1);
}
}
}
__syncthreads();
const int histVal = shist[tid];
if (histVal > 0)
::atomicAdd(hist + tid, histVal);
}
void histogram256(PtrStepSzb src, int* hist, cudaStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(src.rows, block.y));
histogram256Kernel<<<grid, block, 0, stream>>>(src.data, src.cols, src.rows, src.step, hist);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
/////////////////////////////////////////////////////////////////////////
namespace hist
{
__constant__ int c_lut[256];
struct EqualizeHist : unary_function<uchar, uchar>
{
float scale;
__host__ EqualizeHist(float _scale) : scale(_scale) {}
__device__ __forceinline__ uchar operator ()(uchar val) const
{
const int lut = c_lut[val];
return __float2int_rn(scale * lut);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits<hist::EqualizeHist> : DefaultTransformFunctorTraits<hist::EqualizeHist>
{
enum { smart_shift = 4 };
};
}}}
namespace hist
{
void equalizeHist(PtrStepSzb src, PtrStepSzb dst, const int* lut, cudaStream_t stream)
{
if (stream == 0)
cudaSafeCall( cudaMemcpyToSymbol(c_lut, lut, 256 * sizeof(int), 0, cudaMemcpyDeviceToDevice) );
else
cudaSafeCall( cudaMemcpyToSymbolAsync(c_lut, lut, 256 * sizeof(int), 0, cudaMemcpyDeviceToDevice, stream) );
const float scale = 255.0f / (src.cols * src.rows);
transform(src, dst, EqualizeHist(scale), WithOutMask(), stream);
}
}
#endif /* CUDA_DISABLER */
|
85b769f1c693bb1901f43419f987a6919115e5ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 *this file exercises matrix multiplication with shared memory and uses
 *the technique of dividing the matrix into sub-matrices
*/
#include<time.h>
#include<stdlib.h>
#include<stdio.h>
#include<assert.h>
#include<hip/hip_runtime_api.h>
#define BLOCK_SIZE 8
#define MATRIX_SIZE 64
typedef struct {
int width;
int height;
float *vals;
} Matrix;
float& GetElement(const Matrix A, int row, int col) {
return A.vals[row * A.width + col];
}
__device__ float& GetElementKernel(const Matrix A, int row, int col) {
return A.vals[row * A.width + col];
}
__global__ void MatMulKernel(const Matrix A, const Matrix B, Matrix C) {
//__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
//__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int range = A.width / BLOCK_SIZE;
float c_value = 0.0f;
for (int k = 0; k < range; ++k) {
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[threadIdx.x][threadIdx.y] = GetElementKernel(A, blockIdx.x * BLOCK_SIZE + threadIdx.x, k * BLOCK_SIZE + threadIdx.y);
Bs[threadIdx.x][threadIdx.y] = GetElementKernel(B, k * BLOCK_SIZE + threadIdx.x, blockIdx.y * BLOCK_SIZE + threadIdx.y);
__syncthreads();
float tmp = 0.0f;
for (int block_k = 0; block_k < BLOCK_SIZE; ++block_k) {
tmp += As[threadIdx.x][block_k] * Bs[block_k][threadIdx.y];
}
c_value += tmp;
__syncthreads();
}
GetElementKernel(C, blockIdx.x * BLOCK_SIZE + threadIdx.x, blockIdx.y * BLOCK_SIZE + threadIdx.y) = c_value;
}
void MatMulUsual(const Matrix A, const Matrix B, Matrix C) {
for (int i = 0; i < C.height; ++i) {
for (int j = 0; j < C.width; ++j) {
float res = 0.0f;
for (int k = 0; k < A.width; ++k) {
res += GetElement(A, i, k) * GetElement(B, k, j);
}
GetElement(C, i, j) = res;
}
}
}
void checkCUDAError(const char *msg);
int main() {
size_t memSize = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
//initialize two matrix
srand(time(NULL));
float *valsA = (float*)malloc(memSize);
float *valsB = (float*)malloc(memSize);
for (int i = 1; i <= MATRIX_SIZE; ++i) {
for (int j = 1; j <= MATRIX_SIZE; ++j) {
valsA[(i - 1) * MATRIX_SIZE + (j - 1)] = (float)(rand()%100);
valsB[(i - 1) * MATRIX_SIZE + (j - 1)] = (float)(rand()%100);
}
}
Matrix matrixA = {MATRIX_SIZE, MATRIX_SIZE, valsA};
Matrix matrixB = {MATRIX_SIZE, MATRIX_SIZE, valsB};
	//multiply with the CPU
float *valsC_CPU = (float*)malloc(memSize);
Matrix matrixC_CPU = {MATRIX_SIZE, MATRIX_SIZE, valsC_CPU};
MatMulUsual(matrixA, matrixB, matrixC_CPU);
	//multiply with the GPU
float *valsC_GPU = (float*)malloc(memSize);
Matrix matrixC_GPU = {MATRIX_SIZE, MATRIX_SIZE, valsC_GPU};
//no use
// int numBlocks = 8 * 8;
//int numThreadsPerBlock = MATRIX_SIZE * MATRIX_SIZE / numBlocks;
float *valsA_d, *valsB_d, *valsC_d;
hipMalloc(&valsA_d, memSize);
hipMemcpy(valsA_d, valsA, memSize, hipMemcpyHostToDevice);
hipMalloc(&valsB_d, memSize);
hipMemcpy(valsB_d, valsB, memSize, hipMemcpyHostToDevice);
hipMalloc(&valsC_d, memSize);
Matrix A_d = {MATRIX_SIZE, MATRIX_SIZE, valsA_d};
Matrix B_d = {MATRIX_SIZE, MATRIX_SIZE, valsB_d};
Matrix C_d = {MATRIX_SIZE, MATRIX_SIZE, valsC_d};
//launch kernel
dim3 dimGrid(MATRIX_SIZE / BLOCK_SIZE, MATRIX_SIZE / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, A_d, B_d, C_d);
//block until the device has completed
hipDeviceSynchronize();
//check errors
checkCUDAError("kernel invocation");
//data fetch
hipMemcpy(valsC_GPU, valsC_d, memSize, hipMemcpyDeviceToHost);
checkCUDAError("memcpy");
//verify the data
for (int i = 0; i < MATRIX_SIZE; ++i) {
for (int j = 0; j < MATRIX_SIZE; ++j) {
assert(GetElement(matrixC_CPU, i, j) == GetElement(matrixC_GPU, i, j));
}
}
hipFree(valsA_d);
hipFree(valsB_d);
hipFree(valsC_d);
free(valsA);
free(valsB);
free(valsC_CPU);
free(valsC_GPU);
printf("Correct!\n");
hipProfilerStop();
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
| 85b769f1c693bb1901f43419f987a6919115e5ce.cu | /*
 *this file exercises matrix multiplication with shared memory and uses
 *the technique of dividing the matrix into sub-matrices
*/
#include<time.h>
#include<stdlib.h>
#include<stdio.h>
#include<assert.h>
#include<cuda_profiler_api.h>
#define BLOCK_SIZE 8
#define MATRIX_SIZE 64
typedef struct {
int width;
int height;
float *vals;
} Matrix;
float& GetElement(const Matrix A, int row, int col) {
return A.vals[row * A.width + col];
}
__device__ float& GetElementKernel(const Matrix A, int row, int col) {
return A.vals[row * A.width + col];
}
__global__ void MatMulKernel(const Matrix A, const Matrix B, Matrix C) {
//__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
//__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int range = A.width / BLOCK_SIZE;
float c_value = 0.0f;
for (int k = 0; k < range; ++k) {
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[threadIdx.x][threadIdx.y] = GetElementKernel(A, blockIdx.x * BLOCK_SIZE + threadIdx.x, k * BLOCK_SIZE + threadIdx.y);
Bs[threadIdx.x][threadIdx.y] = GetElementKernel(B, k * BLOCK_SIZE + threadIdx.x, blockIdx.y * BLOCK_SIZE + threadIdx.y);
__syncthreads();
float tmp = 0.0f;
for (int block_k = 0; block_k < BLOCK_SIZE; ++block_k) {
tmp += As[threadIdx.x][block_k] * Bs[block_k][threadIdx.y];
}
c_value += tmp;
__syncthreads();
}
GetElementKernel(C, blockIdx.x * BLOCK_SIZE + threadIdx.x, blockIdx.y * BLOCK_SIZE + threadIdx.y) = c_value;
}
void MatMulUsual(const Matrix A, const Matrix B, Matrix C) {
for (int i = 0; i < C.height; ++i) {
for (int j = 0; j < C.width; ++j) {
float res = 0.0f;
for (int k = 0; k < A.width; ++k) {
res += GetElement(A, i, k) * GetElement(B, k, j);
}
GetElement(C, i, j) = res;
}
}
}
void checkCUDAError(const char *msg);
int main() {
size_t memSize = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
//initialize two matrix
srand(time(NULL));
float *valsA = (float*)malloc(memSize);
float *valsB = (float*)malloc(memSize);
for (int i = 1; i <= MATRIX_SIZE; ++i) {
for (int j = 1; j <= MATRIX_SIZE; ++j) {
valsA[(i - 1) * MATRIX_SIZE + (j - 1)] = (float)(rand()%100);
valsB[(i - 1) * MATRIX_SIZE + (j - 1)] = (float)(rand()%100);
}
}
Matrix matrixA = {MATRIX_SIZE, MATRIX_SIZE, valsA};
Matrix matrixB = {MATRIX_SIZE, MATRIX_SIZE, valsB};
	//multiply with the CPU
float *valsC_CPU = (float*)malloc(memSize);
Matrix matrixC_CPU = {MATRIX_SIZE, MATRIX_SIZE, valsC_CPU};
MatMulUsual(matrixA, matrixB, matrixC_CPU);
	//multiply with the GPU
float *valsC_GPU = (float*)malloc(memSize);
Matrix matrixC_GPU = {MATRIX_SIZE, MATRIX_SIZE, valsC_GPU};
//no use
// int numBlocks = 8 * 8;
//int numThreadsPerBlock = MATRIX_SIZE * MATRIX_SIZE / numBlocks;
float *valsA_d, *valsB_d, *valsC_d;
cudaMalloc(&valsA_d, memSize);
cudaMemcpy(valsA_d, valsA, memSize, cudaMemcpyHostToDevice);
cudaMalloc(&valsB_d, memSize);
cudaMemcpy(valsB_d, valsB, memSize, cudaMemcpyHostToDevice);
cudaMalloc(&valsC_d, memSize);
Matrix A_d = {MATRIX_SIZE, MATRIX_SIZE, valsA_d};
Matrix B_d = {MATRIX_SIZE, MATRIX_SIZE, valsB_d};
Matrix C_d = {MATRIX_SIZE, MATRIX_SIZE, valsC_d};
//launch kernel
dim3 dimGrid(MATRIX_SIZE / BLOCK_SIZE, MATRIX_SIZE / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
MatMulKernel<<<dimGrid, dimBlock>>>(A_d, B_d, C_d);
//block until the device has completed
cudaThreadSynchronize();
//check errors
checkCUDAError("kernel invocation");
//data fetch
cudaMemcpy(valsC_GPU, valsC_d, memSize, cudaMemcpyDeviceToHost);
checkCUDAError("memcpy");
//verify the data
for (int i = 0; i < MATRIX_SIZE; ++i) {
for (int j = 0; j < MATRIX_SIZE; ++j) {
assert(GetElement(matrixC_CPU, i, j) == GetElement(matrixC_GPU, i, j));
}
}
cudaFree(valsA_d);
cudaFree(valsB_d);
cudaFree(valsC_d);
free(valsA);
free(valsB);
free(valsC_CPU);
free(valsC_GPU);
printf("Correct!\n");
cudaProfilerStop();
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
0db9418c58af2f58d23026c8b11d6cd59496aa3f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include <stdarg.h>
#include "runtime.cuh"
__device__ int syncID;
__device__ int threadNum;
int *done, *doneDev;
int *totalScheTasks, *totalScheTasksDev;
cTaskStruct *ccTaskPool;
gTaskStruct *ggTaskPool;
static int taskId = 0;
static int lastEmptyTask = 0;
static int round_count = 0;
static int taskIndex = 0;
static int barrierCount = 0;
hipStream_t master_kernel_stream;
hipStream_t runtime_stream;
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool);
void runtime_init(){
int i;
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
checkCudaErrors(hipStreamCreate(&runtime_stream));
checkCudaErrors(hipStreamCreate(&master_kernel_stream));
// done flag to interrupt runtime
checkCudaErrors(hipHostMalloc(&done, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&doneDev, sizeof(int)));
// host task buffer
checkCudaErrors(hipHostMalloc(&ccTaskPool, (BK_NUM*BP_NUM)*sizeof(cTaskStruct), hipHostMallocDefault));
// device task buffer
checkCudaErrors(hipMalloc(&ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct)));
// totalScheTasks:
checkCudaErrors(hipHostMalloc(&totalScheTasks, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&totalScheTasksDev, sizeof(int)));
for(i = 0; i < (BK_NUM*BP_NUM); i++) {
ccTaskPool[i].ready = 0;
ccTaskPool[i].done = -1;
ccTaskPool[i].taskId = 0;
ccTaskPool[i].readyId = -1;
}
// runtime variables copy
*done = 0;
*totalScheTasks = 0;
checkCudaErrors(hipMemcpyAsync(doneDev, done, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipMemcpyAsync(ggTaskPool, ccTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
//MasterKernel
hipLaunchKernelGGL(( masterKernel), dim3(BK_NUM), dim3(TD_NUM), SH_MEM_SIZE, master_kernel_stream, doneDev, totalScheTasksDev, ggTaskPool);
}
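// runtime_init() leaves masterKernel resident as a persistent kernel on its own stream.
// taskLaunch() below claims a slot in the pinned host task pool, fills in the descriptor
// and copies it to the device asynchronously, where the persistent kernel picks it up.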
int taskLaunch(int paraN, ...){
int j, k;
int terminate = 1;
va_list ap;
va_start(ap,paraN);
while(taskIndex < (BK_NUM*BP_NUM) && terminate == 1){
if(ccTaskPool[taskIndex].ready == 0 && ccTaskPool[taskIndex].readyId == -1){
// **Add here**: renew task table, set the bit of task ID on
// **Add here**: get_ID()
ccTaskPool[taskIndex].ready = 1;
ccTaskPool[taskIndex].taskId = taskIndex+1;
ccTaskPool[taskIndex].done = 1;
if(round_count > 0) {
ccTaskPool[taskIndex].readyId = taskId;
}else{
lastEmptyTask = taskIndex;
}
round_count ++;
taskId = taskIndex;
for(j = 0; j < paraN; j++){ // set parameters
int type = va_arg(ap, enum mytypes);
switch(type){
case INT:
if(j == 0) ccTaskPool[taskIndex].thread = va_arg(ap, int);
if(j == 1) ccTaskPool[taskIndex].block = va_arg(ap, int);
if(j == 2) ccTaskPool[taskIndex].sharemem = va_arg(ap, int);
if(j == 3) ccTaskPool[taskIndex].sync = va_arg(ap, int);
if(j == 4) ccTaskPool[taskIndex].funcId = va_arg(ap, int);
if(j > 4) ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, int*);
break;
case FLOAT:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, float*);
break;
case DOUBLE:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, double*);
break;
case CHAR:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, unsigned char*);
break;
case INT32:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, unsigned long int*);
break;
default:
break;
} // End switch
} // End for paraN
checkCudaErrors(hipMemcpyAsync(ggTaskPool+taskIndex, ccTaskPool+taskIndex,
sizeof(gTaskStruct), hipMemcpyHostToDevice, runtime_stream));
terminate = 0;
} // end if cTaskPool
taskIndex++;
if(taskIndex == (BK_NUM*BP_NUM) && round_count > 0){
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(hipMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId,
(int*)&ccTaskPool[lastEmptyTask].readyId,sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
barrierCount ++;
round_count = 0;
}
if(taskIndex == (BK_NUM*BP_NUM)){
checkCudaErrors(hipMemcpyAsync(ccTaskPool, ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct),
hipMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
taskIndex = 0;
}
} // end while i < BK_NUM*BP_NUM
va_end(ap);
return taskId;
}
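// The returned slot index doubles as the task handle; successive launches are chained
// through readyId so the device-side scheduler can walk the list of ready tasks
// (a reading of the code above rather than documented behaviour).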
void waitAll(int num_tasks){
*totalScheTasks = 0;
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(hipMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,
sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
round_count = 0;
int i;
while(*totalScheTasks < num_tasks){
checkCudaErrors(hipMemcpyAsync(totalScheTasks, totalScheTasksDev, sizeof(int), hipMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
}
*totalScheTasks = 0;
checkCudaErrors(hipMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
taskIndex = 0;
taskId = 0;
lastEmptyTask = 0;
}
void runtime_destroy(){
*done = 1;
checkCudaErrors(hipMemcpyAsync(doneDev, done, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
}
void runtime_free(){
checkCudaErrors(hipStreamDestroy(master_kernel_stream));
checkCudaErrors(hipStreamDestroy(runtime_stream));
checkCudaErrors(hipHostFree(done));
checkCudaErrors(hipHostFree(ccTaskPool));
checkCudaErrors(hipHostFree(totalScheTasks));
checkCudaErrors(hipFree(doneDev));
checkCudaErrors(hipFree(ggTaskPool));
checkCudaErrors(hipFree(totalScheTasksDev));
}
| 0db9418c58af2f58d23026c8b11d6cd59496aa3f.cu | #include <cuda_runtime.h>
#include <helper_cuda.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include <stdarg.h>
#include "runtime.cuh"
__device__ int syncID;
__device__ int threadNum;
int *done, *doneDev;
int *totalScheTasks, *totalScheTasksDev;
cTaskStruct *ccTaskPool;
gTaskStruct *ggTaskPool;
static int taskId = 0;
static int lastEmptyTask = 0;
static int round_count = 0;
static int taskIndex = 0;
static int barrierCount = 0;
cudaStream_t master_kernel_stream;
cudaStream_t runtime_stream;
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool);
void runtime_init(){
int i;
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
checkCudaErrors(cudaStreamCreate(&runtime_stream));
checkCudaErrors(cudaStreamCreate(&master_kernel_stream));
// done flag to interrupt runtime
checkCudaErrors(cudaHostAlloc(&done, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&doneDev, sizeof(int)));
// host task buffer
checkCudaErrors(cudaHostAlloc(&ccTaskPool, (BK_NUM*BP_NUM)*sizeof(cTaskStruct), cudaHostAllocDefault));
// device task buffer
checkCudaErrors(cudaMalloc(&ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct)));
// totalScheTasks:
checkCudaErrors(cudaHostAlloc(&totalScheTasks, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&totalScheTasksDev, sizeof(int)));
for(i = 0; i < (BK_NUM*BP_NUM); i++) {
ccTaskPool[i].ready = 0;
ccTaskPool[i].done = -1;
ccTaskPool[i].taskId = 0;
ccTaskPool[i].readyId = -1;
}
// runtime variables copy
*done = 0;
*totalScheTasks = 0;
checkCudaErrors(cudaMemcpyAsync(doneDev, done, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaMemcpyAsync(ggTaskPool, ccTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
//MasterKernel
masterKernel<<<BK_NUM, TD_NUM, SH_MEM_SIZE, master_kernel_stream>>>(doneDev, totalScheTasksDev, ggTaskPool);
}
int taskLaunch(int paraN, ...){
int j, k;
int terminate = 1;
va_list ap;
va_start(ap,paraN);
while(taskIndex < (BK_NUM*BP_NUM) && terminate == 1){
if(ccTaskPool[taskIndex].ready == 0 && ccTaskPool[taskIndex].readyId == -1){
// **Add here**: renew task table, set the bit of task ID on
// **Add here**: get_ID()
ccTaskPool[taskIndex].ready = 1;
ccTaskPool[taskIndex].taskId = taskIndex+1;
ccTaskPool[taskIndex].done = 1;
if(round_count > 0) {
ccTaskPool[taskIndex].readyId = taskId;
}else{
lastEmptyTask = taskIndex;
}
round_count ++;
taskId = taskIndex;
for(j = 0; j < paraN; j++){ // set parameters
int type = va_arg(ap, enum mytypes);
switch(type){
case INT:
if(j == 0) ccTaskPool[taskIndex].thread = va_arg(ap, int);
if(j == 1) ccTaskPool[taskIndex].block = va_arg(ap, int);
if(j == 2) ccTaskPool[taskIndex].sharemem = va_arg(ap, int);
if(j == 3) ccTaskPool[taskIndex].sync = va_arg(ap, int);
if(j == 4) ccTaskPool[taskIndex].funcId = va_arg(ap, int);
if(j > 4) ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, int*);
break;
case FLOAT:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, float*);
break;
case DOUBLE:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, double*);
break;
case CHAR:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, unsigned char*);
break;
case INT32:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, unsigned long int*);
break;
default:
break;
} // End switch
} // End for paraN
checkCudaErrors(cudaMemcpyAsync(ggTaskPool+taskIndex, ccTaskPool+taskIndex,
sizeof(gTaskStruct), cudaMemcpyHostToDevice, runtime_stream));
terminate = 0;
} // end if cTaskPool
taskIndex++;
if(taskIndex == (BK_NUM*BP_NUM) && round_count > 0){
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(cudaMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId,
(int*)&ccTaskPool[lastEmptyTask].readyId,sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
barrierCount ++;
round_count = 0;
}
if(taskIndex == (BK_NUM*BP_NUM)){
checkCudaErrors(cudaMemcpyAsync(ccTaskPool, ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct),
cudaMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
taskIndex = 0;
}
} // end while i < BK_NUM*BP_NUM
va_end(ap);
return taskId;
}
void waitAll(int num_tasks){
*totalScheTasks = 0;
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(cudaMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,
sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
round_count = 0;
int i;
while(*totalScheTasks < num_tasks){
checkCudaErrors(cudaMemcpyAsync(totalScheTasks, totalScheTasksDev, sizeof(int), cudaMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
}
*totalScheTasks = 0;
checkCudaErrors(cudaMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
taskIndex = 0;
taskId = 0;
lastEmptyTask = 0;
}
void runtime_destroy(){
*done = 1;
checkCudaErrors(cudaMemcpyAsync(doneDev, done, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
}
void runtime_free(){
checkCudaErrors(cudaStreamDestroy(master_kernel_stream));
checkCudaErrors(cudaStreamDestroy(runtime_stream));
checkCudaErrors(cudaFreeHost(done));
checkCudaErrors(cudaFreeHost(ccTaskPool));
checkCudaErrors(cudaFreeHost(totalScheTasks));
checkCudaErrors(cudaFree(doneDev));
checkCudaErrors(cudaFree(ggTaskPool));
checkCudaErrors(cudaFree(totalScheTasksDev));
}
|
0ef0af6ad443af85058914bf9aded77b300996e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "solver3x3_kernel.cuh"
#define N 3
#define NN 9
#define BLOCK_SIZE_X 192//256
#define BLOCK_SIZE_Y 1
#include <hip/hip_vector_types.h>
#include <vector_functions.h>
#include <hip/hip_complex.h>
#include <cutil_math.h> // for vector operations
// Utilities for handling 3x1 vectors with complex values
//struct __device_builtin__ __builtin_align__(24) cuComplex3
struct cuComplex3
{
hipComplex x, y, z;
};
inline __host__ __device__ cuComplex3 make_cuComplex3(hipComplex x, hipComplex y, hipComplex z) {
cuComplex3 c; c.x = x; c.y = y; c.z = z; return c;
}
inline __host__ __device__ cuComplex3 cross(cuComplex3 a, cuComplex3 b)
{
return make_cuComplex3(cuCmulf(a.y,b.z) - cuCmulf(a.z,b.y), cuCmulf(a.z,b.x) - cuCmulf(a.x,b.z), cuCmulf(a.x,b.y) - cuCmulf(a.y,b.x));
}
inline __host__ __device__ hipComplex dot(cuComplex3 a, cuComplex3 b)
{
return cuCmulf(a.x, cuConjf(b.x)) + cuCmulf(a.y, cuConjf(b.y)) + cuCmulf(a.z, cuConjf(b.z));
}
inline __host__ __device__ hipComplex inner(cuComplex3 a, cuComplex3 b)
{
return cuCmulf(a.x, b.x) + cuCmulf(a.y, b.y) + cuCmulf(a.z, b.z);
}
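// Note: dot() conjugates its second argument (a Hermitian inner product), while inner()
// is the plain bilinear product; the kernel below uses inner() for the scalar triple
// product (determinant) and for applying the inverse rows, and dot() appears to be unused.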
inline __host__ __device__ cuComplex3 operator*(float b, cuComplex3 a)
{
return make_cuComplex3(b * a.x, b * a.y, b * a.z);
}
inline __host__ __device__ cuComplex3 operator*(hipComplex b, cuComplex3 a)
{
return make_cuComplex3(cuCmulf(b, a.x), cuCmulf(b,a.y), cuCmulf(b,a.z));
}
/**
* Kernel solving 3x3 sets of linear equations using Cramer's rule
*
* x = solutions
* A = matrices
* b = left-hand sides
* stride = distance between elements in a single matrix (not in use)
* batch = number of sets
**/
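// Method used below: with A stored column-wise as c0, c1, c2, the determinant is the
// scalar triple product det(A) = inner(c0, cross(c1, c2)), and the rows of the inverse
// are cross(c1, c2)/det, cross(c2, c0)/det and cross(c0, c1)/det, so x = A^-1 * b is
// obtained with three inner products (equivalent to Cramer's rule for a 3x3 system).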
__global__ void solve3x3_kernel(hipComplex* x, const hipComplex* A, const hipComplex* b, const int stride, const int batch)
{
//__shared__ hipComplex Ai[BLOCK_SIZE_X*NN];
//__shared__ hipComplex bi[BLOCK_SIZE_X*N];
/*const int index_at_block_start = blockDim.x * blockIdx.x;
const int local_index = threadIdx.x;
const int index = index_at_block_start + local_index;*/
const int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < batch) {
// load values from global to local memory // An attempt to remove poor global memory access pattern.
/*const int n_threads = BLOCK_SIZE_X; // TODO: Handle last block! Might have less threads.
const int threadId = threadIdx.x;
int n_elements = BLOCK_SIZE_X*NN;
A += index_at_block_start*NN;
b += index_at_block_start*N;
for (int i = threadId; i < n_elements; i += n_threads) {
Ai[i] = A[i];
}
n_elements = BLOCK_SIZE_X*N;
for (int i = threadId; i < n_elements; i += n_threads) {
bi[i] = b[i];
}*/
// extract 3 columns to form a 3x3 matrix
const int mem_bias_m = NN*index;
cuComplex3 col0 = make_cuComplex3(A[mem_bias_m ], A[mem_bias_m + 1], A[mem_bias_m + 2]); // TODO: Memory access is no good!
cuComplex3 col1 = make_cuComplex3(A[mem_bias_m + 3], A[mem_bias_m + 4], A[mem_bias_m + 5]);
cuComplex3 col2 = make_cuComplex3(A[mem_bias_m + 6], A[mem_bias_m + 7], A[mem_bias_m + 8]);
//const int mem_bias_m = NN*local_index;
//cuComplex3 col0 = make_cuComplex3(Ai[mem_bias_m + 0], Ai[mem_bias_m + 1], Ai[mem_bias_m + 2]);
//cuComplex3 col1 = make_cuComplex3(Ai[mem_bias_m + 3], Ai[mem_bias_m + 4], Ai[mem_bias_m + 5]);
//cuComplex3 col2 = make_cuComplex3(Ai[mem_bias_m + 6], Ai[mem_bias_m + 7], Ai[mem_bias_m + 8]);
// calc inverse of A
hipComplex detMatrix = inner(col0, cross(col1,col2));
hipComplex a = make_cuComplex(0.0f, 0.0f);
if (cuCabsf(detMatrix) != 0) {
a = cuCdivf(make_cuComplex(1.0f,0.0f), detMatrix);//1 / detMatrix; //invertible matrix!
cuComplex3 invRow0 = a * cross(col1, col2);
cuComplex3 invRow1 = a * cross(col2, col0);
cuComplex3 invRow2 = a * cross(col0, col1);
// read b from global memory to registers
//const int mem_bias_v = local_index*N;
//cuComplex3 b_reg = make_cuComplex3(bi[mem_bias_v], bi[mem_bias_v + 1], bi[mem_bias_v + 2]);
const int mem_bias_v = N*index;
cuComplex3 b_reg = make_cuComplex3(b[mem_bias_v], b[mem_bias_v + 1], b[mem_bias_v + 2]);
// calc x and write it back to global memory
x[mem_bias_v ] = inner(invRow0, b_reg);
x[mem_bias_v + 1] = inner(invRow1, b_reg);
x[mem_bias_v + 2] = inner(invRow2, b_reg);
} else {
x[index] = make_cuComplex(0.5f, 0.0f); // debug value // in MATLAB we get here, so there is something wrong with the assignment of col0, col1 and col2.
return; // we might want to exit here, or do something else
}
}
}
int solve3x3(hipComplex* x, const hipComplex* A, const hipComplex* b, const int batch)
{
if (batch > 0) {
dim3 threads(BLOCK_SIZE_X, 1, 1);
dim3 grid((batch-1)/BLOCK_SIZE_X + 1, 1, 1);
// set cache configuration to maximize L1 cache
hipFuncSetCacheConfig(solve3x3_kernel, hipFuncCachePreferL1);
int stride = 1; // distance between each element in a matrix in A.
// execute the kernel
hipLaunchKernelGGL(( solve3x3_kernel), dim3(grid), dim3(threads), 0, 0, x, A, b, stride, batch);
return hipGetLastError();
} else {
return hipErrorInvalidValue;
}
}
| 0ef0af6ad443af85058914bf9aded77b300996e4.cu | #include "solver3x3_kernel.cuh"
#define N 3
#define NN 9
#define BLOCK_SIZE_X 192//256
#define BLOCK_SIZE_Y 1
#include <vector_types.h>
#include <vector_functions.h>
#include <cuComplex.h>
#include <cutil_math.h> // for vector operations
// Utilities for handling 3x1 vectors with complex values
//struct __device_builtin__ __builtin_align__(24) cuComplex3
struct cuComplex3
{
cuComplex x, y, z;
};
inline __host__ __device__ cuComplex3 make_cuComplex3(cuComplex x, cuComplex y, cuComplex z) {
cuComplex3 c; c.x = x; c.y = y; c.z = z; return c;
}
inline __host__ __device__ cuComplex3 cross(cuComplex3 a, cuComplex3 b)
{
return make_cuComplex3(cuCmulf(a.y,b.z) - cuCmulf(a.z,b.y), cuCmulf(a.z,b.x) - cuCmulf(a.x,b.z), cuCmulf(a.x,b.y) - cuCmulf(a.y,b.x));
}
inline __host__ __device__ cuComplex dot(cuComplex3 a, cuComplex3 b)
{
return cuCmulf(a.x, cuConjf(b.x)) + cuCmulf(a.y, cuConjf(b.y)) + cuCmulf(a.z, cuConjf(b.z));
}
inline __host__ __device__ cuComplex inner(cuComplex3 a, cuComplex3 b)
{
return cuCmulf(a.x, b.x) + cuCmulf(a.y, b.y) + cuCmulf(a.z, b.z);
}
inline __host__ __device__ cuComplex3 operator*(float b, cuComplex3 a)
{
return make_cuComplex3(b * a.x, b * a.y, b * a.z);
}
inline __host__ __device__ cuComplex3 operator*(cuComplex b, cuComplex3 a)
{
return make_cuComplex3(cuCmulf(b, a.x), cuCmulf(b,a.y), cuCmulf(b,a.z));
}
/**
* Kernel solving 3x3 sets of linear equations using Cramer's rule
*
* x = solutions
* A = matrices
* b = left-hand sides
* stride = distance between elements in a single matrix (not in use)
* batch = number of sets
**/
__global__ void solve3x3_kernel(cuComplex* x, const cuComplex* A, const cuComplex* b, const int stride, const int batch)
{
//__shared__ cuComplex Ai[BLOCK_SIZE_X*NN];
//__shared__ cuComplex bi[BLOCK_SIZE_X*N];
/*const int index_at_block_start = blockDim.x * blockIdx.x;
const int local_index = threadIdx.x;
const int index = index_at_block_start + local_index;*/
const int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < batch) {
// load values from global to local memory // An attempt to remove poor global memory access pattern.
/*const int n_threads = BLOCK_SIZE_X; // TODO: Handle last block! Might have less threads.
const int threadId = threadIdx.x;
int n_elements = BLOCK_SIZE_X*NN;
A += index_at_block_start*NN;
b += index_at_block_start*N;
for (int i = threadId; i < n_elements; i += n_threads) {
Ai[i] = A[i];
}
n_elements = BLOCK_SIZE_X*N;
for (int i = threadId; i < n_elements; i += n_threads) {
bi[i] = b[i];
}*/
// extract 3 columns to form a 3x3 matrix
const int mem_bias_m = NN*index;
cuComplex3 col0 = make_cuComplex3(A[mem_bias_m ], A[mem_bias_m + 1], A[mem_bias_m + 2]); // TODO: Memory access is no good!
cuComplex3 col1 = make_cuComplex3(A[mem_bias_m + 3], A[mem_bias_m + 4], A[mem_bias_m + 5]);
cuComplex3 col2 = make_cuComplex3(A[mem_bias_m + 6], A[mem_bias_m + 7], A[mem_bias_m + 8]);
//const int mem_bias_m = NN*local_index;
//cuComplex3 col0 = make_cuComplex3(Ai[mem_bias_m + 0], Ai[mem_bias_m + 1], Ai[mem_bias_m + 2]);
//cuComplex3 col1 = make_cuComplex3(Ai[mem_bias_m + 3], Ai[mem_bias_m + 4], Ai[mem_bias_m + 5]);
//cuComplex3 col2 = make_cuComplex3(Ai[mem_bias_m + 6], Ai[mem_bias_m + 7], Ai[mem_bias_m + 8]);
// calc inverse of A
cuComplex detMatrix = inner(col0, cross(col1,col2));
cuComplex a = make_cuComplex(0.0f, 0.0f);
if (cuCabsf(detMatrix) != 0) {
a = cuCdivf(make_cuComplex(1.0f,0.0f), detMatrix);//1 / detMatrix; //invertible matrix!
cuComplex3 invRow0 = a * cross(col1, col2);
cuComplex3 invRow1 = a * cross(col2, col0);
cuComplex3 invRow2 = a * cross(col0, col1);
// read b from global memory to registers
//const int mem_bias_v = local_index*N;
//cuComplex3 b_reg = make_cuComplex3(bi[mem_bias_v], bi[mem_bias_v + 1], bi[mem_bias_v + 2]);
const int mem_bias_v = N*index;
cuComplex3 b_reg = make_cuComplex3(b[mem_bias_v], b[mem_bias_v + 1], b[mem_bias_v + 2]);
// calc x and write it back to global memory
x[mem_bias_v ] = inner(invRow0, b_reg);
x[mem_bias_v + 1] = inner(invRow1, b_reg);
x[mem_bias_v + 2] = inner(invRow2, b_reg);
} else {
x[index] = make_cuComplex(0.5f, 0.0f); // debug value // in MATLAB we get here, so there is something wrong with the assignment of col0, col1 and col2.
return; // we might want to exit here, or do something else
}
}
}
int solve3x3(cuComplex* x, const cuComplex* A, const cuComplex* b, const int batch)
{
if (batch > 0) {
dim3 threads(BLOCK_SIZE_X, 1, 1);
dim3 grid((batch-1)/BLOCK_SIZE_X + 1, 1, 1);
// set cache configuration to maximize L1 cache
cudaFuncSetCacheConfig(solve3x3_kernel, cudaFuncCachePreferL1);
int stride = 1; // distance between each element in a matrix in A.
// execute the kernel
solve3x3_kernel<<< grid, threads>>>(x, A, b, stride, batch);
return cudaGetLastError();
} else {
return cudaErrorInvalidValue;
}
}
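/*
 * Minimal host-side usage sketch (illustrative only: pointer names are hypothetical and
 * error checking is omitted). The kernel treats each group of 9 consecutive elements of
 * A as the three columns of one matrix, and each group of 3 elements of b/x as one vector.
 *
 *   cuComplex *dA, *db, *dx;
 *   cudaMalloc((void**)&dA, batch * 9 * sizeof(cuComplex));
 *   cudaMalloc((void**)&db, batch * 3 * sizeof(cuComplex));
 *   cudaMalloc((void**)&dx, batch * 3 * sizeof(cuComplex));
 *   cudaMemcpy(dA, hA, batch * 9 * sizeof(cuComplex), cudaMemcpyHostToDevice);
 *   cudaMemcpy(db, hb, batch * 3 * sizeof(cuComplex), cudaMemcpyHostToDevice);
 *   int err = solve3x3(dx, dA, db, batch);
 *   cudaMemcpy(hx, dx, batch * 3 * sizeof(cuComplex), cudaMemcpyDeviceToHost);
 */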
|
2b27db63fb235479b6c40865b31d807b6e85fc82.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2012, Los Alamos National Security, LLC
All rights reserved.
Copyright 2012. Los Alamos National Security, LLC. This software was produced under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National Laboratory (LANL),
which is operated by Los Alamos National Security, LLC for the U.S. Department of Energy. The U.S. Government has rights to use, reproduce, and distribute this software.
NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE.
If software is modified to produce derivative works, such modified software should be clearly marked, so as not to confuse it with the version available from LANL.
Additionally, redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
· Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
· Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
· Neither the name of Los Alamos National Security, LLC, Los Alamos National Laboratory, LANL, the U.S. Government, nor the names of its contributors may be used
to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOS ALAMOS NATIONAL SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Christopher Sewell, [email protected]
This simulation is based on the method by Matt Sottile described here: http://syntacticsalt.com/2011/03/10/functional-flocks/
*/
#ifdef __APPLE__
#include <GL/glew.h>
#include <OpenGL/OpenGL.h>
#include <GLUT/glut.h>
#else
#include <GL/glew.h>
#include <GL/glut.h>
#include <GL/gl.h>
#endif
#include <QtGui>
#include <QObject>
#ifdef USE_INTEROP
#include <cuda_gl_interop.h>
#endif
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#include <vtkSphereSource.h>
#include <vtkArrowSource.h>
#include <vtkPolyData.h>
#include <vtkCellArray.h>
#include <vtkCellData.h>
#include <vtkDoubleArray.h>
#include <vtkFloatArray.h>
#include <vtkPoints.h>
#include <vtkPolyData.h>
#include <vtkPolyDataNormals.h>
#include <vtkPointData.h>
#include <vtkSmartPointer.h>
#include <vtkXMLPolyDataReader.h>
#include <vtkTriangleFilter.h>
#include <vtkPolyDataNormals.h>
#include <piston/piston_math.h>
#include <piston/choose_container.h>
#include <piston/hsv_color_map.h>
#define SPACE thrust::detail::default_device_space_tag
using namespace piston;
#include "flock_sim.h"
#include "glyph.h"
#include "glwindow.h"
//! Number of boids
#define INPUT_SIZE 1024
//==========================================================================
/*!
Variable declarations
*/
//==========================================================================
//! Variables for timing the framerate
struct timeval begin, end, diff;
int frameCount;
//! The flock simulation and glyph operators
flock_sim* simulation;
glyph<thrust::device_vector<float3>::iterator, thrust::device_vector<float3>::iterator, thrust::device_vector<float>::iterator,
thrust::device_vector<float3>::iterator, thrust::device_vector<float3>::iterator, thrust::device_vector<uint3>::iterator >* glyphs;
//! Initial positions and velocities for the boids
thrust::host_vector<float3> inputPositionsHost;
thrust::device_vector<float3> inputPositions;
thrust::device_vector<float3> inputVelocities;
//! Vertices, normals, colors, vertex indices, and scalars for output
thrust::host_vector<float3> vertices;
thrust::host_vector<float3> normals;
thrust::host_vector<float4> colors;
thrust::host_vector<uint3> indices;
thrust::device_vector<float> scalars;
//! Vertices, normals, and vertex indices for the sphere and arrow glyphs
thrust::device_vector<float3> sphereGlyphVertices;
thrust::device_vector<float3> sphereGlyphNormals;
thrust::device_vector<uint3> sphereGlyphIndices;
thrust::device_vector<float3> arrowGlyphVertices;
thrust::device_vector<float3> arrowGlyphNormals;
thrust::device_vector<uint3> arrowGlyphIndices;
//! VTK filters to produce the arrow and sphere glyphs
vtkArrowSource *arrowSource;
vtkSphereSource *sphereSource;
vtkPolyData *spherePoly;
vtkPolyData *arrowPoly;
vtkTriangleFilter *triangleFilter;
vtkPolyDataNormals *normalGenerator;
//! Camera and UI variables
int glyphMode;
bool simPaused;
float3 cameraPos;
float cameraFOV;
int gridSize;
//! Vertex buffer objects used by CUDA interop
#ifdef USE_INTEROP
GLuint vboBuffers[4]; struct cudaGraphicsResource* vboResources[4];
#endif
//==========================================================================
/*!
struct randomInit
Initialize the vector elements with random values between the min and max
*/
//==========================================================================
struct randomInit : public thrust::unary_function<float3, float3>
{
float minValue, maxValue;
__host__ __device__
randomInit(float minValue, float maxValue) : minValue(minValue), maxValue(maxValue) { };
__host__ __device__
float3 operator() (float3 i)
{
float3 result;
result.x = minValue + (maxValue-minValue)*((rand() % 100000)/100000.0);
result.y = minValue + (maxValue-minValue)*((rand() % 100000)/100000.0);
result.z = minValue + (maxValue-minValue)*((rand() % 100000)/100000.0);
return result;
}
};
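// Note: rand() is a host-only function; in practice this functor is only applied via
// thrust::transform over host_vector iterators in initializeGL, so it executes on the CPU
// and the __device__ qualifier is never exercised on the GPU here.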
//==========================================================================
/*!
Extract vertices, normals, and vertex indices from a vtkPolyData instance
\fn copyPolyData
*/
//==========================================================================
void copyPolyData(vtkPolyData *polyData, thrust::device_vector<float3> &points, thrust::device_vector<float3> &vectors, thrust::device_vector<uint3> &indexes)
{
// Extract the vertices and normals and copy to the output vectors
vtkPoints* pts = polyData->GetPoints();
vtkFloatArray* verts = vtkFloatArray::SafeDownCast(pts->GetData());
vtkFloatArray* norms = vtkFloatArray::SafeDownCast(polyData->GetPointData()->GetNormals());
float3* vData = (float3*)verts->GetPointer(0);
float3* nData = (float3*)norms->GetPointer(0);
points.assign(vData, vData+verts->GetNumberOfTuples());
vectors.assign(nData, nData+norms->GetNumberOfTuples());
// Extract the vertex indices from the cells and copy to the output vectors
vtkCellArray* cellArray = polyData->GetPolys();
vtkIdTypeArray* conn = cellArray->GetData();
vtkIdType* cData = conn->GetPointer(0);
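// VTK stores each polygon cell as [nPts, id0, id1, ..., id(nPts-1)]; for triangles this is
// [3, id0, id1, id2], so the loop below compacts the connectivity in place by dropping the
// leading per-cell point count, leaving a flat list of 3 indices per triangle.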
for (int i=0; i<3*polyData->GetNumberOfPolys(); i++) cData[i] = cData[(i/3)*4+(i%3)+1];
thrust::host_vector<uint> indexTemp;
indexTemp.assign(cData, cData+3*polyData->GetNumberOfPolys());
uint3* c3Data = (uint3*)(thrust::raw_pointer_cast(&*indexTemp.begin()));
indexes.assign(c3Data, c3Data+polyData->GetNumberOfPolys());
}
//==========================================================================
/*!
Constructor for GLWindow class
\fn GLWindow::GLWindow
*/
//==========================================================================
GLWindow::GLWindow(QWidget *parent)
: QGLWidget(QGLFormat(QGL::SampleBuffers), parent)
{
// Start the QT callback timer
setFocusPolicy(Qt::StrongFocus);
timer = new QTimer(this);
connect(timer, SIGNAL(timeout()), this, SLOT(updateGL()));
timer->start(1);
}
//==========================================================================
/*!
Create the flock simulation and glyph operators
\fn GLWindow::initializeGL
*/
//==========================================================================
void GLWindow::initializeGL()
{
// Initialize camera and UI variables
qrot.set(0,0,0,1);
frameCount = 0;
gridSize = 256;
glyphMode = 0;
simPaused = false;
cameraPos = make_float3(0.0f, 0.0f, 1.5*gridSize);
cameraFOV = 60.0;
// Set up basic OpenGL state and lighting
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glEnable(GL_DEPTH_TEST);
glShadeModel(GL_SMOOTH);
float white[] = { 0.8, 0.8, 0.8, 1.0 };
float black[] = { 0.0, 0.0, 0.0, 1.0 };
float lightPos[] = { 0.0, 0.0, gridSize*1.5, 1.0 };
glLightfv(GL_LIGHT0, GL_AMBIENT, white);
glLightfv(GL_LIGHT0, GL_DIFFUSE, white);
glLightfv(GL_LIGHT0, GL_SPECULAR, black);
glLightfv(GL_LIGHT0, GL_POSITION, lightPos);
glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, 1);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_NORMALIZE);
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);
glEnable(GL_COLOR_MATERIAL);
// Initialize CUDA interop if it is being used
#ifdef USE_INTEROP
glewInit();
hipGLSetGLDevice(0);
#endif
// Initialize boid positions to random values and boid velocities to zero
inputPositions.resize(INPUT_SIZE); inputPositionsHost.resize(INPUT_SIZE);
thrust::transform(inputPositionsHost.begin(), inputPositionsHost.end(), inputPositionsHost.begin(), randomInit(0.0f, 1.0f*gridSize));
inputPositions = inputPositionsHost;
thrust::fill(inputVelocities.begin(), inputVelocities.end(), make_float3(0.0f, 0.0f, 0.0f));
// Set the boundaries of the simulation space
float3 boundaryMin, boundaryMax;
boundaryMin.x = boundaryMin.y = boundaryMin.z = 0.0f;
boundaryMax.x = boundaryMax.y = boundaryMax.z = gridSize;
// Create the flock simulation instance
simulation = new flock_sim(inputPositions, inputVelocities, boundaryMin, boundaryMax, 1.0f, 5.0f, 1.0f, 0.01f, 1.0025f, 30.0f, 5.0f, 30.0f, 4.0f, 10.0f, 0.01f, 0.5f);
// Use VTK to generate a sphere glyph
sphereSource = vtkSphereSource::New();
sphereSource->SetThetaResolution(5);
sphereSource->SetPhiResolution(5);
sphereSource->Update();
spherePoly = vtkPolyData::New();
spherePoly->ShallowCopy(sphereSource->GetOutput());
copyPolyData(spherePoly, sphereGlyphVertices, sphereGlyphNormals, sphereGlyphIndices);
// Use VTK to generate an arrow glyph and its normals
arrowSource = vtkArrowSource::New();
arrowSource->Update();
triangleFilter = vtkTriangleFilter::New();
triangleFilter->SetInputConnection(arrowSource->GetOutputPort());
triangleFilter->Update();
arrowPoly = vtkPolyData::New();
arrowPoly->ShallowCopy(triangleFilter->GetOutput());
normalGenerator = vtkPolyDataNormals::New();
normalGenerator->SetInput(arrowPoly);
normalGenerator->ComputePointNormalsOn();
normalGenerator->ComputeCellNormalsOff();
normalGenerator->Update();
arrowPoly = normalGenerator->GetOutput();
copyPolyData(arrowPoly, arrowGlyphVertices, arrowGlyphNormals, arrowGlyphIndices);
// Initialize glyph input scalars to the minimum simulation scalar value
scalars.resize(INPUT_SIZE);
thrust::fill(scalars.begin(), scalars.end(), simulation->get_scalar_min());
// Create the glyph operator instance
glyphs = new glyph<thrust::device_vector<float3>::iterator, thrust::device_vector<float3>::iterator, thrust::device_vector<float>::iterator,
thrust::device_vector<float3>::iterator, thrust::device_vector<float3>::iterator, thrust::device_vector<uint3>::iterator>();
// If using interop, initialize vertex buffer objects
#ifdef USE_INTEROP
int numPoints = INPUT_SIZE*std::max(sphereGlyphVertices.size(), arrowGlyphVertices.size());
glGenBuffers(4, vboBuffers);
for (int i=0; i<3; i++)
{
unsigned int bufferSize = (i == 1) ? numPoints*sizeof(float4) : numPoints*sizeof(float3);
glBindBuffer(GL_ARRAY_BUFFER, vboBuffers[i]);
glBufferData(GL_ARRAY_BUFFER, bufferSize, 0, GL_DYNAMIC_DRAW);
}
glBindBuffer(GL_ARRAY_BUFFER, vboBuffers[3]);
glBufferData(GL_ARRAY_BUFFER, numPoints*sizeof(uint3), 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
for (int i=0; i<4; i++)
{
hipGraphicsGLRegisterBuffer(&(vboResources[i]), vboBuffers[i], hipGraphicsMapFlagsWriteDiscard);
glyphs->vboResources[i] = vboResources[i];
}
#endif
// Enable OpenGL state for vertex, normal, and color arrays
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
}
//==========================================================================
/*!
Update the simulation and graphics
\fn GLWindow::paintGL
*/
//==========================================================================
void GLWindow::paintGL()
{
// Stop the QT callback timer
timer->stop();
// Start timing this interval
if (frameCount == 0) gettimeofday(&begin, 0);
// Set up the OpenGL state
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
// Set up the projection and modelview matrices for the view
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(cameraFOV, 1.0f, 1.0f, gridSize*4.0f);
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
gluLookAt(cameraPos.x, cameraPos.y, cameraPos.z, cameraPos.x, cameraPos.y, 0.0f, 0.0f, 1.0f, 0.0f);
// Set up the current rotation and translation
qrot.getRotMat(rotationMatrix);
glMultMatrixf(rotationMatrix);
glTranslatef(-(gridSize-1)/2, -(gridSize-1)/2, -(gridSize-1)/2);
// If the simulation is not paused, compute the next simulation step, and apply the glyph operator to the result,
// using either the arrow or sphere glyph
int curGlyphMode = glyphMode;
if (!simPaused)
{
(*simulation)();
if (curGlyphMode == 0)
(*glyphs)(simulation->positions_begin(), simulation->velocities_begin(), simulation->speeds_begin(),
arrowGlyphVertices.begin(), arrowGlyphNormals.begin(), arrowGlyphIndices.begin(),
INPUT_SIZE, arrowGlyphVertices.size(), arrowGlyphIndices.size(),
simulation->get_scalar_min(), simulation->get_scalar_max());
else
(*glyphs)(simulation->positions_begin(), simulation->velocities_begin(), scalars.begin(),
sphereGlyphVertices.begin(), sphereGlyphNormals.begin(), sphereGlyphIndices.begin(),
INPUT_SIZE, sphereGlyphVertices.size(), sphereGlyphIndices.size(),
simulation->get_scalar_min(), simulation->get_scalar_max());
}
// If using interop, render the vertex buffer objects; otherwise, render the arrays
#ifdef USE_INTEROP
glBindBuffer(GL_ARRAY_BUFFER, vboBuffers[0]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboBuffers[3]);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vboBuffers[1]);
glColorPointer(4, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vboBuffers[2]);
glNormalPointer(GL_FLOAT, 0, 0);
int numIndices = INPUT_SIZE;
if (curGlyphMode == 0) numIndices *= arrowGlyphIndices.size();
else numIndices *= sphereGlyphIndices.size();
glDrawElements(GL_TRIANGLES, 3*numIndices, GL_UNSIGNED_INT, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
#else
normals.assign(glyphs->normals_begin(), glyphs->normals_end());
indices.assign(glyphs->indices_begin(), glyphs->indices_end());
vertices.assign(glyphs->vertices_begin(), glyphs->vertices_end());
colors.assign(thrust::make_transform_iterator(glyphs->scalars_begin(), color_map<float>(simulation->get_scalar_min(), simulation->get_scalar_max())),
thrust::make_transform_iterator(glyphs->scalars_end(), color_map<float>(simulation->get_scalar_min(), simulation->get_scalar_max())));
glNormalPointer(GL_FLOAT, 0, &normals[0]);
glColorPointer(4, GL_FLOAT, 0, &colors[0]);
glVertexPointer(3, GL_FLOAT, 0, &vertices[0]);
glDrawElements(GL_TRIANGLES, 3*indices.size(), GL_UNSIGNED_INT, &indices[0]);
#endif
// Pop this OpenGL view matrix
glPopMatrix();
// Periodically output the framerate
gettimeofday(&end, 0);
timersub(&end, &begin, &diff);
frameCount++;
float seconds = diff.tv_sec + 1.0E-6*diff.tv_usec;
if (seconds > 0.5f)
{
char title[256];
sprintf(title, "Flock simulation, fps: %2.2f", float(frameCount)/seconds);
std::cout << title << std::endl;
seconds = 0.0f;
frameCount = 0;
}
// Restart the QT callback timer
timer->start(1);
}
//==========================================================================
/*!
Handle window resize event
\fn GLWindow::resizeGL
*/
//==========================================================================
void GLWindow::resizeGL(int width, int height)
{
glViewport(0, 0, width, height);
}
//==========================================================================
/*!
Handle mouse press event
\fn GLWindow::mousePressEvent
*/
//==========================================================================
void GLWindow::mousePressEvent(QMouseEvent *event)
{
lastPos = event->pos();
}
//==========================================================================
/*!
Handle mouse move event to rotate, translate, or zoom
\fn GLWindow::mouseMoveEvent
*/
//==========================================================================
void GLWindow::mouseMoveEvent(QMouseEvent *event)
{
int dx = event->x() - lastPos.x();
int dy = event->y() - lastPos.y();
// Rotate, zoom, or translate the view
if (event->buttons() & Qt::LeftButton)
{
Quaternion newRotX;
newRotX.setEulerAngles(-0.2f*dx*3.14159f/180.0f, 0.0f, 0.0f);
qrot.mul(newRotX);
Quaternion newRotY;
newRotY.setEulerAngles(0.0f, 0.0f, -0.2f*dy*3.14159f/180.0f);
qrot.mul(newRotY);
}
else if (event->buttons() & Qt::RightButton)
{
cameraFOV += dy/20.0f;
}
else if (event->buttons() & Qt::MiddleButton)
{
cameraPos.x -= dx/2.0f;
cameraPos.y += dy/2.0f;
}
lastPos = event->pos();
}
//==========================================================================
/*!
Handle keyboard input event
\fn GLWindow::keyPressEvent
*/
//==========================================================================
void GLWindow::keyPressEvent(QKeyEvent *event)
{
// Toggle the glyph type (spheres or arrows)
if ((event->key() == 'g') || (event->key() == 'G'))
if (!simPaused) glyphMode = 1 - glyphMode;
// Pause or resume the simulation
if ((event->key() == 'p') || (event->key() == 'P'))
simPaused = !simPaused;
}
| 2b27db63fb235479b6c40865b31d807b6e85fc82.cu | /*
Copyright (c) 2012, Los Alamos National Security, LLC
All rights reserved.
Copyright 2012. Los Alamos National Security, LLC. This software was produced under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National Laboratory (LANL),
which is operated by Los Alamos National Security, LLC for the U.S. Department of Energy. The U.S. Government has rights to use, reproduce, and distribute this software.
NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE.
If software is modified to produce derivative works, such modified software should be clearly marked, so as not to confuse it with the version available from LANL.
Additionally, redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
· Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
· Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
· Neither the name of Los Alamos National Security, LLC, Los Alamos National Laboratory, LANL, the U.S. Government, nor the names of its contributors may be used
to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOS ALAMOS NATIONAL SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Christopher Sewell, [email protected]
This simulation is based on the method by Matt Sottile described here: http://syntacticsalt.com/2011/03/10/functional-flocks/
*/
#ifdef __APPLE__
#include <GL/glew.h>
#include <OpenGL/OpenGL.h>
#include <GLUT/glut.h>
#else
#include <GL/glew.h>
#include <GL/glut.h>
#include <GL/gl.h>
#endif
#include <QtGui>
#include <QObject>
#ifdef USE_INTEROP
#include <cuda_gl_interop.h>
#endif
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#include <vtkSphereSource.h>
#include <vtkArrowSource.h>
#include <vtkPolyData.h>
#include <vtkCellArray.h>
#include <vtkCellData.h>
#include <vtkDoubleArray.h>
#include <vtkFloatArray.h>
#include <vtkPoints.h>
#include <vtkPolyData.h>
#include <vtkPolyDataNormals.h>
#include <vtkPointData.h>
#include <vtkSmartPointer.h>
#include <vtkXMLPolyDataReader.h>
#include <vtkTriangleFilter.h>
#include <vtkPolyDataNormals.h>
#include <piston/piston_math.h>
#include <piston/choose_container.h>
#include <piston/hsv_color_map.h>
#define SPACE thrust::detail::default_device_space_tag
using namespace piston;
#include "flock_sim.h"
#include "glyph.h"
#include "glwindow.h"
//! Number of boids
#define INPUT_SIZE 1024
//==========================================================================
/*!
Variable declarations
*/
//==========================================================================
//! Variables for timing the framerate
struct timeval begin, end, diff;
int frameCount;
//! The flock simulation and glyph operators
flock_sim* simulation;
glyph<thrust::device_vector<float3>::iterator, thrust::device_vector<float3>::iterator, thrust::device_vector<float>::iterator,
thrust::device_vector<float3>::iterator, thrust::device_vector<float3>::iterator, thrust::device_vector<uint3>::iterator >* glyphs;
//! Initial positions and velocities for the boids
thrust::host_vector<float3> inputPositionsHost;
thrust::device_vector<float3> inputPositions;
thrust::device_vector<float3> inputVelocities;
//! Vertices, normals, colors, vertex indices, and scalars for output
thrust::host_vector<float3> vertices;
thrust::host_vector<float3> normals;
thrust::host_vector<float4> colors;
thrust::host_vector<uint3> indices;
thrust::device_vector<float> scalars;
//! Vertices, normals, and vertex indices for the sphere and arrow glyphs
thrust::device_vector<float3> sphereGlyphVertices;
thrust::device_vector<float3> sphereGlyphNormals;
thrust::device_vector<uint3> sphereGlyphIndices;
thrust::device_vector<float3> arrowGlyphVertices;
thrust::device_vector<float3> arrowGlyphNormals;
thrust::device_vector<uint3> arrowGlyphIndices;
//! VTK filters to produce the arrow and sphere glyphs
vtkArrowSource *arrowSource;
vtkSphereSource *sphereSource;
vtkPolyData *spherePoly;
vtkPolyData *arrowPoly;
vtkTriangleFilter *triangleFilter;
vtkPolyDataNormals *normalGenerator;
//! Camera and UI variables
int glyphMode;
bool simPaused;
float3 cameraPos;
float cameraFOV;
int gridSize;
//! Vertex buffer objects used by CUDA interop
#ifdef USE_INTEROP
GLuint vboBuffers[4]; struct cudaGraphicsResource* vboResources[4];
#endif
//==========================================================================
/*!
struct randomInit
Initialize the vector elements with random values between the min and max
*/
//==========================================================================
struct randomInit : public thrust::unary_function<float3, float3>
{
float minValue, maxValue;
__host__ __device__
randomInit(float minValue, float maxValue) : minValue(minValue), maxValue(maxValue) { };
__host__ __device__
float3 operator() (float3 i)
{
float3 result;
result.x = minValue + (maxValue-minValue)*((rand() % 100000)/100000.0);
result.y = minValue + (maxValue-minValue)*((rand() % 100000)/100000.0);
result.z = minValue + (maxValue-minValue)*((rand() % 100000)/100000.0);
return result;
}
};
//==========================================================================
/*!
Extract vertices, normals, and vertex indices from a vtkPolyData instance
\fn copyPolyData
*/
//==========================================================================
void copyPolyData(vtkPolyData *polyData, thrust::device_vector<float3> &points, thrust::device_vector<float3> &vectors, thrust::device_vector<uint3> &indexes)
{
// Extract the vertices and normals and copy to the output vectors
vtkPoints* pts = polyData->GetPoints();
vtkFloatArray* verts = vtkFloatArray::SafeDownCast(pts->GetData());
vtkFloatArray* norms = vtkFloatArray::SafeDownCast(polyData->GetPointData()->GetNormals());
float3* vData = (float3*)verts->GetPointer(0);
float3* nData = (float3*)norms->GetPointer(0);
points.assign(vData, vData+verts->GetNumberOfTuples());
vectors.assign(nData, nData+norms->GetNumberOfTuples());
// Extract the vertex indices from the cells and copy to the output vectors
vtkCellArray* cellArray = polyData->GetPolys();
vtkIdTypeArray* conn = cellArray->GetData();
vtkIdType* cData = conn->GetPointer(0);
for (int i=0; i<3*polyData->GetNumberOfPolys(); i++) cData[i] = cData[(i/3)*4+(i%3)+1];
thrust::host_vector<uint> indexTemp;
indexTemp.assign(cData, cData+3*polyData->GetNumberOfPolys());
uint3* c3Data = (uint3*)(thrust::raw_pointer_cast(&*indexTemp.begin()));
indexes.assign(c3Data, c3Data+polyData->GetNumberOfPolys());
}
//==========================================================================
/*!
Constructor for GLWindow class
\fn GLWindow::GLWindow
*/
//==========================================================================
GLWindow::GLWindow(QWidget *parent)
: QGLWidget(QGLFormat(QGL::SampleBuffers), parent)
{
// Start the QT callback timer
setFocusPolicy(Qt::StrongFocus);
timer = new QTimer(this);
connect(timer, SIGNAL(timeout()), this, SLOT(updateGL()));
timer->start(1);
}
//==========================================================================
/*!
Create the flock simulation and glyph operators
\fn GLWindow::initializeGL
*/
//==========================================================================
void GLWindow::initializeGL()
{
// Initialize camera and UI variables
qrot.set(0,0,0,1);
frameCount = 0;
gridSize = 256;
glyphMode = 0;
simPaused = false;
cameraPos = make_float3(0.0f, 0.0f, 1.5*gridSize);
cameraFOV = 60.0;
// Set up basic OpenGL state and lighting
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glEnable(GL_DEPTH_TEST);
glShadeModel(GL_SMOOTH);
float white[] = { 0.8, 0.8, 0.8, 1.0 };
float black[] = { 0.0, 0.0, 0.0, 1.0 };
float lightPos[] = { 0.0, 0.0, gridSize*1.5, 1.0 };
glLightfv(GL_LIGHT0, GL_AMBIENT, white);
glLightfv(GL_LIGHT0, GL_DIFFUSE, white);
glLightfv(GL_LIGHT0, GL_SPECULAR, black);
glLightfv(GL_LIGHT0, GL_POSITION, lightPos);
glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, 1);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_NORMALIZE);
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);
glEnable(GL_COLOR_MATERIAL);
// Initialize CUDA interop if it is being used
#ifdef USE_INTEROP
glewInit();
cudaGLSetGLDevice(0);
#endif
// Initialize boid positions to random values and boid velocities to zero
inputPositions.resize(INPUT_SIZE); inputPositionsHost.resize(INPUT_SIZE);
thrust::transform(inputPositionsHost.begin(), inputPositionsHost.end(), inputPositionsHost.begin(), randomInit(0.0f, 1.0f*gridSize));
inputPositions = inputPositionsHost;
thrust::fill(inputVelocities.begin(), inputVelocities.end(), make_float3(0.0f, 0.0f, 0.0f));
// Set the boundaries of the simulation space
float3 boundaryMin, boundaryMax;
boundaryMin.x = boundaryMin.y = boundaryMin.z = 0.0f;
boundaryMax.x = boundaryMax.y = boundaryMax.z = gridSize;
// Create the flock simulation instance
simulation = new flock_sim(inputPositions, inputVelocities, boundaryMin, boundaryMax, 1.0f, 5.0f, 1.0f, 0.01f, 1.0025f, 30.0f, 5.0f, 30.0f, 4.0f, 10.0f, 0.01f, 0.5f);
// Use VTK to generate a sphere glyph
sphereSource = vtkSphereSource::New();
sphereSource->SetThetaResolution(5);
sphereSource->SetPhiResolution(5);
sphereSource->Update();
spherePoly = vtkPolyData::New();
spherePoly->ShallowCopy(sphereSource->GetOutput());
copyPolyData(spherePoly, sphereGlyphVertices, sphereGlyphNormals, sphereGlyphIndices);
// Use VTK to generate an arrow glyph and its normals
arrowSource = vtkArrowSource::New();
arrowSource->Update();
triangleFilter = vtkTriangleFilter::New();
triangleFilter->SetInputConnection(arrowSource->GetOutputPort());
triangleFilter->Update();
arrowPoly = vtkPolyData::New();
arrowPoly->ShallowCopy(triangleFilter->GetOutput());
normalGenerator = vtkPolyDataNormals::New();
normalGenerator->SetInput(arrowPoly);
normalGenerator->ComputePointNormalsOn();
normalGenerator->ComputeCellNormalsOff();
normalGenerator->Update();
arrowPoly = normalGenerator->GetOutput();
copyPolyData(arrowPoly, arrowGlyphVertices, arrowGlyphNormals, arrowGlyphIndices);
// Initialize glyph input scalars to the minimum simulation scalar value
scalars.resize(INPUT_SIZE);
thrust::fill(scalars.begin(), scalars.end(), simulation->get_scalar_min());
// Create the glyph operator instance
glyphs = new glyph<thrust::device_vector<float3>::iterator, thrust::device_vector<float3>::iterator, thrust::device_vector<float>::iterator,
thrust::device_vector<float3>::iterator, thrust::device_vector<float3>::iterator, thrust::device_vector<uint3>::iterator>();
// If using interop, initialize vertex buffer objects
#ifdef USE_INTEROP
int numPoints = INPUT_SIZE*std::max(sphereGlyphVertices.size(), arrowGlyphVertices.size());
glGenBuffers(4, vboBuffers);
for (int i=0; i<3; i++)
{
unsigned int bufferSize = (i == 1) ? numPoints*sizeof(float4) : numPoints*sizeof(float3);
glBindBuffer(GL_ARRAY_BUFFER, vboBuffers[i]);
glBufferData(GL_ARRAY_BUFFER, bufferSize, 0, GL_DYNAMIC_DRAW);
}
glBindBuffer(GL_ARRAY_BUFFER, vboBuffers[3]);
glBufferData(GL_ARRAY_BUFFER, numPoints*sizeof(uint3), 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
for (int i=0; i<4; i++)
{
cudaGraphicsGLRegisterBuffer(&(vboResources[i]), vboBuffers[i], cudaGraphicsMapFlagsWriteDiscard);
glyphs->vboResources[i] = vboResources[i];
}
#endif
// Enable OpenGL state for vertex, normal, and color arrays
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
}
//==========================================================================
/*!
Update the simulation and graphics
\fn GLWindow::paintGL
*/
//==========================================================================
void GLWindow::paintGL()
{
// Stop the QT callback timer
timer->stop();
// Start timing this interval
if (frameCount == 0) gettimeofday(&begin, 0);
// Set up the OpenGL state
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
// Set up the projection and modelview matrices for the view
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(cameraFOV, 1.0f, 1.0f, gridSize*4.0f);
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
gluLookAt(cameraPos.x, cameraPos.y, cameraPos.z, cameraPos.x, cameraPos.y, 0.0f, 0.0f, 1.0f, 0.0f);
// Set up the current rotation and translation
qrot.getRotMat(rotationMatrix);
glMultMatrixf(rotationMatrix);
glTranslatef(-(gridSize-1)/2, -(gridSize-1)/2, -(gridSize-1)/2);
// If the simulation is not paused, compute the next simulation step, and apply the glyph operator to the result,
// using either the arrow or sphere glyph
int curGlyphMode = glyphMode;
if (!simPaused)
{
(*simulation)();
if (curGlyphMode == 0)
(*glyphs)(simulation->positions_begin(), simulation->velocities_begin(), simulation->speeds_begin(),
arrowGlyphVertices.begin(), arrowGlyphNormals.begin(), arrowGlyphIndices.begin(),
INPUT_SIZE, arrowGlyphVertices.size(), arrowGlyphIndices.size(),
simulation->get_scalar_min(), simulation->get_scalar_max());
else
(*glyphs)(simulation->positions_begin(), simulation->velocities_begin(), scalars.begin(),
sphereGlyphVertices.begin(), sphereGlyphNormals.begin(), sphereGlyphIndices.begin(),
INPUT_SIZE, sphereGlyphVertices.size(), sphereGlyphIndices.size(),
simulation->get_scalar_min(), simulation->get_scalar_max());
}
// If using interop, render the vertex buffer objects; otherwise, render the arrays
#ifdef USE_INTEROP
glBindBuffer(GL_ARRAY_BUFFER, vboBuffers[0]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboBuffers[3]);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vboBuffers[1]);
glColorPointer(4, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vboBuffers[2]);
glNormalPointer(GL_FLOAT, 0, 0);
int numIndices = INPUT_SIZE;
if (curGlyphMode == 0) numIndices *= arrowGlyphIndices.size();
else numIndices *= sphereGlyphIndices.size();
glDrawElements(GL_TRIANGLES, 3*numIndices, GL_UNSIGNED_INT, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
#else
normals.assign(glyphs->normals_begin(), glyphs->normals_end());
indices.assign(glyphs->indices_begin(), glyphs->indices_end());
vertices.assign(glyphs->vertices_begin(), glyphs->vertices_end());
colors.assign(thrust::make_transform_iterator(glyphs->scalars_begin(), color_map<float>(simulation->get_scalar_min(), simulation->get_scalar_max())),
thrust::make_transform_iterator(glyphs->scalars_end(), color_map<float>(simulation->get_scalar_min(), simulation->get_scalar_max())));
glNormalPointer(GL_FLOAT, 0, &normals[0]);
glColorPointer(4, GL_FLOAT, 0, &colors[0]);
glVertexPointer(3, GL_FLOAT, 0, &vertices[0]);
glDrawElements(GL_TRIANGLES, 3*indices.size(), GL_UNSIGNED_INT, &indices[0]);
#endif
// Pop this OpenGL view matrix
glPopMatrix();
// Periodically output the framerate
gettimeofday(&end, 0);
timersub(&end, &begin, &diff);
frameCount++;
float seconds = diff.tv_sec + 1.0E-6*diff.tv_usec;
if (seconds > 0.5f)
{
char title[256];
sprintf(title, "Flock simulation, fps: %2.2f", float(frameCount)/seconds);
std::cout << title << std::endl;
seconds = 0.0f;
frameCount = 0;
}
// Restart the QT callback timer
timer->start(1);
}
//==========================================================================
/*!
Handle window resize event
\fn GLWindow::resizeGL
*/
//==========================================================================
void GLWindow::resizeGL(int width, int height)
{
glViewport(0, 0, width, height);
}
//==========================================================================
/*!
Handle mouse press event
\fn GLWindow::mousePressEvent
*/
//==========================================================================
void GLWindow::mousePressEvent(QMouseEvent *event)
{
lastPos = event->pos();
}
//==========================================================================
/*!
Handle mouse move event to rotate, translate, or zoom
\fn GLWindow::mouseMoveEvent
*/
//==========================================================================
void GLWindow::mouseMoveEvent(QMouseEvent *event)
{
int dx = event->x() - lastPos.x();
int dy = event->y() - lastPos.y();
// Rotate, zoom, or translate the view
if (event->buttons() & Qt::LeftButton)
{
Quaternion newRotX;
newRotX.setEulerAngles(-0.2f*dx*3.14159f/180.0f, 0.0f, 0.0f);
qrot.mul(newRotX);
Quaternion newRotY;
newRotY.setEulerAngles(0.0f, 0.0f, -0.2f*dy*3.14159f/180.0f);
qrot.mul(newRotY);
}
else if (event->buttons() & Qt::RightButton)
{
cameraFOV += dy/20.0f;
}
else if (event->buttons() & Qt::MiddleButton)
{
cameraPos.x -= dx/2.0f;
cameraPos.y += dy/2.0f;
}
lastPos = event->pos();
}
//==========================================================================
/*!
Handle keyboard input event
\fn GLWindow::keyPressEvent
*/
//==========================================================================
void GLWindow::keyPressEvent(QKeyEvent *event)
{
// Toggle the glyph type (spheres or arrows)
if ((event->key() == 'g') || (event->key() == 'G'))
if (!simPaused) glyphMode = 1 - glyphMode;
// Pause or resume the simulation
if ((event->key() == 'p') || (event->key() == 'P'))
simPaused = !simPaused;
}
|
81abb0b95eb71cdee38afc6e85cb7a8d5fdcb001.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define _USE_MATH_DEFINES
//The maximum particles to be simulated
#define MAX_PARTICLES 2048
//How many simulations are to be run
#define NUM_SIMULATIONS 1000
//How many tests are to be run
#define NUM_TESTS 100
//The delta time between each simulation
#define TIMESTEP 0.01f
//Small, near-zero value to soften the force calculation and improve the result
#define SOFTENING 1e-4f
//Newton's gravitational constant, probably won't use this because of how weak gravity is
#define G 6.673e-11f
//How many blocks to use
#define BLOCK_SIZE 16
//Density of hydrogen in kg/m3
#define H_DENISTY 0.08988
//Density of oxygen in kg/m3
#define O_DENISTY 1.429
//Density of iron in kg/m3
#define FE_DENISTY 7874.0
//Density of osmium in kg/m3
#define OS_DENISTY 22600.0
#include "GLShader.h"
#include <GLFW\glfw3.h>
#include <chrono>
#include <glm\gtc\type_ptr.hpp>
#include <glm\gtc\matrix_transform.hpp>
#include <glm\common.hpp>
#include <glm\gtx\norm.hpp>
#include "Camera.h"
#include <fstream>
#include <random>
#include <cmath>
#include <chrono>
#include <iostream>
#include <ctime>
#include <math.h>
#include <algorithm>
#include "Texture.h"
#include <omp.h>
#include <thread>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
//The texture for each particle
Texture tex;
//Basic target camera
Camera cam;
//Particle shader
GLShader shader;
GLFWwindow* window;
// Uniform locations for shader.
GLuint CameraRight_worldspace_ID;
GLuint CameraUp_worldspace_ID;
GLuint ViewProjMatrixID;
// Get the number of threads this hardware can support.
int numThreads = std::thread::hardware_concurrency();
// This struct represents a single particle.
struct Particle
{
//Position of the particle.
glm::vec3 pos;
//Colour of the particle.
unsigned char r, g, b, a;
//Radius of the particle in meters
float radius;
//Velocity of the particle
glm::vec3 velocity;
//Particles mass in kg
float mass;
};
GLuint VertexArrayID;
static GLfloat* gl_pos_data = new GLfloat[MAX_PARTICLES * 4];
static GLubyte* gl_colour_data = new GLubyte[MAX_PARTICLES * 4];
GLuint pos_buffer;
GLuint colour_buffer;
GLuint vertex_buffer;
//Array of all the particles in the scene
Particle particles[MAX_PARTICLES];
//The positions of every particle after each simulation
Particle particleMovements[NUM_SIMULATIONS][MAX_PARTICLES];
double lastTime;
unsigned long long particlesSize;
Particle *particlesBuffer;
using namespace std::chrono;
void LoadParticles()
{
for (int i = 0; i < MAX_PARTICLES; i++)
{
double x = (rand() % 100) - 50;
double y = (rand() % 100) - 50;
double z = (rand() % 100) - 50;
particles[i].pos = glm::dvec3(x, y, z);
particles[i].velocity = glm::dvec3(0);
particles[i].r = 0;
particles[i].g = 100;
particles[i].b = 255;
particles[i].a = 255;
particles[i].mass = 1;
//if (i == 0)
//particles[i].mass = 100;
//Volume = mass/density
float volume = particles[i].mass / H_DENISTY;
particles[i].radius = cbrt((3 * volume) / (4 * M_PI));
gl_colour_data[4 * i + 0] = particles[i].r;
gl_colour_data[4 * i + 1] = particles[i].g;
gl_colour_data[4 * i + 2] = particles[i].b;
gl_colour_data[4 * i + 3] = particles[i].a;
}
hipMemcpy(particlesBuffer, &particles, particlesSize, hipMemcpyHostToDevice);
for (int i = 0; i < MAX_PARTICLES; i++)
{
particleMovements[0][i] = particles[i];
}
}
int Initialise()
{
// Initialise GLFW
if (!glfwInit())
{
fprintf(stderr, "Failed to initialize GLFW\n");
getchar();
return -1;
}
glfwWindowHint(GLFW_SAMPLES, 4);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Open a window and create its OpenGL context
window = glfwCreateWindow(1920, 1080, "N-Body Simulation", NULL, NULL);
if (window == NULL) {
fprintf(stderr, "Failed to open GLFW window.\n");
getchar();
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
// Initialize GLEW
glewExperimental = true; // Needed for core profile
if (glewInit() != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
getchar();
glfwTerminate();
return -1;
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
// Hide the mouse and enable unlimited movement
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
// Set the mouse at the center of the screen
glfwPollEvents();
glfwSetCursorPos(window, 1024 / 2, 768 / 2);
//Background colour
glClearColor(0.2f, 0.2f, 0.2f, 0.0f);
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// Create and compile our GLSL program from the shaders
shader.SetProgram();
shader.AddShaderFromFile("../res/shaders/Quad.vert", GLShader::VERTEX);
shader.AddShaderFromFile("../res/shaders/Quad.frag", GLShader::FRAGMENT);
shader.Link();
cam.SetProjection(glm::quarter_pi<float>(), 1920 / 1080, 2.414f, 100000);
cam.SetWindow(window);
cam.SetPosition(glm::vec3(0, 0, 200));
// Vertex shader
CameraRight_worldspace_ID = glGetUniformLocation(shader.GetId(), "CameraRight_worldspace");
CameraUp_worldspace_ID = glGetUniformLocation(shader.GetId(), "CameraUp_worldspace");
ViewProjMatrixID = glGetUniformLocation(shader.GetId(), "VP");
lastTime = glfwGetTime();
tex = Texture("../res/textures/Particle.png");
static const GLfloat g_vertex_buffer_data[] =
{
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
-0.5f, 0.5f, 0.0f,
0.5f, 0.5f, 0.0f,
};
glGenBuffers(1, &vertex_buffer);
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
//The VBO containing the positions and sizes of the particles
glGenBuffers(1, &pos_buffer);
glBindBuffer(GL_ARRAY_BUFFER, pos_buffer);
//Initialize with empty (NULL) buffer : it will be updated later, each frame.
glBufferData(GL_ARRAY_BUFFER, MAX_PARTICLES * 4 * sizeof(GLfloat), NULL, GL_STREAM_DRAW);
//The VBO containing the colors of the particles
glGenBuffers(1, &colour_buffer);
glBindBuffer(GL_ARRAY_BUFFER, colour_buffer);
//Initialize with empty (NULL) buffer : it will be updated later, each frame.
glBufferData(GL_ARRAY_BUFFER, MAX_PARTICLES * 4 * sizeof(GLubyte), NULL, GL_STREAM_DRAW);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
hipSetDevice(0);
particlesSize = sizeof(Particle)*MAX_PARTICLES;
hipMalloc((void**)&particlesBuffer, particlesSize);
LoadParticles();
return 0;
}
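// CalculateForces: brute-force all-pairs (O(N^2)) gravity. Each thread owns one particle
// and accumulates a softened inverse-square interaction with every other particle (the
// j == i term contributes nothing since dx = dy = dz = 0). Note the accumulated force is
// added straight to the velocity (no division by the particle's own mass and no G or
// TIMESTEP factor), which is acceptable qualitatively here since every mass is set to 1
// in LoadParticles.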
__global__
void CalculateForces(Particle* particles)
{
float fX = 0.0f; float fY = 0.0f; float fZ = 0.0f;
int i = threadIdx.x + blockIdx.x * blockDim.x;
for (int j = 0; j < MAX_PARTICLES; j++)
{
float dx = particles[j].pos.x - particles[i].pos.x;
float dy = particles[j].pos.y - particles[i].pos.y;
float dz = particles[j].pos.z - particles[i].pos.z;
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
float invDist = 1.0f / sqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
fX += (particles[i].mass * particles[j].mass) * dx * invDist3;
fY += (particles[i].mass * particles[j].mass) * dy * invDist3;
fZ += (particles[i].mass * particles[j].mass) * dz * invDist3;
}
particles[i].velocity.x += fX;
particles[i].velocity.y += fY;
particles[i].velocity.z += fZ;
}
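// SimulateParticles: copies the particle array to the device, launches one thread per
// particle (MAX_PARTICLES is an exact multiple of BLOCK_SIZE, so no bounds check is needed
// in the kernel), synchronizes, copies the updated velocities back, then advances the
// positions with an explicit Euler step and records the frame into particleMovements.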
void SimulateParticles(int currentIndex)
{
int nBlocks = (MAX_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipMemcpy(particlesBuffer, &particles, particlesSize, hipMemcpyHostToDevice);
CalculateForces << <nBlocks, BLOCK_SIZE>> > (particlesBuffer);
hipDeviceSynchronize();
hipMemcpy(particles, &particlesBuffer[0], particlesSize, hipMemcpyDeviceToHost);
for (int i = 0; i < MAX_PARTICLES; i++)
{
Particle& p = particles[i];
p.pos += p.velocity * TIMESTEP;
particleMovements[currentIndex][i] = particles[i];
}
}
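// UpdatePosBuffer: copies the requested frame and sorts its particles back to front
// relative to the camera (a simple bubble sort) so the alpha-blended billboards composite
// correctly, then packs position + radius into the streaming position buffer.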
void UpdatePosBuffer(int currentIndex)
{
Particle tempParticles[MAX_PARTICLES];
for (int i = 0; i < MAX_PARTICLES; i++)
{
tempParticles[i] = particleMovements[currentIndex][i];
}
bool swap = 1;
for (int i = 1; (i <= MAX_PARTICLES) && swap; i++)
{
swap = 0;
for (int j = 0; j < (MAX_PARTICLES - 1); j++)
{
Particle& p1 = tempParticles[j];
Particle& p2 = tempParticles[j + 1];
if (glm::distance(p2.pos, cam.GetPosition()) > glm::distance(p1.pos, cam.GetPosition()))
{
Particle temp = p1;
p1 = p2;
p2 = temp;
swap = 1;
}
}
}
for (int i = 0; i < MAX_PARTICLES; i++)
{
Particle& p = tempParticles[i];
// Update GPU buffer with new positions.
gl_pos_data[4 * i + 0] = p.pos.x;
gl_pos_data[4 * i + 1] = p.pos.y;
gl_pos_data[4 * i + 2] = p.pos.z;
gl_pos_data[4 * i + 3] = p.radius;
}
}
void Update(double deltaTime)
{
//make target once I set bounds
float ratio_width = glm::quarter_pi<float>() / static_cast<float>(1920);
float ratio_height = glm::quarter_pi<float>() / static_cast<float>(1080);
double xpos, ypos;
glfwGetCursorPos(window, &xpos, &ypos);
glfwSetCursorPos(window, 1920.0 / 2, 1080.0 / 2);
// Calculate delta of cursor positions from last frame
double delta_x = xpos - 1920.0 / 2;
double delta_y = ypos - 1080.0 / 2;
// Multiply deltas by ratios - gets actual change in orientation
delta_x *= ratio_width;
delta_y *= ratio_height;
cam.Rotate(static_cast<float>(delta_x), static_cast<float>(-delta_y)); // flipped y to revert the invert.
cam.Update(deltaTime);
}
void Render()
{
// Update the OpenGL buffers with updated particle positions.
glBindBuffer(GL_ARRAY_BUFFER, pos_buffer);
glBufferData(GL_ARRAY_BUFFER, MAX_PARTICLES * 4 * sizeof(GLfloat), NULL, GL_STREAM_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, MAX_PARTICLES * sizeof(GLfloat) * 4, gl_pos_data);
glBindBuffer(GL_ARRAY_BUFFER, colour_buffer);
glBufferData(GL_ARRAY_BUFFER, MAX_PARTICLES * 4 * sizeof(GLubyte), NULL, GL_STREAM_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, MAX_PARTICLES * sizeof(GLubyte) * 4, gl_colour_data);
// Clear the screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// glClearColor(1, 1, 1, 1);
glm::mat4 ProjectionMatrix = cam.GetProjection();
glm::mat4 ViewMatrix = cam.GetView();
glm::mat4 ViewProjectionMatrix = ProjectionMatrix * ViewMatrix;
// Use our shader
shader.Use();
glUniform3f(CameraRight_worldspace_ID, ViewMatrix[0][0], ViewMatrix[1][0], ViewMatrix[2][0]);
glUniform3f(CameraUp_worldspace_ID, ViewMatrix[0][1], ViewMatrix[1][1], ViewMatrix[2][1]);
glUniformMatrix4fv(ViewProjMatrixID, 1, GL_FALSE, &ViewProjectionMatrix[0][0]);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, tex.id);
glUniform1i(glGetUniformLocation(shader.GetId(), "tex"), 1);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
// 2nd attribute buffer : positions of particles' centers
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, pos_buffer);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (void*)0);
// 3rd attribute buffer : particles' colors
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, colour_buffer);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, 0, (void*)0);
glVertexAttribDivisor(0, 0);
glVertexAttribDivisor(1, 1);
glVertexAttribDivisor(2, 1);
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, MAX_PARTICLES);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
// Swap buffers
glfwSwapBuffers(window);
glfwPollEvents();
}
int main(void)
{
if (Initialise() == -1)
return -1;
std::ofstream data((std::to_string(BLOCK_SIZE) + "B_" + std::to_string(MAX_PARTICLES) + "P_" + std::to_string(NUM_SIMULATIONS) + "S_" + std::to_string(NUM_TESTS) + "T.csv").c_str(), std::ofstream::out);
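// Benchmark: run NUM_SIMULATIONS steps, NUM_TESTS times, logging each run's
// elapsed seconds to the CSV; LoadParticles() resets the state between runs.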
for (int n = 0; n < NUM_TESTS; n++)
{
clock_t t;
t = clock();
for (int i = 0; i < NUM_SIMULATIONS; i++)
{
SimulateParticles(i);
}
clock_t end = clock();
float elapsedTime = float(end - t) / CLOCKS_PER_SEC;
data << elapsedTime << std::endl;
LoadParticles();
}
data.close();
int i = 0;
//While still running and esc hasn't been pressed
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS && glfwWindowShouldClose(window) == 0)
{
UpdatePosBuffer(i);
double currentTime = glfwGetTime();
double delta = currentTime - lastTime;
Update(delta);
Render();
lastTime = currentTime;
i++;
if (i >= NUM_SIMULATIONS) // valid frame indices are 0..NUM_SIMULATIONS-1
i = 0;
}
delete[] gl_pos_data;
delete[] gl_colour_data;
//Cleanup VBO and shader
glDeleteBuffers(1, &colour_buffer);
glDeleteBuffers(1, &pos_buffer);
glDeleteBuffers(1, &vertex_buffer);
glDeleteProgram(shader.GetId());
glDeleteVertexArrays(1, &VertexArrayID);
//Close OpenGL window and terminate GLFW
glfwTerminate();
return 0;
} | 81abb0b95eb71cdee38afc6e85cb7a8d5fdcb001.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define _USE_MATH_DEFINES
//The maximum particles to be simulated
#define MAX_PARTICLES 2048
//How many simulations are to be run
#define NUM_SIMULATIONS 1000
//How many tests are to be run
#define NUM_TESTS 100
//The delta time between each simulation
#define TIMESTEP 0.01f
//Small, near 0 value to improve result
#define SOFTENING 1e-4f
//Newton's gravitational constant, probably won't use this because of how weak gravity is
#define G 6.673e-11f
//Threads per block when launching the force kernel
#define BLOCK_SIZE 16
//Density of hydrogen in kg/m3
#define H_DENISTY 0.08988
//Density of oxygen in kg/m3
#define O_DENISTY 1.429
//Density of iron in kg/m3
#define FE_DENISTY 7874.0
//Density of osmium in kg/m3
#define OS_DENISTY 22600.0
#include "GLShader.h"
#include <GLFW\glfw3.h>
#include <chrono>
#include <glm\gtc\type_ptr.hpp>
#include <glm\gtc\matrix_transform.hpp>
#include <glm\common.hpp>
#include <glm\gtx\norm.hpp>
#include "Camera.h"
#include <fstream>
#include <random>
#include <cmath>
#include <chrono>
#include <iostream>
#include <ctime>
#include <math.h>
#include <algorithm>
#include "Texture.h"
#include <omp.h>
#include <thread>
#include <curand.h>
#include <curand_kernel.h>
//The texture for each particle
Texture tex;
//Basic target camera
Camera cam;
//Particle shader
GLShader shader;
GLFWwindow* window;
// Uniform locations for shader.
GLuint CameraRight_worldspace_ID;
GLuint CameraUp_worldspace_ID;
GLuint ViewProjMatrixID;
// Get the number of threads this hardware can support.
int numThreads = std::thread::hardware_concurrency();
// This class represents the particle.
struct Particle
{
//Position of the particle.
glm::vec3 pos;
//Colour of the particle.
unsigned char r, g, b, a;
//Radius of the particle in meters
float radius;
//Velocity of the particle
glm::vec3 velocity;
//Particles mass in kg
float mass;
};
GLuint VertexArrayID;
static GLfloat* gl_pos_data = new GLfloat[MAX_PARTICLES * 4];
static GLubyte* gl_colour_data = new GLubyte[MAX_PARTICLES * 4];
GLuint pos_buffer;
GLuint colour_buffer;
GLuint vertex_buffer;
//Array of all the particles in the scene
Particle particles[MAX_PARTICLES];
//The positions of every particle after each simulation
Particle particleMovements[NUM_SIMULATIONS][MAX_PARTICLES];
double lastTime;
unsigned long long particlesSize;
Particle *particlesBuffer;
using namespace std::chrono;
void LoadParticles()
{
for (int i = 0; i < MAX_PARTICLES; i++)
{
double x = (rand() % 100) - 50;
double y = (rand() % 100) - 50;
double z = (rand() % 100) - 50;
particles[i].pos = glm::dvec3(x, y, z);
particles[i].velocity = glm::dvec3(0);
particles[i].r = 0;
particles[i].g = 100;
particles[i].b = 255;
particles[i].a = 255;
particles[i].mass = 1;
//if (i == 0)
//particles[i].mass = 100;
//Volume = mass/density
float volume = particles[i].mass / H_DENISTY;
particles[i].radius = cbrt((3 * volume) / (4 * M_PI));
gl_colour_data[4 * i + 0] = particles[i].r;
gl_colour_data[4 * i + 1] = particles[i].g;
gl_colour_data[4 * i + 2] = particles[i].b;
gl_colour_data[4 * i + 3] = particles[i].a;
}
cudaMemcpy(particlesBuffer, &particles, particlesSize, cudaMemcpyHostToDevice);
for (int i = 0; i < MAX_PARTICLES; i++)
{
particleMovements[0][i] = particles[i];
}
}
int Initialise()
{
// Initialise GLFW
if (!glfwInit())
{
fprintf(stderr, "Failed to initialize GLFW\n");
getchar();
return -1;
}
glfwWindowHint(GLFW_SAMPLES, 4);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Open a window and create its OpenGL context
window = glfwCreateWindow(1920, 1080, "N-Body Simulation", NULL, NULL);
if (window == NULL) {
fprintf(stderr, "Failed to open GLFW window.\n");
getchar();
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
// Initialize GLEW
glewExperimental = true; // Needed for core profile
if (glewInit() != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
getchar();
glfwTerminate();
return -1;
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
// Hide the mouse and enable unlimited movement
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
// Set the mouse at the center of the screen
glfwPollEvents();
glfwSetCursorPos(window, 1920.0 / 2, 1080.0 / 2); // centre of the 1920x1080 window used by Update()
//Background colour
glClearColor(0.2f, 0.2f, 0.2f, 0.0f);
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// Create and compile our GLSL program from the shaders
shader.SetProgram();
shader.AddShaderFromFile("../res/shaders/Quad.vert", GLShader::VERTEX);
shader.AddShaderFromFile("../res/shaders/Quad.frag", GLShader::FRAGMENT);
shader.Link();
cam.SetProjection(glm::quarter_pi<float>(), 1920.0f / 1080.0f, 2.414f, 100000); // float division for the aspect ratio
cam.SetWindow(window);
cam.SetPosition(glm::vec3(0, 0, 200));
// Vertex shader
CameraRight_worldspace_ID = glGetUniformLocation(shader.GetId(), "CameraRight_worldspace");
CameraUp_worldspace_ID = glGetUniformLocation(shader.GetId(), "CameraUp_worldspace");
ViewProjMatrixID = glGetUniformLocation(shader.GetId(), "VP");
lastTime = glfwGetTime();
tex = Texture("../res/textures/Particle.png");
static const GLfloat g_vertex_buffer_data[] =
{
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
-0.5f, 0.5f, 0.0f,
0.5f, 0.5f, 0.0f,
};
glGenBuffers(1, &vertex_buffer);
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
//The VBO containing the positions and sizes of the particles
glGenBuffers(1, &pos_buffer);
glBindBuffer(GL_ARRAY_BUFFER, pos_buffer);
//Initialize with empty (NULL) buffer : it will be updated later, each frame.
glBufferData(GL_ARRAY_BUFFER, MAX_PARTICLES * 4 * sizeof(GLfloat), NULL, GL_STREAM_DRAW);
//The VBO containing the colors of the particles
glGenBuffers(1, &colour_buffer);
glBindBuffer(GL_ARRAY_BUFFER, colour_buffer);
//Initialize with empty (NULL) buffer : it will be updated later, each frame.
glBufferData(GL_ARRAY_BUFFER, MAX_PARTICLES * 4 * sizeof(GLubyte), NULL, GL_STREAM_DRAW);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
cudaSetDevice(0);
particlesSize = sizeof(Particle)*MAX_PARTICLES;
cudaMalloc((void**)&particlesBuffer, particlesSize);
LoadParticles();
return 0;
}
__global__
void CalculateForces(Particle* particles)
{
float fX = 0.0f; float fY = 0.0f; float fZ = 0.0f;
int i = threadIdx.x + blockIdx.x * blockDim.x;
for (int j = 0; j < MAX_PARTICLES; j++)
{
float dx = particles[j].pos.x - particles[i].pos.x;
float dy = particles[j].pos.y - particles[i].pos.y;
float dz = particles[j].pos.z - particles[i].pos.z;
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
float invDist = 1.0f / sqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
fX += (particles[i].mass * particles[j].mass) * dx * invDist3;
fY += (particles[i].mass * particles[j].mass) * dy * invDist3;
fZ += (particles[i].mass * particles[j].mass) * dz * invDist3;
}
particles[i].velocity.x += fX;
particles[i].velocity.y += fY;
particles[i].velocity.z += fZ;
}
void SimulateParticles(int currentIndex)
{
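// One simulation step: upload the particle array to the GPU, let CalculateForces
// accumulate the pairwise interactions into each particle's velocity, copy the
// results back, then integrate positions on the host and record the frame.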
int nBlocks = (MAX_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE;
cudaMemcpy(particlesBuffer, &particles, particlesSize, cudaMemcpyHostToDevice);
CalculateForces << <nBlocks, BLOCK_SIZE>> > (particlesBuffer);
cudaDeviceSynchronize();
cudaMemcpy(particles, &particlesBuffer[0], particlesSize, cudaMemcpyDeviceToHost);
for (int i = 0; i < MAX_PARTICLES; i++)
{
Particle& p = particles[i];
p.pos += p.velocity * TIMESTEP;
particleMovements[currentIndex][i] = particles[i];
}
}
void UpdatePosBuffer(int currentIndex)
{
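// Work on a copy of the recorded frame: sort it farthest-first from the camera
// (simple bubble sort) so the alpha-blended billboards composite back-to-front,
// then write position and radius into the host array that Render() streams to OpenGL.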
Particle tempParticles[MAX_PARTICLES];
for (int i = 0; i < MAX_PARTICLES; i++)
{
tempParticles[i] = particleMovements[currentIndex][i];
}
bool swap = true;
for (int i = 1; (i <= MAX_PARTICLES) && swap; i++)
{
swap = false;
for (int j = 0; j < (MAX_PARTICLES - 1); j++)
{
Particle& p1 = tempParticles[j];
Particle& p2 = tempParticles[j + 1];
if (glm::distance(p2.pos, cam.GetPosition()) > glm::distance(p1.pos, cam.GetPosition()))
{
Particle temp = p1;
p1 = p2;
p2 = temp;
swap = true;
}
}
}
for (int i = 0; i < MAX_PARTICLES; i++)
{
Particle& p = tempParticles[i];
// Update GPU buffer with new positions.
gl_pos_data[4 * i + 0] = p.pos.x;
gl_pos_data[4 * i + 1] = p.pos.y;
gl_pos_data[4 * i + 2] = p.pos.z;
gl_pos_data[4 * i + 3] = p.radius;
}
}
void Update(double deltaTime)
{
//make target once I set bounds
float ratio_width = glm::quarter_pi<float>() / static_cast<float>(1920);
float ratio_height = glm::quarter_pi<float>() / static_cast<float>(1080);
double xpos, ypos;
glfwGetCursorPos(window, &xpos, &ypos);
glfwSetCursorPos(window, 1920.0 / 2, 1080.0 / 2);
// Calculate delta of cursor positions from last frame
double delta_x = xpos - 1920.0 / 2;
double delta_y = ypos - 1080.0 / 2;
// Multiply deltas by ratios - gets actual change in orientation
delta_x *= ratio_width;
delta_y *= ratio_height;
cam.Rotate(static_cast<float>(delta_x), static_cast<float>(-delta_y)); // flipped y to revert the invert.
cam.Update(deltaTime);
}
void Render()
{
// Update the OpenGL buffers with updated particle positions.
glBindBuffer(GL_ARRAY_BUFFER, pos_buffer);
glBufferData(GL_ARRAY_BUFFER, MAX_PARTICLES * 4 * sizeof(GLfloat), NULL, GL_STREAM_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, MAX_PARTICLES * sizeof(GLfloat) * 4, gl_pos_data);
glBindBuffer(GL_ARRAY_BUFFER, colour_buffer);
glBufferData(GL_ARRAY_BUFFER, MAX_PARTICLES * 4 * sizeof(GLubyte), NULL, GL_STREAM_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, MAX_PARTICLES * sizeof(GLubyte) * 4, gl_colour_data);
// Clear the screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// glClearColor(1, 1, 1, 1);
glm::mat4 ProjectionMatrix = cam.GetProjection();
glm::mat4 ViewMatrix = cam.GetView();
glm::mat4 ViewProjectionMatrix = ProjectionMatrix * ViewMatrix;
// Use our shader
shader.Use();
glUniform3f(CameraRight_worldspace_ID, ViewMatrix[0][0], ViewMatrix[1][0], ViewMatrix[2][0]);
glUniform3f(CameraUp_worldspace_ID, ViewMatrix[0][1], ViewMatrix[1][1], ViewMatrix[2][1]);
glUniformMatrix4fv(ViewProjMatrixID, 1, GL_FALSE, &ViewProjectionMatrix[0][0]);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, tex.id);
glUniform1i(glGetUniformLocation(shader.GetId(), "tex"), 1);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
// 2nd attribute buffer : positions of particles' centers
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, pos_buffer);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (void*)0);
// 3rd attribute buffer : particles' colors
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, colour_buffer);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, 0, (void*)0);
glVertexAttribDivisor(0, 0);
glVertexAttribDivisor(1, 1);
glVertexAttribDivisor(2, 1);
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, MAX_PARTICLES);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
// Swap buffers
glfwSwapBuffers(window);
glfwPollEvents();
}
int main(void)
{
if (Initialise() == -1)
return -1;
std::ofstream data((std::to_string(BLOCK_SIZE) + "B_" + std::to_string(MAX_PARTICLES) + "P_" + std::to_string(NUM_SIMULATIONS) + "S_" + std::to_string(NUM_TESTS) + "T.csv").c_str(), std::ofstream::out);
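// Benchmark: run NUM_SIMULATIONS steps, NUM_TESTS times, logging each run's
// elapsed seconds to the CSV; LoadParticles() resets the state between runs.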
for (int n = 0; n < NUM_TESTS; n++)
{
clock_t t;
t = clock();
for (int i = 0; i < NUM_SIMULATIONS; i++)
{
SimulateParticles(i);
}
clock_t end = clock();
float elapsedTime = float(end - t) / CLOCKS_PER_SEC;
data << elapsedTime << std::endl;
LoadParticles();
}
data.close();
int i = 0;
//While still running and esc hasn't been pressed
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS && glfwWindowShouldClose(window) == 0)
{
UpdatePosBuffer(i);
double currentTime = glfwGetTime();
double delta = currentTime - lastTime;
Update(delta);
Render();
lastTime = currentTime;
i++;
if (i >= NUM_SIMULATIONS) // valid frame indices are 0..NUM_SIMULATIONS-1
i = 0;
}
delete[] gl_pos_data;
delete[] gl_colour_data;
//Cleanup VBO and shader
glDeleteBuffers(1, &colour_buffer);
glDeleteBuffers(1, &pos_buffer);
glDeleteBuffers(1, &vertex_buffer);
glDeleteProgram(shader.GetId());
glDeleteVertexArrays(1, &VertexArrayID);
//Close OpenGL window and terminate GLFW
glfwTerminate();
return 0;
} |
e55eefe28ad3135fc165c0509a9524982d072aa0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "cdpSimpleQuicksort.h"
#include <iostream>
#include <cstdio>
#define MAX_DEPTH 16
#define INSERTION_SORT 32
//#define INSERTION_SORT 100
////////////////////////////////////////////////////////////////////////////////
// Selection sort used when depth gets too big or the number of elements drops
// below a threshold.
////////////////////////////////////////////////////////////////////////////////
__device__ void selection_sort(int *data, int * index, int left, int right)
{
for (int i = left ; i <= right ; ++i)
{
int min_val = data[i];
int min_idx = i;
//store the index of smallest value
int indexMinValue = index[i];
// Find the smallest value in the range [left, right].
for (int j = i+1 ; j <= right ; ++j)
{
int val_j = data[j];
if (val_j < min_val)
{
min_idx = j;
min_val = val_j;
indexMinValue = index[j];
}
}
// Swap the values.
if (i != min_idx)
{
data[min_idx] = data[i];
data[i] = min_val;
//swap index
index[min_idx] = index[i];
index[i] = indexMinValue;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Very basic quicksort algorithm, recursively launching the next level.
////////////////////////////////////////////////////////////////////////////////
__global__ void cdp_simple_quicksort(int *data, int * index, int left, int right, int depth)
{
// If we're too deep or there are few elements left, we use a selection sort...
if (depth >= MAX_DEPTH || right-left <= INSERTION_SORT)
{
//selection_sort(data, left, right);
selection_sort(data, index, left, right);
return;
}
int *lptr = data+left;
int *rptr = data+right;
int pivot = data[(left+right)/2];
//for index change
int *indexLptr = index + left;
int *indexRptr = index + right;
// Do the partitioning.
while (lptr <= rptr)
{
// Find the next left- and right-hand values to swap
int lval = *lptr;
int rval = *rptr;
// Find the next indexLeft- and indexRight-hand values to swap
int indexLval = *indexLptr;
int indexRval = *indexRptr;
// Move the left pointer as long as the pointed element is smaller than the pivot.
while (lval < pivot)
{
lptr++;
lval = *lptr;
//index pointer should go forward as well
indexLptr++;
indexLval = *indexLptr;
}
// Move the right pointer as long as the pointed element is larger than the pivot.
while (rval > pivot)
{
rptr--;
rval = *rptr;
//index pointer should go backward as well
indexRptr--;
indexRval = *indexRptr;
}
// If the swap points are valid, do the swap!
if (lptr <= rptr)
{
*lptr++ = rval;
*rptr-- = lval;
*indexLptr++ = indexRval;
*indexRptr-- = indexLval;
}
}
// Now the recursive part
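// Each partition is sorted by a child kernel launched from the device (dynamic
// parallelism); separate non-blocking streams keep the two halves independent.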
int nright = rptr - data;
int nleft = lptr - data;
// Launch a new block to sort the left part.
if (left < (rptr-data))
{
hipStream_t s;
hipStreamCreateWithFlags(&s, hipStreamNonBlocking);
//cdp_simple_quicksort<<< 1, 1, 0, s >>>(data, left, nright, depth+1);
hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, s , data, index, left, nright, depth+1);
hipStreamDestroy(s);
}
// Launch a new block to sort the right part.
if ((lptr-data) < right)
{
hipStream_t s1;
hipStreamCreateWithFlags(&s1, hipStreamNonBlocking);
//cdp_simple_quicksort<<< 1, 1, 0, s1 >>>(data, nleft, right, depth+1);
hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, s1 , data, index, nleft, right, depth+1);
hipStreamDestroy(s1);
}
}
////////////////////////////////////////////////////////////////////////////////
// Call the quicksort kernel from the host.
////////////////////////////////////////////////////////////////////////////////
void run_qsort(int *data, int * index, int nitems)
{
// Prepare CDP for the max depth 'MAX_DEPTH'.
hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, MAX_DEPTH);
// Launch on device
int left = 0;
int right = nitems-1;
//cdp_simple_quicksort<<< 1, 1 >>>(data, left, right, 0);
hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1) , 0, 0, data, index, left, right, 0);
hipDeviceSynchronize();
}
| e55eefe28ad3135fc165c0509a9524982d072aa0.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "cdpSimpleQuicksort.h"
#include <iostream>
#include <cstdio>
#define MAX_DEPTH 16
#define INSERTION_SORT 32
//#define INSERTION_SORT 100
////////////////////////////////////////////////////////////////////////////////
// Selection sort used when depth gets too big or the number of elements drops
// below a threshold.
////////////////////////////////////////////////////////////////////////////////
__device__ void selection_sort(int *data, int * index, int left, int right)
{
for (int i = left ; i <= right ; ++i)
{
int min_val = data[i];
int min_idx = i;
//store the index of smallest value
int indexMinValue = index[i];
// Find the smallest value in the range [left, right].
for (int j = i+1 ; j <= right ; ++j)
{
int val_j = data[j];
if (val_j < min_val)
{
min_idx = j;
min_val = val_j;
indexMinValue = index[j];
}
}
// Swap the values.
if (i != min_idx)
{
data[min_idx] = data[i];
data[i] = min_val;
//swap index
index[min_idx] = index[i];
index[i] = indexMinValue;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Very basic quicksort algorithm, recursively launching the next level.
////////////////////////////////////////////////////////////////////////////////
__global__ void cdp_simple_quicksort(int *data, int * index, int left, int right, int depth)
{
// If we're too deep or there are few elements left, we use a selection sort...
if (depth >= MAX_DEPTH || right-left <= INSERTION_SORT)
{
//selection_sort(data, left, right);
selection_sort(data, index, left, right);
return;
}
int *lptr = data+left;
int *rptr = data+right;
int pivot = data[(left+right)/2];
//for index change
int *indexLptr = index + left;
int *indexRptr = index + right;
// Do the partitioning.
while (lptr <= rptr)
{
// Find the next left- and right-hand values to swap
int lval = *lptr;
int rval = *rptr;
// Find the next indexLeft- and indexRight-hand values to swap
int indexLval = *indexLptr;
int indexRval = *indexRptr;
// Move the left pointer as long as the pointed element is smaller than the pivot.
while (lval < pivot)
{
lptr++;
lval = *lptr;
//index pointer should go forward as well
indexLptr++;
indexLval = *indexLptr;
}
// Move the right pointer as long as the pointed element is larger than the pivot.
while (rval > pivot)
{
rptr--;
rval = *rptr;
//index pointer should go backward as well
indexRptr--;
indexRval = *indexRptr;
}
// If the swap points are valid, do the swap!
if (lptr <= rptr)
{
*lptr++ = rval;
*rptr-- = lval;
*indexLptr++ = indexRval;
*indexRptr-- = indexLval;
}
}
// Now the recursive part
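// Each partition is sorted by a child kernel launched from the device (dynamic
// parallelism); separate non-blocking streams keep the two halves independent.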
int nright = rptr - data;
int nleft = lptr - data;
// Launch a new block to sort the left part.
if (left < (rptr-data))
{
cudaStream_t s;
cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
//cdp_simple_quicksort<<< 1, 1, 0, s >>>(data, left, nright, depth+1);
cdp_simple_quicksort<<< 1, 1, 0, s >>>(data, index, left, nright, depth+1);
cudaStreamDestroy(s);
}
// Launch a new block to sort the right part.
if ((lptr-data) < right)
{
cudaStream_t s1;
cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
//cdp_simple_quicksort<<< 1, 1, 0, s1 >>>(data, nleft, right, depth+1);
cdp_simple_quicksort<<< 1, 1, 0, s1 >>>(data, index, nleft, right, depth+1);
cudaStreamDestroy(s1);
}
}
////////////////////////////////////////////////////////////////////////////////
// Call the quicksort kernel from the host.
////////////////////////////////////////////////////////////////////////////////
void run_qsort(int *data, int * index, int nitems)
{
// Prepare CDP for the max depth 'MAX_DEPTH'.
cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, MAX_DEPTH);
// Launch on device
int left = 0;
int right = nitems-1;
//cdp_simple_quicksort<<< 1, 1 >>>(data, left, right, 0);
cdp_simple_quicksort<<< 1, 1 >>>(data, index, left, right, 0);
cudaDeviceSynchronize();
}
|
c27425a3c4be19e41a8fea6da86028277dc2b890.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
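/* Each launch applies the weighted 5-point stencil (5,12,15,12,5)/118 twice,
staging the intermediate sweep in the two shared-memory tiles; the host then
invokes the kernel twice, giving four smoothing sweeps in total. */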
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+16;
//int threadIdx_y = mod((int)threadIdx.y,2);
int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-4);
for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__];
}
__syncthreads();
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__-1),rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__+1),rowy)]);
float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
float __temp_19__ = (__temp_18__ / 118);
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_19__;
}
}
__syncthreads();
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_32__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__-1),rowy)]);
float __temp_35__ = (__tilevar_1__[__iter_11__+(-1)-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
float __temp_39__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
float __temp_43__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
float __temp_47__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__+1),rowy)]);
float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
float __temp_49__ = (__temp_48__ / 118);
__var_1__[__iter_11__+(M)*(__iter_10__)] = __temp_49__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
hipMalloc(&__var_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
}
/*Host Free End*/
| c27425a3c4be19e41a8fea6da86028277dc2b890.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
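/* Each launch applies the weighted 5-point stencil (5,12,15,12,5)/118 twice,
staging the intermediate sweep in the two shared-memory tiles; the host then
invokes the kernel twice, giving four smoothing sweeps in total. */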
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+16;
//int threadIdx_y = mod((int)threadIdx.y,2);
int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-4);
for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__];
}
__syncthreads();
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__-1),rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__+1),rowy)]);
float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
float __temp_19__ = (__temp_18__ / 118);
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_19__;
}
}
__syncthreads();
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_32__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__-1),rowy)]);
float __temp_35__ = (__tilevar_1__[__iter_11__+(-1)-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
float __temp_39__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
float __temp_43__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
float __temp_47__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__+1),rowy)]);
float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
float __temp_49__ = (__temp_48__ / 118);
__var_1__[__iter_11__+(M)*(__iter_10__)] = __temp_49__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
cudaMalloc(&__var_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
}
/*Host Free End*/
|
ed411fe941533d2b9d017246ef09ba833ea8a93d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* ___ _ _ ___ _ __ __ _ ___
* / __| | | | \ /_\ | \/ | /_\ | _ \
* | (__| |_| | |) / _ \ | |\/| |/ _ \| _/
* \___|\___/|___/_/_\_\_|_|__|_/_/_\_\_|_ ___
* / __| | | | _ \ __| _ \___| _ \ __/ __|
* \__ \ |_| | _/ _|| /___| / _|\__ \
* |___/\___/|_| |___|_|_\ |_|_\___|___/
* 2012
*
* by Jens Wetzl ([email protected])
* and Oliver Taubmann ([email protected])
*
* This work is licensed under a Creative Commons
* Attribution 3.0 Unported License. (CC-BY)
* http://creativecommons.org/licenses/by/3.0/
*
**/
#include "SRImage.h"
#include "SRSystemMatrix.h"
#include "ImageIO.h"
#include "cudalbfgs_error_checking.h"
#include <CudaLBFGS/timer.h>
namespace gpu_SRImage
{
__global__ void computeCSRColSums(float *d_colSums, const float *d_systemMatrixVals,
const int *d_systemMatrixRows, const int *d_systemMatrixCols,
const size_t m, const size_t n);
__global__ void elementwiseDiv(float *a, const float *b, const size_t len);
__global__ void divideByCSCColSums(const float *values, const int *colPointers,
float *pixels, const size_t n);
}
SRImage::SRImage(const size_t height, const size_t width)
: m_height(height)
, m_width(width)
, m_numPixels(height * width)
{
CudaSafeCall( hipMalloc((void**) &m_d_pixels, m_numPixels * sizeof(float)) );
}
SRImage::~SRImage()
{
}
void SRImage::setZero()
{
CudaSafeCall( hipMemset(m_d_pixels, 0, m_numPixels * sizeof(float)) );
}
void SRImage::initToAverageImage(const LRImageStack &lrImages, const SRSystemMatrix &systemMatrix,
const GPUHandles &gpuHandles)
{
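// Initialise the high-res estimate to the back-projected average of the low-res
// pixels: pixels = A^T * b, normalised per pixel by the column sums of the system matrix A.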
#ifdef SUPERRES_TIMING
timer averageTimer("averageImage");
timer colSums("avgColSums");
averageTimer.start();
#endif
const size_t m = systemMatrix.getHeight();
const size_t n = systemMatrix.getWidth();
// Compute highresImage = systemMatrix^T lowresVector
#ifndef SUPERRES_STORE_TRANSPOSE
// Use CRS for transpose-multiply (inefficient)
CusparseSafeCall( hipsparseScsrmv(gpuHandles.cusparseHandle, HIPSPARSE_OPERATION_TRANSPOSE, m, n, 1.0f,
gpuHandles.cusparseDescriptor, systemMatrix.getValues(),
systemMatrix.getRowPointers(), systemMatrix.getColIndices(),
lrImages.getPixels(), 0.0f, m_d_pixels) );
#else
// Use CCS for transpose-multiply
CusparseSafeCall( hipsparseScsrmv(gpuHandles.cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, m, 1.0f,
gpuHandles.cusparseDescriptor, systemMatrix.getValuesCCS(),
systemMatrix.getColPointersCCS(), systemMatrix.getRowIndicesCCS(),
lrImages.getPixels(), 0.0f, m_d_pixels) );
#endif
CudaCheckError();
hipDeviceSynchronize();
// Compute column sums of the system matrix to colSums
#ifndef SUPERRES_STORE_TRANSPOSE
float *d_colSums;
CudaSafeCall( hipMalloc((void**) &d_colSums, n * sizeof(float)) );
CudaSafeCall( hipMemset(d_colSums, 0, n * sizeof(float)) );
{
dim3 blockDim(512);
dim3 gridDim = (m % blockDim.x == 0) ? (m / blockDim.x)
: (m / blockDim.x) + 1;
hipLaunchKernelGGL(( gpu_SRImage::computeCSRColSums), dim3(gridDim), dim3(blockDim), 0, 0, d_colSums, systemMatrix.getValues(), systemMatrix.getRowPointers(),
systemMatrix.getColIndices(), m, n);
CudaCheckError();
hipDeviceSynchronize();
}
// Compute pixels[i] /= colSums[i], i = 0..n-1
{
dim3 blockDim(512);
dim3 gridDim = (n % blockDim.x == 0) ? (n / blockDim.x)
: (n / blockDim.x) + 1;
hipLaunchKernelGGL(( gpu_SRImage::elementwiseDiv), dim3(gridDim), dim3(blockDim), 0, 0, m_d_pixels, d_colSums, n);
CudaCheckError();
hipDeviceSynchronize();
}
CudaSafeCall( hipFree(d_colSums) );
#else
dim3 blockDim(512);
dim3 gridDim = (n % blockDim.x == 0) ? (n / blockDim.x)
: (n / blockDim.x) + 1;
hipLaunchKernelGGL(( gpu_SRImage::divideByCSCColSums), dim3(gridDim), dim3(blockDim), 0, 0, systemMatrix.getValuesCCS(),
systemMatrix.getColPointersCCS(), m_d_pixels, n);
#endif
#ifdef SUPERRES_TIMING
averageTimer.stop();
averageTimer.saveMeasurement();
#endif
// saveToFile("highres_initial.txt");
}
void SRImage::saveToFile(const std::string &fileName) const
{
ImageIO::saveGPUImage(fileName, m_d_pixels, m_width, m_height, m_width);
}
void SRImage::destroy()
{
CudaSafeCall( hipFree(m_d_pixels) );
}
namespace gpu_SRImage
{
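// Float atomic add that uses the hardware atomicAdd on sm_20+ and falls back to
// a compare-and-swap loop on older architectures.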
__device__ static void myAtomicAdd(float *address, float value)
{
#if __CUDA_ARCH__ >= 200
atomicAdd(address, value);
#else
// cf. https://www.sharcnet.ca/help/index.php/CUDA_tips_and_tricks
int oldval, newval, readback;
oldval = __float_as_int(*address);
newval = __float_as_int(__int_as_float(oldval) + value);
while ((readback=atomicCAS((int *)address, oldval, newval)) != oldval)
{
oldval = readback;
newval = __float_as_int(__int_as_float(oldval) + value);
}
#endif
}
__global__ void computeCSRColSums(float *d_colSums, const float *d_systemMatrixVals,
const int *d_systemMatrixRows, const int *d_systemMatrixCols,
const size_t m, const size_t n)
{
const size_t row = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= m)
return;
for (size_t cidx = d_systemMatrixRows[row]; cidx < d_systemMatrixRows[row+1]; ++cidx)
{
myAtomicAdd(d_colSums + d_systemMatrixCols[cidx], d_systemMatrixVals[cidx]);
}
}
__global__ void elementwiseDiv(float *a, const float *b, const size_t len)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= len)
return;
a[idx] /= b[idx] + 1e-6f;
}
__global__ void divideByCSCColSums(const float *values, const int *colPointers,
float *pixels, const size_t n)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n)
return;
float weight = 0.0f;
for (size_t ridx = colPointers[idx]; ridx < colPointers[idx+1]; ++ridx)
{
weight += values[ridx];
}
pixels[idx] /= weight + 1e-6f;
}
}
| ed411fe941533d2b9d017246ef09ba833ea8a93d.cu | /**
* ___ _ _ ___ _ __ __ _ ___
* / __| | | | \ /_\ | \/ | /_\ | _ \
* | (__| |_| | |) / _ \ | |\/| |/ _ \| _/
* \___|\___/|___/_/_\_\_|_|__|_/_/_\_\_|_ ___
* / __| | | | _ \ __| _ \___| _ \ __/ __|
* \__ \ |_| | _/ _|| /___| / _|\__ \
* |___/\___/|_| |___|_|_\ |_|_\___|___/
* 2012
*
* by Jens Wetzl ([email protected])
* and Oliver Taubmann ([email protected])
*
* This work is licensed under a Creative Commons
* Attribution 3.0 Unported License. (CC-BY)
* http://creativecommons.org/licenses/by/3.0/
*
**/
#include "SRImage.h"
#include "SRSystemMatrix.h"
#include "ImageIO.h"
#include "cudalbfgs_error_checking.h"
#include <CudaLBFGS/timer.h>
namespace gpu_SRImage
{
__global__ void computeCSRColSums(float *d_colSums, const float *d_systemMatrixVals,
const int *d_systemMatrixRows, const int *d_systemMatrixCols,
const size_t m, const size_t n);
__global__ void elementwiseDiv(float *a, const float *b, const size_t len);
__global__ void divideByCSCColSums(const float *values, const int *colPointers,
float *pixels, const size_t n);
}
SRImage::SRImage(const size_t height, const size_t width)
: m_height(height)
, m_width(width)
, m_numPixels(height * width)
{
CudaSafeCall( cudaMalloc((void**) &m_d_pixels, m_numPixels * sizeof(float)) );
}
SRImage::~SRImage()
{
}
void SRImage::setZero()
{
CudaSafeCall( cudaMemset(m_d_pixels, 0, m_numPixels * sizeof(float)) );
}
void SRImage::initToAverageImage(const LRImageStack &lrImages, const SRSystemMatrix &systemMatrix,
const GPUHandles &gpuHandles)
{
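// Initialise the high-res estimate to the back-projected average of the low-res
// pixels: pixels = A^T * b, normalised per pixel by the column sums of the system matrix A.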
#ifdef SUPERRES_TIMING
timer averageTimer("averageImage");
timer colSums("avgColSums");
averageTimer.start();
#endif
const size_t m = systemMatrix.getHeight();
const size_t n = systemMatrix.getWidth();
// Compute highresImage = systemMatrix^T lowresVector
#ifndef SUPERRES_STORE_TRANSPOSE
// Use CRS for transpose-multiply (inefficient)
CusparseSafeCall( cusparseScsrmv(gpuHandles.cusparseHandle, CUSPARSE_OPERATION_TRANSPOSE, m, n, 1.0f,
gpuHandles.cusparseDescriptor, systemMatrix.getValues(),
systemMatrix.getRowPointers(), systemMatrix.getColIndices(),
lrImages.getPixels(), 0.0f, m_d_pixels) );
#else
// Use CCS for transpose-multiply
CusparseSafeCall( cusparseScsrmv(gpuHandles.cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, m, 1.0f,
gpuHandles.cusparseDescriptor, systemMatrix.getValuesCCS(),
systemMatrix.getColPointersCCS(), systemMatrix.getRowIndicesCCS(),
lrImages.getPixels(), 0.0f, m_d_pixels) );
#endif
CudaCheckError();
cudaDeviceSynchronize();
// Compute column sums of the system matrix to colSums
#ifndef SUPERRES_STORE_TRANSPOSE
float *d_colSums;
CudaSafeCall( cudaMalloc((void**) &d_colSums, n * sizeof(float)) );
CudaSafeCall( cudaMemset(d_colSums, 0, n * sizeof(float)) );
{
dim3 blockDim(512);
dim3 gridDim = (m % blockDim.x == 0) ? (m / blockDim.x)
: (m / blockDim.x) + 1;
gpu_SRImage::computeCSRColSums<<<gridDim, blockDim>>>(d_colSums, systemMatrix.getValues(), systemMatrix.getRowPointers(),
systemMatrix.getColIndices(), m, n);
CudaCheckError();
cudaDeviceSynchronize();
}
// Compute pixels[i] /= colSums[i], i = 0..n-1
{
dim3 blockDim(512);
dim3 gridDim = (n % blockDim.x == 0) ? (n / blockDim.x)
: (n / blockDim.x) + 1;
gpu_SRImage::elementwiseDiv<<<gridDim, blockDim>>>(m_d_pixels, d_colSums, n);
CudaCheckError();
cudaDeviceSynchronize();
}
CudaSafeCall( cudaFree(d_colSums) );
#else
dim3 blockDim(512);
dim3 gridDim = (n % blockDim.x == 0) ? (n / blockDim.x)
: (n / blockDim.x) + 1;
gpu_SRImage::divideByCSCColSums<<<gridDim, blockDim>>>(systemMatrix.getValuesCCS(),
systemMatrix.getColPointersCCS(), m_d_pixels, n);
#endif
#ifdef SUPERRES_TIMING
averageTimer.stop();
averageTimer.saveMeasurement();
#endif
// saveToFile("highres_initial.txt");
}
void SRImage::saveToFile(const std::string &fileName) const
{
ImageIO::saveGPUImage(fileName, m_d_pixels, m_width, m_height, m_width);
}
void SRImage::destroy()
{
CudaSafeCall( cudaFree(m_d_pixels) );
}
namespace gpu_SRImage
{
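// Float atomic add that uses the hardware atomicAdd on sm_20+ and falls back to
// a compare-and-swap loop on older architectures.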
__device__ static void myAtomicAdd(float *address, float value)
{
#if __CUDA_ARCH__ >= 200
atomicAdd(address, value);
#else
// cf. https://www.sharcnet.ca/help/index.php/CUDA_tips_and_tricks
int oldval, newval, readback;
oldval = __float_as_int(*address);
newval = __float_as_int(__int_as_float(oldval) + value);
while ((readback=atomicCAS((int *)address, oldval, newval)) != oldval)
{
oldval = readback;
newval = __float_as_int(__int_as_float(oldval) + value);
}
#endif
}
__global__ void computeCSRColSums(float *d_colSums, const float *d_systemMatrixVals,
const int *d_systemMatrixRows, const int *d_systemMatrixCols,
const size_t m, const size_t n)
{
const size_t row = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= m)
return;
for (size_t cidx = d_systemMatrixRows[row]; cidx < d_systemMatrixRows[row+1]; ++cidx)
{
myAtomicAdd(d_colSums + d_systemMatrixCols[cidx], d_systemMatrixVals[cidx]);
}
}
__global__ void elementwiseDiv(float *a, const float *b, const size_t len)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= len)
return;
a[idx] /= b[idx] + 1e-6f;
}
__global__ void divideByCSCColSums(const float *values, const int *colPointers,
float *pixels, const size_t n)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n)
return;
float weight = 0.0f;
for (size_t ridx = colPointers[idx]; ridx < colPointers[idx+1]; ++ridx)
{
weight += values[ridx];
}
pixels[idx] /= weight + 1e-6f;
}
}
|
7eba68dd40866c34f13cbd1bcbaed84efa4f55c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Stokes Flow with Drag Component kernel
// Last updated: 02/14/13
#ifndef _SLIP_KERNEL_H_
#define _SLIP_KERNEL_H_
// Thread block size
#define POINTS 256
#define MATRIX_SIZE 16
#define THREADS 256
#define BLOCKS 256
__constant__ double INVERSE_QUARTER_PI = M_1_PI/4.0f;
// CUDA kernel
__global__ void SlipKernel(double* xd, double* yd, double* Fxd, double* Fyd, double* Vxd, double* Vyd, double visc, double e, double esq)
{
// block ID
int bx = blockIdx.x;
// cache thread ID
int tx = threadIdx.x;
int idx = threadIdx.x + blockIdx.x * THREADS;
// Declaration of shared memory arrays
__shared__ float cache_x[THREADS];
__shared__ float cache_y[THREADS];
int at = floor((float)idx/POINTS); /* a right shift by log2(POINTS) would probably be faster than this division */
int dueto = idx % POINTS;
// Each thread fills Stokeslet matrix
double rk = sqrt(powf(xd[at]-xd[dueto],2) + powf(yd[at]-yd[dueto],2));
double sq = sqrtf(powf(rk,2) + esq);
double p1 = (INVERSE_QUARTER_PI/visc) * (logf(sq+e)-(e*(sq+2*e))/(sq*(sq+e)));
double p2 = (INVERSE_QUARTER_PI/visc) * (sq+2*e)/(sq*powf(sq+e,2));
// Sub-Stokeslet matrix
cache_x[tx] = -p1*Fxd[dueto] + p2*(powf(xd[at]-xd[dueto],2)*Fxd[dueto] + (xd[at]-xd[dueto])*(yd[at]-yd[dueto])*Fyd[dueto]);
cache_y[tx] = -p1*Fyd[dueto] + p2*((xd[at]-xd[dueto])*(yd[at]-yd[dueto])*Fxd[dueto] + powf(yd[at]-yd[dueto],2)*Fyd[dueto]);
//#define TEST_REDUCTION
#ifdef TEST_REDUCTION
cache_x[tx] = 1.0;
cache_y[tx] = 1.0;
#endif
// Synchronize all threads in a block to ensure submatrix is computed and loaded
__syncthreads();
//printf("DBG: thrd:%d block:%d & stokeslet (%f, %f)\n", tx, bx, cache_x[tx], cache_y[tx]);
// Reduction
// only half the threads work (rest chill and go on for the ride)
int j = blockDim.x/2; // keeps track of active threads
int k = MATRIX_SIZE/2; // stride to the neighbour each thread adds its value to, and simultaneously
// how many entries per row remain to be reduced
while (j >= MATRIX_SIZE ) {
if ( (tx%MATRIX_SIZE) < k ) { // for each row, each active thread adds its value to the value of the neighbour k entries away
cache_x[tx] = cache_x[tx] + cache_x[tx+k];
cache_y[tx] = cache_y[tx] + cache_y[tx+k];
}
j = j >> 1;
k = k >> 1;
__syncthreads();
}
#if 0
for (i=0;i<MATRIX_SIZE;i++)
printf("[%d] %d: %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f\n", tx, i, cache_x[i*MATRIX_SIZE],cache_x[i*MATRIX_SIZE+1],cache_x[i*MATRIX_SIZE+2],cache_x[i*MATRIX_SIZE+3],cache_x[i*MATRIX_SIZE+4],cache_x[i*MATRIX_SIZE+5],cache_x[i*MATRIX_SIZE+6],cache_x[i*MATRIX_SIZE+7],cache_x[i*MATRIX_SIZE+8],cache_x[i*MATRIX_SIZE+9],cache_x[i*MATRIX_SIZE+10],cache_x[i*MATRIX_SIZE+11],cache_x[i*MATRIX_SIZE+12],cache_x[i*MATRIX_SIZE+13],cache_x[i*MATRIX_SIZE+14],cache_x[i*MATRIX_SIZE+15]);
#endif
// Update velocity per stride
if ( (idx%POINTS == 0) ) {
for(int i=0; i<POINTS/MATRIX_SIZE; i++) {
Vxd[idx/POINTS] += cache_x[tx+i*MATRIX_SIZE];
Vyd[idx/POINTS] += cache_y[tx+i*MATRIX_SIZE];
}
}
}
#endif // #ifndef _SLIP_KERNEL_H
| 7eba68dd40866c34f13cbd1bcbaed84efa4f55c8.cu | // Stokes Flow with Drag Component kernel
// Last updated: 02/14/13
#ifndef _SLIP_KERNEL_H_
#define _SLIP_KERNEL_H_
// Thread block size
#define POINTS 256
#define MATRIX_SIZE 16
#define THREADS 256
#define BLOCKS 256
__constant__ double INVERSE_QUARTER_PI = M_1_PI/4.0f;
// CUDA kernel
__global__ void SlipKernel(double* xd, double* yd, double* Fxd, double* Fyd, double* Vxd, double* Vyd, double visc, double e, double esq)
{
// block ID
int bx = blockIdx.x;
// cache thread ID
int tx = threadIdx.x;
int idx = threadIdx.x + blockIdx.x * THREADS;
// Declaration of shared memory arrays
__shared__ float cache_x[THREADS];
__shared__ float cache_y[THREADS];
int at = floor((float)idx/POINTS); /* a right shift by log2(POINTS) would probably be faster than this division */
int dueto = idx % POINTS;
// Each thread fills Stokeslet matrix
double rk = sqrt(powf(xd[at]-xd[dueto],2) + powf(yd[at]-yd[dueto],2));
double sq = sqrtf(powf(rk,2) + esq);
double p1 = (INVERSE_QUARTER_PI/visc) * (logf(sq+e)-(e*(sq+2*e))/(sq*(sq+e)));
double p2 = (INVERSE_QUARTER_PI/visc) * (sq+2*e)/(sq*powf(sq+e,2));
// Sub-Stokeslet matrix
cache_x[tx] = -p1*Fxd[dueto] + p2*(powf(xd[at]-xd[dueto],2)*Fxd[dueto] + (xd[at]-xd[dueto])*(yd[at]-yd[dueto])*Fyd[dueto]);
cache_y[tx] = -p1*Fyd[dueto] + p2*((xd[at]-xd[dueto])*(yd[at]-yd[dueto])*Fxd[dueto] + powf(yd[at]-yd[dueto],2)*Fyd[dueto]);
//#define TEST_REDUCTION
#ifdef TEST_REDUCTION
cache_x[tx] = 1.0;
cache_y[tx] = 1.0;
#endif
// Synchronize all threads in a block to ensure submatrix is computed and loaded
__syncthreads();
//printf("DBG: thrd:%d block:%d & stokeslet (%f, %f)\n", tx, bx, cache_x[tx], cache_y[tx]);
// Reduction
// only half the threads work (rest chill and go on for the ride)
int j = blockDim.x/2; // keeps track of active threads
int k = MATRIX_SIZE/2; // stride to the neighbour each thread adds its value to, and simultaneously
// how many entries per row remain to be reduced
while (j >= MATRIX_SIZE ) {
if ( (tx%MATRIX_SIZE) < k ) { // for each row, each active thread adds its value to the value of the neighbour k entries away
cache_x[tx] = cache_x[tx] + cache_x[tx+k];
cache_y[tx] = cache_y[tx] + cache_y[tx+k];
}
j = j >> 1;
k = k >> 1;
__syncthreads();
}
#if 0
for (i=0;i<MATRIX_SIZE;i++)
printf("[%d] %d: %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f\n", tx, i, cache_x[i*MATRIX_SIZE],cache_x[i*MATRIX_SIZE+1],cache_x[i*MATRIX_SIZE+2],cache_x[i*MATRIX_SIZE+3],cache_x[i*MATRIX_SIZE+4],cache_x[i*MATRIX_SIZE+5],cache_x[i*MATRIX_SIZE+6],cache_x[i*MATRIX_SIZE+7],cache_x[i*MATRIX_SIZE+8],cache_x[i*MATRIX_SIZE+9],cache_x[i*MATRIX_SIZE+10],cache_x[i*MATRIX_SIZE+11],cache_x[i*MATRIX_SIZE+12],cache_x[i*MATRIX_SIZE+13],cache_x[i*MATRIX_SIZE+14],cache_x[i*MATRIX_SIZE+15]);
#endif
// Update velocity per stride
if ( (idx%POINTS == 0) ) {
for(int i=0; i<POINTS/MATRIX_SIZE; i++) {
Vxd[idx/POINTS] += cache_x[tx+i*MATRIX_SIZE];
Vyd[idx/POINTS] += cache_y[tx+i*MATRIX_SIZE];
}
}
}
#endif // #ifndef _SLIP_KERNEL_H_
|
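// A minimal host-side sketch of how SlipKernel might be driven. It assumes 256 interaction
// points, so that the BLOCKS x THREADS launch (256 x 256 = POINTS * POINTS threads) covers
// every (target, source) pair exactly once; the buffer names and the viscosity/regularization
// values are hypothetical and only serve to illustrate the call sequence.
#include <cuda_runtime.h>
#include <vector>

void run_slip_kernel_sketch() {
    const int n = 256;                           // must match POINTS for this launch geometry
    const size_t bytes = n * sizeof(double);
    std::vector<double> h_x(n), h_y(n), h_fx(n), h_fy(n), h_vx(n), h_vy(n);

    double *d_x, *d_y, *d_fx, *d_fy, *d_vx, *d_vy;
    cudaMalloc(&d_x, bytes);  cudaMalloc(&d_y, bytes);
    cudaMalloc(&d_fx, bytes); cudaMalloc(&d_fy, bytes);
    cudaMalloc(&d_vx, bytes); cudaMalloc(&d_vy, bytes);

    cudaMemcpy(d_x,  h_x.data(),  bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y,  h_y.data(),  bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_fx, h_fx.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_fy, h_fy.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemset(d_vx, 0, bytes);                  // velocities are accumulated with +=
    cudaMemset(d_vy, 0, bytes);

    const double visc = 1.0, e = 0.1, esq = e * e;
    SlipKernel<<<256, 256>>>(d_x, d_y, d_fx, d_fy, d_vx, d_vy, visc, e, esq);
    cudaDeviceSynchronize();

    cudaMemcpy(h_vx.data(), d_vx, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_vy.data(), d_vy, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_x);  cudaFree(d_y);
    cudaFree(d_fx); cudaFree(d_fy);
    cudaFree(d_vx); cudaFree(d_vy);
}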
384921a414ae92013377e5169859e0ad3b57664f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void conv2d_broadcast_to_kernel(size_t nthreads, const float *input_data, float * output_data, size_t input_size, size_t output_size){
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if(id >= nthreads) return;
size_t input_id = (id % (input_size * output_size))/output_size;
output_data[id] = input_data[input_id];
}
int DLGpuConv2d_broadcast_to(const DLArrayHandle input_x, DLArrayHandle output_y, DLStreamHandle stream_handle = NULL){
assert (input_x -> shape[0] == output_y -> shape[1]);
const float *input_data = (const float *)input_x -> data;
float *output_data = (float *)output_y ->data;
size_t batch_size = output_y -> shape[0];
size_t input_size = input_x -> shape[0];
size_t output_size = (output_y -> shape[2]) * (output_y -> shape[3]);
size_t nthreads = batch_size * input_size * output_size;
size_t BLOCKS = (nthreads + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (stream_handle)
hipLaunchKernelGGL(( conv2d_broadcast_to_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, *(hipStream_t*)stream_handle->handle, nthreads, input_data, output_data, input_size, output_size);
else
hipLaunchKernelGGL(( conv2d_broadcast_to_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, nthreads, input_data, output_data, input_size, output_size);
return 0;
} | 384921a414ae92013377e5169859e0ad3b57664f.cu | #include "gpu_runtime.h"
__global__ void conv2d_broadcast_to_kernel(size_t nthreads, const float *input_data, float * output_data, size_t input_size, size_t output_size){
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if(id >= nthreads) return;
size_t input_id = (id % (input_size * output_size))/output_size;
output_data[id] = input_data[input_id];
}
int DLGpuConv2d_broadcast_to(const DLArrayHandle input_x, DLArrayHandle output_y, DLStreamHandle stream_handle = NULL){
assert (input_x -> shape[0] == output_y -> shape[1]);
const float *input_data = (const float *)input_x -> data;
float *output_data = (float *)output_y ->data;
size_t batch_size = output_y -> shape[0];
size_t input_size = input_x -> shape[0];
size_t output_size = (output_y -> shape[2]) * (output_y -> shape[3]);
size_t nthreads = batch_size * input_size * output_size;
size_t BLOCKS = (nthreads + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (stream_handle)
conv2d_broadcast_to_kernel<<<BLOCKS, THREADS_PER_BLOCK, 0, *(cudaStream_t*)stream_handle->handle>>>(nthreads, input_data, output_data, input_size, output_size);
else
conv2d_broadcast_to_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(nthreads, input_data, output_data, input_size, output_size);
return 0;
} |
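// A minimal CPU reference for the broadcast done by conv2d_broadcast_to_kernel above: a
// length-C vector is replicated over a contiguous (N, C, H, W) output. Here input_size
// plays the role of C and output_size the role of H*W, mirroring the kernel's index math;
// the function name and raw-pointer layout are assumptions made for illustration.
#include <cstddef>

void conv2d_broadcast_to_reference(const float* input, float* output,
                                   size_t batch_size, size_t input_size, size_t output_size) {
    const size_t nthreads = batch_size * input_size * output_size;
    for (size_t id = 0; id < nthreads; ++id) {
        const size_t input_id = (id % (input_size * output_size)) / output_size;  // channel index
        output[id] = input[input_id];
    }
}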
41860ed1d3f271e5d2cd9b45b66504fbed2522e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include "cuda/vision.h"
// #define FLT_MAX 3.402823466e+38F
// TODO make it in a common file
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename Dtype>
__global__ void RIE_forward_cuda_kernel(
const uint32 nthreads,
const Dtype* feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
uint8* mainDirection_data,
Dtype* aligned_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
uint8 l;
uint8 *direction = mainDirection_data + i * nFeature + j;
Dtype maxVal = -FLT_MAX;
for (l = 0; l < nOrientation; l++) {
Dtype val = *(feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
if (val > maxVal) {
maxVal = val;
*direction = l;
}
}
for (l = 0; l < nOrientation; l++) {
Dtype src = *(feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
uint8 alignedIndex = ((l - (uint8)*direction) + nOrientation) % nOrientation;
Dtype *target = aligned_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ alignedIndex;
*target = src;
}
}
}
template <typename Dtype>
__global__ void RIE_backward_cuda_kernel(
const uint32 nthreads,
const Dtype* aligned_data,
const uint8* mainDirection_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
Dtype* feature_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint8 l;
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
const uint8 direction = *(mainDirection_data + i * nFeature + j);
for (l = 0; l < nOrientation; l++) {
Dtype src = *(aligned_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
uint8 alignedIndex = (l + direction) % nOrientation;
Dtype *target = feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ alignedIndex;
*target = src;
}
}
}
std::tuple<at::Tensor, at::Tensor> RIE_forward_cuda(const at::Tensor& feature,
const uint8 nOrientation) {
AT_ASSERTM(feature.ndimension() == 4, "only supports batch mode.");
AT_ASSERTM(feature.size(2) == 1 && feature.size(3) == 1, "mH x mW should be 1x1.");
AT_ASSERTM(feature.type().is_cuda(), "input must be a CUDA tensor");
const uint16 nBatch = feature.size(0);
const uint16 nChannel = feature.size(1);
const uint16 nFeature = nChannel / nOrientation;
auto mainDirection = at::empty({nBatch, nFeature}, feature.options().dtype(at::kByte));
auto aligned = at::zeros_like(feature);
const long count = nBatch * nFeature;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv(count, 512L), 4096L));
dim3 block(512);
if (mainDirection.numel() == 0) {
THCudaCheck(hipGetLastError());
return std::make_tuple(mainDirection, aligned);
}
AT_DISPATCH_FLOATING_TYPES(feature.type(), "RIE_forward", [&] {
hipLaunchKernelGGL(( RIE_forward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
count,
feature.contiguous().data<scalar_t>(),
nBatch,
nFeature,
nOrientation,
mainDirection.contiguous().data<uint8_t>(),
aligned.contiguous().data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return std::make_tuple(mainDirection, aligned);
}
at::Tensor RIE_backward_cuda(const at::Tensor& mainDirection,
const at::Tensor& gradOutput,
const uint8 nOrientation) {
  AT_ASSERTM(mainDirection.type().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(gradOutput.type().is_cuda(), "gradOutput must be a CUDA tensor");
const uint16 nBatch = mainDirection.size(0);
const uint16 nFeature = mainDirection.size(1);
auto gradInput = at::zeros_like(gradOutput);
const long count = nBatch * nFeature;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv(count, 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (gradOutput.numel() == 0) {
THCudaCheck(hipGetLastError());
return gradInput;
}
AT_DISPATCH_FLOATING_TYPES(gradOutput.type(), "RIE_backward", [&] {
hipLaunchKernelGGL(( RIE_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
count,
gradOutput.contiguous().data<scalar_t>(),
mainDirection.contiguous().data<uint8_t>(),
nBatch,
nFeature,
nOrientation,
gradInput.contiguous().data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return gradInput;
} | 41860ed1d3f271e5d2cd9b45b66504fbed2522e6.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include "cuda/vision.h"
// #define FLT_MAX 3.402823466e+38F
// TODO make it in a common file
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename Dtype>
__global__ void RIE_forward_cuda_kernel(
const uint32 nthreads,
const Dtype* feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
uint8* mainDirection_data,
Dtype* aligned_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
uint8 l;
uint8 *direction = mainDirection_data + i * nFeature + j;
Dtype maxVal = -FLT_MAX;
for (l = 0; l < nOrientation; l++) {
Dtype val = *(feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
if (val > maxVal) {
maxVal = val;
*direction = l;
}
}
for (l = 0; l < nOrientation; l++) {
Dtype src = *(feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
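      // ((l - direction) + nOrientation) % nOrientation is a circular shift of the orientation
      // axis, so the maximal response (the main direction) always lands at aligned index 0.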
uint8 alignedIndex = ((l - (uint8)*direction) + nOrientation) % nOrientation;
Dtype *target = aligned_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ alignedIndex;
*target = src;
}
}
}
template <typename Dtype>
__global__ void RIE_backward_cuda_kernel(
const uint32 nthreads,
const Dtype* aligned_data,
const uint8* mainDirection_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
Dtype* feature_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint8 l;
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
const uint8 direction = *(mainDirection_data + i * nFeature + j);
for (l = 0; l < nOrientation; l++) {
Dtype src = *(aligned_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
uint8 alignedIndex = (l + direction) % nOrientation;
Dtype *target = feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ alignedIndex;
*target = src;
}
}
}
std::tuple<at::Tensor, at::Tensor> RIE_forward_cuda(const at::Tensor& feature,
const uint8 nOrientation) {
AT_ASSERTM(feature.ndimension() == 4, "only supports batch mode.");
AT_ASSERTM(feature.size(2) == 1 && feature.size(3) == 1, "mH x mW should be 1x1.");
AT_ASSERTM(feature.type().is_cuda(), "input must be a CUDA tensor");
const uint16 nBatch = feature.size(0);
const uint16 nChannel = feature.size(1);
const uint16 nFeature = nChannel / nOrientation;
auto mainDirection = at::empty({nBatch, nFeature}, feature.options().dtype(at::kByte));
auto aligned = at::zeros_like(feature);
const long count = nBatch * nFeature;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv(count, 512L), 4096L));
dim3 block(512);
if (mainDirection.numel() == 0) {
THCudaCheck(cudaGetLastError());
return std::make_tuple(mainDirection, aligned);
}
AT_DISPATCH_FLOATING_TYPES(feature.type(), "RIE_forward", [&] {
RIE_forward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
count,
feature.contiguous().data<scalar_t>(),
nBatch,
nFeature,
nOrientation,
mainDirection.contiguous().data<uint8_t>(),
aligned.contiguous().data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return std::make_tuple(mainDirection, aligned);
}
at::Tensor RIE_backward_cuda(const at::Tensor& mainDirection,
const at::Tensor& gradOutput,
const uint8 nOrientation) {
  AT_ASSERTM(mainDirection.type().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(gradOutput.type().is_cuda(), "gradOutput must be a CUDA tensor");
const uint16 nBatch = mainDirection.size(0);
const uint16 nFeature = mainDirection.size(1);
auto gradInput = at::zeros_like(gradOutput);
const long count = nBatch * nFeature;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv(count, 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (gradOutput.numel() == 0) {
THCudaCheck(cudaGetLastError());
return gradInput;
}
AT_DISPATCH_FLOATING_TYPES(gradOutput.type(), "RIE_backward", [&] {
RIE_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
count,
gradOutput.contiguous().data<scalar_t>(),
mainDirection.contiguous().data<uint8_t>(),
nBatch,
nFeature,
nOrientation,
gradInput.contiguous().data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return gradInput;
} |
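// A minimal CPU reference for what RIE_forward_cuda_kernel computes per (batch, feature) pair:
// pick the orientation with the largest response (the "main direction") and rotate the
// nOrientation responses so that this maximum lands at index 0. The plain types and the flat
// (nBatch, nFeature, nOrientation) layout below are assumptions made for illustration.
void rie_forward_reference(const float* feature, float* aligned, unsigned char* main_direction,
                           int nBatch, int nFeature, int nOrientation) {
  for (int i = 0; i < nBatch; ++i) {
    for (int j = 0; j < nFeature; ++j) {
      const float* src = feature + (i * nFeature + j) * nOrientation;
      float* dst = aligned + (i * nFeature + j) * nOrientation;
      int best = 0;
      for (int l = 1; l < nOrientation; ++l)
        if (src[l] > src[best]) best = l;             // first maximum wins, as in the kernel
      main_direction[i * nFeature + j] = (unsigned char)best;
      for (int l = 0; l < nOrientation; ++l)
        dst[(l - best + nOrientation) % nOrientation] = src[l];  // circular shift
    }
  }
}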
918d78e9409ab3ac89522836e96089aa6eead7fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <hipcub/hipcub.hpp>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to CuDNN global
// pooling performance bug which makes pooling extremely slow.
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T sum(0);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
sum += data[j * sz + k];
}
float totalsum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
out[j] = totalsum / sz;
}
__syncthreads();
}
}
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
out[i] = dx[i / sz] / sz;
}
}
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T max(-FLT_MAX);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
max = data[j * sz + k] > max ? data[j * sz + k] : max;
}
float totalmax = BlockReduce(temp_storage).Reduce(max, hipcub::Max());
if (threadIdx.x == 0) {
out[j] = totalmax;
}
__syncthreads();
}
}
template <typename T>
__global__ void global_maxpool_backward_NCHW(
const int NC,
const int sz,
const T* dx,
T* out,
const T* x,
const T* in) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
if (in[i] == x[i / sz]) {
out[i] = dx[i / sz];
} else {
out[i] = 0.0;
}
}
}
template <typename T>
void setTensorDescriptor(
const int size,
const StorageOrder order,
const int N,
const int C,
const int H,
const int W,
const int D,
cudnnTensorDescriptor_t& desc) {
if (size == 4) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc,
GetCudnnTensorFormat(order),
cudnnTypeWrapper<T>::type,
N,
C,
H,
W));
} else {
vector<int> dims = {N, C, H, W, D};
vector<int> strides;
order == NCHW
? strides.insert(strides.end(), {C * H * W * D, H * W * D, W * D, D, 1})
: strides.insert(
strides.end(), {H * W * D * C, 1, W * D * C, D * C, C});
CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
desc,
cudnnTypeWrapper<T>::type,
size > 3 ? size : 4,
dims.data(),
strides.data()));
}
}
} // namespace
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
OPERATOR_NEEDS_FEATURE(kernel_.size() >=2 && kernel_.size() <=3,
"Cudnn pooling only supports 4d and 5d tensor");
if (legacy_pad_ != LegacyPadding::CAFFE_LEGACY_POOLING) {
for (int i = 0; i < kernel_.size(); ++i) {
OPERATOR_NEEDS_FEATURE(
pads_[i] == pads_[kernel_.size() + i],
"The current padding scheme leads to unequal padding on the left "
"and right, which is not supported by cudnn.");
}
}
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
// cuDNN pooling support only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
hipLaunchKernelGGL(( global_maxpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
}
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims().vec();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Ydata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<at::Half>()) {
return DoRunWithType<at::Half, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<int64_t> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
// cuDNN pooling support only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
#if CUDNN_VERSION_MIN(6, 0, 0)
if (mode_ == CUDNN_POOLING_MAX ||
mode_ == CUDNN_POOLING_MAX_DETERMINISTIC) {
#else
if (mode_ == CUDNN_POOLING_MAX) {
#endif
hipLaunchKernelGGL(( global_maxpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->template mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims().vec();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Ydata,
top_desc_,
dYdata,
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dXdata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<at::Half>()) {
return DoRunWithType<at::Half, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<int64_t> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
};
namespace {
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
| 918d78e9409ab3ac89522836e96089aa6eead7fa.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <cub/cub.cuh>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to CuDNN global
// pooling performance bug which makes pooling extremely slow.
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T sum(0);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
sum += data[j * sz + k];
}
float totalsum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
out[j] = totalsum / sz;
}
__syncthreads();
}
}
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
out[i] = dx[i / sz] / sz;
}
}
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T max(-FLT_MAX);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
max = data[j * sz + k] > max ? data[j * sz + k] : max;
}
float totalmax = BlockReduce(temp_storage).Reduce(max, cub::Max());
if (threadIdx.x == 0) {
out[j] = totalmax;
}
__syncthreads();
}
}
template <typename T>
__global__ void global_maxpool_backward_NCHW(
const int NC,
const int sz,
const T* dx,
T* out,
const T* x,
const T* in) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
if (in[i] == x[i / sz]) {
out[i] = dx[i / sz];
} else {
out[i] = 0.0;
}
}
}
template <typename T>
void setTensorDescriptor(
const int size,
const StorageOrder order,
const int N,
const int C,
const int H,
const int W,
const int D,
cudnnTensorDescriptor_t& desc) {
if (size == 4) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc,
GetCudnnTensorFormat(order),
cudnnTypeWrapper<T>::type,
N,
C,
H,
W));
} else {
vector<int> dims = {N, C, H, W, D};
vector<int> strides;
order == NCHW
? strides.insert(strides.end(), {C * H * W * D, H * W * D, W * D, D, 1})
: strides.insert(
strides.end(), {H * W * D * C, 1, W * D * C, D * C, C});
CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
desc,
cudnnTypeWrapper<T>::type,
size > 3 ? size : 4,
dims.data(),
strides.data()));
}
}
} // namespace
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
OPERATOR_NEEDS_FEATURE(kernel_.size() >=2 && kernel_.size() <=3,
"Cudnn pooling only supports 4d and 5d tensor");
if (legacy_pad_ != LegacyPadding::CAFFE_LEGACY_POOLING) {
for (int i = 0; i < kernel_.size(); ++i) {
OPERATOR_NEEDS_FEATURE(
pads_[i] == pads_[kernel_.size() + i],
"The current padding scheme leads to unequal padding on the left "
"and right, which is not supported by cudnn.");
}
}
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
// cuDNN pooling support only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
global_avgpool_kernel_NCHW<float>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
global_maxpool_kernel_NCHW<float>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
}
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims().vec();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Ydata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<at::Half>()) {
return DoRunWithType<at::Half, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<int64_t> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
// cuDNN pooling support only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
global_avgpool_backward_NCHW<float>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
#if CUDNN_VERSION_MIN(6, 0, 0)
if (mode_ == CUDNN_POOLING_MAX ||
mode_ == CUDNN_POOLING_MAX_DETERMINISTIC) {
#else
if (mode_ == CUDNN_POOLING_MAX) {
#endif
global_maxpool_backward_NCHW<float>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
dY.data<float>(),
dX->template mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims().vec();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Ydata,
top_desc_,
dYdata,
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dXdata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<at::Half>()) {
return DoRunWithType<at::Half, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<int64_t> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
};
namespace {
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
|
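// A standalone sketch of the block-reduction pattern used by the global-pooling fast paths
// above: one thread block walks one (n, c) plane, each thread accumulates a strided partial
// sum, and cub::BlockReduce combines the partials. The 256-thread block size and the kernel
// name are assumptions for illustration; the launch must use exactly 256 threads per block.
#include <cub/cub.cuh>

__global__ void global_avgpool_sketch(const float* in, float* out,
                                      int planes, int plane_size) {
  typedef cub::BlockReduce<float, 256> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (int p = blockIdx.x; p < planes; p += gridDim.x) {
    float partial = 0.f;
    for (int k = threadIdx.x; k < plane_size; k += blockDim.x) {
      partial += in[p * plane_size + k];
    }
    const float total = BlockReduce(temp_storage).Sum(partial);  // valid in thread 0 only
    if (threadIdx.x == 0) {
      out[p] = total / plane_size;
    }
    __syncthreads();  // temp_storage is reused by the next plane
  }
}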
e609ec8b5df63b0f752a4d544da3053e24b1a60d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reconstruction.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/cuda.hpp>
#include <stdio.h>
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
// KERNEL FUNCTIONS
//----------------------
__global__ void GPU_signal_extraction(uchar* dev_data,
uchar* dev_coeff,
uchar* dev_ss_row,
uchar* dev_ss_col,
uchar* dev_pinv_im,
int im_rows,
int im_cols,
const int data_step,
const int ss_row_step,
const int ss_col_step,
const int pinv_im_step,
const int coeff_step) {
float prod;
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int idy = threadIdx.y + blockIdx.y * blockDim.y;
int data_elem_size = sizeof(uint16_t);
int ss_elem_size = sizeof(uint16_t);
int pinv_elem_size = sizeof(float);
int coeff_elem_size = sizeof(float);
if (idy < im_rows && idx < im_cols) {
uint16_t* pix_loc_data = (uint16_t*)(dev_data + data_step*idy + data_elem_size*idx);
uint16_t* pix_loc_ss_row = (uint16_t*)(dev_ss_row + ss_row_step*idy + ss_elem_size*idx);
uint16_t* pix_loc_ss_col = (uint16_t*)(dev_ss_col + ss_col_step*idy + ss_elem_size*idx);
float* pix_loc_pinv_im = (float*)(dev_pinv_im + pinv_im_step*idy + pinv_elem_size*idx);
float* pix_loc_coeff = (float*)(dev_coeff + coeff_step * *pix_loc_ss_row + coeff_elem_size * *pix_loc_ss_col);
prod = (float)*pix_loc_data * *pix_loc_pinv_im;
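		// All pixels of one subsquare resolve to the same (ss_row, ss_col) coefficient cell,
		// so many threads hit the same address; atomicAdd keeps the accumulation race-free.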
atomicAdd(pix_loc_coeff, prod);
}
}
//__global__ void GPU_signal_extraction(cuda::GpuMat* dev_data,
// cuda::GpuMat* dev_coeff,
// cuda::GpuMat* dev_ss_row,
// cuda::GpuMat* dev_ss_col,
// cuda::GpuMat* dev_pinv_im,
// int im_rows,
// int im_cols) {
//
//
// float prod;
// const int idx = threadIdx.x + blockIdx.x * blockDim.x;
// const int idy = threadIdx.y + blockIdx.y * blockDim.y;
//
// int data_elem_size = 2;
// int ss_elem_size = 2;
// int pinv_elem_size = 4;
// int coeff_elem_size = 4;
//
// if (idy < im_rows && idx < im_cols) {
//
//		printf("data address on GPU = %d", dev_data->data);
// }
//
//}
//---------------------
//CLASS FUNCTIONS-------------
Reconstruction::Reconstruction(const int in_im_rows, const int in_im_cols, const int in_im_slices, float* in_pattern, const int in_nr_bases, float* in_sigmas, uchar** rawdata_ptr, vector<vector<Mat>>* in_coeffs_mats)
: im_rows(in_im_rows), im_cols(in_im_cols), im_slices(in_im_slices), nr_bases(in_nr_bases)
{
for (int i = 0; i < this->nr_bases; i++) {
this->sigmas.push_back(in_sigmas[i]);
}
this->pattern[0] = in_pattern[0];
this->pattern[1] = in_pattern[1];
this->pattern[2] = in_pattern[2];
this->pattern[3] = in_pattern[3];
this->coeffs_mats = *in_coeffs_mats;
this->raw_data_ptr = rawdata_ptr;
}
Reconstruction::Reconstruction(const int in_im_rows, const int in_im_cols, const int in_im_slices, float* in_pattern, const int in_nr_bases, float* in_sigmas, uchar** rawdata_ptr, uchar*** in_coeffs_ptrs)
:im_rows(in_im_rows), im_cols(in_im_cols), im_slices(in_im_slices), nr_bases(in_nr_bases)
{
for(int i = 0; i < this->nr_bases; i++) {
this->sigmas.push_back(in_sigmas[i]);
}
this->sigmas[0] = in_sigmas[0];
this->sigmas[1] = in_sigmas[1];
this->pattern[0] = in_pattern[0];
this->pattern[1] = in_pattern[1];
this->pattern[2] = in_pattern[2];
this->pattern[3] = in_pattern[3];
this->coeffs_ptrs = in_coeffs_ptrs;
this->raw_data_ptr = rawdata_ptr;
}
Reconstruction::~Reconstruction()
{
}
void Reconstruction::make_subsquares() {
Mat subsquares_row(im_rows, im_cols, DataType<uint16_t>::type);
Mat subsquares_col(im_rows, im_cols, DataType<uint16_t>::type);
int ss;
float pat_start_row = pattern[0];
float pat_period_rows = pattern[2];
int mem = 0;
Mat aux;
this->ss_row_start.push_back(0);
for (int i = 0; i < im_rows; i++) {
ss = (int)(i / pat_period_rows + 1.0f / 2.0f - pat_start_row / pat_period_rows); //Cast to int implies floor operation.
subsquares_row.row(i).setTo((uint16_t)ss);
if (ss != mem) {
this->ss_row_start.push_back(i);
this->ss_row_end.push_back(i - 1);
mem = ss;
}
}
this->ss_row_end.push_back(subsquares_row.rows - 1);
float pat_start_col = pattern[1];
float pat_period_cols = pattern[3];
mem = 0;
this->ss_col_start.push_back(0);
for (int j = 0; j < im_cols; j++) {
ss = (int)(j / pat_period_cols + 1.0f / 2.0f - pat_start_col / pat_period_cols);
subsquares_col.col(j).setTo((uint16_t)ss);
if (ss != mem) {
this->ss_col_start.push_back(j);
this->ss_col_end.push_back(j - 1);
mem = ss;
}
}
this->ss_col_end.push_back(subsquares_row.cols - 1);
this->grid_rows = subsquares_row.at<uint16_t>(im_rows - 1, 0) + 1;
	this->grid_cols = subsquares_col.at<uint16_t>(0, im_cols - 1) + 1;
this->ss_row = subsquares_row;
this->ss_col = subsquares_col;
}
void Reconstruction::make_bases_im() {
float ss_center_row;
float ss_center_col;
Mat ss;
Mat temp_bases_im(this->im_rows, this->im_cols, CV_32F);
for (int q = 0; q < this->nr_bases; q++) {
for (int i = 0; i < this->grid_rows; i++) {
for (int j = 0; j < this->grid_cols; j++) {
ss = temp_bases_im(Range(this->ss_row_start.at(i), this->ss_row_end.at(i) + 1), Range(this->ss_col_start.at(j), this->ss_col_end.at(j) + 1));
ss_center_row = this->pattern[0] + i * this->pattern[2] - this->ss_row_start.at(i);
ss_center_col = this->pattern[1] + j * this->pattern[3] - this->ss_col_start.at(j);
for (int k = 0; k < ss.rows; k++) {
for (int l = 0; l < ss.cols; l++) {
ss.at<float>(k, l) = exp(-(pow(k - ss_center_row, 2) + pow(l - ss_center_col, 2)) / (2 * pow(this->sigmas[q], 2)));
}
}
}
}
if(q == this->nr_bases - 1)
this->bases_ims.push_back(temp_bases_im);
else
this->bases_ims.push_back(temp_bases_im.clone());
}
}
void Reconstruction::make_pinv_im() {
//Mat ss_row_debug(this->im_cols, this->im_cols, CV_16U, this->ss_row.data);
float ss_center_row;
float ss_center_col;
Mat ss;
Mat ss_vec;
Mat ss_mat_inv;
Mat ss_mat;
Mat temp_mat;
for (int q = 0; q < this->nr_bases; q++) {
temp_mat.create(this->im_rows, this->im_cols, CV_32F);
this->pinv_ims.push_back(temp_mat.clone());
}
Range row_range;
Range col_range;
for (int i = 0; i < this->grid_rows; i++) {
for (int j = 0; j < this->grid_cols; j++) {
row_range = Range(this->ss_row_start.at(i), this->ss_row_end.at(i) + 1);
col_range = Range(this->ss_col_start.at(j), this->ss_col_end.at(j) + 1);
ss_mat.create(this->nr_bases, row_range.size()*col_range.size(), CV_32F);
for (int q = 0; q < this->nr_bases; q++) {
ss = this->bases_ims[q](row_range, col_range).clone();
ss.reshape(0, 1).copyTo(ss_mat.row(q));
}
ss_mat_inv = ss_mat.inv(cv::DECOMP_SVD);
for (int q = 0; q < this->nr_bases; q++) {
ss_vec = ss_mat_inv.col(q).clone();
ss_vec.reshape(0, ss.rows).copyTo(ss);
ss.copyTo(this->pinv_ims[q](row_range, col_range));
}
}
}
}
void Reconstruction::extract_signal_GPU() {
hipEvent_t start, stop;
hipError_t cudaStatus;
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 block(32, 32);
dim3 grid((this->im_cols + block.x - 1) / block.x, (this->im_rows + block.y - 1) / block.y);
cuda::GpuMat dev_ss_row;
cuda::GpuMat dev_ss_col;
cuda::GpuMat dev_pinv_im;
cuda::GpuMat dev_data;
cuda::GpuMat dev_coeff(this->grid_rows, this->grid_cols, CV_32F);
dev_ss_row.upload(this->ss_row);
dev_ss_col.upload(this->ss_col);
for (int q = 0; q < this->nr_bases; q++) {
dev_pinv_im.upload(this->pinv_ims[q]);
for (int i = 0; i < this->im_slices; i++) {
Mat rawdata(this->im_rows, this->im_cols, CV_16U, this->raw_data_ptr[i]);
Mat coeffs(this->grid_rows, this->grid_cols, CV_32F, this->coeffs_ptrs[q][i]);
dev_data.upload(rawdata);
dev_coeff.setTo(Scalar(0));
hipEventRecord(start);
GPU_signal_extraction << < grid, block >> > (dev_data.data, dev_coeff.data, dev_ss_row.data, dev_ss_col.data, dev_pinv_im.data, im_rows, im_cols, dev_data.step, dev_ss_row.step, dev_ss_col.step, dev_pinv_im.step, dev_coeff.step);
hipDeviceSynchronize();
hipEventRecord(stop);
//dev_coeff.download(this->coeffs_mats[q][i]);
dev_coeff.download(coeffs);
}
}
this->cudaStatus = hipEventElapsedTime(&this->elapsed, start, stop);
}
void Reconstruction::extract_signal_CPU() {
uint16_t ci, cj;
for (int q = 0; q < this->nr_bases; q++) {
for (int s = 0; s < this->im_slices; s++) {
Mat rawdata(this->im_rows, this->im_cols, CV_16U, this->raw_data_ptr[s]);
Mat coeffs(this->grid_rows, this->grid_cols, CV_32F, this->coeffs_ptrs[q][s]);
for (int i = 0; i < this->im_rows; i++) {
for (int j = 0; j < this->im_cols; j++) {
ci = ss_row.at<uint16_t>(i, j);
cj = ss_col.at<uint16_t>(i, j);
coeffs.at<float>(ci, cj) += this->pinv_ims[q].at<float>(i, j) * rawdata.at<uint16_t>(i, j);
}
}
}
}
} | e609ec8b5df63b0f752a4d544da3053e24b1a60d.cu | #include "reconstruction.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/cuda.hpp>
#include <stdio.h>
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
// KERNEL FUNCTIONS
//----------------------
__global__ void GPU_signal_extraction(uchar* dev_data,
uchar* dev_coeff,
uchar* dev_ss_row,
uchar* dev_ss_col,
uchar* dev_pinv_im,
int im_rows,
int im_cols,
const int data_step,
const int ss_row_step,
const int ss_col_step,
const int pinv_im_step,
const int coeff_step) {
float prod;
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int idy = threadIdx.y + blockIdx.y * blockDim.y;
int data_elem_size = sizeof(uint16_t);
int ss_elem_size = sizeof(uint16_t);
int pinv_elem_size = sizeof(float);
int coeff_elem_size = sizeof(float);
if (idy < im_rows && idx < im_cols) {
uint16_t* pix_loc_data = (uint16_t*)(dev_data + data_step*idy + data_elem_size*idx);
uint16_t* pix_loc_ss_row = (uint16_t*)(dev_ss_row + ss_row_step*idy + ss_elem_size*idx);
uint16_t* pix_loc_ss_col = (uint16_t*)(dev_ss_col + ss_col_step*idy + ss_elem_size*idx);
float* pix_loc_pinv_im = (float*)(dev_pinv_im + pinv_im_step*idy + pinv_elem_size*idx);
float* pix_loc_coeff = (float*)(dev_coeff + coeff_step * *pix_loc_ss_row + coeff_elem_size * *pix_loc_ss_col);
prod = (float)*pix_loc_data * *pix_loc_pinv_im;
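		// All pixels of one subsquare resolve to the same (ss_row, ss_col) coefficient cell,
		// so many threads hit the same address; atomicAdd keeps the accumulation race-free.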
atomicAdd(pix_loc_coeff, prod);
}
}
//__global__ void GPU_signal_extraction(cuda::GpuMat* dev_data,
// cuda::GpuMat* dev_coeff,
// cuda::GpuMat* dev_ss_row,
// cuda::GpuMat* dev_ss_col,
// cuda::GpuMat* dev_pinv_im,
// int im_rows,
// int im_cols) {
//
//
// float prod;
// const int idx = threadIdx.x + blockIdx.x * blockDim.x;
// const int idy = threadIdx.y + blockIdx.y * blockDim.y;
//
// int data_elem_size = 2;
// int ss_elem_size = 2;
// int pinv_elem_size = 4;
// int coeff_elem_size = 4;
//
// if (idy < im_rows && idx < im_cols) {
//
//		printf("data address on GPU = %d", dev_data->data);
// }
//
//}
//---------------------
//CLASS FUNCTIONS-------------
Reconstruction::Reconstruction(const int in_im_rows, const int in_im_cols, const int in_im_slices, float* in_pattern, const int in_nr_bases, float* in_sigmas, uchar** rawdata_ptr, vector<vector<Mat>>* in_coeffs_mats)
: im_rows(in_im_rows), im_cols(in_im_cols), im_slices(in_im_slices), nr_bases(in_nr_bases)
{
for (int i = 0; i < this->nr_bases; i++) {
this->sigmas.push_back(in_sigmas[i]);
}
this->pattern[0] = in_pattern[0];
this->pattern[1] = in_pattern[1];
this->pattern[2] = in_pattern[2];
this->pattern[3] = in_pattern[3];
this->coeffs_mats = *in_coeffs_mats;
this->raw_data_ptr = rawdata_ptr;
}
Reconstruction::Reconstruction(const int in_im_rows, const int in_im_cols, const int in_im_slices, float* in_pattern, const int in_nr_bases, float* in_sigmas, uchar** rawdata_ptr, uchar*** in_coeffs_ptrs)
:im_rows(in_im_rows), im_cols(in_im_cols), im_slices(in_im_slices), nr_bases(in_nr_bases)
{
for(int i = 0; i < this->nr_bases; i++) {
this->sigmas.push_back(in_sigmas[i]);
}
this->sigmas[0] = in_sigmas[0];
this->sigmas[1] = in_sigmas[1];
this->pattern[0] = in_pattern[0];
this->pattern[1] = in_pattern[1];
this->pattern[2] = in_pattern[2];
this->pattern[3] = in_pattern[3];
this->coeffs_ptrs = in_coeffs_ptrs;
this->raw_data_ptr = rawdata_ptr;
}
Reconstruction::~Reconstruction()
{
}
void Reconstruction::make_subsquares() {
Mat subsquares_row(im_rows, im_cols, DataType<uint16_t>::type);
Mat subsquares_col(im_rows, im_cols, DataType<uint16_t>::type);
int ss;
float pat_start_row = pattern[0];
float pat_period_rows = pattern[2];
int mem = 0;
Mat aux;
this->ss_row_start.push_back(0);
for (int i = 0; i < im_rows; i++) {
ss = (int)(i / pat_period_rows + 1.0f / 2.0f - pat_start_row / pat_period_rows); //Cast to int implies floor operation.
subsquares_row.row(i).setTo((uint16_t)ss);
if (ss != mem) {
this->ss_row_start.push_back(i);
this->ss_row_end.push_back(i - 1);
mem = ss;
}
}
this->ss_row_end.push_back(subsquares_row.rows - 1);
float pat_start_col = pattern[1];
float pat_period_cols = pattern[3];
mem = 0;
this->ss_col_start.push_back(0);
for (int j = 0; j < im_cols; j++) {
ss = (int)(j / pat_period_cols + 1.0f / 2.0f - pat_start_col / pat_period_cols);
subsquares_col.col(j).setTo((uint16_t)ss);
if (ss != mem) {
this->ss_col_start.push_back(j);
this->ss_col_end.push_back(j - 1);
mem = ss;
}
}
this->ss_col_end.push_back(subsquares_row.cols - 1);
this->grid_rows = subsquares_row.at<uint16_t>(im_rows - 1, 0) + 1;
	this->grid_cols = subsquares_col.at<uint16_t>(0, im_cols - 1) + 1;
this->ss_row = subsquares_row;
this->ss_col = subsquares_col;
}
void Reconstruction::make_bases_im() {
float ss_center_row;
float ss_center_col;
Mat ss;
Mat temp_bases_im(this->im_rows, this->im_cols, CV_32F);
for (int q = 0; q < this->nr_bases; q++) {
for (int i = 0; i < this->grid_rows; i++) {
for (int j = 0; j < this->grid_cols; j++) {
ss = temp_bases_im(Range(this->ss_row_start.at(i), this->ss_row_end.at(i) + 1), Range(this->ss_col_start.at(j), this->ss_col_end.at(j) + 1));
ss_center_row = this->pattern[0] + i * this->pattern[2] - this->ss_row_start.at(i);
ss_center_col = this->pattern[1] + j * this->pattern[3] - this->ss_col_start.at(j);
for (int k = 0; k < ss.rows; k++) {
for (int l = 0; l < ss.cols; l++) {
ss.at<float>(k, l) = exp(-(pow(k - ss_center_row, 2) + pow(l - ss_center_col, 2)) / (2 * pow(this->sigmas[q], 2)));
}
}
}
}
if(q == this->nr_bases - 1)
this->bases_ims.push_back(temp_bases_im);
else
this->bases_ims.push_back(temp_bases_im.clone());
}
}
void Reconstruction::make_pinv_im() {
//Mat ss_row_debug(this->im_cols, this->im_cols, CV_16U, this->ss_row.data);
float ss_center_row;
float ss_center_col;
Mat ss;
Mat ss_vec;
Mat ss_mat_inv;
Mat ss_mat;
Mat temp_mat;
for (int q = 0; q < this->nr_bases; q++) {
temp_mat.create(this->im_rows, this->im_cols, CV_32F);
this->pinv_ims.push_back(temp_mat.clone());
}
Range row_range;
Range col_range;
for (int i = 0; i < this->grid_rows; i++) {
for (int j = 0; j < this->grid_cols; j++) {
row_range = Range(this->ss_row_start.at(i), this->ss_row_end.at(i) + 1);
col_range = Range(this->ss_col_start.at(j), this->ss_col_end.at(j) + 1);
ss_mat.create(this->nr_bases, row_range.size()*col_range.size(), CV_32F);
for (int q = 0; q < this->nr_bases; q++) {
ss = this->bases_ims[q](row_range, col_range).clone();
ss.reshape(0, 1).copyTo(ss_mat.row(q));
}
ss_mat_inv = ss_mat.inv(cv::DECOMP_SVD);
for (int q = 0; q < this->nr_bases; q++) {
ss_vec = ss_mat_inv.col(q).clone();
ss_vec.reshape(0, ss.rows).copyTo(ss);
ss.copyTo(this->pinv_ims[q](row_range, col_range));
}
}
}
}
void Reconstruction::extract_signal_GPU() {
cudaEvent_t start, stop;
cudaError_t cudaStatus;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 block(32, 32);
dim3 grid((this->im_cols + block.x - 1) / block.x, (this->im_rows + block.y - 1) / block.y);
cuda::GpuMat dev_ss_row;
cuda::GpuMat dev_ss_col;
cuda::GpuMat dev_pinv_im;
cuda::GpuMat dev_data;
cuda::GpuMat dev_coeff(this->grid_rows, this->grid_cols, CV_32F);
dev_ss_row.upload(this->ss_row);
dev_ss_col.upload(this->ss_col);
for (int q = 0; q < this->nr_bases; q++) {
dev_pinv_im.upload(this->pinv_ims[q]);
for (int i = 0; i < this->im_slices; i++) {
Mat rawdata(this->im_rows, this->im_cols, CV_16U, this->raw_data_ptr[i]);
Mat coeffs(this->grid_rows, this->grid_cols, CV_32F, this->coeffs_ptrs[q][i]);
dev_data.upload(rawdata);
dev_coeff.setTo(Scalar(0));
cudaEventRecord(start);
GPU_signal_extraction << < grid, block >> > (dev_data.data, dev_coeff.data, dev_ss_row.data, dev_ss_col.data, dev_pinv_im.data, im_rows, im_cols, dev_data.step, dev_ss_row.step, dev_ss_col.step, dev_pinv_im.step, dev_coeff.step);
cudaDeviceSynchronize();
cudaEventRecord(stop);
//dev_coeff.download(this->coeffs_mats[q][i]);
dev_coeff.download(coeffs);
}
}
this->cudaStatus = cudaEventElapsedTime(&this->elapsed, start, stop);
}
void Reconstruction::extract_signal_CPU() {
uint16_t ci, cj;
for (int q = 0; q < this->nr_bases; q++) {
for (int s = 0; s < this->im_slices; s++) {
Mat rawdata(this->im_rows, this->im_cols, CV_16U, this->raw_data_ptr[s]);
Mat coeffs(this->grid_rows, this->grid_cols, CV_32F, this->coeffs_ptrs[q][s]);
for (int i = 0; i < this->im_rows; i++) {
for (int j = 0; j < this->im_cols; j++) {
ci = ss_row.at<uint16_t>(i, j);
cj = ss_col.at<uint16_t>(i, j);
coeffs.at<float>(ci, cj) += this->pinv_ims[q].at<float>(i, j) * rawdata.at<uint16_t>(i, j);
}
}
}
}
} |
6cfae357a411c26d10a03c88377a87e37a8cbad5.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define mm_BLOCK_SIZE 32
//#define mm_SUPER_BLOCKS_PER_SM 4
//int mm_SUPER_BLOCKS_PER_SM = 4;
#define iSizeMultiple 4 //must be multiples of 15
#define WA (4 * mm_BLOCK_SIZE) // Matrix A width
#define HA (4 * mm_BLOCK_SIZE) // Matrix A height
//#define WB (mm_SUPER_BLOCKS_PER_SM * mm_BLOCK_SIZE) // Matrix B width
#define WB (60 * mm_BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
#define mm_GRID_X (WC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_GRID_Y (HC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y)
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
texture<float,2,hipReadModeElementType> tex_A;
texture<float,2,hipReadModeElementType> tex_B;
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j) {
double sum = 0;
for (unsigned int k = 0; k < wA; ++k) {
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
__global__ void
mm_kernel(float *B, float* C, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * mm_BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = mm_BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = mm_BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = mm_BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
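// A is fetched through the 2D texture: the linear element index
// (a + wA*ty + tx) is split into coordinates (index % wA, index / wA);
// B is read directly from global memory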
AS(ty, tx) = tex2D(tex_A,(a+wA*ty+tx)%wA,(a+wA*ty+tx)/wA);//A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < mm_BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
//if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
// hipSetDevice(1);
srand(2013);
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = WA * iSizeMultiple;
uiHA = HA * iSizeMultiple;
uiWB = WB * iSizeMultiple;
uiHB = HB * iSizeMultiple;
uiWC = WC * iSizeMultiple;
uiHC = HC * iSizeMultiple;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
float* d_B;
hipMalloc((void **)&d_B,mem_size_B);
// hipMemcpy()
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
checkCudaErrors(hipMalloc((void**) &d_A, mem_size_A));
// checkCudaErrors(hipMalloc((void**) &d_B, mem_size_B));
hipChannelFormatDesc channelDescA = hipCreateChannelDesc<float>();
hipChannelFormatDesc channelDescB = hipCreateChannelDesc<float>();
hipArray* A_Array, *B_Array;
hipMallocArray(&A_Array, &channelDescA, uiWA, uiHA);
hipMallocArray(&B_Array, &channelDescB, uiWB, uiHB);
// Copy to device memory some data located at address h_data
// in host memory
hipMemcpyToArray(A_Array, 0, 0, h_A, uiWA * uiHA * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpyToArray(B_Array, 0, 0, h_B, uiWB * uiHB * sizeof(float),
hipMemcpyHostToDevice);
// Set texture reference parameters
tex_A.addressMode[0] = hipAddressModeWrap;
tex_A.addressMode[1] = hipAddressModeWrap;
tex_A.filterMode = hipFilterModePoint;
tex_B.addressMode[0] = hipAddressModeWrap;
tex_B.addressMode[1] = hipAddressModeWrap;
tex_B.filterMode = hipFilterModePoint;
// Bind the array to the texture reference
hipBindTextureToArray(tex_A, A_Array, channelDescA);
hipBindTextureToArray(tex_B, B_Array, channelDescB);
// copy host memory to device
//checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) );
//checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) );
checkCudaErrors(hipMalloc((void**) &d_C, mem_size_C));
hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
// setup execution parameters
dim3 mm_grid(mm_GRID_X, mm_GRID_Y);
dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE);
// int mm_grid=mm_GRID_X*mm_GRID_Y;
hipLaunchKernelGGL(( mm_kernel), dim3(mm_grid), dim3(mm_block), 0, 0, d_B,d_C, uiWA, uiWB);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
// copy result from device to host
checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) );
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
// check result (matrixMul)
bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL");
// ofstream f1("mm_correct.txt");
// for(int i=0; i<size_C; ++i)
// f1 << reference[i] << endl;
// f1.close();
//
// ofstream f2("mm_gpu.txt");
// for(int i=0; i<size_C; ++i)
// f2 << h_C[i] << endl;
// f2.close();
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
return 0;
}
| 6cfae357a411c26d10a03c88377a87e37a8cbad5.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define mm_BLOCK_SIZE 32
//#define mm_SUPER_BLOCKS_PER_SM 4
//int mm_SUPER_BLOCKS_PER_SM = 4;
#define iSizeMultiple 4 //must be multiples of 15
#define WA (4 * mm_BLOCK_SIZE) // Matrix A width
#define HA (4 * mm_BLOCK_SIZE) // Matrix A height
//#define WB (mm_SUPER_BLOCKS_PER_SM * mm_BLOCK_SIZE) // Matrix B width
#define WB (60 * mm_BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
#define mm_GRID_X (WC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_GRID_Y (HC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y)
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
texture<float,2,cudaReadModeElementType> tex_A;
texture<float,2,cudaReadModeElementType> tex_B;
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j) {
double sum = 0;
for (unsigned int k = 0; k < wA; ++k) {
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
__global__ void
mm_kernel(float *B, float* C, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * mm_BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = mm_BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = mm_BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = mm_BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
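// A is fetched through the 2D texture: the linear element index
// (a + wA*ty + tx) is split into coordinates (index % wA, index / wA);
// B is read directly from global memory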
AS(ty, tx) = tex2D(tex_A,(a+wA*ty+tx)%wA,(a+wA*ty+tx)/wA);//A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < mm_BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
//if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
// cudaSetDevice(1);
srand(2013);
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = WA * iSizeMultiple;
uiHA = HA * iSizeMultiple;
uiWB = WB * iSizeMultiple;
uiHB = HB * iSizeMultiple;
uiWC = WC * iSizeMultiple;
uiHC = HC * iSizeMultiple;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
float* d_B;
cudaMalloc((void **)&d_B,mem_size_B);
// cudaMemcpy()
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
checkCudaErrors(cudaMalloc((void**) &d_A, mem_size_A));
// checkCudaErrors(cudaMalloc((void**) &d_B, mem_size_B));
cudaChannelFormatDesc channelDescA = cudaCreateChannelDesc<float>();
cudaChannelFormatDesc channelDescB = cudaCreateChannelDesc<float>();
cudaArray* A_Array, *B_Array;
cudaMallocArray(&A_Array, &channelDescA, uiWA, uiHA);
cudaMallocArray(&B_Array, &channelDescB, uiWB, uiHB);
// Copy to device memory some data located at address h_data
// in host memory
cudaMemcpyToArray(A_Array, 0, 0, h_A, uiWA * uiHA * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpyToArray(B_Array, 0, 0, h_B, uiWB * uiHB * sizeof(float),
cudaMemcpyHostToDevice);
// Set texture reference parameters
tex_A.addressMode[0] = cudaAddressModeWrap;
tex_A.addressMode[1] = cudaAddressModeWrap;
tex_A.filterMode = cudaFilterModePoint;
tex_B.addressMode[0] = cudaAddressModeWrap;
tex_B.addressMode[1] = cudaAddressModeWrap;
tex_B.filterMode = cudaFilterModePoint;
// Bind the array to the texture reference
cudaBindTextureToArray(tex_A, A_Array, channelDescA);
cudaBindTextureToArray(tex_B, B_Array, channelDescB);
// copy host memory to device
//checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) );
//checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) );
checkCudaErrors(cudaMalloc((void**) &d_C, mem_size_C));
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
// setup execution parameters
dim3 mm_grid(mm_GRID_X, mm_GRID_Y);
dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE);
// int mm_grid=mm_GRID_X*mm_GRID_Y;
mm_kernel<<< mm_grid, mm_block>>>(d_B,d_C, uiWA, uiWB);
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
// copy result from device to host
checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) );
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
// check result (matrixMul)
bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL");
// ofstream f1("mm_correct.txt");
// for(int i=0; i<size_C; ++i)
// f1 << reference[i] << endl;
// f1.close();
//
// ofstream f2("mm_gpu.txt");
// for(int i=0; i<size_C; ++i)
// f2 << h_C[i] << endl;
// f2.close();
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
return 0;
}
|
f45acb301a81de224d3521390b5363acff5890e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "linalg/map_then_reduce.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename Type, typename MapOp>
__global__ void naiveMapReduceKernel(Type *out, const Type *in, size_t len,
MapOp map) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
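// Naive reference: every thread atomically adds its mapped element into
// the single accumulator out[0]; mapThenSumReduce is checked against this.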
myAtomicAdd(out, map(in[idx]));
}
}
template <typename Type, typename MapOp>
void naiveMapReduce(Type *out, const Type *in, size_t len, MapOp map, hipStream_t stream) {
static const int TPB = 64;
int nblks = ceildiv(len, (size_t)TPB);
hipLaunchKernelGGL(( naiveMapReduceKernel<Type, MapOp>), dim3(nblks), dim3(TPB), 0, stream, out, in, len, map);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
struct MapReduceInputs {
T tolerance;
size_t len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MapReduceInputs<T> &dims) {
return os;
}
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
template <typename T>
void mapReduceLaunch(T *out_ref, T *out, const T *in, size_t len, hipStream_t stream) {
auto op = [] __device__(T in) { return in; };
naiveMapReduce(out_ref, in, len, op, stream);
mapThenSumReduce(out, len, op, 0, in);
}
template <typename T>
class MapReduceTest : public ::testing::TestWithParam<MapReduceInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MapReduceInputs<T>>::GetParam();
Random::Rng r(params.seed);
auto len = params.len;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0), stream);
mapReduceLaunch(out_ref, out, in, len, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
protected:
MapReduceInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<MapReduceInputs<float>> inputsf = {
{0.001f, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<float> MapReduceTestF;
TEST_P(MapReduceTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestF,
::testing::ValuesIn(inputsf));
const std::vector<MapReduceInputs<double>> inputsd = {
{0.000001, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<double> MapReduceTestD;
TEST_P(MapReduceTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
| f45acb301a81de224d3521390b5363acff5890e4.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "linalg/map_then_reduce.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename Type, typename MapOp>
__global__ void naiveMapReduceKernel(Type *out, const Type *in, size_t len,
MapOp map) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
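// Naive reference: every thread atomically adds its mapped element into
// the single accumulator out[0]; mapThenSumReduce is checked against this.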
myAtomicAdd(out, map(in[idx]));
}
}
template <typename Type, typename MapOp>
void naiveMapReduce(Type *out, const Type *in, size_t len, MapOp map, cudaStream_t stream) {
static const int TPB = 64;
int nblks = ceildiv(len, (size_t)TPB);
naiveMapReduceKernel<Type, MapOp><<<nblks, TPB, 0, stream>>>(out, in, len, map);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
struct MapReduceInputs {
T tolerance;
size_t len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MapReduceInputs<T> &dims) {
return os;
}
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
template <typename T>
void mapReduceLaunch(T *out_ref, T *out, const T *in, size_t len, cudaStream_t stream) {
auto op = [] __device__(T in) { return in; };
naiveMapReduce(out_ref, in, len, op, stream);
mapThenSumReduce(out, len, op, 0, in);
}
template <typename T>
class MapReduceTest : public ::testing::TestWithParam<MapReduceInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MapReduceInputs<T>>::GetParam();
Random::Rng r(params.seed);
auto len = params.len;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0), stream);
mapReduceLaunch(out_ref, out, in, len, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
protected:
MapReduceInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<MapReduceInputs<float>> inputsf = {
{0.001f, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<float> MapReduceTestF;
TEST_P(MapReduceTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestF,
::testing::ValuesIn(inputsf));
const std::vector<MapReduceInputs<double>> inputsd = {
{0.000001, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<double> MapReduceTestD;
TEST_P(MapReduceTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
|
6c89f0da32d208c7ec0ed2ca8ab6d82d5cdf2022.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void gt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "gt_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a > b;
});
});
}
REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda);
}} // namespace at::native
| 6c89f0da32d208c7ec0ed2ca8ab6d82d5cdf2022.cu | #include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void gt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "gt_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a > b;
});
});
}
REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda);
}} // namespace at::native
|
aa1d907f6ff75c17ab133d885bf56c690d0d81ad.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include "high_res_clock.h"
#include "hip/hip_runtime_api.h"
#include <cugraph.h>
#include <omp.h>
#include "test_utils.h"
#include "snmg_test_utils.h"
struct MGcoo2csr_Usecase {
std::string matrix_file;
MGcoo2csr_Usecase(const std::string& a) {
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
// if RAPIDS_DATASET_ROOT_DIR not set, default to "/datasets"
const std::string& rapidsDatasetRootDir = get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
matrix_file = rapidsDatasetRootDir + "/" + a;
} else {
matrix_file = a;
}
}
MGcoo2csr_Usecase& operator=(const MGcoo2csr_Usecase& rhs) {
matrix_file = rhs.matrix_file;
return *this;
}
};
class Tests_MGcoo2csr: public ::testing::TestWithParam<MGcoo2csr_Usecase> {
public:
Tests_MGcoo2csr() {
}
static void SetupTestCase() {
}
static void TearDownTestCase() {
}
virtual void SetUp() {
}
virtual void TearDown() {
}
static std::vector<double> mgspmv_time;
template<typename idx_t, typename val_t>
void run_current_test(const MGcoo2csr_Usecase& param) {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".")
+ std::string(test_info->name()) + std::string("_") + getFileName(param.matrix_file)
+ std::string("_") + ss.str().c_str();
std::cout << test_id << "\n";
int m, k, nnz, n_gpus;
MM_typecode mc;
gdf_error status;
double t;
FILE* fpin = fopen(param.matrix_file.c_str(), "r");
if (!fpin) {
std::cout << "Could not open file: " << param.matrix_file << "\n";
FAIL();
}
ASSERT_EQ(mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz),0)<< "could not read Matrix Market file properties"<< "\n";
ASSERT_TRUE(mm_is_matrix(mc));
ASSERT_TRUE(mm_is_coordinate(mc));
ASSERT_FALSE(mm_is_complex(mc));
ASSERT_FALSE(mm_is_skew(mc));
// Allocate memory on host
std::vector<idx_t> cooRowInd(nnz), cooColInd(nnz), csrColInd(nnz), csrRowPtr(m + 1);
std::vector<idx_t> degree_h(m, 0.0), degree_ref(m, 0.0);
std::vector<val_t> csrVal(nnz, 0.0);
// Read
ASSERT_EQ( (mm_to_coo<int,int>(fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], NULL, NULL)) , 0)<< "could not read matrix data"<< "\n";
ASSERT_EQ(fclose(fpin), 0);
//ASSERT_EQ( (coo_to_csr<int,val_t> (m, m, nnz, &cooRowInd[0], &cooColInd[0], NULL, NULL, &csrRowPtr[0], NULL, NULL, NULL)), 0) << "could not convert COO to CSR "<< "\n";
std::vector<idx_t> cooRowInd_tmp(cooRowInd);
std::vector<idx_t> cooColInd_tmp(cooColInd);
coo2csr(cooRowInd_tmp, cooColInd_tmp, csrRowPtr, csrColInd);
CUDA_RT_CALL(hipGetDeviceCount(&n_gpus));
std::vector<size_t> v_loc(n_gpus), e_loc(n_gpus), part_offset(n_gpus + 1), part_offset_r(n_gpus
+ 1);
void* comm1;
if (nnz < 1200000000) {
#pragma omp parallel num_threads(1)
{
//omp_set_num_threads(n_gpus);
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(hipSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
EXPECT_EQ(part_offset[0], part_offset_r[0]);
EXPECT_EQ(part_offset[1], part_offset_r[1]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
if (n_gpus > 1)
{
// Only using the 4 fully connected GPUs on DGX1
if (n_gpus == 8)
n_gpus = 4;
#pragma omp parallel num_threads(n_gpus)
{
//omp_set_num_threads(n_gpus);
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(hipSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "multi-GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
for (int j = 0; j < n_gpus + 1; j++)
EXPECT_EQ(part_offset[j], part_offset_r[j]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
std::cout << std::endl;
}
};
TEST_P(Tests_MGcoo2csr, CheckInt32_floatmtx) {
run_current_test<int, float>(GetParam());
}
TEST_P(Tests_MGcoo2csr, CheckInt32_doublemtx) {
run_current_test<int, double>(GetParam());
}
INSTANTIATE_TEST_CASE_P(mtx_test, Tests_MGcoo2csr,
::testing::Values(MGcoo2csr_Usecase("test/datasets/karate.mtx"),
MGcoo2csr_Usecase("test/datasets/netscience.mtx"),
MGcoo2csr_Usecase("test/datasets/cit-Patents.mtx"),
MGcoo2csr_Usecase("test/datasets/webbase-1M.mtx"),
MGcoo2csr_Usecase("test/datasets/web-Google.mtx"),
MGcoo2csr_Usecase("test/datasets/wiki-Talk.mtx")));
class Tests_MGcoo2csrTrans: public ::testing::TestWithParam<MGcoo2csr_Usecase> {
public:
Tests_MGcoo2csrTrans() {
}
static void SetupTestCase() {
}
static void TearDownTestCase() {
}
virtual void SetUp() {
}
virtual void TearDown() {
}
static std::vector<double> mgspmv_time;
template<typename idx_t, typename val_t>
void run_current_test(const MGcoo2csr_Usecase& param) {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".")
+ std::string(test_info->name()) + std::string("_") + getFileName(param.matrix_file)
+ std::string("_") + ss.str().c_str();
std::cout << test_id << "\n";
int m, k, nnz, n_gpus;
MM_typecode mc;
gdf_error status;
double t;
FILE* fpin = fopen(param.matrix_file.c_str(), "r");
if (!fpin) {
std::cout << "Could not open file: " << param.matrix_file << "\n";
FAIL();
}
ASSERT_EQ(mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz),0)<< "could not read Matrix Market file properties"<< "\n";
ASSERT_TRUE(mm_is_matrix(mc));
ASSERT_TRUE(mm_is_coordinate(mc));
ASSERT_FALSE(mm_is_complex(mc));
ASSERT_FALSE(mm_is_skew(mc));
// Allocate memory on host
std::vector<idx_t> cooRowInd(nnz), cooColInd(nnz), csrColInd(nnz), csrRowPtr(m + 1);
std::vector<idx_t> degree_h(m, 0.0), degree_ref(m, 0.0);
std::vector<val_t> csrVal(nnz, 0.0);
// Read
ASSERT_EQ( (mm_to_coo<int,int>(fpin, 1, nnz, &cooColInd[0], &cooRowInd[0], NULL, NULL)) , 0)<< "could not read matrix data"<< "\n";
ASSERT_EQ(fclose(fpin), 0);
//ASSERT_EQ( (coo_to_csr<int,val_t> (m, m, nnz, &cooRowInd[0], &cooColInd[0], NULL, NULL, &csrRowPtr[0], NULL, NULL, NULL)), 0) << "could not convert COO to CSR "<< "\n";
std::vector<idx_t> cooRowInd_tmp(cooRowInd);
std::vector<idx_t> cooColInd_tmp(cooColInd);
coo2csr(cooRowInd_tmp, cooColInd_tmp, csrRowPtr, csrColInd);
CUDA_RT_CALL(hipGetDeviceCount(&n_gpus));
std::vector<size_t> v_loc(n_gpus), e_loc(n_gpus), part_offset(n_gpus + 1), part_offset_r(n_gpus
+ 1);
void* comm1;
if (nnz < 1200000000) {
#pragma omp parallel num_threads(1)
{
//omp_set_num_threads(n_gpus);
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(hipSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
EXPECT_EQ(part_offset[0], part_offset_r[0]);
EXPECT_EQ(part_offset[1], part_offset_r[1]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
if (n_gpus > 1)
{
// Only using the 4 fully connected GPUs on DGX1
if (n_gpus == 8)
n_gpus = 4;
#pragma omp parallel num_threads(n_gpus)
{
//omp_set_num_threads(n_gpus);
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(hipSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "multi-GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
for (int j = 0; j < n_gpus + 1; j++)
EXPECT_EQ(part_offset[j], part_offset_r[j]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
std::cout << std::endl;
}
};
TEST_P(Tests_MGcoo2csrTrans, CheckInt32_floatmtx) {
run_current_test<int, float>(GetParam());
}
TEST_P(Tests_MGcoo2csrTrans, CheckInt32_doublemtx) {
run_current_test<int, double>(GetParam());
}
INSTANTIATE_TEST_CASE_P(mtx_test, Tests_MGcoo2csrTrans,
::testing::Values(MGcoo2csr_Usecase("test/datasets/karate.mtx"),
MGcoo2csr_Usecase("test/datasets/netscience.mtx"),
MGcoo2csr_Usecase("test/datasets/cit-Patents.mtx"),
MGcoo2csr_Usecase("test/datasets/webbase-1M.mtx"),
MGcoo2csr_Usecase("test/datasets/web-Google.mtx"),
MGcoo2csr_Usecase("test/datasets/wiki-Talk.mtx")));
class Tests_MGcoo2csr_hibench: public ::testing::TestWithParam<MGcoo2csr_Usecase> {
public:
Tests_MGcoo2csr_hibench() {
}
static void SetupTestCase() {
}
static void TearDownTestCase() {
}
virtual void SetUp() {
}
virtual void TearDown() {
}
static std::vector<double> mgspmv_time;
template<typename idx_t, typename val_t>
void run_current_test(const MGcoo2csr_Usecase& param) {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".")
+ std::string(test_info->name()) + std::string("_") + getFileName(param.matrix_file)
+ std::string("_") + ss.str().c_str();
std::cout << "Filename: " << param.matrix_file << "\n";
int m, nnz, n_gpus;
gdf_error status;
std::vector<idx_t> cooRowInd, cooColInd;
double t;
ASSERT_EQ(read_single_file(param.matrix_file.c_str(), cooRowInd, cooColInd), 0);
nnz = cooRowInd.size();
m = ::max(*(std::max_element(cooRowInd.begin(), cooRowInd.end())),
*(std::max_element(cooColInd.begin(), cooColInd.end())));
m += 1;
// Allocate memory on host
std::vector<idx_t> csrColInd(nnz), csrRowPtr(m + 1), degree_ref(m), degree_h(m);
std::vector<val_t> csrVal(nnz, 0);
std::vector<idx_t> cooRowInd_tmp(cooRowInd);
std::vector<idx_t> cooColInd_tmp(cooColInd);
coo2csr(cooRowInd_tmp, cooColInd_tmp, csrRowPtr, csrColInd);
CUDA_RT_CALL(hipGetDeviceCount(&n_gpus));
std::vector<size_t> v_loc(n_gpus), e_loc(n_gpus), part_offset(n_gpus + 1), part_offset_r(n_gpus + 1);
void* comm1;
if (nnz < 1200000000) {
#pragma omp parallel num_threads(1)
{
//omp_set_num_threads(n_gpus);
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(hipSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
EXPECT_EQ(part_offset[0], part_offset_r[0]);
EXPECT_EQ(part_offset[1], part_offset_r[1]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
if (n_gpus > 1) {
// Only using the 4 fully connected GPUs on DGX1
if (n_gpus == 8)
n_gpus = 4;
#pragma omp parallel num_threads(n_gpus)
{
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(hipSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "multi-GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
for (int j = 0; j < n_gpus + 1; j++)
EXPECT_EQ(part_offset[j], part_offset_r[j]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
std::cout << std::endl;
}
};
TEST_P(Tests_MGcoo2csr_hibench, CheckFP32_hibench) {
run_current_test<int, float>(GetParam());
}
TEST_P(Tests_MGcoo2csr_hibench, CheckFP64_hibench) {
run_current_test<int, double>(GetParam());
}
INSTANTIATE_TEST_CASE_P(hibench_test,
Tests_MGcoo2csr_hibench,
::testing::Values(MGcoo2csr_Usecase("benchmark/hibench/1/Input-small/edges/part-00000"),
MGcoo2csr_Usecase("benchmark/hibench/1/Input-large/edges/part-00000"),
MGcoo2csr_Usecase("benchmark/hibench/1/Input-huge/edges/part-00000")));
int main(int argc, char **argv) {
srand(42);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| aa1d907f6ff75c17ab133d885bf56c690d0d81ad.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include "high_res_clock.h"
#include "cuda_profiler_api.h"
#include <cugraph.h>
#include <omp.h>
#include "test_utils.h"
#include "snmg_test_utils.h"
struct MGcoo2csr_Usecase {
std::string matrix_file;
MGcoo2csr_Usecase(const std::string& a) {
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
// if RAPIDS_DATASET_ROOT_DIR not set, default to "/datasets"
const std::string& rapidsDatasetRootDir = get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
matrix_file = rapidsDatasetRootDir + "/" + a;
} else {
matrix_file = a;
}
}
MGcoo2csr_Usecase& operator=(const MGcoo2csr_Usecase& rhs) {
matrix_file = rhs.matrix_file;
return *this;
}
};
class Tests_MGcoo2csr: public ::testing::TestWithParam<MGcoo2csr_Usecase> {
public:
Tests_MGcoo2csr() {
}
static void SetupTestCase() {
}
static void TearDownTestCase() {
}
virtual void SetUp() {
}
virtual void TearDown() {
}
static std::vector<double> mgspmv_time;
template<typename idx_t, typename val_t>
void run_current_test(const MGcoo2csr_Usecase& param) {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".")
+ std::string(test_info->name()) + std::string("_") + getFileName(param.matrix_file)
+ std::string("_") + ss.str().c_str();
std::cout << test_id << "\n";
int m, k, nnz, n_gpus;
MM_typecode mc;
gdf_error status;
double t;
FILE* fpin = fopen(param.matrix_file.c_str(), "r");
if (!fpin) {
std::cout << "Could not open file: " << param.matrix_file << "\n";
FAIL();
}
ASSERT_EQ(mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz),0)<< "could not read Matrix Market file properties"<< "\n";
ASSERT_TRUE(mm_is_matrix(mc));
ASSERT_TRUE(mm_is_coordinate(mc));
ASSERT_FALSE(mm_is_complex(mc));
ASSERT_FALSE(mm_is_skew(mc));
// Allocate memory on host
std::vector<idx_t> cooRowInd(nnz), cooColInd(nnz), csrColInd(nnz), csrRowPtr(m + 1);
std::vector<idx_t> degree_h(m, 0.0), degree_ref(m, 0.0);
std::vector<val_t> csrVal(nnz, 0.0);
// Read
ASSERT_EQ( (mm_to_coo<int,int>(fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], NULL, NULL)) , 0)<< "could not read matrix data"<< "\n";
ASSERT_EQ(fclose(fpin), 0);
//ASSERT_EQ( (coo_to_csr<int,val_t> (m, m, nnz, &cooRowInd[0], &cooColInd[0], NULL, NULL, &csrRowPtr[0], NULL, NULL, NULL)), 0) << "could not convert COO to CSR "<< "\n";
std::vector<idx_t> cooRowInd_tmp(cooRowInd);
std::vector<idx_t> cooColInd_tmp(cooColInd);
coo2csr(cooRowInd_tmp, cooColInd_tmp, csrRowPtr, csrColInd);
CUDA_RT_CALL(cudaGetDeviceCount(&n_gpus));
std::vector<size_t> v_loc(n_gpus), e_loc(n_gpus), part_offset(n_gpus + 1), part_offset_r(n_gpus
+ 1);
void* comm1;
if (nnz < 1200000000) {
#pragma omp parallel num_threads(1)
{
//omp_set_num_threads(n_gpus);
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(cudaSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
EXPECT_EQ(part_offset[0], part_offset_r[0]);
EXPECT_EQ(part_offset[1], part_offset_r[1]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
if (n_gpus > 1)
{
// Only using the 4 fully connected GPUs on DGX1
if (n_gpus == 8)
n_gpus = 4;
#pragma omp parallel num_threads(n_gpus)
{
//omp_set_num_threads(n_gpus);
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(cudaSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "multi-GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
for (int j = 0; j < n_gpus + 1; j++)
EXPECT_EQ(part_offset[j], part_offset_r[j]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
std::cout << std::endl;
}
};
TEST_P(Tests_MGcoo2csr, CheckInt32_floatmtx) {
run_current_test<int, float>(GetParam());
}
TEST_P(Tests_MGcoo2csr, CheckInt32_doublemtx) {
run_current_test<int, double>(GetParam());
}
INSTANTIATE_TEST_CASE_P(mtx_test, Tests_MGcoo2csr,
::testing::Values(MGcoo2csr_Usecase("test/datasets/karate.mtx"),
MGcoo2csr_Usecase("test/datasets/netscience.mtx"),
MGcoo2csr_Usecase("test/datasets/cit-Patents.mtx"),
MGcoo2csr_Usecase("test/datasets/webbase-1M.mtx"),
MGcoo2csr_Usecase("test/datasets/web-Google.mtx"),
MGcoo2csr_Usecase("test/datasets/wiki-Talk.mtx")));
class Tests_MGcoo2csrTrans: public ::testing::TestWithParam<MGcoo2csr_Usecase> {
public:
Tests_MGcoo2csrTrans() {
}
static void SetupTestCase() {
}
static void TearDownTestCase() {
}
virtual void SetUp() {
}
virtual void TearDown() {
}
static std::vector<double> mgspmv_time;
template<typename idx_t, typename val_t>
void run_current_test(const MGcoo2csr_Usecase& param) {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".")
+ std::string(test_info->name()) + std::string("_") + getFileName(param.matrix_file)
+ std::string("_") + ss.str().c_str();
std::cout << test_id << "\n";
int m, k, nnz, n_gpus;
MM_typecode mc;
gdf_error status;
double t;
FILE* fpin = fopen(param.matrix_file.c_str(), "r");
if (!fpin) {
std::cout << "Could not open file: " << param.matrix_file << "\n";
FAIL();
}
ASSERT_EQ(mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz),0)<< "could not read Matrix Market file properties"<< "\n";
ASSERT_TRUE(mm_is_matrix(mc));
ASSERT_TRUE(mm_is_coordinate(mc));
ASSERT_FALSE(mm_is_complex(mc));
ASSERT_FALSE(mm_is_skew(mc));
// Allocate memory on host
std::vector<idx_t> cooRowInd(nnz), cooColInd(nnz), csrColInd(nnz), csrRowPtr(m + 1);
std::vector<idx_t> degree_h(m, 0.0), degree_ref(m, 0.0);
std::vector<val_t> csrVal(nnz, 0.0);
// Read
ASSERT_EQ( (mm_to_coo<int,int>(fpin, 1, nnz, &cooColInd[0], &cooRowInd[0], NULL, NULL)) , 0)<< "could not read matrix data"<< "\n";
ASSERT_EQ(fclose(fpin), 0);
//ASSERT_EQ( (coo_to_csr<int,val_t> (m, m, nnz, &cooRowInd[0], &cooColInd[0], NULL, NULL, &csrRowPtr[0], NULL, NULL, NULL)), 0) << "could not convert COO to CSR "<< "\n";
std::vector<idx_t> cooRowInd_tmp(cooRowInd);
std::vector<idx_t> cooColInd_tmp(cooColInd);
coo2csr(cooRowInd_tmp, cooColInd_tmp, csrRowPtr, csrColInd);
CUDA_RT_CALL(cudaGetDeviceCount(&n_gpus));
std::vector<size_t> v_loc(n_gpus), e_loc(n_gpus), part_offset(n_gpus + 1), part_offset_r(n_gpus
+ 1);
void* comm1;
if (nnz < 1200000000) {
#pragma omp parallel num_threads(1)
{
//omp_set_num_threads(n_gpus);
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(cudaSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
EXPECT_EQ(part_offset[0], part_offset_r[0]);
EXPECT_EQ(part_offset[1], part_offset_r[1]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
if (n_gpus > 1)
{
// Only using the 4 fully connected GPUs on DGX1
if (n_gpus == 8)
n_gpus = 4;
#pragma omp parallel num_threads(n_gpus)
{
//omp_set_num_threads(n_gpus);
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(cudaSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "multi-GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
for (int j = 0; j < n_gpus + 1; j++)
EXPECT_EQ(part_offset[j], part_offset_r[j]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
std::cout << std::endl;
}
};
TEST_P(Tests_MGcoo2csrTrans, CheckInt32_floatmtx) {
run_current_test<int, float>(GetParam());
}
TEST_P(Tests_MGcoo2csrTrans, CheckInt32_doublemtx) {
run_current_test<int, double>(GetParam());
}
INSTANTIATE_TEST_CASE_P(mtx_test, Tests_MGcoo2csrTrans,
::testing::Values(MGcoo2csr_Usecase("test/datasets/karate.mtx"),
MGcoo2csr_Usecase("test/datasets/netscience.mtx"),
MGcoo2csr_Usecase("test/datasets/cit-Patents.mtx"),
MGcoo2csr_Usecase("test/datasets/webbase-1M.mtx"),
MGcoo2csr_Usecase("test/datasets/web-Google.mtx"),
MGcoo2csr_Usecase("test/datasets/wiki-Talk.mtx")));
class Tests_MGcoo2csr_hibench: public ::testing::TestWithParam<MGcoo2csr_Usecase> {
public:
Tests_MGcoo2csr_hibench() {
}
static void SetupTestCase() {
}
static void TearDownTestCase() {
}
virtual void SetUp() {
}
virtual void TearDown() {
}
static std::vector<double> mgspmv_time;
template<typename idx_t, typename val_t>
void run_current_test(const MGcoo2csr_Usecase& param) {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".")
+ std::string(test_info->name()) + std::string("_") + getFileName(param.matrix_file)
+ std::string("_") + ss.str().c_str();
std::cout << "Filename: " << param.matrix_file << "\n";
int m, nnz, n_gpus;
gdf_error status;
std::vector<idx_t> cooRowInd, cooColInd;
double t;
ASSERT_EQ(read_single_file(param.matrix_file.c_str(), cooRowInd, cooColInd), 0);
nnz = cooRowInd.size();
m = std::max(*(std::max_element(cooRowInd.begin(), cooRowInd.end())),
*(std::max_element(cooColInd.begin(), cooColInd.end())));
m += 1;
// Allocate memory on host
std::vector<idx_t> csrColInd(nnz), csrRowPtr(m + 1), degree_ref(m), degree_h(m);
std::vector<val_t> csrVal(nnz, 0);
std::vector<idx_t> cooRowInd_tmp(cooRowInd);
std::vector<idx_t> cooColInd_tmp(cooColInd);
coo2csr(cooRowInd_tmp, cooColInd_tmp, csrRowPtr, csrColInd);
CUDA_RT_CALL(cudaGetDeviceCount(&n_gpus));
std::vector<size_t> v_loc(n_gpus), e_loc(n_gpus), part_offset(n_gpus + 1), part_offset_r(n_gpus + 1);
void* comm1;
if (nnz < 1200000000) {
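      // Single-GPU pass first (one OpenMP thread); the multi-GPU pass follows below.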
#pragma omp parallel num_threads(1)
{
//omp_set_num_threads(n_gpus);
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(cudaSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
EXPECT_EQ(part_offset[0], part_offset_r[0]);
EXPECT_EQ(part_offset[1], part_offset_r[1]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
if (n_gpus > 1) {
// Only using the 4 fully connected GPUs on DGX1
if (n_gpus == 8)
n_gpus = 4;
#pragma omp parallel num_threads(n_gpus)
{
auto i = omp_get_thread_num();
auto p = omp_get_num_threads();
CUDA_RT_CALL(cudaSetDevice(i));
#ifdef SNMG_VERBOSE
#pragma omp master
{
std::cout << "Number of GPUs : "<< n_gpus <<std::endl;
std::cout << "Number of threads : "<< p <<std::endl;
}
#endif
gdf_column *csr_off = new gdf_column;
gdf_column *csr_ind = new gdf_column;
gdf_column *csr_val = new gdf_column;
gdf_column *col_off = new gdf_column;
gdf_column *col_ind = new gdf_column;
gdf_column *col_val = new gdf_column;
gdf_column *coo_row = new gdf_column;
gdf_column *coo_col = new gdf_column;
gdf_column *coo_val = new gdf_column;
#pragma omp barrier
//load a chunk of the graph on each GPU
load_csr_loc(csrRowPtr, csrColInd, csrVal,
v_loc,
e_loc, part_offset,
col_off,
col_ind, col_val);
//load a chunk of the graph on each GPU COO
load_coo_loc(cooRowInd, cooColInd, csrVal, coo_row, coo_col, coo_val);
t = omp_get_wtime();
status = gdf_snmg_coo2csr(&part_offset_r[0],
false,
&comm1,
coo_row,
coo_col,
coo_val,
csr_off,
csr_ind,
csr_val);
if (status != 0) {
std::cout << "Call to gdf_snmg_coo2csr failed: " << gdf_error_get_name(status) << "\n";
}
EXPECT_EQ(status, 0);
#pragma omp master
{
std::cout << "multi-GPU time: " << omp_get_wtime() - t << "\n";
}
// Compare the results with those generated on the host
if (status == 0) {
for (int j = 0; j < n_gpus + 1; j++)
EXPECT_EQ(part_offset[j], part_offset_r[j]);
EXPECT_TRUE(gdf_csr_equal<idx_t>(csr_off, csr_ind, col_off, col_ind));
}
gdf_col_delete(col_off);
gdf_col_delete(col_ind);
gdf_col_delete(col_val);
gdf_col_delete(csr_off);
gdf_col_delete(csr_ind);
gdf_col_delete(csr_val);
gdf_col_delete(coo_row);
gdf_col_delete(coo_col);
gdf_col_delete(coo_val);
}
}
std::cout << std::endl;
}
};
TEST_P(Tests_MGcoo2csr_hibench, CheckFP32_hibench) {
run_current_test<int, float>(GetParam());
}
TEST_P(Tests_MGcoo2csr_hibench, CheckFP64_hibench) {
run_current_test<int, double>(GetParam());
}
INSTANTIATE_TEST_CASE_P(hibench_test,
Tests_MGcoo2csr_hibench,
::testing::Values(MGcoo2csr_Usecase("benchmark/hibench/1/Input-small/edges/part-00000"),
MGcoo2csr_Usecase("benchmark/hibench/1/Input-large/edges/part-00000"),
MGcoo2csr_Usecase("benchmark/hibench/1/Input-huge/edges/part-00000")));
int main(int argc, char **argv) {
srand(42);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
122a125025322ba6b0c59b342e8cea98cb6193fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scan.cuh"
#include "segmented_scan.cuh"
#include "segmented_scan_helpers.cuh"
#include <contrib/libs/cub/hipcub/hipcub.hpp>
namespace NKernel
{
template<class T>
__global__ void ZeroSegmentStartsImpl(const ui32* flags, ui32 flagMask, ui32 size, T* output) {
const ui32 tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
bool segmentStart = flags[tid] & flagMask;
if (segmentStart) {
output[tid] = 0;
}
}
}
template<typename T>
hipError_t SegmentedScanCub(const T* input, const ui32* flags, ui32 flagMask,
T* output,
ui32 size, bool inclusive,
TScanKernelContext<T>& context,
TCudaStream stream) {
if (inclusive) {
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, true>;
TInput inputIter(input, flags, flagMask);
TOutput outputIter(output, output + size);
return hipcub::DeviceScan::InclusiveScan<TInput, TOutput>(context.PartResults, context.NumParts, inputIter, outputIter, TSegmentedSum(), size, stream);
} else {
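            // Exclusive variant: run the segmented inclusive scan first, then
            // zero the output at every position where a new segment starts.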
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, false>;
TInput inputIter(input, flags, flagMask);
TOutput outputIter(output, output + size);
hipError_t errorCode = hipcub::DeviceScan::InclusiveScan<TInput, TOutput>(context.PartResults, context.NumParts, inputIter, outputIter, TSegmentedSum(), size, stream);
{
ui32 blockSize = 256;
ui32 numBlocks = CeilDivide<ui32>(size, blockSize);
hipLaunchKernelGGL(( ZeroSegmentStartsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, flags, flagMask, size, output);
}
return errorCode;
}
}
template <class T>
ui64 SegmentedScanVectorTempSize(ui32 size, bool inclusive) {
(void)inclusive;
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, true>;
ui64 sizeInBytes = 0;
TInput fakeInput((T*)nullptr, (ui32*)nullptr, 0u);
TOutput fakeOutput((T*)nullptr, (T*)nullptr);
hipcub::DeviceScan::InclusiveScan<TInput, TOutput, TSegmentedSum>(nullptr, sizeInBytes, fakeInput, fakeOutput, TSegmentedSum(), size);
return sizeInBytes;
}
#define SEGMENTED_SCAN_CUB(Type)\
template hipError_t SegmentedScanCub<Type>(const Type* input, const ui32* flags, ui32 mask, Type* output, ui32 size, bool inclusive,\
TScanKernelContext<Type>& context, TCudaStream stream);
SEGMENTED_SCAN_CUB(float)
SEGMENTED_SCAN_CUB(double)
SEGMENTED_SCAN_CUB(int)
SEGMENTED_SCAN_CUB(ui32)
template ui64 SegmentedScanVectorTempSize<int>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<ui32>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<float>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<double>(ui32, bool);
}
| 122a125025322ba6b0c59b342e8cea98cb6193fa.cu | #include "scan.cuh"
#include "segmented_scan.cuh"
#include "segmented_scan_helpers.cuh"
#include <contrib/libs/cub/cub/device/device_scan.cuh>
namespace NKernel
{
template<class T>
__global__ void ZeroSegmentStartsImpl(const ui32* flags, ui32 flagMask, ui32 size, T* output) {
const ui32 tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
bool segmentStart = flags[tid] & flagMask;
if (segmentStart) {
output[tid] = 0;
}
}
}
template<typename T>
cudaError_t SegmentedScanCub(const T* input, const ui32* flags, ui32 flagMask,
T* output,
ui32 size, bool inclusive,
TScanKernelContext<T>& context,
TCudaStream stream) {
if (inclusive) {
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, true>;
TInput inputIter(input, flags, flagMask);
TOutput outputIter(output, output + size);
return cub::DeviceScan::InclusiveScan<TInput, TOutput>(context.PartResults, context.NumParts, inputIter, outputIter, TSegmentedSum(), size, stream);
} else {
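            // Exclusive variant: run the segmented inclusive scan first, then
            // zero the output at every position where a new segment starts.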
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, false>;
TInput inputIter(input, flags, flagMask);
TOutput outputIter(output, output + size);
cudaError_t errorCode = cub::DeviceScan::InclusiveScan<TInput, TOutput>(context.PartResults, context.NumParts, inputIter, outputIter, TSegmentedSum(), size, stream);
{
ui32 blockSize = 256;
ui32 numBlocks = CeilDivide<ui32>(size, blockSize);
ZeroSegmentStartsImpl<<<numBlocks, blockSize, 0, stream>>>(flags, flagMask, size, output);
}
return errorCode;
}
}
template <class T>
ui64 SegmentedScanVectorTempSize(ui32 size, bool inclusive) {
(void)inclusive;
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, true>;
ui64 sizeInBytes = 0;
TInput fakeInput((T*)nullptr, (ui32*)nullptr, 0u);
TOutput fakeOutput((T*)nullptr, (T*)nullptr);
cub::DeviceScan::InclusiveScan<TInput, TOutput, TSegmentedSum>(nullptr, sizeInBytes, fakeInput, fakeOutput, TSegmentedSum(), size);
return sizeInBytes;
}
#define SEGMENTED_SCAN_CUB(Type)\
template cudaError_t SegmentedScanCub<Type>(const Type* input, const ui32* flags, ui32 mask, Type* output, ui32 size, bool inclusive,\
TScanKernelContext<Type>& context, TCudaStream stream);
SEGMENTED_SCAN_CUB(float)
SEGMENTED_SCAN_CUB(double)
SEGMENTED_SCAN_CUB(int)
SEGMENTED_SCAN_CUB(ui32)
template ui64 SegmentedScanVectorTempSize<int>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<ui32>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<float>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<double>(ui32, bool);
}
|
e05a81a29ee88549487518f9c1ae93ef47bb6e37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define CHECK(call)\
{\
const hipError_t error = call;\
if (error != hipSuccess)\
{\
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\
fprintf(stderr, "code: %d, reason: %s\n", error,\
hipGetErrorString(error));\
exit(EXIT_FAILURE);\
}\
}
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
hipEventSynchronize(start);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
__global__ void reduceBlksKernel1(int * in, int n, int * out)
{
// TODO
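    // Interleaved addressing: thread tx owns elements 2*tx and 2*tx+1 of the
    // block's chunk; the modulo test leaves active threads scattered across
    // each warp, so branching is highly divergent.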
int i = blockIdx.x * blockDim.x * 2 + threadIdx.x * 2;
for (int stride = 1; stride < 2 * blockDim.x; stride *= 2)
{
if ((threadIdx.x % stride) == 0)
if (i + stride < n)
in[i] += in[i + stride];
__syncthreads(); // Synchronize within each block
}
if (threadIdx.x == 0)
out[blockIdx.x] = in[blockIdx.x * blockDim.x * 2];
}
__global__ void reduceBlksKernel2(int * in, int n, int * out)
{
// TODO
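    // Interleaved addressing again, but threads are re-indexed by 2*stride so
    // the active threads stay packed at the front of the block and whole
    // warps retire together.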
int numElemsBeforeBlk = blockIdx.x * blockDim.x * 2;
for (int stride = 1; stride < 2 * blockDim.x; stride *= 2)
{
int i = numElemsBeforeBlk + threadIdx.x * 2 * stride;
if (threadIdx.x < (blockDim.x / stride))
if (i + stride < n)
in[i] += in[i + stride];
__syncthreads(); // Synchronize within each block
}
if (threadIdx.x == 0)
out[blockIdx.x] = in[numElemsBeforeBlk];
}
__global__ void reduceBlksKernel3(int * in, int n, int * out)
{
// TODO
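    // Sequential addressing: the stride starts at blockDim.x and halves each
    // step; every active thread adds the element one stride ahead, keeping
    // both the active threads and their memory accesses contiguous.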
int numElemsBeforeBlk = blockIdx.x * blockDim.x * 2;
for (int stride = blockDim.x; stride > 0; stride /= 2)
{
int i = numElemsBeforeBlk + threadIdx.x;
if (threadIdx.x < stride)
                if (i + stride < n) // guard the partial tail block
                    in[i] += in[i + stride];
__syncthreads(); // Synchronize within each block
}
if (threadIdx.x == 0)
out[blockIdx.x] = in[numElemsBeforeBlk];
}
int reduce(int const * in, int n,
bool useDevice=false, dim3 blockSize=dim3(1), int kernelType=1)
{
int result = 0; // Init
if (useDevice == false)
{
result = in[0];
for (int i = 1; i < n; i++)
{
result += in[i];
}
}
else // Use device
{
// Allocate device memories
int * d_in, * d_out;
// dim3 gridSize(1); // TODO: Compute gridSize from n and blockSize
dim3 gridSize(n / (2 * blockSize.x) + 1);
CHECK(hipMalloc(&d_in, n * sizeof(int)));
CHECK(hipMalloc(&d_out, gridSize.x * sizeof(int)));
// Copy data to device memory
CHECK(hipMemcpy(d_in, in, n * sizeof(int), hipMemcpyHostToDevice));
// Call kernel
GpuTimer timer;
timer.Start();
if (kernelType == 1)
hipLaunchKernelGGL(( reduceBlksKernel1), dim3(gridSize), dim3(blockSize), 0, 0, d_in, n, d_out);
else if (kernelType == 2)
hipLaunchKernelGGL(( reduceBlksKernel2), dim3(gridSize), dim3(blockSize), 0, 0, d_in, n, d_out);
else
hipLaunchKernelGGL(( reduceBlksKernel3), dim3(gridSize), dim3(blockSize), 0, 0, d_in, n, d_out);
timer.Stop();
float kernelTime = timer.Elapsed();
hipDeviceSynchronize();
CHECK(hipGetLastError());
// Copy result from device memory
int * out = (int *)malloc(gridSize.x * sizeof(int));
CHECK(hipMemcpy(out, d_out, gridSize.x * sizeof(int), hipMemcpyDeviceToHost));
// Free device memories
CHECK(hipFree(d_in));
CHECK(hipFree(d_out));
        // Host does the rest of the work
timer.Start();
result = out[0];
for (int i = 1; i < gridSize.x; i++)
result += out[i];
timer.Stop();
float postKernelTime = timer.Elapsed();
// Free memory
free(out);
// Print info
printf("\nKernel %d\n", kernelType);
printf("Grid size: %d, block size: %d\n", gridSize.x, blockSize.x);
printf("Kernel time = %f ms, post-kernel time = %f ms\n", kernelTime, postKernelTime);
}
return result;
}
void checkCorrectness(int r1, int r2)
{
if (r1 == r2)
printf("CORRECT :)\n");
else
printf("INCORRECT :(\n");
}
void printDeviceInfo()
{
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %lu bytes\n", devProv.totalGlobalMem);
printf("****************************\n\n");
}
int main(int argc, char ** argv)
{
printDeviceInfo();
// Set up input size
int n = (1 << 24) + 1;
printf("Input size: %d\n", n);
// Set up input data
int * in = (int *) malloc(n * sizeof(int));
for (int i = 0; i < n; i++)
{
// Generate a random integer in [0, 255]
in[i] = (int)(rand() & 0xFF);
}
// Reduce NOT using device
int correctResult = reduce(in, n);
// Reduce using device, kernel1
dim3 blockSize(512); // Default
if (argc == 2)
blockSize.x = atoi(argv[1]);
int result1 = reduce(in, n, true, blockSize, 1);
checkCorrectness(result1, correctResult);
// Reduce using device, kernel2
int result2 = reduce(in, n, true, blockSize, 2);
checkCorrectness(result2, correctResult);
// Reduce using device, kernel3
int result3 = reduce(in, n, true, blockSize, 3);
checkCorrectness(result3, correctResult);
// Free memories
free(in);
}
| e05a81a29ee88549487518f9c1ae93ef47bb6e37.cu | #include <stdio.h>
#define CHECK(call)\
{\
const cudaError_t error = call;\
if (error != cudaSuccess)\
{\
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\
fprintf(stderr, "code: %d, reason: %s\n", error,\
cudaGetErrorString(error));\
exit(EXIT_FAILURE);\
}\
}
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
__global__ void reduceBlksKernel1(int * in, int n, int * out)
{
// TODO
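    // Interleaved addressing: thread tx owns elements 2*tx and 2*tx+1 of the
    // block's chunk; the modulo test leaves active threads scattered across
    // each warp, so branching is highly divergent.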
int i = blockIdx.x * blockDim.x * 2 + threadIdx.x * 2;
for (int stride = 1; stride < 2 * blockDim.x; stride *= 2)
{
if ((threadIdx.x % stride) == 0)
if (i + stride < n)
in[i] += in[i + stride];
__syncthreads(); // Synchronize within each block
}
if (threadIdx.x == 0)
out[blockIdx.x] = in[blockIdx.x * blockDim.x * 2];
}
__global__ void reduceBlksKernel2(int * in, int n, int * out)
{
// TODO
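    // Interleaved addressing again, but threads are re-indexed by 2*stride so
    // the active threads stay packed at the front of the block and whole
    // warps retire together.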
int numElemsBeforeBlk = blockIdx.x * blockDim.x * 2;
for (int stride = 1; stride < 2 * blockDim.x; stride *= 2)
{
int i = numElemsBeforeBlk + threadIdx.x * 2 * stride;
if (threadIdx.x < (blockDim.x / stride))
if (i + stride < n)
in[i] += in[i + stride];
__syncthreads(); // Synchronize within each block
}
if (threadIdx.x == 0)
out[blockIdx.x] = in[numElemsBeforeBlk];
}
__global__ void reduceBlksKernel3(int * in, int n, int * out)
{
// TODO
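    // Sequential addressing: the stride starts at blockDim.x and halves each
    // step; every active thread adds the element one stride ahead, keeping
    // both the active threads and their memory accesses contiguous.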
int numElemsBeforeBlk = blockIdx.x * blockDim.x * 2;
for (int stride = blockDim.x; stride > 0; stride /= 2)
{
int i = numElemsBeforeBlk + threadIdx.x;
if (threadIdx.x < stride)
            if (i + stride < n) // guard the partial tail block
                in[i] += in[i + stride];
__syncthreads(); // Synchronize within each block
}
if (threadIdx.x == 0)
out[blockIdx.x] = in[numElemsBeforeBlk];
}
int reduce(int const * in, int n,
bool useDevice=false, dim3 blockSize=dim3(1), int kernelType=1)
{
int result = 0; // Init
if (useDevice == false)
{
result = in[0];
for (int i = 1; i < n; i++)
{
result += in[i];
}
}
else // Use device
{
// Allocate device memories
int * d_in, * d_out;
// dim3 gridSize(1); // TODO: Compute gridSize from n and blockSize
dim3 gridSize(n / (2 * blockSize.x) + 1);
CHECK(cudaMalloc(&d_in, n * sizeof(int)));
CHECK(cudaMalloc(&d_out, gridSize.x * sizeof(int)));
// Copy data to device memory
CHECK(cudaMemcpy(d_in, in, n * sizeof(int), cudaMemcpyHostToDevice));
// Call kernel
GpuTimer timer;
timer.Start();
if (kernelType == 1)
reduceBlksKernel1<<<gridSize, blockSize>>>(d_in, n, d_out);
else if (kernelType == 2)
reduceBlksKernel2<<<gridSize, blockSize>>>(d_in, n, d_out);
else
reduceBlksKernel3<<<gridSize, blockSize>>>(d_in, n, d_out);
timer.Stop();
float kernelTime = timer.Elapsed();
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
// Copy result from device memory
int * out = (int *)malloc(gridSize.x * sizeof(int));
CHECK(cudaMemcpy(out, d_out, gridSize.x * sizeof(int), cudaMemcpyDeviceToHost));
// Free device memories
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_out));
    // Host does the rest of the work
timer.Start();
result = out[0];
for (int i = 1; i < gridSize.x; i++)
result += out[i];
timer.Stop();
float postKernelTime = timer.Elapsed();
// Free memory
free(out);
// Print info
printf("\nKernel %d\n", kernelType);
printf("Grid size: %d, block size: %d\n", gridSize.x, blockSize.x);
printf("Kernel time = %f ms, post-kernel time = %f ms\n", kernelTime, postKernelTime);
}
return result;
}
void checkCorrectness(int r1, int r2)
{
if (r1 == r2)
printf("CORRECT :)\n");
else
printf("INCORRECT :(\n");
}
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %lu bytes\n", devProv.totalGlobalMem);
printf("****************************\n\n");
}
int main(int argc, char ** argv)
{
printDeviceInfo();
// Set up input size
int n = (1 << 24) + 1;
printf("Input size: %d\n", n);
// Set up input data
int * in = (int *) malloc(n * sizeof(int));
for (int i = 0; i < n; i++)
{
// Generate a random integer in [0, 255]
in[i] = (int)(rand() & 0xFF);
}
// Reduce NOT using device
int correctResult = reduce(in, n);
// Reduce using device, kernel1
dim3 blockSize(512); // Default
if (argc == 2)
blockSize.x = atoi(argv[1]);
int result1 = reduce(in, n, true, blockSize, 1);
checkCorrectness(result1, correctResult);
// Reduce using device, kernel2
int result2 = reduce(in, n, true, blockSize, 2);
checkCorrectness(result2, correctResult);
// Reduce using device, kernel3
int result3 = reduce(in, n, true, blockSize, 3);
checkCorrectness(result3, correctResult);
// Free memories
free(in);
}
|
ea194b76bf2c574596dce9fb943bb7bc0eb27639.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2012-2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "interferometer/oskar_evaluate_jones_K_cuda.h"
/* Kernels. ================================================================ */
#define BLK_STATION 2
#define BLK_SOURCE 128
/* Single precision. */
__global__
void oskar_evaluate_jones_K_cudak_f(float2* restrict jones,
const int num_sources, const float* restrict l,
const float* restrict m, const float* restrict n,
const int num_stations, const float* restrict u,
const float* restrict v, const float* restrict w,
const float wavenumber, const float* restrict source_filter,
const float source_filter_min, const float source_filter_max)
{
const int s = blockDim.x * blockIdx.x + threadIdx.x; /* Source index. */
const int a = blockDim.y * blockIdx.y + threadIdx.y; /* Station index. */
/* Cache source and station data from global memory. */
__shared__ float l_[BLK_SOURCE], m_[BLK_SOURCE], n_[BLK_SOURCE];
__shared__ float f_[BLK_SOURCE];
__shared__ float u_[BLK_STATION], v_[BLK_STATION], w_[BLK_STATION];
if (s < num_sources && threadIdx.y == 0)
{
l_[threadIdx.x] = l[s];
m_[threadIdx.x] = m[s];
n_[threadIdx.x] = n[s] - 1.0f;
f_[threadIdx.x] = source_filter[s];
}
if (a < num_stations && threadIdx.x == 0)
{
u_[threadIdx.y] = wavenumber * u[a];
v_[threadIdx.y] = wavenumber * v[a];
w_[threadIdx.y] = wavenumber * w[a];
}
__syncthreads();
/* Compute the geometric phase of the source direction. */
float2 weight = make_float2(0.0f, 0.0f);
if (f_[threadIdx.x] > source_filter_min &&
f_[threadIdx.x] <= source_filter_max)
{
float phase;
phase = u_[threadIdx.y] * l_[threadIdx.x];
phase += v_[threadIdx.y] * m_[threadIdx.x];
phase += w_[threadIdx.y] * n_[threadIdx.x];
sincosf(phase, &weight.y, &weight.x);
}
/* Write result to global memory. */
if (s < num_sources && a < num_stations)
jones[s + num_sources * a] = weight;
}
/* Double precision. */
__global__
void oskar_evaluate_jones_K_cudak_d(double2* restrict jones,
const int num_sources, const double* restrict l,
const double* restrict m, const double* restrict n,
const int num_stations, const double* restrict u,
const double* restrict v, const double* restrict w,
const double wavenumber, const double* restrict source_filter,
const double source_filter_min, const double source_filter_max)
{
const int s = blockDim.x * blockIdx.x + threadIdx.x; /* Source index. */
const int a = blockDim.y * blockIdx.y + threadIdx.y; /* Station index. */
/* Cache source and station data from global memory. */
__shared__ double l_[BLK_SOURCE], m_[BLK_SOURCE], n_[BLK_SOURCE];
__shared__ double f_[BLK_SOURCE];
__shared__ double u_[BLK_STATION], v_[BLK_STATION], w_[BLK_STATION];
if (s < num_sources && threadIdx.y == 0)
{
l_[threadIdx.x] = l[s];
m_[threadIdx.x] = m[s];
n_[threadIdx.x] = n[s] - 1.0;
f_[threadIdx.x] = source_filter[s];
}
if (a < num_stations && threadIdx.x == 0)
{
u_[threadIdx.y] = wavenumber * u[a];
v_[threadIdx.y] = wavenumber * v[a];
w_[threadIdx.y] = wavenumber * w[a];
}
__syncthreads();
/* Compute the geometric phase of the source direction. */
double2 weight = make_double2(0.0, 0.0);
if (f_[threadIdx.x] > source_filter_min &&
f_[threadIdx.x] <= source_filter_max)
{
double phase;
phase = u_[threadIdx.y] * l_[threadIdx.x];
phase += v_[threadIdx.y] * m_[threadIdx.x];
phase += w_[threadIdx.y] * n_[threadIdx.x];
sincos(phase, &weight.y, &weight.x);
}
/* Write result to global memory. */
if (s < num_sources && a < num_stations)
jones[s + num_sources * a] = weight;
}
#ifdef __cplusplus
extern "C" {
#endif
/* Kernel wrappers. ======================================================== */
/* Single precision. */
void oskar_evaluate_jones_K_cuda_f(float2* d_jones, int num_sources,
const float* d_l, const float* d_m, const float* d_n,
int num_stations, const float* d_u, const float* d_v,
const float* d_w, float wavenumber, const float* d_source_filter,
float source_filter_min, float source_filter_max)
{
/* Define block and grid sizes. */
const dim3 num_threads(BLK_SOURCE, BLK_STATION);
const dim3 num_blocks((num_sources + num_threads.x - 1) / num_threads.x,
(num_stations + num_threads.y - 1) / num_threads.y);
/* Compute DFT phase weights for K. */
oskar_evaluate_jones_K_cudak_f
OSKAR_CUDAK_CONF(num_blocks, num_threads)
(d_jones, num_sources, d_l, d_m, d_n, num_stations, d_u, d_v, d_w,
wavenumber, d_source_filter, source_filter_min, source_filter_max);
}
/* Double precision. */
void oskar_evaluate_jones_K_cuda_d(double2* d_jones, int num_sources,
const double* d_l, const double* d_m, const double* d_n,
int num_stations, const double* d_u, const double* d_v,
const double* d_w, double wavenumber, const double* d_source_filter,
double source_filter_min, double source_filter_max)
{
/* Define block and grid sizes. */
const dim3 num_threads(BLK_SOURCE, BLK_STATION);
const dim3 num_blocks((num_sources + num_threads.x - 1) / num_threads.x,
(num_stations + num_threads.y - 1) / num_threads.y);
/* Compute DFT phase weights for K. */
oskar_evaluate_jones_K_cudak_d
OSKAR_CUDAK_CONF(num_blocks, num_threads)
(d_jones, num_sources, d_l, d_m, d_n, num_stations, d_u, d_v, d_w,
wavenumber, d_source_filter, source_filter_min, source_filter_max);
}
#ifdef __cplusplus
}
#endif
| ea194b76bf2c574596dce9fb943bb7bc0eb27639.cu | /*
* Copyright (c) 2012-2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "interferometer/oskar_evaluate_jones_K_cuda.h"
/* Kernels. ================================================================ */
#define BLK_STATION 2
#define BLK_SOURCE 128
/* Single precision. */
__global__
void oskar_evaluate_jones_K_cudak_f(float2* restrict jones,
const int num_sources, const float* restrict l,
const float* restrict m, const float* restrict n,
const int num_stations, const float* restrict u,
const float* restrict v, const float* restrict w,
const float wavenumber, const float* restrict source_filter,
const float source_filter_min, const float source_filter_max)
{
const int s = blockDim.x * blockIdx.x + threadIdx.x; /* Source index. */
const int a = blockDim.y * blockIdx.y + threadIdx.y; /* Station index. */
/* Cache source and station data from global memory. */
__shared__ float l_[BLK_SOURCE], m_[BLK_SOURCE], n_[BLK_SOURCE];
__shared__ float f_[BLK_SOURCE];
__shared__ float u_[BLK_STATION], v_[BLK_STATION], w_[BLK_STATION];
if (s < num_sources && threadIdx.y == 0)
{
l_[threadIdx.x] = l[s];
m_[threadIdx.x] = m[s];
n_[threadIdx.x] = n[s] - 1.0f;
f_[threadIdx.x] = source_filter[s];
}
if (a < num_stations && threadIdx.x == 0)
{
u_[threadIdx.y] = wavenumber * u[a];
v_[threadIdx.y] = wavenumber * v[a];
w_[threadIdx.y] = wavenumber * w[a];
}
__syncthreads();
/* Compute the geometric phase of the source direction. */
float2 weight = make_float2(0.0f, 0.0f);
if (f_[threadIdx.x] > source_filter_min &&
f_[threadIdx.x] <= source_filter_max)
{
float phase;
phase = u_[threadIdx.y] * l_[threadIdx.x];
phase += v_[threadIdx.y] * m_[threadIdx.x];
phase += w_[threadIdx.y] * n_[threadIdx.x];
sincosf(phase, &weight.y, &weight.x);
}
/* Write result to global memory. */
if (s < num_sources && a < num_stations)
jones[s + num_sources * a] = weight;
}
/* Double precision. */
__global__
void oskar_evaluate_jones_K_cudak_d(double2* restrict jones,
const int num_sources, const double* restrict l,
const double* restrict m, const double* restrict n,
const int num_stations, const double* restrict u,
const double* restrict v, const double* restrict w,
const double wavenumber, const double* restrict source_filter,
const double source_filter_min, const double source_filter_max)
{
const int s = blockDim.x * blockIdx.x + threadIdx.x; /* Source index. */
const int a = blockDim.y * blockIdx.y + threadIdx.y; /* Station index. */
/* Cache source and station data from global memory. */
__shared__ double l_[BLK_SOURCE], m_[BLK_SOURCE], n_[BLK_SOURCE];
__shared__ double f_[BLK_SOURCE];
__shared__ double u_[BLK_STATION], v_[BLK_STATION], w_[BLK_STATION];
if (s < num_sources && threadIdx.y == 0)
{
l_[threadIdx.x] = l[s];
m_[threadIdx.x] = m[s];
n_[threadIdx.x] = n[s] - 1.0;
f_[threadIdx.x] = source_filter[s];
}
if (a < num_stations && threadIdx.x == 0)
{
u_[threadIdx.y] = wavenumber * u[a];
v_[threadIdx.y] = wavenumber * v[a];
w_[threadIdx.y] = wavenumber * w[a];
}
__syncthreads();
/* Compute the geometric phase of the source direction. */
double2 weight = make_double2(0.0, 0.0);
if (f_[threadIdx.x] > source_filter_min &&
f_[threadIdx.x] <= source_filter_max)
{
double phase;
phase = u_[threadIdx.y] * l_[threadIdx.x];
phase += v_[threadIdx.y] * m_[threadIdx.x];
phase += w_[threadIdx.y] * n_[threadIdx.x];
sincos(phase, &weight.y, &weight.x);
}
/* Write result to global memory. */
if (s < num_sources && a < num_stations)
jones[s + num_sources * a] = weight;
}
#ifdef __cplusplus
extern "C" {
#endif
/* Kernel wrappers. ======================================================== */
/* Single precision. */
void oskar_evaluate_jones_K_cuda_f(float2* d_jones, int num_sources,
const float* d_l, const float* d_m, const float* d_n,
int num_stations, const float* d_u, const float* d_v,
const float* d_w, float wavenumber, const float* d_source_filter,
float source_filter_min, float source_filter_max)
{
/* Define block and grid sizes. */
const dim3 num_threads(BLK_SOURCE, BLK_STATION);
const dim3 num_blocks((num_sources + num_threads.x - 1) / num_threads.x,
(num_stations + num_threads.y - 1) / num_threads.y);
/* Compute DFT phase weights for K. */
oskar_evaluate_jones_K_cudak_f
OSKAR_CUDAK_CONF(num_blocks, num_threads)
(d_jones, num_sources, d_l, d_m, d_n, num_stations, d_u, d_v, d_w,
wavenumber, d_source_filter, source_filter_min, source_filter_max);
}
/* Double precision. */
void oskar_evaluate_jones_K_cuda_d(double2* d_jones, int num_sources,
const double* d_l, const double* d_m, const double* d_n,
int num_stations, const double* d_u, const double* d_v,
const double* d_w, double wavenumber, const double* d_source_filter,
double source_filter_min, double source_filter_max)
{
/* Define block and grid sizes. */
const dim3 num_threads(BLK_SOURCE, BLK_STATION);
const dim3 num_blocks((num_sources + num_threads.x - 1) / num_threads.x,
(num_stations + num_threads.y - 1) / num_threads.y);
/* Compute DFT phase weights for K. */
oskar_evaluate_jones_K_cudak_d
OSKAR_CUDAK_CONF(num_blocks, num_threads)
(d_jones, num_sources, d_l, d_m, d_n, num_stations, d_u, d_v, d_w,
wavenumber, d_source_filter, source_filter_min, source_filter_max);
}
#ifdef __cplusplus
}
#endif
|
9742e2298d4fc6fc5debe9c8d9b4e19eadefb69d.hip | // !!! This is a file automatically generated by hipify!!!
#include "heap.h"
#include "astar_gpu.h"
#include "cuda_utils.h"
#include <stdlib.h>
#include <stdio.h>
__device__ static void swap(state **s1, state **s2);
heap **heaps_create(int k) {
heap **Q_cpu = (heap**)malloc(k * sizeof(heap*));
heap **Q_dev = NULL;
for (int i = 0; i < k; i++) {
Q_cpu[i] = heap_create(16 * 1024);
}
HANDLE_RESULT(hipMalloc(&Q_dev, k * sizeof(heap*)));
HANDLE_RESULT(hipMemcpy(Q_dev, Q_cpu, k * sizeof(heap*), hipMemcpyDefault));
free(Q_cpu);
return Q_dev;
}
heap *heap_create(int capacity) {
heap heap_cpu;
heap *heap_dev;
heap_cpu.size = 0;
HANDLE_RESULT(hipMalloc(&(heap_cpu.states), (capacity + 1) * sizeof(state*)));
HANDLE_RESULT(hipMemset(heap_cpu.states, 0, (capacity + 1) * sizeof(state*)));
HANDLE_RESULT(hipMalloc(&heap_dev, sizeof(heap)));
HANDLE_RESULT(hipMemcpy(heap_dev, &heap_cpu, sizeof(heap), hipMemcpyDefault));
return heap_dev;
}
void heaps_destroy(heap **Q_dev, int k) {
heap **Q_cpu = (heap**)malloc(k * sizeof(heap*));
HANDLE_RESULT(hipMemcpy(Q_cpu, Q_dev, k * sizeof(heap*), hipMemcpyDefault));
for (int i = 0; i < k; i++) {
heap_destroy(Q_cpu[i]);
}
free(Q_cpu);
HANDLE_RESULT(hipFree(Q_dev));
}
void heap_destroy(heap *heap_dev) {
heap heap_cpu;
HANDLE_RESULT(hipMemcpy(&heap_cpu, heap_dev, sizeof(heap), hipMemcpyDefault));
HANDLE_RESULT(hipFree(heap_cpu.states));
HANDLE_RESULT(hipFree(heap_dev));
}
__device__ void heap_insert(heap *heap, state *state) {
heap->size++;
heap->states[heap->size] = state;
int current = heap->size;
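	// Sift the new state up while its f-value is smaller than its parent's.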
while (current > 1 && heap->states[current]->f < heap->states[current / 2]->f) {
swap(&(heap->states[current]), &(heap->states[current / 2]));
current /= 2;
}
}
__device__ state *heap_extract(heap *heap) {
state *res = heap->states[1];
heap->states[1] = heap->states[heap->size];
heap->states[heap->size] = NULL;
heap->size--;
int current = 1;
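	// Sift the moved state down, swapping with the smaller child by f-value,
	// until the heap property is restored.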
while (current < heap->size) {
int smallest = current;
int child = 2 * current;
if (child <= heap->size && heap->states[child]->f < heap->states[smallest]->f) {
smallest = child;
}
child = 2 * current + 1;
if (child <= heap->size && heap->states[child]->f < heap->states[smallest]->f) {
smallest = child;
}
if (smallest == current) {
break;
}
swap(&(heap->states[current]), &(heap->states[smallest]));
current = smallest;
}
return res;
}
__device__ bool heaps_empty(heap **heaps, int k) {
for (int i = 0; i < k; i++) {
if (heaps[i]->size != 0) return false;
}
return true;
}
__device__ int heaps_min(heap **heaps, int k) {
int best_f = INT_MAX;
for (int i = 0; i < k; i++) {
state *current_best = heaps[i]->states[1];
if (current_best != NULL && current_best->f < best_f) {
best_f = current_best->f;
}
}
return best_f;
}
__device__ static void swap(state **s1, state **s2) {
state *tmp = *s1;
*s1 = *s2;
*s2 = tmp;
}
| 9742e2298d4fc6fc5debe9c8d9b4e19eadefb69d.cu | #include "heap.h"
#include "astar_gpu.h"
#include "cuda_utils.h"
#include <stdlib.h>
#include <stdio.h>
__device__ static void swap(state **s1, state **s2);
heap **heaps_create(int k) {
heap **Q_cpu = (heap**)malloc(k * sizeof(heap*));
heap **Q_dev = NULL;
for (int i = 0; i < k; i++) {
Q_cpu[i] = heap_create(16 * 1024);
}
HANDLE_RESULT(cudaMalloc(&Q_dev, k * sizeof(heap*)));
HANDLE_RESULT(cudaMemcpy(Q_dev, Q_cpu, k * sizeof(heap*), cudaMemcpyDefault));
free(Q_cpu);
return Q_dev;
}
heap *heap_create(int capacity) {
heap heap_cpu;
heap *heap_dev;
heap_cpu.size = 0;
HANDLE_RESULT(cudaMalloc(&(heap_cpu.states), (capacity + 1) * sizeof(state*)));
HANDLE_RESULT(cudaMemset(heap_cpu.states, 0, (capacity + 1) * sizeof(state*)));
HANDLE_RESULT(cudaMalloc(&heap_dev, sizeof(heap)));
HANDLE_RESULT(cudaMemcpy(heap_dev, &heap_cpu, sizeof(heap), cudaMemcpyDefault));
return heap_dev;
}
void heaps_destroy(heap **Q_dev, int k) {
heap **Q_cpu = (heap**)malloc(k * sizeof(heap*));
HANDLE_RESULT(cudaMemcpy(Q_cpu, Q_dev, k * sizeof(heap*), cudaMemcpyDefault));
for (int i = 0; i < k; i++) {
heap_destroy(Q_cpu[i]);
}
free(Q_cpu);
HANDLE_RESULT(cudaFree(Q_dev));
}
void heap_destroy(heap *heap_dev) {
heap heap_cpu;
HANDLE_RESULT(cudaMemcpy(&heap_cpu, heap_dev, sizeof(heap), cudaMemcpyDefault));
HANDLE_RESULT(cudaFree(heap_cpu.states));
HANDLE_RESULT(cudaFree(heap_dev));
}
__device__ void heap_insert(heap *heap, state *state) {
heap->size++;
heap->states[heap->size] = state;
int current = heap->size;
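	// Sift the new state up while its f-value is smaller than its parent's.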
while (current > 1 && heap->states[current]->f < heap->states[current / 2]->f) {
swap(&(heap->states[current]), &(heap->states[current / 2]));
current /= 2;
}
}
__device__ state *heap_extract(heap *heap) {
state *res = heap->states[1];
heap->states[1] = heap->states[heap->size];
heap->states[heap->size] = NULL;
heap->size--;
int current = 1;
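	// Sift the moved state down, swapping with the smaller child by f-value,
	// until the heap property is restored.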
while (current < heap->size) {
int smallest = current;
int child = 2 * current;
if (child <= heap->size && heap->states[child]->f < heap->states[smallest]->f) {
smallest = child;
}
child = 2 * current + 1;
if (child <= heap->size && heap->states[child]->f < heap->states[smallest]->f) {
smallest = child;
}
if (smallest == current) {
break;
}
swap(&(heap->states[current]), &(heap->states[smallest]));
current = smallest;
}
return res;
}
__device__ bool heaps_empty(heap **heaps, int k) {
for (int i = 0; i < k; i++) {
if (heaps[i]->size != 0) return false;
}
return true;
}
__device__ int heaps_min(heap **heaps, int k) {
int best_f = INT_MAX;
for (int i = 0; i < k; i++) {
state *current_best = heaps[i]->states[1];
if (current_best != NULL && current_best->f < best_f) {
best_f = current_best->f;
}
}
return best_f;
}
__device__ static void swap(state **s1, state **s2) {
state *tmp = *s1;
*s1 = *s2;
*s2 = tmp;
}
|
56dd93a147c158001920d0b64a861be411f931e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <float.h>
__global__ void sdt_compute(unsigned char *img, int *sz, float *sdt, int sz_edge, int width, float *d_min, int start, int val)
{
int tx = threadIdx.x + blockDim.x*blockIdx.x;
extern __shared__ int ep[];
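    // Stage this chunk of edge-pixel indices in shared memory before scanning it.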
for(int i=start, j=0;i< val; i++){
ep[j++] = sz[i];
}
__syncthreads();
float min_dist, dist2;
min_dist = d_min[tx];
float _x, _y;
float sign;
float dx, dy;
int x = tx % width;
int y = tx / width;
for(int k=0; k<val-start; k++)
{
_x = ep[k] % width;
_y = ep[k] / width;
dx = _x - x;
dy = _y - y;
dist2 = dx*dx + dy*dy;
if(dist2 < min_dist) min_dist = dist2;
}
d_min[tx] = min_dist;
}
__global__ void final_comp(unsigned char *img, float *d_min, float *d_min2, float *sdt)
{
float sign;
int tx = threadIdx.x + blockDim.x*blockIdx.x;
sign = (img[tx] >= 127)? 1.0f : -1.0f;
float dm = d_min[tx];
if(dm > d_min2[tx])
dm = d_min2[tx];
sdt[tx] = sign * sqrtf(dm);
}
extern "C" void run_sampleKernel(unsigned char * bitmap, float *sdt, int width, int height)
{
//Collect all edge pixels in an array
int sz = width*height;
int sz_edge = 0;
for(int i = 0; i<sz; i++) if(bitmap[i] == 255) sz_edge++;
int *edge_pixels = new int[sz_edge];
for(int i = 0, j = 0; i<sz; i++) if(bitmap[i] == 255) edge_pixels[j++] = i;
std::cout<< "\t"<<sz_edge << " edge pixels in the image of size " << width << " x " << height << "\n"<<std::flush;
int *d_sz;
float *temp_min;
unsigned char *d_img;
float *d_sdt, *d_min, *d_min2;
hipHostMalloc(&temp_min,height*width*sizeof(float));
// temp_min = new float[height*width];
for(int i=0;i<height*width;i++){
temp_min[i] = FLT_MAX;
}
hipStream_t stream1, stream2;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
// hipStreamCreateWithFlags(&stream1,hipStreamNonBlocking);
hipMalloc((void**)&d_sz, sz_edge*sizeof(int));
hipMalloc((void**)&d_img, height*width*sizeof(unsigned char));
hipMalloc((void**)&d_sdt, height*width*sizeof(float));
hipMalloc((void**)&d_min, height*width*sizeof(float));
hipMalloc((void**)&d_min2, height*width*sizeof(float));
hipMemcpyAsync(d_img, bitmap, width*height*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpyAsync(d_min, temp_min, width*height*sizeof(float), hipMemcpyHostToDevice);
hipMemcpyAsync(d_min2, temp_min, width*height*sizeof(float), hipMemcpyHostToDevice);
hipMemcpyAsync(d_sz, edge_pixels, sz_edge*sizeof(int), hipMemcpyHostToDevice);
int divisions = 20;
int val_div = sz_edge/divisions;
int n, m;
m =1;
for(n =0; n<divisions; n+=2){
hipLaunchKernelGGL(( sdt_compute), dim3((height*width)/256), dim3(256), val_div*sizeof(int), stream2, d_img, d_sz, d_sdt, sz_edge, width, d_min, n*val_div, (n+1)*val_div);
hipLaunchKernelGGL(( sdt_compute), dim3((height*width)/256), dim3(256), val_div*sizeof(int), stream1, d_img, d_sz, d_sdt, sz_edge, width, d_min2, m*val_div, (m+1)*val_div);
m += 2;
}
// std::cout<<n<<std::endl;
if((sz_edge%divisions) !=0){
hipLaunchKernelGGL(( sdt_compute), dim3((height*width)/256), dim3(256), (sz_edge%divisions)*sizeof(int), 0, d_img, d_sz, d_sdt, sz_edge, width, d_min, (m-1)*val_div, (m-1)*val_div + sz_edge%divisions);
}
hipLaunchKernelGGL(( final_comp), dim3((height*width)/256), dim3(256), 0, 0, d_img, d_min, d_min2, d_sdt);
hipDeviceSynchronize();
hipMemcpy(sdt, d_sdt, height*width*sizeof(float), hipMemcpyDeviceToHost);
}
| 56dd93a147c158001920d0b64a861be411f931e2.cu | #include <iostream>
#include <math.h>
#include <float.h>
__global__ void sdt_compute(unsigned char *img, int *sz, float *sdt, int sz_edge, int width, float *d_min, int start, int val)
{
int tx = threadIdx.x + blockDim.x*blockIdx.x;
extern __shared__ int ep[];
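    // Stage this chunk of edge-pixel indices in shared memory before scanning it.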
for(int i=start, j=0;i< val; i++){
ep[j++] = sz[i];
}
__syncthreads();
float min_dist, dist2;
min_dist = d_min[tx];
float _x, _y;
float sign;
float dx, dy;
int x = tx % width;
int y = tx / width;
for(int k=0; k<val-start; k++)
{
_x = ep[k] % width;
_y = ep[k] / width;
dx = _x - x;
dy = _y - y;
dist2 = dx*dx + dy*dy;
if(dist2 < min_dist) min_dist = dist2;
}
d_min[tx] = min_dist;
}
__global__ void final_comp(unsigned char *img, float *d_min, float *d_min2, float *sdt)
{
float sign;
int tx = threadIdx.x + blockDim.x*blockIdx.x;
sign = (img[tx] >= 127)? 1.0f : -1.0f;
float dm = d_min[tx];
if(dm > d_min2[tx])
dm = d_min2[tx];
sdt[tx] = sign * sqrtf(dm);
}
extern "C" void run_sampleKernel(unsigned char * bitmap, float *sdt, int width, int height)
{
//Collect all edge pixels in an array
int sz = width*height;
int sz_edge = 0;
for(int i = 0; i<sz; i++) if(bitmap[i] == 255) sz_edge++;
int *edge_pixels = new int[sz_edge];
for(int i = 0, j = 0; i<sz; i++) if(bitmap[i] == 255) edge_pixels[j++] = i;
std::cout<< "\t"<<sz_edge << " edge pixels in the image of size " << width << " x " << height << "\n"<<std::flush;
int *d_sz;
float *temp_min;
unsigned char *d_img;
float *d_sdt, *d_min, *d_min2;
cudaMallocHost(&temp_min,height*width*sizeof(float));
// temp_min = new float[height*width];
for(int i=0;i<height*width;i++){
temp_min[i] = FLT_MAX;
}
cudaStream_t stream1, stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
// cudaStreamCreateWithFlags(&stream1,cudaStreamNonBlocking);
cudaMalloc((void**)&d_sz, sz_edge*sizeof(int));
cudaMalloc((void**)&d_img, height*width*sizeof(unsigned char));
cudaMalloc((void**)&d_sdt, height*width*sizeof(float));
cudaMalloc((void**)&d_min, height*width*sizeof(float));
cudaMalloc((void**)&d_min2, height*width*sizeof(float));
cudaMemcpyAsync(d_img, bitmap, width*height*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpyAsync(d_min, temp_min, width*height*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpyAsync(d_min2, temp_min, width*height*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpyAsync(d_sz, edge_pixels, sz_edge*sizeof(int), cudaMemcpyHostToDevice);
int divisions = 20;
int val_div = sz_edge/divisions;
int n, m;
m =1;
for(n =0; n<divisions; n+=2){
sdt_compute<<<(height*width)/256, 256, val_div*sizeof(int), stream2>>>(d_img, d_sz, d_sdt, sz_edge, width, d_min, n*val_div, (n+1)*val_div);
sdt_compute<<<(height*width)/256, 256, val_div*sizeof(int), stream1>>>(d_img, d_sz, d_sdt, sz_edge, width, d_min2, m*val_div, (m+1)*val_div);
m += 2;
}
// std::cout<<n<<std::endl;
if((sz_edge%divisions) !=0){
sdt_compute<<<(height*width)/256, 256, (sz_edge%divisions)*sizeof(int)>>>(d_img, d_sz, d_sdt, sz_edge, width, d_min, (m-1)*val_div, (m-1)*val_div + sz_edge%divisions);
}
final_comp<<<(height*width)/256, 256>>>(d_img, d_min, d_min2, d_sdt);
cudaDeviceSynchronize();
cudaMemcpy(sdt, d_sdt, height*width*sizeof(float), cudaMemcpyDeviceToHost);
}
|
2864a3d908361a3b23b8f47e8bb23dc1a67943d3.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::host_vector<int> host_input(idata, idata + n);
thrust::device_vector<int> dev_input = host_input;
//thrust::host_vector<int> host_output(odata, odata + n);
thrust::device_vector<int> dev_output(odata, odata + n);
// what happened during thrust? GPU timer malfunctioning
timer().startGpuTimer();
// call
thrust::exclusive_scan(dev_input.begin(), dev_input.end(), dev_output.begin());
timer().endGpuTimer();
thrust::copy(dev_output.begin(), dev_output.end(), odata);
}
}
}
| 2864a3d908361a3b23b8f47e8bb23dc1a67943d3.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::host_vector<int> host_input(idata, idata + n);
thrust::device_vector<int> dev_input = host_input;
//thrust::host_vector<int> host_output(odata, odata + n);
thrust::device_vector<int> dev_output(odata, odata + n);
// what happened during thrust? GPU timer malfunctioning
timer().startGpuTimer();
// call
thrust::exclusive_scan(dev_input.begin(), dev_input.end(), dev_output.begin());
timer().endGpuTimer();
thrust::copy(dev_output.begin(), dev_output.end(), odata);
}
}
}
|
4aa37aecc3cb8fd3f6244d64c076c277362616b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: summer term 2011 / 19-26th September
*
* project: gradient
* file: gradient.cu
*
*
\********* PLEASE ENTER YOUR CORRECT STUDENT NAME AND ID BELOW **************/
const char* studentName = "Sadiq Huq";
const int studentID = 3273623;
/****************************************************************************\
*
* In this file the following methods have to be edited or completed:
*
* derivativeY_sm_d(const float *inputImage, ... )
* derivativeY_sm_d(const float3 *inputImage, ... )
* gradient_magnitude_d(const float *inputImage, ... )
* gradient_magnitude_d(const float3 *inputImage, ... )
*
\****************************************************************************/
#include <cutil.h>
#include <cutil_inline.h>
#include "gradient.cuh"
#define BW 16
#define BH 16
const char* getStudentName() { return studentName; };
int getStudentID() { return studentID; };
bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
__global__ void derivativeX_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW+2][BH];
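  // Tile of the input with a one-column halo on the left and right for the
  // central difference in x.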
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else {
if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1);
else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1);
}
}
__syncthreads();
if (x < iWidth && y < iHeight)
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x+2][threadIdx.y]-u[threadIdx.x][threadIdx.y])+128;
}
__global__ void derivativeX_sm_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float3 imgValue ;
__shared__ float3 u[BW+2][BH];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else {
if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1);
else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1);
}
}
__syncthreads();
// +128 to stay within range 255
if (x < iWidth && y < iHeight) {
imgValue.x = 0.5f*(u[threadIdx.x+2][threadIdx.y].x - u[threadIdx.x][threadIdx.y].x)+128;
imgValue.y = 0.5f*(u[threadIdx.x+2][threadIdx.y].y - u[threadIdx.x][threadIdx.y].y)+128;
imgValue.z = 0.5f*(u[threadIdx.x+2][threadIdx.y].z - u[threadIdx.x][threadIdx.y].z)+128;
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue;
}
}
__global__ void derivativeY_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
// ### implement me ###
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW][BH+2];
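  // Tile of the input with a one-row halo above and below for the central
  // difference in y.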
if (x < iWidth && y < iHeight) {
u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
if (y == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (y == (iHeight-1)) u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else {
if (threadIdx.y == 0) u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + (y-1)*iPitchBytes)+x);
else if (threadIdx.y == blockDim.y-1) u[threadIdx.x][threadIdx.y+2] = *((float*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
}
__syncthreads();
if (x < iWidth && y < iHeight)
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x][threadIdx.y+2]-u[threadIdx.x][threadIdx.y])+128;
}
__global__ void derivativeY_sm_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
// ### implement me ###
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float3 imgValue ;
__shared__ float3 u[BW][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
if (y == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
    else if (y == (iHeight-1)) u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else {
if (threadIdx.y == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + (y-1)*iPitchBytes)+x);
else if (threadIdx.y == blockDim.y-1) u[threadIdx.x][threadIdx.y+2] = *((float3*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
}
__syncthreads();
// +128 to stay within range 255
if (x < iWidth && y < iHeight) {
imgValue.x = 0.5f*(u[threadIdx.x][threadIdx.y+2].x - u[threadIdx.x][threadIdx.y].x)+128;
imgValue.y = 0.5f*(u[threadIdx.x][threadIdx.y+2].y - u[threadIdx.x][threadIdx.y].y)+128;
imgValue.z = 0.5f*(u[threadIdx.x][threadIdx.y+2].z - u[threadIdx.x][threadIdx.y].z)+128;
// float3 value = make_float3(0.0f, 0.0f, 0.0f);
// *((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = value;
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue;
}
}
__global__ void gradient_magnitude_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
// ### implement me ###
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW + 2][BH+2];
if (x < iWidth && y < iHeight)
{
u[threadIdx.x + 1][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes) + x);
u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes) + x);
// BC for X
if (x == 0) // clamp left border
u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y];
else if (x == (iWidth-1)) // clamp right
u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
    else // interior pixels
{
if (threadIdx.x == 0)
u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1);
else if (threadIdx.x == blockDim.x-1)
u[threadIdx.x+2][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1);
}
// BC for Y
      if (y == 0)    // clamp top border
u[threadIdx.x + 1][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (y == (iHeight-1)) // clamp right
u[threadIdx.x + 1][threadIdx.y+2] = u[threadIdx.x + 1][threadIdx.y+1];
else // interior
{
if (threadIdx.y == 0)
u[threadIdx.x ][threadIdx.y] = *((float*)((char*)inputImage + (y-1)*iPitchBytes)+x);
else if (threadIdx.y == blockDim.y-1)
u[threadIdx.x ][threadIdx.y+2] = *((float*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
}
__syncthreads();
if (x < iWidth && y < iHeight)
{
float dx = 0.5f*(u[threadIdx.x+2][threadIdx.y]-u[threadIdx.x][threadIdx.y])+128;
float dy = 0.5f*(u[threadIdx.x][threadIdx.y+2]-u[threadIdx.x][threadIdx.y])+128;
*((float*)(((char*)outputImage) + y * iPitchBytes) + x) = sqrt(dx * dx + dy * dy);
}
}
__global__ void gradient_magnitude_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
// ### implement me ###
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float3 imgValue ;
__shared__ float3 u[BW+2][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
// BC for X
if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else {
if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1);
else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1);
}
// BC for Y
if (y == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (y == (iHeight-1)) u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else {
if (threadIdx.y == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + (y-1)*iPitchBytes)+x);
else if (threadIdx.y == blockDim.y-1) u[threadIdx.x][threadIdx.y+2] = *((float3*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
}
__syncthreads();
if (x < iWidth && y < iHeight)
{
float3 dx3 = make_float3(
0.5f*(u[threadIdx.x+2][threadIdx.y].x - u[threadIdx.x][threadIdx.y].x)+128,
0.5f*(u[threadIdx.x+2][threadIdx.y].y - u[threadIdx.x][threadIdx.y].y)+128,
0.5f*(u[threadIdx.x+2][threadIdx.y].z - u[threadIdx.x][threadIdx.y].z)+128);
float3 dy3 = make_float3(
0.5f*(u[threadIdx.x][threadIdx.y+2].x - u[threadIdx.x][threadIdx.y].x)+128,
0.5f*(u[threadIdx.x][threadIdx.y+2].y - u[threadIdx.x][threadIdx.y].y)+128,
0.5f*(u[threadIdx.x][threadIdx.y+2].z - u[threadIdx.x][threadIdx.y].z)+128 );
float3 value = make_float3(
sqrt(dx3.x * dx3.x + dy3.x * dy3.x ),
sqrt(dx3.y * dx3.y + dy3.y * dy3.y ),
sqrt(dx3.z * dx3.z + dy3.z * dy3.z ));
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = value;
// *((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = sqrt(dx3*dx3 + dy3*dy3);
}
}
void gpu_derivative_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, int iSpectrum, int mode)
{
size_t iPitchBytes;
float *inputImage_d = 0, *outputImage_d = 0;
dim3 blockSize(BW, BH);
dim3 gridSize( (int)ceil(iWidth/(float)BW), (int)ceil(iHeight/(float)BH) );
//dim3 smSize(BW+2,BH);
if(iSpectrum == 1) {
cutilSafeCall( hipMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) );
cutilSafeCall( hipMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) );
cutilSafeCall( hipMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float), iWidth*sizeof(float), iHeight, hipMemcpyHostToDevice) );
if (mode == 0)
hipLaunchKernelGGL(( derivativeX_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 1)
hipLaunchKernelGGL(( derivativeY_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 2)
hipLaunchKernelGGL(( gradient_magnitude_d), dim3(gridSize), dim3(blockSize), 0, 0, inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
cutilSafeCall( hipDeviceSynchronize() );
cutilSafeCall( hipMemcpy2D(outputImage, iWidth*sizeof(float), outputImage_d, iPitchBytes, iWidth*sizeof(float), iHeight, hipMemcpyDeviceToHost) );
}
else if(iSpectrum == 3) {
cutilSafeCall( hipMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) );
cutilSafeCall( hipMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) );
cutilSafeCall( hipMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float3), iWidth*sizeof(float3), iHeight, hipMemcpyHostToDevice) );
if (mode == 0)
hipLaunchKernelGGL(( derivativeX_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, (float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 1)
hipLaunchKernelGGL(( derivativeY_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, (float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 2)
hipLaunchKernelGGL(( gradient_magnitude_d), dim3(gridSize), dim3(blockSize), 0, 0, (float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
cutilSafeCall( hipDeviceSynchronize() );
cutilSafeCall( hipMemcpy2D(outputImage, iWidth*sizeof(float3), outputImage_d, iPitchBytes, iWidth*sizeof(float3), iHeight, hipMemcpyDeviceToHost) );
}
cutilSafeCall( hipFree(inputImage_d) );
cutilSafeCall( hipFree(outputImage_d) );
}
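// A minimal host-side usage sketch (not part of the original assignment code; the buffer
// names and sizes below are illustrative assumptions). mode selects the kernel:
// 0 = derivative in X, 1 = derivative in Y, 2 = gradient magnitude.
//
// float *img = new float[iWidth*iHeight*3]; // interleaved RGB, unpitched host memory
// float *out = new float[iWidth*iHeight*3];
// gpu_derivative_sm_d(img, out, iWidth, iHeight, 3 /*iSpectrum*/, 2 /*mode*/);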
| 4aa37aecc3cb8fd3f6244d64c076c277362616b5.cu | /****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: summer term 2011 / 19-26th September
*
* project: gradient
* file: gradient.cu
*
*
\********* PLEASE ENTER YOUR CORRECT STUDENT NAME AND ID BELOW **************/
const char* studentName = "Sadiq Huq";
const int studentID = 3273623;
/****************************************************************************\
*
* In this file the following methods have to be edited or completed:
*
* derivativeY_sm_d(const float *inputImage, ... )
* derivativeY_sm_d(const float3 *inputImage, ... )
* gradient_magnitude_d(const float *inputImage, ... )
* gradient_magnitude_d(const float3 *inputImage, ... )
*
\****************************************************************************/
#include <cutil.h>
#include <cutil_inline.h>
#include "gradient.cuh"
#define BW 16
#define BH 16
const char* getStudentName() { return studentName; };
int getStudentID() { return studentID; };
bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
__global__ void derivativeX_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW+2][BH];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else {
if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1);
else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1);
}
}
__syncthreads();
if (x < iWidth && y < iHeight)
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x+2][threadIdx.y]-u[threadIdx.x][threadIdx.y])+128;
}
__global__ void derivativeX_sm_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float3 imgValue ;
__shared__ float3 u[BW+2][BH];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else {
if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1);
else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1);
}
}
__syncthreads();
// +128 to stay within range 255
if (x < iWidth && y < iHeight) {
imgValue.x = 0.5f*(u[threadIdx.x+2][threadIdx.y].x - u[threadIdx.x][threadIdx.y].x)+128;
imgValue.y = 0.5f*(u[threadIdx.x+2][threadIdx.y].y - u[threadIdx.x][threadIdx.y].y)+128;
imgValue.z = 0.5f*(u[threadIdx.x+2][threadIdx.y].z - u[threadIdx.x][threadIdx.y].z)+128;
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue;
}
}
__global__ void derivativeY_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
// ### implement me ###
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
if (y == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (y == (iHeight-1)) u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else {
if (threadIdx.y == 0) u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + (y-1)*iPitchBytes)+x);
else if (threadIdx.y == blockDim.y-1) u[threadIdx.x][threadIdx.y+2] = *((float*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
}
__syncthreads();
if (x < iWidth && y < iHeight)
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x][threadIdx.y+2]-u[threadIdx.x][threadIdx.y])+128;
}
__global__ void derivativeY_sm_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
// ### implement me ###
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float3 imgValue ;
__shared__ float3 u[BW][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
if (y == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (y == (iHeight-1)) u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else {
if (threadIdx.y == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + (y-1)*iPitchBytes)+x);
else if (threadIdx.y == blockDim.y-1) u[threadIdx.x][threadIdx.y+2] = *((float3*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
}
__syncthreads();
// +128 to stay within range 255
if (x < iWidth && y < iHeight) {
imgValue.x = 0.5f*(u[threadIdx.x][threadIdx.y+2].x - u[threadIdx.x][threadIdx.y].x)+128;
imgValue.y = 0.5f*(u[threadIdx.x][threadIdx.y+2].y - u[threadIdx.x][threadIdx.y].y)+128;
imgValue.z = 0.5f*(u[threadIdx.x][threadIdx.y+2].z - u[threadIdx.x][threadIdx.y].z)+128;
// float3 value = make_float3(0.0f, 0.0f, 0.0f);
// *((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = value;
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue;
}
}
__global__ void gradient_magnitude_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
// ### implement me ###
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW + 2][BH+2];
if (x < iWidth && y < iHeight)
{
u[threadIdx.x + 1][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes) + x);
u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes) + x);
// BC for X
if (x == 0) // clamp left border
u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (x == (iWidth-1)) // clamp right
u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else // interior pixels
{
if (threadIdx.x == 0)
u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1);
else if (threadIdx.x == blockDim.x-1)
u[threadIdx.x+2][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1);
}
// BC for Y
if (y == 0) // clamp top border
u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (y == (iHeight-1)) // clamp bottom border
u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else // interior
{
if (threadIdx.y == 0)
u[threadIdx.x ][threadIdx.y] = *((float*)((char*)inputImage + (y-1)*iPitchBytes)+x);
else if (threadIdx.y == blockDim.y-1)
u[threadIdx.x ][threadIdx.y+2] = *((float*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
}
__syncthreads();
if (x < iWidth && y < iHeight)
{
float dx = 0.5f*(u[threadIdx.x+2][threadIdx.y]-u[threadIdx.x][threadIdx.y])+128;
float dy = 0.5f*(u[threadIdx.x][threadIdx.y+2]-u[threadIdx.x][threadIdx.y])+128;
*((float*)(((char*)outputImage) + y * iPitchBytes) + x) = sqrt(dx * dx + dy * dy);
}
}
__global__ void gradient_magnitude_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
// ### implement me ###
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float3 imgValue ;
__shared__ float3 u[BW+2][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
// BC for X
if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else {
if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1);
else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1);
}
// BC for Y
if (y == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (y == (iHeight-1)) u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else {
if (threadIdx.y == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + (y-1)*iPitchBytes)+x);
else if (threadIdx.y == blockDim.y-1) u[threadIdx.x][threadIdx.y+2] = *((float3*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
}
__syncthreads();
if (x < iWidth && y < iHeight)
{
float3 dx3 = make_float3(
0.5f*(u[threadIdx.x+2][threadIdx.y].x - u[threadIdx.x][threadIdx.y].x)+128,
0.5f*(u[threadIdx.x+2][threadIdx.y].y - u[threadIdx.x][threadIdx.y].y)+128,
0.5f*(u[threadIdx.x+2][threadIdx.y].z - u[threadIdx.x][threadIdx.y].z)+128);
float3 dy3 = make_float3(
0.5f*(u[threadIdx.x][threadIdx.y+2].x - u[threadIdx.x][threadIdx.y].x)+128,
0.5f*(u[threadIdx.x][threadIdx.y+2].y - u[threadIdx.x][threadIdx.y].y)+128,
0.5f*(u[threadIdx.x][threadIdx.y+2].z - u[threadIdx.x][threadIdx.y].z)+128 );
float3 value = make_float3(
sqrt(dx3.x * dx3.x + dy3.x * dy3.x ),
sqrt(dx3.y * dx3.y + dy3.y * dy3.y ),
sqrt(dx3.z * dx3.z + dy3.z * dy3.z ));
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = value;
// *((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = sqrt(dx3*dx3 + dy3*dy3);
}
}
void gpu_derivative_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, int iSpectrum, int mode)
{
size_t iPitchBytes;
float *inputImage_d = 0, *outputImage_d = 0;
dim3 blockSize(BW, BH);
dim3 gridSize( (int)ceil(iWidth/(float)BW), (int)ceil(iHeight/(float)BH) );
//dim3 smSize(BW+2,BH);
if(iSpectrum == 1) {
cutilSafeCall( cudaMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) );
cutilSafeCall( cudaMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) );
cutilSafeCall( cudaMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float), iWidth*sizeof(float), iHeight, cudaMemcpyHostToDevice) );
if (mode == 0)
derivativeX_sm_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 1)
derivativeY_sm_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 2)
gradient_magnitude_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
cutilSafeCall( cudaThreadSynchronize() );
cutilSafeCall( cudaMemcpy2D(outputImage, iWidth*sizeof(float), outputImage_d, iPitchBytes, iWidth*sizeof(float), iHeight, cudaMemcpyDeviceToHost) );
}
else if(iSpectrum == 3) {
cutilSafeCall( cudaMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) );
cutilSafeCall( cudaMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) );
cutilSafeCall( cudaMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float3), iWidth*sizeof(float3), iHeight, cudaMemcpyHostToDevice) );
if (mode == 0)
derivativeX_sm_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 1)
derivativeY_sm_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 2)
gradient_magnitude_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
cutilSafeCall( cudaThreadSynchronize() );
cutilSafeCall( cudaMemcpy2D(outputImage, iWidth*sizeof(float3), outputImage_d, iPitchBytes, iWidth*sizeof(float3), iHeight, cudaMemcpyDeviceToHost) );
}
cutilSafeCall( cudaFree(inputImage_d) );
cutilSafeCall( cudaFree(outputImage_d) );
}
|
07fc08c7b53383fdb6a82393d1f1538fdcd1639c.hip | // !!! This is a file automatically generated by hipify!!!
//generates fast constraint kernels
#include "runtimeKernel.h"
#include <iostream>
#include <fstream>
#include <string>
#include <hip/hiprtc.h>
#include <hip/hip_runtime.h>
#include <streambuf>
#include <vector>
#include <map>
#include <sstream>
#include <iomanip>
#include "bioCharmm.h"
#define NVRTC_SAFE_CALL(x) \
do { \
hiprtcResult result = x; \
if (result != HIPRTC_SUCCESS) { \
std::cerr << "\nerror: " #x " failed with error " \
<< hiprtcGetErrorString(result) << '\n'; \
exit(1); \
} \
} while(0)
#define CUDA_SAFE_CALL2(x) \
do { \
hipError_t result = x; \
if (result != hipSuccess) { \
const char *msg; \
hipGetErrorName(result, &msg); \
std::cerr << "\nerror: " #x " failed with error " \
<< msg << '\n'; \
exit(1); \
} \
} while(0)
void RuntimeKernelManager::initDeviceContext()
{
CUDA_SAFE_CALL2(hipInit(0));
CUDA_SAFE_CALL2(hipDeviceGet(&cuDevice, 0));
CUDA_SAFE_CALL2(hipCtxCreate(&context, 0, cuDevice));
}
void RuntimeKernelManager::compile(std::string fileName, std::string kernelName)
{
printf("compiling \n");
std::ifstream t(fileName.c_str());
std::string str((std::istreambuf_iterator<char>(t)),
std::istreambuf_iterator<char>());
hiprtcProgram p;
// Create an instance of hiprtcProgram with the SAXPY code string.
NVRTC_SAFE_CALL(
hiprtcCreateProgram(&p, // prog
str.c_str(), // buffer
fileName.c_str(), // name
0, // numHeaders
NULL, // headers
NULL)); // includeNames
const char *opts[] = {"--gpu-architecture=compute_60",
"-default-device"};
hiprtcResult compileResult = hiprtcCompileProgram(p, // prog
2, // numOptions
opts); // options
RuntimeKernel k;
k.prog = p; //std::move(p);
k.compileResult = compileResult;
k.kernelBody = str;
k.name = kernelName;
kernels.push_back(k); //std::move(k));
// Obtain compilation log from the program.
size_t logSize;
NVRTC_SAFE_CALL(hiprtcGetProgramLogSize(p, &logSize));
char *log = new char[logSize];
NVRTC_SAFE_CALL(hiprtcGetProgramLog(p, log));
std::cout << log << '\n';
delete[] log;
if (compileResult != HIPRTC_SUCCESS)
{
printf("bad compile for %s\n", fileName.c_str());
exit(1);
}
}
void RuntimeKernelManager::loadToRuntime(std::string kernelName)
{
int kernelId = 0;
for (int i = 0; i < kernels.size(); i++)
{
if (kernels[i].name == kernelName)
{
kernelId = i;
printf("found kernel \n");
}
}
RuntimeKernel *k = &kernels[kernelId];
// Obtain PTX from the program.
size_t ptxSize;
NVRTC_SAFE_CALL(hiprtcGetCodeSize(k->prog, &ptxSize));
k->ptx = new char[ptxSize];
NVRTC_SAFE_CALL(hiprtcGetCode(k->prog, k->ptx));
//we don't need this anymore
NVRTC_SAFE_CALL(hiprtcDestroyProgram(&k->prog));
CUDA_SAFE_CALL2(hipModuleLoadDataEx(&k->module, k->ptx, 0, 0, 0));
}
void RuntimeKernelManager::getKernel(hipFunction_t * func, std::string kernelName)
{
int kernelId = 0;
for (int i = 0; i < kernels.size(); i++)
{
if (kernels[i].name == kernelName)
{
kernelId = i;
printf("found kernel \n");
}
}
CUDA_SAFE_CALL2(hipModuleGetFunction(func, kernels[kernelId].module, kernelName.c_str()));
}
runtimeKernelSegments RuntimeKernelManager::parseKernelSegmentFile(std::string fileName)
{
printf("parsing \n");
std::ifstream testFile(fileName);
std::string currentSection = "";
std::string codeSegment = "";
int sectionCounter = 0;
std::vector<std::string> sections; //strings that compose kernel body, modified by generateKernel()
std::map<std::string, int> sectionIDMap; //map of section name to section id in "sections"
std::string line;
std::string delim = " ";
//loop through each line of file
//and find all listed code "segments", where each segment in file is
//the contiguous chunk of lines between a pair of "@startJit [SECTIONNAME]" and "@endJit [SECTIONNAME]" keywords
//then load each section into a vector
//also keep a hashmap that maps each segment's SECTIONNAME to its index in the vector
//just so it's more straightforward to access code segments
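//
//A minimal sketch of such a segment file (the section names and body lines are
//illustrative assumptions, not the actual contents of kernelTemplate.cu; the only
//section name the code below actually looks up is "loop"):
//
// @startJit prologue
// extern "C" __global__ void resolveConstraintsJit(double dt, THREE_VECTOR *v, THREE_VECTOR *rab, double *gamma /*, ...*/)
// {
// int tid = threadIdx.x;
// double errMax = 0.0;
// @endJit prologue
// @startJit loop
// // generic constraint loop; this section is replaced by generated code
// @endJit loop
// @startJit epilogue
// }
// @endJit epilogue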
while (std::getline(testFile, line))
{
std::istringstream split(line);
std::vector<std::string> tokens;
std::string item;
while (std::getline(split, item, ' '))
{
tokens.push_back(item);
}
if (tokens.size() > 0)
{
if (tokens[0] == "@startJit")
{
//get name of current code segment, insert into code segment name->id map
currentSection = tokens[1];
printf("parsing %s\n", currentSection.c_str());
sectionIDMap.insert(std::pair<std::string, int>(currentSection, sectionCounter));
sectionCounter++;
}
else if (tokens[0] == "@endJit")
{
//insert code segment to our code segment map, reset codesegment buffer
sections.push_back(codeSegment + "\n");
codeSegment = "";
printf("finished parsing %s\n", currentSection.c_str());
}
else if (currentSection != "")
{
//add current line to code segment buffer
codeSegment += line + "\n";
}
}
}
int loopID = sectionIDMap["loop"];
//std::pair<std::vector<std::string>, std::map<std::string, int>> runtimeKernelSegments;
return std::pair<std::vector<std::string>, std::map < std::string, int>>(sections, sectionIDMap);
//printf("section loop %s\n", sections[loopID].c_str());
}
std::string RuntimeKernelManager::generateKernel(runtimeKernelSegments &segments, void *parms)
{
/*
switch(resTypeID)
{
for (;it<maxit;it++) //start the iterative loop
{
errMax =0.0;
for (int ab=0; ab<numPair; ab++)
{
int pairID = pairIDs[ab];
int a = pairsIindexs[ab];
int b = pairsJindexs[ab];
double dist2 = distances2[ab];
double dist2inv = distances2inv[ab];
THREE_VECTOR vab;
VOP2(vab,=,v[a],-,v[b]);
double rma=rMass[a];
double rmb=rMass[b];
double rma_rmb_inv = rMassPairInv[ab];
//call Front or back function
double rvab = FrontFuncOpt(dt, dt2inv,dist2, rab[ab], vab)*dist2inv;
//if (wid==0) printf("it %i ab %idist2 %f rvab %f dt %f nc %i\n", it, ab,dist2, rvab, dt, nConstraint);
double gab=-rvab*rma_rmb_inv; //units: mass/time)
double err = fabs(rvab*dt);
errMax = fmax(err, errMax);
VSVOP(v[a],+=,(rma*gab),*,rab[ab]);
VSVOP(v[b],-=,(rmb*gab),*,rab[ab]);
gamma[ab] += gab;
} //numPair
if(tid==0 && errMax<tol) {break;}
} //maxit
// break;
// }
}
*/
CHARMMPOT_PARMS *cp_parms = static_cast<CHARMMPOT_PARMS*> (parms);
CHARMM_PARMS *charmmParms = cp_parms->charmmParms;
std::string tab = " ";
std::string curtab = "";
std::ostringstream curr;
curr << curtab + "if(tid==0){\n";
std::cout << std::fixed;
int precision = 17;
curtab = curtab + tab;
curr << curtab + "switch(resTypeID){\n";
curtab = curtab + tab;
//int constraintListSize = 0; //also a counter for making a prefix sum array
for (int i = 0; i < charmmParms->resiConnSize; i++)
{
if (i != 2)continue;
curr << curtab + "case " + std::to_string(i) + ":\n";
RESI_CONN * resiConn = charmmParms->resiConnList[i];
CONSTRAINT ** consList = resiConn->consList;
//iterate over constraint groups in a residue
if (resiConn->consListSize > 1)continue;
for (int j = 0; j < resiConn->consListSize; j++)
{
CONSTRAINT *constraint = consList[j];
//iterate over pairs in a constraint group
if (constraint->numPair > 0)
{
printf("i %i j %i\n", i, j);
curtab = curtab + tab;
//curr<< curtab+"int it=0;\n";
curr << curtab + "for (int it=0;it<maxit;it++){\n";
curtab = curtab + tab;
curr << curtab + "errMax =0.0;\n";
curr << curtab + "int ab=0;\n";
for (int k = 0; k < constraint->numPair; k++)
{
curr << curtab + "{\n";
curtab = curtab + tab;
CONS_PAIR *cpair = constraint->conspairList[k];
double dist = cpair->distance;
int a = cpair->atomIindex;
int b = cpair->atomJindex;
double mass_a = resiConn->atomList[a]->atmTypePtr->mass;
double mass_b = resiConn->atomList[b]->atmTypePtr->mass;
curr << curtab + "const int a = " << std::to_string(a) + ";\n";
curr << curtab + "const int b = " + std::to_string(b) + ";\n";
curr << curtab + "const double dist2 = " << std::setprecision(precision) << dist * dist << ";\n";
curr << curtab + "const double dist2inv = " << std::setprecision(precision) << 1.0 / (dist * dist) << ";\n";
curr << curtab + "const double rma = " << std::setprecision(precision) << mass_a << ";\n";
curr << curtab + "const double rmb = " << std::setprecision(precision) << mass_b << ";\n";
curr << curtab + "const double rma_rmb_inv = " << std::setprecision(precision) << 1.0 / (mass_a + mass_b) << ";\n";
curr << curtab + "THREE_VECTOR vab;\n";
curr << curtab + "VOP2(vab,=,v[a],-,v[b]);\n";
curr << curtab + "double rvab = FrontFuncOpt(dt, dt2inv,dist2, rab[ab], vab)*dist2inv;\n";
curr << curtab + "double gab=-rvab*rma_rmb_inv;\n";
curr << curtab + "double err = fabs(rvab*dt);\n";
curr << curtab + "errMax = fmax(err, errMax);\n";
curr << curtab + "VSVOP(v[a],+=,(rma*gab),*,rab[ab]);\n";
curr << curtab + "VSVOP(v[b],-=,(rmb*gab),*,rab[ab]);\n";
curr << curtab + "gamma[ab] += gab;\n";
curtab = curtab.substr(0, curtab.length() - 4);
curr << curtab + "}\n";
curr << curtab + "ab++;\n";
}
curr << curtab + "if(tid==0 && errMax<tol) {break;}\n";
curtab = curtab.substr(0, curtab.length() - 2 * 4);
curr << curtab + "};\n";
}
}
curr << curtab + "break; //maxit \n";
}
//default
curr << curtab + "default:\n";
std::string defaultcase = "\
for (;it<maxit;it++) //start the iterative loop\n\
{\n\
errMax =0.0;\n\
for (int ab=0; ab<numPair; ab++)\n\
{\n\
int pairID = pairIDs[ab];\n\
int a = pairsIindexs[ab];\n\
int b = pairsJindexs[ab];\n\
double dist2 = distances2[ab];\n\
double dist2inv = distances2inv[ab];\n\
THREE_VECTOR vab; \n\
VOP2(vab,=,v[a],-,v[b]);\n\
double rma=rMass[a];\n\
double rmb=rMass[b]; \n\
double rma_rmb_inv = rMassPairInv[ab];\n\
//call Front or back function\n\
double rvab = FrontFuncOpt(dt, dt2inv,dist2, rab[ab], vab)*dist2inv;\n\
double gab=-rvab*rma_rmb_inv; //units: mass/time)\n\
double err = fabs(rvab*dt);\n\
errMax = fmax(err, errMax);\n\
VSVOP(v[a],+=,(rma*gab),*,rab[ab]); \n\
VSVOP(v[b],-=,(rmb*gab),*,rab[ab]); \n\
gamma[ab] += gab; \n\
} //numPair\n\
if(tid==0 && errMax<tol) {break;}\n\
} //maxit\n";
curr << defaultcase;
curtab = curtab.substr(0, curtab.length() - 4);
curr << curtab + "}\n";
curtab = curtab.substr(0, curtab.length() - 4);
curr << curtab + "}\n";
printf("done iter\n");
//printf("curr \n%s\n", curr.c_str());
auto codeSegments = segments.first;
auto sectionIDMap = segments.second;
int loopID = sectionIDMap["loop"];
codeSegments[loopID] = curr.str();
std::string fileString;
for (auto segment : codeSegments)
{
std::cout << segment << "\n";
fileString += segment;
}
std::ofstream out("outputKernel.cu");
out << fileString;
out.close();
return fileString;
}
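//For reference, the generated source assigned to the "loop" segment looks roughly like the
//sketch below for a residue type with one constraint pair (resTypeID, atom indices and the
//elided constants are illustrative placeholders, not values from a real CHARMM parameter set):
//
// if(tid==0){
// switch(resTypeID){
// case 2:
// for (int it=0;it<maxit;it++){
// errMax =0.0;
// int ab=0;
// {
// const int a = ...; const int b = ...;
// const double dist2 = ...; const double dist2inv = ...;
// const double rma = ...; const double rmb = ...; const double rma_rmb_inv = ...;
// THREE_VECTOR vab;
// VOP2(vab,=,v[a],-,v[b]);
// double rvab = FrontFuncOpt(dt, dt2inv,dist2, rab[ab], vab)*dist2inv;
// double gab=-rvab*rma_rmb_inv;
// errMax = fmax(fabs(rvab*dt), errMax);
// VSVOP(v[a],+=,(rma*gab),*,rab[ab]);
// VSVOP(v[b],-=,(rmb*gab),*,rab[ab]);
// gamma[ab] += gab;
// }
// ab++;
// if(tid==0 && errMax<tol) {break;}
// };
// break; //maxit
// default:
// /* generic loop over pairIDs, see defaultcase above */
// }
// }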
int main1()
{
RuntimeKernelManager kernelGen;
kernelGen.compile("kernelTemplate.cu", "");
//init the runtime
kernelGen.initDeviceContext();
// Load the generated PTX and get a handle to the SAXPY kernel.
kernelGen.loadToRuntime("kernelTemplate.cu");
//only this should be called in the simulation loop
//hipFunction_t kernel;
//kernelGen.getKernel(kernel, "saxpy");
/*
// Generate input for execution, and create output buffers.
size_t n = NUM_THREADS * NUM_BLOCKS;
size_t bufferSize = n * sizeof(float);
float a = 5.1f;
float *hX = new float[n], *hY = new float[n], *hOut = new float[n];
for (size_t i = 0; i < n; ++i) {
hX[i] = static_cast<float>(i);
hY[i] = static_cast<float>(i * 2);
}
hipDeviceptr_t dX, dY, dOut;
CUDA_SAFE_CALL(cuMemAlloc(&dX, bufferSize));
CUDA_SAFE_CALL(cuMemAlloc(&dY, bufferSize));
CUDA_SAFE_CALL(cuMemAlloc(&dOut, bufferSize));
CUDA_SAFE_CALL(cuMemcpyHtoD(dX, hX, bufferSize));
CUDA_SAFE_CALL(cuMemcpyHtoD(dY, hY, bufferSize));
// Execute SAXPY.
void *args[] = { &a, &dX, &dY, &dOut, &n };
CUDA_SAFE_CALL(
hipModuleLaunchKernel(kernel,
NUM_BLOCKS, 1, 1, // grid dim
NUM_THREADS, 1, 1, // block dim
0, NULL, // shared mem and stream
args, 0)); // arguments
CUDA_SAFE_CALL(hipCtxSynchronize());
// Retrieve and print output.
CUDA_SAFE_CALL(cuMemcpyDtoH(hOut, dOut, bufferSize));
for (size_t i = 0; i < n; ++i) {
std::cout << a << " * " << hX[i] << " + " << hY[i]
<< " = " << hOut[i] << '\n';
}
// Release resources.
CUDA_SAFE_CALL(hipFree(dX));
CUDA_SAFE_CALL(hipFree(dY));
CUDA_SAFE_CALL(hipFree(dOut));
//CUDA_SAFE_CALL(hipModuleUnload(module));
//CUDA_SAFE_CALL(hipCtxDestroy(context));
delete[] hX;
delete[] hY;
delete[] hOut;
*/
return 0;
}
| 07fc08c7b53383fdb6a82393d1f1538fdcd1639c.cu | //generates fast constraint kernels
#include "runtimeKernel.h"
#include <iostream>
#include <fstream>
#include <string>
#include <nvrtc.h>
#include <cuda.h>
#include <streambuf>
#include <vector>
#include <map>
#include <sstream>
#include <iomanip>
#include "bioCharmm.h"
#define NVRTC_SAFE_CALL(x) \
do { \
nvrtcResult result = x; \
if (result != NVRTC_SUCCESS) { \
std::cerr << "\nerror: " #x " failed with error " \
<< nvrtcGetErrorString(result) << '\n'; \
exit(1); \
} \
} while(0)
#define CUDA_SAFE_CALL2(x) \
do { \
CUresult result = x; \
if (result != CUDA_SUCCESS) { \
const char *msg; \
cuGetErrorName(result, &msg); \
std::cerr << "\nerror: " #x " failed with error " \
<< msg << '\n'; \
exit(1); \
} \
} while(0)
void RuntimeKernelManager::initDeviceContext()
{
CUDA_SAFE_CALL2(cuInit(0));
CUDA_SAFE_CALL2(cuDeviceGet(&cuDevice, 0));
CUDA_SAFE_CALL2(cuCtxCreate(&context, 0, cuDevice));
}
void RuntimeKernelManager::compile(std::string fileName, std::string kernelName)
{
printf("compiling \n");
std::ifstream t(fileName.c_str());
std::string str((std::istreambuf_iterator<char>(t)),
std::istreambuf_iterator<char>());
nvrtcProgram p;
// Create an instance of nvrtcProgram with the SAXPY code string.
NVRTC_SAFE_CALL(
nvrtcCreateProgram(&p, // prog
str.c_str(), // buffer
fileName.c_str(), // name
0, // numHeaders
NULL, // headers
NULL)); // includeNames
const char *opts[] = {"--gpu-architecture=compute_60",
"-default-device"};
nvrtcResult compileResult = nvrtcCompileProgram(p, // prog
2, // numOptions
opts); // options
RuntimeKernel k;
k.prog = p; //std::move(p);
k.compileResult = compileResult;
k.kernelBody = str;
k.name = kernelName;
kernels.push_back(k); //std::move(k));
// Obtain compilation log from the program.
size_t logSize;
NVRTC_SAFE_CALL(nvrtcGetProgramLogSize(p, &logSize));
char *log = new char[logSize];
NVRTC_SAFE_CALL(nvrtcGetProgramLog(p, log));
std::cout << log << '\n';
delete[] log;
if (compileResult != NVRTC_SUCCESS)
{
printf("bad compile for %s\n", fileName.c_str());
exit(1);
}
}
void RuntimeKernelManager::loadToRuntime(std::string kernelName)
{
int kernelId = 0;
for (int i = 0; i < kernels.size(); i++)
{
if (kernels[i].name == kernelName)
{
kernelId = i;
printf("found kernel \n");
}
}
RuntimeKernel *k = &kernels[kernelId];
// Obtain PTX from the program.
size_t ptxSize;
NVRTC_SAFE_CALL(nvrtcGetPTXSize(k->prog, &ptxSize));
k->ptx = new char[ptxSize];
NVRTC_SAFE_CALL(nvrtcGetPTX(k->prog, k->ptx));
//we don't need this anymore
NVRTC_SAFE_CALL(nvrtcDestroyProgram(&k->prog));
CUDA_SAFE_CALL2(cuModuleLoadDataEx(&k->module, k->ptx, 0, 0, 0));
}
void RuntimeKernelManager::getKernel(CUfunction * func, std::string kernelName)
{
int kernelId = 0;
for (int i = 0; i < kernels.size(); i++)
{
if (kernels[i].name == kernelName)
{
kernelId = i;
printf("found kernel \n");
}
}
CUDA_SAFE_CALL2(cuModuleGetFunction(func, kernels[kernelId].module, kernelName.c_str()));
}
runtimeKernelSegments RuntimeKernelManager::parseKernelSegmentFile(std::string fileName)
{
printf("parsing \n");
std::ifstream testFile(fileName);
std::string currentSection = "";
std::string codeSegment = "";
int sectionCounter = 0;
std::vector<std::string> sections; //strings that compose kernel body, modified by generateKernel()
std::map<std::string, int> sectionIDMap; //map of section name to section id in "sections"
std::string line;
std::string delim = " ";
//loop through each line of file
//and find all listed code "segments", where each segment in file is
//the contiguous chunk of lines between a pair of "@startJit [SECTIONNAME]" and "@endJit [SECTIONNAME]" keywords
//then load each section into a vector
//also keep a hashmap that maps each segment's SECTIONNAME to its index in the vector
//just so it's more straightforward to access code segments
while (std::getline(testFile, line))
{
std::istringstream split(line);
std::vector<std::string> tokens;
std::string item;
while (std::getline(split, item, ' '))
{
tokens.push_back(item);
}
if (tokens.size() > 0)
{
if (tokens[0] == "@startJit")
{
//get name of current code segment, insert into code segment name->id map
currentSection = tokens[1];
printf("parsing %s\n", currentSection.c_str());
sectionIDMap.insert(std::pair<std::string, int>(currentSection, sectionCounter));
sectionCounter++;
}
else if (tokens[0] == "@endJit")
{
//insert code segment to our code segment map, reset codesegment buffer
sections.push_back(codeSegment + "\n");
codeSegment = "";
printf("finished parsing %s\n", currentSection.c_str());
}
else if (currentSection != "")
{
//add current line to code segment buffer
codeSegment += line + "\n";
}
}
}
int loopID = sectionIDMap["loop"];
//std::pair<std::vector<std::string>, std::map<std::string, int>> runtimeKernelSegments;
return std::pair<std::vector<std::string>, std::map < std::string, int>>(sections, sectionIDMap);
//printf("section loop %s\n", sections[loopID].c_str());
}
std::string RuntimeKernelManager::generateKernel(runtimeKernelSegments &segments, void *parms)
{
/*
switch(resTypeID)
{
for (;it<maxit;it++) //start the iterative loop
{
errMax =0.0;
for (int ab=0; ab<numPair; ab++)
{
int pairID = pairIDs[ab];
int a = pairsIindexs[ab];
int b = pairsJindexs[ab];
double dist2 = distances2[ab];
double dist2inv = distances2inv[ab];
THREE_VECTOR vab;
VOP2(vab,=,v[a],-,v[b]);
double rma=rMass[a];
double rmb=rMass[b];
double rma_rmb_inv = rMassPairInv[ab];
//call Front or back function
double rvab = FrontFuncOpt(dt, dt2inv,dist2, rab[ab], vab)*dist2inv;
//if (wid==0) printf("it %i ab %idist2 %f rvab %f dt %f nc %i\n", it, ab,dist2, rvab, dt, nConstraint);
double gab=-rvab*rma_rmb_inv; //units: mass/time)
double err = fabs(rvab*dt);
errMax = fmax(err, errMax);
VSVOP(v[a],+=,(rma*gab),*,rab[ab]);
VSVOP(v[b],-=,(rmb*gab),*,rab[ab]);
gamma[ab] += gab;
} //numPair
if(tid==0 && errMax<tol) {break;}
} //maxit
// break;
// }
}
*/
CHARMMPOT_PARMS *cp_parms = static_cast<CHARMMPOT_PARMS*> (parms);
CHARMM_PARMS *charmmParms = cp_parms->charmmParms;
std::string tab = " ";
std::string curtab = "";
std::ostringstream curr;
curr << curtab + "if(tid==0){\n";
std::cout << std::fixed;
int precision = 17;
curtab = curtab + tab;
curr << curtab + "switch(resTypeID){\n";
curtab = curtab + tab;
//int constraintListSize = 0; //also a counter for making a prefix sum array
for (int i = 0; i < charmmParms->resiConnSize; i++)
{
if (i != 2)continue;
curr << curtab + "case " + std::to_string(i) + ":\n";
RESI_CONN * resiConn = charmmParms->resiConnList[i];
CONSTRAINT ** consList = resiConn->consList;
//iterate over constraint groups in a residue
if (resiConn->consListSize > 1)continue;
for (int j = 0; j < resiConn->consListSize; j++)
{
CONSTRAINT *constraint = consList[j];
//iterate over pairs in a constraint group
if (constraint->numPair > 0)
{
printf("i %i j %i\n", i, j);
curtab = curtab + tab;
//curr<< curtab+"int it=0;\n";
curr << curtab + "for (int it=0;it<maxit;it++){\n";
curtab = curtab + tab;
curr << curtab + "errMax =0.0;\n";
curr << curtab + "int ab=0;\n";
for (int k = 0; k < constraint->numPair; k++)
{
curr << curtab + "{\n";
curtab = curtab + tab;
CONS_PAIR *cpair = constraint->conspairList[k];
double dist = cpair->distance;
int a = cpair->atomIindex;
int b = cpair->atomJindex;
double mass_a = resiConn->atomList[a]->atmTypePtr->mass;
double mass_b = resiConn->atomList[b]->atmTypePtr->mass;
curr << curtab + "const int a = " << std::to_string(a) + ";\n";
curr << curtab + "const int b = " + std::to_string(b) + ";\n";
curr << curtab + "const double dist2 = " << std::setprecision(precision) << dist * dist << ";\n";
curr << curtab + "const double dist2inv = " << std::setprecision(precision) << 1.0 / (dist * dist) << ";\n";
curr << curtab + "const double rma = " << std::setprecision(precision) << mass_a << ";\n";
curr << curtab + "const double rmb = " << std::setprecision(precision) << mass_b << ";\n";
curr << curtab + "const double rma_rmb_inv = " << std::setprecision(precision) << 1.0 / (mass_a + mass_b) << ";\n";
curr << curtab + "THREE_VECTOR vab;\n";
curr << curtab + "VOP2(vab,=,v[a],-,v[b]);\n";
curr << curtab + "double rvab = FrontFuncOpt(dt, dt2inv,dist2, rab[ab], vab)*dist2inv;\n";
curr << curtab + "double gab=-rvab*rma_rmb_inv;\n";
curr << curtab + "double err = fabs(rvab*dt);\n";
curr << curtab + "errMax = fmax(err, errMax);\n";
curr << curtab + "VSVOP(v[a],+=,(rma*gab),*,rab[ab]);\n";
curr << curtab + "VSVOP(v[b],-=,(rmb*gab),*,rab[ab]);\n";
curr << curtab + "gamma[ab] += gab;\n";
curtab = curtab.substr(0, curtab.length() - 4);
curr << curtab + "}\n";
curr << curtab + "ab++;\n";
}
curr << curtab + "if(tid==0 && errMax<tol) {break;}\n";
curtab = curtab.substr(0, curtab.length() - 2 * 4);
curr << curtab + "};\n";
}
}
curr << curtab + "break; //maxit \n";
}
//default
curr << curtab + "default:\n";
std::string defaultcase = "\
for (;it<maxit;it++) //start the iterative loop\n\
{\n\
errMax =0.0;\n\
for (int ab=0; ab<numPair; ab++)\n\
{\n\
int pairID = pairIDs[ab];\n\
int a = pairsIindexs[ab];\n\
int b = pairsJindexs[ab];\n\
double dist2 = distances2[ab];\n\
double dist2inv = distances2inv[ab];\n\
THREE_VECTOR vab; \n\
VOP2(vab,=,v[a],-,v[b]);\n\
double rma=rMass[a];\n\
double rmb=rMass[b]; \n\
double rma_rmb_inv = rMassPairInv[ab];\n\
//call Front or back function\n\
double rvab = FrontFuncOpt(dt, dt2inv,dist2, rab[ab], vab)*dist2inv;\n\
double gab=-rvab*rma_rmb_inv; //units: mass/time)\n\
double err = fabs(rvab*dt);\n\
errMax = fmax(err, errMax);\n\
VSVOP(v[a],+=,(rma*gab),*,rab[ab]); \n\
VSVOP(v[b],-=,(rmb*gab),*,rab[ab]); \n\
gamma[ab] += gab; \n\
} //numPair\n\
if(tid==0 && errMax<tol) {break;}\n\
} //maxit\n";
curr << defaultcase;
curtab = curtab.substr(0, curtab.length() - 4);
curr << curtab + "}\n";
curtab = curtab.substr(0, curtab.length() - 4);
curr << curtab + "}\n";
printf("done iter\n");
//printf("curr \n%s\n", curr.c_str());
auto codeSegments = segments.first;
auto sectionIDMap = segments.second;
int loopID = sectionIDMap["loop"];
codeSegments[loopID] = curr.str();
std::string fileString;
for (auto segment : codeSegments)
{
std::cout << segment << "\n";
fileString += segment;
}
std::ofstream out("outputKernel.cu");
out << fileString;
out.close();
return fileString;
}
int main1()
{
RuntimeKernelManager kernelGen;
kernelGen.compile("kernelTemplate.cu", "");
//init the runtime
kernelGen.initDeviceContext();
// Load the generated PTX and get a handle to the SAXPY kernel.
kernelGen.loadToRuntime("kernelTemplate.cu");
//only this should be called in the simulation loop
//CUfunction kernel;
//kernelGen.getKernel(kernel, "saxpy");
/*
// Generate input for execution, and create output buffers.
size_t n = NUM_THREADS * NUM_BLOCKS;
size_t bufferSize = n * sizeof(float);
float a = 5.1f;
float *hX = new float[n], *hY = new float[n], *hOut = new float[n];
for (size_t i = 0; i < n; ++i) {
hX[i] = static_cast<float>(i);
hY[i] = static_cast<float>(i * 2);
}
CUdeviceptr dX, dY, dOut;
CUDA_SAFE_CALL(cuMemAlloc(&dX, bufferSize));
CUDA_SAFE_CALL(cuMemAlloc(&dY, bufferSize));
CUDA_SAFE_CALL(cuMemAlloc(&dOut, bufferSize));
CUDA_SAFE_CALL(cuMemcpyHtoD(dX, hX, bufferSize));
CUDA_SAFE_CALL(cuMemcpyHtoD(dY, hY, bufferSize));
// Execute SAXPY.
void *args[] = { &a, &dX, &dY, &dOut, &n };
CUDA_SAFE_CALL(
cuLaunchKernel(kernel,
NUM_BLOCKS, 1, 1, // grid dim
NUM_THREADS, 1, 1, // block dim
0, NULL, // shared mem and stream
args, 0)); // arguments
CUDA_SAFE_CALL(cuCtxSynchronize());
// Retrieve and print output.
CUDA_SAFE_CALL(cuMemcpyDtoH(hOut, dOut, bufferSize));
for (size_t i = 0; i < n; ++i) {
std::cout << a << " * " << hX[i] << " + " << hY[i]
<< " = " << hOut[i] << '\n';
}
// Release resources.
CUDA_SAFE_CALL(cuMemFree(dX));
CUDA_SAFE_CALL(cuMemFree(dY));
CUDA_SAFE_CALL(cuMemFree(dOut));
//CUDA_SAFE_CALL(cuModuleUnload(module));
//CUDA_SAFE_CALL(cuCtxDestroy(context));
delete[] hX;
delete[] hY;
delete[] hOut;
*/
return 0;
}
|
564ba6618b7a0c4b2e264a1835271497fd3c940b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include "gpu_hmma.h"
#include <stdio.h>
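// Block-sparse LUT layout assumed by the kernels below (inferred from how they index Lut;
// the concrete numbers in the example are illustrative only):
//
// * bst_sgemm_32x32x64_nt reads one uint2 per sparse block: Lut[bid] = {block_row, block_col}.
// * bst_sgemm_32x64x32_xn reads a two-level LUT: Lut[idx_M] = {offset, size} is a per-output-
// block-row header, and the `size` uint2 entries starting at Lut[offset] are
// {a_block_index, b_ctx_block} pairs for that row.
//
// e.g. one possible xn LUT for a 3x3 lower-triangular block pattern (nonzero blocks
// numbered 0..5 row-major):
// headers: {3,1} {4,2} {6,3}
// entries: {0,0} {1,0} {2,1} {3,0} {4,1} {5,2}
//
// For OP_N the xn kernel remaps idx_M = grid_M - idx_M so the rows with the most blocks
// (the largest reductions) get scheduled first.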
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(128,6) bst_sgemm_32x64x32_xn(
const uint2* __restrict__ Lut,
const bhalf* __restrict__ A,
const float* __restrict__ B,
float* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
__shared__ float fShare[(33 + 64)*32];
uint2* Lut2s = (uint2*)&fShare[(33 + 64)*32];
char* bShare = (char*)&fShare;
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint txb = tid % 16;
uint tyb = tid / 16;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 128)
{
uint2 entry = Lut[i];
entry.x *= 32*32; // 1024 entries of A per block
entry.y *= szHeadState*32; // 32 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint txa = tid % 8;
uint tya = tid / 8;
uint tid16 = tid & 16;
uint tid96 = tid & 96;
uint loadB = ((tid / 2) % 8) * 4*4;
uint loadA = (tid % 2) * 4*4;
// each warp handles a quarter of the weights
loadA += tid96;
// second half of warp starts 16 rows down
loadB += tid16 * 64*4;
loadA += tid16 * 32*4;
uint storB = (tyb*64 + txb*4) * 4;
uint storA;
if (OP_A == OP_T)
storA = tid * 4*4;
else
{
// Transpose weights on store to shared
// Avoid bank conflicts by shifting writes over by 4 every 4 rows (+txa*4)
storA = (txa*32*4 + tya + txa*4) * 4;
loadA += tid16 * 4; // shift over 4 floats every 4 rows, second half of warp starts 16 rows down
}
uint b = idx_N*64 + txb*4;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*4;
uint offsetB = idx_B*szCtxHeadStateB + tyb*szHeadState + idx_H*szState + b;
bool inB = N64 || b < szState;
// zero accumulation registers
float regC[4][8];
for (int i = 0; i < 4; i++)
for (int j = 0; j < 8; j++)
regC[i][j] = 0.0f;
// Force compiler to fully compute these prior to loop
asm("mov.b32 %0, %0;" : "+r"(loadA) : );
asm("mov.b32 %0, %0;" : "+r"(loadB) : );
asm("mov.b32 %0, %0;" : "+r"(storA) : );
asm("mov.b32 %0, %0;" : "+r"(storB) : );
asm("mov.b32 %0, %0;" : "+r"(offsetA) : );
asm("mov.b32 %0, %0;" : "+r"(offsetB) : );
int idx_lut = 0;
#pragma unroll 1
do
{
//asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
const bhalf* pA = add_ptr_u(A, entry.x + offsetA);
bhalf4 a00 = ldg((const bhalf4*)(pA + 0*32));
bhalf4 a16 = ldg((const bhalf4*)(pA + 16*32));
float4 b00 = {0.0f}, b08 = {0.0f}, b16 = {0.0f}, b24 = {0.0f};
entry.y += offsetB;
if (inB)
{
b00 = ldg((const float4*)(B + (entry.y + 0*szHeadState)));
b08 = ldg((const float4*)(B + (entry.y + 8*szHeadState)));
b16 = ldg((const float4*)(B + (entry.y + 16*szHeadState)));
b24 = ldg((const float4*)(B + (entry.y + 24*szHeadState)));
}
__syncthreads();
float4 fa00 = to_float(a00);
float4 fa16 = to_float(a16);
if (OP_A == OP_T)
{
*(float4*)&bShare[storA + (0*16*32 + 64*32)*4] = fa00;
*(float4*)&bShare[storA + (1*16*32 + 64*32)*4] = fa16;
}
else
{
// transpose the shared store of W
*(float*)&bShare[storA + (0*32 + 0*16 + 64*32)*4] = fa00.x;
*(float*)&bShare[storA + (1*32 + 0*16 + 64*32)*4] = fa00.y;
*(float*)&bShare[storA + (2*32 + 0*16 + 64*32)*4] = fa00.z;
*(float*)&bShare[storA + (3*32 + 0*16 + 64*32)*4] = fa00.w;
*(float*)&bShare[storA + (0*32 + 1*16 + 64*32)*4] = fa16.x;
*(float*)&bShare[storA + (1*32 + 1*16 + 64*32)*4] = fa16.y;
*(float*)&bShare[storA + (2*32 + 1*16 + 64*32)*4] = fa16.z;
*(float*)&bShare[storA + (3*32 + 1*16 + 64*32)*4] = fa16.w;
}
*(float4*)&bShare[storB + 0*64*4] = b00;
*(float4*)&bShare[storB + 8*64*4] = b08;
*(float4*)&bShare[storB + 16*64*4] = b16;
*(float4*)&bShare[storB + 24*64*4] = b24;
__syncthreads();
// computes a 32x64x32 gemm tile with 4x8 register blocking
float regA[4];
float regB[8];
#pragma unroll
for (int j = 0; j < 16; j++)
{
// fetch outer product data
*(float4*)®A[0] = *(float4*)&bShare[loadA + (32*j + 64*32 + (OP_A == OP_T ? 0 : (j/4)*4))*4]; // shift over 4 floats every 4 rows
*(float4*)®B[0] = *(float4*)&bShare[loadB + (64*j + 0)*4];
*(float4*)®B[4] = *(float4*)&bShare[loadB + (64*j + 32)*4];
// accumulate outer product
for (int i = 0; i < 4; i++)
for (int j = 0; j < 8; j++)
regC[i][j] += regA[i] * regB[j];
}
} while (++idx_lut < lut_size);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// printf("%3d %.0f %.0f %.0f %.0f | %.0f %.0f %.0f %.0f | %.0f %.0f %.0f %.0f | %.0f %.0f %.0f %.0f\n", tid,
// regC[0][0], regC[0][1], regC[0][2], regC[0][3],
// regC[1][0], regC[1][1], regC[1][2], regC[1][3],
// regC[2][0], regC[2][1], regC[2][2], regC[2][3],
// regC[3][0], regC[3][1], regC[3][2], regC[3][3]);
tid16 = tid & 16;
tid96 = tid & 96;
uint tn = (tid / 2) % 8;
uint tm = ((tid % 2) + (tid96 / 16))*4 + (tid16 / 16);
bool t16 = tid16 != 0;
float outC[2][8];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 8; j++)
{
float swap = t16 ? regC[2*i + 0][j] : regC[2*i + 1][j];
outC[i][j] = t16 ? regC[2*i + 1][j] : regC[2*i + 0][j];
outC[i][j] += shfl_xor(swap, 16);
}
uint n = idx_N*64 + tn*4;
bool bn00 = N64 || n + 0 < szState;
bool bn32 = N64 || n + 32 < szState;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*32 + tm)*szHeadState + idx_H*szState + n;
store((float4*)(C + (offsetC + szHeadState*0 + 0)), *(float4*)&outC[0][0], 0, bn00);
store((float4*)(C + (offsetC + szHeadState*0 + 32)), *(float4*)&outC[0][4], 0, bn32);
store((float4*)(C + (offsetC + szHeadState*2 + 0)), *(float4*)&outC[1][0], 0, bn00);
store((float4*)(C + (offsetC + szHeadState*2 + 32)), *(float4*)&outC[1][4], 0, bn32);
}
else
{
uint c = idx_N*64 + txb*4;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*32 + tyb)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
float4 zero = {0.0f};
*(float4*)&C[offsetC + szHeadState* 0] = zero;
*(float4*)&C[offsetC + szHeadState* 8] = zero;
*(float4*)&C[offsetC + szHeadState*16] = zero;
*(float4*)&C[offsetC + szHeadState*24] = zero;
}
}
}
template <bool K64>
__global__ void __launch_bounds__(256,3) bst_sgemm_32x32x64_nt(
const uint2* __restrict__ Lut,
const float* __restrict__ A,
const float* __restrict__ B,
bhalf* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut, uint loops)
{
__shared__ float fShare[65*32*2];
char* bShare = (char*)fShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 16;
uint ty = tid / 16;
uint k = tx * 4;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA00 = idx_B*szCtxHeadStateA + (idx_M*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetB00 = idx_B*szCtxHeadStateB + (idx_N*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetA16 = offsetA00 + szHeadState*16;
uint offsetB16 = offsetB00 + szHeadState*16;
uint tid224 = tid & 224; // 256 - 32
// avoid bank conflicts when writing transpose (+ tx*2)
uint storAB = (tx*32*4 + ty + tx*2)*4;
// 32 threads per tile, each tile reads 8 lines, shifted over by 4
uint loadA = (((tid & 16) >> 3) | (tid & 1)) << 4;
uint loadB = ((tid >> 1) & 7) << 4;
loadA += (tid224 * 32) + (tid224 / 2); // 32*8*4
loadB += (tid224 * 32) + (tid224 / 2); // 32*8*4
// This keeps all prior logic outside of the loops.
asm("mov.b32 %0, %0;" : "+r"(storAB) : );
asm("mov.b32 %0, %0;" : "+r"(loadA) : );
asm("mov.b32 %0, %0;" : "+r"(loadB) : );
float regC[8][4];
for (int i = 0; i < 8; i++)
for (int j = 0; j < 4; j++)
regC[i][j] = 0.0f;
uint loop = 0;
#pragma unroll 1
do
{
float4 a00 = {0}, a16 = {0};
float4 b00 = {0}, b16 = {0};
if (K64 || k < szState)
{
a00 = ldg((const float4*)(add_ptr_u(A, offsetA00)));
a16 = ldg((const float4*)(add_ptr_u(A, offsetA16)));
b00 = ldg((const float4*)(add_ptr_u(B, offsetB00)));
b16 = ldg((const float4*)(add_ptr_u(B, offsetB16)));
}
offsetA00 += 64;
offsetA16 += 64;
offsetB00 += 64;
offsetB16 += 64;
if (!K64)
k += 64;
__syncthreads();
*(float*)&bShare[storAB + (0*32 + 0 + 0*65*32)*4] = a00.x;
*(float*)&bShare[storAB + (1*32 + 0 + 0*65*32)*4] = a00.y;
*(float*)&bShare[storAB + (2*32 + 0 + 0*65*32)*4] = a00.z;
*(float*)&bShare[storAB + (3*32 + 0 + 0*65*32)*4] = a00.w;
*(float*)&bShare[storAB + (0*32 + 16 + 0*65*32)*4] = a16.x;
*(float*)&bShare[storAB + (1*32 + 16 + 0*65*32)*4] = a16.y;
*(float*)&bShare[storAB + (2*32 + 16 + 0*65*32)*4] = a16.z;
*(float*)&bShare[storAB + (3*32 + 16 + 0*65*32)*4] = a16.w;
*(float*)&bShare[storAB + (0*32 + 0 + 1*65*32)*4] = b00.x;
*(float*)&bShare[storAB + (1*32 + 0 + 1*65*32)*4] = b00.y;
*(float*)&bShare[storAB + (2*32 + 0 + 1*65*32)*4] = b00.z;
*(float*)&bShare[storAB + (3*32 + 0 + 1*65*32)*4] = b00.w;
*(float*)&bShare[storAB + (0*32 + 16 + 1*65*32)*4] = b16.x;
*(float*)&bShare[storAB + (1*32 + 16 + 1*65*32)*4] = b16.y;
*(float*)&bShare[storAB + (2*32 + 16 + 1*65*32)*4] = b16.z;
*(float*)&bShare[storAB + (3*32 + 16 + 1*65*32)*4] = b16.w;
__syncthreads();
float regA[8], regB[4];
#pragma unroll
for (int j = 0; j < 4; j++)
{
// fetch outer product data
*(float4*)®A[0] = *(float4*)&bShare[loadA + (32*j + 0)*4];
*(float4*)®A[4] = *(float4*)&bShare[loadA + (32*j + 16)*4];
*(float4*)®B[0] = *(float4*)&bShare[loadB + (32*j + 65*32)*4];
for (int i = 0; i < 8; i++)
for (int j = 0; j < 4; j++)
regC[i][j] += regA[i] * regB[j];
}
#pragma unroll
for (int j = 4; j < 8; j++)
{
*(float2*)®A[0] = *(float2*)&bShare[loadA + (32*j + 0 + (j/4)*2)*4];
*(float2*)®A[2] = *(float2*)&bShare[loadA + (32*j + 2 + (j/4)*2)*4];
*(float2*)®A[4] = *(float2*)&bShare[loadA + (32*j + 16 + (j/4)*2)*4];
*(float2*)®A[6] = *(float2*)&bShare[loadA + (32*j + 18 + (j/4)*2)*4];
*(float2*)®B[0] = *(float2*)&bShare[loadB + (32*j + 0 + (j/4)*2 + 65*32)*4];
*(float2*)®B[2] = *(float2*)&bShare[loadB + (32*j + 2 + (j/4)*2 + 65*32)*4];
for (int i = 0; i < 8; i++)
for (int j = 0; j < 4; j++)
regC[i][j] += regA[i] * regB[j];
}
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
//printf("%3d %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f\n", tid, regC[0][0], regC[0][1], regC[0][2], regC[0][3], regC[4][0], regC[4][1], regC[4][2], regC[4][3]);
// if ((tid & 31) == 0)
// printf("%3d %.0f\n", tid, regC[0][0]);
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*32*32 + tid*2;
// Arrange 8 tiles horizontally in the X direction: ((tid & 224) >> 1)
// Add some spacing to avoid write bank conflicts: (ty << 2)
ty = ((tid & 16) >> 3) + (tid & 1);
tx = ((tid >> 1) & 7) + ((tid & 224) >> 2) + (ty << 2);
uint storC = ty*32*8*4 + tx*4;
tx = tid % 16;
ty = tid / 16;
uint readC = ty*32*8 + tx*2 + ((tid & 192)>>2);
__syncthreads();
*(float4*)&fShare[storC + 0*32*8] = *(float4*)regC[0];
*(float4*)&fShare[storC + 1*32*8] = *(float4*)regC[1];
*(float4*)&fShare[storC + 2*32*8] = *(float4*)regC[2];
*(float4*)&fShare[storC + 3*32*8] = *(float4*)regC[3];
__syncthreads();
float2 c2[8];
for (int i = 0; i < 8; i++)
c2[i] = *(float2*)&fShare[readC + i*32];
// Tree reduce
for (int j = 4; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
c2[i] = ew_add(c2[i], c2[i+j]);
store((bhalf2*)C, c2[0]);
__syncthreads();
*(float4*)&fShare[storC + 0*32*8] = *(float4*)regC[4];
*(float4*)&fShare[storC + 1*32*8] = *(float4*)regC[5];
*(float4*)&fShare[storC + 2*32*8] = *(float4*)regC[6];
*(float4*)&fShare[storC + 3*32*8] = *(float4*)regC[7];
__syncthreads();
for (int i = 0; i < 8; i++)
c2[i] = *(float2*)&fShare[readC + i*32];
// Tree reduce
for (int j = 4; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
c2[i] = ew_add(c2[i], c2[i+j]);
store((bhalf2*)(C + 16*32), c2[0]);
}
bool bst_sgemm_xn(hipStream_t stream,
const uint2* lut,
const bhalf* a,
const float* b,
float* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks_b, uint ctx_blks_c, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim, uint op, uint magic, uint shift, uint max_lut)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadStateB = ctx_blks_b * block_size * szHeadState;
uint szCtxHeadStateC = ctx_blks_c * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
// compound gridDim.x with m and n coords
uint gridN = CEIL_DIV(state_dim, 64);
uint gridM = ctx_blks_c - 1;
uint gridX = ctx_blks_c * gridN;
uint shared = ((max_lut+1)/2)*2*8; // round up to nearest even, 8 bytes per entry
bool n64 = (state_dim & 63) == 0;
dim3 grid(gridX, batch_dim, head_dim);
if (block_size == 32)
{
if (op == NN_OP) // NN
{
if (n64)
hipLaunchKernelGGL(( bst_sgemm_32x64x32_xn<OP_N, true>), dim3(grid),dim3(128),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
hipLaunchKernelGGL(( bst_sgemm_32x64x32_xn<OP_N,false>), dim3(grid),dim3(128),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
else // TN
{
if (n64)
hipLaunchKernelGGL(( bst_sgemm_32x64x32_xn<OP_T, true>), dim3(grid),dim3(128),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
hipLaunchKernelGGL(( bst_sgemm_32x64x32_xn<OP_T,false>), dim3(grid),dim3(128),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
}
return true;
}
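// The (magic, shift) pair passed in is not derived here; assuming div64(n, magic, shift)
// evaluates (uint)(((uint64_t)n * magic) >> shift), one standard way a caller could compute
// it for the divisor d = gridN is the unsigned magic-number scheme sketched below (this
// helper is hypothetical, not part of ew_op_gpu.h):
//
// static void magic_u32(uint d, uint nmax, uint* magic, uint* shift)
// {
// for (uint p = 32; p < 64; p++)
// {
// uint64_t m = ((1ull << p) + d - 1) / d; // ceil(2^p / d)
// uint64_t e = m*d - (1ull << p); // error term, 0 <= e < d
// if (e * nmax < (1ull << p) && m <= 0xffffffffull) // exact for all n <= nmax
// { *magic = (uint)m; *shift = p; return; }
// }
// // d == 1 is typically special-cased by the caller
// }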
bool bst_sgemm_nt(hipStream_t stream,
const uint2* lut,
const float* a,
const float* b,
bhalf* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks_a, uint ctx_blks_b, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadStateA = ctx_blks_a * block_size * szHeadState;
uint szCtxHeadStateB = ctx_blks_b * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint loops = CEIL_DIV(state_dim, 64);
bool k64 = (state_dim & 63) == 0;
dim3 grid(blocks, batch_dim, head_dim);
if (block_size == 32)
{
if (k64)
hipLaunchKernelGGL(( bst_sgemm_32x32x64_nt< true>), dim3(grid),dim3(256),0,stream, lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hipLaunchKernelGGL(( bst_sgemm_32x32x64_nt<false>), dim3(grid),dim3(256),0,stream, lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
}
return true;
}
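// Launch notes for bst_sgemm_nt: one thread block per non-zero 32x32 block in
// the sparsity pattern (grid = {blocks, batch_dim, head_dim}); each block
// loops CEIL_DIV(state_dim, 64) times over the contraction dimension, and the
// k64 specialization drops the tail bounds checks when state_dim is a
// multiple of 64.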
#endif // GOOGLE_CUDA | 564ba6618b7a0c4b2e264a1835271497fd3c940b.cu | #if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include "gpu_hmma.h"
#include <stdio.h>
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(128,6) bst_sgemm_32x64x32_xn(
const uint2* __restrict__ Lut,
const bhalf* __restrict__ A,
const float* __restrict__ B,
float* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
__shared__ float fShare[(33 + 64)*32];
uint2* Lut2s = (uint2*)&fShare[(33 + 64)*32];
char* bShare = (char*)&fShare;
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint txb = tid % 16;
uint tyb = tid / 16;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 128)
{
uint2 entry = Lut[i];
entry.x *= 32*32; // 1024 entries of A per block
entry.y *= szHeadState*32; // 32 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint txa = tid % 8;
uint tya = tid / 8;
uint tid16 = tid & 16;
uint tid96 = tid & 96;
uint loadB = ((tid / 2) % 8) * 4*4;
uint loadA = (tid % 2) * 4*4;
// each warp handles a quarter of the weights
loadA += tid96;
// second half of warp starts 16 rows down
loadB += tid16 * 64*4;
loadA += tid16 * 32*4;
uint storB = (tyb*64 + txb*4) * 4;
uint storA;
if (OP_A == OP_T)
storA = tid * 4*4;
else
{
// Transpose weights on store to shared
// Avoid bank conflicts by shifting writes over by 4 every 4 rows (+txa*4)
storA = (txa*32*4 + tya + txa*4) * 4;
loadA += tid16 * 4; // shift over 4 floats every 4 rows, second half of warp starts 16 rows down
}
uint b = idx_N*64 + txb*4;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*4;
uint offsetB = idx_B*szCtxHeadStateB + tyb*szHeadState + idx_H*szState + b;
bool inB = N64 || b < szState;
// zero accumulation registers
float regC[4][8];
for (int i = 0; i < 4; i++)
for (int j = 0; j < 8; j++)
regC[i][j] = 0.0f;
// Force compiler to fully compute these prior to loop
asm("mov.b32 %0, %0;" : "+r"(loadA) : );
asm("mov.b32 %0, %0;" : "+r"(loadB) : );
asm("mov.b32 %0, %0;" : "+r"(storA) : );
asm("mov.b32 %0, %0;" : "+r"(storB) : );
asm("mov.b32 %0, %0;" : "+r"(offsetA) : );
asm("mov.b32 %0, %0;" : "+r"(offsetB) : );
int idx_lut = 0;
#pragma unroll 1
do
{
//asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
const bhalf* pA = add_ptr_u(A, entry.x + offsetA);
bhalf4 a00 = ldg((const bhalf4*)(pA + 0*32));
bhalf4 a16 = ldg((const bhalf4*)(pA + 16*32));
float4 b00 = {0.0f}, b08 = {0.0f}, b16 = {0.0f}, b24 = {0.0f};
entry.y += offsetB;
if (inB)
{
b00 = ldg((const float4*)(B + (entry.y + 0*szHeadState)));
b08 = ldg((const float4*)(B + (entry.y + 8*szHeadState)));
b16 = ldg((const float4*)(B + (entry.y + 16*szHeadState)));
b24 = ldg((const float4*)(B + (entry.y + 24*szHeadState)));
}
__syncthreads();
float4 fa00 = to_float(a00);
float4 fa16 = to_float(a16);
if (OP_A == OP_T)
{
*(float4*)&bShare[storA + (0*16*32 + 64*32)*4] = fa00;
*(float4*)&bShare[storA + (1*16*32 + 64*32)*4] = fa16;
}
else
{
// transpose the shared store of W
*(float*)&bShare[storA + (0*32 + 0*16 + 64*32)*4] = fa00.x;
*(float*)&bShare[storA + (1*32 + 0*16 + 64*32)*4] = fa00.y;
*(float*)&bShare[storA + (2*32 + 0*16 + 64*32)*4] = fa00.z;
*(float*)&bShare[storA + (3*32 + 0*16 + 64*32)*4] = fa00.w;
*(float*)&bShare[storA + (0*32 + 1*16 + 64*32)*4] = fa16.x;
*(float*)&bShare[storA + (1*32 + 1*16 + 64*32)*4] = fa16.y;
*(float*)&bShare[storA + (2*32 + 1*16 + 64*32)*4] = fa16.z;
*(float*)&bShare[storA + (3*32 + 1*16 + 64*32)*4] = fa16.w;
}
*(float4*)&bShare[storB + 0*64*4] = b00;
*(float4*)&bShare[storB + 8*64*4] = b08;
*(float4*)&bShare[storB + 16*64*4] = b16;
*(float4*)&bShare[storB + 24*64*4] = b24;
__syncthreads();
// computes a 32x64x32 gemm tile with 4x8 register blocking
float regA[4];
float regB[8];
#pragma unroll
for (int j = 0; j < 16; j++)
{
// fetch outer product data
*(float4*)&regA[0] = *(float4*)&bShare[loadA + (32*j + 64*32 + (OP_A == OP_T ? 0 : (j/4)*4))*4]; // shift over 4 floats every 4 rows
*(float4*)&regB[0] = *(float4*)&bShare[loadB + (64*j + 0)*4];
*(float4*)&regB[4] = *(float4*)&bShare[loadB + (64*j + 32)*4];
// accumulate outer product
for (int i = 0; i < 4; i++)
for (int j = 0; j < 8; j++)
regC[i][j] += regA[i] * regB[j];
}
} while (++idx_lut < lut_size);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// printf("%3d %.0f %.0f %.0f %.0f | %.0f %.0f %.0f %.0f | %.0f %.0f %.0f %.0f | %.0f %.0f %.0f %.0f\n", tid,
// regC[0][0], regC[0][1], regC[0][2], regC[0][3],
// regC[1][0], regC[1][1], regC[1][2], regC[1][3],
// regC[2][0], regC[2][1], regC[2][2], regC[2][3],
// regC[3][0], regC[3][1], regC[3][2], regC[3][3]);
tid16 = tid & 16;
tid96 = tid & 96;
uint tn = (tid / 2) % 8;
uint tm = ((tid % 2) + (tid96 / 16))*4 + (tid16 / 16);
bool t16 = tid16 != 0;
float outC[2][8];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 8; j++)
{
float swap = t16 ? regC[2*i + 0][j] : regC[2*i + 1][j];
outC[i][j] = t16 ? regC[2*i + 1][j] : regC[2*i + 0][j];
outC[i][j] += shfl_xor(swap, 16);
}
uint n = idx_N*64 + tn*4;
bool bn00 = N64 || n + 0 < szState;
bool bn32 = N64 || n + 32 < szState;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*32 + tm)*szHeadState + idx_H*szState + n;
store((float4*)(C + (offsetC + szHeadState*0 + 0)), *(float4*)&outC[0][0], 0, bn00);
store((float4*)(C + (offsetC + szHeadState*0 + 32)), *(float4*)&outC[0][4], 0, bn32);
store((float4*)(C + (offsetC + szHeadState*2 + 0)), *(float4*)&outC[1][0], 0, bn00);
store((float4*)(C + (offsetC + szHeadState*2 + 32)), *(float4*)&outC[1][4], 0, bn32);
}
else
{
uint c = idx_N*64 + txb*4;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*32 + tyb)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
float4 zero = {0.0f};
*(float4*)&C[offsetC + szHeadState* 0] = zero;
*(float4*)&C[offsetC + szHeadState* 8] = zero;
*(float4*)&C[offsetC + szHeadState*16] = zero;
*(float4*)&C[offsetC + szHeadState*24] = zero;
}
}
}
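// Summary of bst_sgemm_32x64x32_xn above: each block produces one 32x64 tile
// of C. The uint2 LUT row for block-row idx_M lists which 32x32 bhalf blocks
// of the sparse operand A to multiply against the matching 32-row slabs of
// the dense operand B; a row with an empty LUT entry falls through to the
// final else-branch, which simply zero-fills its tile of C.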
template <bool K64>
__global__ void __launch_bounds__(256,3) bst_sgemm_32x32x64_nt(
const uint2* __restrict__ Lut,
const float* __restrict__ A,
const float* __restrict__ B,
bhalf* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut, uint loops)
{
__shared__ float fShare[65*32*2];
char* bShare = (char*)fShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 16;
uint ty = tid / 16;
uint k = tx * 4;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA00 = idx_B*szCtxHeadStateA + (idx_M*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetB00 = idx_B*szCtxHeadStateB + (idx_N*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetA16 = offsetA00 + szHeadState*16;
uint offsetB16 = offsetB00 + szHeadState*16;
uint tid224 = tid & 224; // 256 - 32
// avoid bank conflicts when writing transpose (+ tx*2)
uint storAB = (tx*32*4 + ty + tx*2)*4;
// 32 threads per tile, each tile reads 8 lines, shifted over by 4
uint loadA = (((tid & 16) >> 3) | (tid & 1)) << 4;
uint loadB = ((tid >> 1) & 7) << 4;
loadA += (tid224 * 32) + (tid224 / 2); // 32*8*4
loadB += (tid224 * 32) + (tid224 / 2); // 32*8*4
// This keeps all prior logic outside of the loops.
asm("mov.b32 %0, %0;" : "+r"(storAB) : );
asm("mov.b32 %0, %0;" : "+r"(loadA) : );
asm("mov.b32 %0, %0;" : "+r"(loadB) : );
float regC[8][4];
for (int i = 0; i < 8; i++)
for (int j = 0; j < 4; j++)
regC[i][j] = 0.0f;
uint loop = 0;
#pragma unroll 1
do
{
float4 a00 = {0}, a16 = {0};
float4 b00 = {0}, b16 = {0};
if (K64 || k < szState)
{
a00 = ldg((const float4*)(add_ptr_u(A, offsetA00)));
a16 = ldg((const float4*)(add_ptr_u(A, offsetA16)));
b00 = ldg((const float4*)(add_ptr_u(B, offsetB00)));
b16 = ldg((const float4*)(add_ptr_u(B, offsetB16)));
}
offsetA00 += 64;
offsetA16 += 64;
offsetB00 += 64;
offsetB16 += 64;
if (!K64)
k += 64;
__syncthreads();
*(float*)&bShare[storAB + (0*32 + 0 + 0*65*32)*4] = a00.x;
*(float*)&bShare[storAB + (1*32 + 0 + 0*65*32)*4] = a00.y;
*(float*)&bShare[storAB + (2*32 + 0 + 0*65*32)*4] = a00.z;
*(float*)&bShare[storAB + (3*32 + 0 + 0*65*32)*4] = a00.w;
*(float*)&bShare[storAB + (0*32 + 16 + 0*65*32)*4] = a16.x;
*(float*)&bShare[storAB + (1*32 + 16 + 0*65*32)*4] = a16.y;
*(float*)&bShare[storAB + (2*32 + 16 + 0*65*32)*4] = a16.z;
*(float*)&bShare[storAB + (3*32 + 16 + 0*65*32)*4] = a16.w;
*(float*)&bShare[storAB + (0*32 + 0 + 1*65*32)*4] = b00.x;
*(float*)&bShare[storAB + (1*32 + 0 + 1*65*32)*4] = b00.y;
*(float*)&bShare[storAB + (2*32 + 0 + 1*65*32)*4] = b00.z;
*(float*)&bShare[storAB + (3*32 + 0 + 1*65*32)*4] = b00.w;
*(float*)&bShare[storAB + (0*32 + 16 + 1*65*32)*4] = b16.x;
*(float*)&bShare[storAB + (1*32 + 16 + 1*65*32)*4] = b16.y;
*(float*)&bShare[storAB + (2*32 + 16 + 1*65*32)*4] = b16.z;
*(float*)&bShare[storAB + (3*32 + 16 + 1*65*32)*4] = b16.w;
__syncthreads();
float regA[8], regB[4];
#pragma unroll
for (int j = 0; j < 4; j++)
{
// fetch outer product data
*(float4*)&regA[0] = *(float4*)&bShare[loadA + (32*j + 0)*4];
*(float4*)&regA[4] = *(float4*)&bShare[loadA + (32*j + 16)*4];
*(float4*)&regB[0] = *(float4*)&bShare[loadB + (32*j + 65*32)*4];
for (int i = 0; i < 8; i++)
for (int j = 0; j < 4; j++)
regC[i][j] += regA[i] * regB[j];
}
#pragma unroll
for (int j = 4; j < 8; j++)
{
*(float2*)&regA[0] = *(float2*)&bShare[loadA + (32*j + 0 + (j/4)*2)*4];
*(float2*)&regA[2] = *(float2*)&bShare[loadA + (32*j + 2 + (j/4)*2)*4];
*(float2*)&regA[4] = *(float2*)&bShare[loadA + (32*j + 16 + (j/4)*2)*4];
*(float2*)&regA[6] = *(float2*)&bShare[loadA + (32*j + 18 + (j/4)*2)*4];
*(float2*)&regB[0] = *(float2*)&bShare[loadB + (32*j + 0 + (j/4)*2 + 65*32)*4];
*(float2*)&regB[2] = *(float2*)&bShare[loadB + (32*j + 2 + (j/4)*2 + 65*32)*4];
for (int i = 0; i < 8; i++)
for (int j = 0; j < 4; j++)
regC[i][j] += regA[i] * regB[j];
}
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
//printf("%3d %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f\n", tid, regC[0][0], regC[0][1], regC[0][2], regC[0][3], regC[4][0], regC[4][1], regC[4][2], regC[4][3]);
// if ((tid & 31) == 0)
// printf("%3d %.0f\n", tid, regC[0][0]);
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*32*32 + tid*2;
// Arrange 8 tiles horizontally in the X direction: ((tid & 224) >> 1)
// Add some spacing to avoid write bank conflicts: (ty << 2)
ty = ((tid & 16) >> 3) + (tid & 1);
tx = ((tid >> 1) & 7) + ((tid & 224) >> 2) + (ty << 2);
uint storC = ty*32*8*4 + tx*4;
tx = tid % 16;
ty = tid / 16;
uint readC = ty*32*8 + tx*2 + ((tid & 192)>>2);
__syncthreads();
*(float4*)&fShare[storC + 0*32*8] = *(float4*)regC[0];
*(float4*)&fShare[storC + 1*32*8] = *(float4*)regC[1];
*(float4*)&fShare[storC + 2*32*8] = *(float4*)regC[2];
*(float4*)&fShare[storC + 3*32*8] = *(float4*)regC[3];
__syncthreads();
float2 c2[8];
for (int i = 0; i < 8; i++)
c2[i] = *(float2*)&fShare[readC + i*32];
// Tree reduce
for (int j = 4; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
c2[i] = ew_add(c2[i], c2[i+j]);
store((bhalf2*)C, c2[0]);
__syncthreads();
*(float4*)&fShare[storC + 0*32*8] = *(float4*)regC[4];
*(float4*)&fShare[storC + 1*32*8] = *(float4*)regC[5];
*(float4*)&fShare[storC + 2*32*8] = *(float4*)regC[6];
*(float4*)&fShare[storC + 3*32*8] = *(float4*)regC[7];
__syncthreads();
for (int i = 0; i < 8; i++)
c2[i] = *(float2*)&fShare[readC + i*32];
// Tree reduce
for (int j = 4; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
c2[i] = ew_add(c2[i], c2[i+j]);
store((bhalf2*)(C + 16*32), c2[0]);
}
bool bst_sgemm_xn(CUstream stream,
const uint2* lut,
const bhalf* a,
const float* b,
float* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks_b, uint ctx_blks_c, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim, uint op, uint magic, uint shift, uint max_lut)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadStateB = ctx_blks_b * block_size * szHeadState;
uint szCtxHeadStateC = ctx_blks_c * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
// compound gridDim.x with m and n coords
uint gridN = CEIL_DIV(state_dim, 64);
uint gridM = ctx_blks_c - 1;
uint gridX = ctx_blks_c * gridN;
uint shared = ((max_lut+1)/2)*2*8; // round up to nearest even, 8 bytes per entry
bool n64 = (state_dim & 63) == 0;
dim3 grid(gridX, batch_dim, head_dim);
if (block_size == 32)
{
if (op == NN_OP) // NN
{
if (n64)
bst_sgemm_32x64x32_xn<OP_N, true><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
bst_sgemm_32x64x32_xn<OP_N,false><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
else // TN
{
if (n64)
bst_sgemm_32x64x32_xn<OP_T, true><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
bst_sgemm_32x64x32_xn<OP_T,false><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
}
return true;
}
bool bst_sgemm_nt(CUstream stream,
const uint2* lut,
const float* a,
const float* b,
bhalf* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks_a, uint ctx_blks_b, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadStateA = ctx_blks_a * block_size * szHeadState;
uint szCtxHeadStateB = ctx_blks_b * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint loops = CEIL_DIV(state_dim, 64);
bool k64 = (state_dim & 63) == 0;
dim3 grid(blocks, batch_dim, head_dim);
if (block_size == 32)
{
if (k64)
bst_sgemm_32x32x64_nt< true><<<grid,256,0,stream>>>(lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
bst_sgemm_32x32x64_nt<false><<<grid,256,0,stream>>>(lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
}
return true;
}
#endif // GOOGLE_CUDA |
270a024e0e69359f763d2ce4c677ec2539fec0c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_cuda.h"
#include "hl_cuda.ph"
#include "hl_aggregate.h"
#include "hl_thread.ph"
#include "hl_matrix_base.cuh"
#include "paddle/utils/Logging.h"
/**
* @brief matrix row operator.
*/
template<class Agg, int blockSize>
__global__ void KeMatrixRowOp(Agg agg,
real *E,
real *Sum,
int dimN) {
__shared__ real sum_s[blockSize];
int cnt = (dimN + blockSize -1) / blockSize;
int rowId = blockIdx.x + blockIdx.y*gridDim.x;
int index = rowId*dimN;
int tid = threadIdx.x;
int lmt = tid;
real tmp = agg.init();
for (int ii = 0; ii < cnt && lmt < dimN; ii++) {
tmp = agg(tmp, E[index + lmt]);
lmt += blockSize;
}
sum_s[tid] = tmp;
__syncthreads();
for (int stride = blockSize/2; stride > 0; stride = stride/2) {
if (tid < stride) {
sum_s[tid] = agg(sum_s[tid], sum_s[tid + stride]);
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
Sum[rowId] = sum_s[0];
}
}
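// Reduction pattern used by KeMatrixRowOp: one block per matrix row; each of
// the blockSize threads strides across the row accumulating a private
// partial, the partials are parked in sum_s[], and the stride-halving loop
// performs a shared-memory tree reduction that leaves the row result in
// sum_s[0] for thread 0 to write out.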
template <class Agg>
void hl_matrix_row_op(Agg agg,
real *A_d,
real *C_d,
int dimM,
int dimN) {
int blocksX = dimM;
int blocksY = 1;
dim3 threads(128, 1);
dim3 grid(blocksX, blocksY);
hipLaunchKernelGGL(( KeMatrixRowOp<Agg, 128>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
agg, A_d, C_d, dimN);
}
void hl_matrix_row_sum(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_row_op(aggregate::sum(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_row_sum failed");
}
void hl_matrix_row_max(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_row_op(aggregate::max(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_row_max failed");
}
void hl_matrix_row_min(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_row_op(aggregate::min(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_row_min failed");
}
/**
* @brief matrix column operator.
*/
template<class Agg>
__global__ void KeMatrixColumnOp(Agg agg,
real *E,
real *Sum,
int dimM,
int dimN) {
int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
real tmp = agg.init();
if (rowIdx < dimN) {
for (int index = 0; index < dimM; index++) {
tmp = agg(tmp, E[dimN * index + rowIdx]);
}
Sum[rowIdx] = tmp;
}
}
template<class Agg, int blockDimX, int blockDimY>
__global__ void KeMatrixColumnOp_S(Agg agg,
real *E,
real *Sum,
int dimM,
int dimN) {
__shared__ real _sum[blockDimX*blockDimY];
int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
int index = threadIdx.y;
real tmp = agg.init();
if (rowIdx < dimN) {
for (; index < dimM;) {
tmp = agg(tmp, E[dimN * index + rowIdx]);
index += blockDimY;
}
}
_sum[threadIdx.x + threadIdx.y*blockDimX] = tmp;
__syncthreads();
if (rowIdx < dimN) {
if (threadIdx.y ==0) {
real tmp = agg.init();
for (int i=0; i < blockDimY; i++) {
tmp = agg(tmp, _sum[threadIdx.x + i*blockDimX]);
}
Sum[rowIdx] = tmp;
}
}
}
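// KeMatrixColumnOp_S tiles the column reduction: a blockDimX x blockDimY
// (here 32x32) block covers 32 consecutive columns (rowIdx actually indexes a
// column despite its name); threads sharing a threadIdx.x walk down their
// column in steps of blockDimY, park partials in _sum[], and the
// threadIdx.y == 0 threads fold the blockDimY partials into Sum[rowIdx].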
template <class Agg>
void hl_matrix_column_op(Agg agg,
real *A_d,
real *C_d,
int dimM,
int dimN) {
if (dimN >= 8192) {
int blocksX = (dimN + 128 -1) / 128;
int blocksY = 1;
dim3 threads(128, 1);
dim3 grid(blocksX, blocksY);
hipLaunchKernelGGL(( KeMatrixColumnOp<Agg>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
agg, A_d, C_d, dimM, dimN);
} else {
int blocksX = (dimN + 32 -1) / 32;
int blocksY = 1;
dim3 threads(32, 32);
dim3 grid(blocksX, blocksY);
hipLaunchKernelGGL(( KeMatrixColumnOp_S<Agg, 32, 32>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
agg, A_d, C_d, dimM, dimN);
}
return;
}
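// Dispatch heuristic: for wide matrices (dimN >= 8192) one thread per column
// is already enough parallelism, so the simple kernel is used; for narrower
// matrices the shared-memory 32x32 variant lets 32 threads cooperate on each
// column, which presumably helps when there are too few columns to fill the
// device on their own.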
void hl_matrix_column_sum(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_column_op(aggregate::sum(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_column_sum failed");
}
void hl_matrix_column_max(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_column_op(aggregate::max(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_column_max failed");
}
void hl_matrix_column_min(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_column_op(aggregate::min(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_column_min failed");
}
template <int blockSize>
__global__ void KeVectorSum(real *E, real *Sum, int dimM) {
__shared__ double sum_s[blockSize];
int tid = threadIdx.x;
int index = blockIdx.y*blockDim.x+threadIdx.x;
sum_s[tid] = 0.0f;
while (index < dimM) {
sum_s[tid] += E[index];
index += blockDim.x*gridDim.y;
}
__syncthreads();
for (int stride = blockSize/2; stride > 0; stride = stride/2) {
if (tid < stride) {
sum_s[tid] += sum_s[tid + stride];
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
Sum[blockIdx.y] = sum_s[0];
}
}
void hl_vector_sum(real *A_d, real *C_h, int dimM) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_h);
int blockSize = 128;
int gridSize = 128;
int blocksX = 1;
int blocksY = gridSize;
dim3 threads(blockSize, 1);
dim3 grid(blocksX, blocksY);
struct _hl_event_st hl_event_st = {.cu_event = t_resource.event};
hl_event_t hl_event = &hl_event_st;
bool isNotReady = false;
do {
hl_cuda_event_query(hl_event, isNotReady);
} while (isNotReady == hipErrorNotReady);
hipLaunchKernelGGL(( KeVectorSum<128>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
A_d, t_resource.gpu_mem, dimM);
hipLaunchKernelGGL(( KeVectorSum<128>), dim3(1), dim3(threads), 0, STREAM_DEFAULT ,
t_resource.gpu_mem, t_resource.cpu_mem, 128);
hl_memcpy_async(C_h, t_resource.cpu_mem, sizeof(real), HPPL_STREAM_DEFAULT);
hl_stream_record_event(HPPL_STREAM_DEFAULT, hl_event);
CHECK_SYNC("hl_vector_sum failed");
}
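// hl_vector_sum is a two-pass reduction: the first KeVectorSum launch uses a
// 1x128 grid whose blocks each write one partial into t_resource.gpu_mem, a
// second single-block launch folds those 128 partials into
// t_resource.cpu_mem, and the scalar is copied back asynchronously on
// HPPL_STREAM_DEFAULT. Note the shared accumulator is double even when real
// is float, which preserves precision for long vectors.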
template <int blockSize>
__global__ void KeVectorAbsSum(real *E, real *Sum, int dimM) {
__shared__ double sum_s[blockSize];
int tid = threadIdx.x;
int index = blockIdx.y*blockDim.x+threadIdx.x;
sum_s[tid] = 0.0f;
while (index < dimM) {
sum_s[tid] += abs(E[index]);
index += blockDim.x*gridDim.y;
}
__syncthreads();
for (int stride = blockSize/2; stride > 0; stride = stride/2) {
if (tid < stride) {
sum_s[tid] += sum_s[tid + stride];
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
Sum[blockIdx.y] = sum_s[0];
}
}
void hl_vector_abs_sum(real *A_d, real *C_h, int dimM) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_h);
int blockSize = 128;
int gridSize = 128;
int blocksX = 1;
int blocksY = gridSize;
dim3 threads(blockSize, 1);
dim3 grid(blocksX, blocksY);
struct _hl_event_st hl_event_st = {.cu_event = t_resource.event};
hl_event_t hl_event = &hl_event_st;
bool isNotReady = false;
do {
hl_cuda_event_query(hl_event, isNotReady);
} while (isNotReady == hipErrorNotReady);
hipLaunchKernelGGL(( KeVectorAbsSum<128>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
A_d, t_resource.gpu_mem, dimM);
hipLaunchKernelGGL(( KeVectorAbsSum<128>), dim3(1), dim3(threads), 0, STREAM_DEFAULT ,
t_resource.gpu_mem, t_resource.cpu_mem, 128);
hl_memcpy_async(C_h, t_resource.cpu_mem, sizeof(real), HPPL_STREAM_DEFAULT);
hl_stream_record_event(HPPL_STREAM_DEFAULT, hl_event);
CHECK_SYNC("hl_vector_abs_sum failed");
}
| 270a024e0e69359f763d2ce4c677ec2539fec0c4.cu | /* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_cuda.h"
#include "hl_cuda.ph"
#include "hl_aggregate.h"
#include "hl_thread.ph"
#include "hl_matrix_base.cuh"
#include "paddle/utils/Logging.h"
/**
* @brief matrix row operator.
*/
template<class Agg, int blockSize>
__global__ void KeMatrixRowOp(Agg agg,
real *E,
real *Sum,
int dimN) {
__shared__ real sum_s[blockSize];
int cnt = (dimN + blockSize -1) / blockSize;
int rowId = blockIdx.x + blockIdx.y*gridDim.x;
int index = rowId*dimN;
int tid = threadIdx.x;
int lmt = tid;
real tmp = agg.init();
for (int ii = 0; ii < cnt && lmt < dimN; ii++) {
tmp = agg(tmp, E[index + lmt]);
lmt += blockSize;
}
sum_s[tid] = tmp;
__syncthreads();
for (int stride = blockSize/2; stride > 0; stride = stride/2) {
if (tid < stride) {
sum_s[tid] = agg(sum_s[tid], sum_s[tid + stride]);
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
Sum[rowId] = sum_s[0];
}
}
template <class Agg>
void hl_matrix_row_op(Agg agg,
real *A_d,
real *C_d,
int dimM,
int dimN) {
int blocksX = dimM;
int blocksY = 1;
dim3 threads(128, 1);
dim3 grid(blocksX, blocksY);
KeMatrixRowOp<Agg, 128><<< grid, threads, 0, STREAM_DEFAULT >>>
(agg, A_d, C_d, dimN);
}
void hl_matrix_row_sum(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_row_op(aggregate::sum(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_row_sum failed");
}
void hl_matrix_row_max(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_row_op(aggregate::max(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_row_max failed");
}
void hl_matrix_row_min(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_row_op(aggregate::min(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_row_min failed");
}
/**
* @brief matrix column operator.
*/
template<class Agg>
__global__ void KeMatrixColumnOp(Agg agg,
real *E,
real *Sum,
int dimM,
int dimN) {
int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
real tmp = agg.init();
if (rowIdx < dimN) {
for (int index = 0; index < dimM; index++) {
tmp = agg(tmp, E[dimN * index + rowIdx]);
}
Sum[rowIdx] = tmp;
}
}
template<class Agg, int blockDimX, int blockDimY>
__global__ void KeMatrixColumnOp_S(Agg agg,
real *E,
real *Sum,
int dimM,
int dimN) {
__shared__ real _sum[blockDimX*blockDimY];
int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
int index = threadIdx.y;
real tmp = agg.init();
if (rowIdx < dimN) {
for (; index < dimM;) {
tmp = agg(tmp, E[dimN * index + rowIdx]);
index += blockDimY;
}
}
_sum[threadIdx.x + threadIdx.y*blockDimX] = tmp;
__syncthreads();
if (rowIdx < dimN) {
if (threadIdx.y ==0) {
real tmp = agg.init();
for (int i=0; i < blockDimY; i++) {
tmp = agg(tmp, _sum[threadIdx.x + i*blockDimX]);
}
Sum[rowIdx] = tmp;
}
}
}
template <class Agg>
void hl_matrix_column_op(Agg agg,
real *A_d,
real *C_d,
int dimM,
int dimN) {
if (dimN >= 8192) {
int blocksX = (dimN + 128 -1) / 128;
int blocksY = 1;
dim3 threads(128, 1);
dim3 grid(blocksX, blocksY);
KeMatrixColumnOp<Agg><<< grid, threads, 0, STREAM_DEFAULT >>>
(agg, A_d, C_d, dimM, dimN);
} else {
int blocksX = (dimN + 32 -1) / 32;
int blocksY = 1;
dim3 threads(32, 32);
dim3 grid(blocksX, blocksY);
KeMatrixColumnOp_S<Agg, 32, 32><<< grid, threads, 0, STREAM_DEFAULT>>>
(agg, A_d, C_d, dimM, dimN);
}
return;
}
void hl_matrix_column_sum(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_column_op(aggregate::sum(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_column_sum failed");
}
void hl_matrix_column_max(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_column_op(aggregate::max(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_column_max failed");
}
void hl_matrix_column_min(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_column_op(aggregate::min(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_column_min failed");
}
template <int blockSize>
__global__ void KeVectorSum(real *E, real *Sum, int dimM) {
__shared__ double sum_s[blockSize];
int tid = threadIdx.x;
int index = blockIdx.y*blockDim.x+threadIdx.x;
sum_s[tid] = 0.0f;
while (index < dimM) {
sum_s[tid] += E[index];
index += blockDim.x*gridDim.y;
}
__syncthreads();
for (int stride = blockSize/2; stride > 0; stride = stride/2) {
if (tid < stride) {
sum_s[tid] += sum_s[tid + stride];
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
Sum[blockIdx.y] = sum_s[0];
}
}
void hl_vector_sum(real *A_d, real *C_h, int dimM) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_h);
int blockSize = 128;
int gridSize = 128;
int blocksX = 1;
int blocksY = gridSize;
dim3 threads(blockSize, 1);
dim3 grid(blocksX, blocksY);
struct _hl_event_st hl_event_st = {.cu_event = t_resource.event};
hl_event_t hl_event = &hl_event_st;
bool isNotReady = false;
do {
hl_cuda_event_query(hl_event, isNotReady);
} while (isNotReady == cudaErrorNotReady);
KeVectorSum<128><<< grid, threads, 0, STREAM_DEFAULT >>>
(A_d, t_resource.gpu_mem, dimM);
KeVectorSum<128><<< 1, threads, 0, STREAM_DEFAULT >>>
(t_resource.gpu_mem, t_resource.cpu_mem, 128);
hl_memcpy_async(C_h, t_resource.cpu_mem, sizeof(real), HPPL_STREAM_DEFAULT);
hl_stream_record_event(HPPL_STREAM_DEFAULT, hl_event);
CHECK_SYNC("hl_vector_sum failed");
}
template <int blockSize>
__global__ void KeVectorAbsSum(real *E, real *Sum, int dimM) {
__shared__ double sum_s[blockSize];
int tid = threadIdx.x;
int index = blockIdx.y*blockDim.x+threadIdx.x;
sum_s[tid] = 0.0f;
while (index < dimM) {
sum_s[tid] += abs(E[index]);
index += blockDim.x*gridDim.y;
}
__syncthreads();
for (int stride = blockSize/2; stride > 0; stride = stride/2) {
if (tid < stride) {
sum_s[tid] += sum_s[tid + stride];
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
Sum[blockIdx.y] = sum_s[0];
}
}
void hl_vector_abs_sum(real *A_d, real *C_h, int dimM) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_h);
int blockSize = 128;
int gridSize = 128;
int blocksX = 1;
int blocksY = gridSize;
dim3 threads(blockSize, 1);
dim3 grid(blocksX, blocksY);
struct _hl_event_st hl_event_st = {.cu_event = t_resource.event};
hl_event_t hl_event = &hl_event_st;
bool isNotReady = false;
do {
hl_cuda_event_query(hl_event, isNotReady);
} while (isNotReady == cudaErrorNotReady);
KeVectorAbsSum<128><<< grid, threads, 0, STREAM_DEFAULT >>>
(A_d, t_resource.gpu_mem, dimM);
KeVectorAbsSum<128><<< 1, threads, 0, STREAM_DEFAULT >>>
(t_resource.gpu_mem, t_resource.cpu_mem, 128);
hl_memcpy_async(C_h, t_resource.cpu_mem, sizeof(real), HPPL_STREAM_DEFAULT);
hl_stream_record_event(HPPL_STREAM_DEFAULT, hl_event);
CHECK_SYNC("hl_vector_abs_sum failed");
}
|
1be565f17fac947de2ffd8b3504440a23a045b72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Mandelbrot explorer, based on my old Julia demo plus parts of Nicolas Melot's Lab 1 code.
// CPU only! Your task: Rewrite for CUDA! Test and evaluate performance.
// Compile with:
// gcc interactiveMandelbrot.cpp -shared-libgcc -lstdc++-static -o interactiveMandelbrot -lglut -lGL
// or
// g++ interactiveMandelbrot.cpp -o interactiveMandelbrot -lglut -lGL
// Your CUDA version should compile with something like
// nvcc -lglut -lGL interactiveMandelbrotCUDA.cu -o interactiveMandelbrotCUDA
// Preliminary version 2014-11-30
// Cleaned a bit more 2014-12-01
// Corrected the missing glRasterPos2i 2014-12-03
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#include <GL/gl.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Image data
unsigned char *pixels = NULL;
// GPU data
unsigned int totalPixelBytes;
unsigned char *devPixels = NULL;
dim3 dimBlock;
dim3 dimGrid;
// Select precision here! float or double!
#define MYFLOAT float
#define DIM 512
struct drawingArguments_t
{
// User controlled parameters
int maxiter;
MYFLOAT offsetx;
MYFLOAT offsety;
MYFLOAT zoom;
MYFLOAT scale;
int imageWidth;
int imageHeight;
};
struct drawingArguments_t g_drawArgs;
hipEvent_t startEvent;
hipEvent_t endEvent;
// Init image data
void initBitmap(int width, int height)
{
if (pixels) free(pixels);
pixels = (unsigned char *)malloc(width * height * 4);
g_drawArgs.imageWidth = width;
g_drawArgs.imageHeight = height;
g_drawArgs.maxiter = 20;
g_drawArgs.offsetx = -200;
g_drawArgs.offsety = 0;
g_drawArgs.zoom = 0;
g_drawArgs.scale = 1.5;
hipEventCreate(&startEvent);
hipEventCreate(&endEvent);
totalPixelBytes = width * height * 4;
hipMalloc((void**)&devPixels, totalPixelBytes);
int blocksize = 16;
int gridsize = DIM / blocksize;
dimBlock = dim3(blocksize, blocksize);
dimGrid = dim3(gridsize, gridsize);
}
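// Note: the launch configuration fixed above (16x16 threads, DIM/16 blocks)
// always covers a DIM x DIM area and compFracGPU has no bounds check, so the
// kernel effectively assumes the window keeps its initial DIM x DIM size:
// enlarging the GLUT window leaves the extra pixels uncomputed, shrinking it
// would write past the smaller devPixels buffer, and each Reshape leaks the
// previous devPixels allocation because it is never freed before the new
// hipMalloc.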
// Complex number class
struct hipComplex
{
MYFLOAT r;
MYFLOAT i;
__device__
hipComplex( MYFLOAT a, MYFLOAT b ) : r(a), i(b) {}
__device__
float magnitude2( void )
{
return r * r + i * i;
}
__device__
hipComplex operator*(const hipComplex& a)
{
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__
hipComplex operator+(const hipComplex& a)
{
return hipComplex(r+a.r, i+a.i);
}
};
__device__
int mandelbrot( int x, int y, struct drawingArguments_t drawArgs)
{
MYFLOAT jx = drawArgs.scale * (MYFLOAT)(drawArgs.imageWidth/2 - x + drawArgs.offsetx/drawArgs.scale)/(drawArgs.imageWidth/2);
MYFLOAT jy = drawArgs.scale * (MYFLOAT)(drawArgs.imageHeight/2 - y + drawArgs.offsety/drawArgs.scale)/(drawArgs.imageWidth/2);
hipComplex c(jx, jy);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<drawArgs.maxiter; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return i;
}
return i;
}
__global__
void compFracGPU(unsigned char *ptr, struct drawingArguments_t drawArgs)
{
unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
int offset = x + y * drawArgs.imageWidth;
// now calculate the value at that position
int fractalValue = mandelbrot( x, y, drawArgs);
// Colorize it
int red = 255 * fractalValue/drawArgs.maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/drawArgs.maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/drawArgs.maxiter;
if (blue > 255) blue = 255 - blue;
ptr[offset*4 + 0] = red;
ptr[offset*4 + 1] = green;
ptr[offset*4 + 2] = blue;
ptr[offset*4 + 3] = 255;
}
void computeFractal( unsigned char *ptr, unsigned char *devPtr)
{
float timeGPU;
hipEventRecord(startEvent, 0);
hipLaunchKernelGGL(( compFracGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, devPtr, g_drawArgs);
hipEventRecord(endEvent, 0);
hipMemcpy( ptr, devPtr, totalPixelBytes, hipMemcpyDeviceToHost );
hipEventSynchronize(endEvent);
hipEventElapsedTime(&timeGPU, startEvent, endEvent);
printf("GPU time: %fms\n", timeGPU);
// map from x, y to pixel position
/*
for (int x = 0; x < gImageWidth; x++)
for (int y = 0; y < gImageHeight; y++)
{
int offset = x + y * gImageWidth;
// now calculate the value at that position
int fractalValue = mandelbrot( x, y);
// Colorize it
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
ptr[offset*4 + 0] = red;
ptr[offset*4 + 1] = green;
ptr[offset*4 + 2] = blue;
ptr[offset*4 + 3] = 255;
}
*/
}
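// Timing note: startEvent/endEvent bracket only the kernel launch, so the
// reported "GPU time" excludes the device-to-host hipMemcpy that follows;
// the synchronous copy already waits for the kernel, and
// hipEventSynchronize(endEvent) then guarantees the end event has completed
// before hipEventElapsedTime reads the interval.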
char print_help = 0;
// Yuck, GLUT text is old junk that should be avoided... but it will have to do
static void print_str(void *font, const char *string)
{
int i;
for (i = 0; string[i]; i++)
glutBitmapCharacter(font, string[i]);
}
void PrintHelp()
{
if (print_help)
{
glPushMatrix();
glLoadIdentity();
glOrtho(-0.5, 639.5, -0.5, 479.5, -1.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.f, 0.f, 0.5f, 0.5f);
glRecti(40, 40, 600, 440);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2i(300, 420);
print_str(GLUT_BITMAP_HELVETICA_18, "Help");
glRasterPos2i(60, 390);
print_str(GLUT_BITMAP_HELVETICA_18, "h - Toggle Help");
glRasterPos2i(60, 300);
print_str(GLUT_BITMAP_HELVETICA_18, "Left click + drag - move picture");
glRasterPos2i(60, 270);
print_str(GLUT_BITMAP_HELVETICA_18,
"Right click + drag up/down - unzoom/zoom");
glRasterPos2i(60, 240);
print_str(GLUT_BITMAP_HELVETICA_18, "+ - Increase max. iterations by 32");
glRasterPos2i(60, 210);
print_str(GLUT_BITMAP_HELVETICA_18, "- - Decrease max. iterations by 32");
glRasterPos2i(0, 0);
glDisable(GL_BLEND);
glPopMatrix();
}
}
// Compute fractal and display image
void Draw()
{
computeFractal(pixels, devPixels);
// Dump the whole picture onto the screen. (Old-style OpenGL but without lots of geometry that doesn't matter so much.)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( g_drawArgs.imageWidth, g_drawArgs.imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels );
if (print_help)
PrintHelp();
glutSwapBuffers();
}
char explore = 1;
static void Reshape(int width, int height)
{
glViewport(0, 0, width, height);
glLoadIdentity();
glOrtho(-0.5f, width - 0.5f, -0.5f, height - 0.5f, -1.f, 1.f);
initBitmap(width, height);
glutPostRedisplay();
}
int mouse_x, mouse_y, mouse_btn;
// Mouse down
static void mouse_button(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
// Record start position
mouse_x = x;
mouse_y = y;
mouse_btn = button;
}
}
// Drag mouse
static void mouse_motion(int x, int y)
{
if (mouse_btn == 0)
// Ordinary mouse button - move
{
g_drawArgs.offsetx += (x - mouse_x)*g_drawArgs.scale;
mouse_x = x;
g_drawArgs.offsety += (mouse_y - y)*g_drawArgs.scale;
mouse_y = y;
glutPostRedisplay();
}
else
// Alt mouse button - scale
{
g_drawArgs.scale *= pow(1.1, y - mouse_y);
mouse_y = y;
glutPostRedisplay();
}
}
void KeyboardProc(unsigned char key, int x, int y)
{
switch (key)
{
case 27: /* Escape key */
case 'q':
case 'Q':
exit(0);
break;
case '+':
g_drawArgs.maxiter += g_drawArgs.maxiter < 1024 - 32 ? 32 : 0;
break;
case '-':
g_drawArgs.maxiter -= g_drawArgs.maxiter > 0 + 32 ? 32 : 0;
break;
case 'h':
print_help = !print_help;
break;
}
glutPostRedisplay();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( DIM, DIM );
glutCreateWindow("Mandelbrot explorer (GPU)");
glutDisplayFunc(Draw);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
glutKeyboardFunc(KeyboardProc);
glutReshapeFunc(Reshape);
initBitmap(DIM, DIM);
glutMainLoop();
}
| 1be565f17fac947de2ffd8b3504440a23a045b72.cu | // Mandelbrot explorer, based on my old Julia demo plus parts of Nicolas Melot's Lab 1 code.
// CPU only! Your task: Rewrite for CUDA! Test and evaluate performance.
// Compile with:
// gcc interactiveMandelbrot.cpp -shared-libgcc -lstdc++-static -o interactiveMandelbrot -lglut -lGL
// or
// g++ interactiveMandelbrot.cpp -o interactiveMandelbrot -lglut -lGL
// Your CUDA version should compile with something like
// nvcc -lglut -lGL interactiveMandelbrotCUDA.cu -o interactiveMandelbrotCUDA
// Preliminary version 2014-11-30
// Cleaned a bit more 2014-12-01
// Corrected the missing glRasterPos2i 2014-12-03
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#include <GL/gl.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Image data
unsigned char *pixels = NULL;
// GPU data
unsigned int totalPixelBytes;
unsigned char *devPixels = NULL;
dim3 dimBlock;
dim3 dimGrid;
// Select precision here! float or double!
#define MYFLOAT float
#define DIM 512
struct drawingArguments_t
{
// User controlled parameters
int maxiter;
MYFLOAT offsetx;
MYFLOAT offsety;
MYFLOAT zoom;
MYFLOAT scale;
int imageWidth;
int imageHeight;
};
struct drawingArguments_t g_drawArgs;
cudaEvent_t startEvent;
cudaEvent_t endEvent;
// Init image data
void initBitmap(int width, int height)
{
if (pixels) free(pixels);
pixels = (unsigned char *)malloc(width * height * 4);
g_drawArgs.imageWidth = width;
g_drawArgs.imageHeight = height;
g_drawArgs.maxiter = 20;
g_drawArgs.offsetx = -200;
g_drawArgs.offsety = 0;
g_drawArgs.zoom = 0;
g_drawArgs.scale = 1.5;
cudaEventCreate(&startEvent);
cudaEventCreate(&endEvent);
totalPixelBytes = width * height * 4;
cudaMalloc((void**)&devPixels, totalPixelBytes);
int blocksize = 16;
int gridsize = DIM / blocksize;
dimBlock = dim3(blocksize, blocksize);
dimGrid = dim3(gridsize, gridsize);
}
// Complex number class
struct cuComplex
{
MYFLOAT r;
MYFLOAT i;
__device__
cuComplex( MYFLOAT a, MYFLOAT b ) : r(a), i(b) {}
__device__
float magnitude2( void )
{
return r * r + i * i;
}
__device__
cuComplex operator*(const cuComplex& a)
{
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__
cuComplex operator+(const cuComplex& a)
{
return cuComplex(r+a.r, i+a.i);
}
};
__device__
int mandelbrot( int x, int y, struct drawingArguments_t drawArgs)
{
MYFLOAT jx = drawArgs.scale * (MYFLOAT)(drawArgs.imageWidth/2 - x + drawArgs.offsetx/drawArgs.scale)/(drawArgs.imageWidth/2);
MYFLOAT jy = drawArgs.scale * (MYFLOAT)(drawArgs.imageHeight/2 - y + drawArgs.offsety/drawArgs.scale)/(drawArgs.imageWidth/2);
cuComplex c(jx, jy);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<drawArgs.maxiter; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return i;
}
return i;
}
__global__
void compFracGPU(unsigned char *ptr, struct drawingArguments_t drawArgs)
{
unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
int offset = x + y * drawArgs.imageWidth;
// now calculate the value at that position
int fractalValue = mandelbrot( x, y, drawArgs);
// Colorize it
int red = 255 * fractalValue/drawArgs.maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/drawArgs.maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/drawArgs.maxiter;
if (blue > 255) blue = 255 - blue;
ptr[offset*4 + 0] = red;
ptr[offset*4 + 1] = green;
ptr[offset*4 + 2] = blue;
ptr[offset*4 + 3] = 255;
}
void computeFractal( unsigned char *ptr, unsigned char *devPtr)
{
float timeGPU;
cudaEventRecord(startEvent, 0);
compFracGPU<<<dimGrid, dimBlock>>>(devPtr, g_drawArgs);
cudaEventRecord(endEvent, 0);
cudaMemcpy( ptr, devPtr, totalPixelBytes, cudaMemcpyDeviceToHost );
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&timeGPU, startEvent, endEvent);
printf("GPU time: %fms\n", timeGPU);
// map from x, y to pixel position
/*
for (int x = 0; x < gImageWidth; x++)
for (int y = 0; y < gImageHeight; y++)
{
int offset = x + y * gImageWidth;
// now calculate the value at that position
int fractalValue = mandelbrot( x, y);
// Colorize it
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
ptr[offset*4 + 0] = red;
ptr[offset*4 + 1] = green;
ptr[offset*4 + 2] = blue;
ptr[offset*4 + 3] = 255;
}
*/
}
char print_help = 0;
// Yuck, GLUT text is old junk that should be avoided... but it will have to do
static void print_str(void *font, const char *string)
{
int i;
for (i = 0; string[i]; i++)
glutBitmapCharacter(font, string[i]);
}
void PrintHelp()
{
if (print_help)
{
glPushMatrix();
glLoadIdentity();
glOrtho(-0.5, 639.5, -0.5, 479.5, -1.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.f, 0.f, 0.5f, 0.5f);
glRecti(40, 40, 600, 440);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2i(300, 420);
print_str(GLUT_BITMAP_HELVETICA_18, "Help");
glRasterPos2i(60, 390);
print_str(GLUT_BITMAP_HELVETICA_18, "h - Toggle Help");
glRasterPos2i(60, 300);
print_str(GLUT_BITMAP_HELVETICA_18, "Left click + drag - move picture");
glRasterPos2i(60, 270);
print_str(GLUT_BITMAP_HELVETICA_18,
"Right click + drag up/down - unzoom/zoom");
glRasterPos2i(60, 240);
print_str(GLUT_BITMAP_HELVETICA_18, "+ - Increase max. iterations by 32");
glRasterPos2i(60, 210);
print_str(GLUT_BITMAP_HELVETICA_18, "- - Decrease max. iterations by 32");
glRasterPos2i(0, 0);
glDisable(GL_BLEND);
glPopMatrix();
}
}
// Compute fractal and display image
void Draw()
{
computeFractal(pixels, devPixels);
// Dump the whole picture onto the screen. (Old-style OpenGL but without lots of geometry that doesn't matter so much.)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( g_drawArgs.imageWidth, g_drawArgs.imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels );
if (print_help)
PrintHelp();
glutSwapBuffers();
}
char explore = 1;
static void Reshape(int width, int height)
{
glViewport(0, 0, width, height);
glLoadIdentity();
glOrtho(-0.5f, width - 0.5f, -0.5f, height - 0.5f, -1.f, 1.f);
initBitmap(width, height);
glutPostRedisplay();
}
int mouse_x, mouse_y, mouse_btn;
// Mouse down
static void mouse_button(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
// Record start position
mouse_x = x;
mouse_y = y;
mouse_btn = button;
}
}
// Drag mouse
static void mouse_motion(int x, int y)
{
if (mouse_btn == 0)
// Ordinary mouse button - move
{
g_drawArgs.offsetx += (x - mouse_x)*g_drawArgs.scale;
mouse_x = x;
g_drawArgs.offsety += (mouse_y - y)*g_drawArgs.scale;
mouse_y = y;
glutPostRedisplay();
}
else
// Alt mouse button - scale
{
g_drawArgs.scale *= pow(1.1, y - mouse_y);
mouse_y = y;
glutPostRedisplay();
}
}
void KeyboardProc(unsigned char key, int x, int y)
{
switch (key)
{
case 27: /* Escape key */
case 'q':
case 'Q':
exit(0);
break;
case '+':
g_drawArgs.maxiter += g_drawArgs.maxiter < 1024 - 32 ? 32 : 0;
break;
case '-':
g_drawArgs.maxiter -= g_drawArgs.maxiter > 0 + 32 ? 32 : 0;
break;
case 'h':
print_help = !print_help;
break;
}
glutPostRedisplay();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( DIM, DIM );
glutCreateWindow("Mandelbrot explorer (GPU)");
glutDisplayFunc(Draw);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
glutKeyboardFunc(KeyboardProc);
glutReshapeFunc(Reshape);
initBitmap(DIM, DIM);
glutMainLoop();
}
|
5e90d2db3ec7293dd2095d9c16f5cf80de21ea33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <wb.h>
#define NUM_BINS 4096
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
__global__ void histo_kernel(unsigned int* input, unsigned int* bins, int inputLength){
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
// All threads handle blockDim.x * gridDim.x
// consecutive elements in each iteration
for (unsigned int i = tid; i < inputLength; i += blockDim.x*gridDim.x){
atomicAdd(&(bins[input[i]]), 1);
}
__syncthreads();
}
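// histo_kernel: every thread walks the input with a grid-stride loop and
// bumps the matching bin with a global atomicAdd, so correctness comes from
// the atomics alone (the trailing __syncthreads() is not required for the
// result). Contention on popular bins is what the privatized variant below
// is intended to reduce.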
__global__ void histogram_privatized_kernel(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins){
unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
// privatized bins
extern __shared__ unsigned int histo_s[];
for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x){
histo_s[binIdx] = 0;
}
__syncthreads();
// histogram
for (unsigned int i = tid; i < num_elements; i += blockDim.x*gridDim.x){
atomicAdd(&(histo_s[input[i]]), 1);
}
__syncthreads();
// commit to global memory
for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x){
atomicAdd(&(bins[binIdx]), histo_s[binIdx]);
}
}
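// histogram_privatized_kernel: each block first builds its own copy of the
// histogram in dynamic shared memory (hence the third launch parameter of
// NUM_BINS*sizeof(unsigned int) in the commented-out call in main), then
// commits it with one atomicAdd per bin per block, trading NUM_BINS extra
// atomics for far less contention on hot bins.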
__global__ void postProcess(unsigned int* bins){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid < NUM_BINS){
if (bins[tid] > 127){
bins[tid] = 127;
}
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0),
&inputLength, "Integer");
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
wbTime_start(GPU, "Allocating GPU memory.");
// TODO: Allocate GPU memory here
hipMalloc((void **)&deviceInput, inputLength*sizeof(unsigned int));
hipMalloc((void **)&deviceBins, NUM_BINS*sizeof(unsigned int));
// zero the bins before accumulation: hipMalloc does not initialize memory
hipMemset(deviceBins, 0, NUM_BINS*sizeof(unsigned int));
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
// TODO: Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, inputLength*sizeof(unsigned int), hipMemcpyHostToDevice);
//hipMemcpy(deviceBins, hostBins, NUM_BINS*sizeof(unsigned int), hipMemcpyHostToDevice);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Launch kernel
// ----------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
// TODO: Perform kernel computation here
dim3 gridDim(ceil(inputLength/32)+1, 1, 1);
dim3 blockDim(32, 1, 1);
histo_kernel << <gridDim, blockDim >> >(deviceInput, deviceBins, inputLength);
//histogram_privatized_kernel << <gridDim, blockDim, NUM_BINS*sizeof(unsigned int) >> >(deviceInput, deviceBins, inputLength, NUM_BINS);
//postProcess << <gridDim, blockDim >> >(deviceBins);
// You should call the following lines after you call the kernel.
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
// TODO: Copy the GPU memory back to the CPU here
hipMemcpy(hostBins, deviceBins, NUM_BINS*sizeof(unsigned int), hipMemcpyDeviceToHost);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
// TODO: Free the GPU memory here
hipFree(deviceBins);
hipFree(deviceInput);
wbTime_stop(GPU, "Freeing GPU Memory");
// Verify correctness
// -----------------------------------------------------
wbSolution(args, hostBins, NUM_BINS);
free(hostBins);
free(hostInput);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
| 5e90d2db3ec7293dd2095d9c16f5cf80de21ea33.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <wb.h>
#define NUM_BINS 4096
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
__global__ void histo_kernel(unsigned int* input, unsigned int* bins, int inputLength){
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
// All threads handle blockDim.x * gridDim.x
// consecutive elements in each iteration
for (unsigned int i = tid; i < inputLength; i += blockDim.x*gridDim.x){
atomicAdd(&(bins[input[i]]), 1);
}
__syncthreads();
}
__global__ void histogram_privatized_kernel(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins){
unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
// privatized bins
extern __shared__ unsigned int histo_s[];
for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x){
histo_s[binIdx] = 0;
}
__syncthreads();
// histogram
for (unsigned int i = tid; i < num_elements; i += blockDim.x*gridDim.x){
atomicAdd(&(histo_s[input[i]]), 1);
}
__syncthreads();
// commit to global memory
for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x){
atomicAdd(&(bins[binIdx]), histo_s[binIdx]);
}
}
__global__ void postProcess(unsigned int* bins){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid < NUM_BINS){
if (bins[tid] > 127){
bins[tid] = 127;
}
}
}
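// Reference check (illustrative sketch, not part of the original lab code;
// the helper name and its use are our own): a serial host histogram with the
// same 127 clamp as postProcess(), handy for validating either kernel above,
// e.g. by comparing it against hostBins after the device copy in main.
static void histogramReferenceCPU(const unsigned int *input,
unsigned int *bins, int inputLength) {
for (int b = 0; b < NUM_BINS; b++) bins[b] = 0; // start from empty bins
for (int i = 0; i < inputLength; i++) bins[input[i]] += 1; // serial count, no atomics needed
for (int b = 0; b < NUM_BINS; b++) if (bins[b] > 127) bins[b] = 127; // saturate like postProcess
}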
int main(int argc, char *argv[]) {
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0),
&inputLength, "Integer");
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
wbTime_start(GPU, "Allocating GPU memory.");
// TODO: Allocate GPU memory here
cudaMalloc((void **)&deviceInput, inputLength*sizeof(unsigned int));
cudaMalloc((void **)&deviceBins, NUM_BINS*sizeof(unsigned int));
// zero the bins before accumulation: cudaMalloc does not initialize memory
cudaMemset(deviceBins, 0, NUM_BINS*sizeof(unsigned int));
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
// TODO: Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, inputLength*sizeof(unsigned int), cudaMemcpyHostToDevice);
//cudaMemcpy(deviceBins, hostBins, NUM_BINS*sizeof(unsigned int), cudaMemcpyHostToDevice);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Launch kernel
// ----------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
// TODO: Perform kernel computation here
dim3 gridDim(ceil(inputLength/32)+1, 1, 1);
dim3 blockDim(32, 1, 1);
histo_kernel << <gridDim, blockDim >> >(deviceInput, deviceBins, inputLength);
//histogram_privatized_kernel << <gridDim, blockDim, NUM_BINS*sizeof(unsigned int) >> >(deviceInput, deviceBins, inputLength, NUM_BINS);
//postProcess << <gridDim, blockDim >> >(deviceBins);
// You should call the following lines after you call the kernel.
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
// TODO: Copy the GPU memory back to the CPU here
cudaMemcpy(hostBins, deviceBins, NUM_BINS*sizeof(unsigned int), cudaMemcpyDeviceToHost);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
// TODO: Free the GPU memory here
cudaFree(deviceBins);
cudaFree(deviceInput);
wbTime_stop(GPU, "Freeing GPU Memory");
// Verify correctness
// -----------------------------------------------------
wbSolution(args, hostBins, NUM_BINS);
free(hostBins);
free(hostInput);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|
64bd193c3b0a1b9115eec40fc28202c4df5dbf03.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
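// The kernel below applies 8th-order central differences on a 308^3 array
// (the i,j,k >= 4 and <= N-5 guard leaves a 4-cell halo on each side). The
// coefficient magnitudes 0.8 / 0.2 / 0.038 / 0.0035 applied to the +/-1..4
// neighbours are the standard weights 4/5, 1/5, 4/105, 1/280 (the last two
// rounded), with the alternating signs written out explicitly and the
// per-direction dxinv factors applied to each term.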
__global__ void hypterm_0 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
// Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double _t_1_;
double flux_0kc0jc0ic0;
double _t_2_;
double _t_3_;
double _t_4_;
double _t_7_;
double _t_5_;
double _t_8_;
double _t_9_;
double _t_10_;
double _t_12_;
double flux_1kc0jc0ic0;
double _t_13_;
double _t_14_;
double _t_15_;
double _t_18_;
double _t_16_;
double _t_19_;
double _t_20_;
double _t_21_;
double _t_23_;
double flux_2kc0jc0ic0;
double _t_24_;
double _t_25_;
double _t_26_;
double _t_29_;
double _t_27_;
double _t_30_;
double _t_31_;
double _t_32_;
double _t_34_;
double flux_3kc0jc0ic0;
double _t_35_;
double _t_36_;
double _t_37_;
double _t_40_;
double _t_38_;
double _t_41_;
double _t_42_;
double _t_43_;
_t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
flux_0kc0jc0ic0 = dxinv0 * 0.8 * _t_1_;
_t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
flux_0kc0jc0ic0 -= dxinv0 * 0.2 * _t_2_;
_t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
flux_0kc0jc0ic0 += dxinv0 * 0.038 * _t_3_;
_t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
flux_0kc0jc0ic0 -= dxinv0 * 0.0035 * _t_4_;
_t_7_ = cons_2[k][j+1][i];
_t_7_ -= cons_2[k][j-1][i];
_t_5_ = dxinv1 * 0.8 * _t_7_;
_t_8_ = cons_2[k][j+2][i];
_t_8_ -= cons_2[k][j-2][i];
_t_5_ -= dxinv1 * 0.2 * _t_8_;
_t_9_ = cons_2[k][j+3][i];
_t_9_ -= cons_2[k][j-3][i];
_t_5_ += dxinv1 * 0.038 * _t_9_;
_t_10_ = cons_2[k][j+4][i];
_t_10_ -= cons_2[k][j-4][i];
_t_5_ -= dxinv1 * 0.0035 * _t_10_;
flux_0kc0jc0ic0 -= _t_5_;
flux_0[k][j][i] = flux_0kc0jc0ic0;
_t_12_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_12_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_12_ += q_4[k][j][i+1];
_t_12_ -= q_4[k][j][i-1];
flux_1kc0jc0ic0 = dxinv0 * 0.8 * _t_12_;
_t_13_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_13_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_13_ += q_4[k][j][i+2];
_t_13_ -= q_4[k][j][i-2];
flux_1kc0jc0ic0 -= dxinv0 * 0.2 * _t_13_;
_t_14_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_14_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_14_ += q_4[k][j][i+3];
_t_14_ -= q_4[k][j][i-3];
flux_1kc0jc0ic0 += dxinv0 * 0.038 * _t_14_;
_t_15_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_15_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_15_ += q_4[k][j][i+4];
_t_15_ -= q_4[k][j][i-4];
flux_1kc0jc0ic0 -= dxinv0 * 0.0035 * _t_15_;
_t_18_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_18_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
_t_16_ = dxinv1 * 0.8 * _t_18_;
_t_19_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_19_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_16_ -= dxinv1 * 0.2 * _t_19_;
_t_20_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_20_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_16_ += dxinv1 * 0.038 * _t_20_;
_t_21_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_21_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_16_ -= dxinv1 * 0.0035 * _t_21_;
flux_1kc0jc0ic0 -= _t_16_;
flux_1[k][j][i] = flux_1kc0jc0ic0;
_t_23_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_23_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
flux_2kc0jc0ic0 = dxinv0 * 0.8 * _t_23_;
_t_24_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_24_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
flux_2kc0jc0ic0 -= dxinv0 * 0.2 * _t_24_;
_t_25_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_25_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
flux_2kc0jc0ic0 += dxinv0 * 0.038 * _t_25_;
_t_26_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_26_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
flux_2kc0jc0ic0 -= dxinv0 * 0.0035 * _t_26_;
_t_29_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_29_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_29_ += q_4[k][j+1][i];
_t_29_ -= q_4[k][j-1][i];
_t_27_ = dxinv1 * 0.8 * _t_29_;
_t_30_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_30_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_30_ += q_4[k][j+2][i];
_t_30_ -= q_4[k][j-2][i];
_t_27_ -= dxinv1 * 0.2 * _t_30_;
_t_31_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_31_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += q_4[k][j+3][i];
_t_31_ -= q_4[k][j-3][i];
_t_27_ += dxinv1 * 0.038 * _t_31_;
_t_32_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_32_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_32_ += q_4[k][j+4][i];
_t_32_ -= q_4[k][j-4][i];
_t_27_ -= dxinv1 * 0.0035 * _t_32_;
flux_2kc0jc0ic0 -= _t_27_;
flux_2[k][j][i] = flux_2kc0jc0ic0;
_t_34_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_34_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
flux_3kc0jc0ic0 = dxinv0 * 0.8 * _t_34_;
_t_35_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_35_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
flux_3kc0jc0ic0 -= dxinv0 * 0.2 * _t_35_;
_t_36_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_36_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
flux_3kc0jc0ic0 += dxinv0 * 0.038 * _t_36_;
_t_37_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_37_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
flux_3kc0jc0ic0 -= dxinv0 * 0.0035 * _t_37_;
_t_40_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_40_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
_t_38_ = dxinv1 * 0.8 * _t_40_;
_t_41_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_41_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_38_ -= dxinv1 * 0.2 * _t_41_;
_t_42_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_42_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_38_ += dxinv1 * 0.038 * _t_42_;
_t_43_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_43_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_38_ -= dxinv1 * 0.0035 * _t_43_;
flux_3kc0jc0ic0 -= _t_38_;
flux_3[k][j][i] = flux_3kc0jc0ic0;
}
}
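// hypterm_1: adds the z-direction (dxinv2) contributions to flux_0..flux_3 with
// the same stencil. Each thread updates four consecutive k-planes (k..k+3), so
// the launch uses a quarter of the k-extent in the grid's z dimension.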
__global__ void hypterm_1 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(4*blockdim_k);
int k = max (k0, 0) + (int)(4*threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double flux0_a, flux1_a, flux2_a, flux3_a;
double flux0_b, flux1_b, flux2_b, flux3_b;
double flux0_c, flux1_c, flux2_c, flux3_c;
double flux0_d, flux1_d, flux2_d, flux3_d;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double flux0_a;
double flux_0kc0jc0ic0;
double _t_1_;
double _t_2_;
double _t_3_;
double _t_4_;
double flux0_b;
double flux_0kp1jc0ic0;
double _t_6_;
double _t_7_;
double _t_8_;
double _t_9_;
double flux0_c;
double flux_0kp2jc0ic0;
double _t_11_;
double _t_12_;
double _t_13_;
double _t_14_;
double flux0_d;
double flux_0kp3jc0ic0;
double _t_16_;
double _t_17_;
double _t_18_;
double _t_19_;
double flux1_a;
double flux_1kc0jc0ic0;
double _t_21_;
double _t_22_;
double _t_23_;
double _t_24_;
double flux1_b;
double flux_1kp1jc0ic0;
double _t_26_;
double _t_27_;
double _t_28_;
double _t_29_;
double flux1_c;
double flux_1kp2jc0ic0;
double _t_31_;
double _t_32_;
double _t_33_;
double _t_34_;
double flux1_d;
double flux_1kp3jc0ic0;
double _t_36_;
double _t_37_;
double _t_38_;
double _t_39_;
double flux2_a;
double flux_2kc0jc0ic0;
double _t_41_;
double _t_42_;
double _t_43_;
double _t_44_;
double flux2_b;
double flux_2kp1jc0ic0;
double _t_46_;
double _t_47_;
double _t_48_;
double _t_49_;
double flux2_c;
double flux_2kp2jc0ic0;
double _t_51_;
double _t_52_;
double _t_53_;
double _t_54_;
double flux2_d;
double flux_2kp3jc0ic0;
double _t_56_;
double _t_57_;
double _t_58_;
double _t_59_;
double flux3_a;
double flux_3kc0jc0ic0;
double _t_61_;
double _t_62_;
double _t_63_;
double _t_64_;
double flux3_b;
double flux_3kp1jc0ic0;
double _t_66_;
double _t_67_;
double _t_68_;
double _t_69_;
double flux3_c;
double flux_3kp2jc0ic0;
double _t_71_;
double _t_72_;
double _t_73_;
double _t_74_;
double flux3_d;
double flux_3kp3jc0ic0;
double _t_76_;
double _t_77_;
double _t_78_;
double _t_79_;
flux0_a = flux_0[k][j][i];
flux_0kc0jc0ic0 = flux0_a;
_t_1_ = cons_3[k+1][j][i];
_t_1_ -= cons_3[k-1][j][i];
flux_0kc0jc0ic0 -= dxinv2 * 0.8 * _t_1_;
_t_2_ = cons_3[k+2][j][i];
_t_2_ -= cons_3[k-2][j][i];
flux_0kc0jc0ic0 += dxinv2 * 0.2 * _t_2_;
_t_3_ = cons_3[k+3][j][i];
_t_3_ -= cons_3[k-3][j][i];
flux_0kc0jc0ic0 -= dxinv2 * 0.038 * _t_3_;
_t_4_ = cons_3[k+4][j][i];
_t_4_ -= cons_3[k-4][j][i];
flux_0kc0jc0ic0 += dxinv2 * 0.0035 * _t_4_;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux0_b = flux_0[k+1][j][i];
flux_0kp1jc0ic0 = flux0_b;
_t_6_ = cons_3[k+2][j][i];
_t_6_ -= cons_3[k][j][i];
flux_0kp1jc0ic0 -= dxinv2 * 0.8 * _t_6_;
_t_7_ = cons_3[k+3][j][i];
_t_7_ -= cons_3[k-1][j][i];
flux_0kp1jc0ic0 += dxinv2 * 0.2 * _t_7_;
_t_8_ = cons_3[k+4][j][i];
_t_8_ -= cons_3[k-2][j][i];
flux_0kp1jc0ic0 -= dxinv2 * 0.038 * _t_8_;
_t_9_ = cons_3[k+5][j][i];
_t_9_ -= cons_3[k-3][j][i];
flux_0kp1jc0ic0 += dxinv2 * 0.0035 * _t_9_;
flux_0[k+1][j][i] = flux_0kp1jc0ic0;
flux0_c = flux_0[k+2][j][i];
flux_0kp2jc0ic0 = flux0_c;
_t_11_ = cons_3[k+3][j][i];
_t_11_ -= cons_3[k+1][j][i];
flux_0kp2jc0ic0 -= dxinv2 * 0.8 * _t_11_;
_t_12_ = cons_3[k+4][j][i];
_t_12_ -= cons_3[k][j][i];
flux_0kp2jc0ic0 += dxinv2 * 0.2 * _t_12_;
_t_13_ = cons_3[k+5][j][i];
_t_13_ -= cons_3[k-1][j][i];
flux_0kp2jc0ic0 -= dxinv2 * 0.038 * _t_13_;
_t_14_ = cons_3[k+6][j][i];
_t_14_ -= cons_3[k-2][j][i];
flux_0kp2jc0ic0 += dxinv2 * 0.0035 * _t_14_;
flux_0[k+2][j][i] = flux_0kp2jc0ic0;
flux0_d = flux_0[k+3][j][i];
flux_0kp3jc0ic0 = flux0_d;
_t_16_ = cons_3[k+4][j][i];
_t_16_ -= cons_3[k+2][j][i];
flux_0kp3jc0ic0 -= dxinv2 * 0.8 * _t_16_;
_t_17_ = cons_3[k+5][j][i];
_t_17_ -= cons_3[k+1][j][i];
flux_0kp3jc0ic0 += dxinv2 * 0.2 * _t_17_;
_t_18_ = cons_3[k+6][j][i];
_t_18_ -= cons_3[k][j][i];
flux_0kp3jc0ic0 -= dxinv2 * 0.038 * _t_18_;
_t_19_ = cons_3[k+7][j][i];
_t_19_ -= cons_3[k-1][j][i];
flux_0kp3jc0ic0 += dxinv2 * 0.0035 * _t_19_;
flux_0[k+3][j][i] = flux_0kp3jc0ic0;
flux1_a = flux_1[k][j][i];
flux_1kc0jc0ic0 = flux1_a;
_t_21_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_21_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
flux_1kc0jc0ic0 -= dxinv2 * 0.8 * _t_21_;
_t_22_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_22_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
flux_1kc0jc0ic0 += dxinv2 * 0.2 * _t_22_;
_t_23_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_23_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
flux_1kc0jc0ic0 -= dxinv2 * 0.038 * _t_23_;
_t_24_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_24_ -= cons_1[k-4][j][i] * q_3[k-4][j][i];
flux_1kc0jc0ic0 += dxinv2 * 0.0035 * _t_24_;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux1_b = flux_1[k+1][j][i];
flux_1kp1jc0ic0 = flux1_b;
_t_26_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_26_ -= cons_1[k][j][i] * q_3[k][j][i];
flux_1kp1jc0ic0 -= dxinv2 * 0.8 * _t_26_;
_t_27_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_27_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
flux_1kp1jc0ic0 += dxinv2 * 0.2 * _t_27_;
_t_28_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_28_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
flux_1kp1jc0ic0 -= dxinv2 * 0.038 * _t_28_;
_t_29_ = cons_1[k+5][j][i] * q_3[k+5][j][i];
_t_29_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
flux_1kp1jc0ic0 += dxinv2 * 0.0035 * _t_29_;
flux_1[k+1][j][i] = flux_1kp1jc0ic0;
flux1_c = flux_1[k+2][j][i];
flux_1kp2jc0ic0 = flux1_c;
_t_31_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_31_ -= cons_1[k+1][j][i] * q_3[k+1][j][i];
flux_1kp2jc0ic0 -= dxinv2 * 0.8 * _t_31_;
_t_32_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_32_ -= cons_1[k][j][i] * q_3[k][j][i];
flux_1kp2jc0ic0 += dxinv2 * 0.2 * _t_32_;
_t_33_ = cons_1[k+5][j][i] * q_3[k+5][j][i];
_t_33_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
flux_1kp2jc0ic0 -= dxinv2 * 0.038 * _t_33_;
_t_34_ = cons_1[k+6][j][i] * q_3[k+6][j][i];
_t_34_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
flux_1kp2jc0ic0 += dxinv2 * 0.0035 * _t_34_;
flux_1[k+2][j][i] = flux_1kp2jc0ic0;
flux1_d = flux_1[k+3][j][i];
flux_1kp3jc0ic0 = flux1_d;
_t_36_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_36_ -= cons_1[k+2][j][i] * q_3[k+2][j][i];
flux_1kp3jc0ic0 -= dxinv2 * 0.8 * _t_36_;
_t_37_ = cons_1[k+5][j][i] * q_3[k+5][j][i];
_t_37_ -= cons_1[k+1][j][i] * q_3[k+1][j][i];
flux_1kp3jc0ic0 += dxinv2 * 0.2 * _t_37_;
_t_38_ = cons_1[k+6][j][i] * q_3[k+6][j][i];
_t_38_ -= cons_1[k][j][i] * q_3[k][j][i];
flux_1kp3jc0ic0 -= dxinv2 * 0.038 * _t_38_;
_t_39_ = cons_1[k+7][j][i] * q_3[k+7][j][i];
_t_39_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
flux_1kp3jc0ic0 += dxinv2 * 0.0035 * _t_39_;
flux_1[k+3][j][i] = flux_1kp3jc0ic0;
flux2_a = flux_2[k][j][i];
flux_2kc0jc0ic0 = flux2_a;
_t_41_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
_t_41_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
flux_2kc0jc0ic0 -= dxinv2 * 0.8 * _t_41_;
_t_42_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_42_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
flux_2kc0jc0ic0 += dxinv2 * 0.2 * _t_42_;
_t_43_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_43_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
flux_2kc0jc0ic0 -= dxinv2 * 0.038 * _t_43_;
_t_44_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_44_ -= cons_2[k-4][j][i] * q_3[k-4][j][i];
flux_2kc0jc0ic0 += dxinv2 * 0.0035 * _t_44_;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux2_b = flux_2[k+1][j][i];
flux_2kp1jc0ic0 = flux2_b;
_t_46_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_46_ -= cons_2[k][j][i] * q_3[k][j][i];
flux_2kp1jc0ic0 -= dxinv2 * 0.8 * _t_46_;
_t_47_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_47_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
flux_2kp1jc0ic0 += dxinv2 * 0.2 * _t_47_;
_t_48_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_48_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
flux_2kp1jc0ic0 -= dxinv2 * 0.038 * _t_48_;
_t_49_ = cons_2[k+5][j][i] * q_3[k+5][j][i];
_t_49_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
flux_2kp1jc0ic0 += dxinv2 * 0.0035 * _t_49_;
flux_2[k+1][j][i] = flux_2kp1jc0ic0;
flux2_c = flux_2[k+2][j][i];
flux_2kp2jc0ic0 = flux2_c;
_t_51_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_51_ -= cons_2[k+1][j][i] * q_3[k+1][j][i];
flux_2kp2jc0ic0 -= dxinv2 * 0.8 * _t_51_;
_t_52_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_52_ -= cons_2[k][j][i] * q_3[k][j][i];
flux_2kp2jc0ic0 += dxinv2 * 0.2 * _t_52_;
_t_53_ = cons_2[k+5][j][i] * q_3[k+5][j][i];
_t_53_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
flux_2kp2jc0ic0 -= dxinv2 * 0.038 * _t_53_;
_t_54_ = cons_2[k+6][j][i] * q_3[k+6][j][i];
_t_54_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
flux_2kp2jc0ic0 += dxinv2 * 0.0035 * _t_54_;
flux_2[k+2][j][i] = flux_2kp2jc0ic0;
flux2_d = flux_2[k+3][j][i];
flux_2kp3jc0ic0 = flux2_d;
_t_56_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_56_ -= cons_2[k+2][j][i] * q_3[k+2][j][i];
flux_2kp3jc0ic0 -= dxinv2 * 0.8 * _t_56_;
_t_57_ = cons_2[k+5][j][i] * q_3[k+5][j][i];
_t_57_ -= cons_2[k+1][j][i] * q_3[k+1][j][i];
flux_2kp3jc0ic0 += dxinv2 * 0.2 * _t_57_;
_t_58_ = cons_2[k+6][j][i] * q_3[k+6][j][i];
_t_58_ -= cons_2[k][j][i] * q_3[k][j][i];
flux_2kp3jc0ic0 -= dxinv2 * 0.038 * _t_58_;
_t_59_ = cons_2[k+7][j][i] * q_3[k+7][j][i];
_t_59_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
flux_2kp3jc0ic0 += dxinv2 * 0.0035 * _t_59_;
flux_2[k+3][j][i] = flux_2kp3jc0ic0;
flux3_a = flux_3[k][j][i];
flux_3kc0jc0ic0 = flux3_a;
_t_61_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_61_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_61_ += q_4[k+1][j][i];
_t_61_ -= q_4[k-1][j][i];
flux_3kc0jc0ic0 -= dxinv2 * 0.8 * _t_61_;
_t_62_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_62_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_62_ += q_4[k+2][j][i];
_t_62_ -= q_4[k-2][j][i];
flux_3kc0jc0ic0 += dxinv2 * 0.2 * _t_62_;
_t_63_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_63_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_63_ += q_4[k+3][j][i];
_t_63_ -= q_4[k-3][j][i];
flux_3kc0jc0ic0 -= dxinv2 * 0.038 * _t_63_;
_t_64_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_64_ -= cons_3[k-4][j][i] * q_3[k-4][j][i];
_t_64_ += q_4[k+4][j][i];
_t_64_ -= q_4[k-4][j][i];
flux_3kc0jc0ic0 += dxinv2 * 0.0035 * _t_64_;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux3_b = flux_3[k+1][j][i];
flux_3kp1jc0ic0 = flux3_b;
_t_66_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_66_ -= cons_3[k][j][i] * q_3[k][j][i];
_t_66_ += q_4[k+2][j][i];
_t_66_ -= q_4[k][j][i];
flux_3kp1jc0ic0 -= dxinv2 * 0.8 * _t_66_;
_t_67_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_67_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_67_ += q_4[k+3][j][i];
_t_67_ -= q_4[k-1][j][i];
flux_3kp1jc0ic0 += dxinv2 * 0.2 * _t_67_;
_t_68_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_68_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_68_ += q_4[k+4][j][i];
_t_68_ -= q_4[k-2][j][i];
flux_3kp1jc0ic0 -= dxinv2 * 0.038 * _t_68_;
_t_69_ = cons_3[k+5][j][i] * q_3[k+5][j][i];
_t_69_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_69_ += q_4[k+5][j][i];
_t_69_ -= q_4[k-3][j][i];
flux_3kp1jc0ic0 += dxinv2 * 0.0035 * _t_69_;
flux_3[k+1][j][i] = flux_3kp1jc0ic0;
flux3_c = flux_3[k+2][j][i];
flux_3kp2jc0ic0 = flux3_c;
_t_71_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_71_ -= cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_71_ += q_4[k+3][j][i];
_t_71_ -= q_4[k+1][j][i];
flux_3kp2jc0ic0 -= dxinv2 * 0.8 * _t_71_;
_t_72_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_72_ -= cons_3[k][j][i] * q_3[k][j][i];
_t_72_ += q_4[k+4][j][i];
_t_72_ -= q_4[k][j][i];
flux_3kp2jc0ic0 += dxinv2 * 0.2 * _t_72_;
_t_73_ = cons_3[k+5][j][i] * q_3[k+5][j][i];
_t_73_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_73_ += q_4[k+5][j][i];
_t_73_ -= q_4[k-1][j][i];
flux_3kp2jc0ic0 -= dxinv2 * 0.038 * _t_73_;
_t_74_ = cons_3[k+6][j][i] * q_3[k+6][j][i];
_t_74_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_74_ += q_4[k+6][j][i];
_t_74_ -= q_4[k-2][j][i];
flux_3kp2jc0ic0 += dxinv2 * 0.0035 * _t_74_;
flux_3[k+2][j][i] = flux_3kp2jc0ic0;
flux3_d = flux_3[k+3][j][i];
flux_3kp3jc0ic0 = flux3_d;
_t_76_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_76_ -= cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_76_ += q_4[k+4][j][i];
_t_76_ -= q_4[k+2][j][i];
flux_3kp3jc0ic0 -= dxinv2 * 0.8 * _t_76_;
_t_77_ = cons_3[k+5][j][i] * q_3[k+5][j][i];
_t_77_ -= cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_77_ += q_4[k+5][j][i];
_t_77_ -= q_4[k+1][j][i];
flux_3kp3jc0ic0 += dxinv2 * 0.2 * _t_77_;
_t_78_ = cons_3[k+6][j][i] * q_3[k+6][j][i];
_t_78_ -= cons_3[k][j][i] * q_3[k][j][i];
_t_78_ += q_4[k+6][j][i];
_t_78_ -= q_4[k][j][i];
flux_3kp3jc0ic0 -= dxinv2 * 0.038 * _t_78_;
_t_79_ = cons_3[k+7][j][i] * q_3[k+7][j][i];
_t_79_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_79_ += q_4[k+7][j][i];
_t_79_ -= q_4[k-1][j][i];
flux_3kp3jc0ic0 += dxinv2 * 0.0035 * _t_79_;
flux_3[k+3][j][i] = flux_3kp3jc0ic0;
}
}
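// hypterm_2: computes flux_4 from cons_4 and q_1..q_4 in all three directions;
// each thread updates two consecutive k-planes (k and k+1).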
__global__ void hypterm_2 (double * __restrict__ flux_in_4, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + (int)(2*threadIdx.z);
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux_4[k][j][i] = ((0.8*(cons_4[k][j][i+1]*q_1[k][j][i+1]-cons_4[k][j][i-1]*q_1[k][j][i-1]+(q_4[k][j][i+1]*q_1[k][j][i+1]-q_4[k][j][i-1]*q_1[k][j][i-1]))-0.2*(cons_4[k][j][i+2]*q_1[k][j][i+2]-cons_4[k][j][i-2]*q_1[k][j][i-2]+(q_4[k][j][i+2]*q_1[k][j][i+2]-q_4[k][j][i-2]*q_1[k][j][i-2]))+0.038*(cons_4[k][j][i+3]*q_1[k][j][i+3]-cons_4[k][j][i-3]*q_1[k][j][i-3]+(q_4[k][j][i+3]*q_1[k][j][i+3]-q_4[k][j][i-3]*q_1[k][j][i-3]))-0.0035*(cons_4[k][j][i+4]*q_1[k][j][i+4]-cons_4[k][j][i-4]*q_1[k][j][i-4]+(q_4[k][j][i+4]*q_1[k][j][i+4]-q_4[k][j][i-4]*q_1[k][j][i-4])))*dxinv0);
flux_4[k+1][j][i] = ((0.8*(cons_4[k+1][j][i+1]*q_1[k+1][j][i+1]-cons_4[k+1][j][i-1]*q_1[k+1][j][i-1]+(q_4[k+1][j][i+1]*q_1[k+1][j][i+1]-q_4[k+1][j][i-1]*q_1[k+1][j][i-1]))-0.2*(cons_4[k+1][j][i+2]*q_1[k+1][j][i+2]-cons_4[k+1][j][i-2]*q_1[k+1][j][i-2]+(q_4[k+1][j][i+2]*q_1[k+1][j][i+2]-q_4[k+1][j][i-2]*q_1[k+1][j][i-2]))+0.038*(cons_4[k+1][j][i+3]*q_1[k+1][j][i+3]-cons_4[k+1][j][i-3]*q_1[k+1][j][i-3]+(q_4[k+1][j][i+3]*q_1[k+1][j][i+3]-q_4[k+1][j][i-3]*q_1[k+1][j][i-3]))-0.0035*(cons_4[k+1][j][i+4]*q_1[k+1][j][i+4]-cons_4[k+1][j][i-4]*q_1[k+1][j][i-4]+(q_4[k+1][j][i+4]*q_1[k+1][j][i+4]-q_4[k+1][j][i-4]*q_1[k+1][j][i-4])))*dxinv0);
flux_4[k][j][i] -= (0.8*(cons_4[k][j+1][i]*q_2[k][j+1][i]-cons_4[k][j-1][i]*q_2[k][j-1][i]+(q_4[k][j+1][i]*q_2[k][j+1][i]-q_4[k][j-1][i]*q_2[k][j-1][i]))-0.2*(cons_4[k][j+2][i]*q_2[k][j+2][i]-cons_4[k][j-2][i]*q_2[k][j-2][i]+(q_4[k][j+2][i]*q_2[k][j+2][i]-q_4[k][j-2][i]*q_2[k][j-2][i]))+0.038*(cons_4[k][j+3][i]*q_2[k][j+3][i]-cons_4[k][j-3][i]*q_2[k][j-3][i]+(q_4[k][j+3][i]*q_2[k][j+3][i]-q_4[k][j-3][i]*q_2[k][j-3][i]))-0.0035*(cons_4[k][j+4][i]*q_2[k][j+4][i]-cons_4[k][j-4][i]*q_2[k][j-4][i]+(q_4[k][j+4][i]*q_2[k][j+4][i]-q_4[k][j-4][i]*q_2[k][j-4][i])))*dxinv1;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1][j+1][i]*q_2[k+1][j+1][i]-cons_4[k+1][j-1][i]*q_2[k+1][j-1][i]+(q_4[k+1][j+1][i]*q_2[k+1][j+1][i]-q_4[k+1][j-1][i]*q_2[k+1][j-1][i]))-0.2*(cons_4[k+1][j+2][i]*q_2[k+1][j+2][i]-cons_4[k+1][j-2][i]*q_2[k+1][j-2][i]+(q_4[k+1][j+2][i]*q_2[k+1][j+2][i]-q_4[k+1][j-2][i]*q_2[k+1][j-2][i]))+0.038*(cons_4[k+1][j+3][i]*q_2[k+1][j+3][i]-cons_4[k+1][j-3][i]*q_2[k+1][j-3][i]+(q_4[k+1][j+3][i]*q_2[k+1][j+3][i]-q_4[k+1][j-3][i]*q_2[k+1][j-3][i]))-0.0035*(cons_4[k+1][j+4][i]*q_2[k+1][j+4][i]-cons_4[k+1][j-4][i]*q_2[k+1][j-4][i]+(q_4[k+1][j+4][i]*q_2[k+1][j+4][i]-q_4[k+1][j-4][i]*q_2[k+1][j-4][i])))*dxinv1;
flux_4[k][j][i] -= (0.8*(cons_4[k+1][j][i]*q_3[k+1][j][i]-cons_4[k-1][j][i]*q_3[k-1][j][i]+(q_4[k+1][j][i]*q_3[k+1][j][i]-q_4[k-1][j][i]*q_3[k-1][j][i]))-0.2*(cons_4[k+2][j][i]*q_3[k+2][j][i]-cons_4[k-2][j][i]*q_3[k-2][j][i]+(q_4[k+2][j][i]*q_3[k+2][j][i]-q_4[k-2][j][i]*q_3[k-2][j][i]))+0.038*(cons_4[k+3][j][i]*q_3[k+3][j][i]-cons_4[k-3][j][i]*q_3[k-3][j][i]+(q_4[k+3][j][i]*q_3[k+3][j][i]-q_4[k-3][j][i]*q_3[k-3][j][i]))-0.0035*(cons_4[k+4][j][i]*q_3[k+4][j][i]-cons_4[k-4][j][i]*q_3[k-4][j][i]+(q_4[k+4][j][i]*q_3[k+4][j][i]-q_4[k-4][j][i]*q_3[k-4][j][i])))*dxinv2;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1+1][j][i]*q_3[k+1+1][j][i]-cons_4[k+1-1][j][i]*q_3[k+1-1][j][i]+(q_4[k+1+1][j][i]*q_3[k+1+1][j][i]-q_4[k+1-1][j][i]*q_3[k+1-1][j][i]))-0.2*(cons_4[k+1+2][j][i]*q_3[k+1+2][j][i]-cons_4[k+1-2][j][i]*q_3[k+1-2][j][i]+(q_4[k+1+2][j][i]*q_3[k+1+2][j][i]-q_4[k+1-2][j][i]*q_3[k+1-2][j][i]))+0.038*(cons_4[k+1+3][j][i]*q_3[k+1+3][j][i]-cons_4[k+1-3][j][i]*q_3[k+1-3][j][i]+(q_4[k+1+3][j][i]*q_3[k+1+3][j][i]-q_4[k+1-3][j][i]*q_3[k+1-3][j][i]))-0.0035*(cons_4[k+1+4][j][i]*q_3[k+1+4][j][i]-cons_4[k+1-4][j][i]*q_3[k+1-4][j][i]+(q_4[k+1+4][j][i]*q_3[k+1+4][j][i]-q_4[k+1-4][j][i]*q_3[k+1-4][j][i])))*dxinv2;
}
}
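// host_code: allocates device buffers for all thirteen arrays, copies them in,
// launches hypterm_0/1/2 with a 16x4x4 block (grid z divided by 4 and 2 for the
// multi-plane kernels), and copies the five flux arrays back. Note that dxinv0
// is passed negated to every kernel and the device buffers are never freed here.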
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
hipMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
hipMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_1;
hipMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
hipMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_2;
hipMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
hipMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_3;
hipMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
hipMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_4;
hipMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
hipMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_1;
hipMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
hipMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_2;
hipMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
hipMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_3;
hipMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
hipMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_4;
hipMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
hipMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_1;
hipMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
hipMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_2;
hipMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
hipMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_3;
hipMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
hipMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_4;
hipMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
hipMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig_0 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hipLaunchKernelGGL(( hypterm_0) , dim3(gridconfig_0), dim3(blockconfig), 0, 0, flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_1 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 4*blockconfig.z));
hipLaunchKernelGGL(( hypterm_1) , dim3(gridconfig_1), dim3(blockconfig), 0, 0, flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_2 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 2*blockconfig.z));
hipLaunchKernelGGL(( hypterm_2) , dim3(gridconfig_2), dim3(blockconfig), 0, 0, flux_4, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
hipMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
}
| 64bd193c3b0a1b9115eec40fc28202c4df5dbf03.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
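// hypterm_0: accumulates the x- (dxinv0) and y- (dxinv1) direction derivative
// contributions into flux_0..flux_3, one grid point per thread. The weights
// 0.8, 0.2, 0.038, 0.0035 appear to be truncated 8th-order central-difference
// coefficients (4/5, 1/5, 4/105, 1/280).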
__global__ void hypterm_0 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double _t_1_;
double flux_0kc0jc0ic0;
double _t_2_;
double _t_3_;
double _t_4_;
double _t_7_;
double _t_5_;
double _t_8_;
double _t_9_;
double _t_10_;
double _t_12_;
double flux_1kc0jc0ic0;
double _t_13_;
double _t_14_;
double _t_15_;
double _t_18_;
double _t_16_;
double _t_19_;
double _t_20_;
double _t_21_;
double _t_23_;
double flux_2kc0jc0ic0;
double _t_24_;
double _t_25_;
double _t_26_;
double _t_29_;
double _t_27_;
double _t_30_;
double _t_31_;
double _t_32_;
double _t_34_;
double flux_3kc0jc0ic0;
double _t_35_;
double _t_36_;
double _t_37_;
double _t_40_;
double _t_38_;
double _t_41_;
double _t_42_;
double _t_43_;
_t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
flux_0kc0jc0ic0 = dxinv0 * 0.8 * _t_1_;
_t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
flux_0kc0jc0ic0 -= dxinv0 * 0.2 * _t_2_;
_t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
flux_0kc0jc0ic0 += dxinv0 * 0.038 * _t_3_;
_t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
flux_0kc0jc0ic0 -= dxinv0 * 0.0035 * _t_4_;
_t_7_ = cons_2[k][j+1][i];
_t_7_ -= cons_2[k][j-1][i];
_t_5_ = dxinv1 * 0.8 * _t_7_;
_t_8_ = cons_2[k][j+2][i];
_t_8_ -= cons_2[k][j-2][i];
_t_5_ -= dxinv1 * 0.2 * _t_8_;
_t_9_ = cons_2[k][j+3][i];
_t_9_ -= cons_2[k][j-3][i];
_t_5_ += dxinv1 * 0.038 * _t_9_;
_t_10_ = cons_2[k][j+4][i];
_t_10_ -= cons_2[k][j-4][i];
_t_5_ -= dxinv1 * 0.0035 * _t_10_;
flux_0kc0jc0ic0 -= _t_5_;
flux_0[k][j][i] = flux_0kc0jc0ic0;
_t_12_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_12_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_12_ += q_4[k][j][i+1];
_t_12_ -= q_4[k][j][i-1];
flux_1kc0jc0ic0 = dxinv0 * 0.8 * _t_12_;
_t_13_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_13_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_13_ += q_4[k][j][i+2];
_t_13_ -= q_4[k][j][i-2];
flux_1kc0jc0ic0 -= dxinv0 * 0.2 * _t_13_;
_t_14_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_14_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_14_ += q_4[k][j][i+3];
_t_14_ -= q_4[k][j][i-3];
flux_1kc0jc0ic0 += dxinv0 * 0.038 * _t_14_;
_t_15_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_15_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_15_ += q_4[k][j][i+4];
_t_15_ -= q_4[k][j][i-4];
flux_1kc0jc0ic0 -= dxinv0 * 0.0035 * _t_15_;
_t_18_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_18_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
_t_16_ = dxinv1 * 0.8 * _t_18_;
_t_19_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_19_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_16_ -= dxinv1 * 0.2 * _t_19_;
_t_20_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_20_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_16_ += dxinv1 * 0.038 * _t_20_;
_t_21_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_21_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_16_ -= dxinv1 * 0.0035 * _t_21_;
flux_1kc0jc0ic0 -= _t_16_;
flux_1[k][j][i] = flux_1kc0jc0ic0;
_t_23_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_23_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
flux_2kc0jc0ic0 = dxinv0 * 0.8 * _t_23_;
_t_24_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_24_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
flux_2kc0jc0ic0 -= dxinv0 * 0.2 * _t_24_;
_t_25_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_25_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
flux_2kc0jc0ic0 += dxinv0 * 0.038 * _t_25_;
_t_26_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_26_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
flux_2kc0jc0ic0 -= dxinv0 * 0.0035 * _t_26_;
_t_29_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_29_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_29_ += q_4[k][j+1][i];
_t_29_ -= q_4[k][j-1][i];
_t_27_ = dxinv1 * 0.8 * _t_29_;
_t_30_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_30_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_30_ += q_4[k][j+2][i];
_t_30_ -= q_4[k][j-2][i];
_t_27_ -= dxinv1 * 0.2 * _t_30_;
_t_31_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_31_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += q_4[k][j+3][i];
_t_31_ -= q_4[k][j-3][i];
_t_27_ += dxinv1 * 0.038 * _t_31_;
_t_32_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_32_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_32_ += q_4[k][j+4][i];
_t_32_ -= q_4[k][j-4][i];
_t_27_ -= dxinv1 * 0.0035 * _t_32_;
flux_2kc0jc0ic0 -= _t_27_;
flux_2[k][j][i] = flux_2kc0jc0ic0;
_t_34_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_34_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
flux_3kc0jc0ic0 = dxinv0 * 0.8 * _t_34_;
_t_35_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_35_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
flux_3kc0jc0ic0 -= dxinv0 * 0.2 * _t_35_;
_t_36_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_36_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
flux_3kc0jc0ic0 += dxinv0 * 0.038 * _t_36_;
_t_37_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_37_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
flux_3kc0jc0ic0 -= dxinv0 * 0.0035 * _t_37_;
_t_40_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_40_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
_t_38_ = dxinv1 * 0.8 * _t_40_;
_t_41_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_41_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_38_ -= dxinv1 * 0.2 * _t_41_;
_t_42_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_42_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_38_ += dxinv1 * 0.038 * _t_42_;
_t_43_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_43_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_38_ -= dxinv1 * 0.0035 * _t_43_;
flux_3kc0jc0ic0 -= _t_38_;
flux_3[k][j][i] = flux_3kc0jc0ic0;
}
}
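// hypterm_1: adds the z-direction (dxinv2) contributions to flux_0..flux_3 with
// the same stencil. Each thread updates four consecutive k-planes (k..k+3), so
// the launch uses a quarter of the k-extent in the grid's z dimension.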
__global__ void hypterm_1 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(4*blockdim_k);
int k = max (k0, 0) + (int)(4*threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double flux0_a, flux1_a, flux2_a, flux3_a;
double flux0_b, flux1_b, flux2_b, flux3_b;
double flux0_c, flux1_c, flux2_c, flux3_c;
double flux0_d, flux1_d, flux2_d, flux3_d;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double flux0_a;
double flux_0kc0jc0ic0;
double _t_1_;
double _t_2_;
double _t_3_;
double _t_4_;
double flux0_b;
double flux_0kp1jc0ic0;
double _t_6_;
double _t_7_;
double _t_8_;
double _t_9_;
double flux0_c;
double flux_0kp2jc0ic0;
double _t_11_;
double _t_12_;
double _t_13_;
double _t_14_;
double flux0_d;
double flux_0kp3jc0ic0;
double _t_16_;
double _t_17_;
double _t_18_;
double _t_19_;
double flux1_a;
double flux_1kc0jc0ic0;
double _t_21_;
double _t_22_;
double _t_23_;
double _t_24_;
double flux1_b;
double flux_1kp1jc0ic0;
double _t_26_;
double _t_27_;
double _t_28_;
double _t_29_;
double flux1_c;
double flux_1kp2jc0ic0;
double _t_31_;
double _t_32_;
double _t_33_;
double _t_34_;
double flux1_d;
double flux_1kp3jc0ic0;
double _t_36_;
double _t_37_;
double _t_38_;
double _t_39_;
double flux2_a;
double flux_2kc0jc0ic0;
double _t_41_;
double _t_42_;
double _t_43_;
double _t_44_;
double flux2_b;
double flux_2kp1jc0ic0;
double _t_46_;
double _t_47_;
double _t_48_;
double _t_49_;
double flux2_c;
double flux_2kp2jc0ic0;
double _t_51_;
double _t_52_;
double _t_53_;
double _t_54_;
double flux2_d;
double flux_2kp3jc0ic0;
double _t_56_;
double _t_57_;
double _t_58_;
double _t_59_;
double flux3_a;
double flux_3kc0jc0ic0;
double _t_61_;
double _t_62_;
double _t_63_;
double _t_64_;
double flux3_b;
double flux_3kp1jc0ic0;
double _t_66_;
double _t_67_;
double _t_68_;
double _t_69_;
double flux3_c;
double flux_3kp2jc0ic0;
double _t_71_;
double _t_72_;
double _t_73_;
double _t_74_;
double flux3_d;
double flux_3kp3jc0ic0;
double _t_76_;
double _t_77_;
double _t_78_;
double _t_79_;
flux0_a = flux_0[k][j][i];
flux_0kc0jc0ic0 = flux0_a;
_t_1_ = cons_3[k+1][j][i];
_t_1_ -= cons_3[k-1][j][i];
flux_0kc0jc0ic0 -= dxinv2 * 0.8 * _t_1_;
_t_2_ = cons_3[k+2][j][i];
_t_2_ -= cons_3[k-2][j][i];
flux_0kc0jc0ic0 += dxinv2 * 0.2 * _t_2_;
_t_3_ = cons_3[k+3][j][i];
_t_3_ -= cons_3[k-3][j][i];
flux_0kc0jc0ic0 -= dxinv2 * 0.038 * _t_3_;
_t_4_ = cons_3[k+4][j][i];
_t_4_ -= cons_3[k-4][j][i];
flux_0kc0jc0ic0 += dxinv2 * 0.0035 * _t_4_;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux0_b = flux_0[k+1][j][i];
flux_0kp1jc0ic0 = flux0_b;
_t_6_ = cons_3[k+2][j][i];
_t_6_ -= cons_3[k][j][i];
flux_0kp1jc0ic0 -= dxinv2 * 0.8 * _t_6_;
_t_7_ = cons_3[k+3][j][i];
_t_7_ -= cons_3[k-1][j][i];
flux_0kp1jc0ic0 += dxinv2 * 0.2 * _t_7_;
_t_8_ = cons_3[k+4][j][i];
_t_8_ -= cons_3[k-2][j][i];
flux_0kp1jc0ic0 -= dxinv2 * 0.038 * _t_8_;
_t_9_ = cons_3[k+5][j][i];
_t_9_ -= cons_3[k-3][j][i];
flux_0kp1jc0ic0 += dxinv2 * 0.0035 * _t_9_;
flux_0[k+1][j][i] = flux_0kp1jc0ic0;
flux0_c = flux_0[k+2][j][i];
flux_0kp2jc0ic0 = flux0_c;
_t_11_ = cons_3[k+3][j][i];
_t_11_ -= cons_3[k+1][j][i];
flux_0kp2jc0ic0 -= dxinv2 * 0.8 * _t_11_;
_t_12_ = cons_3[k+4][j][i];
_t_12_ -= cons_3[k][j][i];
flux_0kp2jc0ic0 += dxinv2 * 0.2 * _t_12_;
_t_13_ = cons_3[k+5][j][i];
_t_13_ -= cons_3[k-1][j][i];
flux_0kp2jc0ic0 -= dxinv2 * 0.038 * _t_13_;
_t_14_ = cons_3[k+6][j][i];
_t_14_ -= cons_3[k-2][j][i];
flux_0kp2jc0ic0 += dxinv2 * 0.0035 * _t_14_;
flux_0[k+2][j][i] = flux_0kp2jc0ic0;
flux0_d = flux_0[k+3][j][i];
flux_0kp3jc0ic0 = flux0_d;
_t_16_ = cons_3[k+4][j][i];
_t_16_ -= cons_3[k+2][j][i];
flux_0kp3jc0ic0 -= dxinv2 * 0.8 * _t_16_;
_t_17_ = cons_3[k+5][j][i];
_t_17_ -= cons_3[k+1][j][i];
flux_0kp3jc0ic0 += dxinv2 * 0.2 * _t_17_;
_t_18_ = cons_3[k+6][j][i];
_t_18_ -= cons_3[k][j][i];
flux_0kp3jc0ic0 -= dxinv2 * 0.038 * _t_18_;
_t_19_ = cons_3[k+7][j][i];
_t_19_ -= cons_3[k-1][j][i];
flux_0kp3jc0ic0 += dxinv2 * 0.0035 * _t_19_;
flux_0[k+3][j][i] = flux_0kp3jc0ic0;
flux1_a = flux_1[k][j][i];
flux_1kc0jc0ic0 = flux1_a;
_t_21_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_21_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
flux_1kc0jc0ic0 -= dxinv2 * 0.8 * _t_21_;
_t_22_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_22_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
flux_1kc0jc0ic0 += dxinv2 * 0.2 * _t_22_;
_t_23_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_23_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
flux_1kc0jc0ic0 -= dxinv2 * 0.038 * _t_23_;
_t_24_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_24_ -= cons_1[k-4][j][i] * q_3[k-4][j][i];
flux_1kc0jc0ic0 += dxinv2 * 0.0035 * _t_24_;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux1_b = flux_1[k+1][j][i];
flux_1kp1jc0ic0 = flux1_b;
_t_26_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_26_ -= cons_1[k][j][i] * q_3[k][j][i];
flux_1kp1jc0ic0 -= dxinv2 * 0.8 * _t_26_;
_t_27_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_27_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
flux_1kp1jc0ic0 += dxinv2 * 0.2 * _t_27_;
_t_28_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_28_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
flux_1kp1jc0ic0 -= dxinv2 * 0.038 * _t_28_;
_t_29_ = cons_1[k+5][j][i] * q_3[k+5][j][i];
_t_29_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
flux_1kp1jc0ic0 += dxinv2 * 0.0035 * _t_29_;
flux_1[k+1][j][i] = flux_1kp1jc0ic0;
flux1_c = flux_1[k+2][j][i];
flux_1kp2jc0ic0 = flux1_c;
_t_31_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_31_ -= cons_1[k+1][j][i] * q_3[k+1][j][i];
flux_1kp2jc0ic0 -= dxinv2 * 0.8 * _t_31_;
_t_32_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_32_ -= cons_1[k][j][i] * q_3[k][j][i];
flux_1kp2jc0ic0 += dxinv2 * 0.2 * _t_32_;
_t_33_ = cons_1[k+5][j][i] * q_3[k+5][j][i];
_t_33_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
flux_1kp2jc0ic0 -= dxinv2 * 0.038 * _t_33_;
_t_34_ = cons_1[k+6][j][i] * q_3[k+6][j][i];
_t_34_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
flux_1kp2jc0ic0 += dxinv2 * 0.0035 * _t_34_;
flux_1[k+2][j][i] = flux_1kp2jc0ic0;
flux1_d = flux_1[k+3][j][i];
flux_1kp3jc0ic0 = flux1_d;
_t_36_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_36_ -= cons_1[k+2][j][i] * q_3[k+2][j][i];
flux_1kp3jc0ic0 -= dxinv2 * 0.8 * _t_36_;
_t_37_ = cons_1[k+5][j][i] * q_3[k+5][j][i];
_t_37_ -= cons_1[k+1][j][i] * q_3[k+1][j][i];
flux_1kp3jc0ic0 += dxinv2 * 0.2 * _t_37_;
_t_38_ = cons_1[k+6][j][i] * q_3[k+6][j][i];
_t_38_ -= cons_1[k][j][i] * q_3[k][j][i];
flux_1kp3jc0ic0 -= dxinv2 * 0.038 * _t_38_;
_t_39_ = cons_1[k+7][j][i] * q_3[k+7][j][i];
_t_39_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
flux_1kp3jc0ic0 += dxinv2 * 0.0035 * _t_39_;
flux_1[k+3][j][i] = flux_1kp3jc0ic0;
flux2_a = flux_2[k][j][i];
flux_2kc0jc0ic0 = flux2_a;
_t_41_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
_t_41_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
flux_2kc0jc0ic0 -= dxinv2 * 0.8 * _t_41_;
_t_42_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_42_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
flux_2kc0jc0ic0 += dxinv2 * 0.2 * _t_42_;
_t_43_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_43_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
flux_2kc0jc0ic0 -= dxinv2 * 0.038 * _t_43_;
_t_44_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_44_ -= cons_2[k-4][j][i] * q_3[k-4][j][i];
flux_2kc0jc0ic0 += dxinv2 * 0.0035 * _t_44_;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux2_b = flux_2[k+1][j][i];
flux_2kp1jc0ic0 = flux2_b;
_t_46_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_46_ -= cons_2[k][j][i] * q_3[k][j][i];
flux_2kp1jc0ic0 -= dxinv2 * 0.8 * _t_46_;
_t_47_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_47_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
flux_2kp1jc0ic0 += dxinv2 * 0.2 * _t_47_;
_t_48_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_48_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
flux_2kp1jc0ic0 -= dxinv2 * 0.038 * _t_48_;
_t_49_ = cons_2[k+5][j][i] * q_3[k+5][j][i];
_t_49_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
flux_2kp1jc0ic0 += dxinv2 * 0.0035 * _t_49_;
flux_2[k+1][j][i] = flux_2kp1jc0ic0;
flux2_c = flux_2[k+2][j][i];
flux_2kp2jc0ic0 = flux2_c;
_t_51_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_51_ -= cons_2[k+1][j][i] * q_3[k+1][j][i];
flux_2kp2jc0ic0 -= dxinv2 * 0.8 * _t_51_;
_t_52_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_52_ -= cons_2[k][j][i] * q_3[k][j][i];
flux_2kp2jc0ic0 += dxinv2 * 0.2 * _t_52_;
_t_53_ = cons_2[k+5][j][i] * q_3[k+5][j][i];
_t_53_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
flux_2kp2jc0ic0 -= dxinv2 * 0.038 * _t_53_;
_t_54_ = cons_2[k+6][j][i] * q_3[k+6][j][i];
_t_54_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
flux_2kp2jc0ic0 += dxinv2 * 0.0035 * _t_54_;
flux_2[k+2][j][i] = flux_2kp2jc0ic0;
flux2_d = flux_2[k+3][j][i];
flux_2kp3jc0ic0 = flux2_d;
_t_56_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_56_ -= cons_2[k+2][j][i] * q_3[k+2][j][i];
flux_2kp3jc0ic0 -= dxinv2 * 0.8 * _t_56_;
_t_57_ = cons_2[k+5][j][i] * q_3[k+5][j][i];
_t_57_ -= cons_2[k+1][j][i] * q_3[k+1][j][i];
flux_2kp3jc0ic0 += dxinv2 * 0.2 * _t_57_;
_t_58_ = cons_2[k+6][j][i] * q_3[k+6][j][i];
_t_58_ -= cons_2[k][j][i] * q_3[k][j][i];
flux_2kp3jc0ic0 -= dxinv2 * 0.038 * _t_58_;
_t_59_ = cons_2[k+7][j][i] * q_3[k+7][j][i];
_t_59_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
flux_2kp3jc0ic0 += dxinv2 * 0.0035 * _t_59_;
flux_2[k+3][j][i] = flux_2kp3jc0ic0;
flux3_a = flux_3[k][j][i];
flux_3kc0jc0ic0 = flux3_a;
_t_61_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_61_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_61_ += q_4[k+1][j][i];
_t_61_ -= q_4[k-1][j][i];
flux_3kc0jc0ic0 -= dxinv2 * 0.8 * _t_61_;
_t_62_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_62_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_62_ += q_4[k+2][j][i];
_t_62_ -= q_4[k-2][j][i];
flux_3kc0jc0ic0 += dxinv2 * 0.2 * _t_62_;
_t_63_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_63_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_63_ += q_4[k+3][j][i];
_t_63_ -= q_4[k-3][j][i];
flux_3kc0jc0ic0 -= dxinv2 * 0.038 * _t_63_;
_t_64_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_64_ -= cons_3[k-4][j][i] * q_3[k-4][j][i];
_t_64_ += q_4[k+4][j][i];
_t_64_ -= q_4[k-4][j][i];
flux_3kc0jc0ic0 += dxinv2 * 0.0035 * _t_64_;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux3_b = flux_3[k+1][j][i];
flux_3kp1jc0ic0 = flux3_b;
_t_66_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_66_ -= cons_3[k][j][i] * q_3[k][j][i];
_t_66_ += q_4[k+2][j][i];
_t_66_ -= q_4[k][j][i];
flux_3kp1jc0ic0 -= dxinv2 * 0.8 * _t_66_;
_t_67_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_67_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_67_ += q_4[k+3][j][i];
_t_67_ -= q_4[k-1][j][i];
flux_3kp1jc0ic0 += dxinv2 * 0.2 * _t_67_;
_t_68_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_68_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_68_ += q_4[k+4][j][i];
_t_68_ -= q_4[k-2][j][i];
flux_3kp1jc0ic0 -= dxinv2 * 0.038 * _t_68_;
_t_69_ = cons_3[k+5][j][i] * q_3[k+5][j][i];
_t_69_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_69_ += q_4[k+5][j][i];
_t_69_ -= q_4[k-3][j][i];
flux_3kp1jc0ic0 += dxinv2 * 0.0035 * _t_69_;
flux_3[k+1][j][i] = flux_3kp1jc0ic0;
flux3_c = flux_3[k+2][j][i];
flux_3kp2jc0ic0 = flux3_c;
_t_71_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_71_ -= cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_71_ += q_4[k+3][j][i];
_t_71_ -= q_4[k+1][j][i];
flux_3kp2jc0ic0 -= dxinv2 * 0.8 * _t_71_;
_t_72_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_72_ -= cons_3[k][j][i] * q_3[k][j][i];
_t_72_ += q_4[k+4][j][i];
_t_72_ -= q_4[k][j][i];
flux_3kp2jc0ic0 += dxinv2 * 0.2 * _t_72_;
_t_73_ = cons_3[k+5][j][i] * q_3[k+5][j][i];
_t_73_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_73_ += q_4[k+5][j][i];
_t_73_ -= q_4[k-1][j][i];
flux_3kp2jc0ic0 -= dxinv2 * 0.038 * _t_73_;
_t_74_ = cons_3[k+6][j][i] * q_3[k+6][j][i];
_t_74_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_74_ += q_4[k+6][j][i];
_t_74_ -= q_4[k-2][j][i];
flux_3kp2jc0ic0 += dxinv2 * 0.0035 * _t_74_;
flux_3[k+2][j][i] = flux_3kp2jc0ic0;
flux3_d = flux_3[k+3][j][i];
flux_3kp3jc0ic0 = flux3_d;
_t_76_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_76_ -= cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_76_ += q_4[k+4][j][i];
_t_76_ -= q_4[k+2][j][i];
flux_3kp3jc0ic0 -= dxinv2 * 0.8 * _t_76_;
_t_77_ = cons_3[k+5][j][i] * q_3[k+5][j][i];
_t_77_ -= cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_77_ += q_4[k+5][j][i];
_t_77_ -= q_4[k+1][j][i];
flux_3kp3jc0ic0 += dxinv2 * 0.2 * _t_77_;
_t_78_ = cons_3[k+6][j][i] * q_3[k+6][j][i];
_t_78_ -= cons_3[k][j][i] * q_3[k][j][i];
_t_78_ += q_4[k+6][j][i];
_t_78_ -= q_4[k][j][i];
flux_3kp3jc0ic0 -= dxinv2 * 0.038 * _t_78_;
_t_79_ = cons_3[k+7][j][i] * q_3[k+7][j][i];
_t_79_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_79_ += q_4[k+7][j][i];
_t_79_ -= q_4[k-1][j][i];
flux_3kp3jc0ic0 += dxinv2 * 0.0035 * _t_79_;
flux_3[k+3][j][i] = flux_3kp3jc0ic0;
}
}
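// hypterm_2: computes flux_4 from cons_4 and q_1..q_4 in all three directions;
// each thread updates two consecutive k-planes (k and k+1).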
__global__ void hypterm_2 (double * __restrict__ flux_in_4, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + (int)(2*threadIdx.z);
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux_4[k][j][i] = ((0.8*(cons_4[k][j][i+1]*q_1[k][j][i+1]-cons_4[k][j][i-1]*q_1[k][j][i-1]+(q_4[k][j][i+1]*q_1[k][j][i+1]-q_4[k][j][i-1]*q_1[k][j][i-1]))-0.2*(cons_4[k][j][i+2]*q_1[k][j][i+2]-cons_4[k][j][i-2]*q_1[k][j][i-2]+(q_4[k][j][i+2]*q_1[k][j][i+2]-q_4[k][j][i-2]*q_1[k][j][i-2]))+0.038*(cons_4[k][j][i+3]*q_1[k][j][i+3]-cons_4[k][j][i-3]*q_1[k][j][i-3]+(q_4[k][j][i+3]*q_1[k][j][i+3]-q_4[k][j][i-3]*q_1[k][j][i-3]))-0.0035*(cons_4[k][j][i+4]*q_1[k][j][i+4]-cons_4[k][j][i-4]*q_1[k][j][i-4]+(q_4[k][j][i+4]*q_1[k][j][i+4]-q_4[k][j][i-4]*q_1[k][j][i-4])))*dxinv0);
flux_4[k+1][j][i] = ((0.8*(cons_4[k+1][j][i+1]*q_1[k+1][j][i+1]-cons_4[k+1][j][i-1]*q_1[k+1][j][i-1]+(q_4[k+1][j][i+1]*q_1[k+1][j][i+1]-q_4[k+1][j][i-1]*q_1[k+1][j][i-1]))-0.2*(cons_4[k+1][j][i+2]*q_1[k+1][j][i+2]-cons_4[k+1][j][i-2]*q_1[k+1][j][i-2]+(q_4[k+1][j][i+2]*q_1[k+1][j][i+2]-q_4[k+1][j][i-2]*q_1[k+1][j][i-2]))+0.038*(cons_4[k+1][j][i+3]*q_1[k+1][j][i+3]-cons_4[k+1][j][i-3]*q_1[k+1][j][i-3]+(q_4[k+1][j][i+3]*q_1[k+1][j][i+3]-q_4[k+1][j][i-3]*q_1[k+1][j][i-3]))-0.0035*(cons_4[k+1][j][i+4]*q_1[k+1][j][i+4]-cons_4[k+1][j][i-4]*q_1[k+1][j][i-4]+(q_4[k+1][j][i+4]*q_1[k+1][j][i+4]-q_4[k+1][j][i-4]*q_1[k+1][j][i-4])))*dxinv0);
flux_4[k][j][i] -= (0.8*(cons_4[k][j+1][i]*q_2[k][j+1][i]-cons_4[k][j-1][i]*q_2[k][j-1][i]+(q_4[k][j+1][i]*q_2[k][j+1][i]-q_4[k][j-1][i]*q_2[k][j-1][i]))-0.2*(cons_4[k][j+2][i]*q_2[k][j+2][i]-cons_4[k][j-2][i]*q_2[k][j-2][i]+(q_4[k][j+2][i]*q_2[k][j+2][i]-q_4[k][j-2][i]*q_2[k][j-2][i]))+0.038*(cons_4[k][j+3][i]*q_2[k][j+3][i]-cons_4[k][j-3][i]*q_2[k][j-3][i]+(q_4[k][j+3][i]*q_2[k][j+3][i]-q_4[k][j-3][i]*q_2[k][j-3][i]))-0.0035*(cons_4[k][j+4][i]*q_2[k][j+4][i]-cons_4[k][j-4][i]*q_2[k][j-4][i]+(q_4[k][j+4][i]*q_2[k][j+4][i]-q_4[k][j-4][i]*q_2[k][j-4][i])))*dxinv1;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1][j+1][i]*q_2[k+1][j+1][i]-cons_4[k+1][j-1][i]*q_2[k+1][j-1][i]+(q_4[k+1][j+1][i]*q_2[k+1][j+1][i]-q_4[k+1][j-1][i]*q_2[k+1][j-1][i]))-0.2*(cons_4[k+1][j+2][i]*q_2[k+1][j+2][i]-cons_4[k+1][j-2][i]*q_2[k+1][j-2][i]+(q_4[k+1][j+2][i]*q_2[k+1][j+2][i]-q_4[k+1][j-2][i]*q_2[k+1][j-2][i]))+0.038*(cons_4[k+1][j+3][i]*q_2[k+1][j+3][i]-cons_4[k+1][j-3][i]*q_2[k+1][j-3][i]+(q_4[k+1][j+3][i]*q_2[k+1][j+3][i]-q_4[k+1][j-3][i]*q_2[k+1][j-3][i]))-0.0035*(cons_4[k+1][j+4][i]*q_2[k+1][j+4][i]-cons_4[k+1][j-4][i]*q_2[k+1][j-4][i]+(q_4[k+1][j+4][i]*q_2[k+1][j+4][i]-q_4[k+1][j-4][i]*q_2[k+1][j-4][i])))*dxinv1;
flux_4[k][j][i] -= (0.8*(cons_4[k+1][j][i]*q_3[k+1][j][i]-cons_4[k-1][j][i]*q_3[k-1][j][i]+(q_4[k+1][j][i]*q_3[k+1][j][i]-q_4[k-1][j][i]*q_3[k-1][j][i]))-0.2*(cons_4[k+2][j][i]*q_3[k+2][j][i]-cons_4[k-2][j][i]*q_3[k-2][j][i]+(q_4[k+2][j][i]*q_3[k+2][j][i]-q_4[k-2][j][i]*q_3[k-2][j][i]))+0.038*(cons_4[k+3][j][i]*q_3[k+3][j][i]-cons_4[k-3][j][i]*q_3[k-3][j][i]+(q_4[k+3][j][i]*q_3[k+3][j][i]-q_4[k-3][j][i]*q_3[k-3][j][i]))-0.0035*(cons_4[k+4][j][i]*q_3[k+4][j][i]-cons_4[k-4][j][i]*q_3[k-4][j][i]+(q_4[k+4][j][i]*q_3[k+4][j][i]-q_4[k-4][j][i]*q_3[k-4][j][i])))*dxinv2;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1+1][j][i]*q_3[k+1+1][j][i]-cons_4[k+1-1][j][i]*q_3[k+1-1][j][i]+(q_4[k+1+1][j][i]*q_3[k+1+1][j][i]-q_4[k+1-1][j][i]*q_3[k+1-1][j][i]))-0.2*(cons_4[k+1+2][j][i]*q_3[k+1+2][j][i]-cons_4[k+1-2][j][i]*q_3[k+1-2][j][i]+(q_4[k+1+2][j][i]*q_3[k+1+2][j][i]-q_4[k+1-2][j][i]*q_3[k+1-2][j][i]))+0.038*(cons_4[k+1+3][j][i]*q_3[k+1+3][j][i]-cons_4[k+1-3][j][i]*q_3[k+1-3][j][i]+(q_4[k+1+3][j][i]*q_3[k+1+3][j][i]-q_4[k+1-3][j][i]*q_3[k+1-3][j][i]))-0.0035*(cons_4[k+1+4][j][i]*q_3[k+1+4][j][i]-cons_4[k+1-4][j][i]*q_3[k+1-4][j][i]+(q_4[k+1+4][j][i]*q_3[k+1+4][j][i]-q_4[k+1-4][j][i]*q_3[k+1-4][j][i])))*dxinv2;
}
}
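// host_code: allocates device buffers for all thirteen arrays, copies them in,
// launches hypterm_0/1/2 with a 16x4x4 block (grid z divided by 4 and 2 for the
// multi-plane kernels), and copies the five flux arrays back. Note that dxinv0
// is passed negated to every kernel and the device buffers are never freed here.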
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
cudaMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_1;
cudaMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_2;
cudaMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_3;
cudaMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_4;
cudaMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_1;
cudaMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_2;
cudaMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_3;
cudaMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_4;
cudaMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_1;
cudaMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_2;
cudaMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_3;
cudaMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_4;
cudaMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig_0 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hypterm_0 <<<gridconfig_0, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_1 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 4*blockconfig.z));
hypterm_1 <<<gridconfig_1, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_2 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 2*blockconfig.z));
hypterm_2 <<<gridconfig_2, blockconfig>>> (flux_4, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
}
|
28481933c312e3b0098beaf018345e68c002f368.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -no-opaque-pointers -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,DEV,NORDC-D %s
// RUN: %clang_cc1 -no-opaque-pointers -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.dev
// RUN: cat %t.dev | FileCheck -check-prefixes=COMMON,DEV,RDC-D %s
// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,HOST,NORDC %s
// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.host
// RUN: cat %t.host | FileCheck -check-prefixes=COMMON,HOST,RDC %s
// Check that device and host compilation use the same postfix for static
// variable names.
// RUN: cat %t.dev %t.host | FileCheck -check-prefix=POSTFIX %s
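// The checks below exercise the managed-variable lowering: each __managed__
// declaration produces the variable itself (a pointer the runtime fills in at
// registration time) plus a companion ".managed" global holding the initial
// value, and the host object registers the pair with __hipRegisterManagedVar.
// Under -fgpu-rdc, static managed variables additionally get a
// ".static.<hash>" postfix (tied to the -cuid value) so device and host agree
// on the symbol name.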
#include "Inputs/cuda.h"
struct vec {
float x,y,z;
};
// DEV-DAG: @x.managed = addrspace(1) externally_initialized global i32 1, align 4
// DEV-DAG: @x = addrspace(1) externally_initialized global i32 addrspace(1)* null
// NORDC-DAG: @x.managed = internal global i32 1
// RDC-DAG: @x.managed = global i32 1
// NORDC-DAG: @x = internal externally_initialized global i32* null
// RDC-DAG: @x = externally_initialized global i32* null
// HOST-DAG: @[[DEVNAMEX:[0-9]+]] = {{.*}}c"x\00"
__managed__ int x = 1;
// DEV-DAG: @v.managed = addrspace(1) externally_initialized global [100 x %struct.vec] zeroinitializer, align 4
// DEV-DAG: @v = addrspace(1) externally_initialized global [100 x %struct.vec] addrspace(1)* null
__managed__ vec v[100];
// DEV-DAG: @v2.managed = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> <{ %struct.vec { float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 }, [99 x %struct.vec] zeroinitializer }>, align 4
// DEV-DAG: @v2 = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> addrspace(1)* null
__managed__ vec v2[100] = {{1, 1, 1}};
// DEV-DAG: @ex.managed = external addrspace(1) global i32, align 4
// DEV-DAG: @ex = external addrspace(1) externally_initialized global i32 addrspace(1)*
// HOST-DAG: @ex.managed = external global i32
// HOST-DAG: @ex = external externally_initialized global i32*
extern __managed__ int ex;
// NORDC-D-DAG: @_ZL2sx.managed = addrspace(1) externally_initialized global i32 1, align 4
// NORDC-D-DAG: @_ZL2sx = addrspace(1) externally_initialized global i32 addrspace(1)* null
// RDC-D-DAG: @_ZL2sx.static.[[HASH:.*]].managed = addrspace(1) externally_initialized global i32 1, align 4
// RDC-D-DAG: @_ZL2sx.static.[[HASH]] = addrspace(1) externally_initialized global i32 addrspace(1)* null
// HOST-DAG: @_ZL2sx.managed = internal global i32 1
// HOST-DAG: @_ZL2sx = internal externally_initialized global i32* null
// NORDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx\00"
// RDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH:.*]]\00"
// POSTFIX: @_ZL2sx.static.[[HASH:.*]] = addrspace(1) externally_initialized global i32 addrspace(1)* null
// POSTFIX: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH]]\00"
static __managed__ int sx = 1;
// DEV-DAG: @llvm.compiler.used
// DEV-SAME-DAG: @x.managed
// DEV-SAME-DAG: @x
// DEV-SAME-DAG: @v.managed
// DEV-SAME-DAG: @v
// DEV-SAME-DAG: @_ZL2sx.managed
// DEV-SAME-DAG: @_ZL2sx
// Force ex and sx emitted in device compilation.
__global__ void foo(int *z) {
*z = x + ex + sx;
v[1].x = 2;
}
// Force ex and sx emitted in host compilation.
int foo2() {
return ex + sx;
}
// COMMON-LABEL: define {{.*}}@_Z4loadv()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load() {
return x;
}
// COMMON-LABEL: define {{.*}}@_Z5storev()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32 2, i32* %0, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32 2, i32* %ld.managed, align 4
__device__ __host__ void store() {
x = 2;
}
// COMMON-LABEL: define {{.*}}@_Z10addr_takenv()
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32* %0, i32** %p.ascast, align 8
// DEV: %1 = load i32*, i32** %p.ascast, align 8
// DEV: store i32 3, i32* %1, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32* %ld.managed, i32** %p, align 8
// HOST: %0 = load i32*, i32** %p, align 8
// HOST: store i32 3, i32* %0, align 4
__device__ __host__ void addr_taken() {
int *p = &x;
*p = 3;
}
// HOST-LABEL: define {{.*}}@_Z5load2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = load float, float* %0, align 4
// HOST: ret float %1
__device__ __host__ float load2() {
return v[1].x;
}
// HOST-LABEL: define {{.*}}@_Z5load3v()
// HOST: %ld.managed = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %0 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed to [100 x %struct.vec]*
// HOST: %1 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %0, i64 0, i64 1, i32 1
// HOST: %2 = load float, float* %1, align 4
// HOST: ret float %2
float load3() {
return v2[1].y;
}
// HOST-LABEL: define {{.*}}@_Z11addr_taken2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = ptrtoint float* %0 to i64
// HOST: %ld.managed1 = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %2 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed1 to [100 x %struct.vec]*
// HOST: %3 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %2, i64 0, i64 1, i32 1
// HOST: %4 = ptrtoint float* %3 to i64
// HOST: %5 = sub i64 %4, %1
// HOST: %sub.ptr.div = sdiv exact i64 %5, 4
// HOST: %conv = sitofp i64 %sub.ptr.div to float
// HOST: ret float %conv
float addr_taken2() {
return (float)reinterpret_cast<long>(&(v2[1].y)-&(v[1].x));
}
// COMMON-LABEL: define {{.*}}@_Z5load4v()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @ex, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @ex, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load4() {
return ex;
}
// HOST-DAG: __hipRegisterManagedVar({{.*}}@x {{.*}}@x.managed {{.*}}@[[DEVNAMEX]]{{.*}}, i64 4, i32 4)
// HOST-DAG: __hipRegisterManagedVar({{.*}}@_ZL2sx {{.*}}@_ZL2sx.managed {{.*}}@[[DEVNAMESX]]
// HOST-NOT: __hipRegisterManagedVar({{.*}}@ex {{.*}}@ex.managed
// HOST-DAG: declare void @__hipRegisterManagedVar(i8**, i8*, i8*, i8*, i64, i32)
| 28481933c312e3b0098beaf018345e68c002f368.cu | // RUN: %clang_cc1 -no-opaque-pointers -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,DEV,NORDC-D %s
// RUN: %clang_cc1 -no-opaque-pointers -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.dev
// RUN: cat %t.dev | FileCheck -check-prefixes=COMMON,DEV,RDC-D %s
// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,HOST,NORDC %s
// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.host
// RUN: cat %t.host | FileCheck -check-prefixes=COMMON,HOST,RDC %s
// Check that device and host compilation use the same postfix for static
// variable names.
// RUN: cat %t.dev %t.host | FileCheck -check-prefix=POSTFIX %s
#include "Inputs/cuda.h"
struct vec {
float x,y,z;
};
// DEV-DAG: @x.managed = addrspace(1) externally_initialized global i32 1, align 4
// DEV-DAG: @x = addrspace(1) externally_initialized global i32 addrspace(1)* null
// NORDC-DAG: @x.managed = internal global i32 1
// RDC-DAG: @x.managed = global i32 1
// NORDC-DAG: @x = internal externally_initialized global i32* null
// RDC-DAG: @x = externally_initialized global i32* null
// HOST-DAG: @[[DEVNAMEX:[0-9]+]] = {{.*}}c"x\00"
__managed__ int x = 1;
// DEV-DAG: @v.managed = addrspace(1) externally_initialized global [100 x %struct.vec] zeroinitializer, align 4
// DEV-DAG: @v = addrspace(1) externally_initialized global [100 x %struct.vec] addrspace(1)* null
__managed__ vec v[100];
// DEV-DAG: @v2.managed = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> <{ %struct.vec { float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 }, [99 x %struct.vec] zeroinitializer }>, align 4
// DEV-DAG: @v2 = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> addrspace(1)* null
__managed__ vec v2[100] = {{1, 1, 1}};
// DEV-DAG: @ex.managed = external addrspace(1) global i32, align 4
// DEV-DAG: @ex = external addrspace(1) externally_initialized global i32 addrspace(1)*
// HOST-DAG: @ex.managed = external global i32
// HOST-DAG: @ex = external externally_initialized global i32*
extern __managed__ int ex;
// NORDC-D-DAG: @_ZL2sx.managed = addrspace(1) externally_initialized global i32 1, align 4
// NORDC-D-DAG: @_ZL2sx = addrspace(1) externally_initialized global i32 addrspace(1)* null
// RDC-D-DAG: @_ZL2sx.static.[[HASH:.*]].managed = addrspace(1) externally_initialized global i32 1, align 4
// RDC-D-DAG: @_ZL2sx.static.[[HASH]] = addrspace(1) externally_initialized global i32 addrspace(1)* null
// HOST-DAG: @_ZL2sx.managed = internal global i32 1
// HOST-DAG: @_ZL2sx = internal externally_initialized global i32* null
// NORDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx\00"
// RDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH:.*]]\00"
// POSTFIX: @_ZL2sx.static.[[HASH:.*]] = addrspace(1) externally_initialized global i32 addrspace(1)* null
// POSTFIX: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH]]\00"
static __managed__ int sx = 1;
// DEV-DAG: @llvm.compiler.used
// DEV-SAME-DAG: @x.managed
// DEV-SAME-DAG: @x
// DEV-SAME-DAG: @v.managed
// DEV-SAME-DAG: @v
// DEV-SAME-DAG: @_ZL2sx.managed
// DEV-SAME-DAG: @_ZL2sx
// Force ex and sx emitted in device compilation.
__global__ void foo(int *z) {
*z = x + ex + sx;
v[1].x = 2;
}
// Force ex and sx emitted in host compilation.
int foo2() {
return ex + sx;
}
// COMMON-LABEL: define {{.*}}@_Z4loadv()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load() {
return x;
}
// COMMON-LABEL: define {{.*}}@_Z5storev()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32 2, i32* %0, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32 2, i32* %ld.managed, align 4
__device__ __host__ void store() {
x = 2;
}
// COMMON-LABEL: define {{.*}}@_Z10addr_takenv()
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32* %0, i32** %p.ascast, align 8
// DEV: %1 = load i32*, i32** %p.ascast, align 8
// DEV: store i32 3, i32* %1, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32* %ld.managed, i32** %p, align 8
// HOST: %0 = load i32*, i32** %p, align 8
// HOST: store i32 3, i32* %0, align 4
__device__ __host__ void addr_taken() {
int *p = &x;
*p = 3;
}
// HOST-LABEL: define {{.*}}@_Z5load2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = load float, float* %0, align 4
// HOST: ret float %1
__device__ __host__ float load2() {
return v[1].x;
}
// HOST-LABEL: define {{.*}}@_Z5load3v()
// HOST: %ld.managed = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %0 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed to [100 x %struct.vec]*
// HOST: %1 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %0, i64 0, i64 1, i32 1
// HOST: %2 = load float, float* %1, align 4
// HOST: ret float %2
float load3() {
return v2[1].y;
}
// HOST-LABEL: define {{.*}}@_Z11addr_taken2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = ptrtoint float* %0 to i64
// HOST: %ld.managed1 = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %2 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed1 to [100 x %struct.vec]*
// HOST: %3 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %2, i64 0, i64 1, i32 1
// HOST: %4 = ptrtoint float* %3 to i64
// HOST: %5 = sub i64 %4, %1
// HOST: %sub.ptr.div = sdiv exact i64 %5, 4
// HOST: %conv = sitofp i64 %sub.ptr.div to float
// HOST: ret float %conv
float addr_taken2() {
return (float)reinterpret_cast<long>(&(v2[1].y)-&(v[1].x));
}
// COMMON-LABEL: define {{.*}}@_Z5load4v()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @ex, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @ex, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load4() {
return ex;
}
// HOST-DAG: __hipRegisterManagedVar({{.*}}@x {{.*}}@x.managed {{.*}}@[[DEVNAMEX]]{{.*}}, i64 4, i32 4)
// HOST-DAG: __hipRegisterManagedVar({{.*}}@_ZL2sx {{.*}}@_ZL2sx.managed {{.*}}@[[DEVNAMESX]]
// HOST-NOT: __hipRegisterManagedVar({{.*}}@ex {{.*}}@ex.managed
// HOST-DAG: declare void @__hipRegisterManagedVar(i8**, i8*, i8*, i8*, i64, i32)
|
d0b9e6c97e716984f52d6fd99ce72c0d7d6865df.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
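// Tile arithmetic for the configuration above, assuming the usual CUTLASS
// SIMT decomposition (warps per block x threads per warp x elements per
// thread): M = 1 * 4 * 2 = 8 and N = 1 * 8 * 4 = 32, giving the 8 x 32 x 8
// threadblock with K = 8 as the per-stage depth. The headers below follow the
// same scheme.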
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
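  // hipDeviceSynchronize() inside the timed loop makes each iteration wait for
  // the launched kernel to finish, so the measured time covers device execution
  // rather than just the launch.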
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
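// BENCHMARK(...) registers each function with Google Benchmark, and the
// ->RangeMultiplier(2)->Range(256, 4096) chain runs it for square problem
// sizes N = 256, 512, ..., 4096. Every configuration below repeats this
// registration pattern.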
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_32x64x1_8x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_64x32x1_8x8_8x4_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_32x64x1_8x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_64x32x1_8x8_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x64x1_8x8_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 1
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_64x32x1_8x8_8x4_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_32x64x1_8x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_64x32x1_8x8_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x256x8_16x64x1_4x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_32x32x1_8x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x256x8_32x64x1_8x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_64x32x1_8x8_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
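// The 128x128x8 configuration above is guarded by CUASR_BENCH_LEVEL >= 0, the
// loosest guard used for these kernels, so it runs at every non-negative bench
// level. The "Flop/s" counter counts one AddOp (minimum) and one MultOp
// (maximum) per inner-product term, i.e. 2*N^3 semiring ops per iteration;
// kIsIterationInvariantRate makes Google Benchmark report that value times the
// iteration count, divided by elapsed wall-clock time.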
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_32x32x1_8x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_32x64x1_8x8_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x32x8_64x16x1_8x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x64x8_64x32x1_8x8_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_16x32x1_4x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x256x8_16x64x1_4x8_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
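// Editorial note: every benchmark in this file is guarded by CUASR_BENCH_LEVEL. Blocks
// guarded with ">= 2" (such as the one above) are only compiled when the build defines
// CUASR_BENCH_LEVEL to 2 or higher (for example via a -DCUASR_BENCH_LEVEL=2 compiler
// definition, however the project's build system spells it); ">= 1" blocks need level 1
// or higher, and the single ">= 0" default configuration is compiled at any defined level.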
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_32x16x1_4x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_32x32x1_8x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x64x8_64x16x1_8x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
// d0b9e6c97e716984f52d6fd99ce72c0d7d6865df.cu
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_32x64x1_8x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_64x32x1_8x8_8x4_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_32x64x1_8x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_64x32x1_8x8_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
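// Editorial note: once built, an individual configuration such as the default-level
// benchmark above can be selected at run time with Google Benchmark's standard filter
// flag, e.g. (hypothetical binary name):
//   ./cuasr_bench --benchmark_filter=minimum_maximum_ssrgemm_nt_t_64x64x8_64x32x1_8x8_8x4_1x2
// The registered ->RangeMultiplier(2)->Range(256, 4096) arguments then sweep N over
// powers of two from 256 to 4096 for that single kernel.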
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x64x1_8x8_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 1
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_64x32x1_8x8_8x4_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_32x64x1_8x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_64x32x1_8x8_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x256x8_16x64x1_4x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_32x32x1_8x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x256x8_32x64x1_8x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_64x32x1_8x8_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
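// Hedged aside (not taken from the original source): flops_per_itr treats each
// semiring inner-product step as two operations (a minimum standing in for the
// "add" and a maximum for the "multiply"), i.e. 2 * N^3 per GEMM. A host-side
// helper reproducing that accounting is sketched below; elapsed_seconds is a
// hypothetical measured duration, not a value supplied by the bench harness.
static inline double min_max_srgemm_gflops(int n, double elapsed_seconds) {
  double ops = 2.0 * static_cast<double>(n) * n * n;  // same 2*N^3 accounting as flops_per_itr
  return ops / elapsed_seconds / 1.0e9;               // operations per second, in units of 1e9
}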
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_32x32x1_8x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_32x64x1_8x8_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x32x8_64x16x1_8x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x64x8_64x32x1_8x8_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x128x8_16x32x1_4x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_64x256x8_16x64x1_4x8_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x64x8_32x16x1_4x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_128x128x8_32x32x1_8x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_nt_t_256x64x8_64x16x1_8x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
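// Hedged aside: the driver that actually runs the BENCHMARK(...) registrations
// above lives elsewhere in the project. A minimal standalone entry point using
// the stock Google Benchmark API would look like the sketch below; the guard
// macro CUASR_BENCH_STANDALONE_MAIN_SKETCH is hypothetical and never defined,
// so the block stays inert unless explicitly enabled.
#if defined(CUASR_BENCH_STANDALONE_MAIN_SKETCH)
#include <benchmark/benchmark.h>
int main(int argc, char **argv) {
  benchmark::Initialize(&argc, argv);   // parse --benchmark_* command line flags
  benchmark::RunSpecifiedBenchmarks();  // run every registered benchmark
  return 0;
}
#endif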
|
79ef6a9d638a2a2a118578e236e80b4e2e2ab041.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <math.h>
#define N 6
#define UPPER N*4   /* highest random value generated for the array */
#define LOWER 1     /* lowest random value generated for the array */
#define THREADS_PER_BLOCK 1
#define BLOCKS 1 //(N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK
void rand_init_array(int *a, int n, int upper, int lower);
void display_array(int *a, int n);
__global__ void setup_kernel(hiprandState_t *state){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
hiprand_init(clock64(), idx, 0, &state[idx]);
}
__device__ float getnextrand(hiprandState_t *state){
return (float)(hiprand_uniform(state));
}
__device__ int getnextrandscaled(hiprandState_t *state, int scale){
    /* hiprand_uniform() returns a value in (0, 1], so clamp to keep the result inside [0, scale-1] */
    int r = (int) (scale * getnextrand(state));
    return r < scale ? r : scale - 1;
}
/*
 * Function: swap_random
 * --------------------
 * Swaps this thread's element with a randomly chosen element of the array
 *
 * a: the array (integer)
 * state: hipRAND state used to draw the random index
 *
 */
__device__ void swap_random(int *a, hiprandState_t *state){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int r = getnextrandscaled(state, N);
a[r] = atomicExch(&(a[idx]), a[r]);
printf("%d ", r);
}
/*
* Function: is_sorted
* --------------------
* Checks if array is sorted
*
* a: the array (integer)
* n: number of elements in the array
*
*/
__device__ int is_sorted(int *a, int n){
while ( --n >= 1 )
        if ( a[n] < a[n-1] ) return 0;  /* non-decreasing order counts as sorted, so repeated values (possible from rand_init_array) can still terminate */
return 1;
}
/*
 * Function: bogo_sort
 * --------------------
 * Performs bogo sort (random shuffles until the array is sorted)
 *
 * a: the array (integer)
 * n: number of elements in the array
 * found: one-element flag set to 1 once the array is sorted
 * state: hipRAND state used for the random shuffles
 *
 */
__global__ void bogo_sort(int *a, int n, int *found, hiprandState_t *state){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < n){
while(!found[0]){
swap_random(a, state);
found[0] = is_sorted(a, n);
}
}
}
/*
* Main
*/
int main(int argc, char *argv[]){
float total_time, comp_time;
hipEvent_t total_start, total_stop, comp_start, comp_stop;
hipEventCreate(&total_start);
hipEventCreate(&total_stop);
hipEventCreate(&comp_start);
hipEventCreate(&comp_stop);
/* Some initializations & allocations to generate random number within kernel */
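    /* One RNG state is initialized per launched thread; BLOCKS * THREADS_PER_BLOCK == 1 here, so a single hiprandState_t is enough */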
hiprandState_t *d_state;
hipMalloc(&d_state, sizeof(hiprandState_t));
hipLaunchKernelGGL(( setup_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK) , 0, 0, d_state);
/* -------------------------------------------------------------------------- */
/*
* Memory allocation on host
*/
int *array = (int *)malloc(N*sizeof(int));
    int found[1] = {0};
/*
* Init array
*/
rand_init_array(array, N, UPPER, LOWER);
display_array(array, N);
/*
* Memory allocation on device
*/
int *array_dev, *found_dev;
hipMalloc((void **)&array_dev, N*sizeof(int));
hipMalloc((void **)&found_dev, 1*sizeof(int));
hipEventRecord(total_start);
/*
* Copy array from host memory to device memory
*/
hipMemcpy(array_dev, array, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(found_dev, found, 1*sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(comp_start);
/*
* Kernel call
*/
hipLaunchKernelGGL(( bogo_sort), dim3(BLOCKS), dim3(THREADS_PER_BLOCK) , 0, 0, array_dev, N, found_dev, d_state);
hipEventRecord(comp_stop);
hipEventSynchronize(comp_stop);
hipEventElapsedTime(&comp_time, comp_start, comp_stop);
/*
 * Copy the result array from device memory back to host memory
*/
hipMemcpy(array, array_dev, N*sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(total_stop);
hipEventSynchronize(total_stop);
hipEventElapsedTime(&total_time, total_start, total_stop);
/*
* Free memory on device
*/
    hipFree(array_dev);
    hipFree(found_dev);
    hipFree(d_state);
hipEventDestroy(comp_start);
hipEventDestroy(comp_stop);
hipEventDestroy(total_start);
hipEventDestroy(total_stop);
/*
* GPU timing
*/
printf("N: %d, blocks: %d, total_threads: %d\n", N, BLOCKS, THREADS_PER_BLOCK*BLOCKS);
printf("Total time (ms): %f\n", total_time);
printf("Kernel time (ms): %f\n", comp_time);
printf("Data transfer time (ms): %f\n", total_time-comp_time);
display_array(array, N);
return 0;
}
/*
* Function: rand_init_array
* --------------------
* Fills an integer array with random numbers
*
* a: the array that will be filled with numbers
* n: number of elements in the array
* upper: highest value of random number
* lower: lowest value of random number
*
*/
void rand_init_array(int *a, int n, int upper, int lower){
int i;
for (i=0; i<n; ++i)
a[i] = (rand() % (upper - lower + 1)) + lower;
}
/*
* Function: display_array
* --------------------
* Prints an integer array to user
*
* a: the array that will be printed
* n: number of elements in the array
*
*/
void display_array(int *a, int n){
int i;
for (i=0; i < n; ++i) printf("%d ", a[i]);
printf("\n");
} | 79ef6a9d638a2a2a118578e236e80b4e2e2ab041.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#define N 6
#define UPPER N*4
#define LOWER 1
#define THREADS_PER_BLOCK 1
#define BLOCKS 1 //(N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK
void rand_init_array(int *a, int n, int upper, int lower);
void display_array(int *a, int n);
__global__ void setup_kernel(curandState *state){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
curand_init(clock64(), idx, 0, &state[idx]);
}
__device__ float getnextrand(curandState *state){
return (float)(curand_uniform(state));
}
__device__ int getnextrandscaled(curandState *state, int scale){
return (int) scale * getnextrand(state);
}
/*
 * Function: swap_random
 * --------------------
 * Swaps the calling thread's element with a randomly chosen element of the array
 *
 * a: the array (integer)
 * state: RNG state used to pick the index of the element to swap with
 *
 */
__device__ void swap_random(int *a, curandState *state){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int r = getnextrandscaled(state, N);
a[r] = atomicExch(&(a[idx]), a[r]);
printf("%d ", r);
}
/*
* Function: is_sorted
* --------------------
* Checks if array is sorted
*
* a: the array (integer)
* n: number of elements in the array
*
*/
__device__ int is_sorted(int *a, int n){
while ( --n >= 1 )
if ( a[n] < a[n-1] || a[n] == a[n-1]) return 0;
return 1;
}
/*
* Function: bogo_sort
* --------------------
 * Performs bogo sort (random shuffle until the array is sorted)
*
* a: the array (integer)
* n: number of elements in the array
*
*/
__global__ void bogo_sort(int *a, int n, int *found, curandState *state){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < n){
while(!found[0]){
swap_random(a, state);
found[0] = is_sorted(a, n);
}
}
}
/*
* Main
*/
int main(int argc, char *argv[]){
float total_time, comp_time;
cudaEvent_t total_start, total_stop, comp_start, comp_stop;
cudaEventCreate(&total_start);
cudaEventCreate(&total_stop);
cudaEventCreate(&comp_start);
cudaEventCreate(&comp_stop);
/* Some initializations & allocations to generate random number within kernel */
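    /* One RNG state is initialized per launched thread; BLOCKS * THREADS_PER_BLOCK == 1 here, so a single curandState is enough */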
curandState *d_state;
cudaMalloc(&d_state, sizeof(curandState));
setup_kernel<<< BLOCKS, THREADS_PER_BLOCK >>>(d_state);
/* -------------------------------------------------------------------------- */
/*
* Memory allocation on host
*/
int *array = (int *)malloc(N*sizeof(int));
    int found[1] = {0};
/*
* Init array
*/
rand_init_array(array, N, UPPER, LOWER);
display_array(array, N);
/*
* Memory allocation on device
*/
int *array_dev, *found_dev;
cudaMalloc((void **)&array_dev, N*sizeof(int));
cudaMalloc((void **)&found_dev, 1*sizeof(int));
cudaEventRecord(total_start);
/*
* Copy array from host memory to device memory
*/
cudaMemcpy(array_dev, array, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(found_dev, found, 1*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(comp_start);
/*
* Kernel call
*/
bogo_sort<<< BLOCKS, THREADS_PER_BLOCK >>>(array_dev, N, found_dev, d_state);
cudaEventRecord(comp_stop);
cudaEventSynchronize(comp_stop);
cudaEventElapsedTime(&comp_time, comp_start, comp_stop);
/*
 * Copy the result array from device memory back to host memory
*/
cudaMemcpy(array, array_dev, N*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(total_stop);
cudaEventSynchronize(total_stop);
cudaEventElapsedTime(&total_time, total_start, total_stop);
/*
* Free memory on device
*/
    cudaFree(array_dev);
    cudaFree(found_dev);
    cudaFree(d_state);
cudaEventDestroy(comp_start);
cudaEventDestroy(comp_stop);
cudaEventDestroy(total_start);
cudaEventDestroy(total_stop);
/*
* GPU timing
*/
printf("N: %d, blocks: %d, total_threads: %d\n", N, BLOCKS, THREADS_PER_BLOCK*BLOCKS);
printf("Total time (ms): %f\n", total_time);
printf("Kernel time (ms): %f\n", comp_time);
printf("Data transfer time (ms): %f\n", total_time-comp_time);
display_array(array, N);
return 0;
}
/*
* Function: rand_init_array
* --------------------
* Fills an integer array with random numbers
*
* a: the array that will be filled with numbers
* n: number of elements in the array
* upper: highest value of random number
* lower: lowest value of random number
*
*/
void rand_init_array(int *a, int n, int upper, int lower){
int i;
for (i=0; i<n; ++i)
a[i] = (rand() % (upper - lower + 1)) + lower;
}
/*
* Function: display_array
* --------------------
* Prints an integer array to user
*
* a: the array that will be printed
* n: number of elements in the array
*
*/
void display_array(int *a, int n){
int i;
for (i=0; i < n; ++i) printf("%d ", a[i]);
printf("\n");
} |
088e52fae4df83d62fd31c116a4726eabbab9ecf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Code for A Thread Block Program
#include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
__global__ void hello()
{
printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
}
int main(int argc,char **argv)
{
// launch the kernel
hipLaunchKernelGGL(( hello), dim3(NUM_BLOCKS), dim3(BLOCK_WIDTH), 0, 0, );
// force the printf()s to flush
hipDeviceSynchronize();
printf("That's all!\n");
return 0;
} | 088e52fae4df83d62fd31c116a4726eabbab9ecf.cu | // Code for A Thread Block Program
#include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
__global__ void hello()
{
printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
}
int main(int argc,char **argv)
{
// launch the kernel
hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
// force the printf()s to flush
cudaDeviceSynchronize();
printf("That's all!\n");
return 0;
} |
b1b7e78db0f351dc1b1337db36db0ce67e8ae9ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/eltwise.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
using namespace MLCommon;
namespace raft {
namespace linalg {
//// Testing unary ops
template <typename Type>
__global__ void naiveScaleKernel(Type *out, const Type *in, Type scalar,
int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = scalar * in[idx];
}
}
template <typename Type>
void naiveScale(Type *out, const Type *in, Type scalar, int len,
hipStream_t stream) {
static const int TPB = 64;
int nblks = MLCommon::ceildiv(len, TPB);
hipLaunchKernelGGL(( naiveScaleKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, out, in, scalar, len);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
struct ScalarMultiplyInputs {
T tolerance;
int len;
T scalar;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const ScalarMultiplyInputs<T> &dims) {
return os;
}
template <typename T>
class ScalarMultiplyTest
: public ::testing::TestWithParam<ScalarMultiplyInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<ScalarMultiplyInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.len;
T scalar = params.scalar;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0), stream);
naiveScale(out_ref, in, scalar, len, stream);
scalarMultiply(out, in, scalar, len, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
protected:
ScalarMultiplyInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<ScalarMultiplyInputs<float>> inputsf1 = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
const std::vector<ScalarMultiplyInputs<double>> inputsd1 = {
{0.00000001, 1024 * 1024, 2.0, 1234ULL}};
typedef ScalarMultiplyTest<float> ScalarMultiplyTestF;
TEST_P(ScalarMultiplyTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef ScalarMultiplyTest<double> ScalarMultiplyTestD;
TEST_P(ScalarMultiplyTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ScalarMultiplyTests, ScalarMultiplyTestF,
::testing::ValuesIn(inputsf1));
INSTANTIATE_TEST_CASE_P(ScalarMultiplyTests, ScalarMultiplyTestD,
::testing::ValuesIn(inputsd1));
//// Testing binary ops
template <typename Type>
__global__ void naiveAddKernel(Type *out, const Type *in1, const Type *in2,
int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = in1[idx] + in2[idx];
}
}
template <typename Type>
void naiveAdd(Type *out, const Type *in1, const Type *in2, int len,
hipStream_t stream) {
static const int TPB = 64;
int nblks = MLCommon::ceildiv(len, TPB);
hipLaunchKernelGGL(( naiveAddKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, out, in1, in2, len);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
struct EltwiseAddInputs {
T tolerance;
int len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const EltwiseAddInputs<T> &dims) {
return os;
}
template <typename T>
class EltwiseAddTest : public ::testing::TestWithParam<EltwiseAddInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<EltwiseAddInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
int len = params.len;
allocate(in1, len);
allocate(in2, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in1, len, T(-1.0), T(1.0), stream);
r.uniform(in2, len, T(-1.0), T(1.0), stream);
naiveAdd(out_ref, in1, in2, len, stream);
eltwiseAdd(out, in1, in2, len, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(in1));
CUDA_CHECK(hipFree(in2));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
protected:
EltwiseAddInputs<T> params;
T *in1, *in2, *out_ref, *out;
};
const std::vector<EltwiseAddInputs<float>> inputsf2 = {
{0.000001f, 1024 * 1024, 1234ULL}};
const std::vector<EltwiseAddInputs<double>> inputsd2 = {
{0.00000001, 1024 * 1024, 1234ULL}};
typedef EltwiseAddTest<float> EltwiseAddTestF;
TEST_P(EltwiseAddTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef EltwiseAddTest<double> EltwiseAddTestD;
TEST_P(EltwiseAddTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(EltwiseAddTests, EltwiseAddTestF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(EltwiseAddTests, EltwiseAddTestD,
::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
| b1b7e78db0f351dc1b1337db36db0ce67e8ae9ed.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/eltwise.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
using namespace MLCommon;
namespace raft {
namespace linalg {
//// Testing unary ops
template <typename Type>
__global__ void naiveScaleKernel(Type *out, const Type *in, Type scalar,
int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = scalar * in[idx];
}
}
template <typename Type>
void naiveScale(Type *out, const Type *in, Type scalar, int len,
cudaStream_t stream) {
static const int TPB = 64;
int nblks = MLCommon::ceildiv(len, TPB);
naiveScaleKernel<Type><<<nblks, TPB, 0, stream>>>(out, in, scalar, len);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
struct ScalarMultiplyInputs {
T tolerance;
int len;
T scalar;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const ScalarMultiplyInputs<T> &dims) {
return os;
}
template <typename T>
class ScalarMultiplyTest
: public ::testing::TestWithParam<ScalarMultiplyInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<ScalarMultiplyInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.len;
T scalar = params.scalar;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0), stream);
naiveScale(out_ref, in, scalar, len, stream);
scalarMultiply(out, in, scalar, len, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
protected:
ScalarMultiplyInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<ScalarMultiplyInputs<float>> inputsf1 = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
const std::vector<ScalarMultiplyInputs<double>> inputsd1 = {
{0.00000001, 1024 * 1024, 2.0, 1234ULL}};
typedef ScalarMultiplyTest<float> ScalarMultiplyTestF;
TEST_P(ScalarMultiplyTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef ScalarMultiplyTest<double> ScalarMultiplyTestD;
TEST_P(ScalarMultiplyTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ScalarMultiplyTests, ScalarMultiplyTestF,
::testing::ValuesIn(inputsf1));
INSTANTIATE_TEST_CASE_P(ScalarMultiplyTests, ScalarMultiplyTestD,
::testing::ValuesIn(inputsd1));
//// Testing binary ops
template <typename Type>
__global__ void naiveAddKernel(Type *out, const Type *in1, const Type *in2,
int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = in1[idx] + in2[idx];
}
}
template <typename Type>
void naiveAdd(Type *out, const Type *in1, const Type *in2, int len,
cudaStream_t stream) {
static const int TPB = 64;
int nblks = MLCommon::ceildiv(len, TPB);
naiveAddKernel<Type><<<nblks, TPB, 0, stream>>>(out, in1, in2, len);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
struct EltwiseAddInputs {
T tolerance;
int len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const EltwiseAddInputs<T> &dims) {
return os;
}
template <typename T>
class EltwiseAddTest : public ::testing::TestWithParam<EltwiseAddInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<EltwiseAddInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
int len = params.len;
allocate(in1, len);
allocate(in2, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in1, len, T(-1.0), T(1.0), stream);
r.uniform(in2, len, T(-1.0), T(1.0), stream);
naiveAdd(out_ref, in1, in2, len, stream);
eltwiseAdd(out, in1, in2, len, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in1));
CUDA_CHECK(cudaFree(in2));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
protected:
EltwiseAddInputs<T> params;
T *in1, *in2, *out_ref, *out;
};
const std::vector<EltwiseAddInputs<float>> inputsf2 = {
{0.000001f, 1024 * 1024, 1234ULL}};
const std::vector<EltwiseAddInputs<double>> inputsd2 = {
{0.00000001, 1024 * 1024, 1234ULL}};
typedef EltwiseAddTest<float> EltwiseAddTestF;
TEST_P(EltwiseAddTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef EltwiseAddTest<double> EltwiseAddTestD;
TEST_P(EltwiseAddTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(EltwiseAddTests, EltwiseAddTestF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(EltwiseAddTests, EltwiseAddTestD,
::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
|
2c2e4c57e4d5fa75b4e2bffa8bdbaeaab1bc8320.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int dim,
const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot, Dtype epsilon) {
CUDA_KERNEL_LOOP(index, num) {
Dtype dot = 0;
for (int d = 0; d < dim; ++d) {
dot += data_1[index * dim + d] * data_2[index * dim + d];
}
channel_dot[index] = dot + epsilon;
}
}
template <typename Dtype>
__global__ void kernel_channel_scal(const int num, const int dim,
const Dtype* norm_data,
Dtype* input_output_data) {
CUDA_KERNEL_LOOP(index, num * dim) {
int n = index / dim;
input_output_data[index] *= norm_data[n];
}
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
if (normalize_ && bottom.size() == 1) {
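    // normalize each of the N_ weight vectors (one per output) to unit L2 norm in place:
    // compute squared norms (+1e-12 for stability), take the inverse square root, then rescale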
Dtype* mutable_weight = this->blobs_[0]->mutable_gpu_data();
Dtype* weight_norm_data = weight_norm_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(N_),
CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight, weight, weight_norm_data, 1e-12);
caffe_gpu_powx(N_, weight_norm_data, Dtype(-0.5), weight_norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_),
CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight_norm_data, mutable_weight);
}
if (M_ == 1) {
caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1.,
weight, bottom_data, (Dtype)0., top_data);
if (bias_term_)
caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), top_data);
}
else {
caffe_gpu_gemm<Dtype>(CblasNoTrans,
transpose_ ? CblasNoTrans : CblasTrans,
M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., top_data);
if (bias_term_)
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
bias_multiplier_.gpu_data(),
bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), (Dtype)1., top_data);
}
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
if ((bottom.size() == 1 && this->param_propagate_down_[0]) ||
(bottom.size() >= 2 && propagate_down[1])) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* weight_diff = bottom.size() >= 2 ? bottom[1]->mutable_gpu_diff() : this->blobs_[0]->mutable_gpu_diff();
// Gradient with respect to weight
if (transpose_) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
K_, N_, M_,
(Dtype)1., bottom_data, top_diff,
(Dtype)1., weight_diff);
}
else {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
N_, K_, M_,
(Dtype)1., top_diff, bottom_data,
(Dtype)1., weight_diff);
}
}
if (bias_term_ && (this->param_propagate_down_[1] ||
(bottom.size() == 3 && propagate_down[2]))) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bias
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
bias_multiplier_.gpu_data(), (Dtype)1.,
bottom.size() == 3 ? bottom[2]->mutable_gpu_diff() : this->blobs_[1]->mutable_gpu_diff());
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bottom data
if (transpose_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,
M_, K_, N_,
(Dtype)1., top_diff, weight,
(Dtype)0., bottom[0]->mutable_gpu_diff());
}
else {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
M_, K_, N_,
(Dtype)1., top_diff, weight,
(Dtype)0., bottom[0]->mutable_gpu_diff());
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);
} // namespace caffe | 2c2e4c57e4d5fa75b4e2bffa8bdbaeaab1bc8320.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int dim,
const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot, Dtype epsilon) {
CUDA_KERNEL_LOOP(index, num) {
Dtype dot = 0;
for (int d = 0; d < dim; ++d) {
dot += data_1[index * dim + d] * data_2[index * dim + d];
}
channel_dot[index] = dot + epsilon;
}
}
template <typename Dtype>
__global__ void kernel_channel_scal(const int num, const int dim,
const Dtype* norm_data,
Dtype* input_output_data) {
CUDA_KERNEL_LOOP(index, num * dim) {
int n = index / dim;
input_output_data[index] *= norm_data[n];
}
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
if (normalize_ && bottom.size() == 1) {
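    // normalize each of the N_ weight vectors (one per output) to unit L2 norm in place:
    // compute squared norms (+1e-12 for stability), take the inverse square root, then rescale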
Dtype* mutable_weight = this->blobs_[0]->mutable_gpu_data();
Dtype* weight_norm_data = weight_norm_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(N_),
CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight, weight, weight_norm_data, 1e-12);
caffe_gpu_powx(N_, weight_norm_data, Dtype(-0.5), weight_norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_),
CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight_norm_data, mutable_weight);
}
if (M_ == 1) {
caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1.,
weight, bottom_data, (Dtype)0., top_data);
if (bias_term_)
caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), top_data);
}
else {
caffe_gpu_gemm<Dtype>(CblasNoTrans,
transpose_ ? CblasNoTrans : CblasTrans,
M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., top_data);
if (bias_term_)
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
bias_multiplier_.gpu_data(),
bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), (Dtype)1., top_data);
}
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
if ((bottom.size() == 1 && this->param_propagate_down_[0]) ||
(bottom.size() >= 2 && propagate_down[1])) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* weight_diff = bottom.size() >= 2 ? bottom[1]->mutable_gpu_diff() : this->blobs_[0]->mutable_gpu_diff();
// Gradient with respect to weight
if (transpose_) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
K_, N_, M_,
(Dtype)1., bottom_data, top_diff,
(Dtype)1., weight_diff);
}
else {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
N_, K_, M_,
(Dtype)1., top_diff, bottom_data,
(Dtype)1., weight_diff);
}
}
if (bias_term_ && (this->param_propagate_down_[1] ||
(bottom.size() == 3 && propagate_down[2]))) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bias
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
bias_multiplier_.gpu_data(), (Dtype)1.,
bottom.size() == 3 ? bottom[2]->mutable_gpu_diff() : this->blobs_[1]->mutable_gpu_diff());
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bottom data
if (transpose_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,
M_, K_, N_,
(Dtype)1., top_diff, weight,
(Dtype)0., bottom[0]->mutable_gpu_diff());
}
else {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
M_, K_, N_,
(Dtype)1., top_diff, weight,
(Dtype)0., bottom[0]->mutable_gpu_diff());
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);
} // namespace caffe |
cfee30b7afaa140a37b66a9306c497191b1ecf96.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| cfee30b7afaa140a37b66a9306c497191b1ecf96.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
a8dca9bc689019d854a9802741b5abcd08f6cb99.hip | // !!! This is a file automatically generated by hipify!!!
#include "imp_includes.hcu"
//#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
void HandleError( hipError_t err,const char *file,int line )
{
confirm(err == hipSuccess,hipGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
}
void HandleError( hipblasStatus_t err,const char *file,int line )
{
confirm(err == HIPBLAS_STATUS_SUCCESS,cublasGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
}
void HandleError( cusolverStatus_t err,const char *file,int line )
{
confirm(err == CUSOLVER_STATUS_SUCCESS,cusolverGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
}
void HandleError( hiprandStatus_t err,const char *file,int line )
{
confirm(err == HIPRAND_STATUS_SUCCESS,curandGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
}
// static void HandleError( hipfftResult err,const char *file,int line )
// {
// confirm(err == HIPFFT_SUCCESS,cufftGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
// }
// static void HandleError( hipsparseStatus_t err,const char *file,int line )
// {
// confirm(err == HIPSPARSE_STATUS_SUCCESS,hipsparseGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
// }
| a8dca9bc689019d854a9802741b5abcd08f6cb99.cu | #include "imp_includes.hcu"
//#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
void HandleError( cudaError_t err,const char *file,int line )
{
confirm(err == cudaSuccess,cudaGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
}
void HandleError( cublasStatus_t err,const char *file,int line )
{
confirm(err == CUBLAS_STATUS_SUCCESS,cublasGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
}
void HandleError( cusolverStatus_t err,const char *file,int line )
{
confirm(err == CUSOLVER_STATUS_SUCCESS,cusolverGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
}
void HandleError( curandStatus_t err,const char *file,int line )
{
confirm(err == CURAND_STATUS_SUCCESS,curandGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
}
// static void HandleError( cufftResult err,const char *file,int line )
// {
// confirm(err == CUFFT_SUCCESS,cufftGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
// }
// static void HandleError( cusparseStatus_t err,const char *file,int line )
// {
// confirm(err == CUSPARSE_STATUS_SUCCESS,cusparseGetErrorString( err )<<" in "<<file<<" at "<<line<<".");
// }
|
07dda0c52af3c10f57c1f952b72d7f0ba7e42d06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
static TexInt32 arrIn0_0;
extern "C" __global__ void scanlUp(const Int64 shIn0_0, const Int64 shOut_0, Int32* __restrict__ arrOut_0)
{
extern volatile __shared__ Int32 sdata0[];
Int32 x0;
Int32 y0;
const Int64 sh0 = shIn0_0;
const int shapeSize = sh0;
const int intervalSize = (shapeSize + gridDim.x - 1) / gridDim.x;
const int start = blockIdx.x * intervalSize;
const int end = min(start + intervalSize, shapeSize);
const int numElements = end - start;
int carryIn = 0;
int seg;
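    // each block scans its interval in tiles of blockDim.x elements using a Hillis-Steele
    // shared-memory scan; y0 carries the running sum between tiles, and only the block's
    // final total is written to arrOut_0 below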
for (seg = threadIdx.x; seg < numElements; seg += blockDim.x) {
const int ix = start + seg;
x0 = indexArray(arrIn0_0, ix);
if (threadIdx.x == 0 && carryIn) {
x0 = y0 + x0;
}
sdata0[threadIdx.x] = x0;
__syncthreads();
if (blockDim.x > 1) {
if (threadIdx.x >= 1) {
y0 = sdata0[threadIdx.x - 1];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 2) {
if (threadIdx.x >= 2) {
y0 = sdata0[threadIdx.x - 2];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 4) {
if (threadIdx.x >= 4) {
y0 = sdata0[threadIdx.x - 4];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 8) {
if (threadIdx.x >= 8) {
y0 = sdata0[threadIdx.x - 8];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 16) {
if (threadIdx.x >= 16) {
y0 = sdata0[threadIdx.x - 16];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 32) {
if (threadIdx.x >= 32) {
y0 = sdata0[threadIdx.x - 32];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 64) {
if (threadIdx.x >= 64) {
y0 = sdata0[threadIdx.x - 64];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 128) {
if (threadIdx.x >= 128) {
y0 = sdata0[threadIdx.x - 128];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 256) {
if (threadIdx.x >= 256) {
y0 = sdata0[threadIdx.x - 256];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 512) {
if (threadIdx.x >= 512) {
y0 = sdata0[threadIdx.x - 512];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (threadIdx.x == 0) {
const int last = min(numElements - seg, blockDim.x) - 1;
y0 = sdata0[last];
}
carryIn = 1;
}
if (threadIdx.x == 0) {
arrOut_0[blockIdx.x] = y0;
}
}
| 07dda0c52af3c10f57c1f952b72d7f0ba7e42d06.cu | #include <accelerate_cuda.h>
static TexInt32 arrIn0_0;
extern "C" __global__ void scanlUp(const Int64 shIn0_0, const Int64 shOut_0, Int32* __restrict__ arrOut_0)
{
extern volatile __shared__ Int32 sdata0[];
Int32 x0;
Int32 y0;
const Int64 sh0 = shIn0_0;
const int shapeSize = sh0;
const int intervalSize = (shapeSize + gridDim.x - 1) / gridDim.x;
const int start = blockIdx.x * intervalSize;
const int end = min(start + intervalSize, shapeSize);
const int numElements = end - start;
int carryIn = 0;
int seg;
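    // each block scans its interval in tiles of blockDim.x elements using a Hillis-Steele
    // shared-memory scan; y0 carries the running sum between tiles, and only the block's
    // final total is written to arrOut_0 below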
for (seg = threadIdx.x; seg < numElements; seg += blockDim.x) {
const int ix = start + seg;
x0 = indexArray(arrIn0_0, ix);
if (threadIdx.x == 0 && carryIn) {
x0 = y0 + x0;
}
sdata0[threadIdx.x] = x0;
__syncthreads();
if (blockDim.x > 1) {
if (threadIdx.x >= 1) {
y0 = sdata0[threadIdx.x - 1];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 2) {
if (threadIdx.x >= 2) {
y0 = sdata0[threadIdx.x - 2];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 4) {
if (threadIdx.x >= 4) {
y0 = sdata0[threadIdx.x - 4];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 8) {
if (threadIdx.x >= 8) {
y0 = sdata0[threadIdx.x - 8];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 16) {
if (threadIdx.x >= 16) {
y0 = sdata0[threadIdx.x - 16];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 32) {
if (threadIdx.x >= 32) {
y0 = sdata0[threadIdx.x - 32];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 64) {
if (threadIdx.x >= 64) {
y0 = sdata0[threadIdx.x - 64];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 128) {
if (threadIdx.x >= 128) {
y0 = sdata0[threadIdx.x - 128];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 256) {
if (threadIdx.x >= 256) {
y0 = sdata0[threadIdx.x - 256];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 512) {
if (threadIdx.x >= 512) {
y0 = sdata0[threadIdx.x - 512];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (threadIdx.x == 0) {
const int last = min(numElements - seg, blockDim.x) - 1;
y0 = sdata0[last];
}
carryIn = 1;
}
if (threadIdx.x == 0) {
arrOut_0[blockIdx.x] = y0;
}
}
|
3cf04802d749c284321135e0aa37a9f3b059b476.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathPointwise.hip"
#else
#include <ATen/MemoryOverlap.h>
#include <ATen/NamedTensorUtils.h>
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#if !defined(THC_REAL_IS_BOOL)
static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) {
at::namedinference::propagate_names(result, src);
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(scalar_t* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
at::assert_no_internal_overlap(self_); \
if (self_ == src) { \
if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(hipGetLastError()); \
propagate_names_if_named_tensor_enabled(self_, src); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real)
#endif
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value,
scalar_t max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
namespace {
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
c10::raw::intrusive_ptr::incref(self);
return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#endif
#endif
| 3cf04802d749c284321135e0aa37a9f3b059b476.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathPointwise.cu"
#else
#include <ATen/MemoryOverlap.h>
#include <ATen/NamedTensorUtils.h>
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#if !defined(THC_REAL_IS_BOOL)
static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) {
at::namedinference::propagate_names(result, src);
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(scalar_t* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
at::assert_no_internal_overlap(self_); \
if (self_ == src) { \
if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(cudaGetLastError()); \
propagate_names_if_named_tensor_enabled(self_, src); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real)
#endif
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value,
scalar_t max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
namespace {
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
c10::raw::intrusive_ptr::incref(self);
return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#endif
#endif
|
6b9fcebdfa070ed51657414200706c8e3830a608.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * A tutorial program for CUDA programming. It implements a matrix multiplication algorithm.
* Steven Liu
*
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#define checkCuda(ret) checkCuda_func( (hipError_t)(ret), __FILE__, __LINE__ )
int pid = 0;
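// Minimal sketch of the error-check helper assumed by the checkCuda macro above:
// it reports any failed runtime call together with the file and line of the call site.
void checkCuda_func(hipError_t ret, const char *file, int line)
{
    if (ret != hipSuccess)
        fprintf(stderr, "CUDA call failed with \"%s\" at %s:%d\n", hipGetErrorString(ret), file, line);
}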
float * init_matrix(int n_rows, int n_cols, float default_val)
{
float *p;
int n_elems = n_rows*n_cols;
p = (float*)malloc(n_elems*sizeof(float));
for(int i=0; i < n_elems; i ++)
p[i] = default_val;
return p;
}
float * init_array(int n_len, float default_val)
{
  float *p;
  p = (float*)malloc(n_len*sizeof(float));
for(int i=0; i < n_len; i ++)
p[i] = default_val;
return p;
}
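/* Note: main() calls display_array, which is not defined in this file. The
 * helper below is an assumed sketch (not from the original code) that prints
 * the first n values of a host array so the example is self-contained. */
void display_array(float *p, int n)
{
  for (int i = 0; i < n; i++)
    printf("%.1f ", p[i]);
  printf("\n");
}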
__global__ void matrix_add_kernel(float* d_mA, float* d_mB, float *d_mP, int n_rows, int n_cols)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int idx = tx*n_rows + ty;
d_mP[idx] = d_mA[idx] + d_mB[idx];
}
__global__ void matrix_mul_kernel(float* d_mA, float* d_mB, float *d_mP, int n_rows, int n_cols)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
float p_val = 0.0;
int idx = tx*n_rows + ty;
for(int k=0; k < n_rows; k++) {
p_val += d_mA[k*n_cols+tx] * d_mB[ty*n_cols+k];
}
d_mP[tx*n_rows+ty] = p_val;
}
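/* Note: matrix_mul_kernel indexes A as d_mA[k*n_cols+tx] and B as
 * d_mB[ty*n_cols+k], and writes d_mP[tx*n_rows+ty]; these indexings only line
 * up when the matrices are square (n_rows == n_cols), which is the case in
 * this tutorial. There are also no bounds checks, so the launch in
 * matrix_mul_on_device below relies on n_rows and n_cols being exact
 * multiples of the block size. */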
void matrix_mul_on_device(float *mA, float *mB, float *mP, int n_rows, int n_cols)
{
int n_elems = n_rows*n_cols;
int size = n_elems*sizeof(float);
float *d_mA, *d_mB, *d_mP;
checkCuda( hipMalloc(&d_mA, size) );
checkCuda( hipMalloc(&d_mB, size) );
checkCuda( hipMalloc(&d_mP, size) );
checkCuda( hipMemcpy(d_mA, mA, size, hipMemcpyHostToDevice) );
checkCuda( hipMemcpy(d_mB, mB, size, hipMemcpyHostToDevice) );
int block_size=32;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(n_rows/block_size, n_cols/block_size);
hipLaunchKernelGGL(( matrix_mul_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_mA, d_mB, d_mP, n_rows, n_cols);
checkCuda( hipMemcpy(mP, d_mP, size, hipMemcpyDeviceToHost) );
checkCuda( hipFree(d_mA) );
checkCuda( hipFree(d_mB) );
checkCuda( hipFree(d_mP) );
}
void reduction_min_max(float *mR, float *mA, int n_len)
{
for(int i=0; i<n_len; i++)
for(int j=0; j<n_len; j++) {
mA[i*n_len+j] = mR[i] * mR[j];
}
}
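/* Note: despite its name, reduction_min_max fills mA with the outer product
 * mR[i]*mR[j]; it performs no reduction and is never called from main() in
 * this tutorial. */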
int main(int argc, char *argv[])
{
int n_len=8192;
float *R = init_array(n_len, 1.0);
float *mA = init_matrix(n_len, n_len, 0.0);
  float *mB = init_matrix(n_len, n_len, 3.0);  /* assumed: all matrices are n_len x n_len (n_rows/n_cols were undeclared) */
  float *mP = init_matrix(n_len, n_len, 0.0);
  matrix_mul_on_device(mA, mB, mP, n_len, n_len);
display_array(mA, 100);
display_array(mB, 100);
display_array(mP, 100);
free(mA);
free(mB);
free(mP);
}
| 6b9fcebdfa070ed51657414200706c8e3830a608.cu | /*
 * A tutorial program for cuda programming. It implements a matrix multiplication algorithm.
* Steven Liu
*
*/
#include <stdio.h>
#include <cuda.h>
#include <iostream>
#define checkCuda(ret) checkCuda_func( (cudaError_t)(ret), __FILE__, __LINE__ )
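/* Note: the checkCuda macro above calls checkCuda_func, which is not defined
 * anywhere in this file, so the program as given does not compile. A minimal
 * sketch of a plausible implementation follows (assumed, not part of the
 * original tutorial): it prints the failing file/line and the CUDA error
 * string, then passes the error code through unchanged. */
inline cudaError_t checkCuda_func(cudaError_t result, const char *file, int line)
{
  if (result != cudaSuccess) {
    /* Report where the failing CUDA call was made and why it failed. */
    fprintf(stderr, "CUDA error at %s:%d: %s\n", file, line, cudaGetErrorString(result));
  }
  return result;
}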
int pid = 0;
float * init_matrix(int n_rows, int n_cols, float default_val)
{
float *p;
int n_elems = n_rows*n_cols;
p = (float*)malloc(n_elems*sizeof(float));
for(int i=0; i < n_elems; i ++)
p[i] = default_val;
return p;
}
float * init_array(int n_len, float default_val)
{
  float *p;
  p = (float*)malloc(n_len*sizeof(float));
for(int i=0; i < n_len; i ++)
p[i] = default_val;
return p;
}
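/* Note: main() calls display_array, which is not defined in this file. The
 * helper below is an assumed sketch (not from the original code) that prints
 * the first n values of a host array so the example is self-contained. */
void display_array(float *p, int n)
{
  for (int i = 0; i < n; i++)
    printf("%.1f ", p[i]);
  printf("\n");
}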
__global__ void matrix_add_kernel(float* d_mA, float* d_mB, float *d_mP, int n_rows, int n_cols)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int idx = tx*n_rows + ty;
d_mP[idx] = d_mA[idx] + d_mB[idx];
}
__global__ void matrix_mul_kernel(float* d_mA, float* d_mB, float *d_mP, int n_rows, int n_cols)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
float p_val = 0.0;
int idx = tx*n_rows + ty;
for(int k=0; k < n_rows; k++) {
p_val += d_mA[k*n_cols+tx] * d_mB[ty*n_cols+k];
}
d_mP[tx*n_rows+ty] = p_val;
}
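/* Note: matrix_mul_kernel indexes A as d_mA[k*n_cols+tx] and B as
 * d_mB[ty*n_cols+k], and writes d_mP[tx*n_rows+ty]; these indexings only line
 * up when the matrices are square (n_rows == n_cols), which is the case in
 * this tutorial. There are also no bounds checks, so the launch in
 * matrix_mul_on_device below relies on n_rows and n_cols being exact
 * multiples of the block size. */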
void matrix_mul_on_device(float *mA, float *mB, float *mP, int n_rows, int n_cols)
{
int n_elems = n_rows*n_cols;
int size = n_elems*sizeof(float);
float *d_mA, *d_mB, *d_mP;
checkCuda( cudaMalloc(&d_mA, size) );
checkCuda( cudaMalloc(&d_mB, size) );
checkCuda( cudaMalloc(&d_mP, size) );
checkCuda( cudaMemcpy(d_mA, mA, size, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpy(d_mB, mB, size, cudaMemcpyHostToDevice) );
int block_size=32;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(n_rows/block_size, n_cols/block_size);
matrix_mul_kernel<<<dimGrid, dimBlock>>>(d_mA, d_mB, d_mP, n_rows, n_cols);
checkCuda( cudaMemcpy(mP, d_mP, size, cudaMemcpyDeviceToHost) );
checkCuda( cudaFree(d_mA) );
checkCuda( cudaFree(d_mB) );
checkCuda( cudaFree(d_mP) );
}
void reduction_min_max(float *mR, float *mA, int n_len)
{
for(int i=0; i<n_len; i++)
for(int j=0; j<n_len; j++) {
mA[i*n_len+j] = mR[i] * mR[j];
}
}
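/* Note: despite its name, reduction_min_max fills mA with the outer product
 * mR[i]*mR[j]; it performs no reduction and is never called from main() in
 * this tutorial. */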
int main(int argc, char *argv[])
{
int n_len=8192;
float *R = init_array(n_len, 1.0);
float *mA = init_matrix(n_len, n_len, 0.0);
  float *mB = init_matrix(n_len, n_len, 3.0);  /* assumed: all matrices are n_len x n_len (n_rows/n_cols were undeclared) */
  float *mP = init_matrix(n_len, n_len, 0.0);
  matrix_mul_on_device(mA, mB, mP, n_len, n_len);
display_array(mA, 100);
display_array(mB, 100);
display_array(mP, 100);
free(mA);
free(mB);
free(mP);
}
|
5fb5e3fed4e74799f7131030e675b9d1ce938d8c.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2012, MAURO BIANCO, UGO VARETTO, SWISS NATIONAL SUPERCOMPUTING CENTRE (CSCS)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Swiss National Supercomputing Centre (CSCS) nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MAURO BIANCO, UGO VARETTO, OR
SWISS NATIONAL SUPERCOMPUTING CENTRE (CSCS), BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <mpi.h>
#include <iostream>
#include <sstream>
#include <fstream>
#include <halo_exchange.h>
#include <string>
#include <stdlib.h>
#include <utils/layout_map.h>
#include <utils/boollist.h>
#include <sys/time.h>
#include "triplet.h"
int pid;
int nprocs;
MPI_Comm CartComm;
int dims[3] = {0,0,0};
int coords[3]={0,0,0};
struct timeval start_tv;
struct timeval stop1_tv;
struct timeval stop2_tv;
struct timeval stop3_tv;
double lapse_time1;
double lapse_time2;
double lapse_time3;
double lapse_time4;
#define B_ADD 1
#define C_ADD 2
typedef GCL::gcl_gpu arch_type;
template <typename T, typename lmap>
struct array {
T *ptr;
int n,m,l;
array(T* _p, int _n, int _m, int _l)
: ptr(_p)
, n(lmap::template find<2>(_n,_m,_l))
, m(lmap::template find<1>(_n,_m,_l))
, l(lmap::template find<0>(_n,_m,_l))
{}
T &operator()(int i, int j, int k) {
// a[(DIM1+2*H)*(DIM2+2*H)*kk+ii*(DIM2+2*H)+jj]
return ptr[l*m*lmap::template find<2>(i,j,k)+
l*lmap::template find<1>(i,j,k)+
lmap::template find<0>(i,j,k)];
}
T const &operator()(int i, int j, int k) const {
return ptr[l*m*lmap::template find<2>(i,j,k)+
l*lmap::template find<1>(i,j,k)+
lmap::template find<0>(i,j,k)];
}
operator void*() const {return reinterpret_cast<void*>(ptr);}
operator T*() const {return ptr;}
};
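/* Note: operator() above linearizes the logical triple (i,j,k) through the
 * layout map: the component selected by find<0> gets stride 1, find<1> gets
 * stride l, and find<2> gets stride l*m, matching how n, m and l were
 * permuted through the same map in the constructor. */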
/** \file Example of use of halo_exchange pattern for regular
    grids. The comments in the code aim at highlighting the process of
instantiating and running a halo exchange pattern.
*/
inline int modulus(int __i, int __j) {
return (((((__i%__j)<0)?(__j+__i%__j):(__i%__j))));
}
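/* Illustrative note: unlike the built-in '%' operator, modulus() always
 * returns a value in [0, __j) for positive __j, e.g. modulus(-1, 5) == 4
 * while modulus(7, 5) == 2. It is used below to compute the expected halo
 * values under periodic boundaries. */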
/* Just a utility to print values
*/
template <typename array_t>
void printbuff(std::ostream &file, array_t const & a, int d1, int d2, int d3) {
if (d1<=7 && d2<=7 && d3<=7) {
file << "------------\n";
for (int kk=0; kk<d3; ++kk) {
file << "|";
for (int jj=0; jj<d2; ++jj) {
for (int ii=0; ii<d1; ++ii) {
file << a(ii,jj,kk);
}
file << "|\n";
}
file << "\n\n";
}
file << "------------\n\n";
}
}
template <typename ST, int I1, int I2, int I3, bool per0, bool per1, bool per2>
void run(ST & file, int DIM1, int DIM2, int DIM3, int H1m, int H1p, int H2m, int H2p, int H3m, int H3p, triple_t<USE_DOUBLE> *_a, triple_t<USE_DOUBLE> *_b, triple_t<USE_DOUBLE> *_c) {
typedef GCL::layout_map<I1,I2,I3> layoutmap;
array<triple_t<USE_DOUBLE>, layoutmap > a(_a, (DIM1+H1m+H1p),(DIM2+H2m+H2p),(DIM3+H3m+H3p));
array<triple_t<USE_DOUBLE>, layoutmap > b(_b, (DIM1+H1m+H1p),(DIM2+H2m+H2p),(DIM3+H3m+H3p));
array<triple_t<USE_DOUBLE>, layoutmap > c(_c, (DIM1+H1m+H1p),(DIM2+H2m+H2p),(DIM3+H3m+H3p));
/* Just an initialization */
for (int ii=0; ii<DIM1+H1m+H1p; ++ii)
for (int jj=0; jj<DIM2+H2m+H2p; ++jj) {
for (int kk=0; kk<DIM3+H3m+H3p; ++kk) {
a(ii,jj,kk) = triple_t<USE_DOUBLE>();
b(ii,jj,kk) = triple_t<USE_DOUBLE>();
c(ii,jj,kk) = triple_t<USE_DOUBLE>();
}
}
/* The pattern type is defined with the layouts, data types and
number of dimensions.
   The logical assumption made in the program is that 'i' is the
   first dimension (rows), 'j' is the second, and 'k' is the
   third. The first layout states that 'i' is the second dimension
   in order of strides, while 'j' is the first and 'k' is the third
   (just by looking at the initialization loops this should be
   clear).
   The second layout states that the first dimension in data ('i')
   also identifies the first dimension in the communicator. Logically,
   moving along the 'i' dimension from processor (p,q,r) will lead you
   to processor (p+1,q,r). The other dimensions behave in the same way.
*/
typedef GCL::halo_exchange_generic<GCL::layout_map<0,1,2>, 3, arch_type, GCL::version_mpi_pack > pattern_type;
/* The pattern is now instantiated with the periodicities and the
communicator. The periodicity of the communicator is
   irrelevant. Setting it to be periodic is the best choice, since
   GCL can then deal with any periodicity easily.
*/
pattern_type he(typename pattern_type::grid_type::period_type(per0, per1, per2), CartComm);
GCL::array<GCL::halo_descriptor,3> halo_dsc;
halo_dsc[0] = GCL::halo_descriptor(H1m, H1p, H1m, DIM1+H1m-1, DIM1+H1m+H1p);
halo_dsc[1] = GCL::halo_descriptor(H2m, H2p, H2m, DIM2+H2m-1, DIM2+H2m+H2p);
halo_dsc[2] = GCL::halo_descriptor(H3m, H3p, H3m, DIM3+H3m-1, DIM3+H3m+H3p);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field1(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(a.ptr), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field2(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(b.ptr), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field3(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(c.ptr), halo_dsc);
/* Pattern is set up. This must be done only once per pattern. The
   parameter must be greater than or equal to the largest number of
   arrays updated in a single step.
*/
//he.setup(100, halo_dsc, sizeof(double));
he.setup(3, GCL::field_on_the_fly<int,layoutmap, pattern_type::traits>(NULL,halo_dsc), sizeof(triple_t<USE_DOUBLE>)); // Estimates the size
file << "Proc: (" << coords[0] << ", " << coords[1] << ", " << coords[2] << ")\n";
for (int ii=H1m; ii<DIM1+H1m; ++ii)
for (int jj=H2m; jj<DIM2+H2m; ++jj)
for (int kk=H3m; kk<DIM3+H3m; ++kk) {
a(ii,jj,kk) =
triple_t<USE_DOUBLE>(ii-H1m+(DIM1)*coords[0],
jj-H2m+(DIM2)*coords[1],
kk-H3m+(DIM3)*coords[2]);
b(ii,jj,kk) =
triple_t<USE_DOUBLE>(ii-H1m+(DIM1)*coords[0]+B_ADD,
jj-H2m+(DIM2)*coords[1]+B_ADD,
kk-H3m+(DIM3)*coords[2]+B_ADD);
c(ii,jj,kk) =
triple_t<USE_DOUBLE>(ii-H1m+(DIM1)*coords[0]+C_ADD,
jj-H2m+(DIM2)*coords[1]+C_ADD,
kk-H3m+(DIM3)*coords[2]+C_ADD);
}
file << "A \n";
printbuff(file,a, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file << "B \n";
printbuff(file,b, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file << "C \n";
printbuff(file,c, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file.flush();
file << "GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU \n";
triple_t<USE_DOUBLE>::data_type* gpu_a = 0;
triple_t<USE_DOUBLE>::data_type* gpu_b = 0;
triple_t<USE_DOUBLE>::data_type* gpu_c = 0;
hipError_t status;
status = hipMalloc( &gpu_a, (DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>::data_type));
if( !checkCudaStatus( status ) ) return;
status = hipMalloc( &gpu_b, (DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>::data_type));
if( !checkCudaStatus( status ) ) return;
status = hipMalloc( &gpu_c, (DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>::data_type));
if( !checkCudaStatus( status ) ) return;
status = hipMemcpy( gpu_a, a.ptr,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
status = hipMemcpy( gpu_b, b.ptr,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
status = hipMemcpy( gpu_c, c.ptr,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field1_gpu(gpu_a, halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field2_gpu(gpu_b, halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field3_gpu(gpu_c, halo_dsc);
std::vector<GCL::field_on_the_fly<triple_t<USE_DOUBLE>, layoutmap, pattern_type::traits> > vect(3);
//#define VECTOR_INTERFACE
#ifdef VECTOR_INTERFACE
vect[0] = field1_gpu;
vect[1] = field2_gpu;
vect[2] = field3_gpu;
// std::vector<triple_t<USE_DOUBLE>*> vect(3);
// vect[0] = gpu_a;
// vect[1] = gpu_b;
// vect[2] = gpu_c;
// /* This is self explanatory now
MPI_Barrier(MPI_COMM_WORLD);
gettimeofday(&start_tv, NULL);
he.pack(vect);
gettimeofday(&stop1_tv, NULL);
he.exchange();
gettimeofday(&stop2_tv, NULL);
he.unpack(vect);
gettimeofday(&stop3_tv, NULL);
#else
MPI_Barrier(MPI_COMM_WORLD);
gettimeofday(&start_tv, NULL);
he.pack(field1_gpu, field2_gpu, field3_gpu);
gettimeofday(&stop1_tv, NULL);
he.exchange();
gettimeofday(&stop2_tv, NULL);
he.unpack(field1_gpu, field2_gpu, field3_gpu);
gettimeofday(&stop3_tv, NULL);
#endif
lapse_time1 = ((static_cast<double>(stop1_tv.tv_sec)+1/1000000.0*static_cast<double>(stop1_tv.tv_usec)) - (static_cast<double>(start_tv.tv_sec)+1/1000000.0*static_cast<double>(start_tv.tv_usec))) * 1000.0;
lapse_time2 = ((static_cast<double>(stop2_tv.tv_sec)+1/1000000.0*static_cast<double>(stop2_tv.tv_usec)) - (static_cast<double>(stop1_tv.tv_sec)+1/1000000.0*static_cast<double>(stop1_tv.tv_usec))) * 1000.0;
lapse_time3 = ((static_cast<double>(stop3_tv.tv_sec)+1/1000000.0*static_cast<double>(stop3_tv.tv_usec)) - (static_cast<double>(stop2_tv.tv_sec)+1/1000000.0*static_cast<double>(stop2_tv.tv_usec))) * 1000.0;
lapse_time4 = ((static_cast<double>(stop3_tv.tv_sec)+1/1000000.0*static_cast<double>(stop3_tv.tv_usec)) - (static_cast<double>(start_tv.tv_sec)+1/1000000.0*static_cast<double>(start_tv.tv_usec))) * 1000.0;
MPI_Barrier(MPI_COMM_WORLD);
file << "TIME PACK: " << lapse_time1 << std::endl;
file << "TIME EXCH: " << lapse_time2 << std::endl;
file << "TIME UNPK: " << lapse_time3 << std::endl;
file << "TIME ALL : " << lapse_time1+lapse_time2+lapse_time3 << std::endl;
file << "TIME TOT : " << lapse_time4 << std::endl;
status = hipMemcpy( a.ptr, gpu_a,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = hipMemcpy( b.ptr, gpu_b,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = hipMemcpy( c.ptr, gpu_c,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = hipFree( gpu_a );
if( !checkCudaStatus( status ) ) return;
status = hipFree( gpu_b );
if( !checkCudaStatus( status ) ) return;
status = hipFree( gpu_c );
if( !checkCudaStatus( status ) ) return;
file << "\n********************************************************************************\n";
file << "A \n";
printbuff(file,a, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file << "B \n";
printbuff(file,b, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file << "C \n";
printbuff(file,c, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file.flush();
file.flush();
int passed = true;
/* Checking the data arrived correctly in the whole region
*/
for (int ii=0; ii<DIM1+H1m+H1p; ++ii)
for (int jj=0; jj<DIM2+H2m+H2p; ++jj)
for (int kk=0; kk<DIM3+H3m+H3p; ++kk) {
triple_t<USE_DOUBLE> ta;
triple_t<USE_DOUBLE> tb;
triple_t<USE_DOUBLE> tc;
int tax, tay, taz;
int tbx, tby, tbz;
int tcx, tcy, tcz;
tax = modulus(ii-H1m+(DIM1)*coords[0], DIM1*dims[0]);
tbx = modulus(ii-H1m+(DIM1)*coords[0], DIM1*dims[0])+B_ADD;
tcx = modulus(ii-H1m+(DIM1)*coords[0], DIM1*dims[0])+C_ADD;
tay = modulus(jj-H2m+(DIM2)*coords[1], DIM2*dims[1]);
tby = modulus(jj-H2m+(DIM2)*coords[1], DIM2*dims[1])+B_ADD;
tcy = modulus(jj-H2m+(DIM2)*coords[1], DIM2*dims[1])+C_ADD;
taz = modulus(kk-H3m+(DIM3)*coords[2], DIM3*dims[2]);
tbz = modulus(kk-H3m+(DIM3)*coords[2], DIM3*dims[2])+B_ADD;
tcz = modulus(kk-H3m+(DIM3)*coords[2], DIM3*dims[2])+C_ADD;
if (!per0) {
if ( ((coords[0]==0) && (ii<H1m)) ||
((coords[0] == dims[0]-1) && (ii >= DIM1+H1m)) ) {
tax=triple_t<USE_DOUBLE>().x();
tbx=triple_t<USE_DOUBLE>().x();
tcx=triple_t<USE_DOUBLE>().x();
}
}
if (!per1) {
if ( ((coords[1]==0) && (jj<H2m)) ||
((coords[1] == dims[1]-1) && (jj >= DIM2+H2m)) ) {
tay=triple_t<USE_DOUBLE>().y();
tby=triple_t<USE_DOUBLE>().y();
tcy=triple_t<USE_DOUBLE>().y();
}
}
if (!per2) {
if ( ((coords[2]==0) && (kk<H3m)) ||
((coords[2] == dims[2]-1) && (kk >= DIM3+H3m)) ) {
taz=triple_t<USE_DOUBLE>().z();
tbz=triple_t<USE_DOUBLE>().z();
tcz=triple_t<USE_DOUBLE>().z();
}
}
ta = triple_t<USE_DOUBLE>(tax, tay, taz).floor();
tb = triple_t<USE_DOUBLE>(tbx, tby, tbz).floor();
tc = triple_t<USE_DOUBLE>(tcx, tcy, tcz).floor();
if (a(ii,jj,kk) != ta) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "a " << a(ii,jj,kk) << " != "
<< ta
<< "\n";
}
if (b(ii,jj,kk) != tb) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "b " << b(ii,jj,kk) << " != "
<< tb
<< "\n";
}
if (c(ii,jj,kk) != tc) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "c " << c(ii,jj,kk) << " != "
<< tc
<< "\n";
}
}
if (passed)
file << "RESULT: PASSED!\n";
else
file << "RESULT: FAILED!\n";
}
#ifdef _GCL_GPU_
/* device_binding added by Devendar Bureddy, OSU */
void
device_binding ()
{
int local_rank=0/*, num_local_procs*/;
int dev_count, use_dev_count, my_dev_id;
char *str;
if ((str = getenv ("MV2_COMM_WORLD_LOCAL_RANK")) != NULL)
{
local_rank = atoi (str);
printf ("MV2_COMM_WORLD_LOCAL_RANK %s\n", str);
}
if ((str = getenv ("MPISPAWN_LOCAL_NPROCS")) != NULL)
{
//num_local_procs = atoi (str);
printf ("MPISPAWN_LOCAL_NPROCS %s\n", str);
}
hipGetDeviceCount (&dev_count);
if ((str = getenv ("NUM_GPU_DEVICES")) != NULL)
{
use_dev_count = atoi (str);
printf ("NUM_GPU_DEVICES %s\n", str);
}
else
{
use_dev_count = dev_count;
}
my_dev_id = local_rank % use_dev_count;
printf ("local rank = %d dev id = %d\n", local_rank, my_dev_id);
hipSetDevice (my_dev_id);
}
#endif
int main(int argc, char** argv) {
#ifdef _GCL_GPU_
device_binding();
#endif
/* this example is based on MPI Cart Communicators, so we need to
initialize MPI. This can be done by GCL automatically
*/
MPI_Init(&argc, &argv);
/* Now let us initialize GCL itself. If MPI is not initialized at
this point, it will initialize it
*/
GCL::GCL_Init(argc, argv);
/* Here we compute the computing grid as in many applications
*/
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
std::cout << pid << " " << nprocs << "\n";
std::stringstream ss;
ss << pid;
std::string filename = "out" + ss.str() + ".txt";
std::cout << filename << std::endl;
std::ofstream file(filename.c_str());
file << pid << " " << nprocs << "\n";
MPI_Dims_create(nprocs, 3, dims);
int period[3] = {1, 1, 1};
file << "@" << pid << "@ MPI GRID SIZE " << dims[0] << " - " << dims[1] << " - " << dims[2] << "\n";
MPI_Cart_create(MPI_COMM_WORLD, 3, dims, period, false, &CartComm);
MPI_Cart_get(CartComm, 3, dims, period, coords);
/* Each process will hold a tile of size
(DIM1+2*H)x(DIM2+2*H)x(DIM3+2*H). The DIM1xDIM2xDIM3 area inside
   the H width border is the inner region of a hypothetical stencil
   computation whose halo width is H.
*/
int DIM1=atoi(argv[1]);
int DIM2=atoi(argv[2]);
int DIM3=atoi(argv[3]);
int H1m =atoi(argv[4]);
int H1p =atoi(argv[5]);
int H2m =atoi(argv[6]);
int H2p =atoi(argv[7]);
int H3m =atoi(argv[8]);
int H3p =atoi(argv[9]);
/* This example will exchange 3 data arrays at the same time with
different values.
*/
triple_t<USE_DOUBLE> *_a = new triple_t<USE_DOUBLE>[(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)];
triple_t<USE_DOUBLE> *_b = new triple_t<USE_DOUBLE>[(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)];
triple_t<USE_DOUBLE> *_c = new triple_t<USE_DOUBLE>[(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)];
file << "Permutation 0,1,2\n";
file << "Permutation 0,1,2\n";
file << "run<std::ostream, 0,1,2, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
file << "Permutation 0,2,1\n";
file << "run<std::ostream, 0,2,1, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
file << "Permutation 1,0,2\n";
file << "run<std::ostream, 1,0,2, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
file << "Permutation 1,2,0\n";
file << "run<std::ostream, 1,2,0, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
file << "Permutation 2,0,1\n";
file << "run<std::ostream, 2,0,1, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
file << "Permutation 2,1,0\n";
file << "run<std::ostream, 2,1,0, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
| 5fb5e3fed4e74799f7131030e675b9d1ce938d8c.cu |
/*
Copyright (c) 2012, MAURO BIANCO, UGO VARETTO, SWISS NATIONAL SUPERCOMPUTING CENTRE (CSCS)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Swiss National Supercomputing Centre (CSCS) nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MAURO BIANCO, UGO VARETTO, OR
SWISS NATIONAL SUPERCOMPUTING CENTRE (CSCS), BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <mpi.h>
#include <iostream>
#include <sstream>
#include <fstream>
#include <halo_exchange.h>
#include <string>
#include <stdlib.h>
#include <utils/layout_map.h>
#include <utils/boollist.h>
#include <sys/time.h>
#include "triplet.h"
int pid;
int nprocs;
MPI_Comm CartComm;
int dims[3] = {0,0,0};
int coords[3]={0,0,0};
struct timeval start_tv;
struct timeval stop1_tv;
struct timeval stop2_tv;
struct timeval stop3_tv;
double lapse_time1;
double lapse_time2;
double lapse_time3;
double lapse_time4;
#define B_ADD 1
#define C_ADD 2
typedef GCL::gcl_gpu arch_type;
template <typename T, typename lmap>
struct array {
T *ptr;
int n,m,l;
array(T* _p, int _n, int _m, int _l)
: ptr(_p)
, n(lmap::template find<2>(_n,_m,_l))
, m(lmap::template find<1>(_n,_m,_l))
, l(lmap::template find<0>(_n,_m,_l))
{}
T &operator()(int i, int j, int k) {
// a[(DIM1+2*H)*(DIM2+2*H)*kk+ii*(DIM2+2*H)+jj]
return ptr[l*m*lmap::template find<2>(i,j,k)+
l*lmap::template find<1>(i,j,k)+
lmap::template find<0>(i,j,k)];
}
T const &operator()(int i, int j, int k) const {
return ptr[l*m*lmap::template find<2>(i,j,k)+
l*lmap::template find<1>(i,j,k)+
lmap::template find<0>(i,j,k)];
}
operator void*() const {return reinterpret_cast<void*>(ptr);}
operator T*() const {return ptr;}
};
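/* Note: operator() above linearizes the logical triple (i,j,k) through the
 * layout map: the component selected by find<0> gets stride 1, find<1> gets
 * stride l, and find<2> gets stride l*m, matching how n, m and l were
 * permuted through the same map in the constructor. */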
/** \file Example of use of halo_exchange pattern for regular
    grids. The comments in the code aim at highlighting the process of
instantiating and running a halo exchange pattern.
*/
inline int modulus(int __i, int __j) {
return (((((__i%__j)<0)?(__j+__i%__j):(__i%__j))));
}
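/* Illustrative note: unlike the built-in '%' operator, modulus() always
 * returns a value in [0, __j) for positive __j, e.g. modulus(-1, 5) == 4
 * while modulus(7, 5) == 2. It is used below to compute the expected halo
 * values under periodic boundaries. */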
/* Just a utility to print values
*/
template <typename array_t>
void printbuff(std::ostream &file, array_t const & a, int d1, int d2, int d3) {
if (d1<=7 && d2<=7 && d3<=7) {
file << "------------\n";
for (int kk=0; kk<d3; ++kk) {
file << "|";
for (int jj=0; jj<d2; ++jj) {
for (int ii=0; ii<d1; ++ii) {
file << a(ii,jj,kk);
}
file << "|\n";
}
file << "\n\n";
}
file << "------------\n\n";
}
}
template <typename ST, int I1, int I2, int I3, bool per0, bool per1, bool per2>
void run(ST & file, int DIM1, int DIM2, int DIM3, int H1m, int H1p, int H2m, int H2p, int H3m, int H3p, triple_t<USE_DOUBLE> *_a, triple_t<USE_DOUBLE> *_b, triple_t<USE_DOUBLE> *_c) {
typedef GCL::layout_map<I1,I2,I3> layoutmap;
array<triple_t<USE_DOUBLE>, layoutmap > a(_a, (DIM1+H1m+H1p),(DIM2+H2m+H2p),(DIM3+H3m+H3p));
array<triple_t<USE_DOUBLE>, layoutmap > b(_b, (DIM1+H1m+H1p),(DIM2+H2m+H2p),(DIM3+H3m+H3p));
array<triple_t<USE_DOUBLE>, layoutmap > c(_c, (DIM1+H1m+H1p),(DIM2+H2m+H2p),(DIM3+H3m+H3p));
/* Just an initialization */
for (int ii=0; ii<DIM1+H1m+H1p; ++ii)
for (int jj=0; jj<DIM2+H2m+H2p; ++jj) {
for (int kk=0; kk<DIM3+H3m+H3p; ++kk) {
a(ii,jj,kk) = triple_t<USE_DOUBLE>();
b(ii,jj,kk) = triple_t<USE_DOUBLE>();
c(ii,jj,kk) = triple_t<USE_DOUBLE>();
}
}
/* The pattern type is defined with the layouts, data types and
number of dimensions.
   The logical assumption made in the program is that 'i' is the
   first dimension (rows), 'j' is the second, and 'k' is the
   third. The first layout states that 'i' is the second dimension
   in order of strides, while 'j' is the first and 'k' is the third
   (just by looking at the initialization loops this should be
   clear).
   The second layout states that the first dimension in data ('i')
   also identifies the first dimension in the communicator. Logically,
   moving along the 'i' dimension from processor (p,q,r) will lead you
   to processor (p+1,q,r). The other dimensions behave in the same way.
*/
typedef GCL::halo_exchange_generic<GCL::layout_map<0,1,2>, 3, arch_type, GCL::version_mpi_pack > pattern_type;
/* The pattern is now instantiated with the periodicities and the
communicator. The periodicity of the communicator is
   irrelevant. Setting it to be periodic is the best choice, since
   GCL can then deal with any periodicity easily.
*/
pattern_type he(typename pattern_type::grid_type::period_type(per0, per1, per2), CartComm);
GCL::array<GCL::halo_descriptor,3> halo_dsc;
halo_dsc[0] = GCL::halo_descriptor(H1m, H1p, H1m, DIM1+H1m-1, DIM1+H1m+H1p);
halo_dsc[1] = GCL::halo_descriptor(H2m, H2p, H2m, DIM2+H2m-1, DIM2+H2m+H2p);
halo_dsc[2] = GCL::halo_descriptor(H3m, H3p, H3m, DIM3+H3m-1, DIM3+H3m+H3p);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field1(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(a.ptr), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field2(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(b.ptr), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field3(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(c.ptr), halo_dsc);
/* Pattern is set up. This must be done only once per pattern. The
   parameter must be greater than or equal to the largest number of
   arrays updated in a single step.
*/
//he.setup(100, halo_dsc, sizeof(double));
he.setup(3, GCL::field_on_the_fly<int,layoutmap, pattern_type::traits>(NULL,halo_dsc), sizeof(triple_t<USE_DOUBLE>)); // Estimates the size
file << "Proc: (" << coords[0] << ", " << coords[1] << ", " << coords[2] << ")\n";
for (int ii=H1m; ii<DIM1+H1m; ++ii)
for (int jj=H2m; jj<DIM2+H2m; ++jj)
for (int kk=H3m; kk<DIM3+H3m; ++kk) {
a(ii,jj,kk) =
triple_t<USE_DOUBLE>(ii-H1m+(DIM1)*coords[0],
jj-H2m+(DIM2)*coords[1],
kk-H3m+(DIM3)*coords[2]);
b(ii,jj,kk) =
triple_t<USE_DOUBLE>(ii-H1m+(DIM1)*coords[0]+B_ADD,
jj-H2m+(DIM2)*coords[1]+B_ADD,
kk-H3m+(DIM3)*coords[2]+B_ADD);
c(ii,jj,kk) =
triple_t<USE_DOUBLE>(ii-H1m+(DIM1)*coords[0]+C_ADD,
jj-H2m+(DIM2)*coords[1]+C_ADD,
kk-H3m+(DIM3)*coords[2]+C_ADD);
}
file << "A \n";
printbuff(file,a, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file << "B \n";
printbuff(file,b, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file << "C \n";
printbuff(file,c, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file.flush();
file << "GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU \n";
triple_t<USE_DOUBLE>::data_type* gpu_a = 0;
triple_t<USE_DOUBLE>::data_type* gpu_b = 0;
triple_t<USE_DOUBLE>::data_type* gpu_c = 0;
cudaError_t status;
status = cudaMalloc( &gpu_a, (DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>::data_type));
if( !checkCudaStatus( status ) ) return;
status = cudaMalloc( &gpu_b, (DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>::data_type));
if( !checkCudaStatus( status ) ) return;
status = cudaMalloc( &gpu_c, (DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>::data_type));
if( !checkCudaStatus( status ) ) return;
status = cudaMemcpy( gpu_a, a.ptr,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
status = cudaMemcpy( gpu_b, b.ptr,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
status = cudaMemcpy( gpu_c, c.ptr,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field1_gpu(gpu_a, halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field2_gpu(gpu_b, halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field3_gpu(gpu_c, halo_dsc);
std::vector<GCL::field_on_the_fly<triple_t<USE_DOUBLE>, layoutmap, pattern_type::traits> > vect(3);
//#define VECTOR_INTERFACE
#ifdef VECTOR_INTERFACE
vect[0] = field1_gpu;
vect[1] = field2_gpu;
vect[2] = field3_gpu;
// std::vector<triple_t<USE_DOUBLE>*> vect(3);
// vect[0] = gpu_a;
// vect[1] = gpu_b;
// vect[2] = gpu_c;
// /* This is self explanatory now
MPI_Barrier(MPI_COMM_WORLD);
gettimeofday(&start_tv, NULL);
he.pack(vect);
gettimeofday(&stop1_tv, NULL);
he.exchange();
gettimeofday(&stop2_tv, NULL);
he.unpack(vect);
gettimeofday(&stop3_tv, NULL);
#else
MPI_Barrier(MPI_COMM_WORLD);
gettimeofday(&start_tv, NULL);
he.pack(field1_gpu, field2_gpu, field3_gpu);
gettimeofday(&stop1_tv, NULL);
he.exchange();
gettimeofday(&stop2_tv, NULL);
he.unpack(field1_gpu, field2_gpu, field3_gpu);
gettimeofday(&stop3_tv, NULL);
#endif
lapse_time1 = ((static_cast<double>(stop1_tv.tv_sec)+1/1000000.0*static_cast<double>(stop1_tv.tv_usec)) - (static_cast<double>(start_tv.tv_sec)+1/1000000.0*static_cast<double>(start_tv.tv_usec))) * 1000.0;
lapse_time2 = ((static_cast<double>(stop2_tv.tv_sec)+1/1000000.0*static_cast<double>(stop2_tv.tv_usec)) - (static_cast<double>(stop1_tv.tv_sec)+1/1000000.0*static_cast<double>(stop1_tv.tv_usec))) * 1000.0;
lapse_time3 = ((static_cast<double>(stop3_tv.tv_sec)+1/1000000.0*static_cast<double>(stop3_tv.tv_usec)) - (static_cast<double>(stop2_tv.tv_sec)+1/1000000.0*static_cast<double>(stop2_tv.tv_usec))) * 1000.0;
lapse_time4 = ((static_cast<double>(stop3_tv.tv_sec)+1/1000000.0*static_cast<double>(stop3_tv.tv_usec)) - (static_cast<double>(start_tv.tv_sec)+1/1000000.0*static_cast<double>(start_tv.tv_usec))) * 1000.0;
MPI_Barrier(MPI_COMM_WORLD);
file << "TIME PACK: " << lapse_time1 << std::endl;
file << "TIME EXCH: " << lapse_time2 << std::endl;
file << "TIME UNPK: " << lapse_time3 << std::endl;
file << "TIME ALL : " << lapse_time1+lapse_time2+lapse_time3 << std::endl;
file << "TIME TOT : " << lapse_time4 << std::endl;
status = cudaMemcpy( a.ptr, gpu_a,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = cudaMemcpy( b.ptr, gpu_b,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = cudaMemcpy( c.ptr, gpu_c,
(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = cudaFree( gpu_a );
if( !checkCudaStatus( status ) ) return;
status = cudaFree( gpu_b );
if( !checkCudaStatus( status ) ) return;
status = cudaFree( gpu_c );
if( !checkCudaStatus( status ) ) return;
file << "\n********************************************************************************\n";
file << "A \n";
printbuff(file,a, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file << "B \n";
printbuff(file,b, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file << "C \n";
printbuff(file,c, DIM1+H1m+H1p, DIM2+H2m+H2p, DIM3+H3m+H3p);
file.flush();
file.flush();
int passed = true;
/* Checking the data arrived correctly in the whole region
*/
for (int ii=0; ii<DIM1+H1m+H1p; ++ii)
for (int jj=0; jj<DIM2+H2m+H2p; ++jj)
for (int kk=0; kk<DIM3+H3m+H3p; ++kk) {
triple_t<USE_DOUBLE> ta;
triple_t<USE_DOUBLE> tb;
triple_t<USE_DOUBLE> tc;
int tax, tay, taz;
int tbx, tby, tbz;
int tcx, tcy, tcz;
tax = modulus(ii-H1m+(DIM1)*coords[0], DIM1*dims[0]);
tbx = modulus(ii-H1m+(DIM1)*coords[0], DIM1*dims[0])+B_ADD;
tcx = modulus(ii-H1m+(DIM1)*coords[0], DIM1*dims[0])+C_ADD;
tay = modulus(jj-H2m+(DIM2)*coords[1], DIM2*dims[1]);
tby = modulus(jj-H2m+(DIM2)*coords[1], DIM2*dims[1])+B_ADD;
tcy = modulus(jj-H2m+(DIM2)*coords[1], DIM2*dims[1])+C_ADD;
taz = modulus(kk-H3m+(DIM3)*coords[2], DIM3*dims[2]);
tbz = modulus(kk-H3m+(DIM3)*coords[2], DIM3*dims[2])+B_ADD;
tcz = modulus(kk-H3m+(DIM3)*coords[2], DIM3*dims[2])+C_ADD;
if (!per0) {
if ( ((coords[0]==0) && (ii<H1m)) ||
((coords[0] == dims[0]-1) && (ii >= DIM1+H1m)) ) {
tax=triple_t<USE_DOUBLE>().x();
tbx=triple_t<USE_DOUBLE>().x();
tcx=triple_t<USE_DOUBLE>().x();
}
}
if (!per1) {
if ( ((coords[1]==0) && (jj<H2m)) ||
((coords[1] == dims[1]-1) && (jj >= DIM2+H2m)) ) {
tay=triple_t<USE_DOUBLE>().y();
tby=triple_t<USE_DOUBLE>().y();
tcy=triple_t<USE_DOUBLE>().y();
}
}
if (!per2) {
if ( ((coords[2]==0) && (kk<H3m)) ||
((coords[2] == dims[2]-1) && (kk >= DIM3+H3m)) ) {
taz=triple_t<USE_DOUBLE>().z();
tbz=triple_t<USE_DOUBLE>().z();
tcz=triple_t<USE_DOUBLE>().z();
}
}
ta = triple_t<USE_DOUBLE>(tax, tay, taz).floor();
tb = triple_t<USE_DOUBLE>(tbx, tby, tbz).floor();
tc = triple_t<USE_DOUBLE>(tcx, tcy, tcz).floor();
if (a(ii,jj,kk) != ta) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "a " << a(ii,jj,kk) << " != "
<< ta
<< "\n";
}
if (b(ii,jj,kk) != tb) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "b " << b(ii,jj,kk) << " != "
<< tb
<< "\n";
}
if (c(ii,jj,kk) != tc) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "c " << c(ii,jj,kk) << " != "
<< tc
<< "\n";
}
}
if (passed)
file << "RESULT: PASSED!\n";
else
file << "RESULT: FAILED!\n";
}
#ifdef _GCL_GPU_
/* device_binding added by Devendar Bureddy, OSU */
void
device_binding ()
{
int local_rank=0/*, num_local_procs*/;
int dev_count, use_dev_count, my_dev_id;
char *str;
if ((str = getenv ("MV2_COMM_WORLD_LOCAL_RANK")) != NULL)
{
local_rank = atoi (str);
printf ("MV2_COMM_WORLD_LOCAL_RANK %s\n", str);
}
if ((str = getenv ("MPISPAWN_LOCAL_NPROCS")) != NULL)
{
//num_local_procs = atoi (str);
printf ("MPISPAWN_LOCAL_NPROCS %s\n", str);
}
cudaGetDeviceCount (&dev_count);
if ((str = getenv ("NUM_GPU_DEVICES")) != NULL)
{
use_dev_count = atoi (str);
printf ("NUM_GPU_DEVICES %s\n", str);
}
else
{
use_dev_count = dev_count;
}
my_dev_id = local_rank % use_dev_count;
printf ("local rank = %d dev id = %d\n", local_rank, my_dev_id);
cudaSetDevice (my_dev_id);
}
#endif
int main(int argc, char** argv) {
#ifdef _GCL_GPU_
device_binding();
#endif
/* this example is based on MPI Cart Communicators, so we need to
initialize MPI. This can be done by GCL automatically
*/
MPI_Init(&argc, &argv);
/* Now let us initialize GCL itself. If MPI is not initialized at
this point, it will initialize it
*/
GCL::GCL_Init(argc, argv);
/* Here we compute the computing grid as in many applications
*/
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
std::cout << pid << " " << nprocs << "\n";
std::stringstream ss;
ss << pid;
std::string filename = "out" + ss.str() + ".txt";
std::cout << filename << std::endl;
std::ofstream file(filename.c_str());
file << pid << " " << nprocs << "\n";
MPI_Dims_create(nprocs, 3, dims);
int period[3] = {1, 1, 1};
file << "@" << pid << "@ MPI GRID SIZE " << dims[0] << " - " << dims[1] << " - " << dims[2] << "\n";
MPI_Cart_create(MPI_COMM_WORLD, 3, dims, period, false, &CartComm);
MPI_Cart_get(CartComm, 3, dims, period, coords);
/* Each process will hold a tile of size
(DIM1+2*H)x(DIM2+2*H)x(DIM3+2*H). The DIM1xDIM2xDIM3 area inside
   the H width border is the inner region of a hypothetical stencil
   computation whose halo width is H.
*/
int DIM1=atoi(argv[1]);
int DIM2=atoi(argv[2]);
int DIM3=atoi(argv[3]);
int H1m =atoi(argv[4]);
int H1p =atoi(argv[5]);
int H2m =atoi(argv[6]);
int H2p =atoi(argv[7]);
int H3m =atoi(argv[8]);
int H3p =atoi(argv[9]);
/* This example will exchange 3 data arrays at the same time with
different values.
*/
triple_t<USE_DOUBLE> *_a = new triple_t<USE_DOUBLE>[(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)];
triple_t<USE_DOUBLE> *_b = new triple_t<USE_DOUBLE>[(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)];
triple_t<USE_DOUBLE> *_c = new triple_t<USE_DOUBLE>[(DIM1+H1m+H1p)*(DIM2+H2m+H2p)*(DIM3+H3m+H3p)];
file << "Permutation 0,1,2\n";
file << "Permutation 0,1,2\n";
file << "run<std::ostream, 0,1,2, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,1,2, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,1,2, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
file << "Permutation 0,2,1\n";
file << "run<std::ostream, 0,2,1, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 0,2,1, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 0,2,1, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
file << "Permutation 1,0,2\n";
file << "run<std::ostream, 1,0,2, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,0,2, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,0,2, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
file << "Permutation 1,2,0\n";
file << "run<std::ostream, 1,2,0, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 1,2,0, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 1,2,0, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
file << "Permutation 2,0,1\n";
file << "run<std::ostream, 2,0,1, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,0,1, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,0,1, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
file << "Permutation 2,1,0\n";
file << "run<std::ostream, 2,1,0, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, true, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, true, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, true, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, true, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, false, true, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, false, true, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, false, false, true>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "run<std::ostream, 2,1,0, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c)\n";
run<std::ostream, 2,1,0, false, false, false>(file, DIM1, DIM2, DIM3, H1m, H1p, H2m, H2p, H3m, H3p, _a, _b, _c);
file << "---------------------------------------------------\n";
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
|
de33ec9a06383c2fd63dd3f45d1c74929fae1820.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
#include <device_launch_parameters.h>
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
glm::vec3 *sorted_Vel1;
glm::vec3 *sorted_Vel2;
glm::vec3 *sorted_Pos;
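// Editor's illustrative sketch, not part of the original file: the ping-pong
// buffers described above let every thread read the previous step's velocities
// from one buffer (vel1) while writing the new ones to the other (vel2); the
// host then swaps the two pointers. A hypothetical helper expressing that swap:
static inline void pingPongSwap(glm::vec3 *&readBuf, glm::vec3 *&writeBuf) {
  glm::vec3 *tmp = readBuf;  // keep the old read buffer
  readBuf = writeBuf;        // last step's output becomes the next step's input
  writeBuf = tmp;            // the old input is recycled as scratch output
}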
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
bool useVelBuf1;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
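// Editor's note, not part of the original file: generateRandomVec3 seeds
// thrust's RNG with hash(index * time), so each boid index draws an
// independent point in [-1, 1]^3; kernGenerateRandomPosArray below then
// scales that point by scene_scale to place the boid in the starting area.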
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids at random positions within the simulation start area.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
useVelBuf1 = true;
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
checkCUDAErrorWithLine("dev_particleArrayIndices failed");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
checkCUDAErrorWithLine("dev_particleGridIndices failed");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("dev_gridCellStartIndices failed");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("dev_gridCellEndIndices failed");
hipMalloc((void**)&sorted_Vel1, N * sizeof(glm::vec3));
hipMalloc((void**)&sorted_Vel2, N * sizeof(glm::vec3));
hipMalloc((void**)&sorted_Pos, N * sizeof(glm::vec3));
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for the kernCopyPositionsToVBO and kernCopyVelocitiesToVBO CUDA kernels.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
int count1 = 0;
int count3 = 0;
glm::vec3 perceivedCenter(0.f, 0.f, 0.f);
glm::vec3 perceivedVelocity(0.f, 0.f, 0.f);
glm::vec3 c(0.f, 0.f, 0.f);
for (int i = 0; i < N; i++) {
if (i != iSelf) {
float distance = glm::length(pos[i] - pos[iSelf]);
if (distance < rule1Distance) {
perceivedCenter += pos[i];
count1++;
}
if (distance < rule2Distance) {
c -= (pos[i] - pos[iSelf]);
}
if (distance < rule3Distance) {
perceivedVelocity += vel[i];
count3++;
}
}
}
if (count1 > 0) {
perceivedCenter /= float(count1);
perceivedCenter = (perceivedCenter - pos[iSelf]);
}
if (count3 > 0) {
perceivedVelocity /= float(count3);
}
glm::vec3 finalVel;
finalVel = (perceivedCenter * rule1Scale) + (c * rule2Scale) + (perceivedVelocity * rule3Scale);
return finalVel;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + y * gridResolution + z * gridResolution * gridResolution;
}
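// Editor's sketch, not part of the original file: with the x + y*res + z*res^2
// layout above, consecutive 1D cell indices differ in x first, so an x-innermost
// traversal of neighboring cells touches contiguous entries of the cell arrays.
// The inverse mapping, shown only for illustration, makes the ordering explicit.
__device__ inline glm::ivec3 gridIndex1Dto3D(int index1D, int gridResolution) {
  int x = index1D % gridResolution;                       // fastest-varying axis
  int y = (index1D / gridResolution) % gridResolution;
  int z = index1D / (gridResolution * gridResolution);    // slowest-varying axis
  return glm::ivec3(x, y, z);
}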
/**
* TODO-1.2 implement basic flocking
* For each of the `N` boids, compute a new velocity from the boids rules applied to its neighbors.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
glm::vec3 delta = computeVelocityChange(N, index, pos, vel1);
delta += vel1[index];
if (glm::length(delta) > maxSpeed) {
delta = glm::normalize(delta) * maxSpeed;
}
vel2[index] = delta;
}
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
__global__ void kernComputeIndices(int N, int gridSideCount,
glm::vec3 gridMin, float cellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
int x = floor((pos[index].x + abs(gridMin.x)) / cellWidth);
int y = floor((pos[index].y + abs(gridMin.y)) / cellWidth);
int z = floor((pos[index].z + abs(gridMin.z)) / cellWidth);
indices[index] = index;
gridIndices[index] = gridIndex3Dto1D(x, y, z, gridSideCount);
}
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
if (index == 0 || particleGridIndices[index] != particleGridIndices[index-1]) {
gridCellStartIndices[particleGridIndices[index]] = index;
}
if (index == N - 1 || particleGridIndices[index] != particleGridIndices[index + 1]) {
gridCellEndIndices[particleGridIndices[index]] = index;
}
}
}
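// Editor's worked example, not part of the original file: for sorted
// particleGridIndices = [3, 3, 3, 7, 9, 9] the kernel above produces
//   gridCellStartIndices[3] = 0, gridCellEndIndices[3] = 2
//   gridCellStartIndices[7] = 3, gridCellEndIndices[7] = 3
//   gridCellStartIndices[9] = 4, gridCellEndIndices[9] = 5
// while every other cell keeps the reset value -1, marking it as empty.
// Note that the stored end index is inclusive.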
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
float maxSearchDistance = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
glm::vec3 min = glm::vec3(floor(pos[index].x + abs(gridMin.x) - maxSearchDistance) * inverseCellWidth,
floor(pos[index].y + abs(gridMin.y) - maxSearchDistance) * inverseCellWidth,
floor(pos[index].z + abs(gridMin.z) - maxSearchDistance) * inverseCellWidth);
glm::vec3 max = glm::vec3(floor(pos[index].x + abs(gridMin.x) + maxSearchDistance) * inverseCellWidth,
floor(pos[index].y + abs(gridMin.y) + maxSearchDistance) * inverseCellWidth,
floor(pos[index].z + abs(gridMin.z) + maxSearchDistance) * inverseCellWidth);
    min = glm::clamp(min, glm::vec3(0, 0, 0), glm::vec3(gridResolution, gridResolution, gridResolution));
    max = glm::clamp(max, glm::vec3(0, 0, 0), glm::vec3(gridResolution, gridResolution, gridResolution));
    int count1 = 0;
    int count3 = 0;
    glm::vec3 perceivedCenter(0.f, 0.f, 0.f);
    glm::vec3 perceivedVelocity(0.f, 0.f, 0.f);
    glm::vec3 c(0.f, 0.f, 0.f);
for (int i = min.x; i < max.x; i++) {
for (int j = min.y; j < max.y; j++) {
for (int k = min.z; k < max.z; k++) {
int currentGridIndex = gridIndex3Dto1D(i, j, k, gridResolution);
          int startIndex = gridCellStartIndices[currentGridIndex];
          int endIndex = gridCellEndIndices[currentGridIndex];
          if (startIndex < 0) continue;  // reset value -1 means the cell holds no boids
          // the stored end index is inclusive, so use <= to visit the last boid in the cell
          for (int gridIndex = startIndex; gridIndex <= endIndex; gridIndex++) {
int particleIndex = particleArrayIndices[gridIndex];
if (particleIndex != index) {
float distance = glm::distance(pos[particleIndex], pos[index]);
if (distance < rule1Distance) {
perceivedCenter += pos[particleIndex];
count1++;
}
if (distance < rule2Distance) {
c -= (pos[particleIndex] - pos[index]);
}
if (distance < rule3Distance) {
perceivedVelocity += vel1[particleIndex];
count3++;
}
}
}
}
}
}
if (count1 > 0) {
perceivedCenter /= count1;
perceivedCenter = (perceivedCenter - pos[index]);
}
if (count3 > 0) {
perceivedVelocity /= count3;
}
glm::vec3 finalVel = vel1[index];
finalVel += perceivedCenter * rule1Scale;
finalVel += c * rule2Scale;
finalVel += perceivedVelocity * rule3Scale;
if (glm::length(finalVel) > maxSpeed) {
finalVel = glm::normalize(finalVel) * maxSpeed;
}
vel2[index] = finalVel;
}
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
float maxSearchDistance = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
glm::vec3 min = glm::vec3(floor(pos[index].x + abs(gridMin.x) - maxSearchDistance) * inverseCellWidth,
floor(pos[index].y + abs(gridMin.y) - maxSearchDistance) * inverseCellWidth,
floor(pos[index].z + abs(gridMin.z) - maxSearchDistance) * inverseCellWidth);
glm::vec3 max = glm::vec3(floor(pos[index].x + abs(gridMin.x) + maxSearchDistance) * inverseCellWidth,
floor(pos[index].y + abs(gridMin.y) + maxSearchDistance) * inverseCellWidth,
floor(pos[index].z + abs(gridMin.z) + maxSearchDistance) * inverseCellWidth);
    min = glm::clamp(min, glm::vec3(0, 0, 0), glm::vec3(gridResolution, gridResolution, gridResolution));
    max = glm::clamp(max, glm::vec3(0, 0, 0), glm::vec3(gridResolution, gridResolution, gridResolution));
    int count1 = 0;
    int count3 = 0;
    glm::vec3 perceivedCenter(0.f, 0.f, 0.f);
    glm::vec3 perceivedVelocity(0.f, 0.f, 0.f);
    glm::vec3 c(0.f, 0.f, 0.f);
for (int i = min.x; i < max.x; i++) {
for (int j = min.y; j < max.y; j++) {
for (int k = min.z; k < max.z; k++) {
int currentGridIndex = gridIndex3Dto1D(i, j, k, gridResolution);
          int startIndex = gridCellStartIndices[currentGridIndex];
          int endIndex = gridCellEndIndices[currentGridIndex];
          if (startIndex < 0) continue;  // reset value -1 means the cell holds no boids
          // the stored end index is inclusive, so use <= to visit the last boid in the cell
          for (int gridIndex = startIndex; gridIndex <= endIndex; gridIndex++) {
            int particleIndex = gridIndex; // data is already grid-sorted, so no indirection through particleArrayIndices is needed
if (particleIndex != index) {
float distance = glm::distance(pos[particleIndex], pos[index]);
if (distance < rule1Distance) {
perceivedCenter += pos[particleIndex];
count1++;
}
if (distance < rule2Distance) {
c -= (pos[particleIndex] - pos[index]);
}
if (distance < rule3Distance) {
perceivedVelocity += vel1[particleIndex];
count3++;
}
}
}
}
}
}
if (count1 > 0) {
perceivedCenter /= count1;
perceivedCenter = (perceivedCenter - pos[index]);
}
if (count3 > 0) {
perceivedVelocity /= count3;
}
glm::vec3 finalVel = vel1[index];
finalVel += perceivedCenter * rule1Scale;
finalVel += c * rule2Scale;
finalVel += perceivedVelocity * rule3Scale;
if (glm::length(finalVel) > maxSpeed) {
finalVel = glm::normalize(finalVel) * maxSpeed;
}
vel2[index] = finalVel;
}
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 boidSizeBlocks((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce << <boidSizeBlocks, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2);
glm::vec3 *temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
kernUpdatePos << < boidSizeBlocks, blockSize >> > (numObjects, dt, dev_pos, dev_vel1);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
int boidSizeBlocks = (numObjects + blockSize - 1) / blockSize;
int gridSizeBlocks = (gridCellCount + blockSize - 1) / blockSize;
kernComputeIndices << < boidSizeBlocks, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernResetIntBuffer << < gridSizeBlocks, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << < gridSizeBlocks, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
  kernIdentifyCellStartEnd << < boidSizeBlocks, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernUpdateVelNeighborSearchScattered << < boidSizeBlocks, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
glm::vec3 *temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
kernUpdatePos << < boidSizeBlocks, blockSize >> > (numObjects, dt, dev_pos, dev_vel1);
}
__global__ void kernSortVelocities(int N, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2, glm::vec3 *sortedPos, glm::vec3 *sortedVel1, glm::vec3 *sortedVel2) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
int grid = particleArrayIndices[index];
sortedPos[index] = pos[grid];
sortedVel1[index] = vel1[grid];
sortedVel2[index] = vel2[grid];
}
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
int boidSizeBlocks = (numObjects + blockSize - 1) / blockSize;
int gridSizeBlocks = (gridCellCount + blockSize - 1) / blockSize;
kernComputeIndices << < boidSizeBlocks, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernResetIntBuffer << < gridSizeBlocks, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << < gridSizeBlocks, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
  kernIdentifyCellStartEnd << < boidSizeBlocks, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernSortVelocities << <boidSizeBlocks, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2, sorted_Pos, sorted_Vel1, sorted_Vel2);
glm::vec3 *temp = dev_pos;
dev_pos = sorted_Pos;
sorted_Pos = temp;
temp = dev_vel1;
dev_vel1 = sorted_Vel1;
sorted_Vel1 = temp;
temp = dev_vel2;
dev_vel2 = sorted_Vel2;
sorted_Vel2 = temp;
  kernUpdateVelNeighborSearchCoherent << < boidSizeBlocks, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos, dev_vel1, dev_vel2);
temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
kernUpdatePos << < boidSizeBlocks, blockSize >> > (numObjects, dt, dev_pos, dev_vel1);
}
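// Editor's note, not part of the original file: in stepSimulationCoherentGrid
// above, kernSortVelocities copies the boid data into grid-sorted order, and the
// three pointer swaps then promote sorted_Pos/sorted_Vel1/sorted_Vel2 to the
// live buffers for this step; the previous buffers become scratch space for the
// next reshuffle, so no copy back is required.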
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| de33ec9a06383c2fd63dd3f45d1c74929fae1820.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
#include <device_launch_parameters.h>
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
glm::vec3 *sorted_Vel1;
glm::vec3 *sorted_Vel2;
glm::vec3 *sorted_Pos;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
bool useVelBuf1;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids at random positions within the simulation start area.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
useVelBuf1 = true;
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
checkCUDAErrorWithLine("dev_particleArrayIndices failed");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
checkCUDAErrorWithLine("dev_particleGridIndices failed");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("dev_gridCellStartIndices failed");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("dev_gridCellEndIndices failed");
cudaMalloc((void**)&sorted_Vel1, N * sizeof(glm::vec3));
cudaMalloc((void**)&sorted_Vel2, N * sizeof(glm::vec3));
cudaMalloc((void**)&sorted_Pos, N * sizeof(glm::vec3));
cudaDeviceSynchronize();
}
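// Editor's sketch, not part of the original project: the launch configuration
// used throughout, (N + blockSize - 1) / blockSize, is ceiling division, i.e.
// the smallest number of blockSize-wide blocks that covers all N boids.
static inline int divUpExample(int n, int block) {
  return (n + block - 1) / block;  // e.g. divUpExample(5000, 128) == 40
}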
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for the kernCopyPositionsToVBO and kernCopyVelocitiesToVBO CUDA kernels.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
int count1 = 0;
int count3 = 0;
glm::vec3 perceivedCenter(0.f, 0.f, 0.f);
glm::vec3 perceivedVelocity(0.f, 0.f, 0.f);
glm::vec3 c(0.f, 0.f, 0.f);
for (int i = 0; i < N; i++) {
if (i != iSelf) {
float distance = glm::length(pos[i] - pos[iSelf]);
if (distance < rule1Distance) {
perceivedCenter += pos[i];
count1++;
}
if (distance < rule2Distance) {
c -= (pos[i] - pos[iSelf]);
}
if (distance < rule3Distance) {
perceivedVelocity += vel[i];
count3++;
}
}
}
if (count1 > 0) {
perceivedCenter /= float(count1);
perceivedCenter = (perceivedCenter - pos[iSelf]);
}
if (count3 > 0) {
perceivedVelocity /= float(count3);
}
glm::vec3 finalVel;
finalVel = (perceivedCenter * rule1Scale) + (c * rule2Scale) + (perceivedVelocity * rule3Scale);
return finalVel;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + y * gridResolution + z * gridResolution * gridResolution;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` boids, compute a new velocity from the boids rules applied to its neighbors.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
glm::vec3 delta = computeVelocityChange(N, index, pos, vel1);
delta += vel1[index];
if (glm::length(delta) > maxSpeed) {
delta = glm::normalize(delta) * maxSpeed;
}
vel2[index] = delta;
}
}
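// Editor's note, not part of the original file, on the "why NOT vel1?" question
// above: writing the result into vel2 avoids a read/write race. Every thread
// reads its neighbors' current velocities from vel1; if threads also overwrote
// vel1 in place, other threads could observe a mix of old and new values
// depending on scheduling. Separate read and write buffers (swapped on the host
// afterwards) keep each step deterministic.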
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
__global__ void kernComputeIndices(int N, int gridSideCount,
glm::vec3 gridMin, float cellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
int x = floor((pos[index].x + abs(gridMin.x)) / cellWidth);
int y = floor((pos[index].y + abs(gridMin.y)) / cellWidth);
int z = floor((pos[index].z + abs(gridMin.z)) / cellWidth);
indices[index] = index;
gridIndices[index] = gridIndex3Dto1D(x, y, z, gridSideCount);
}
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
if (index == 0 || particleGridIndices[index] != particleGridIndices[index-1]) {
gridCellStartIndices[particleGridIndices[index]] = index;
}
if (index == N - 1 || particleGridIndices[index] != particleGridIndices[index + 1]) {
gridCellEndIndices[particleGridIndices[index]] = index;
}
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
float maxSearchDistance = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
glm::vec3 min = glm::vec3(floor(pos[index].x + abs(gridMin.x) - maxSearchDistance) * inverseCellWidth,
floor(pos[index].y + abs(gridMin.y) - maxSearchDistance) * inverseCellWidth,
floor(pos[index].z + abs(gridMin.z) - maxSearchDistance) * inverseCellWidth);
glm::vec3 max = glm::vec3(floor(pos[index].x + abs(gridMin.x) + maxSearchDistance) * inverseCellWidth,
floor(pos[index].y + abs(gridMin.y) + maxSearchDistance) * inverseCellWidth,
floor(pos[index].z + abs(gridMin.z) + maxSearchDistance) * inverseCellWidth);
    min = glm::clamp(min, glm::vec3(0, 0, 0), glm::vec3(gridResolution, gridResolution, gridResolution));
    max = glm::clamp(max, glm::vec3(0, 0, 0), glm::vec3(gridResolution, gridResolution, gridResolution));
    int count1 = 0;
    int count3 = 0;
    glm::vec3 perceivedCenter(0.f, 0.f, 0.f);
    glm::vec3 perceivedVelocity(0.f, 0.f, 0.f);
    glm::vec3 c(0.f, 0.f, 0.f);
for (int i = min.x; i < max.x; i++) {
for (int j = min.y; j < max.y; j++) {
for (int k = min.z; k < max.z; k++) {
int currentGridIndex = gridIndex3Dto1D(i, j, k, gridResolution);
          int startIndex = gridCellStartIndices[currentGridIndex];
          int endIndex = gridCellEndIndices[currentGridIndex];
          if (startIndex < 0) continue;  // reset value -1 means the cell holds no boids
          // the stored end index is inclusive, so use <= to visit the last boid in the cell
          for (int gridIndex = startIndex; gridIndex <= endIndex; gridIndex++) {
int particleIndex = particleArrayIndices[gridIndex];
if (particleIndex != index) {
float distance = glm::distance(pos[particleIndex], pos[index]);
if (distance < rule1Distance) {
perceivedCenter += pos[particleIndex];
count1++;
}
if (distance < rule2Distance) {
c -= (pos[particleIndex] - pos[index]);
}
if (distance < rule3Distance) {
perceivedVelocity += vel1[particleIndex];
count3++;
}
}
}
}
}
}
if (count1 > 0) {
perceivedCenter /= count1;
perceivedCenter = (perceivedCenter - pos[index]);
}
if (count3 > 0) {
perceivedVelocity /= count3;
}
glm::vec3 finalVel = vel1[index];
finalVel += perceivedCenter * rule1Scale;
finalVel += c * rule2Scale;
finalVel += perceivedVelocity * rule3Scale;
if (glm::length(finalVel) > maxSpeed) {
finalVel = glm::normalize(finalVel) * maxSpeed;
}
vel2[index] = finalVel;
}
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
float maxSearchDistance = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
glm::vec3 min = glm::vec3(floor(pos[index].x + abs(gridMin.x) - maxSearchDistance) * inverseCellWidth,
floor(pos[index].y + abs(gridMin.y) - maxSearchDistance) * inverseCellWidth,
floor(pos[index].z + abs(gridMin.z) - maxSearchDistance) * inverseCellWidth);
glm::vec3 max = glm::vec3(floor(pos[index].x + abs(gridMin.x) + maxSearchDistance) * inverseCellWidth,
floor(pos[index].y + abs(gridMin.y) + maxSearchDistance) * inverseCellWidth,
floor(pos[index].z + abs(gridMin.z) + maxSearchDistance) * inverseCellWidth);
    min = glm::clamp(min, glm::vec3(0, 0, 0), glm::vec3(gridResolution, gridResolution, gridResolution));
    max = glm::clamp(max, glm::vec3(0, 0, 0), glm::vec3(gridResolution, gridResolution, gridResolution));
    int count1 = 0;
    int count3 = 0;
    glm::vec3 perceivedCenter(0.f, 0.f, 0.f);
    glm::vec3 perceivedVelocity(0.f, 0.f, 0.f);
    glm::vec3 c(0.f, 0.f, 0.f);
for (int i = min.x; i < max.x; i++) {
for (int j = min.y; j < max.y; j++) {
for (int k = min.z; k < max.z; k++) {
int currentGridIndex = gridIndex3Dto1D(i, j, k, gridResolution);
          int startIndex = gridCellStartIndices[currentGridIndex];
          int endIndex = gridCellEndIndices[currentGridIndex];
          if (startIndex < 0) continue;  // reset value -1 means the cell holds no boids
          // the stored end index is inclusive, so use <= to visit the last boid in the cell
          for (int gridIndex = startIndex; gridIndex <= endIndex; gridIndex++) {
            int particleIndex = gridIndex; // data is already grid-sorted, so no indirection through particleArrayIndices is needed
if (particleIndex != index) {
float distance = glm::distance(pos[particleIndex], pos[index]);
if (distance < rule1Distance) {
perceivedCenter += pos[particleIndex];
count1++;
}
if (distance < rule2Distance) {
c -= (pos[particleIndex] - pos[index]);
}
if (distance < rule3Distance) {
perceivedVelocity += vel1[particleIndex];
count3++;
}
}
}
}
}
}
if (count1 > 0) {
perceivedCenter /= count1;
perceivedCenter = (perceivedCenter - pos[index]);
}
if (count3 > 0) {
perceivedVelocity /= count3;
}
glm::vec3 finalVel = vel1[index];
finalVel += perceivedCenter * rule1Scale;
finalVel += c * rule2Scale;
finalVel += perceivedVelocity * rule3Scale;
if (glm::length(finalVel) > maxSpeed) {
finalVel = glm::normalize(finalVel) * maxSpeed;
}
vel2[index] = finalVel;
}
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 boidSizeBlocks((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce << <boidSizeBlocks, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2);
glm::vec3 *temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
kernUpdatePos << < boidSizeBlocks, blockSize >> > (numObjects, dt, dev_pos, dev_vel1);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
int boidSizeBlocks = (numObjects + blockSize - 1) / blockSize;
int gridSizeBlocks = (gridCellCount + blockSize - 1) / blockSize;
kernComputeIndices << < boidSizeBlocks, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernResetIntBuffer << < gridSizeBlocks, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << < gridSizeBlocks, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
  kernIdentifyCellStartEnd << < boidSizeBlocks, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernUpdateVelNeighborSearchScattered << < boidSizeBlocks, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
glm::vec3 *temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
kernUpdatePos << < boidSizeBlocks, blockSize >> > (numObjects, dt, dev_pos, dev_vel1);
}
__global__ void kernSortVelocities(int N, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2, glm::vec3 *sortedPos, glm::vec3 *sortedVel1, glm::vec3 *sortedVel2) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
int grid = particleArrayIndices[index];
sortedPos[index] = pos[grid];
sortedVel1[index] = vel1[grid];
sortedVel2[index] = vel2[grid];
}
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
int boidSizeBlocks = (numObjects + blockSize - 1) / blockSize;
int gridSizeBlocks = (gridCellCount + blockSize - 1) / blockSize;
kernComputeIndices << < boidSizeBlocks, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernResetIntBuffer << < gridSizeBlocks, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << < gridSizeBlocks, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
  kernIdentifyCellStartEnd << < boidSizeBlocks, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernSortVelocities << <boidSizeBlocks, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2, sorted_Pos, sorted_Vel1, sorted_Vel2);
glm::vec3 *temp = dev_pos;
dev_pos = sorted_Pos;
sorted_Pos = temp;
temp = dev_vel1;
dev_vel1 = sorted_Vel1;
sorted_Vel1 = temp;
temp = dev_vel2;
dev_vel2 = sorted_Vel2;
sorted_Vel2 = temp;
  kernUpdateVelNeighborSearchCoherent << < boidSizeBlocks, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos, dev_vel1, dev_vel2);
temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
kernUpdatePos << < boidSizeBlocks, blockSize >> > (numObjects, dt, dev_pos, dev_vel1);
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
467b8211bd6d68b0e8334e6caf321e55d633dffd.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuNearestNeighbors.cuh"
#include <iostream>
void initDeviceVariables(DeviceVariables *dev_vars, int K, int num_docs, int biggestQuerySize = 10000){
int KK = 1; //Obtain the smallest power of 2 that is >= K (facilitates the sorting algorithm)
while(KK < K) KK <<= 1;
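// e.g. K = 5 -> KK = 8, K = 8 -> KK = 8, K = 9 -> KK = 16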
dim3 grid, threads;
get_grid_config(grid, threads);
gpuAssert(hipMalloc(&dev_vars->d_dist, num_docs * sizeof(cuSimilarity)));
gpuAssert(hipMalloc(&dev_vars->d_nearestK, KK * grid.x * sizeof(cuSimilarity)));
gpuAssert(hipMalloc(&dev_vars->d_query, biggestQuerySize * sizeof(Entry)));
gpuAssert(hipMalloc(&dev_vars->d_index, biggestQuerySize * sizeof(int)));
gpuAssert(hipMalloc(&dev_vars->d_count, biggestQuerySize * sizeof(int)));
gpuAssert(hipMalloc(&dev_vars->d_qnorms, 2 * sizeof(float)));
}
void freeDeviceVariables(DeviceVariables *dev_vars){
gpuAssert(hipFree(dev_vars->d_dist));
gpuAssert(hipFree(dev_vars->d_nearestK));
gpuAssert(hipFree(dev_vars->d_query));
gpuAssert(hipFree(dev_vars->d_index));
gpuAssert(hipFree(dev_vars->d_count));
gpuAssert(hipFree(dev_vars->d_qnorms));
}
cuSimilarity* makeQuery(InvertedIndex &inverted_index, std::map<unsigned int, float> &test_features, int K,
void(*distance)(InvertedIndex, Entry*, int*, cuSimilarity*, int D), DeviceVariables *dev_vars) {
std::vector<Entry> query;
std::map<unsigned int, float>::const_iterator end = test_features.end();
for(std::map<unsigned int, float>::const_iterator it = test_features.begin(); it != end; ++it){
unsigned int term_id = it->first;
double term_count = it->second;
// it means that the query has higher dimensionality
// than the training set. Thus, we remove that term
if(term_id < inverted_index.num_terms)
query.push_back(Entry(0, term_id, term_count)); // doc_id, term_id, term_count
}
//Creates an empty document if there are no terms
if(query.empty()) {
query.push_back(Entry(0, 0, 0));
}
return KNN(inverted_index, query, K, distance, dev_vars);
}
cuNearestNeighbors::~cuNearestNeighbors(){
doc_to_class.clear();
entries.clear();
// see how to free the GPU (device) memory here
for (int i = 0; i < n_gpus; ++i)
{
freeInvertedIndex(inverted_indices[i]);
}
delete [] inverted_indices;
}
cuNearestNeighbors::cuNearestNeighbors(Dataset &data, int n_gpus): n_gpus(n_gpus){
convertDataset(data);
buildInvertedIndex();
}
void cuNearestNeighbors::train(Dataset &data){
convertDataset(data);
buildInvertedIndex();
}
int cuNearestNeighbors::classify(std::map<unsigned int, float> &test_features, int K){
cuSimilarity *k_nearest = getKNearestNeighbors(test_features, K);
int vote = getMajorityVote(k_nearest, K);
delete[] k_nearest;
return vote;
}
cuSimilarity * cuNearestNeighbors::getKNearestNeighbors(const std::map<unsigned int, float> &test_features, int K){
std::vector<Entry> query;
std::map<unsigned int, float>::const_iterator end = test_features.end();
for(std::map<unsigned int, float>::const_iterator it = test_features.begin(); it != end; ++it){
unsigned int term_id = it->first;
double term_count = it->second;
// it means that the query has higher dimensionality
// than the training set. Thus, we remove that term
if(term_id < num_terms)
query.push_back(Entry(0, term_id, term_count)); // doc_id, term_id, term_count
}
//Creates an empty document if there are no terms
if(query.empty()) {
query.push_back(Entry(0, 0, 0));
}
DeviceVariables dev_vars;
initDeviceVariables(&dev_vars, K, inverted_indices[0].num_docs);
cuSimilarity* k_nearest = KNN(inverted_indices[0], query, K, CosineDistance, &dev_vars);
freeDeviceVariables(&dev_vars);
return k_nearest;
}
std::vector<cuSimilarity*> cuNearestNeighbors::getKNearestNeighbors(Dataset &test, int K){
std::string distance = "cosine";
int gpuNum;
hipGetDeviceCount(&gpuNum);
if (gpuNum > n_gpus){
gpuNum = n_gpus;
if (gpuNum < 1)
gpuNum = 1;
}
n_gpus = gpuNum;
std::vector<sample> &samples = test.getSamples();
std::vector<std::pair<int, int> > intervals;
std::vector<cuSimilarity*> idxs(samples.size());
InvertedIndex* inverted_indices = this->inverted_indices;
int biggestQuerySize = test.biggestQuerySize;
omp_set_num_threads(gpuNum);
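// One OpenMP thread per GPU: each thread binds to device 'cpuid', allocates its own DeviceVariables scratch buffers, and classifies its share of the test samples against that device's replica of the inverted index.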
#pragma omp parallel shared(samples) shared(inverted_indices) shared(idxs)
{
int num_test_local = 0, i;
int cpuid = omp_get_thread_num();
hipSetDevice(cpuid);
DeviceVariables dev_vars;
initDeviceVariables(&dev_vars, K, inverted_indices[cpuid].num_docs, biggestQuerySize);
#pragma omp for
for (i = 0; i < samples.size(); ++i)
{
num_test_local++;
if(distance == "cosine" || distance == "both") {
idxs[i] = makeQuery(inverted_indices[cpuid], samples[i].features, K, CosineDistance, &dev_vars);
}
if(distance == "l2" || distance == "both") {
idxs[i] = makeQuery(inverted_indices[cpuid], samples[i].features, K, EuclideanDistance, &dev_vars);
}
if(distance == "l1" || distance == "both") {
idxs[i] = makeQuery(inverted_indices[cpuid], samples[i].features, K, ManhattanDistance, &dev_vars);
}
}
freeDeviceVariables(&dev_vars);
}
return idxs;
}
void cuNearestNeighbors::convertDataset(Dataset &data){
num_terms = 0;
// delete all old entries
entries.clear();
num_docs = data.getSamples().size();
for (unsigned int i = 0; i < num_docs; ++i)
{
unsigned int doc_id = i;
std::map<unsigned int, float>::iterator it;
for(it = data.getSamples()[i].features.begin(); it != data.getSamples()[i].features.end(); ++it){
unsigned int term_id = it->first;
double term_count = it->second;
num_terms = ::max(num_terms, term_id + 1);
entries.push_back(Entry(doc_id, term_id, term_count)); // doc_id, term_id, term_count
}
doc_to_class[doc_id] = data.getSamples()[i].y;
}
}
int cuNearestNeighbors::getMajorityVote(cuSimilarity *k_nearest, int K){
std::map<int, double> vote_count;
//cuSimilarity &closest = k_nearest[0];
//cuSimilarity &further = k_nearest[K-1];
for(int i = 0; i < K; ++i) {
cuSimilarity &sim = k_nearest[i];
vote_count[doc_to_class[sim.doc_id]] += sim.distance;
//vote_count[doc_to_class[sim.doc_id]]+=((further.distance-sim.distance)/(further.distance-closest.distance))*((sim.distance+further.distance)/(closest.distance+further.distance))*(i);
//vote_count[doc_to_class[sim.doc_id]]+=((further.distance-sim.distance)/(further.distance-closest.distance))*((double)i);
}
double max_votes = 0; // votes are weighted by distance (double), so do not truncate the comparison to int
int guessed_class = -1;
std::map<int, double>::iterator end = vote_count.end();
for(std::map<int, double>::iterator it = vote_count.begin(); it != end; it++) {
if(it->second > max_votes) {
max_votes = it->second;
guessed_class = it->first;
}
}
return guessed_class;
}
void cuNearestNeighbors::buildInvertedIndex(){
int gpuNum;
hipGetDeviceCount(&gpuNum);
if (gpuNum > n_gpus){
gpuNum = n_gpus;
if (gpuNum < 1)
gpuNum = 1;
}
n_gpus = gpuNum;
omp_set_num_threads(gpuNum);
this->inverted_indices = new InvertedIndex[gpuNum];
std::vector<Entry> &entries = this->entries;
InvertedIndex* inverted_indices = this->inverted_indices;
#pragma omp parallel shared(entries) shared(inverted_indices)
{
int cpuid = omp_get_thread_num();
hipSetDevice(cpuid);
inverted_indices[cpuid] = make_inverted_index(num_docs, num_terms, entries);
}
entries.clear();
}
| 467b8211bd6d68b0e8334e6caf321e55d633dffd.cu | #include "cuNearestNeighbors.cuh"
#include <iostream>
void initDeviceVariables(DeviceVariables *dev_vars, int K, int num_docs, int biggestQuerySize = 10000){
int KK = 1; //Obtain the smallest power of 2 that is >= K (facilitates the sorting algorithm)
while(KK < K) KK <<= 1;
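// e.g. K = 5 -> KK = 8, K = 8 -> KK = 8, K = 9 -> KK = 16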
dim3 grid, threads;
get_grid_config(grid, threads);
gpuAssert(cudaMalloc(&dev_vars->d_dist, num_docs * sizeof(cuSimilarity)));
gpuAssert(cudaMalloc(&dev_vars->d_nearestK, KK * grid.x * sizeof(cuSimilarity)));
gpuAssert(cudaMalloc(&dev_vars->d_query, biggestQuerySize * sizeof(Entry)));
gpuAssert(cudaMalloc(&dev_vars->d_index, biggestQuerySize * sizeof(int)));
gpuAssert(cudaMalloc(&dev_vars->d_count, biggestQuerySize * sizeof(int)));
gpuAssert(cudaMalloc(&dev_vars->d_qnorms, 2 * sizeof(float)));
}
void freeDeviceVariables(DeviceVariables *dev_vars){
gpuAssert(cudaFree(dev_vars->d_dist));
gpuAssert(cudaFree(dev_vars->d_nearestK));
gpuAssert(cudaFree(dev_vars->d_query));
gpuAssert(cudaFree(dev_vars->d_index));
gpuAssert(cudaFree(dev_vars->d_count));
gpuAssert(cudaFree(dev_vars->d_qnorms));
}
cuSimilarity* makeQuery(InvertedIndex &inverted_index, std::map<unsigned int, float> &test_features, int K,
void(*distance)(InvertedIndex, Entry*, int*, cuSimilarity*, int D), DeviceVariables *dev_vars) {
std::vector<Entry> query;
std::map<unsigned int, float>::const_iterator end = test_features.end();
for(std::map<unsigned int, float>::const_iterator it = test_features.begin(); it != end; ++it){
unsigned int term_id = it->first;
double term_count = it->second;
// it means that the query has higher dimensionality
// than the training set. Thus, we remove that term
if(term_id < inverted_index.num_terms)
query.push_back(Entry(0, term_id, term_count)); // doc_id, term_id, term_count
}
//Creates an empty document if there are no terms
if(query.empty()) {
query.push_back(Entry(0, 0, 0));
}
return KNN(inverted_index, query, K, distance, dev_vars);
}
cuNearestNeighbors::~cuNearestNeighbors(){
doc_to_class.clear();
entries.clear();
// see how to free the GPU (device) memory here
for (int i = 0; i < n_gpus; ++i)
{
freeInvertedIndex(inverted_indices[i]);
}
delete [] inverted_indices;
}
cuNearestNeighbors::cuNearestNeighbors(Dataset &data, int n_gpus): n_gpus(n_gpus){
convertDataset(data);
buildInvertedIndex();
}
void cuNearestNeighbors::train(Dataset &data){
convertDataset(data);
buildInvertedIndex();
}
int cuNearestNeighbors::classify(std::map<unsigned int, float> &test_features, int K){
cuSimilarity *k_nearest = getKNearestNeighbors(test_features, K);
int vote = getMajorityVote(k_nearest, K);
delete[] k_nearest;
return vote;
}
cuSimilarity * cuNearestNeighbors::getKNearestNeighbors(const std::map<unsigned int, float> &test_features, int K){
std::vector<Entry> query;
std::map<unsigned int, float>::const_iterator end = test_features.end();
for(std::map<unsigned int, float>::const_iterator it = test_features.begin(); it != end; ++it){
unsigned int term_id = it->first;
double term_count = it->second;
// it means that the query has higher dimensionality
// than the training set. Thus, we remove that term
if(term_id < num_terms)
query.push_back(Entry(0, term_id, term_count)); // doc_id, term_id, term_count
}
//Creates an empty document if there are no terms
if(query.empty()) {
query.push_back(Entry(0, 0, 0));
}
DeviceVariables dev_vars;
initDeviceVariables(&dev_vars, K, inverted_indices[0].num_docs);
cuSimilarity* k_nearest = KNN(inverted_indices[0], query, K, CosineDistance, &dev_vars);
freeDeviceVariables(&dev_vars);
return k_nearest;
}
std::vector<cuSimilarity*> cuNearestNeighbors::getKNearestNeighbors(Dataset &test, int K){
std::string distance = "cosine";
int gpuNum;
cudaGetDeviceCount(&gpuNum);
if (gpuNum > n_gpus){
gpuNum = n_gpus;
if (gpuNum < 1)
gpuNum = 1;
}
n_gpus = gpuNum;
std::vector<sample> &samples = test.getSamples();
std::vector<std::pair<int, int> > intervals;
std::vector<cuSimilarity*> idxs(samples.size());
InvertedIndex* inverted_indices = this->inverted_indices;
int biggestQuerySize = test.biggestQuerySize;
omp_set_num_threads(gpuNum);
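// One OpenMP thread per GPU: each thread binds to device 'cpuid', allocates its own DeviceVariables scratch buffers, and classifies its share of the test samples against that device's replica of the inverted index.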
#pragma omp parallel shared(samples) shared(inverted_indices) shared(idxs)
{
int num_test_local = 0, i;
int cpuid = omp_get_thread_num();
cudaSetDevice(cpuid);
DeviceVariables dev_vars;
initDeviceVariables(&dev_vars, K, inverted_indices[cpuid].num_docs, biggestQuerySize);
#pragma omp for
for (i = 0; i < samples.size(); ++i)
{
num_test_local++;
if(distance == "cosine" || distance == "both") {
idxs[i] = makeQuery(inverted_indices[cpuid], samples[i].features, K, CosineDistance, &dev_vars);
}
if(distance == "l2" || distance == "both") {
idxs[i] = makeQuery(inverted_indices[cpuid], samples[i].features, K, EuclideanDistance, &dev_vars);
}
if(distance == "l1" || distance == "both") {
idxs[i] = makeQuery(inverted_indices[cpuid], samples[i].features, K, ManhattanDistance, &dev_vars);
}
}
freeDeviceVariables(&dev_vars);
}
return idxs;
}
void cuNearestNeighbors::convertDataset(Dataset &data){
num_terms = 0;
// delete all old entries
entries.clear();
num_docs = data.getSamples().size();
for (unsigned int i = 0; i < num_docs; ++i)
{
unsigned int doc_id = i;
std::map<unsigned int, float>::iterator it;
for(it = data.getSamples()[i].features.begin(); it != data.getSamples()[i].features.end(); ++it){
unsigned int term_id = it->first;
double term_count = it->second;
num_terms = std::max(num_terms, term_id + 1);
entries.push_back(Entry(doc_id, term_id, term_count)); // doc_id, term_id, term_count
}
doc_to_class[doc_id] = data.getSamples()[i].y;
}
}
int cuNearestNeighbors::getMajorityVote(cuSimilarity *k_nearest, int K){
std::map<int, double> vote_count;
//cuSimilarity &closest = k_nearest[0];
//cuSimilarity &further = k_nearest[K-1];
for(int i = 0; i < K; ++i) {
cuSimilarity &sim = k_nearest[i];
vote_count[doc_to_class[sim.doc_id]] += sim.distance;
//vote_count[doc_to_class[sim.doc_id]]+=((further.distance-sim.distance)/(further.distance-closest.distance))*((sim.distance+further.distance)/(closest.distance+further.distance))*(i);
//vote_count[doc_to_class[sim.doc_id]]+=((further.distance-sim.distance)/(further.distance-closest.distance))*((double)i);
}
double max_votes = 0; // votes are weighted by distance (double), so do not truncate the comparison to int
int guessed_class = -1;
std::map<int, double>::iterator end = vote_count.end();
for(std::map<int, double>::iterator it = vote_count.begin(); it != end; it++) {
if(it->second > max_votes) {
max_votes = it->second;
guessed_class = it->first;
}
}
return guessed_class;
}
void cuNearestNeighbors::buildInvertedIndex(){
int gpuNum;
cudaGetDeviceCount(&gpuNum);
if (gpuNum > n_gpus){
gpuNum = n_gpus;
if (gpuNum < 1)
gpuNum = 1;
}
n_gpus = gpuNum;
omp_set_num_threads(gpuNum);
this->inverted_indices = new InvertedIndex[gpuNum];
std::vector<Entry> &entries = this->entries;
InvertedIndex* inverted_indices = this->inverted_indices;
#pragma omp parallel shared(entries) shared(inverted_indices)
{
int cpuid = omp_get_thread_num();
cudaSetDevice(cpuid);
inverted_indices[cpuid] = make_inverted_index(num_docs, num_terms, entries);
}
entries.clear();
}
|
human.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// human approach to solving Sudoku
// requires fully populated arrays of possValues
#ifndef HUMAN_H
#define HUMAN_H
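// findLocalBlockIdx maps a cell's flat index on the 9x9 board (tid) to its 0..8
// position inside its own 3x3 block. Worked example: tid = 40 (row 4, col 4) gives
// blockRow = 1, blockCol = 1, starter = 30, difference = 10, i.e. local index 4,
// the centre cell of the centre block.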
__device__
int findLocalBlockIdx(int tid) {
int blockRow = tid/27; // used to be floor
int col = tid%9;
int blockCol;
if (col<3)
blockCol = 0;
else if (col<6)
blockCol = 1;
else
blockCol = 2;
int starter = (blockRow*27) + (blockCol*3);
int difference = tid - starter;
if (difference==0)
return 0;
else if (difference==1)
return 1;
else if (difference==2)
return 2;
else if (difference==9)
return 3;
else if (difference==10)
return 4;
else if (difference==11)
return 5;
else if (difference==18)
return 6;
else if (difference==19)
return 7;
else
return 8;
}
__global__ void human(Square* d_board, int n, int* d_points) {
__shared__ Square s_board[81];
__shared__ int s_points;
if (threadIdx.x == 0) {
s_points = 0;
// initialize shared memory
for (int i = 0; i<(n*n); i++) {
s_board[i].value = d_board[i].value;
s_board[i].isLocked = d_board[i].isLocked;
for (int j=0; j<n; j++)
s_board[i].possValues[j] = d_board[i].possValues[j];
}
}
__syncthreads(); // make sure the shared board is fully initialized before any other thread reads it
int tid = threadIdx.x + blockIdx.x*blockDim.x;
//d_points = 0; // for keeping track of work done
int numPossValues = 0; // must start at zero before counting the possible values
if ( (tid<(n*n)) && s_board[tid].isLocked!=-1) {
// enter the if statement if the thread is a valid Square
// and if the Square we're looking at is NOT locked (-1)
// first, check if only one option in possValues array
//numPossValues = sizeof(s_board[tid].possValues) / sizeof(int);
for (int k=0; k<n; k++) {
if (s_board[tid].possValues[k] != 0)
numPossValues++;
}
if (numPossValues==1) {
// only 1 number in possValues array
s_board[tid].value = s_board[tid].possValues[0];
s_board[tid].isLocked = -1;
//points++;
atomicAdd(&s_points, 1);
}
Square localRow[9];
Square localCol[9];
Square localBlock[9];
getRow(tid, s_board, localRow);
getCol(tid, s_board, localCol);
getBlock(tid, s_board, localBlock);
int num, nocheck, onlyOne;
// check if each number can only be in this Square for row/col/block
for (int i=0; i<9; i++) {
// cycle through all values in possValues array
// if any of row/col/block has no other Squares with curVal in possValues
// that value must be the Square's locked value
// ex: the first value in tid.possValues is a 4
// there are two 4's cutting off the other two columns for this block
// a 4 cuts off one of the rows in this block
// and the Square just above tid is already locked
// i.e., s_board[tid-9].isLocked = -1;
num = s_board[tid].possValues[i];
// first, make sure we're looking at a valid int
/* if (num==NULL)
break; */
if (num!=0) {
// now check for num in the possValues arrays for all Squares in the row
// if we see the number, break and start checking col,
// otherwise set the value to num and lock it
// just make sure you don't check against the current square, otherwise never win!
onlyOne = 0;
nocheck = tid%9; // for row, we don't want to check the column tid is in
for (int j=0; j<n; j++) {
if (j!=nocheck && localRow[j].isLocked!=-1) {
// skip checking current tid Square
// skip Squares that are locked as a precaution
// since we don't clear possValues after locking
// look for num in localRow[j].possValues[num-1]
if (num==localRow[j].possValues[num-1]) {
onlyOne = -1; // set onlyOne = -1 if NOT the only one
break;
}
}
}
// look for num in localRow[j].possValues, using device function
/*onlyOne = isPossibleNum(num, localRow[j].possValues);
if (onlyOne!=-1)
break;
}*/
// if you get here, means num was not a possValue in any other Square in row
// so set value and lock it
if (onlyOne!=-1) {
s_board[tid].value = num;
s_board[tid].isLocked = -1;
//points++;
atomicAdd(&s_points, 1);
break;
}
// now do the same for the column
onlyOne = 0;
nocheck = tid/9; // for col, we don't check the row we're in. used to be floor
for (int j=0; j<n; j++) {
if (j!=nocheck && localCol[j].isLocked!=-1) {
if (num==localCol[j].possValues[num-1]) {
onlyOne = -1;
break;
}
}
}
// look for num in localRow[j].possValues, using device function
/*onlyOne = isPossibleNum(num, localCol[j].possValues);
if (onlyOne!=-1)
break;
}*/
// if you get here, means num was not a possValue in any other Square in col
// so set value and lock it
if (onlyOne!=-1) {
s_board[tid].value = num;
s_board[tid].isLocked = -1;
//points++;
atomicAdd(&s_points, 1);
break;
}
// now do again for block
onlyOne = 0;
nocheck = findLocalBlockIdx(tid);
for (int j=0; j<n; j++) {
if (j!=nocheck && localBlock[j].isLocked!=-1) {
if (num==localBlock[j].possValues[num-1]) {
onlyOne = -1;
break;
}
}
}
// look for num in localRow[j].possValues, using device function
/*onlyOne = isPossibleNum(num, localBlock[j].possValues);
if (onlyOne!=-1)
break;
}*/
// if you get here, means num was not a possValue in any other Square in col
// so set value and lock it
if (onlyOne!=-1) {
s_board[tid].value = num;
s_board[tid].isLocked = -1;
atomicAdd(&s_points, 1);
//points++;
break;
}
}
}
} // close the per-thread update branch here so that every thread in the block reaches the barrier
__syncthreads();
// copy back from shared mem to global mem
if (threadIdx.x == 0) {
*d_points = s_points;
for (int i=0; i<(n*n); i++) {
d_board[i].value = s_board[i].value;
d_board[i].isLocked = s_board[i].isLocked;
if (s_board[i].isLocked!=-1) {
for (int j=0; j<n; j++) {
d_board[i].possValues[j] = s_board[i].possValues[j];
}
}
}
}
}
#endif
| human.cu | // human approach to solving Sudoku
// requires fully populated arrays of possValues
#ifndef HUMAN_H
#define HUMAN_H
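// findLocalBlockIdx maps a cell's flat index on the 9x9 board (tid) to its 0..8
// position inside its own 3x3 block. Worked example: tid = 40 (row 4, col 4) gives
// blockRow = 1, blockCol = 1, starter = 30, difference = 10, i.e. local index 4,
// the centre cell of the centre block.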
__device__
int findLocalBlockIdx(int tid) {
int blockRow = tid/27; // used to be floor
int col = tid%9;
int blockCol;
if (col<3)
blockCol = 0;
else if (col<6)
blockCol = 1;
else
blockCol = 2;
int starter = (blockRow*27) + (blockCol*3);
int difference = tid - starter;
if (difference==0)
return 0;
else if (difference==1)
return 1;
else if (difference==2)
return 2;
else if (difference==9)
return 3;
else if (difference==10)
return 4;
else if (difference==11)
return 5;
else if (difference==18)
return 6;
else if (difference==19)
return 7;
else
return 8;
}
__global__ void human(Square* d_board, int n, int* d_points) {
__shared__ Square s_board[81];
__shared__ int s_points;
if (threadIdx.x == 0) {
s_points = 0;
// initialize shared memory
for (int i = 0; i<(n*n); i++) {
s_board[i].value = d_board[i].value;
s_board[i].isLocked = d_board[i].isLocked;
for (int j=0; j<n; j++)
s_board[i].possValues[j] = d_board[i].possValues[j];
}
}
__syncthreads(); // make sure the shared board is fully initialized before any other thread reads it
int tid = threadIdx.x + blockIdx.x*blockDim.x;
//d_points = 0; // for keeping track of work done
int numPossValues = 0; // must start at zero before counting the possible values
if ( (tid<(n*n)) && s_board[tid].isLocked!=-1) {
// enter the if statement if the thread is a valid Square
// and if the Square we're looking at is NOT locked (-1)
// first, check if only one option in possValues array
//numPossValues = sizeof(s_board[tid].possValues) / sizeof(int);
for (int k=0; k<n; k++) {
if (s_board[tid].possValues[k] != 0)
numPossValues++;
}
if (numPossValues==1) {
// only 1 number in possValues array
s_board[tid].value = s_board[tid].possValues[0];
s_board[tid].isLocked = -1;
//points++;
atomicAdd(&s_points, 1);
}
Square localRow[9];
Square localCol[9];
Square localBlock[9];
getRow(tid, s_board, localRow);
getCol(tid, s_board, localCol);
getBlock(tid, s_board, localBlock);
int num, nocheck, onlyOne;
// check if each number can only be in this Square for row/col/block
for (int i=0; i<9; i++) {
// cycle through all values in possValues array
// if any of row/col/block has no other Squares with curVal in possValues
// that value must be the Square's locked value
// ex: the first value in tid.possValues is a 4
// there are two 4's cutting off the other two columns for this block
// a 4 cuts off one of the rows in this block
// and the Square just above tid is already locked
// i.e., s_board[tid-9].isLocked = -1;
num = s_board[tid].possValues[i];
// first, make sure we're looking at a valid int
/* if (num==NULL)
break; */
if (num!=0) {
// now check for num in the possValues arrays for all Squares in the row
// if we see the number, break and start checking col,
// otherwise set the value to num and lock it
// just make sure you don't check against the current square, otherwise never win!
onlyOne = 0;
nocheck = tid%9; // for row, we don't want to check the column tid is in
for (int j=0; j<n; j++) {
if (j!=nocheck && localRow[j].isLocked!=-1) {
// skip checking current tid Square
// skip Squares that are locked as a precaution
// since we don't clear possValues after locking
// look for num in localRow[j].possValues[num-1]
if (num==localRow[j].possValues[num-1]) {
onlyOne = -1; // set onlyOne = -1 if NOT the only one
break;
}
}
}
// look for num in localRow[j].possValues, using device function
/*onlyOne = isPossibleNum(num, localRow[j].possValues);
if (onlyOne!=-1)
break;
}*/
// if you get here, means num was not a possValue in any other Square in row
// so set value and lock it
if (onlyOne!=-1) {
s_board[tid].value = num;
s_board[tid].isLocked = -1;
//points++;
atomicAdd(&s_points, 1);
break;
}
// now do the same for the column
onlyOne = 0;
nocheck = tid/9; // for col, we don't check the row we're in. used to be floor
for (int j=0; j<n; j++) {
if (j!=nocheck && localCol[j].isLocked!=-1) {
if (num==localCol[j].possValues[num-1]) {
onlyOne = -1;
break;
}
}
}
// look for num in localRow[j].possValues, using device function
/*onlyOne = isPossibleNum(num, localCol[j].possValues);
if (onlyOne!=-1)
break;
}*/
// if you get here, means num was not a possValue in any other Square in col
// so set value and lock it
if (onlyOne!=-1) {
s_board[tid].value = num;
s_board[tid].isLocked = -1;
//points++;
atomicAdd(&s_points, 1);
break;
}
// now do again for block
onlyOne = 0;
nocheck = findLocalBlockIdx(tid);
for (int j=0; j<n; j++) {
if (j!=nocheck && localBlock[j].isLocked!=-1) {
if (num==localBlock[j].possValues[num-1]) {
onlyOne = -1;
break;
}
}
}
// look for num in localRow[j].possValues, using device function
/*onlyOne = isPossibleNum(num, localBlock[j].possValues);
if (onlyOne!=-1)
break;
}*/
// if you get here, means num was not a possValue in any other Square in col
// so set value and lock it
if (onlyOne!=-1) {
s_board[tid].value = num;
s_board[tid].isLocked = -1;
atomicAdd(&s_points, 1);
//points++;
break;
}
}
}
} // close the per-thread update branch here so that every thread in the block reaches the barrier
__syncthreads();
// copy back from shared mem to global mem
if (threadIdx.x == 0) {
*d_points = s_points;
for (int i=0; i<(n*n); i++) {
d_board[i].value = s_board[i].value;
d_board[i].isLocked = s_board[i].isLocked;
if (s_board[i].isLocked!=-1) {
for (int j=0; j<n; j++) {
d_board[i].possValues[j] = s_board[i].possValues[j];
}
}
}
}
}
#endif
|
24ed68fe3d5e23c2547ac3b8e88df5c9e64a371b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <omp.h>
using namespace std;
__global__ void MatMul(double * A, double * B ,double * C,int m,int n,int k)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
double sum = 0; // accumulate in double to match the element type
if (i < m && j < k){
for(int x = 0;x < n;x++){
sum += A[i * n + x] * B[x * k + j];
}
C[i * k + j] = sum;
}
}
void CPU_MatMul(double * A, double * B ,double * C,int m,int n,int k){
for(int i = 0;i < m;i++){
for(int j = 0;j < k;j++){
for(int x = 0;x < n;x++){
C[i * k + j] += A[i * n + x] * B[x * k + j];
}
}
}
}
int main()
{
timeval t1, t2;
int x,y;
cout << "Input threadsPerBlock.x:";
cin >> x;
cout << "Input threadsPerBlock.y:";
cin >> y;
dim3 threadsPerBlock(x,y);
int m, n, k;
cout << "Input problem size:";
cin >> m;
n = m;
k = m;
dim3 numBlocks((m % threadsPerBlock.x) ? m / threadsPerBlock.x + 1 : m / threadsPerBlock.x ,(k % threadsPerBlock.y) ? k / threadsPerBlock.y + 1 : k / threadsPerBlock.y);
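// Round the grid up so matrix sides that are not multiples of the block size are still fully covered, e.g. m = 1000 with threadsPerBlock.x = 16 gives 63 blocks (1008 threads); the bounds check inside the kernel discards the surplus threads.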
double *A,*B,*C;
A = (double*)malloc(sizeof(double) * m * n);
B = (double*)malloc(sizeof(double) * k * n);
C = (double*)malloc(sizeof(double) * m * k);
for(int i = 0;i < m;i++){
for(int j = 0;j < n;j++){
A[i * n + j] = rand() % 10;
}
}
for(int i = 0;i < n;i++){
for(int j = 0;j < k;j++){
B[i * k + j] = rand() % 10;
}
}
memset(C, 0, sizeof(double) * m * k); // sizeof(C) is only the pointer size, so zero the full output matrix explicitly
int omp_threads;
int gpu_count;
hipGetDeviceCount(&gpu_count);
cout << "Input number of omp threads:";
cin >> omp_threads;
gettimeofday(&t1, NULL);
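// Each OpenMP thread drives one GPU (device id % gpu_count) and multiplies its own contiguous chunk of m/size rows of A by the full B. Note this split assumes m is divisible by the number of OpenMP threads; any remainder rows of C would be left uncomputed.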
#pragma omp parallel num_threads(omp_threads)
{
int id = omp_get_thread_num();
int size = omp_get_num_threads();
hipSetDevice(id % gpu_count);
double * d_A,*d_B,*d_C;
hipMalloc(&d_A, sizeof(double) * m * n / size);
hipMalloc(&d_B,sizeof(double) * n * k);
hipMalloc(&d_C,sizeof(double) * m * k / size);
hipMemcpy(d_A, A + id * m * n / size, sizeof(double) * m * n / size, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, sizeof(double) * n * k, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( MatMul), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, m / size, n, k);
hipMemcpy(C + id * m * k / size, d_C, sizeof(double) * m * k / size, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
gettimeofday(&t2, NULL);
printf("GPU time is:%lds\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec);
free(A);
free(B);
free(C);
}
| 24ed68fe3d5e23c2547ac3b8e88df5c9e64a371b.cu | #include <iostream>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <omp.h>
using namespace std;
__global__ void MatMul(double * A, double * B ,double * C,int m,int n,int k)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
double sum = 0; // accumulate in double to match the element type
if (i < m && j < k){
for(int x = 0;x < n;x++){
sum += A[i * n + x] * B[x * k + j];
}
C[i * k + j] = sum;
}
}
void CPU_MatMul(double * A, double * B ,double * C,int m,int n,int k){
for(int i = 0;i < m;i++){
for(int j = 0;j < k;j++){
for(int x = 0;x < n;x++){
C[i * k + j] += A[i * n + x] * B[x * k + j];
}
}
}
}
int main()
{
timeval t1, t2;
int x,y;
cout << "Input threadsPerBlock.x:";
cin >> x;
cout << "Input threadsPerBlock.y:";
cin >> y;
dim3 threadsPerBlock(x,y);
int m, n, k;
cout << "Input problem size:";
cin >> m;
n = m;
k = m;
dim3 numBlocks((m % threadsPerBlock.x) ? m / threadsPerBlock.x + 1 : m / threadsPerBlock.x ,(k % threadsPerBlock.y) ? k / threadsPerBlock.y + 1 : k / threadsPerBlock.y);
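// Round the grid up so matrix sides that are not multiples of the block size are still fully covered, e.g. m = 1000 with threadsPerBlock.x = 16 gives 63 blocks (1008 threads); the bounds check inside the kernel discards the surplus threads.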
double *A,*B,*C;
A = (double*)malloc(sizeof(double) * m * n);
B = (double*)malloc(sizeof(double) * k * n);
C = (double*)malloc(sizeof(double) * m * k);
for(int i = 0;i < m;i++){
for(int j = 0;j < n;j++){
A[i * n + j] = rand() % 10;
}
}
for(int i = 0;i < n;i++){
for(int j = 0;j < k;j++){
B[i * k + j] = rand() % 10;
}
}
memset(C, 0, sizeof(double) * m * k); // sizeof(C) is only the pointer size, so zero the full output matrix explicitly
int omp_threads;
int gpu_count;
cudaGetDeviceCount(&gpu_count);
cout << "Input number of omp threads:";
cin >> omp_threads;
gettimeofday(&t1, NULL);
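// Each OpenMP thread drives one GPU (device id % gpu_count) and multiplies its own contiguous chunk of m/size rows of A by the full B. Note this split assumes m is divisible by the number of OpenMP threads; any remainder rows of C would be left uncomputed.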
#pragma omp parallel num_threads(omp_threads)
{
int id = omp_get_thread_num();
int size = omp_get_num_threads();
cudaSetDevice(id % gpu_count);
double * d_A,*d_B,*d_C;
cudaMalloc(&d_A, sizeof(double) * m * n / size);
cudaMalloc(&d_B,sizeof(double) * n * k);
cudaMalloc(&d_C,sizeof(double) * m * k / size);
cudaMemcpy(d_A, A + id * m * n / size, sizeof(double) * m * n / size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, sizeof(double) * n * k, cudaMemcpyHostToDevice);
MatMul<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, m / size, n, k);
cudaMemcpy(C + id * m * k / size, d_C, sizeof(double) * m * k / size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
gettimeofday(&t2, NULL);
printf("GPU time is:%ldμs\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec);
free(A);
free(B);
free(C);
}
|
c95511d939d419c86b3d75541b021c6cfc04cb49.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <sys/time.h>
#include <stdio.h>
using namespace std;
/* Overlapping data transfers and kernel execution
* - pinned memory
* - streams
* - different strategies depending on concurrent data transfers enabled or not
*/
#define TILE_DIM 16
#define BLOCK_ROWS 16
__global__ void transposeNaive(double *odata, double* idata, int width, int height, int nreps)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i] = idata[index_in+i*width];
}
}
}
int main() {
// check if device can overlap data transfers with computation
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d.\n", device,
deviceProp.major, deviceProp.minor);
cout << " asyncEngineCount = " << deviceProp.deviceOverlap << endl;
}
/************************/
// side length of square matrix
int side = 2048;
// number of elements in a single matrix
int n = side*side;
// number of matrices to transpose
int nTranspose = 96;
// number of transpose operations on a single matrix
int nreps = 20;
int nStream = 8;
// (*) define streams here
hipStream_t stream[nStream];
for (int i=0; i<nStream; i++)
hipStreamCreate(&stream[i]);
// create events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// (*) modify here to allocate pinned host memory with hipHostMalloc
double *data;
hipHostMalloc((void**) &data, nTranspose * n * sizeof(double));
// data initialization
for (int j=0; j<nTranspose; j++)
for (int i=0; i<n; i++) {
data[i+j*n] = double(i+j*n);
}
double *data_dev;
// (*) modify device memory allocation size according to nStream
hipMalloc((void**) &data_dev, nStream * 2 * n * sizeof(double));
dim3 grid(side/16,side/16,1);
dim3 threads(16,16,1);
// record start event
hipEventRecord(start, 0);
// concurrent data transfers not supported
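// Work is issued in rounds of nStream matrices: all host-to-device copies for the round first, then all kernels, then all device-to-host copies. Each stream i owns a private 2*n-element slice of data_dev: the input tile at offset*2 and the transposed output at offset*2 + n.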
for (int j=0; j<nTranspose/nStream; j++) {
for (int i=0; i<nStream; i++) {
int ij = j*nStream+i;
int offset = i * n;
hipMemcpyAsync(data_dev + offset*2, data + ij*n, n * sizeof(double),
hipMemcpyHostToDevice, stream[i]);
}
for (int i=0; i<nStream; i++) {
int offset = i * n;
hipLaunchKernelGGL(( transposeNaive) , dim3(grid), dim3(threads), 0, stream[i] ,
data_dev + offset*2+n, data_dev + offset*2, side, side, nreps);
}
for (int i=0; i<nStream; i++) {
int ij = j*nStream+i;
int offset = i * n;
hipMemcpyAsync(data + ij*n, data_dev + offset*2 + n, n * sizeof(double),
hipMemcpyDeviceToHost, stream[i]);
}
}
// record stop event
hipEventRecord(stop, stream[nStream-1]);
// elapsed time
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
cout << "Comp time = " << elapsedTime/1000.0 << endl;
// destroy events
hipEventDestroy(start);
hipEventDestroy(stop);
// (*) Destroy streams here
for (int i=0; i<nStream; i++)
hipStreamDestroy(stream[i]);
hipFree(data_dev);
cout << "value check = " << data[n+5467] << endl;
// (*) modify here to free pinned host memory
hipHostFree(data);
}
| c95511d939d419c86b3d75541b021c6cfc04cb49.cu | #include <cuda.h>
#include <iostream>
#include <sys/time.h>
#include <stdio.h>
using namespace std;
/* Overlapping data transfers and kernel execution
* - pinned memory
* - streams
* - different strategies depending on concurrent data transfers enabled or not
*/
#define TILE_DIM 16
#define BLOCK_ROWS 16
__global__ void transposeNaive(double *odata, double* idata, int width, int height, int nreps)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i] = idata[index_in+i*width];
}
}
}
int main() {
// check if device can overlap data transfers with computation
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d.\n", device,
deviceProp.major, deviceProp.minor);
cout << " asyncEngineCount = " << deviceProp.deviceOverlap << endl;
}
/************************/
// side length of square matrix
int side = 2048;
// number of elements in a single matrix
int n = side*side;
// number of matrices to transpose
int nTranspose = 96;
// number of transpose operations on a single matrix
int nreps = 20;
int nStream = 8;
// (*) define streams here
cudaStream_t stream[nStream];
for (int i=0; i<nStream; i++)
cudaStreamCreate(&stream[i]);
// create events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// (*) modify here to allocate pinned host memory with cudaMallocHost
double *data;
cudaMallocHost((void**) &data, nTranspose * n * sizeof(double));
// data initialization
for (int j=0; j<nTranspose; j++)
for (int i=0; i<n; i++) {
data[i+j*n] = double(i+j*n);
}
double *data_dev;
// (*) modify device memory allocation size according to nStream
cudaMalloc((void**) &data_dev, nStream * 2 * n * sizeof(double));
dim3 grid(side/16,side/16,1);
dim3 threads(16,16,1);
// record start event
cudaEventRecord(start, 0);
// concurrent data transfers not supported
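// Work is issued in rounds of nStream matrices: all host-to-device copies for the round first, then all kernels, then all device-to-host copies. Each stream i owns a private 2*n-element slice of data_dev: the input tile at offset*2 and the transposed output at offset*2 + n.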
for (int j=0; j<nTranspose/nStream; j++) {
for (int i=0; i<nStream; i++) {
int ij = j*nStream+i;
int offset = i * n;
cudaMemcpyAsync(data_dev + offset*2, data + ij*n, n * sizeof(double),
cudaMemcpyHostToDevice, stream[i]);
}
for (int i=0; i<nStream; i++) {
int offset = i * n;
transposeNaive <<< grid, threads, 0, stream[i] >>>
(data_dev + offset*2+n, data_dev + offset*2, side, side, nreps);
}
for (int i=0; i<nStream; i++) {
int ij = j*nStream+i;
int offset = i * n;
cudaMemcpyAsync(data + ij*n, data_dev + offset*2 + n, n * sizeof(double),
cudaMemcpyDeviceToHost, stream[i]);
}
}
// record stop event
cudaEventRecord(stop, stream[nStream-1]);
// elapsed time
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << "Comp time = " << elapsedTime/1000.0 << endl;
// destroy events
cudaEventDestroy(start);
cudaEventDestroy(stop);
// (*) Destroy streams here
for (int i=0; i<nStream; i++)
cudaStreamDestroy(stream[i]);
cudaFree(data_dev);
cout << "value check = " << data[n+5467] << endl;
// (*) modify here to free pinned host memory
cudaFreeHost(data);
}
|
b10dc8d93dfa74f7a10e39a4e532ad562b4d88d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* -- NUPAR: A Benchmark Suite for Modern GPU Architectures
* NUPAR - 2 December 2014
* Fanny Nina-Paravecino
* Northeastern University
* NUCAR Research Laboratory
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions, and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgement:
* This product includes software developed at the Northeastern U.
*
* 4. The name of the University, the name of the Laboratory, or the
* names of its contributors may not be used to endorse or promote
* products derived from this software without specific written
* permission.
*
* -- Disclaimer:
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------
*/
#include <stdio.h>
#include <math.h>
#include "log_helper.h"
#define THREADSX 16
#define THREADSY 16
#define THREADS 512
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
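// findSpansKernel: one thread per image row. A row can hold at most colsSpans/2 spans; 'out' stores them as (startX, endX) pairs (colsSpans ints per row) and 'components' stores one provisional, globally unique label per span (colsComponents = colsSpans/2 ints per row).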
__global__ void findSpansKernel(int *out, int *components, const int *in,
const int rows, const int cols)
{
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint colsSpans = ((cols+2-1)/2)*2;
int current;
int colsComponents = colsSpans/2;
bool flagFirst = true;
int indexOut = 0;
int indexComp = 0;
int comp = i*colsComponents;
if (i<rows)
{
for (int j = 0; j < cols; j++)
{
if(flagFirst && in[i*cols+j]> 0)
{
current = in[i*cols+j];
out[i*colsSpans+indexOut] = j;
indexOut++;
flagFirst = false;
}
if (!flagFirst && in[i*cols+j] != current)
{
out[i*colsSpans+indexOut] = j-1;
indexOut++;
flagFirst = true;
/*add the respective label*/
components[i*colsComponents+indexComp] = comp;
indexComp++;
comp++;
}
}
if (!flagFirst)
{
out[i*colsSpans+indexOut] = cols - 1;
/*add the respective label*/
components[i*colsComponents+indexComp] = comp;
}
}
}
__global__ void relabelKernel(int *components, int previousLabel, int newLabel, const int colsComponents)
{
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
if(components[i*colsComponents+j]==previousLabel)
{
components[i*colsComponents+j] = newLabel;
}
}
__global__ void relabel2Kernel(int *components, int previousLabel, int newLabel, const int colsComponents, const int idx, const int frameRows)
{
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
i = i*colsComponents+j;
i = i +(colsComponents*frameRows*idx);
if(components[i]==previousLabel)
{
components[i] = newLabel;
}
}
__global__ void relabelUnrollKernel(int *components, int previousLabel, int newLabel, const int colsComponents, const int idx, const int frameRows, const int factor)
{
uint id_i_child = (blockIdx.x * blockDim.x) + threadIdx.x;
id_i_child = id_i_child +(frameRows*idx);
uint id_j_child = (blockIdx.y * blockDim.y) + threadIdx.y;
id_j_child = (colsComponents/factor)*id_j_child;
uint i = id_i_child;
for (int j=id_j_child; j< (colsComponents/factor); j++)
{
if(components[i*colsComponents+j]==previousLabel)
{
components[i*colsComponents+j] = newLabel;
}
}
}
__global__ void mergeSpansKernel(int *components, int *spans, const int rows, const int cols, const int frameRows)
{
uint idx = (blockIdx.x * blockDim.x) + threadIdx.x;
uint colsSpans = ((cols+2-1)/2)*2;
uint colsComponents = colsSpans/2;
/*Merge Spans*/
int startX, endX, newStartX, newEndX;
int label=-1;
/*threads and blocks need to relabel the components labels*/
int threads = 16;
const int factor =2;
/*--------For 256, 512--------*/
dim3 threadsPerBlockUnrollRelabel(threads*threads);
dim3 numBlocksUnrollRelabel((frameRows*factor)/(threads*threads));
/*-----------------*/
for (int i = idx*frameRows; i < ((idx*frameRows)+frameRows)-1; i++) //compute until the penultimate row, since we need the row below to compare
{
for (int j=0; j < colsSpans-1 && spans[i*colsSpans+j] >=0; j=j+2) //verify if there is a Span available
{
startX = spans[i*colsSpans+j];
endX = spans[i*colsSpans+j+1];
int newI = i+1; //line below
for (int k=0; k<colsSpans-1 && spans[newI*colsSpans+k] >=0; k=k+2) //verify if there is a New Span available
{
newStartX = spans[newI*colsSpans+k];
newEndX = spans[newI*colsSpans+k+1];
if (startX <= newEndX && endX >= newStartX)//Merge components
{
label = components[i*(colsSpans/2)+(j/2)]; //choose the startSpan label
hipLaunchKernelGGL(( relabelUnrollKernel), dim3(numBlocksUnrollRelabel), dim3(threadsPerBlockUnrollRelabel), 0, 0, components, components[newI*(colsSpans/2)+(k/2)], label, colsComponents, idx, frameRows, factor);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("\tError:%s \n", (char)err);
}
__syncthreads();
}
}
}
}
double acclCuda(int *out, int *components, const int *in, uint nFrames,
uint nFramsPerStream, const int rows, const int cols, int logs_active)
{
int *devIn = 0;
int *devComponents = 0;
int *devOut = 0;
const int colsSpans = ((cols+2-1)/2)*2; /*ceil(cols/2)*2*/
const int colsComponents = colsSpans/2;
/*compute sizes of matrices*/
const int sizeIn = rows * cols;
const int sizeComponents = colsComponents*rows;
const int sizeOut = colsSpans*rows;
/*Block and Grid size*/
int blockSize;
int minGridSize;
int gridSize;
/*Frame Info*/
const int frameRows = rows/nFrames;
/*Streams Information*/
uint nStreams = nFrames/nFramsPerStream;
int rowsOccupancyMax = frameRows * nFramsPerStream;
cudaErrChk(hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
findSpansKernel, 0, rowsOccupancyMax));
// printf("Best Kernel Size\n");
// printf("-----------------\n");
// printf("\t Minimum gridSize to acchieve high occupancy: %d\n", minGridSize);
// printf("\t Block Size: %d\n", blockSize);
// printf("\t Rows Max Occupancy: %d\n", rowsOccupancyMax);
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
/* Choose which GPU to run on, change this on a multi-GPU system.*/
cudaErrChk(hipSetDevice(0));
/* Allocate GPU buffers for three vectors (two input, one output)*/
cudaErrChk(hipMalloc((void**)&devOut, sizeOut * sizeof(int)));
cudaErrChk(hipMalloc((void**)&devComponents, sizeComponents * sizeof(int)));
cudaErrChk(hipMalloc((void**)&devIn, sizeIn * sizeof(int)));
/* Copy input vectors from host memory to GPU buffers*/
cudaErrChk(hipMemcpy(devIn, in, sizeIn * sizeof(int), hipMemcpyHostToDevice));
cudaErrChk(hipMemcpy(devComponents, components, sizeComponents * sizeof(int),
hipMemcpyHostToDevice));
cudaErrChk(hipMemcpy(devOut, out, sizeOut * sizeof(int), hipMemcpyHostToDevice));
/*launch streams*/
hipStream_t *streams = (hipStream_t *) malloc(nStreams * sizeof(hipStream_t));
for (int i = 0; i < nStreams; i++)
{
cudaErrChk(hipStreamCreate(&(streams[i])));
}
/*variables for streaming*/
const int frameSpansSize = rows/nStreams * colsSpans;
const int frameCompSize = rows/nStreams * colsComponents;
/* Round up according to array size */
blockSize = 256;
gridSize = (rows/nStreams)/blockSize;
//gridSize = rows/blockSize;
/* Launch a kernel on the GPU with one thread for each element*/
// printf("Number of frames processed: %d\n", nFrames);
// printf("Number of streams created: %d\n", nStreams);
hipEventRecord(start, 0); /*measure time*/
if (logs_active) start_iteration();
for(int i=0; i<nStreams; ++i)
{
hipLaunchKernelGGL(( findSpansKernel), dim3(gridSize), dim3(blockSize), 0, 0, &devOut[i*frameSpansSize],
&devComponents[i*frameCompSize], &devIn[i*frameSpansSize],
rows, cols);
/*Merge Spans*/
hipLaunchKernelGGL(( mergeSpansKernel), dim3(1), dim3(nFramsPerStream), 0, 0, &devComponents[i*frameCompSize],
&devOut[i*frameSpansSize],
rows,
cols,
frameRows);
}
hipDeviceSynchronize();
if (logs_active) end_iteration();
/* Copy device to host*/
cudaErrChk(hipMemcpy(components, devComponents, sizeComponents * sizeof(int),
hipMemcpyDeviceToHost));
cudaErrChk(hipMemcpy(out, devOut, sizeOut * sizeof(int),
hipMemcpyDeviceToHost));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
//printf ("Time kernel execution: %f ms\n", time);
/* Analysis of occupancy*/
int maxActiveBlocks;
hipOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks,
findSpansKernel, blockSize,0);
int device;
hipDeviceProp_t props;
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
(float)(props.maxThreadsPerMultiProcessor /
props.warpSize);
// printf("Occupancy Results\n");
// printf("-----------------\n");
// printf("\t Block Size: %d\n", blockSize);
// printf("\t Grid Size: %d\n", gridSize);
// printf("\t Theoretical occupancy: %f\n", occupancy);
/*Free*/
hipFree(devOut);
hipFree(devIn);
hipFree(devComponents);
return time;
}
| b10dc8d93dfa74f7a10e39a4e532ad562b4d88d1.cu | /*
* -- NUPAR: A Benchmark Suite for Modern GPU Architectures
* NUPAR - 2 December 2014
* Fanny Nina-Paravecino
* Northeastern University
* NUCAR Research Laboratory
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions, and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgement:
* This product includes software developed at the Northeastern U.
*
* 4. The name of the University, the name of the Laboratory, or the
* names of its contributors may not be used to endorse or promote
* products derived from this software without specific written
* permission.
*
* -- Disclaimer:
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------
*/
#include <stdio.h>
#include <math.h>
#include "log_helper.h"
#define THREADSX 16
#define THREADSY 16
#define THREADS 512
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
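// findSpansKernel: one thread per image row. A row can hold at most colsSpans/2 spans; 'out' stores them as (startX, endX) pairs (colsSpans ints per row) and 'components' stores one provisional, globally unique label per span (colsComponents = colsSpans/2 ints per row).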
__global__ void findSpansKernel(int *out, int *components, const int *in,
const int rows, const int cols)
{
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint colsSpans = ((cols+2-1)/2)*2;
int current;
int colsComponents = colsSpans/2;
bool flagFirst = true;
int indexOut = 0;
int indexComp = 0;
int comp = i*colsComponents;
if (i<rows)
{
for (int j = 0; j < cols; j++)
{
if(flagFirst && in[i*cols+j]> 0)
{
current = in[i*cols+j];
out[i*colsSpans+indexOut] = j;
indexOut++;
flagFirst = false;
}
if (!flagFirst && in[i*cols+j] != current)
{
out[i*colsSpans+indexOut] = j-1;
indexOut++;
flagFirst = true;
/*add the respective label*/
components[i*colsComponents+indexComp] = comp;
indexComp++;
comp++;
}
}
if (!flagFirst)
{
out[i*colsSpans+indexOut] = cols - 1;
/*add the respective label*/
components[i*colsComponents+indexComp] = comp;
}
}
}
__global__ void relabelKernel(int *components, int previousLabel, int newLabel, const int colsComponents)
{
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
if(components[i*colsComponents+j]==previousLabel)
{
components[i*colsComponents+j] = newLabel;
}
}
__global__ void relabel2Kernel(int *components, int previousLabel, int newLabel, const int colsComponents, const int idx, const int frameRows)
{
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
i = i*colsComponents+j;
i = i +(colsComponents*frameRows*idx);
if(components[i]==previousLabel)
{
components[i] = newLabel;
}
}
__global__ void relabelUnrollKernel(int *components, int previousLabel, int newLabel, const int colsComponents, const int idx, const int frameRows, const int factor)
{
uint id_i_child = (blockIdx.x * blockDim.x) + threadIdx.x;
id_i_child = id_i_child +(frameRows*idx);
uint id_j_child = (blockIdx.y * blockDim.y) + threadIdx.y;
id_j_child = (colsComponents/factor)*id_j_child;
uint i = id_i_child;
for (int j=id_j_child; j< (colsComponents/factor); j++)
{
if(components[i*colsComponents+j]==previousLabel)
{
components[i*colsComponents+j] = newLabel;
}
}
}
__global__ void mergeSpansKernel(int *components, int *spans, const int rows, const int cols, const int frameRows)
{
uint idx = (blockIdx.x * blockDim.x) + threadIdx.x;
uint colsSpans = ((cols+2-1)/2)*2;
uint colsComponents = colsSpans/2;
/*Merge Spans*/
int startX, endX, newStartX, newEndX;
int label=-1;
/*threads and blocks need to relabel the components labels*/
int threads = 16;
const int factor =2;
/*--------For 256, 512--------*/
dim3 threadsPerBlockUnrollRelabel(threads*threads);
dim3 numBlocksUnrollRelabel((frameRows*factor)/(threads*threads));
/*-----------------*/
for (int i = idx*frameRows; i < ((idx*frameRows)+frameRows)-1; i++) //compute until the penultimate row, since we need the row below to compare
{
for (int j=0; j < colsSpans-1 && spans[i*colsSpans+j] >=0; j=j+2) //verify if there is a Span available
{
startX = spans[i*colsSpans+j];
endX = spans[i*colsSpans+j+1];
int newI = i+1; //line below
for (int k=0; k<colsSpans-1 && spans[newI*colsSpans+k] >=0; k=k+2) //verify if there is a New Span available
{
newStartX = spans[newI*colsSpans+k];
newEndX = spans[newI*colsSpans+k+1];
if (startX <= newEndX && endX >= newStartX)//Merge components
{
label = components[i*(colsSpans/2)+(j/2)]; //choose the startSpan label
relabelUnrollKernel<<<numBlocksUnrollRelabel, threadsPerBlockUnrollRelabel>>>(components, components[newI*(colsSpans/2)+(k/2)], label, colsComponents, idx, frameRows, factor);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("\tError:%s \n", (char)err);
}
__syncthreads();
}
}
}
}
double acclCuda(int *out, int *components, const int *in, uint nFrames,
uint nFramsPerStream, const int rows, const int cols, int logs_active)
{
int *devIn = 0;
int *devComponents = 0;
int *devOut = 0;
const int colsSpans = ((cols+2-1)/2)*2; /*ceil(cols/2)*2*/
const int colsComponents = colsSpans/2;
/*compute sizes of matrices*/
const int sizeIn = rows * cols;
const int sizeComponents = colsComponents*rows;
const int sizeOut = colsSpans*rows;
/*Block and Grid size*/
int blockSize;
int minGridSize;
int gridSize;
/*Frame Info*/
const int frameRows = rows/nFrames;
/*Streams Information*/
uint nStreams = nFrames/nFramsPerStream;
int rowsOccupancyMax = frameRows * nFramsPerStream;
cudaErrChk(cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
findSpansKernel, 0, rowsOccupancyMax));
// printf("Best Kernel Size\n");
// printf("-----------------\n");
// printf("\t Minimum gridSize to acchieve high occupancy: %d\n", minGridSize);
// printf("\t Block Size: %d\n", blockSize);
// printf("\t Rows Max Occupancy: %d\n", rowsOccupancyMax);
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/* Choose which GPU to run on, change this on a multi-GPU system.*/
cudaErrChk(cudaSetDevice(0));
/* Allocate GPU buffers for three vectors (two input, one output)*/
cudaErrChk(cudaMalloc((void**)&devOut, sizeOut * sizeof(int)));
cudaErrChk(cudaMalloc((void**)&devComponents, sizeComponents * sizeof(int)));
cudaErrChk(cudaMalloc((void**)&devIn, sizeIn * sizeof(int)));
/* Copy input vectors from host memory to GPU buffers*/
cudaErrChk(cudaMemcpy(devIn, in, sizeIn * sizeof(int), cudaMemcpyHostToDevice));
cudaErrChk(cudaMemcpy(devComponents, components, sizeComponents * sizeof(int),
cudaMemcpyHostToDevice));
cudaErrChk(cudaMemcpy(devOut, out, sizeOut * sizeof(int), cudaMemcpyHostToDevice));
/*launch streams*/
cudaStream_t *streams = (cudaStream_t *) malloc(nStreams * sizeof(cudaStream_t));
for (int i = 0; i < nStreams; i++)
{
cudaErrChk(cudaStreamCreate(&(streams[i])));
}
/*variables for streaming*/
const int frameSpansSize = rows/nStreams * colsSpans;
const int frameCompSize = rows/nStreams * colsComponents;
/* Round up according to array size */
blockSize = 256;
gridSize = (rows/nStreams)/blockSize;
//gridSize = rows/blockSize;
/* Launch a kernel on the GPU with one thread for each element*/
// printf("Number of frames processed: %d\n", nFrames);
// printf("Number of streams created: %d\n", nStreams);
cudaEventRecord(start, 0); /*measure time*/
if (logs_active) start_iteration();
for(int i=0; i<nStreams; ++i)
{
findSpansKernel<<<gridSize, blockSize>>>(&devOut[i*frameSpansSize],
&devComponents[i*frameCompSize], &devIn[i*frameSpansSize],
rows, cols);
/*Merge Spans*/
mergeSpansKernel<<<1, nFramsPerStream>>>(&devComponents[i*frameCompSize],
&devOut[i*frameSpansSize],
rows,
cols,
frameRows);
}
cudaDeviceSynchronize();
if (logs_active) end_iteration();
/* Copy device to host*/
cudaErrChk(cudaMemcpy(components, devComponents, sizeComponents * sizeof(int),
cudaMemcpyDeviceToHost));
cudaErrChk(cudaMemcpy(out, devOut, sizeOut * sizeof(int),
cudaMemcpyDeviceToHost));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
//printf ("Time kernel execution: %f ms\n", time);
/* Analysis of occupancy*/
int maxActiveBlocks;
cudaOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks,
findSpansKernel, blockSize,0);
int device;
cudaDeviceProp props;
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
(float)(props.maxThreadsPerMultiProcessor /
props.warpSize);
// printf("Occupancy Results\n");
// printf("-----------------\n");
// printf("\t Block Size: %d\n", blockSize);
// printf("\t Grid Size: %d\n", gridSize);
// printf("\t Theoretical occupancy: %f\n", occupancy);
/*Free*/
cudaFree(devOut);
cudaFree(devIn);
cudaFree(devComponents);
return time;
}
|
495ba9e860a4533b45d69e422d29fd1182c53ef7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* This is a technique to choose from a template instantiation at RUNTIME!
*
* Needs:
* a) -std=c++11
* b) boost::mpl
*
* Example: In our CUDA autotuner we need to try the kernel with a set of different template parameters and then choose the optimal one.
* We have 3 different parameters that can take several values and we want to try all combinations.
* 1) Make a list for each parameter with the values to try.
* 2) Define a method to call and hide it in a templated struct. The template arguments are the parameters. The method is defined in the operator().
* 3) Define a struct or whatever you want that can serve as a unique identifier of your RuntimeChooser.
* 4) typedef the unique RuntimeChooser for convenience. Template arguments are (a) the unique identifier from (3) and (b) the struct that holds the method to call from (2)
* where the template arguments are replaced by an underscore "_".
* 5) Instantiate the frontend. Pass the RuntimeChooser and all sequences in the order in which the parameters are used in the function structure (2).
* 6) From the RuntimeChooser you can get an iterator with all possible choices as a hash number.
* 7) The frontend instance run() can now be called with one of the possible hash numbers.
*/
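/*
 * Minimal sketch of the same idea without boost::mpl (hypothetical helper names, for
 * illustration only -- the real mechanism used below is the RuntimeChooser /
 * SequenceRunnerFrontend pair): a runtime id selects one of a fixed set of template
 * instantiations through a plain switch.
 */
template<int ThreadsPerSite> static int threadsPerSiteValue() { return ThreadsPerSite; }
static int threadsPerSiteForId( int id )
{
    switch( id )
    {
    case 0: return threadsPerSiteValue<4>(); // first entry of threadsPerSiteSequence
    case 1: return threadsPerSiteValue<8>(); // second entry of threadsPerSiteSequence
    default: return -1; // unknown id
    }
}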
#include <iostream>
#include <boost/mpl/vector.hpp>
#include <boost/mpl/vector_c.hpp>
#include <boost/mpl/placeholders.hpp>
#include "helper/LaunchBounds.h"
#include "SequenceRunnerFrontend.h"
#include "RuntimeChooser.h"
#include <boost/mpl/int.hpp>
#include <boost/mpl/bool.hpp>
using mpl_::int_;
using mpl_::bool_;
/*
* 1) make the lists
*/
typedef mpl::vector< LaunchBounds<1,2>, LaunchBounds<3,4> > launchBoundsSequence;
typedef mpl::vector_c< int, 4, 8 > threadsPerSiteSequence;
typedef mpl::vector_c< int, 0, 1 > useTextureSequence;
template<typename LaunchBounds, typename ThreadsPerSite, typename UseTexture> __global__ void someKernel( int* a )
{
printf( "device: (%d,%d), %d, %d\n", LaunchBounds::maxThreadsPerBlock, LaunchBounds::minBlocksPerMultiprocessor, ThreadsPerSite::value, UseTexture::value );
a[threadIdx.x] = threadIdx.x;
}
/*
* 2) define the method to call: here we just print out the values.
*/
template<typename LaunchBounds, typename ThreadsPerSite, typename UseTexture> struct Printer
{
template<typename T> static void exec( T* object )
{
hipLaunchKernelGGL(( someKernel<LaunchBounds,ThreadsPerSite,UseTexture>), dim3(1),dim3(1), 0, 0, object->devPtr );
cout << "(" << LaunchBounds::maxThreadsPerBlock << "," << LaunchBounds::minBlocksPerMultiprocessor << "), " << ThreadsPerSite::value << ", " << UseTexture::value << endl;
}
};
/*
* 3) Define a unique identifier
 * Background: The possible runtime choices in the RuntimeChooser are static member variables; if you have several RuntimeChooser instances in your program we need to distinguish them...
*/
struct UniqueId
{
int* devPtr;
void allocate()
{
hipMalloc( &devPtr, sizeof(int) );
}
};
int main()
{
UniqueId obj;
obj.allocate();
// 4) typedef the RuntimeChooser, here we want to call the Printer template with three arguments: Printer<_,_,_>
typedef RuntimeChooser<UniqueId,Printer<_,_,_> > MyChooser;
MyChooser::object = &obj;
// Printer<LaunchBounds<4,3>,int_<3>, bool_<true> >::exec( MyChooser::object );
// 5) instantiate the frontend
SequenceRunnerFrontend<MyChooser,launchBoundsSequence,threadsPerSiteSequence,useTextureSequence> test;
// 6) get an iterator and loop over all possible choices.
for( vector<RuntimeChooserOption>::iterator it = MyChooser::begin(); it != MyChooser::end(); ++it )
{
cout << it->name << ": ";
test.run( it->id );
}
int hostPtr[1];
hipMemcpy( hostPtr, obj.devPtr, sizeof(int), hipMemcpyDeviceToHost );
}
| 495ba9e860a4533b45d69e422d29fd1182c53ef7.cu | /**
* This is a technique to choose from a template instantiation at RUNTIME!
*
* Needs:
* a) -std=c++11
* b) boost::mpl
*
* Example: In our CUDA autotuner we need to try the kernel with a set of different template parameters and then choose the optimal one.
* We have 3 different parameters that can take several values and we want to try all combinations.
* 1) Make a list for each parameter with the values to try.
* 2) Define a method to call and hide it in a templated struct. The template arguments are the parameters. The method is defined in the operator().
* 3) Define a struct or whatever you want that can serve as a unique identifier of your RuntimeChooser.
* 4) typedef the unique RuntimeChooser for convenience. Template arguments are (a) the unique identifier from (3) and (b) the struct that holds the method to call from (2)
* where the template arguments are replaced by an underscore "_".
* 5) Instantiate the frontend. Pass the RuntimeChooser and all sequences in the order in which the parameters are used in the function structure (2).
* 6) From the RuntimeChooser you can get an iterator with all possible choices as a hash number.
* 7) The frontend instance run() can now be called with one of the possible hash numbers.
*/
#include <iostream>
#include <boost/mpl/vector.hpp>
#include <boost/mpl/vector_c.hpp>
#include <boost/mpl/placeholders.hpp>
#include "helper/LaunchBounds.h"
#include "SequenceRunnerFrontend.h"
#include "RuntimeChooser.h"
#include <boost/mpl/int.hpp>
#include <boost/mpl/bool.hpp>
using mpl_::int_;
using mpl_::bool_;
/*
* 1) make the lists
*/
typedef mpl::vector< LaunchBounds<1,2>, LaunchBounds<3,4> > launchBoundsSequence;
typedef mpl::vector_c< int, 4, 8 > threadsPerSiteSequence;
typedef mpl::vector_c< int, 0, 1 > useTextureSequence;
template<typename LaunchBounds, typename ThreadsPerSite, typename UseTexture> __global__ void someKernel( int* a )
{
printf( "device: (%d,%d), %d, %d\n", LaunchBounds::maxThreadsPerBlock, LaunchBounds::minBlocksPerMultiprocessor, ThreadsPerSite::value, UseTexture::value );
a[threadIdx.x] = threadIdx.x;
}
/*
* 2) define the method to call: here we just print out the values.
*/
template<typename LaunchBounds, typename ThreadsPerSite, typename UseTexture> struct Printer
{
template<typename T> static void exec( T* object )
{
someKernel<LaunchBounds,ThreadsPerSite,UseTexture><<<1,1>>>( object->devPtr );
cout << "(" << LaunchBounds::maxThreadsPerBlock << "," << LaunchBounds::minBlocksPerMultiprocessor << "), " << ThreadsPerSite::value << ", " << UseTexture::value << endl;
}
};
/*
* 3) Define a unique identifier
 * Background: The possible runtime choices in the RuntimeChooser are static member variables; if you have several RuntimeChooser instances in your program we need to distinguish them...
*/
struct UniqueId
{
int* devPtr;
void allocate()
{
cudaMalloc( &devPtr, sizeof(int) );
}
};
int main()
{
UniqueId obj;
obj.allocate();
// 4) typedef the RuntimeChooser, here we want to call the Printer template with three arguments: Printer<_,_,_>
typedef RuntimeChooser<UniqueId,Printer<_,_,_> > MyChooser;
MyChooser::object = &obj;
// Printer<LaunchBounds<4,3>,int_<3>, bool_<true> >::exec( MyChooser::object );
// 5) instantiate the frontend
SequenceRunnerFrontend<MyChooser,launchBoundsSequence,threadsPerSiteSequence,useTextureSequence> test;
// 6) get an iterator and loop over all possible choices.
for( vector<RuntimeChooserOption>::iterator it = MyChooser::begin(); it != MyChooser::end(); ++it )
{
cout << it->name << ": ";
test.run( it->id );
}
int hostPtr[1];
cudaMemcpy( hostPtr, obj.devPtr, sizeof(int), cudaMemcpyDeviceToHost );
}
|
678e4d5c14fe4bda6a1d7dcac26c16e015525e66.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHTensorMathReduce.cuh"
#include "THHTensor.hpp"
THC_API int
THCudaByteTensor_logicalAndAll(THCState *state, THCudaByteTensor *self) {
THCAssertSameGPU(THCudaByteTensor_checkGPU(state, 1, self));
unsigned char result;
if (!THC_reduceAll(state, self,
thrust::identity<unsigned char>(),
LogicalAll(),
(unsigned char) 1, &result, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
return (int) result;
}
THC_API int
THCudaByteTensor_logicalAnyAll(THCState *state, THCudaByteTensor *self) {
THCAssertSameGPU(THCudaByteTensor_checkGPU(state, 1, self));
unsigned char result;
if (!THC_reduceAll(state, self,
thrust::identity<unsigned char>(),
LogicalAny(),
(unsigned char) 0, &result, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
return (int) result;
}
THC_API void
THCudaByteTensor_logicalAnd(THCState* state, THCudaByteTensor *self, THCudaByteTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCudaByteTensor_checkGPU(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<unsigned char>(),
LogicalAll(),
thrust::identity<unsigned char>(),
(unsigned char) 1,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCudaByteTensor_logicalAny(THCState* state, THCudaByteTensor *self, THCudaByteTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCudaByteTensor_checkGPU(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<unsigned char>(),
LogicalAny(),
thrust::identity<unsigned char>(),
(unsigned char) 0,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
| 678e4d5c14fe4bda6a1d7dcac26c16e015525e66.cu | #include "THCTensorMathReduce.cuh"
#include "THCTensor.hpp"
THC_API int
THCudaByteTensor_logicalAndAll(THCState *state, THCudaByteTensor *self) {
THCAssertSameGPU(THCudaByteTensor_checkGPU(state, 1, self));
unsigned char result;
if (!THC_reduceAll(state, self,
thrust::identity<unsigned char>(),
LogicalAll(),
(unsigned char) 1, &result, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
return (int) result;
}
THC_API int
THCudaByteTensor_logicalAnyAll(THCState *state, THCudaByteTensor *self) {
THCAssertSameGPU(THCudaByteTensor_checkGPU(state, 1, self));
unsigned char result;
if (!THC_reduceAll(state, self,
thrust::identity<unsigned char>(),
LogicalAny(),
(unsigned char) 0, &result, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
return (int) result;
}
THC_API void
THCudaByteTensor_logicalAnd(THCState* state, THCudaByteTensor *self, THCudaByteTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCudaByteTensor_checkGPU(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<unsigned char>(),
LogicalAll(),
thrust::identity<unsigned char>(),
(unsigned char) 1,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCudaByteTensor_logicalAny(THCState* state, THCudaByteTensor *self, THCudaByteTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCudaByteTensor_checkGPU(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<unsigned char>(),
LogicalAny(),
thrust::identity<unsigned char>(),
(unsigned char) 0,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
|
1d33212ec5feb2cca5ab608ed1b82559e443db17.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include "im2col.h"
#include "SmoothAndEdgeTerm.h"
void THNN_CudaSmoothAndEdgeTerm_updateOutput(THCState *state, THCudaTensor *input_cnn, THCudaTensor *input_edge, THCudaTensor *target_yuv, THCudaTensor *target_edge, THCudaTensor *target_edge_mask, THCudaTensor *smooth_mask_pre, THCudaTensor *smooth_mask, THCudaTensor *weight, THCudaTensor *output, float sigma_color, float sigma_space, int window_size, float lp, int isDetailEnhancement, int isStylization, int w_L2) {
long batchSize = input_cnn->size[0];
long plane = input_cnn->size[1];
long height = input_cnn->size[2];
long width = input_cnn->size[3];
THCudaTensor_resize4d(state, weight, batchSize, (window_size*2+1) * (window_size*2+1),
height, width);
THCudaTensor_fill(state, weight, 0);
THCudaTensor *input_cnn_n = THCudaTensor_new(state);
THCudaTensor *input_edge_n = THCudaTensor_new(state);
THCudaTensor *target_yuv_n = THCudaTensor_new(state);
THCudaTensor *target_edge_n = THCudaTensor_new(state);
THCudaTensor *target_edge_mask_n = THCudaTensor_new(state);
THCudaTensor *smooth_mask_pre_n = THCudaTensor_new(state);
THCudaTensor *smooth_mask_n = THCudaTensor_new(state);
THCudaTensor *weight_n = THCudaTensor_new(state);
THCudaTensor *output_n = THCudaTensor_new(state);
for (int elt = 0; elt < batchSize; elt++) {
    // Matrix multiply per output:
THCudaTensor_select(state, input_cnn_n, input_cnn, 0, elt);
THCudaTensor_select(state, input_edge_n, input_edge, 0, elt);
THCudaTensor_select(state, target_yuv_n, target_yuv, 0, elt);
THCudaTensor_select(state, target_edge_n, target_edge, 0, elt);
THCudaTensor_select(state, target_edge_mask_n, target_edge_mask, 0, elt);
THCudaTensor_select(state, smooth_mask_pre_n, smooth_mask_pre, 0, elt);
THCudaTensor_select(state, smooth_mask_n, smooth_mask, 0, elt);
THCudaTensor_select(state, weight_n, weight, 0, elt);
THCudaTensor_select(state, output_n, output, 0, elt);
SmoothAndEdgeTerm_Loss_forward(THCState_getCurrentStream(state),
THCudaTensor_data(state, input_cnn_n),
THCudaTensor_data(state, input_edge_n),
THCudaTensor_data(state, target_yuv_n),
THCudaTensor_data(state, target_edge_n),
THCudaTensor_data(state, target_edge_mask_n),
THCudaTensor_data(state, smooth_mask_pre_n),
THCudaTensor_data(state, smooth_mask_n),
THCudaTensor_data(state, weight_n),
THCudaTensor_data(state, output_n),
sigma_color, sigma_space, window_size, lp, height, width, isDetailEnhancement, isStylization, w_L2);
}
// Free
THCudaTensor_free(state, input_cnn_n);
THCudaTensor_free(state, input_edge_n);
THCudaTensor_free(state, target_yuv_n);
THCudaTensor_free(state, target_edge_n);
THCudaTensor_free(state, target_edge_mask_n);
THCudaTensor_free(state, smooth_mask_pre_n);
THCudaTensor_free(state, smooth_mask_n);
THCudaTensor_free(state, weight_n);
THCudaTensor_free(state, output_n);
}
void THNN_CudaSmoothAndEdgeTerm_updateGradInput(THCState *state, THCudaTensor *input_cnn, THCudaTensor *smooth_mask, THCudaTensor *target_edge_mask, THCudaTensor *weight, THCudaTensor *gradInput, float sigma_color, int window_size, float lp, int w_L2) {
long batchSize = input_cnn->size[0];
long plane = input_cnn->size[1];
long height = input_cnn->size[2];
long width = input_cnn->size[3];
THCudaTensor *input_cnn_n = THCudaTensor_new(state);
THCudaTensor *smooth_mask_n = THCudaTensor_new(state);
THCudaTensor *target_edge_mask_n = THCudaTensor_new(state);
THCudaTensor *weight_n = THCudaTensor_new(state);
THCudaTensor *gradInput_n = THCudaTensor_new(state);
for (int elt = 0; elt < batchSize; elt++) {
THCudaTensor_select(state, input_cnn_n, input_cnn, 0, elt);
THCudaTensor_select(state, smooth_mask_n, smooth_mask, 0, elt);
THCudaTensor_select(state, target_edge_mask_n, target_edge_mask, 0, elt);
THCudaTensor_select(state, weight_n, weight, 0, elt);
THCudaTensor_select(state, gradInput_n, gradInput, 0, elt);
SmoothAndEdgeTerm_Loss_backward(THCState_getCurrentStream(state),
THCudaTensor_data(state, input_cnn_n),
THCudaTensor_data(state, smooth_mask_n),
THCudaTensor_data(state, target_edge_mask_n),
THCudaTensor_data(state, weight_n),
THCudaTensor_data(state, gradInput_n),
sigma_color, window_size, lp, height, width, w_L2);
}
// Free
THCudaTensor_free(state, input_cnn_n);
THCudaTensor_free(state, smooth_mask_n);
THCudaTensor_free(state, target_edge_mask_n);
THCudaTensor_free(state, weight_n);
THCudaTensor_free(state, gradInput_n);
}
| 1d33212ec5feb2cca5ab608ed1b82559e443db17.cu | #include "THCUNN.h"
#include "common.h"
#include "im2col.h"
#include "SmoothAndEdgeTerm.h"
void THNN_CudaSmoothAndEdgeTerm_updateOutput(THCState *state, THCudaTensor *input_cnn, THCudaTensor *input_edge, THCudaTensor *target_yuv, THCudaTensor *target_edge, THCudaTensor *target_edge_mask, THCudaTensor *smooth_mask_pre, THCudaTensor *smooth_mask, THCudaTensor *weight, THCudaTensor *output, float sigma_color, float sigma_space, int window_size, float lp, int isDetailEnhancement, int isStylization, int w_L2) {
long batchSize = input_cnn->size[0];
long plane = input_cnn->size[1];
long height = input_cnn->size[2];
long width = input_cnn->size[3];
THCudaTensor_resize4d(state, weight, batchSize, (window_size*2+1) * (window_size*2+1),
height, width);
THCudaTensor_fill(state, weight, 0);
THCudaTensor *input_cnn_n = THCudaTensor_new(state);
THCudaTensor *input_edge_n = THCudaTensor_new(state);
THCudaTensor *target_yuv_n = THCudaTensor_new(state);
THCudaTensor *target_edge_n = THCudaTensor_new(state);
THCudaTensor *target_edge_mask_n = THCudaTensor_new(state);
THCudaTensor *smooth_mask_pre_n = THCudaTensor_new(state);
THCudaTensor *smooth_mask_n = THCudaTensor_new(state);
THCudaTensor *weight_n = THCudaTensor_new(state);
THCudaTensor *output_n = THCudaTensor_new(state);
for (int elt = 0; elt < batchSize; elt++) {
    // Matrix multiply per output:
THCudaTensor_select(state, input_cnn_n, input_cnn, 0, elt);
THCudaTensor_select(state, input_edge_n, input_edge, 0, elt);
THCudaTensor_select(state, target_yuv_n, target_yuv, 0, elt);
THCudaTensor_select(state, target_edge_n, target_edge, 0, elt);
THCudaTensor_select(state, target_edge_mask_n, target_edge_mask, 0, elt);
THCudaTensor_select(state, smooth_mask_pre_n, smooth_mask_pre, 0, elt);
THCudaTensor_select(state, smooth_mask_n, smooth_mask, 0, elt);
THCudaTensor_select(state, weight_n, weight, 0, elt);
THCudaTensor_select(state, output_n, output, 0, elt);
SmoothAndEdgeTerm_Loss_forward(THCState_getCurrentStream(state),
THCudaTensor_data(state, input_cnn_n),
THCudaTensor_data(state, input_edge_n),
THCudaTensor_data(state, target_yuv_n),
THCudaTensor_data(state, target_edge_n),
THCudaTensor_data(state, target_edge_mask_n),
THCudaTensor_data(state, smooth_mask_pre_n),
THCudaTensor_data(state, smooth_mask_n),
THCudaTensor_data(state, weight_n),
THCudaTensor_data(state, output_n),
sigma_color, sigma_space, window_size, lp, height, width, isDetailEnhancement, isStylization, w_L2);
}
// Free
THCudaTensor_free(state, input_cnn_n);
THCudaTensor_free(state, input_edge_n);
THCudaTensor_free(state, target_yuv_n);
THCudaTensor_free(state, target_edge_n);
THCudaTensor_free(state, target_edge_mask_n);
THCudaTensor_free(state, smooth_mask_pre_n);
THCudaTensor_free(state, smooth_mask_n);
THCudaTensor_free(state, weight_n);
THCudaTensor_free(state, output_n);
}
void THNN_CudaSmoothAndEdgeTerm_updateGradInput(THCState *state, THCudaTensor *input_cnn, THCudaTensor *smooth_mask, THCudaTensor *target_edge_mask, THCudaTensor *weight, THCudaTensor *gradInput, float sigma_color, int window_size, float lp, int w_L2) {
long batchSize = input_cnn->size[0];
long plane = input_cnn->size[1];
long height = input_cnn->size[2];
long width = input_cnn->size[3];
THCudaTensor *input_cnn_n = THCudaTensor_new(state);
THCudaTensor *smooth_mask_n = THCudaTensor_new(state);
THCudaTensor *target_edge_mask_n = THCudaTensor_new(state);
THCudaTensor *weight_n = THCudaTensor_new(state);
THCudaTensor *gradInput_n = THCudaTensor_new(state);
for (int elt = 0; elt < batchSize; elt++) {
THCudaTensor_select(state, input_cnn_n, input_cnn, 0, elt);
THCudaTensor_select(state, smooth_mask_n, smooth_mask, 0, elt);
THCudaTensor_select(state, target_edge_mask_n, target_edge_mask, 0, elt);
THCudaTensor_select(state, weight_n, weight, 0, elt);
THCudaTensor_select(state, gradInput_n, gradInput, 0, elt);
SmoothAndEdgeTerm_Loss_backward(THCState_getCurrentStream(state),
THCudaTensor_data(state, input_cnn_n),
THCudaTensor_data(state, smooth_mask_n),
THCudaTensor_data(state, target_edge_mask_n),
THCudaTensor_data(state, weight_n),
THCudaTensor_data(state, gradInput_n),
sigma_color, window_size, lp, height, width, w_L2);
}
// Free
THCudaTensor_free(state, input_cnn_n);
THCudaTensor_free(state, smooth_mask_n);
THCudaTensor_free(state, target_edge_mask_n);
THCudaTensor_free(state, weight_n);
THCudaTensor_free(state, gradInput_n);
}
|
ab3015443153832c6aa481e6249acf107ed87a8c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
/* Functions to be implemented: */
float ftcs_solver_gpu ( int step, int block_size_x, int block_size_y );
float ftcs_solver_gpu_shared ( int step, int block_size_x, int block_size_y );
float ftcs_solver_gpu_texture ( int step, int block_size_x, int block_size_y );
void external_heat_gpu ( int step, int block_size_x, int block_size_y );
void transfer_from_gpu( int step );
void transfer_to_gpu();
void device_allocation();
/* Prototypes for functions found at the end of this file */
void write_temp( int step );
void print_local_temps();
void init_temp_material();
void init_local_temp();
void host_allocation();
void add_time(float time);
void print_time_stats();
/*
* Physical quantities:
* k : thermal conductivity [Watt / (meter Kelvin)]
* rho : density [kg / meter^3]
* cp : specific heat capacity [kJ / (kg Kelvin)]
* rho * cp : volumetric heat capacity [Joule / (meter^3 Kelvin)]
* alpha = k / (rho*cp) : thermal diffusivity [meter^2 / second]
*
* Mercury:
* cp = 0.140, rho = 13506, k = 8.69
* alpha = 8.69 / (0.140*13506) =~ 0.0619
*
* Copper:
* cp = 0.385, rho = 8960, k = 401
 * alpha = 401.0 / (0.385 * 8960) =~ 0.116
*
* Tin:
* cp = 0.227, k = 67, rho = 7300
* alpha = 67.0 / (0.227 * 7300) =~ 0.040
*
* Aluminium:
* cp = 0.897, rho = 2700, k = 237
* alpha = 237 / (0.897 * 2700) =~ 0.098
*/
const float MERCURY = 0.0619;
const float COPPER = 0.116;
const float TIN = 0.040;
const float ALUMINIUM = 0.098;
/* Discretization: 5cm square cells, 2.5ms time intervals */
const float
h = 5e-2,
dt = 2.5e-3;
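/*
 * The material[] array filled in init_temp_material() stores the dimensionless FTCS
 * coefficient alpha * dt / (h*h) rather than alpha itself. Worked example from the constants
 * above: for copper, 0.116 * 2.5e-3 / (5e-2 * 5e-2) = 0.116 * 2.5e-3 / 2.5e-3 = 0.116.
 * The helper below is only a sketch of that conversion (it is not called by the original code):
 */
static inline float ftcs_coefficient(float alpha)
{
    return alpha * (dt / (h * h));
}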
/* Size of the computational grid - 2048x2048 square */
const int GRID_SIZE[2] = {2048, 2048};
int BORDER = 1;
/* Parameters of the simulation: how many steps, and when to cut off the heat */
const int NSTEPS = 10000;
const int CUTOFF = 5000;
/* How often to dump state to file (steps). */
const int SNAPSHOT = 500;
/* For time statistics */
float min_time = -2.0;
float max_time = -2.0;
float avg_time = 0.0;
/* Arrays for the simulation data, on host */
float
*material, // Material constants
*temperature; // Temperature field
/* Arrays for the simulation data, on device */
float
*material_device, // Material constants
*temperature_device[2]; // Temperature field, 2 arrays
// **temperature_device;
texture<float,hipTextureType2D> texreference;
/* Error handling function */
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
float timing() {
static struct timeval t;
gettimeofday(&t,NULL);
return (t.tv_sec + 1e-6 * t.tv_usec);
}
// Find global temperature array index
__device__ int ti(int x, int y,const int* GRID_SIZE){
if ( x < 0 ){
x++;
}
if( x >= GRID_SIZE[0] ){
x--;
}
if( y < 0 ){
y++;
}
if( y >= GRID_SIZE[1] ){
y--;
}
return ((y)*GRID_SIZE[0]+x);
}
// Find global material array index
__device__ int mi(int x, int y,const int GRID_SIZE_X){
return ((y)*(GRID_SIZE_X) + x);
}
// Find local temperature index for shared memory
__device__ int lti(int x, int y,const int* BLOCK_SIZE){
x++;
y++;
return ((y)*(BLOCK_SIZE[0]+2)+x);
}
/* Allocate arrays on GPU */
void device_allocation(){
// Allocate memory for material
HANDLE_ERROR(hipMalloc((void**) &material_device,sizeof(float) * GRID_SIZE[0] * GRID_SIZE[1]));
// Allocate memory for temperature
HANDLE_ERROR(hipMalloc((void**) &temperature_device[0],sizeof(float) * GRID_SIZE[0] * GRID_SIZE[1]));
HANDLE_ERROR(hipMalloc((void**) &temperature_device[1],sizeof(float) * GRID_SIZE[0] * GRID_SIZE[1]));
}
/* Transfer input to GPU */
void transfer_to_gpu(){
// Transfer material array to GPU
HANDLE_ERROR(hipMemcpy(material_device,material,sizeof(float)*GRID_SIZE[0]*GRID_SIZE[1],hipMemcpyHostToDevice));
// Transfer temperature array to GPU
HANDLE_ERROR(hipMemcpy(temperature_device[0],temperature,sizeof(float)*GRID_SIZE[0]*GRID_SIZE[1],hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(temperature_device[1],temperature,sizeof(float)*GRID_SIZE[0]*GRID_SIZE[1],hipMemcpyHostToDevice));
}
/* Transfer output from GPU to CPU */
void transfer_from_gpu(int step){
// Copy temperature from GPU -> CPU
HANDLE_ERROR(hipMemcpy(temperature,temperature_device[step%2],sizeof(float)*GRID_SIZE[0]*GRID_SIZE[1],hipMemcpyDeviceToHost));
}
// Plain/global memory only kernel
__global__ void ftcs_kernel(float* out, float* in, float* material_device, int step, int block_size_x,int block_size_y,int GRID_X,int GRID_Y ){
// Set grid size
const int GRID_SIZE[2] = {GRID_X, GRID_Y};
// Find matrix index
int x = blockIdx.x * block_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y + threadIdx.y;
// Compute ftcs for thread
out[ti(x,y,GRID_SIZE)] = in[ti(x,y,GRID_SIZE)] + material_device[mi(x,y,GRID_SIZE[0])]*
(in[ti(x+1,y,GRID_SIZE)] +
in[ti(x-1,y,GRID_SIZE)] +
in[ti(x,y+1,GRID_SIZE)] +
in[ti(x,y-1,GRID_SIZE)] -
4*in[ti(x,y,GRID_SIZE)]);
}
/* Shared memory kernel */
__global__ void ftcs_kernel_shared(float* out,float* in, float* material_device, int step, int block_size_x , int block_size_y, int GRID_X,int GRID_Y ){
// Compute grid size globally and locally
const int GRID_SIZE[2] = {GRID_X, GRID_Y};
const int BLOCK_SIZE[2] = {block_size_x,block_size_y};
// Initialize shared memory
extern __shared__ float in_shared[];
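    /* Dynamically sized at launch: the host passes (block_size_x+2) * (block_size_y+2) * sizeof(float)
     * as the dynamic shared-memory size, i.e. the thread block plus a one-cell halo on each side
     * (see lti()). */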
// Find matrix index
int x = blockIdx.x * block_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y + threadIdx.y;
// Find block matrix index
int local_x = threadIdx.x;
int local_y = threadIdx.y;
// Set halo around border in shared memory
if( local_x == 0 ){
in_shared[lti(local_x - 1,local_y,BLOCK_SIZE)] = in[ti(x - 1,y,GRID_SIZE)];
}
if( local_x == ( block_size_x - 1 ) ){
in_shared[lti(local_x + 1,local_y,BLOCK_SIZE)] = in[ti(x + 1,y,GRID_SIZE)];
}
if( local_y == 0 ){
in_shared[lti(local_x,local_y-1,BLOCK_SIZE)] = in[ti(x,y - 1,GRID_SIZE)];
}
if ( local_y == ( block_size_y - 1 ) ) {
in_shared[lti(local_x,local_y+1,BLOCK_SIZE)] = in[ti(x,y + 1,GRID_SIZE)];
}
// Set data in shared memory
in_shared[lti(local_x,local_y,BLOCK_SIZE)] = in[ti(x,y,GRID_SIZE)];
// Synch threads before reading
__syncthreads();
// Compute ftcs using shared memory
out[ti(x,y,GRID_SIZE)] = in_shared[lti(local_x,local_y,BLOCK_SIZE)] + material_device[mi(x,y,GRID_SIZE[0])]*
(in_shared[lti(local_x+1,local_y,BLOCK_SIZE)] +
in_shared[lti(local_x-1,local_y,BLOCK_SIZE)] +
in_shared[lti(local_x,local_y+1,BLOCK_SIZE)] +
in_shared[lti(local_x,local_y-1,BLOCK_SIZE)] -
4*in_shared[lti(local_x,local_y,BLOCK_SIZE)]);
}
/* Texture memory kernel */
__global__ void ftcs_kernel_texture(float* out, float* material_device,int step, int block_size_x, int block_size_y,int GRID_X,int GRID_Y ,size_t offset){
// Set grid size
const int GRID_SIZE[2] = {GRID_X, GRID_Y};
// Find linear index for x and y coordinates
int x = blockIdx.x * block_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y + threadIdx.y;
// Find indices of neighbouring elements
int x_up = x+1;
int x_down = x-1;
int y_up = y+1;
int y_down = y-1;
    // Edit border values to fit with the Neumann boundary condition
if( x_up >= GRID_SIZE[0] ){
x_up--;
}
if( x_down < 0 ){
x_down++;
}
if( y_up >= GRID_SIZE[1] ){
y_up--;
}
if( y_down < 0 ){
y_down++;
}
offset /= sizeof(float);
size_t xOffset = offset % GRID_SIZE[0];
size_t yOffset = offset / GRID_SIZE[1];
// Fetch data from texture memory
float in_origin = tex2D(texreference,xOffset + x,yOffset + y);
float in_up_x = tex2D(texreference,xOffset +x_up,yOffset + y);
float in_down_x = tex2D(texreference,xOffset +x_down,yOffset + y);
float in_up_y = tex2D(texreference,xOffset +x,yOffset + y_up);
float in_down_y = tex2D(texreference,xOffset +x,yOffset + y_down);
// Compute ftcs using texture memory
out[ti(x,y,GRID_SIZE)] = in_origin + material_device[mi(x,y,GRID_SIZE[0])]*
(in_up_x +
in_down_x +
in_up_y +
in_down_y -
4*in_origin);
}
/* External heat kernel, should do the same work as the external
* heat function in the serial code
*/
__global__ void external_heat_kernel(float* in, int step, int block_size_x, int block_size_y,int GRID_X, int GRID_Y ){
// Set grid size
const int GRID_SIZE[2] = {GRID_X, GRID_Y};
// Find linear index for x and y coordinates
int x = blockIdx.x * block_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y + threadIdx.y;
// Set value of external heat in array
if(x>= (GRID_SIZE[0]/4) && x<= (3*GRID_SIZE[0]/4) ){
if(y >= (GRID_SIZE[1]/2 - GRID_SIZE[1]/16) && y<= (GRID_SIZE[1]/2 + GRID_SIZE[1]/16)){
in[ti(x,y,GRID_SIZE)] = 100;
}
}
}
/* Set up and call ftcs_kernel
* should return the execution time of the kernel
*/
float ftcs_solver_gpu( int step, int block_size_x, int block_size_y ){
// Edit block sizes to be even numbers
if ( block_size_x % 2){
block_size_x++;
}
if (block_size_y % 2) {
block_size_y++;
}
    // Initialize timing
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Compute thread block size
dim3 gridBlock(GRID_SIZE[0]/block_size_x,GRID_SIZE[1]/block_size_y);
dim3 threadBlock(block_size_x,block_size_y);
// Find device temperature arrays
float* out = temperature_device[(step+1)%2];
float* in = temperature_device[step%2];
// Record execution time
hipEventRecord(start);
// Compute global kernel
hipLaunchKernelGGL(( ftcs_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,material_device,step,block_size_x,block_size_y,GRID_SIZE[0],GRID_SIZE[1]);
hipEventRecord(stop);
// Synch device threads
HANDLE_ERROR( hipDeviceSynchronize() );
HANDLE_ERROR( hipPeekAtLastError() );
// Stop recording
hipEventSynchronize(stop);
float milliseconds = 0;
// Compute timing
hipEventElapsedTime(&milliseconds,start,stop);
return milliseconds;
}
/* Set up and call ftcs_kernel_shared
* should return the execution time of the kernel
*/
float ftcs_solver_gpu_shared( int step, int block_size_x, int block_size_y ){
// Edit block sizes to be even numbers
if ( block_size_x % 2){
block_size_x++;
}
if (block_size_y % 2) {
block_size_y++;
}
    // Initialize timing
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Compute thread block size
dim3 gridBlock(GRID_SIZE[0]/block_size_x,GRID_SIZE[1]/block_size_y);
dim3 threadBlock(block_size_x,block_size_y);
// Compute size of shared memory
int shared_memory_size = (block_size_x+2) * (block_size_y+2) * sizeof(float);
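    /* (block_size+2)^2 floats cover the block plus its one-cell halo; e.g. a 16x16 block
     * (hypothetical size) needs 18 * 18 * 4 = 1296 bytes. */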
// Find device temperature arrays
float* out = temperature_device[(step+1)%2];
float* in = temperature_device[step%2];
// Start recording
hipEventRecord(start);
// Compute shared kernel
hipLaunchKernelGGL(( ftcs_kernel_shared), dim3(gridBlock),dim3(threadBlock),shared_memory_size, 0, out,in,material_device,step,block_size_x,block_size_y,GRID_SIZE[0],GRID_SIZE[1]);
hipEventRecord(stop);
// Synch device threads
HANDLE_ERROR( hipDeviceSynchronize() );
HANDLE_ERROR( hipPeekAtLastError() );
// Stop timing recording
hipEventSynchronize(stop);
float milliseconds = 0;
// Compute timing
hipEventElapsedTime(&milliseconds,start,stop);
return milliseconds;
}
/* Set up and call ftcs_kernel_texture
* should return the execution time of the kernel
*/
float ftcs_solver_gpu_texture( int step, int block_size_x, int block_size_y ){
// Edit block sizes to be even numbers
if ( block_size_x % 2){
block_size_x++;
}
if (block_size_y % 2) {
block_size_y++;
}
    // Initialize timing
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Compute thread block size
dim3 gridBlock(GRID_SIZE[0]/block_size_x,GRID_SIZE[1]/block_size_y);
dim3 threadBlock(block_size_x,block_size_y);
// Find device memory
float* out = temperature_device[(step+1)%2];
float* in = temperature_device[step%2];
// Create channel for texture memory
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
size_t offset;
//bind texture reference with linear memory
HANDLE_ERROR(hipBindTexture2D(&offset,texreference,
in,desc,GRID_SIZE[1],GRID_SIZE[0],sizeof(float)*GRID_SIZE[0]));
// Start recording
hipEventRecord(start);
// Compute texture kernel
hipLaunchKernelGGL(( ftcs_kernel_texture), dim3(gridBlock),dim3(threadBlock), 0, 0, out,material_device,step,block_size_x,block_size_y,GRID_SIZE[0],GRID_SIZE[1],offset);
hipEventRecord(stop);
// Synch threads
HANDLE_ERROR( hipDeviceSynchronize() );
HANDLE_ERROR( hipPeekAtLastError() );
// Stop recording
hipEventSynchronize(stop);
//Unbind texture reference
HANDLE_ERROR(hipUnbindTexture(texreference));
// Compute timing
float milliseconds = 0;
hipEventElapsedTime(&milliseconds,start,stop);
return milliseconds;
}
/* Set up and call external_heat_kernel */
void external_heat_gpu( int step, int block_size_x, int block_size_y ){
// Compute thread block size
dim3 gridBlock(GRID_SIZE[0]/block_size_x,GRID_SIZE[1]/block_size_y);
dim3 threadBlock(block_size_x,block_size_y);
// Find device temperature array
float* in = temperature_device[step%2];
// Compute external heat kernel
hipLaunchKernelGGL(( external_heat_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, in,step, block_size_x, block_size_y,GRID_SIZE[0],GRID_SIZE[1]);
// Synch threads
HANDLE_ERROR( hipDeviceSynchronize() );
HANDLE_ERROR( hipPeekAtLastError() );
}
void print_gpu_info(){
int n_devices;
hipGetDeviceCount(&n_devices);
printf("Number of CUDA devices: %d\n", n_devices);
hipDeviceProp_t device_prop;
hipGetDeviceProperties(&device_prop, 0);
printf("CUDA device name: %s\n" , device_prop.name);
printf("Compute capability: %d.%d\n", device_prop.major, device_prop.minor);
}
int main ( int argc, char **argv ){
// Parse command line arguments
int version = 0;
int block_size_x = 0;
int block_size_y = 0;
if(argc != 4){
printf("Useage: %s <version> <block_size_x> <block_size_y>\n\n<version> can be:\n0: plain\n1: shared memory\n2: texture memory\n", argv[0]);
exit(0);
}
else{
version = atoi(argv[1]);
block_size_x = atoi(argv[2]);
block_size_y = atoi(argv[3]);
}
print_gpu_info();
// Allocate and initialize data on host
host_allocation();
init_temp_material();
// Allocate arrays on device, and transfer inputs
device_allocation();
transfer_to_gpu();
// Main integration loop
for( int step=0; step<NSTEPS; step += 1 ){
if( step < CUTOFF ){
external_heat_gpu ( step, block_size_x, block_size_y );
}
float time;
        // Call selected version of ftcs solver
if(version == 2){
time = ftcs_solver_gpu_texture( step, block_size_x, block_size_y );
}
else if(version == 1){
time = ftcs_solver_gpu_shared(step, block_size_x, block_size_y);
}
else{
time = ftcs_solver_gpu(step, block_size_x, block_size_y);
}
add_time(time);
if((step % SNAPSHOT) == 0){
// Transfer output from device, and write to file
transfer_from_gpu(step);
write_temp(step);
}
}
print_time_stats();
exit ( EXIT_SUCCESS );
}
void host_allocation(){
size_t temperature_size =GRID_SIZE[0]*GRID_SIZE[1];
temperature = (float*) calloc(temperature_size, sizeof(float));
size_t material_size = (GRID_SIZE[0])*(GRID_SIZE[1]);
material = (float*) calloc(material_size, sizeof(float));
}
void init_temp_material(){
for(int x = 0; x < GRID_SIZE[0]; x++){
for(int y = 0; y < GRID_SIZE[1]; y++){
temperature[y * GRID_SIZE[0] + x] = 10.0;
}
}
for(int x = 0; x < GRID_SIZE[0]; x++){
for(int y = 0; y < GRID_SIZE[1]; y++){
temperature[y * GRID_SIZE[0] + x] = 20.0;
material[y * GRID_SIZE[0] + x] = MERCURY * (dt/(h*h));
}
}
/* Set up the two blocks of copper and tin */
for(int x=(5*GRID_SIZE[0]/8); x<(7*GRID_SIZE[0]/8); x++ ){
for(int y=(GRID_SIZE[1]/8); y<(3*GRID_SIZE[1]/8); y++ ){
material[y * GRID_SIZE[0] + x] = COPPER * (dt/(h*h));
temperature[y * GRID_SIZE[0] + x] = 60.0;
}
}
for(int x=(GRID_SIZE[0]/8); x<(GRID_SIZE[0]/2)-(GRID_SIZE[0]/8); x++ ){
for(int y=(5*GRID_SIZE[1]/8); y<(7*GRID_SIZE[1]/8); y++ ){
material[y * GRID_SIZE[0] + x] = TIN * (dt/(h*h));
temperature[y * GRID_SIZE[0] + x] = 60.0;
}
}
/* Set up the heating element in the middle */
for(int x=(GRID_SIZE[0]/4); x<=(3*GRID_SIZE[0]/4); x++){
for(int y=(GRID_SIZE[1]/2)-(GRID_SIZE[1]/16); y<=(GRID_SIZE[1]/2)+(GRID_SIZE[1]/16); y++){
material[y * GRID_SIZE[0] + x] = ALUMINIUM * (dt/(h*h));
temperature[y * GRID_SIZE[0] + x] = 100.0;
}
}
}
void add_time(float time){
avg_time += time;
if(time < min_time || min_time < -1.0){
min_time = time;
}
if(time > max_time){
max_time = time;
}
}
void print_time_stats(){
printf("Kernel execution time (min, max, avg): %f %f %f\n", min_time, max_time, avg_time/NSTEPS);
}
/* Save 24-bit bmp file; the buffer must be in bmp format: upside-down.
 * Only works for images whose dimensions are powers of two
*/
void savebmp(char *name, unsigned char *buffer, int x, int y) {
FILE *f = fopen(name, "wb");
if (!f) {
printf("Error writing image to disk.\n");
return;
}
unsigned int size = x * y * 3 + 54;
unsigned char header[54] = {'B', 'M',
size&255,
(size >> 8)&255,
(size >> 16)&255,
size >> 24,
0, 0, 0, 0, 54, 0, 0, 0, 40, 0, 0, 0, x&255, x >> 8, 0,
0, y&255, y >> 8, 0, 0, 1, 0, 24, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
fwrite(header, 1, 54, f);
fwrite(buffer, 1, GRID_SIZE[0] * GRID_SIZE[1] * 3, f);
fclose(f);
}
void fancycolour(unsigned char *p, float temp) {
if(temp <= 25){
p[2] = 0;
p[1] = (unsigned char)((temp/25)*255);
p[0] = 255;
}
else if (temp <= 50){
p[2] = 0;
p[1] = 255;
p[0] = 255 - (unsigned char)(((temp-25)/25) * 255);
}
else if (temp <= 75){
p[2] = (unsigned char)(255* (temp-50)/25);
p[1] = 255;
p[0] = 0;
}
else{
p[2] = 255;
p[1] = 255 -(unsigned char)(255* (temp-75)/25) ;
p[0] = 0;
}
}
/* Create nice image from iteration counts. take care to create it upside down (bmp format) */
void output(char* filename){
unsigned char *buffer = (unsigned char*)calloc(GRID_SIZE[0] * GRID_SIZE[1]* 3, 1);
for (int j = 0; j < GRID_SIZE[1]; j++) {
for (int i = 0; i < GRID_SIZE[0]; i++) {
int p = ((GRID_SIZE[1] - j - 1) * GRID_SIZE[0] + i) * 3;
fancycolour(buffer + p, temperature[j*GRID_SIZE[0] + i]);
}
}
/* write image to disk */
savebmp(filename, buffer, GRID_SIZE[0], GRID_SIZE[1]);
free(buffer);
}
void write_temp (int step ){
char filename[15];
sprintf ( filename, "data/%.4d.bmp", step/SNAPSHOT );
output ( filename );
printf ( "Snapshot at step %d\n", step );
}
| ab3015443153832c6aa481e6249acf107ed87a8c.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <sys/time.h>
/* Functions to be implemented: */
float ftcs_solver_gpu ( int step, int block_size_x, int block_size_y );
float ftcs_solver_gpu_shared ( int step, int block_size_x, int block_size_y );
float ftcs_solver_gpu_texture ( int step, int block_size_x, int block_size_y );
void external_heat_gpu ( int step, int block_size_x, int block_size_y );
void transfer_from_gpu( int step );
void transfer_to_gpu();
void device_allocation();
/* Prototypes for functions found at the end of this file */
void write_temp( int step );
void print_local_temps();
void init_temp_material();
void init_local_temp();
void host_allocation();
void add_time(float time);
void print_time_stats();
/*
* Physical quantities:
* k : thermal conductivity [Watt / (meter Kelvin)]
* rho : density [kg / meter^3]
* cp : specific heat capacity [kJ / (kg Kelvin)]
* rho * cp : volumetric heat capacity [Joule / (meter^3 Kelvin)]
* alpha = k / (rho*cp) : thermal diffusivity [meter^2 / second]
*
* Mercury:
* cp = 0.140, rho = 13506, k = 8.69
* alpha = 8.69 / (0.140*13506) =~ 0.0619
*
* Copper:
* cp = 0.385, rho = 8960, k = 401
 * alpha = 401.0 / (0.385 * 8960) =~ 0.116
*
* Tin:
* cp = 0.227, k = 67, rho = 7300
* alpha = 67.0 / (0.227 * 7300) =~ 0.040
*
* Aluminium:
* cp = 0.897, rho = 2700, k = 237
* alpha = 237 / (0.897 * 2700) =~ 0.098
*/
const float MERCURY = 0.0619;
const float COPPER = 0.116;
const float TIN = 0.040;
const float ALUMINIUM = 0.098;
/* Discretization: 5cm square cells, 2.5ms time intervals */
const float
h = 5e-2,
dt = 2.5e-3;
/* Size of the computational grid - 2048x2048 square */
const int GRID_SIZE[2] = {2048, 2048};
int BORDER = 1;
/* Parameters of the simulation: how many steps, and when to cut off the heat */
const int NSTEPS = 10000;
const int CUTOFF = 5000;
/* How often to dump state to file (steps). */
const int SNAPSHOT = 500;
/* For time statistics */
float min_time = -2.0;
float max_time = -2.0;
float avg_time = 0.0;
/* Arrays for the simulation data, on host */
float
*material, // Material constants
*temperature; // Temperature field
/* Arrays for the simulation data, on device */
float
*material_device, // Material constants
*temperature_device[2]; // Temperature field, 2 arrays
// **temperature_device;
texture<float,cudaTextureType2D> texreference;
/* Error handling function */
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
float timing() {
static struct timeval t;
gettimeofday(&t,NULL);
return (t.tv_sec + 1e-6 * t.tv_usec);
}
// Find global temperature array index
__device__ int ti(int x, int y,const int* GRID_SIZE){
if ( x < 0 ){
x++;
}
if( x >= GRID_SIZE[0] ){
x--;
}
if( y < 0 ){
y++;
}
if( y >= GRID_SIZE[1] ){
y--;
}
return ((y)*GRID_SIZE[0]+x);
}
// Find global material array index
__device__ int mi(int x, int y,const int GRID_SIZE_X){
return ((y)*(GRID_SIZE_X) + x);
}
// Find local temperature index for shared memory
__device__ int lti(int x, int y,const int* BLOCK_SIZE){
x++;
y++;
return ((y)*(BLOCK_SIZE[0]+2)+x);
}
/* Allocate arrays on GPU */
void device_allocation(){
// Allocate memory for material
HANDLE_ERROR(cudaMalloc((void**) &material_device,sizeof(float) * GRID_SIZE[0] * GRID_SIZE[1]));
// Allocate memory for temperature
HANDLE_ERROR(cudaMalloc((void**) &temperature_device[0],sizeof(float) * GRID_SIZE[0] * GRID_SIZE[1]));
HANDLE_ERROR(cudaMalloc((void**) &temperature_device[1],sizeof(float) * GRID_SIZE[0] * GRID_SIZE[1]));
}
/* Transfer input to GPU */
void transfer_to_gpu(){
// Transfer material array to GPU
HANDLE_ERROR(cudaMemcpy(material_device,material,sizeof(float)*GRID_SIZE[0]*GRID_SIZE[1],cudaMemcpyHostToDevice));
// Transfer temperature array to GPU
HANDLE_ERROR(cudaMemcpy(temperature_device[0],temperature,sizeof(float)*GRID_SIZE[0]*GRID_SIZE[1],cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(temperature_device[1],temperature,sizeof(float)*GRID_SIZE[0]*GRID_SIZE[1],cudaMemcpyHostToDevice));
}
/* Transfer output from GPU to CPU */
void transfer_from_gpu(int step){
// Copy temperature from GPU -> CPU
HANDLE_ERROR(cudaMemcpy(temperature,temperature_device[step%2],sizeof(float)*GRID_SIZE[0]*GRID_SIZE[1],cudaMemcpyDeviceToHost));
}
// Plain/global memory only kernel
__global__ void ftcs_kernel(float* out, float* in, float* material_device, int step, int block_size_x,int block_size_y,int GRID_X,int GRID_Y ){
// Set grid size
const int GRID_SIZE[2] = {GRID_X, GRID_Y};
// Find matrix index
int x = blockIdx.x * block_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y + threadIdx.y;
// Compute ftcs for thread
out[ti(x,y,GRID_SIZE)] = in[ti(x,y,GRID_SIZE)] + material_device[mi(x,y,GRID_SIZE[0])]*
(in[ti(x+1,y,GRID_SIZE)] +
in[ti(x-1,y,GRID_SIZE)] +
in[ti(x,y+1,GRID_SIZE)] +
in[ti(x,y-1,GRID_SIZE)] -
4*in[ti(x,y,GRID_SIZE)]);
}
/* Shared memory kernel */
__global__ void ftcs_kernel_shared(float* out,float* in, float* material_device, int step, int block_size_x , int block_size_y, int GRID_X,int GRID_Y ){
// Compute grid size globally and locally
const int GRID_SIZE[2] = {GRID_X, GRID_Y};
const int BLOCK_SIZE[2] = {block_size_x,block_size_y};
// Initialize shared memory
extern __shared__ float in_shared[];
// Find matrix index
int x = blockIdx.x * block_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y + threadIdx.y;
// Find block matrix index
int local_x = threadIdx.x;
int local_y = threadIdx.y;
// Set halo around border in shared memory
if( local_x == 0 ){
in_shared[lti(local_x - 1,local_y,BLOCK_SIZE)] = in[ti(x - 1,y,GRID_SIZE)];
}
if( local_x == ( block_size_x - 1 ) ){
in_shared[lti(local_x + 1,local_y,BLOCK_SIZE)] = in[ti(x + 1,y,GRID_SIZE)];
}
if( local_y == 0 ){
in_shared[lti(local_x,local_y-1,BLOCK_SIZE)] = in[ti(x,y - 1,GRID_SIZE)];
}
if ( local_y == ( block_size_y - 1 ) ) {
in_shared[lti(local_x,local_y+1,BLOCK_SIZE)] = in[ti(x,y + 1,GRID_SIZE)];
}
// Set data in shared memory
in_shared[lti(local_x,local_y,BLOCK_SIZE)] = in[ti(x,y,GRID_SIZE)];
// Synch threads before reading
__syncthreads();
// Compute ftcs using shared memory
out[ti(x,y,GRID_SIZE)] = in_shared[lti(local_x,local_y,BLOCK_SIZE)] + material_device[mi(x,y,GRID_SIZE[0])]*
(in_shared[lti(local_x+1,local_y,BLOCK_SIZE)] +
in_shared[lti(local_x-1,local_y,BLOCK_SIZE)] +
in_shared[lti(local_x,local_y+1,BLOCK_SIZE)] +
in_shared[lti(local_x,local_y-1,BLOCK_SIZE)] -
4*in_shared[lti(local_x,local_y,BLOCK_SIZE)]);
}
/* Texture memory kernel */
__global__ void ftcs_kernel_texture(float* out, float* material_device,int step, int block_size_x, int block_size_y,int GRID_X,int GRID_Y ,size_t offset){
// Set grid size
const int GRID_SIZE[2] = {GRID_X, GRID_Y};
// Find linear index for x and y coordinates
int x = blockIdx.x * block_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y + threadIdx.y;
// Find indices of neighbouring elements
int x_up = x+1;
int x_down = x-1;
int y_up = y+1;
int y_down = y-1;
    // Edit border values to fit with the Neumann boundary condition
if( x_up >= GRID_SIZE[0] ){
x_up--;
}
if( x_down < 0 ){
x_down++;
}
if( y_up >= GRID_SIZE[1] ){
y_up--;
}
if( y_down < 0 ){
y_down++;
}
offset /= sizeof(float);
size_t xOffset = offset % GRID_SIZE[0];
size_t yOffset = offset / GRID_SIZE[1];
// Fetch data from texture memory
float in_origin = tex2D(texreference,xOffset + x,yOffset + y);
float in_up_x = tex2D(texreference,xOffset +x_up,yOffset + y);
float in_down_x = tex2D(texreference,xOffset +x_down,yOffset + y);
float in_up_y = tex2D(texreference,xOffset +x,yOffset + y_up);
float in_down_y = tex2D(texreference,xOffset +x,yOffset + y_down);
// Compute ftcs using texture memory
out[ti(x,y,GRID_SIZE)] = in_origin + material_device[mi(x,y,GRID_SIZE[0])]*
(in_up_x +
in_down_x +
in_up_y +
in_down_y -
4*in_origin);
}
/* External heat kernel, should do the same work as the external
* heat function in the serial code
*/
__global__ void external_heat_kernel(float* in, int step, int block_size_x, int block_size_y,int GRID_X, int GRID_Y ){
// Set grid size
const int GRID_SIZE[2] = {GRID_X, GRID_Y};
// Find linear index for x and y coordinates
int x = blockIdx.x * block_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y + threadIdx.y;
// Set value of external heat in array
if(x>= (GRID_SIZE[0]/4) && x<= (3*GRID_SIZE[0]/4) ){
if(y >= (GRID_SIZE[1]/2 - GRID_SIZE[1]/16) && y<= (GRID_SIZE[1]/2 + GRID_SIZE[1]/16)){
in[ti(x,y,GRID_SIZE)] = 100;
}
}
}
/* Set up and call ftcs_kernel
* should return the execution time of the kernel
*/
float ftcs_solver_gpu( int step, int block_size_x, int block_size_y ){
// Edit block sizes to be even numbers
if ( block_size_x % 2){
block_size_x++;
}
if (block_size_y % 2) {
block_size_y++;
}
    // Initialize timing
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Compute thread block size
dim3 gridBlock(GRID_SIZE[0]/block_size_x,GRID_SIZE[1]/block_size_y);
dim3 threadBlock(block_size_x,block_size_y);
// Find device temperature arrays
float* out = temperature_device[(step+1)%2];
float* in = temperature_device[step%2];
// Record execution time
cudaEventRecord(start);
// Compute global kernel
ftcs_kernel<<<gridBlock,threadBlock>>>(out,in,material_device,step,block_size_x,block_size_y,GRID_SIZE[0],GRID_SIZE[1]);
cudaEventRecord(stop);
// Synch device threads
HANDLE_ERROR( cudaDeviceSynchronize() );
HANDLE_ERROR( cudaPeekAtLastError() );
// Stop recording
cudaEventSynchronize(stop);
float milliseconds = 0;
// Compute timing
cudaEventElapsedTime(&milliseconds,start,stop);
return milliseconds;
}
/* Set up and call ftcs_kernel_shared
* should return the execution time of the kernel
*/
float ftcs_solver_gpu_shared( int step, int block_size_x, int block_size_y ){
// Edit block sizes to be even numbers
if ( block_size_x % 2){
block_size_x++;
}
if (block_size_y % 2) {
block_size_y++;
}
    // Initialize timing
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Compute thread block size
dim3 gridBlock(GRID_SIZE[0]/block_size_x,GRID_SIZE[1]/block_size_y);
dim3 threadBlock(block_size_x,block_size_y);
// Compute size of shared memory
int shared_memory_size = (block_size_x+2) * (block_size_y+2) * sizeof(float);
// Find device temperature arrays
float* out = temperature_device[(step+1)%2];
float* in = temperature_device[step%2];
// Start recording
cudaEventRecord(start);
// Compute shared kernel
ftcs_kernel_shared<<<gridBlock,threadBlock,shared_memory_size>>>(out,in,material_device,step,block_size_x,block_size_y,GRID_SIZE[0],GRID_SIZE[1]);
cudaEventRecord(stop);
// Synch device threads
HANDLE_ERROR( cudaDeviceSynchronize() );
HANDLE_ERROR( cudaPeekAtLastError() );
// Stop timing recording
cudaEventSynchronize(stop);
float milliseconds = 0;
// Compute timing
cudaEventElapsedTime(&milliseconds,start,stop);
return milliseconds;
}
/* Set up and call ftcs_kernel_texture
* should return the execution time of the kernel
*/
float ftcs_solver_gpu_texture( int step, int block_size_x, int block_size_y ){
// Edit block sizes to be even numbers
if ( block_size_x % 2){
block_size_x++;
}
if (block_size_y % 2) {
block_size_y++;
}
    // Initialize timing
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Compute thread block size
dim3 gridBlock(GRID_SIZE[0]/block_size_x,GRID_SIZE[1]/block_size_y);
dim3 threadBlock(block_size_x,block_size_y);
// Find device memory
float* out = temperature_device[(step+1)%2];
float* in = temperature_device[step%2];
// Create channel for texture memory
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
size_t offset;
//bind texture reference with linear memory
HANDLE_ERROR(cudaBindTexture2D(&offset,texreference,
in,desc,GRID_SIZE[1],GRID_SIZE[0],sizeof(float)*GRID_SIZE[0]));
// Start recording
cudaEventRecord(start);
// Compute texture kernel
ftcs_kernel_texture<<<gridBlock,threadBlock>>>(out,material_device,step,block_size_x,block_size_y,GRID_SIZE[0],GRID_SIZE[1],offset);
cudaEventRecord(stop);
// Synch threads
HANDLE_ERROR( cudaDeviceSynchronize() );
HANDLE_ERROR( cudaPeekAtLastError() );
// Stop recording
cudaEventSynchronize(stop);
//Unbind texture reference
HANDLE_ERROR(cudaUnbindTexture(texreference));
// Compute timing
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds,start,stop);
return milliseconds;
}
/* Set up and call external_heat_kernel */
void external_heat_gpu( int step, int block_size_x, int block_size_y ){
// Compute thread block size
dim3 gridBlock(GRID_SIZE[0]/block_size_x,GRID_SIZE[1]/block_size_y);
dim3 threadBlock(block_size_x,block_size_y);
// Find device temperature array
float* in = temperature_device[step%2];
// Compute external heat kernel
external_heat_kernel<<<gridBlock,threadBlock>>>(in,step, block_size_x, block_size_y,GRID_SIZE[0],GRID_SIZE[1]);
// Synch threads
HANDLE_ERROR( cudaDeviceSynchronize() );
HANDLE_ERROR( cudaPeekAtLastError() );
}
void print_gpu_info(){
int n_devices;
cudaGetDeviceCount(&n_devices);
printf("Number of CUDA devices: %d\n", n_devices);
cudaDeviceProp device_prop;
cudaGetDeviceProperties(&device_prop, 0);
printf("CUDA device name: %s\n" , device_prop.name);
printf("Compute capability: %d.%d\n", device_prop.major, device_prop.minor);
}
int main ( int argc, char **argv ){
// Parse command line arguments
int version = 0;
int block_size_x = 0;
int block_size_y = 0;
if(argc != 4){
printf("Useage: %s <version> <block_size_x> <block_size_y>\n\n<version> can be:\n0: plain\n1: shared memory\n2: texture memory\n", argv[0]);
exit(0);
}
else{
version = atoi(argv[1]);
block_size_x = atoi(argv[2]);
block_size_y = atoi(argv[3]);
}
print_gpu_info();
// Allocate and initialize data on host
host_allocation();
init_temp_material();
// Allocate arrays on device, and transfer inputs
device_allocation();
transfer_to_gpu();
// Main integration loop
for( int step=0; step<NSTEPS; step += 1 ){
if( step < CUTOFF ){
external_heat_gpu ( step, block_size_x, block_size_y );
}
float time;
        // Call selected version of ftcs solver
if(version == 2){
time = ftcs_solver_gpu_texture( step, block_size_x, block_size_y );
}
else if(version == 1){
time = ftcs_solver_gpu_shared(step, block_size_x, block_size_y);
}
else{
time = ftcs_solver_gpu(step, block_size_x, block_size_y);
}
add_time(time);
if((step % SNAPSHOT) == 0){
// Transfer output from device, and write to file
transfer_from_gpu(step);
write_temp(step);
}
}
print_time_stats();
exit ( EXIT_SUCCESS );
}
void host_allocation(){
size_t temperature_size =GRID_SIZE[0]*GRID_SIZE[1];
temperature = (float*) calloc(temperature_size, sizeof(float));
size_t material_size = (GRID_SIZE[0])*(GRID_SIZE[1]);
material = (float*) calloc(material_size, sizeof(float));
}
void init_temp_material(){
for(int x = 0; x < GRID_SIZE[0]; x++){
for(int y = 0; y < GRID_SIZE[1]; y++){
temperature[y * GRID_SIZE[0] + x] = 10.0;
}
}
for(int x = 0; x < GRID_SIZE[0]; x++){
for(int y = 0; y < GRID_SIZE[1]; y++){
temperature[y * GRID_SIZE[0] + x] = 20.0;
material[y * GRID_SIZE[0] + x] = MERCURY * (dt/(h*h));
}
}
/* Set up the two blocks of copper and tin */
for(int x=(5*GRID_SIZE[0]/8); x<(7*GRID_SIZE[0]/8); x++ ){
for(int y=(GRID_SIZE[1]/8); y<(3*GRID_SIZE[1]/8); y++ ){
material[y * GRID_SIZE[0] + x] = COPPER * (dt/(h*h));
temperature[y * GRID_SIZE[0] + x] = 60.0;
}
}
for(int x=(GRID_SIZE[0]/8); x<(GRID_SIZE[0]/2)-(GRID_SIZE[0]/8); x++ ){
for(int y=(5*GRID_SIZE[1]/8); y<(7*GRID_SIZE[1]/8); y++ ){
material[y * GRID_SIZE[0] + x] = TIN * (dt/(h*h));
temperature[y * GRID_SIZE[0] + x] = 60.0;
}
}
/* Set up the heating element in the middle */
for(int x=(GRID_SIZE[0]/4); x<=(3*GRID_SIZE[0]/4); x++){
for(int y=(GRID_SIZE[1]/2)-(GRID_SIZE[1]/16); y<=(GRID_SIZE[1]/2)+(GRID_SIZE[1]/16); y++){
material[y * GRID_SIZE[0] + x] = ALUMINIUM * (dt/(h*h));
temperature[y * GRID_SIZE[0] + x] = 100.0;
}
}
}
void add_time(float time){
avg_time += time;
if(time < min_time || min_time < -1.0){
min_time = time;
}
if(time > max_time){
max_time = time;
}
}
void print_time_stats(){
printf("Kernel execution time (min, max, avg): %f %f %f\n", min_time, max_time, avg_time/NSTEPS);
}
/* Save 24-bit bmp file; the buffer must be in bmp format: upside-down.
 * Only works for images whose dimensions are powers of two
*/
void savebmp(char *name, unsigned char *buffer, int x, int y) {
FILE *f = fopen(name, "wb");
if (!f) {
printf("Error writing image to disk.\n");
return;
}
unsigned int size = x * y * 3 + 54;
unsigned char header[54] = {'B', 'M',
size&255,
(size >> 8)&255,
(size >> 16)&255,
size >> 24,
0, 0, 0, 0, 54, 0, 0, 0, 40, 0, 0, 0, x&255, x >> 8, 0,
0, y&255, y >> 8, 0, 0, 1, 0, 24, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
fwrite(header, 1, 54, f);
fwrite(buffer, 1, GRID_SIZE[0] * GRID_SIZE[1] * 3, f);
fclose(f);
}
void fancycolour(unsigned char *p, float temp) {
if(temp <= 25){
p[2] = 0;
p[1] = (unsigned char)((temp/25)*255);
p[0] = 255;
}
else if (temp <= 50){
p[2] = 0;
p[1] = 255;
p[0] = 255 - (unsigned char)(((temp-25)/25) * 255);
}
else if (temp <= 75){
p[2] = (unsigned char)(255* (temp-50)/25);
p[1] = 255;
p[0] = 0;
}
else{
p[2] = 255;
p[1] = 255 -(unsigned char)(255* (temp-75)/25) ;
p[0] = 0;
}
}
/* Create nice image from iteration counts. take care to create it upside down (bmp format) */
void output(char* filename){
unsigned char *buffer = (unsigned char*)calloc(GRID_SIZE[0] * GRID_SIZE[1]* 3, 1);
for (int j = 0; j < GRID_SIZE[1]; j++) {
for (int i = 0; i < GRID_SIZE[0]; i++) {
int p = ((GRID_SIZE[1] - j - 1) * GRID_SIZE[0] + i) * 3;
fancycolour(buffer + p, temperature[j*GRID_SIZE[0] + i]);
}
}
/* write image to disk */
savebmp(filename, buffer, GRID_SIZE[0], GRID_SIZE[1]);
free(buffer);
}
void write_temp (int step ){
char filename[15];
sprintf ( filename, "data/%.4d.bmp", step/SNAPSHOT );
output ( filename );
printf ( "Snapshot at step %d\n", step );
}
|
de72a1aded69e4e927f39cfc89315c37ed08431a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Host-side code to perform counting sort
*
* Author: Naga Kandasamy
* Date modified: March 2, 2021
*
* Student name(s): Dinh Nguyen, Tri Pham, Manh Cuong Phi
* Date modified: 03/14/2021
*
* Compile as follows: make clean && make
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include "counting_sort_kernel.cu"
/* Do not change the range value */
#define MIN_VALUE 0
#define MAX_VALUE 255
#define THREAD_BLOCK_SIZE 256
#define NUM_BLOCKS 4
#define HISTOGRAM_SIZE 256 /* Histogram has 256 bins */
/* Uncomment to spit out debug info */
// #define DEBUG
extern "C" int counting_sort_gold(int *, int *, int, int);
int rand_int(int, int);
void print_array(int *, int);
void print_min_and_max_in_array(int *, int);
void compute_on_device(int *, int *, int, int);
int check_if_sorted(int *, int);
int compare_results(int *, int *, int);
void check_for_error(const char *);
int main(int argc, char **argv)
{
if (argc < 2) {
printf("Usage: %s num-elements\n", argv[0]);
exit(EXIT_FAILURE);
}
int num_elements = atoi(argv[1]);
int range = MAX_VALUE - MIN_VALUE;
int *input_array, *sorted_array_reference, *sorted_array_d;
/* Populate input array with random integers between [0, RANGE] */
printf("Generating input array with %d elements in the range 0 to %d\n", num_elements, range);
input_array = (int *)malloc(num_elements * sizeof(int));
if (input_array == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
srand(time(NULL));
int i;
for (i = 0; i < num_elements; i++)
input_array[i] = rand_int (MIN_VALUE, MAX_VALUE);
#ifdef DEBUG
print_array(input_array, num_elements);
print_min_and_max_in_array(input_array, num_elements);
#endif
struct timeval start, stop;
/* Sort elements in input array using reference implementation.
* The result is placed in sorted_array_reference. */
printf("\nSorting array on CPU\n");
int status;
sorted_array_reference = (int *)malloc(num_elements * sizeof(int));
if (sorted_array_reference == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
memset(sorted_array_reference, 0, num_elements * sizeof(int));
gettimeofday(&start, NULL);
status = counting_sort_gold(input_array, sorted_array_reference, num_elements, range);
gettimeofday(&stop, NULL);
if (status == -1) {
exit(EXIT_FAILURE);
}
status = check_if_sorted(sorted_array_reference, num_elements);
if (status == -1) {
printf("Error sorting the input array using the reference code\n");
exit(EXIT_FAILURE);
}
printf("Counting sort was successful on the CPU\n");
fprintf(stderr, "CPU Execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec +(stop.tv_usec - start.tv_usec)/(float)1000000));
#ifdef DEBUG
print_array(sorted_array_reference, num_elements);
#endif
/* Sort elements in the array in parallel fashion on the GPU.
 * The result is placed in sorted_array_d. */
printf("\nSorting array on GPU\n");
sorted_array_d = (int *)malloc(num_elements * sizeof(int));
if (sorted_array_d == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
memset(sorted_array_d, 0, num_elements * sizeof(int));
compute_on_device(input_array, sorted_array_d, num_elements, range);
#ifdef DEBUG
print_array(sorted_array_d, num_elements);
#endif
/* Check the two results for correctness */
printf("\nComparing CPU and GPU results\n");
status = compare_results(sorted_array_reference, sorted_array_d, num_elements);
if (status == 0)
printf("Test passed\n");
else
printf("Test failed\n");
exit(EXIT_SUCCESS);
}
/* GPU implementation of counting sort */
void compute_on_device(int *input_array, int *sorted_array, int num_elements, int range)
{
struct timeval start, stop;
int *input_array_on_device = NULL;
int *sorted_array_on_device = NULL;
int *prefix_array = (int *)malloc(HISTOGRAM_SIZE * sizeof(int));
int *prefix_array_on_device = NULL;
/* Set up the execution grid on GPU */
dim3 thread_block(THREAD_BLOCK_SIZE, 1);
dim3 grid(NUM_BLOCKS,1);
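/* Fixed launch geometry: NUM_BLOCKS (4) blocks of THREAD_BLOCK_SIZE (256) threads;
 * the block size matches HISTOGRAM_SIZE (256 bins). */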
/* Allocate space on GPU for input data */
hipMalloc((void**)&input_array_on_device, num_elements * sizeof(int));
hipMemcpy(input_array_on_device, input_array, num_elements * sizeof(int), hipMemcpyHostToDevice);
/* Allocate space on GPU and initialize contents to zero */
hipMalloc((void**)&sorted_array_on_device, num_elements * sizeof(int));
hipMemset(sorted_array_on_device, 0, num_elements * sizeof(int));
hipMalloc((void**)&prefix_array_on_device, HISTOGRAM_SIZE * sizeof(int));
hipMemset(prefix_array_on_device, 0, HISTOGRAM_SIZE * sizeof(int));
gettimeofday(&start, NULL);
// Launch kernel to find prefix array
hipLaunchKernelGGL(( find_prefix_kernel), dim3(grid), dim3(thread_block), 0, 0, input_array_on_device, prefix_array_on_device, num_elements, range);
hipDeviceSynchronize();
// Launch kernel to form sorted array using the prefix array as input
hipLaunchKernelGGL(( counting_sort_kernel), dim3(grid),dim3(thread_block), 0, 0, prefix_array_on_device, sorted_array_on_device, num_elements, range);
hipDeviceSynchronize();
gettimeofday(&stop, NULL);
fprintf(stderr, "GPU Execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec +(stop.tv_usec - start.tv_usec)/(float)1000000));
/* Copy result back from GPU */
hipMemcpy(sorted_array, sorted_array_on_device, num_elements * sizeof(int), hipMemcpyDeviceToHost);
check_for_error("KERNEL FAILURE");
/* Free memory */
hipFree(input_array_on_device);
hipFree(sorted_array_on_device);
hipFree(prefix_array_on_device);
free(prefix_array);
return;
}
/* Check for errors during kernel execution */
void check_for_error(const char *msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
printf("CUDA ERROR: %s (%s)\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* Check if array is sorted */
int check_if_sorted(int *array, int num_elements)
{
int status = 0;
int i;
for (i = 1; i < num_elements; i++) {
if (array[i - 1] > array[i]) {
status = -1;
break;
}
}
return status;
}
/* Check if the two arrays' elements are identical */
int compare_results(int *array_1, int *array_2, int num_elements)
{
int status = 0;
int i;
for (i = 0; i < num_elements; i++) {
if (array_1[i] != array_2[i]) {
status = -1;
break;
}
}
return status;
}
/* Return random integer between [min, max] */
int rand_int(int min, int max)
{
float r = rand()/(float)RAND_MAX;
return (int)floorf(min + (max - min) * r);
}
/* Print given array */
void print_array(int *this_array, int num_elements)
{
printf("Array: ");
int i;
for (i = 0; i < num_elements; i++)
printf("%d ", this_array[i]);
printf("\n");
return;
}
/* Find and print min and max values in given array */
void print_min_and_max_in_array(int *this_array, int num_elements)
{
int i;
int current_min = INT_MAX;
for (i = 0; i < num_elements; i++)
if (this_array[i] < current_min)
current_min = this_array[i];
int current_max = INT_MIN;
for (i = 0; i < num_elements; i++)
if (this_array[i] > current_max)
current_max = this_array[i];
printf("Minimum value in the array = %d\n", current_min);
printf("Maximum value in the array = %d\n", current_max);
return;
}
| de72a1aded69e4e927f39cfc89315c37ed08431a.cu | /* Host-side code to perform counting sort
*
* Author: Naga Kandasamy
* Date modified: March 2, 2021
*
* Student name(s): Dinh Nguyen, Tri Pham, Manh Cuong Phi
* Date modified: 03/14/2021
*
* Compile as follows: make clean && make
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include "counting_sort_kernel.cu"
/* Do not change the range value */
#define MIN_VALUE 0
#define MAX_VALUE 255
#define THREAD_BLOCK_SIZE 256
#define NUM_BLOCKS 4
#define HISTOGRAM_SIZE 256 /* Histogram has 256 bins */
/* Uncomment to spit out debug info */
// #define DEBUG
extern "C" int counting_sort_gold(int *, int *, int, int);
int rand_int(int, int);
void print_array(int *, int);
void print_min_and_max_in_array(int *, int);
void compute_on_device(int *, int *, int, int);
int check_if_sorted(int *, int);
int compare_results(int *, int *, int);
void check_for_error(const char *);
int main(int argc, char **argv)
{
if (argc < 2) {
printf("Usage: %s num-elements\n", argv[0]);
exit(EXIT_FAILURE);
}
int num_elements = atoi(argv[1]);
int range = MAX_VALUE - MIN_VALUE;
int *input_array, *sorted_array_reference, *sorted_array_d;
/* Populate input array with random integers between [0, RANGE] */
printf("Generating input array with %d elements in the range 0 to %d\n", num_elements, range);
input_array = (int *)malloc(num_elements * sizeof(int));
if (input_array == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
srand(time(NULL));
int i;
for (i = 0; i < num_elements; i++)
input_array[i] = rand_int (MIN_VALUE, MAX_VALUE);
#ifdef DEBUG
print_array(input_array, num_elements);
print_min_and_max_in_array(input_array, num_elements);
#endif
struct timeval start, stop;
/* Sort elements in input array using reference implementation.
* The result is placed in sorted_array_reference. */
printf("\nSorting array on CPU\n");
int status;
sorted_array_reference = (int *)malloc(num_elements * sizeof(int));
if (sorted_array_reference == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
memset(sorted_array_reference, 0, num_elements * sizeof(int));
gettimeofday(&start, NULL);
status = counting_sort_gold(input_array, sorted_array_reference, num_elements, range);
gettimeofday(&stop, NULL);
if (status == -1) {
exit(EXIT_FAILURE);
}
status = check_if_sorted(sorted_array_reference, num_elements);
if (status == -1) {
printf("Error sorting the input array using the reference code\n");
exit(EXIT_FAILURE);
}
printf("Counting sort was successful on the CPU\n");
fprintf(stderr, "CPU Execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec +(stop.tv_usec - start.tv_usec)/(float)1000000));
#ifdef DEBUG
print_array(sorted_array_reference, num_elements);
#endif
/* Sort elements in the array in parallel fashion on the GPU.
 * The result is placed in sorted_array_d. */
printf("\nSorting array on GPU\n");
sorted_array_d = (int *)malloc(num_elements * sizeof(int));
if (sorted_array_d == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
memset(sorted_array_d, 0, num_elements * sizeof(int));
compute_on_device(input_array, sorted_array_d, num_elements, range);
#ifdef DEBUG
print_array(sorted_array_d, num_elements);
#endif
/* Check the two results for correctness */
printf("\nComparing CPU and GPU results\n");
status = compare_results(sorted_array_reference, sorted_array_d, num_elements);
if (status == 0)
printf("Test passed\n");
else
printf("Test failed\n");
exit(EXIT_SUCCESS);
}
/* GPU implementation of counting sort */
void compute_on_device(int *input_array, int *sorted_array, int num_elements, int range)
{
struct timeval start, stop;
int *input_array_on_device = NULL;
int *sorted_array_on_device = NULL;
int *prefix_array = (int *)malloc(HISTOGRAM_SIZE * sizeof(int));
int *prefix_array_on_device = NULL;
/* Set up the execution grid on GPU */
dim3 thread_block(THREAD_BLOCK_SIZE, 1);
dim3 grid(NUM_BLOCKS,1);
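/* Fixed launch geometry: NUM_BLOCKS (4) blocks of THREAD_BLOCK_SIZE (256) threads;
 * the block size matches HISTOGRAM_SIZE (256 bins). */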
/* Allocate space on GPU for input data */
cudaMalloc((void**)&input_array_on_device, num_elements * sizeof(int));
cudaMemcpy(input_array_on_device, input_array, num_elements * sizeof(int), cudaMemcpyHostToDevice);
/* Allocate space on GPU and initialize contents to zero */
cudaMalloc((void**)&sorted_array_on_device, num_elements * sizeof(int));
cudaMemset(sorted_array_on_device, 0, num_elements * sizeof(int));
cudaMalloc((void**)&prefix_array_on_device, HISTOGRAM_SIZE * sizeof(int));
cudaMemset(prefix_array_on_device, 0, HISTOGRAM_SIZE * sizeof(int));
gettimeofday(&start, NULL);
// Launch kernel to find prefix array
find_prefix_kernel<<<grid, thread_block>>>(input_array_on_device, prefix_array_on_device, num_elements, range);
cudaDeviceSynchronize();
// Launch kernel to form sorted array using the prefix array as input
counting_sort_kernel<<<grid,thread_block>>>(prefix_array_on_device, sorted_array_on_device, num_elements, range);
cudaDeviceSynchronize();
gettimeofday(&stop, NULL);
fprintf(stderr, "GPU Execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec +(stop.tv_usec - start.tv_usec)/(float)1000000));
/* Copy result back from GPU */
cudaMemcpy(sorted_array, sorted_array_on_device, num_elements * sizeof(int), cudaMemcpyDeviceToHost);
check_for_error("KERNEL FAILURE");
/* Free memory */
cudaFree(input_array_on_device);
cudaFree(sorted_array_on_device);
cudaFree(prefix_array_on_device);
free(prefix_array);
return;
}
/* Check for errors during kernel execution */
void check_for_error(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
printf("CUDA ERROR: %s (%s)\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* Check if array is sorted */
int check_if_sorted(int *array, int num_elements)
{
int status = 0;
int i;
for (i = 1; i < num_elements; i++) {
if (array[i - 1] > array[i]) {
status = -1;
break;
}
}
return status;
}
/* Check if the two arrays' elements are identical */
int compare_results(int *array_1, int *array_2, int num_elements)
{
int status = 0;
int i;
for (i = 0; i < num_elements; i++) {
if (array_1[i] != array_2[i]) {
status = -1;
break;
}
}
return status;
}
/* Return random integer between [min, max] */
int rand_int(int min, int max)
{
float r = rand()/(float)RAND_MAX;
return (int)floorf(min + (max - min) * r);
}
/* Print given array */
void print_array(int *this_array, int num_elements)
{
printf("Array: ");
int i;
for (i = 0; i < num_elements; i++)
printf("%d ", this_array[i]);
printf("\n");
return;
}
/* Find and print min and max values in given array */
void print_min_and_max_in_array(int *this_array, int num_elements)
{
int i;
int current_min = INT_MAX;
for (i = 0; i < num_elements; i++)
if (this_array[i] < current_min)
current_min = this_array[i];
int current_max = INT_MIN;
for (i = 0; i < num_elements; i++)
if (this_array[i] > current_max)
current_max = this_array[i];
printf("Minimum value in the array = %d\n", current_min);
printf("Maximum value in the array = %d\n", current_max);
return;
}
|
66827f0c29ee528b3ab099ede67d8b5b4a35e19f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <string.h>
#include <math.h>
#define N 8 //Rows
#define M 8 //Columns
__global__ void multMatrices(float *c, float *a, float *b){ //Kernel, the jump to the GPU. This function is executed by every thread at the same time.
int ix = (blockIdx.y*blockDim.y+threadIdx.y)*N+(blockIdx.x*blockDim.x+threadIdx.x);
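// ix is the row-major linear index of the output element handled by this thread:
// row = blockIdx.y*blockDim.y + threadIdx.y, column = blockIdx.x*blockDim.x + threadIdx.x.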
if(ix<N*M) {
float adder = 0;
for(int i=0;i<N;++i)
adder+= (float) a[(blockIdx.y*blockDim.y+threadIdx.y)*N+(i)]*b[(i)*N+(blockIdx.x*blockDim.x+threadIdx.x)];
c[ix]=adder;
}
}
int main() {
int memsize = sizeof(float )*N*M;
float *h_a,*h_b,*h_c; //Arrays on the host (CPU & RAM)
h_a=(float *)malloc(memsize);
h_b=(float *)malloc(memsize);
h_c=(float *)malloc(memsize);
for(int i=0; i<N*M; ++i)
h_a[i]=h_b[i]=h_c[i]=(float) 1.0f;
float *d_a,*d_b,*d_c; //Arrays on the GPU
hipMalloc(&d_a, memsize);
hipMalloc(&d_b, memsize);
hipMalloc(&d_c, memsize);
hipMemcpy(d_a, h_a, memsize, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, memsize, hipMemcpyHostToDevice);
hipMemcpy(d_c, h_c, memsize, hipMemcpyHostToDevice);
dim3 block(4,4);
dim3 thread(2,2);
printf("El numero de bloques es %d, y el numero de hilos es %d\n", block.x, thread.x);
hipLaunchKernelGGL(( multMatrices) , dim3(block),dim3(thread), 0, 0, d_c, d_a, d_b);//El multiplicar ambos numeros tiene que darme N
hipMemcpy(h_c, d_c, memsize, hipMemcpyDeviceToHost);
printf("Resultado multiplicacion de matrices: \n");
for(int i=0; i<N*M; ++i){
printf("%f, ", h_c[i]);
if(i!=0 && i%N==(N-1))
printf("\n");
}
printf("\n");
free(h_a);
free(h_b);
free(h_c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 66827f0c29ee528b3ab099ede67d8b5b4a35e19f.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <string.h>
#include <math.h>
#define N 8 //Rows
#define M 8 //Columns
__global__ void multMatrices(float *c, float *a, float *b){ //Kernel, the jump to the GPU. This function is executed by every thread at the same time.
int ix = (blockIdx.y*blockDim.y+threadIdx.y)*N+(blockIdx.x*blockDim.x+threadIdx.x);
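// ix is the row-major linear index of the output element handled by this thread:
// row = blockIdx.y*blockDim.y + threadIdx.y, column = blockIdx.x*blockDim.x + threadIdx.x.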
if(ix<N*M) {
float adder = 0;
for(int i=0;i<N;++i)
adder+= (float) a[(blockIdx.y*blockDim.y+threadIdx.y)*N+(i)]*b[(i)*N+(blockIdx.x*blockDim.x+threadIdx.x)];
c[ix]=adder;
}
}
int main() {
int memsize = sizeof(float )*N*M;
float *h_a,*h_b,*h_c; //Arrays on the host (CPU & RAM)
h_a=(float *)malloc(memsize);
h_b=(float *)malloc(memsize);
h_c=(float *)malloc(memsize);
for(int i=0; i<N*M; ++i)
h_a[i]=h_b[i]=h_c[i]=(float) 1.0f;
float *d_a,*d_b,*d_c; //Arrays on the GPU
cudaMalloc(&d_a, memsize);
cudaMalloc(&d_b, memsize);
cudaMalloc(&d_c, memsize);
cudaMemcpy(d_a, h_a, memsize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, memsize, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, h_c, memsize, cudaMemcpyHostToDevice);
dim3 block(4,4);
dim3 thread(2,2);
printf("El numero de bloques es %d, y el numero de hilos es %d\n", block.x, thread.x);
multMatrices <<<block,thread>>> (d_c, d_a, d_b);//El multiplicar ambos numeros tiene que darme N
cudaMemcpy(h_c, d_c, memsize, cudaMemcpyDeviceToHost);
printf("Resultado multiplicacion de matrices: \n");
for(int i=0; i<N*M; ++i){
printf("%f, ", h_c[i]);
if(i!=0 && i%N==(N-1))
printf("\n");
}
printf("\n");
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
e0ec4c201fa99c26d26a82352e51d88a617af641.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <random>
#include <type_traits>
#include <math.h>
#include <wmma_extension/wmma_extension.hpp>
#include "common.hpp"
#ifndef TEST_ARCH
#define TEST_ARCH (-1)
#endif
__device__ float myabs(const float a) {
if (a > 0) {
return a;
} else {
return -a;
}
}
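/* The test kernel stages the source matrix in shared memory, loads it into one
 * fragment with nvcuda::wmma::load_matrix_sync, fills a second fragment element by
 * element via mtk::wmma::foreach_ij, and records each thread's maximum absolute
 * difference between the two fragments in diff[]. */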
template <class Use, int M, int N, int K, class Type, class Layout, unsigned MATRIX_DIM>
__global__ void test_kernel(float* const diff, const float* const src, const unsigned ld) {
using storage_t = typename mtk::wmma::detail::common::storage_t<Type>::type;
__shared__ storage_t smem[MATRIX_DIM * MATRIX_DIM];
for (unsigned i = 0; i < MATRIX_DIM * MATRIX_DIM; i += blockDim.x) {
smem[i + threadIdx.x] = src[i + threadIdx.x];
}
nvcuda::wmma::fragment<Use, M, N, K, Type, Layout> frag_nvcuda;
nvcuda::wmma::fragment<Use, M, N, K, Type, Layout> frag_mtk;
nvcuda::wmma::load_matrix_sync(frag_nvcuda, smem, ld);
mtk::wmma::foreach_ij<decltype(frag_mtk)>(
[&](const unsigned* frag_index_list, const unsigned num_indeces, const unsigned i, const unsigned j) {
unsigned mem_index;
if (std::is_same<Layout, nvcuda::wmma::col_major>::value) {
mem_index = i + j * ld;
} else {
mem_index = i * ld + j;
}
for (unsigned f = 0; f < num_indeces; f++) {
frag_mtk.x[frag_index_list[f]] = smem[mem_index];
}
}
);
float max_diff = 0.f;
for (unsigned i = 0; i < frag_mtk.num_elements; i++) {
max_diff = max(max_diff, myabs(frag_mtk.x[i] - frag_nvcuda.x[i]));
}
diff[threadIdx.x] = max_diff;
}
template <class Use, int M, int N, int K, class Type, nvcuda::wmma::layout_t layout, unsigned MATRIX_DIM>
__global__ void test_kernel_acc(float* const diff, const float* const src, const unsigned ld) {
using storage_t = typename mtk::wmma::detail::common::storage_t<Type>::type;
__shared__ storage_t smem[MATRIX_DIM * MATRIX_DIM];
for (unsigned i = 0; i < MATRIX_DIM * MATRIX_DIM; i += blockDim.x) {
smem[i + threadIdx.x] = src[i + threadIdx.x];
}
nvcuda::wmma::fragment<Use, M, N, K, Type, void> frag_nvcuda;
nvcuda::wmma::fragment<Use, M, N, K, Type, void> frag_mtk;
nvcuda::wmma::load_matrix_sync(frag_nvcuda, smem, ld, layout);
mtk::wmma::foreach_ij<decltype(frag_mtk)>(
layout,
[&](const unsigned* frag_index_list, const unsigned num_indeces, const unsigned i, const unsigned j) {
unsigned mem_index;
if (layout == nvcuda::wmma::mem_col_major) {
mem_index = i + j * ld;
} else {
mem_index = i * ld + j;
}
for (unsigned f = 0; f < num_indeces; f++) {
frag_mtk.x[frag_index_list[f]] = smem[mem_index];
}
}
);
float max_diff = 0.f;
for (unsigned i = 0; i < frag_mtk.num_elements; i++) {
max_diff = max(max_diff, myabs(frag_mtk.x[i] - frag_nvcuda.x[i]));
}
diff[threadIdx.x] = max_diff;
}
template <class Use, int M, int N, int K, class Type, class Layout>
void test() {
constexpr unsigned MATRIX_DIM = 32;
constexpr unsigned warp_size = 32;
float* src_matrix;
float* diff;
hipHostMalloc(&src_matrix, sizeof(float) * MATRIX_DIM * MATRIX_DIM);
hipHostMalloc(&diff, sizeof(float) * warp_size);
for (unsigned i = 0; i < MATRIX_DIM * MATRIX_DIM; i++) {
src_matrix[i] = static_cast<float>(i) / (MATRIX_DIM);
}
hipLaunchKernelGGL(( test_kernel<Use, M, N, K, Type, Layout, MATRIX_DIM>), dim3(1), dim3(warp_size), 0, 0, diff, src_matrix, MATRIX_DIM);
hipDeviceSynchronize();
bool passed = true;
for (unsigned i = 0; i < warp_size; i++) {
if (diff[i] > (1.f / MATRIX_DIM / 2)) {
passed = false;
}
}
std::printf("%s{SM=%2d,Use=%15s,M=%2d,N=%2d,K=%2d,Type=%5s,Layout=%8s}:",
__FILE__,
TEST_ARCH,
mtk::test_utils::get_string<Use>().c_str(),
M, N, K,
mtk::test_utils::get_string<Type>().c_str(),
mtk::test_utils::get_string<Layout>().c_str()
);
if (passed) {
std::printf("PASSED");
} else {
std::printf("FAILED");
}
std::printf("\n");
hipHostFree(diff);
hipHostFree(src_matrix);
}
template <class Use, int M, int N, int K, class Type, class Layout>
void test_acc() {
constexpr unsigned MATRIX_DIM = 32;
constexpr unsigned warp_size = 32;
float* src_matrix;
float* diff;
hipHostMalloc(&src_matrix, sizeof(float) * MATRIX_DIM * MATRIX_DIM);
hipHostMalloc(&diff, sizeof(float) * warp_size);
for (unsigned i = 0; i < MATRIX_DIM * MATRIX_DIM; i++) {
src_matrix[i] = static_cast<float>(i) / (MATRIX_DIM);
}
if (std::is_same<Layout, nvcuda::wmma::col_major>::value) {
hipLaunchKernelGGL(( test_kernel_acc<Use, M, N, K, Type, nvcuda::wmma::mem_col_major, MATRIX_DIM>), dim3(1), dim3(warp_size), 0, 0, diff, src_matrix, MATRIX_DIM);
} else {
hipLaunchKernelGGL(( test_kernel_acc<Use, M, N, K, Type, nvcuda::wmma::mem_row_major, MATRIX_DIM>), dim3(1), dim3(warp_size), 0, 0, diff, src_matrix, MATRIX_DIM);
}
hipDeviceSynchronize();
bool passed = true;
for (unsigned i = 0; i < warp_size; i++) {
if (diff[i] > (1.f / MATRIX_DIM / 2)) {
passed = false;
}
}
std::printf("%s{SM=%2d,Use=%15s,M=%2d,N=%2d,K=%2d,Type=%5s,Layout=%8s}:",
__FILE__,
TEST_ARCH,
mtk::test_utils::get_string<Use>().c_str(),
M, N, K,
mtk::test_utils::get_string<Type>().c_str(),
mtk::test_utils::get_string<Layout>().c_str()
);
if (passed) {
std::printf("PASSED");
} else {
std::printf("FAILED");
}
std::printf("\n");
hipHostFree(diff);
hipHostFree(src_matrix);
}
int main() {
test<nvcuda::wmma::matrix_a, 16, 16, 16, half, nvcuda::wmma::row_major>();
test<nvcuda::wmma::matrix_a, 16, 16, 16, half, nvcuda::wmma::col_major>();
test<nvcuda::wmma::matrix_b, 16, 16, 16, half, nvcuda::wmma::row_major>();
test<nvcuda::wmma::matrix_b, 16, 16, 16, half, nvcuda::wmma::col_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 16, half , nvcuda::wmma::col_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 16, float, nvcuda::wmma::col_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 16, half , nvcuda::wmma::row_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 16, float, nvcuda::wmma::row_major>();
#ifdef TEST_TF32
test<nvcuda::wmma::matrix_a, 16, 16, 8, nvcuda::wmma::precision::tf32, nvcuda::wmma::row_major>();
test<nvcuda::wmma::matrix_a, 16, 16, 8, nvcuda::wmma::precision::tf32, nvcuda::wmma::col_major>();
test<nvcuda::wmma::matrix_b, 16, 16, 8, nvcuda::wmma::precision::tf32, nvcuda::wmma::row_major>();
test<nvcuda::wmma::matrix_b, 16, 16, 8, nvcuda::wmma::precision::tf32, nvcuda::wmma::col_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 8, float, nvcuda::wmma::col_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 8, float, nvcuda::wmma::row_major>();
#endif
}
| e0ec4c201fa99c26d26a82352e51d88a617af641.cu | #include <iostream>
#include <random>
#include <type_traits>
#include <math.h>
#include <wmma_extension/wmma_extension.hpp>
#include "common.hpp"
#ifndef TEST_ARCH
#define TEST_ARCH (-1)
#endif
__device__ float myabs(const float a) {
if (a > 0) {
return a;
} else {
return -a;
}
}
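/* The test kernel stages the source matrix in shared memory, loads it into one
 * fragment with nvcuda::wmma::load_matrix_sync, fills a second fragment element by
 * element via mtk::wmma::foreach_ij, and records each thread's maximum absolute
 * difference between the two fragments in diff[]. */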
template <class Use, int M, int N, int K, class Type, class Layout, unsigned MATRIX_DIM>
__global__ void test_kernel(float* const diff, const float* const src, const unsigned ld) {
using storage_t = typename mtk::wmma::detail::common::storage_t<Type>::type;
__shared__ storage_t smem[MATRIX_DIM * MATRIX_DIM];
for (unsigned i = 0; i < MATRIX_DIM * MATRIX_DIM; i += blockDim.x) {
smem[i + threadIdx.x] = src[i + threadIdx.x];
}
nvcuda::wmma::fragment<Use, M, N, K, Type, Layout> frag_nvcuda;
nvcuda::wmma::fragment<Use, M, N, K, Type, Layout> frag_mtk;
nvcuda::wmma::load_matrix_sync(frag_nvcuda, smem, ld);
mtk::wmma::foreach_ij<decltype(frag_mtk)>(
[&](const unsigned* frag_index_list, const unsigned num_indeces, const unsigned i, const unsigned j) {
unsigned mem_index;
if (std::is_same<Layout, nvcuda::wmma::col_major>::value) {
mem_index = i + j * ld;
} else {
mem_index = i * ld + j;
}
for (unsigned f = 0; f < num_indeces; f++) {
frag_mtk.x[frag_index_list[f]] = smem[mem_index];
}
}
);
float max_diff = 0.f;
for (unsigned i = 0; i < frag_mtk.num_elements; i++) {
max_diff = max(max_diff, myabs(frag_mtk.x[i] - frag_nvcuda.x[i]));
}
diff[threadIdx.x] = max_diff;
}
template <class Use, int M, int N, int K, class Type, nvcuda::wmma::layout_t layout, unsigned MATRIX_DIM>
__global__ void test_kernel_acc(float* const diff, const float* const src, const unsigned ld) {
using storage_t = typename mtk::wmma::detail::common::storage_t<Type>::type;
__shared__ storage_t smem[MATRIX_DIM * MATRIX_DIM];
for (unsigned i = 0; i < MATRIX_DIM * MATRIX_DIM; i += blockDim.x) {
smem[i + threadIdx.x] = src[i + threadIdx.x];
}
nvcuda::wmma::fragment<Use, M, N, K, Type, void> frag_nvcuda;
nvcuda::wmma::fragment<Use, M, N, K, Type, void> frag_mtk;
nvcuda::wmma::load_matrix_sync(frag_nvcuda, smem, ld, layout);
mtk::wmma::foreach_ij<decltype(frag_mtk)>(
layout,
[&](const unsigned* frag_index_list, const unsigned num_indeces, const unsigned i, const unsigned j) {
unsigned mem_index;
if (layout == nvcuda::wmma::mem_col_major) {
mem_index = i + j * ld;
} else {
mem_index = i * ld + j;
}
for (unsigned f = 0; f < num_indeces; f++) {
frag_mtk.x[frag_index_list[f]] = smem[mem_index];
}
}
);
float max_diff = 0.f;
for (unsigned i = 0; i < frag_mtk.num_elements; i++) {
max_diff = max(max_diff, myabs(frag_mtk.x[i] - frag_nvcuda.x[i]));
}
diff[threadIdx.x] = max_diff;
}
template <class Use, int M, int N, int K, class Type, class Layout>
void test() {
constexpr unsigned MATRIX_DIM = 32;
constexpr unsigned warp_size = 32;
float* src_matrix;
float* diff;
cudaMallocHost(&src_matrix, sizeof(float) * MATRIX_DIM * MATRIX_DIM);
cudaMallocHost(&diff, sizeof(float) * warp_size);
for (unsigned i = 0; i < MATRIX_DIM * MATRIX_DIM; i++) {
src_matrix[i] = static_cast<float>(i) / (MATRIX_DIM);
}
test_kernel<Use, M, N, K, Type, Layout, MATRIX_DIM><<<1, warp_size>>>(diff, src_matrix, MATRIX_DIM);
cudaDeviceSynchronize();
bool passed = true;
for (unsigned i = 0; i < warp_size; i++) {
if (diff[i] > (1.f / MATRIX_DIM / 2)) {
passed = false;
}
}
std::printf("%s{SM=%2d,Use=%15s,M=%2d,N=%2d,K=%2d,Type=%5s,Layout=%8s}:",
__FILE__,
TEST_ARCH,
mtk::test_utils::get_string<Use>().c_str(),
M, N, K,
mtk::test_utils::get_string<Type>().c_str(),
mtk::test_utils::get_string<Layout>().c_str()
);
if (passed) {
std::printf("PASSED");
} else {
std::printf("FAILED");
}
std::printf("\n");
cudaFreeHost(diff);
cudaFreeHost(src_matrix);
}
template <class Use, int M, int N, int K, class Type, class Layout>
void test_acc() {
constexpr unsigned MATRIX_DIM = 32;
constexpr unsigned warp_size = 32;
float* src_matrix;
float* diff;
cudaMallocHost(&src_matrix, sizeof(float) * MATRIX_DIM * MATRIX_DIM);
cudaMallocHost(&diff, sizeof(float) * warp_size);
for (unsigned i = 0; i < MATRIX_DIM * MATRIX_DIM; i++) {
src_matrix[i] = static_cast<float>(i) / (MATRIX_DIM);
}
if (std::is_same<Layout, nvcuda::wmma::col_major>::value) {
test_kernel_acc<Use, M, N, K, Type, nvcuda::wmma::mem_col_major, MATRIX_DIM><<<1, warp_size>>>(diff, src_matrix, MATRIX_DIM);
} else {
test_kernel_acc<Use, M, N, K, Type, nvcuda::wmma::mem_row_major, MATRIX_DIM><<<1, warp_size>>>(diff, src_matrix, MATRIX_DIM);
}
cudaDeviceSynchronize();
bool passed = true;
for (unsigned i = 0; i < warp_size; i++) {
if (diff[i] > (1.f / MATRIX_DIM / 2)) {
passed = false;
}
}
std::printf("%s{SM=%2d,Use=%15s,M=%2d,N=%2d,K=%2d,Type=%5s,Layout=%8s}:",
__FILE__,
TEST_ARCH,
mtk::test_utils::get_string<Use>().c_str(),
M, N, K,
mtk::test_utils::get_string<Type>().c_str(),
mtk::test_utils::get_string<Layout>().c_str()
);
if (passed) {
std::printf("PASSED");
} else {
std::printf("FAILED");
}
std::printf("\n");
cudaFreeHost(diff);
cudaFreeHost(src_matrix);
}
int main() {
test<nvcuda::wmma::matrix_a, 16, 16, 16, half, nvcuda::wmma::row_major>();
test<nvcuda::wmma::matrix_a, 16, 16, 16, half, nvcuda::wmma::col_major>();
test<nvcuda::wmma::matrix_b, 16, 16, 16, half, nvcuda::wmma::row_major>();
test<nvcuda::wmma::matrix_b, 16, 16, 16, half, nvcuda::wmma::col_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 16, half , nvcuda::wmma::col_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 16, float, nvcuda::wmma::col_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 16, half , nvcuda::wmma::row_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 16, float, nvcuda::wmma::row_major>();
#ifdef TEST_TF32
test<nvcuda::wmma::matrix_a, 16, 16, 8, nvcuda::wmma::precision::tf32, nvcuda::wmma::row_major>();
test<nvcuda::wmma::matrix_a, 16, 16, 8, nvcuda::wmma::precision::tf32, nvcuda::wmma::col_major>();
test<nvcuda::wmma::matrix_b, 16, 16, 8, nvcuda::wmma::precision::tf32, nvcuda::wmma::row_major>();
test<nvcuda::wmma::matrix_b, 16, 16, 8, nvcuda::wmma::precision::tf32, nvcuda::wmma::col_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 8, float, nvcuda::wmma::col_major>();
test_acc<nvcuda::wmma::accumulator, 16, 16, 8, float, nvcuda::wmma::row_major>();
#endif
}
|
a955b78f05602dcb4c86858df94e473e72e10c43.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 1;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
| a955b78f05602dcb4c86858df94e473e72e10c43.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 1;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
|
7d5b08ece3c170a4d9115c4f86a6f0fd0c856df9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/sgd_kernel.h"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_helper.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
template <typename T, typename MT>
__global__ void SGDKernelMT(const T* param,
const T* grad,
const T* learning_rate,
const int num,
T* param_out,
const MT* master_param,
MT* master_param_out) {
MT lr = static_cast<MT>(learning_rate[0]);
CUDA_KERNEL_LOOP(i, num) {
MT p_data = master_param ? master_param[i] : static_cast<MT>(param[i]);
MT g_data = static_cast<MT>(grad[i]);
p_data = p_data - lr * g_data;
param_out[i] = static_cast<T>(p_data);
if (master_param_out) {
master_param_out[i] = p_data;
}
}
}
template <typename T>
__global__ void SparseSGDFunctorKernel(const T* selected_rows,
const int64_t* rows,
const T* learning_rate,
T* tensor_out,
int64_t row_numel,
int64_t limit) {
for (int64_t i = blockIdx.x; i < limit; i += gridDim.x) {
const T* selected_rows_ptr = selected_rows + i * row_numel;
T* tensor_out_ptr = tensor_out + rows[i] * row_numel;
for (int64_t index = threadIdx.x; index < row_numel; index += blockDim.x) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(
tensor_out_ptr + index,
-static_cast<T>(1.0) * learning_rate[0] * selected_rows_ptr[index]);
}
}
}
template <typename T, typename Context>
void SGDDenseKernel(const Context& dev_ctx,
const DenseTensor& param,
const DenseTensor& learning_rate,
const DenseTensor& grad,
const paddle::optional<DenseTensor>& master_param,
bool multi_precision,
DenseTensor* param_out,
DenseTensor* master_param_out) {
using MPDType = typename paddle::operators::details::MPTypeTrait<T>::Type;
// do check here
// if (multi_precision) {
// bool has_master =
// ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut");
// }
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision
? master_param_out->mutable_data<MPDType>(dev_ctx.GetPlace())
: nullptr;
int block = 512;
int grid = (param.numel() + block - 1) / block;
hipLaunchKernelGGL(( SGDKernelMT<T, MPDType>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
param.data<T>(),
grad.data<T>(),
learning_rate.data<T>(),
param.numel(),
param_out->mutable_data<T>(dev_ctx.GetPlace()),
master_in_data,
master_out_data);
}
template <typename T, typename Context>
void SGDDenseParamSparseGradKernel(
const Context& dev_ctx,
const DenseTensor& param,
const DenseTensor& learning_rate,
const SelectedRows& grad,
const paddle::optional<DenseTensor>& master_param,
bool multi_precision,
DenseTensor* param_out,
DenseTensor* master_param_out) {
using MPDType = typename paddle::operators::details::MPTypeTrait<T>::Type;
// do some check here
// if (multi_precision) {
// bool has_master =
// ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut");
// }
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision
? master_param_out->mutable_data<MPDType>(dev_ctx.GetPlace())
: nullptr;
PADDLE_ENFORCE_EQ(
¶m,
param_out,
phi::errors::InvalidArgument(
"The input tensor Param of SgdOp should be equal with ParamOut "
"if variable's type is SelectedRows."));
auto in_height = grad.height();
auto out_dims = param_out->dims();
PADDLE_ENFORCE_EQ(in_height,
out_dims[0],
phi::errors::InvalidArgument(
"The input tensor Grad's height of SgdOp should be "
"equal with ParamOut's dims. But received Grad's "
"height [%s] and ParamOut's dims [%s]",
in_height,
out_dims[0]));
auto& in_value = grad.value();
auto& in_rows = grad.rows();
int64_t in_row_numel = in_value.numel() / in_rows.size();
PADDLE_ENFORCE_EQ(in_row_numel,
param_out->numel() / in_height,
phi::errors::InvalidArgument(
"The in_row_numel of SgdOp should be equal with "
"param_out's numel / in_height."));
auto* in_data = in_value.data<T>();
auto* out_data = param_out->data<T>();
const int kThreadsPerBlock = 256;
int thread_x = kThreadsPerBlock;
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
int max_blocks = ::max(max_threads / kThreadsPerBlock, 1);
paddle::framework::MixVector<int64_t> mixv_in_rows(&in_rows);
hipLaunchKernelGGL(( SparseSGDFunctorKernel), dim3(max_blocks), dim3(thread_x), 0, dev_ctx.stream(),
in_data,
mixv_in_rows.CUDAData(dev_ctx.GetPlace()),
learning_rate.data<T>(),
out_data,
in_row_numel,
in_rows.size());
}
template <typename T, typename Context>
void SGDSparseParamSparseGradKernel(
const Context& dev_ctx,
const SelectedRows& param,
const DenseTensor& learning_rate,
const SelectedRows& grad,
const paddle::optional<SelectedRows>& master_param,
bool multi_precision,
SelectedRows* param_out,
SelectedRows* master_param_out) {
PADDLE_THROW("not impl");
}
} // namespace phi
PD_REGISTER_KERNEL(sgd,
GPU,
ALL_LAYOUT,
phi::SGDDenseKernel,
phi::dtype::float16,
float,
double) {}
PD_REGISTER_KERNEL(sgd_dense_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::SGDDenseParamSparseGradKernel,
phi::dtype::float16,
float,
double) {}
PD_REGISTER_KERNEL(sgd_sparse_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::SGDSparseParamSparseGradKernel,
phi::dtype::float16,
float,
double) {}
| 7d5b08ece3c170a4d9115c4f86a6f0fd0c856df9.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/sgd_kernel.h"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_helper.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
template <typename T, typename MT>
__global__ void SGDKernelMT(const T* param,
const T* grad,
const T* learning_rate,
const int num,
T* param_out,
const MT* master_param,
MT* master_param_out) {
MT lr = static_cast<MT>(learning_rate[0]);
CUDA_KERNEL_LOOP(i, num) {
MT p_data = master_param ? master_param[i] : static_cast<MT>(param[i]);
MT g_data = static_cast<MT>(grad[i]);
p_data = p_data - lr * g_data;
param_out[i] = static_cast<T>(p_data);
if (master_param_out) {
master_param_out[i] = p_data;
}
}
}
template <typename T>
__global__ void SparseSGDFunctorKernel(const T* selected_rows,
const int64_t* rows,
const T* learning_rate,
T* tensor_out,
int64_t row_numel,
int64_t limit) {
for (int64_t i = blockIdx.x; i < limit; i += gridDim.x) {
const T* selected_rows_ptr = selected_rows + i * row_numel;
T* tensor_out_ptr = tensor_out + rows[i] * row_numel;
for (int64_t index = threadIdx.x; index < row_numel; index += blockDim.x) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(
tensor_out_ptr + index,
-static_cast<T>(1.0) * learning_rate[0] * selected_rows_ptr[index]);
}
}
}
template <typename T, typename Context>
void SGDDenseKernel(const Context& dev_ctx,
const DenseTensor& param,
const DenseTensor& learning_rate,
const DenseTensor& grad,
const paddle::optional<DenseTensor>& master_param,
bool multi_precision,
DenseTensor* param_out,
DenseTensor* master_param_out) {
using MPDType = typename paddle::operators::details::MPTypeTrait<T>::Type;
// do check here
// if (multi_precision) {
// bool has_master =
// ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut");
// }
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision
? master_param_out->mutable_data<MPDType>(dev_ctx.GetPlace())
: nullptr;
int block = 512;
int grid = (param.numel() + block - 1) / block;
SGDKernelMT<T, MPDType><<<grid, block, 0, dev_ctx.stream()>>>(
param.data<T>(),
grad.data<T>(),
learning_rate.data<T>(),
param.numel(),
param_out->mutable_data<T>(dev_ctx.GetPlace()),
master_in_data,
master_out_data);
}
template <typename T, typename Context>
void SGDDenseParamSparseGradKernel(
const Context& dev_ctx,
const DenseTensor& param,
const DenseTensor& learning_rate,
const SelectedRows& grad,
const paddle::optional<DenseTensor>& master_param,
bool multi_precision,
DenseTensor* param_out,
DenseTensor* master_param_out) {
using MPDType = typename paddle::operators::details::MPTypeTrait<T>::Type;
// do some check here
// if (multi_precision) {
// bool has_master =
// ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut");
// }
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision
? master_param_out->mutable_data<MPDType>(dev_ctx.GetPlace())
: nullptr;
PADDLE_ENFORCE_EQ(
¶m,
param_out,
phi::errors::InvalidArgument(
"The input tensor Param of SgdOp should be equal with ParamOut "
"if variable's type is SelectedRows."));
auto in_height = grad.height();
auto out_dims = param_out->dims();
PADDLE_ENFORCE_EQ(in_height,
out_dims[0],
phi::errors::InvalidArgument(
"The input tensor Grad's height of SgdOp should be "
"equal with ParamOut's dims. But received Grad's "
"height [%s] and ParamOut's dims [%s]",
in_height,
out_dims[0]));
auto& in_value = grad.value();
auto& in_rows = grad.rows();
int64_t in_row_numel = in_value.numel() / in_rows.size();
PADDLE_ENFORCE_EQ(in_row_numel,
param_out->numel() / in_height,
phi::errors::InvalidArgument(
"The in_row_numel of SgdOp should be equal with "
"param_out's numel / in_height."));
auto* in_data = in_value.data<T>();
auto* out_data = param_out->data<T>();
const int kThreadsPerBlock = 256;
int thread_x = kThreadsPerBlock;
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
int max_blocks = std::max(max_threads / kThreadsPerBlock, 1);
paddle::framework::MixVector<int64_t> mixv_in_rows(&in_rows);
SparseSGDFunctorKernel<<<max_blocks, thread_x, 0, dev_ctx.stream()>>>(
in_data,
mixv_in_rows.CUDAData(dev_ctx.GetPlace()),
learning_rate.data<T>(),
out_data,
in_row_numel,
in_rows.size());
}
template <typename T, typename Context>
void SGDSparseParamSparseGradKernel(
const Context& dev_ctx,
const SelectedRows& param,
const DenseTensor& learning_rate,
const SelectedRows& grad,
const paddle::optional<SelectedRows>& master_param,
bool multi_precision,
SelectedRows* param_out,
SelectedRows* master_param_out) {
PADDLE_THROW("not impl");
}
} // namespace phi
PD_REGISTER_KERNEL(sgd,
GPU,
ALL_LAYOUT,
phi::SGDDenseKernel,
phi::dtype::float16,
float,
double) {}
PD_REGISTER_KERNEL(sgd_dense_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::SGDDenseParamSparseGradKernel,
phi::dtype::float16,
float,
double) {}
PD_REGISTER_KERNEL(sgd_sparse_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::SGDSparseParamSparseGradKernel,
phi::dtype::float16,
float,
double) {}
|
1382b16e51e6856114782c0ab96068236893543d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <omp.h>
#define cudaCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Reduction kernel: each block sums its chunk in shared memory, then thread 0 atomically adds the block's partial sum to the output
__global__ void first(float *input, float *output, int n)
{
int global_tid = blockIdx.x*blockDim.x+threadIdx.x;
int tid = threadIdx.x;
extern __shared__ float temp[];
if (global_tid < n)
temp[threadIdx.x] = input[global_tid];
else
temp[threadIdx.x] = 0;
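// Tree reduction in shared memory: each pass halves the number of active threads,
// leaving the block's partial sum in temp[0].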
for (int d=blockDim.x>>1; d>=1; d>>=1) {
__syncthreads();
if (tid<d) temp[tid] += temp[tid+d];
}
if (tid==0) atomicAdd(output, temp[0]);
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 123456789;
//Host vector
float *h_c;
//Device output vector
float *d_c;
float *d_d;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(float);
// Allocate memory on host
h_c = (float*)malloc(bytes);
for (int i = 0; i < n; i++)
h_c[i] = i+1;
//h_c[i] = (float)rand()/(float)(RAND_MAX);
// Allocate memory on GPU
cudaCheck(hipMalloc(&d_c, bytes));
cudaCheck(hipMalloc(&d_d, sizeof(float)));
cudaCheck(hipMemset(d_d, 0, sizeof(float)));
cudaCheck(hipMemcpy(d_c, h_c, bytes, hipMemcpyHostToDevice));
// Set up kernel launch configuration
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
double t1 = omp_get_wtime();
// Execute the kernel
hipLaunchKernelGGL(( first), dim3(gridSize), dim3(blockSize), blockSize*sizeof(float), 0, d_c, d_d, n);
// Synchronize
cudaCheck(hipDeviceSynchronize());
double elapsed = omp_get_wtime() - t1;
printf("Time: %f\n", elapsed);
// Copy array back to host
cudaCheck(hipMemcpy( h_c, d_d, sizeof(float), hipMemcpyDeviceToHost ));
// Print the reduction result (total sum) copied back from the device
printf("%f\n", h_c[0]);
// Release device memory
hipFree(d_c);
hipFree(d_d);
// Release host memory
free(h_c);
return 0;
}
| 1382b16e51e6856114782c0ab96068236893543d.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <omp.h>
#define cudaCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Reduction kernel: each block sums its chunk in shared memory, then thread 0 atomically adds the block's partial sum to the output
__global__ void first(float *input, float *output, int n)
{
int global_tid = blockIdx.x*blockDim.x+threadIdx.x;
int tid = threadIdx.x;
extern __shared__ float temp[];
if (global_tid < n)
temp[threadIdx.x] = input[global_tid];
else
temp[threadIdx.x] = 0;
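// Tree reduction in shared memory: each pass halves the number of active threads,
// leaving the block's partial sum in temp[0].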
for (int d=blockDim.x>>1; d>=1; d>>=1) {
__syncthreads();
if (tid<d) temp[tid] += temp[tid+d];
}
if (tid==0) atomicAdd(output, temp[0]);
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 123456789;
//Host vector
float *h_c;
//Device output vector
float *d_c;
float *d_d;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(float);
// Allocate memory on host
h_c = (float*)malloc(bytes);
for (int i = 0; i < n; i++)
h_c[i] = i+1;
//h_c[i] = (float)rand()/(float)(RAND_MAX);
// Allocate memory on GPU
cudaCheck(cudaMalloc(&d_c, bytes));
cudaCheck(cudaMalloc(&d_d, sizeof(float)));
cudaCheck(cudaMemset(d_d, 0, sizeof(float)));
cudaCheck(cudaMemcpy(d_c, h_c, bytes, cudaMemcpyHostToDevice));
// Set up kernel launch configuration
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
double t1 = omp_get_wtime();
// Execute the kernel
first<<<gridSize, blockSize, blockSize*sizeof(float)>>>(d_c, d_d, n);
// Synchronize
cudaCheck(cudaDeviceSynchronize());
double elapsed = omp_get_wtime() - t1;
printf("Time: %f\n", elapsed);
// Copy array back to host
cudaCheck(cudaMemcpy( h_c, d_d, sizeof(float), cudaMemcpyDeviceToHost ));
// Print the reduction result (total sum) copied back from the device
printf("%f\n", h_c[0]);
// Release device memory
cudaFree(d_c);
cudaFree(d_d);
// Release host memory
free(h_c);
return 0;
}
|
dd12b3ba9c25b46c058bdf9e780fb537c7072028.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlobpcg_shift.cu normal z -> c, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
__global__ void
magma_clobpcg_shift_kernel( magma_int_t num_rows, magma_int_t num_vecs,
magma_int_t shift, magmaFloatComplex *x ){
int idx = threadIdx.x ; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if( row<num_rows){
magmaFloatComplex tmp = x[idx];
__syncthreads();
if( idx > shift-1 ){
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
shift magma_int_t
shift number
@param
x magmaFloatComplex*
input/output vector x
@ingroup magmasparse_caux
********************************************************************/
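/* Illustrative sketch (not from the MAGMA source): the equivalent shift on a
   host array, assuming the row-interleaved layout sketched above, i.e. entry
   j of every row moves to position j-shift. The helper name is hypothetical. */
static void host_lobpcg_shift( magma_int_t num_rows, magma_int_t num_vecs,
                               magma_int_t shift, magmaFloatComplex *x ){
    for( magma_int_t row=0; row<num_rows; row++ )
        for( magma_int_t j=shift; j<num_vecs; j++ )
            x[ row*num_vecs + j-shift ] = x[ row*num_vecs + j ];
}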
extern "C" magma_int_t
magma_clobpcg_shift( magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaFloatComplex *x ){
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( magmaFloatComplex );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = sqrt(num_rows);
int dimgrid2 = (num_rows + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
hipLaunchKernelGGL(( magma_clobpcg_shift_kernel), dim3(grid), dim3(block), Ms, magma_stream ,
num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
| dd12b3ba9c25b46c058bdf9e780fb537c7072028.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlobpcg_shift.cu normal z -> c, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
__global__ void
magma_clobpcg_shift_kernel( magma_int_t num_rows, magma_int_t num_vecs,
magma_int_t shift, magmaFloatComplex *x ){
int idx = threadIdx.x ; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if( row<num_rows){
magmaFloatComplex tmp = x[idx];
__syncthreads();
if( idx > shift-1 ){
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
shift magma_int_t
shift number
@param
x magmaFloatComplex*
input/output vector x
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_clobpcg_shift( magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaFloatComplex *x ){
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( magmaFloatComplex );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = sqrt(num_rows);
int dimgrid2 = (num_rows + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
magma_clobpcg_shift_kernel<<< grid, block, Ms, magma_stream >>>
( num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
|
6c527bf0664e8d109c9bd06714eef9a8b0636347.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "tbb/concurrent_hash_map.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/tick_count.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/concurrent_vector.h"
#include "utility.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "csv.hpp"
#include "timer.h"
using namespace tbb;
using namespace std;
std::vector<string> timestamp;
// data[], size, threads, blocks,
void mergesort(long*, long, dim3, dim3);
// A[]. B[], size, width, slices, nThreads
__global__ void gpu_mergesort(long*, long*, long, long, long, dim3*, dim3*);
__device__ void gpu_bottomUpMerge(long*, long*, long, long, long);
#define min(a, b) ((a) < (b) ? (a) : (b))
__global__ void sumArraysOnGPU(long *A, long *B, long *C, long *D, const int N)
{
// extern __shared__ long *shared_data[];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// int idx = threadIdx.x;
for (int i = 0; i < N; ++i) {
if(A[idx]==A[i])
{
D[idx]++;
}
}
__syncthreads();
for (int i = 0; i < N; ++i) {
if( A[idx]==A[i] && idx > i)
{
C[idx]=0;
}
}
__syncthreads();
}
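// Illustrative host reference for the kernel above (not part of the original
// file): D[idx] receives the number of occurrences of key A[idx], and C keeps
// a key only at its first occurrence (later duplicates are zeroed). The sketch
// assumes C is preloaded with the keys and D is zero-initialized; the helper
// name countKeysOnHost is an assumption.
static void countKeysOnHost(const long* A, long* C, long* D, int N)
{
    for (int idx = 0; idx < N; idx++) {
        for (int i = 0; i < N; i++)
            if (A[idx] == A[i]) D[idx]++;
        for (int i = 0; i < idx; i++)
            if (A[idx] == A[i]) { C[idx] = 0; break; }
    }
}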
int main(int argc, char** argv) {
int N = atoi(argv[2]);
dim3 threadsPerBlock;
dim3 blocksPerGrid;
threadsPerBlock.x = 32;
threadsPerBlock.y = 1;
threadsPerBlock.z = 1;
blocksPerGrid.x = 8;
blocksPerGrid.y = 1;
blocksPerGrid.z = 1;
const string csv_file = std::string(argv[1]);
vector<vector<string>> data;
// tm();
Csv objCsv(csv_file);
if (!objCsv.getCsv(data)) {
cout << "read ERROR" << endl;
return 1;
}
long size=atoi(argv[2]);
size_t nBytes = size * sizeof(long);
long *data2;
data2 = (long *)malloc(nBytes);
size_t ullBytes = size * sizeof(unsigned long long);
unsigned long long *data3;
data3 = (unsigned long long *)malloc(ullBytes);
long *value;
value = (long *)malloc(nBytes);
long *value2;
value2 = (long *)malloc(nBytes);
// counter = 0;
for (unsigned int row = 0; row < data.size(); row++) {
vector<string> rec = data[row];
std::string tms = rec[0];
for(size_t c = tms.find_first_of("\""); c != string::npos; c = tms.find_first_of("\"")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("/"); c != string::npos; c = tms.find_first_of("/")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("."); c != string::npos; c = tms.find_first_of(".")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(" "); c != string::npos; c = tms.find_first_of(" ")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(":"); c != string::npos; c = tms.find_first_of(":")){
tms.erase(c,1);
}
data2[row] = stol(tms);
value[row] = 1;
}
for(int i = 0; i < 5; i++)
cout << data2[i] << endl;
std::cout << "sorting " << size << " numbers\n\n";
// merge-sort the data
mergesort(data2, size, threadsPerBlock, blocksPerGrid);
/*
for(int i = 0; i < 12; i++)
cout << data2[i] << "," << value[i] << endl;
*/
long *d_A;
long *d_B;
hipMalloc((long**)&d_A, nBytes);
hipMalloc((long**)&d_B, nBytes);
hipMemcpy(d_A, data2, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, value, nBytes, hipMemcpyHostToDevice);
long *d_new_key, *d_new_value;
hipMalloc((long**)&d_new_key, nBytes);
hipMalloc((long**)&d_new_value, nBytes);
hipMemcpy(d_new_key, data2, nBytes, hipMemcpyHostToDevice);
int iLen = 1024;
dim3 block (iLen);
dim3 grid ((N + block.x - 1) / block.x);
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_new_key, d_new_value, size);
long *gpuRef, *gpuRef2;
gpuRef = (long *)malloc(nBytes);
hipMemcpy(gpuRef, d_new_key, nBytes, hipMemcpyDeviceToHost);
gpuRef2 = (long *)malloc(nBytes);
hipMemcpy(gpuRef2, d_new_value, nBytes, hipMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
{
if(gpuRef[i] != 0)
cout << gpuRef[i]<< "," << gpuRef2[i] << endl;
}
}
void mergesort(long* data, long size, dim3 threadsPerBlock, dim3 blocksPerGrid) {
long* D_data;
long* D_swp;
dim3* D_threads;
dim3* D_blocks;
hipMalloc((void**) &D_data, size * sizeof(long));
hipMalloc((void**) &D_swp, size * sizeof(long));
hipMemcpy(D_data, data, size * sizeof(long), hipMemcpyHostToDevice);
//
hipMalloc((void**) &D_threads, sizeof(dim3));
hipMalloc((void**) &D_blocks, sizeof(dim3));
hipMemcpy(D_threads, &threadsPerBlock, sizeof(dim3), hipMemcpyHostToDevice);
hipMemcpy(D_blocks, &blocksPerGrid, sizeof(dim3), hipMemcpyHostToDevice);
long* A = D_data;
long* B = D_swp;
long nThreads = threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z *
blocksPerGrid.x * blocksPerGrid.y * blocksPerGrid.z;
for (int width = 2; width < (size << 1); width <<= 1) {
long slices = size / ((nThreads) * width) + 1;
std::cout << "mergeSort - width: " << width
<< ", slices: " << slices
<< ", nThreads: " << nThreads << '\n';
hipLaunchKernelGGL(( gpu_mergesort), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, A, B, size, width, slices, D_threads, D_blocks);
A = A == D_data ? D_swp : D_data;
B = B == D_data ? D_swp : D_data;
}
hipMemcpy(data, A, size * sizeof(long), hipMemcpyDeviceToHost);
hipFree(A);
hipFree(B);
}
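// Illustrative sketch (not part of the original file): a simple host-side
// check that the array returned by mergesort() is nondecreasing. The helper
// name isSorted is an assumption.
static bool isSorted(const long* data, long size)
{
    for (long i = 1; i < size; i++)
        if (data[i] < data[i - 1]) return false;
    return true;
}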
__device__ unsigned int getIdx(dim3* threads, dim3* blocks) {
int x;
return threadIdx.x +
threadIdx.y * (x = threads->x) +
threadIdx.z * (x *= threads->y) +
blockIdx.x * (x *= threads->z) +
blockIdx.y * (x *= blocks->z) +
blockIdx.z * (x *= blocks->y);
}
__global__ void gpu_mergesort(long* source, long* dest, long size, long width, long slices, dim3* threads, dim3* blocks) {
unsigned int idx = getIdx(threads, blocks);
long start = width*idx*slices,
middle,
end;
for (long slice = 0; slice < slices; slice++) {
if (start >= size)
break;
middle = min(start + (width >> 1), size);
end = min(start + width, size);
gpu_bottomUpMerge(source, dest, start, middle, end);
start += width;
}
}
//
// Finally, sort something
// gets called by gpu_mergesort() for each slice
//
__device__ void gpu_bottomUpMerge(long* source, long* dest, long start, long middle, long end) {
long i = start;
long j = middle;
for (long k = start; k < end; k++) {
if (i < middle && (j >= end || source[i] < source[j])) {
dest[k] = source[i];
i++;
} else {
dest[k] = source[j];
j++;
}
}
}
// read data into a minimal linked list
typedef struct {
int v;
void* next;
} LinkNode;
// helper function for reading numbers from stdin
// it's 'optimized' not to check validity of the characters it reads in..
long readList(long** list) {
//tm();
long v, size = 0;
LinkNode* node = 0;
LinkNode* first = 0;
while (std::cin >> v) {
LinkNode* next = new LinkNode();
next->v = v;
if (node)
node->next = next;
else
first = next;
node = next;
size++;
}
if (size) {
*list = new long[size];
LinkNode* node = first;
long i = 0;
while (node) {
(*list)[i++] = node->v;
node = (LinkNode*) node->next;
}
}
//std::cout << "read stdin: " << tm() << " microseconds\n";
return size;
}
//
// Get the time (in microseconds) since the last call to tm();
// the first value returned by this must not be trusted
//
timeval tStart;
int tm() {
timeval tEnd;
gettimeofday(&tEnd, 0);
int t = (tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec;
tStart = tEnd;
return t;
}
| 6c527bf0664e8d109c9bd06714eef9a8b0636347.cu | #if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "tbb/concurrent_hash_map.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/tick_count.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/concurrent_vector.h"
#include "utility.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "csv.hpp"
#include "timer.h"
using namespace tbb;
using namespace std;
std::vector<string> timestamp;
// data[], size, threads, blocks,
void mergesort(long*, long, dim3, dim3);
// A[]. B[], size, width, slices, nThreads
__global__ void gpu_mergesort(long*, long*, long, long, long, dim3*, dim3*);
__device__ void gpu_bottomUpMerge(long*, long*, long, long, long);
#define min(a, b) ((a) < (b) ? (a) : (b))
__global__ void sumArraysOnGPU(long *A, long *B, long *C, long *D, const int N)
{
// extern __shared__ long *shared_data[];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// int idx = threadIdx.x;
for (int i = 0; i < N; ++i) {
if(A[idx]==A[i])
{
D[idx]++;
}
}
__syncthreads();
for (int i = 0; i < N; ++i) {
if( A[idx]==A[i] && idx > i)
{
C[idx]=0;
}
}
__syncthreads();
}
int main(int argc, char** argv) {
int N = atoi(argv[2]);
dim3 threadsPerBlock;
dim3 blocksPerGrid;
threadsPerBlock.x = 32;
threadsPerBlock.y = 1;
threadsPerBlock.z = 1;
blocksPerGrid.x = 8;
blocksPerGrid.y = 1;
blocksPerGrid.z = 1;
const string csv_file = std::string(argv[1]);
vector<vector<string>> data;
// tm();
Csv objCsv(csv_file);
if (!objCsv.getCsv(data)) {
cout << "read ERROR" << endl;
return 1;
}
long size=atoi(argv[2]);
size_t nBytes = size * sizeof(long);
long *data2;
data2 = (long *)malloc(nBytes);
size_t ullBytes = size * sizeof(unsigned long long);
unsigned long long *data3;
data3 = (unsigned long long *)malloc(ullBytes);
long *value;
value = (long *)malloc(nBytes);
long *value2;
value2 = (long *)malloc(nBytes);
// counter = 0;
for (unsigned int row = 0; row < data.size(); row++) {
vector<string> rec = data[row];
std::string tms = rec[0];
for(size_t c = tms.find_first_of("\""); c != string::npos; c = tms.find_first_of("\"")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("/"); c != string::npos; c = tms.find_first_of("/")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("."); c != string::npos; c = tms.find_first_of(".")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(" "); c != string::npos; c = tms.find_first_of(" ")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(":"); c != string::npos; c = tms.find_first_of(":")){
tms.erase(c,1);
}
data2[row] = stol(tms);
value[row] = 1;
}
for(int i = 0; i < 5; i++)
cout << data2[i] << endl;
std::cout << "sorting " << size << " numbers\n\n";
// merge-sort the data
mergesort(data2, size, threadsPerBlock, blocksPerGrid);
/*
for(int i = 0; i < 12; i++)
cout << data2[i] << "," << value[i] << endl;
*/
long *d_A;
long *d_B;
cudaMalloc((long**)&d_A, nBytes);
cudaMalloc((long**)&d_B, nBytes);
cudaMemcpy(d_A, data2, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, value, nBytes, cudaMemcpyHostToDevice);
long *d_new_key, *d_new_value;
cudaMalloc((long**)&d_new_key, nBytes);
cudaMalloc((long**)&d_new_value, nBytes);
cudaMemcpy(d_new_key, data2, nBytes, cudaMemcpyHostToDevice);
int iLen = 1024;
dim3 block (iLen);
dim3 grid ((N + block.x - 1) / block.x);
sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_new_key, d_new_value, size);
long *gpuRef, *gpuRef2;
gpuRef = (long *)malloc(nBytes);
cudaMemcpy(gpuRef, d_new_key, nBytes, cudaMemcpyDeviceToHost);
gpuRef2 = (long *)malloc(nBytes);
cudaMemcpy(gpuRef2, d_new_value, nBytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
{
if(gpuRef[i] != 0)
cout << gpuRef[i]<< "," << gpuRef2[i] << endl;
}
}
void mergesort(long* data, long size, dim3 threadsPerBlock, dim3 blocksPerGrid) {
long* D_data;
long* D_swp;
dim3* D_threads;
dim3* D_blocks;
cudaMalloc((void**) &D_data, size * sizeof(long));
cudaMalloc((void**) &D_swp, size * sizeof(long));
cudaMemcpy(D_data, data, size * sizeof(long), cudaMemcpyHostToDevice);
//
cudaMalloc((void**) &D_threads, sizeof(dim3));
cudaMalloc((void**) &D_blocks, sizeof(dim3));
cudaMemcpy(D_threads, &threadsPerBlock, sizeof(dim3), cudaMemcpyHostToDevice);
cudaMemcpy(D_blocks, &blocksPerGrid, sizeof(dim3), cudaMemcpyHostToDevice);
long* A = D_data;
long* B = D_swp;
long nThreads = threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z *
blocksPerGrid.x * blocksPerGrid.y * blocksPerGrid.z;
for (int width = 2; width < (size << 1); width <<= 1) {
long slices = size / ((nThreads) * width) + 1;
std::cout << "mergeSort - width: " << width
<< ", slices: " << slices
<< ", nThreads: " << nThreads << '\n';
gpu_mergesort<<<blocksPerGrid, threadsPerBlock>>>(A, B, size, width, slices, D_threads, D_blocks);
A = A == D_data ? D_swp : D_data;
B = B == D_data ? D_swp : D_data;
}
cudaMemcpy(data, A, size * sizeof(long), cudaMemcpyDeviceToHost);
cudaFree(A);
cudaFree(B);
}
__device__ unsigned int getIdx(dim3* threads, dim3* blocks) {
int x;
return threadIdx.x +
threadIdx.y * (x = threads->x) +
threadIdx.z * (x *= threads->y) +
blockIdx.x * (x *= threads->z) +
blockIdx.y * (x *= blocks->z) +
blockIdx.z * (x *= blocks->y);
}
__global__ void gpu_mergesort(long* source, long* dest, long size, long width, long slices, dim3* threads, dim3* blocks) {
unsigned int idx = getIdx(threads, blocks);
long start = width*idx*slices,
middle,
end;
for (long slice = 0; slice < slices; slice++) {
if (start >= size)
break;
middle = min(start + (width >> 1), size);
end = min(start + width, size);
gpu_bottomUpMerge(source, dest, start, middle, end);
start += width;
}
}
//
// Finally, sort something
// gets called by gpu_mergesort() for each slice
//
__device__ void gpu_bottomUpMerge(long* source, long* dest, long start, long middle, long end) {
long i = start;
long j = middle;
for (long k = start; k < end; k++) {
if (i < middle && (j >= end || source[i] < source[j])) {
dest[k] = source[i];
i++;
} else {
dest[k] = source[j];
j++;
}
}
}
// read data into a minimal linked list
typedef struct {
int v;
void* next;
} LinkNode;
// helper function for reading numbers from stdin
// it's 'optimized' not to check validity of the characters it reads in..
long readList(long** list) {
//tm();
long v, size = 0;
LinkNode* node = 0;
LinkNode* first = 0;
while (std::cin >> v) {
LinkNode* next = new LinkNode();
next->v = v;
if (node)
node->next = next;
else
first = next;
node = next;
size++;
}
if (size) {
*list = new long[size];
LinkNode* node = first;
long i = 0;
while (node) {
(*list)[i++] = node->v;
node = (LinkNode*) node->next;
}
}
//std::cout << "read stdin: " << tm() << " microseconds\n";
return size;
}
//
// Get the time (in microseconds) since the last call to tm();
// the first value returned by this must not be trusted
//
timeval tStart;
int tm() {
timeval tEnd;
gettimeofday(&tEnd, 0);
int t = (tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec;
tStart = tEnd;
return t;
}
|
c687cafcc47be01dfd3fb1b17db2fa1bb8824000.hip | // !!! This is a file automatically generated by hipify!!!
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sort.h>
typedef std::pair<std::pair<uint64_t, uint64_t>, std::pair<uint64_t, uint64_t> >
wide_as_pair;
typedef std::pair<wide_as_pair, std::vector<uint32_t> >
point_idx_pair;
typedef thrust::pair<thrust::pair<uint64_t, uint64_t>, thrust::pair<uint64_t, uint64_t> >
wide_as_pair_GPU;
struct bigint_t
{
uint32_t limbs[8];
};
struct halfbigint_t
{
uint32_t limbs[4];
};
/*
((uint64_t)roundInfo->c[3] << 32) + roundInfo->c[2],
((uint64_t)roundInfo->c[1] << 32) + roundInfo->c[0],
std::make_pair(
std::make_pair(
((uint64_t)point_preload.limbs[7] << 32) + point_preload.limbs[6],
((uint64_t)point_preload.limbs[5] << 32) + point_preload.limbs[4]
),
std::make_pair(
((uint64_t)point_preload.limbs[3] << 32) + point_preload.limbs[2],
((uint64_t)point_preload.limbs[1] << 32) + point_preload.limbs[0]
*/
/*! Add together two n-limb numbers, returning the carry limb.
\note the output can also be one of the inputs
*/
__device__
uint32_t wide_add_GPU(unsigned n, uint32_t *res, const uint32_t *a, const uint32_t *b)
{
uint64_t carry=0;
for(unsigned i=0;i<n;i++){
uint64_t tmp=uint64_t(a[i])+b[i]+carry;
res[i]=uint32_t(tmp&0xFFFFFFFFULL);
carry=tmp>>32;
}
return carry;
}
/*! Add a single limb to an n-limb number, returning the carry limb
\note the output can also be the input
*/
__device__
uint32_t wide_add_GPU(unsigned n, uint32_t *res, const uint32_t *a, uint32_t b)
{
uint64_t carry=b;
for(unsigned i=0;i<n;i++){
uint64_t tmp=a[i]+carry;
res[i]=uint32_t(tmp&0xFFFFFFFFULL);
carry=tmp>>32;
}
return carry;
}
/*! Multiply two n-limb numbers to produce a 2n-limb result
\note All the integers must be distinct, the output cannot overlap the input */
__device__
void wide_mul_GPU(unsigned n, uint32_t *res_hi, uint32_t *res_lo, const uint32_t *a, const uint32_t *b)
{
//assert(res_hi!=a && res_hi!=b);
//assert(res_lo!=a && res_lo!=b);
uint64_t carry=0, acc=0;
for(unsigned i=0; i<n; i++){
for(unsigned j=0; j<=i; j++){
//assert( (j+(i-j))==i );
uint64_t tmp=uint64_t(a[j])*b[i-j];
acc+=tmp;
if(acc < tmp)
carry++;
//fprintf(stderr, " (%d,%d)", j,i-j);
}
res_lo[i]=uint32_t(acc&0xFFFFFFFFull);
//fprintf(stderr, "\n %d : %u\n", i, res_lo[i]);
acc= (carry<<32) | (acc>>32);
carry=carry>>32;
}
for(unsigned i=1; i<n; i++){
for(unsigned j=i; j<n; j++){
uint64_t tmp=uint64_t(a[j])*b[n-j+i-1];
acc+=tmp;
if(acc < tmp)
carry++;
//fprintf(stderr, " (%d,%d)", j,n-j+i-1);
//assert( (j+(n-j))==n+i );
}
res_hi[i-1]=uint32_t(acc&0xFFFFFFFFull);
//fprintf(stderr, "\n %d : %u\n", i+n-1, res_hi[i-1]);
acc= (carry<<32) | (acc>>32);
carry=carry>>32;
}
res_hi[n-1]=acc;
}
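// Illustrative worked example (not from the original source): with n = 1,
// a = {0xFFFFFFFF} and b = {0xFFFFFFFF}, wide_mul_GPU yields res_lo[0] =
// 0x00000001 and res_hi[0] = 0xFFFFFFFE, since
// 0xFFFFFFFF * 0xFFFFFFFF = 0xFFFFFFFE00000001.
static_assert(0xFFFFFFFFull * 0xFFFFFFFFull == 0xFFFFFFFE00000001ull,
              "single-limb product quoted in the example above");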
struct genpoint
{
uint32_t point_preload[8];
const unsigned numsteps;
const unsigned diff;
uint32_t c[4];
genpoint(const uint32_t* const ppre_in, unsigned ns_in, const uint32_t* const c_in, unsigned diff_in) :
numsteps(ns_in), diff(diff_in)
{
for (int i = 0; i < 8; i++)
{
point_preload[i] = ppre_in[i];
}
for (int i = 0; i < 4; i++)
{
c[i] = c_in[i];
}
}
__device__
wide_as_pair_GPU operator()(uint32_t idx){
uint32_t point[2][8];
for (int isdiff = 0; isdiff <= 1 ; isdiff++)
{
point[isdiff][0] = idx + isdiff*diff;
for (int i = 1; i < 8; i++)
{
point[isdiff][i] = point_preload[i];
}
for (int i = 0; i < numsteps; i++)
{
bigint_t tmp;
// tmp=lo(x)*c;
wide_mul_GPU(4, tmp.limbs+4, tmp.limbs, point[isdiff], c);
// [carry,lo(x)] = lo(tmp)+hi(x)
uint32_t carry=wide_add_GPU(4, point[isdiff], tmp.limbs, point[isdiff]+4);
// hi(x) = hi(tmp) + carry
wide_add_GPU(4, point[isdiff]+4, tmp.limbs+4, carry);
// overall: tmp=lo(x)*c; x=tmp+hi(x)
}
}
uint32_t mpoint[8];
for (int i = 0; i < 8; i++)
{
mpoint[i] = point[0][i] ^ point[1][i];
}
return thrust::make_pair(
thrust::make_pair(
((uint64_t)mpoint[7] << 32) + mpoint[6],
((uint64_t)mpoint[5] << 32) + mpoint[4]
),
thrust::make_pair(
((uint64_t)mpoint[3] << 32) + mpoint[2],
((uint64_t)mpoint[1] << 32) + mpoint[0]
));
}
};
//modified from https://devtalk.nvidia.com/default/topic/610914/cuda-programming-and-performance/modular-exponentiation-amp-biginteger/
__device__ __forceinline__ bigint_t add256 (bigint_t a, bigint_t b)
{
bigint_t res;
asm ("{\n\t"
"add.cc.u32 %0, %8, %16; \n\t"
"addc.cc.u32 %1, %9, %17; \n\t"
"addc.cc.u32 %2, %10, %18; \n\t"
"addc.cc.u32 %3, %11, %19; \n\t"
"addc.cc.u32 %4, %12, %20; \n\t"
"addc.cc.u32 %5, %13, %21; \n\t"
"addc.cc.u32 %6, %14, %22; \n\t"
"addc.u32 %7, %15, %23; \n\t"
"}"
: "=r"(res.limbs[0]), "=r"(res.limbs[1]), "=r"(res.limbs[2]), "=r"(res.limbs[3]), "=r"(res.limbs[4]), "=r"(res.limbs[5]), "=r"(res.limbs[6]), "=r"(res.limbs[7])
: "r"(a.limbs[0]), "r"(a.limbs[1]), "r"(a.limbs[2]), "r"(a.limbs[3]),"r"(a.limbs[4]), "r"(a.limbs[5]), "r"(a.limbs[6]), "r"(a.limbs[7]),
"r"(b.limbs[0]), "r"(b.limbs[1]), "r"(b.limbs[2]), "r"(b.limbs[3]),"r"(b.limbs[4]), "r"(b.limbs[5]), "r"(b.limbs[6]), "r"(b.limbs[7]));
return res;
}
//modified from https://devtalk.nvidia.com/default/topic/610914/cuda-programming-and-performance/modular-exponentiation-amp-biginteger/
__device__ __forceinline__ bigint_t add256 (bigint_t a, halfbigint_t b)
{
bigint_t res;
asm ("{\n\t"
"add.cc.u32 %0, %8, %16; \n\t"
"addc.cc.u32 %1, %9, %17; \n\t"
"addc.cc.u32 %2, %10, %18; \n\t"
"addc.cc.u32 %3, %11, %19; \n\t"
"addc.u32 %4, %12, 0; \n\t"
//"addc.cc.u32 %5, %13, %21; \n\t"
"mov.u32 %5, %13 ; \n\t"
//"addc.cc.u32 %6, %14, %22; \n\t"
"mov.u32 %6, %14 ; \n\t"
//"addc.u32 %7, %15, %23; \n\t"
"mov.u32 %7, %15 ; \n\t"
"}"
: "=r"(res.limbs[0]), "=r"(res.limbs[1]), "=r"(res.limbs[2]), "=r"(res.limbs[3]), "=r"(res.limbs[4]), "=r"(res.limbs[5]), "=r"(res.limbs[6]), "=r"(res.limbs[7])
: "r"(a.limbs[0]), "r"(a.limbs[1]), "r"(a.limbs[2]), "r"(a.limbs[3]),"r"(a.limbs[4]), "r"(a.limbs[5]), "r"(a.limbs[6]), "r"(a.limbs[7]),
"r"(b.limbs[0]), "r"(b.limbs[1]), "r"(b.limbs[2]), "r"(b.limbs[3]));
return res;
}
//modified from https://devtalk.nvidia.com/default/topic/610914/cuda-programming-and-performance/modular-exponentiation-amp-biginteger/
__device__ __forceinline__ bigint_t umul256 (bigint_t a, bigint_t b)
{
bigint_t res;
//top 12 13 14 15, 20 21 22 23
asm ("{\n\t"
"mul.lo.u32 %0, %8, %16; \n\t"
"mul.hi.u32 %1, %8, %16; \n\t"
"mad.lo.cc.u32 %1, %8, %17, %1;\n\t"
"madc.hi.u32 %2, %8, %17, 0;\n\t"
"mad.lo.cc.u32 %1, %9, %16, %1;\n\t"
"madc.hi.cc.u32 %2, %9, %16, %2;\n\t"
"madc.hi.u32 %3, %8, %18, 0;\n\t"
"mad.lo.cc.u32 %2, %8, %18, %2;\n\t"
"madc.hi.cc.u32 %3, %9, %17, %3;\n\t"
"madc.hi.u32 %4, %8, %19, 0;\n\t"
"mad.lo.cc.u32 %2, %9, %17, %2;\n\t"
"madc.hi.cc.u32 %3, %10, %16, %3;\n\t"
"madc.hi.cc.u32 %4, %9, %18, %4;\n\t"
"madc.hi.u32 %5, %8, %20, 0;\n\t" // Uses top
"mad.lo.cc.u32 %2, %10, %16, %2;\n\t"
"madc.lo.cc.u32 %3, %8, %19, %3;\n\t"
"madc.hi.cc.u32 %4, %10, %17, %4;\n\t"
"madc.hi.cc.u32 %5, %9, %19, %5;\n\t"
"madc.hi.u32 %6, %8, %21, 0;\n\t" // Uses top
"mad.lo.cc.u32 %3, %9, %18, %3;\n\t"
"madc.hi.cc.u32 %4, %11, %16, %4;\n\t"
"madc.hi.cc.u32 %5, %10, %18, %5;\n\t"
"madc.hi.cc.u32 %6, %9, %20, %6;\n\t" // Uses top
"madc.hi.u32 %7, %8, %22, 0;\n\t" // Uses top
"mad.lo.cc.u32 %3, %10, %17, %3;\n\t"
"madc.lo.cc.u32 %4, %8, %20, %4;\n\t" // Uses top
"madc.hi.cc.u32 %5, %11, %17, %5;\n\t"
"madc.hi.cc.u32 %6, %10, %19, %6;\n\t"
"madc.hi.u32 %7, %9, %21, %7;\n\t" // Uses top
"mad.lo.cc.u32 %3, %11, %16, %3;\n\t"
"madc.lo.cc.u32 %4, %9, %19, %4;\n\t"
"madc.hi.cc.u32 %5, %12, %16, %5;\n\t" // Uses top
"madc.hi.cc.u32 %6, %11, %18, %6;\n\t"
"madc.hi.u32 %7, %10, %20, %7;\n\t" // Uses top
"mad.lo.cc.u32 %4, %10, %18, %4;\n\t"
"madc.lo.cc.u32 %5, %8, %21, %5;\n\t" // Uses top
"madc.hi.cc.u32 %6, %12, %17, %6;\n\t" // Uses top
"madc.hi.u32 %7, %11, %19, %7;\n\t"
"mad.lo.cc.u32 %4, %11, %17, %4;\n\t"
"madc.lo.cc.u32 %5, %9, %20, %5;\n\t" // Uses top
"madc.hi.cc.u32 %6, %13, %16, %6;\n\t" // Uses top
"madc.hi.u32 %7, %12, %18, %7;\n\t" // Uses top
"mad.lo.cc.u32 %4, %12, %16, %4;\n\t" // Uses top
"madc.lo.cc.u32 %5, %10, %19, %5;\n\t"
"madc.lo.cc.u32 %6, %8, %22, %6;\n\t" // Uses top
"madc.hi.u32 %7, %13, %17, %7;\n\t" // Uses top
"mad.lo.cc.u32 %5, %11, %18, %5;\n\t"
"madc.lo.cc.u32 %6, %9, %21, %6;\n\t" // Uses top
"madc.hi.u32 %7, %14, %16, %7;\n\t" // Uses top
"mad.lo.cc.u32 %5, %12, %17, %5;\n\t" // Uses top
"madc.lo.cc.u32 %6, %10, %20, %6;\n\t" // Uses top
"madc.lo.u32 %7, %8, %23, %7;\n\t" // Uses top
"mad.lo.cc.u32 %5, %13, %16, %5;\n\t" // Uses top
"madc.lo.cc.u32 %6, %11, %19, %6;\n\t"
"madc.lo.u32 %7, %9, %22, %7;\n\t" // Uses top
"mad.lo.cc.u32 %6, %12, %18, %6;\n\t" // Uses top
"madc.lo.u32 %7, %10, %21, %7;\n\t" // Uses top
"mad.lo.cc.u32 %6, %13, %17, %6;\n\t" // Uses top
"madc.lo.u32 %7, %11, %20, %7;\n\t" // Uses top
"mad.lo.cc.u32 %6, %14, %16, %6;\n\t" // Uses top
"madc.lo.u32 %7, %12, %19, %7;\n\t" // Uses top
"mad.lo.u32 %7, %13, %18, %7;\n\t" // Uses top
"mad.lo.u32 %7, %14, %17, %7;\n\t" // Uses top
"mad.lo.u32 %7, %15, %16, %7;\n\t" // Uses top
"}"
: "=r"(res.limbs[0]), "=r"(res.limbs[1]), "=r"(res.limbs[2]), "=r"(res.limbs[3]), "=r"(res.limbs[4]), "=r"(res.limbs[5]), "=r"(res.limbs[6]), "=r"(res.limbs[7])
: "r"(a.limbs[0]), "r"(a.limbs[1]), "r"(a.limbs[2]), "r"(a.limbs[3]),"r"(a.limbs[4]), "r"(a.limbs[5]), "r"(a.limbs[6]), "r"(a.limbs[7]),
"r"(b.limbs[0]), "r"(b.limbs[1]), "r"(b.limbs[2]), "r"(b.limbs[3]),"r"(b.limbs[4]), "r"(b.limbs[5]), "r"(b.limbs[6]), "r"(b.limbs[7]));
return res;
}
//modified from https://devtalk.nvidia.com/default/topic/610914/cuda-programming-and-performance/modular-exponentiation-amp-biginteger/
__device__ __forceinline__ bigint_t umul_256_128in (halfbigint_t a, halfbigint_t b)
{
bigint_t res;
//old top 12 13 14 15, 20 21 22 23
asm ("{\n\t"
"mul.lo.u32 %0, %8, %12; \n\t"
"mul.hi.u32 %1, %8, %12; \n\t"
"mad.lo.cc.u32 %1, %8, %13, %1;\n\t"
"madc.hi.u32 %2, %8, %13, 0;\n\t"
"mad.lo.cc.u32 %1, %9, %12, %1;\n\t"
"madc.hi.cc.u32 %2, %9, %12, %2;\n\t"
"madc.hi.u32 %3, %8, %14, 0;\n\t"
"mad.lo.cc.u32 %2, %8, %14, %2;\n\t"
"madc.hi.cc.u32 %3, %9, %13, %3;\n\t"
"madc.hi.u32 %4, %8, %15, 0;\n\t"
"mad.lo.cc.u32 %2, %9, %13, %2;\n\t"
"madc.hi.cc.u32 %3, %10, %12, %3;\n\t"
"madc.hi.cc.u32 %4, %9, %14, %4;\n\t"
//"madc.hi.u32 %5, %8, 0, 0;\n\t" // Uses top
"addc.u32 %5,0,0;\n\t"
"mad.lo.cc.u32 %2, %10, %12, %2;\n\t"
"madc.lo.cc.u32 %3, %8, %15, %3;\n\t"
"madc.hi.cc.u32 %4, %10, %13, %4;\n\t"
"madc.hi.cc.u32 %5, %9, %15, %5;\n\t"
//"madc.hi.u32 %6, %8, 0, 0;\n\t" // Uses top
"addc.u32 %6,0,0;\n\t"
"mad.lo.cc.u32 %3, %9, %14, %3;\n\t"
"madc.hi.cc.u32 %4, %11, %12, %4;\n\t"
"madc.hi.cc.u32 %5, %10, %14, %5;\n\t"
//"madc.hi.cc.u32 %6, %9, 0, 0;\n\t" // Uses top
"addc.u32 %6,%6,0;\n\t"
//"madc.hi.u32 %7, %8, 0, 0;\n\t" // Uses top
"mad.lo.cc.u32 %3, %10, %13, %3;\n\t"
//"madc.lo.cc.u32 %4, %8, 0, %4;\n\t" // Uses top
"addc.u32 %4,%4,0;\n\t"
"mad.hi.cc.u32 %5, %11, %13, %5;\n\t"
"madc.hi.cc.u32 %6, %10, %15, %6;\n\t"
//"madc.hi.u32 %7, %9, 0, %7;\n\t" // Uses top
"addc.u32 %7,0,0;\n\t"
"mad.lo.cc.u32 %3, %11, %12, %3;\n\t"
"madc.lo.cc.u32 %4, %9, %15, %4;\n\t"
//"madc.hi.cc.u32 %5, 0, %12, %5;\n\t" // Uses top
"addc.u32 %5,%5,0;\n\t"
"mad.hi.cc.u32 %6, %11, %14, %6;\n\t"
//"madc.hi.u32 %7, %10, 0, %7;\n\t" // Uses top
"addc.u32 %7,%7,0;\n\t"
"mad.lo.cc.u32 %4, %10, %14, %4;\n\t"
//"madc.lo.cc.u32 %5, %8, 0, %5;\n\t" // Uses top
"addc.u32 %5,%5,0;\n\t"
//"madc.hi.cc.u32 %6, 0, %13, %6;\n\t" // Uses top
"mad.hi.u32 %7, %11, %15, %7;\n\t"
"mad.lo.cc.u32 %4, %11, %13, %4;\n\t"
//"madc.lo.cc.u32 %5, %9, 0, %5;\n\t" // Uses top
"addc.u32 %5,%5,0;\n\t"
//"madc.hi.cc.u32 %6, 0, %12, %6;\n\t" // Uses top
//"madc.hi.u32 %7, 0, %14, %7;\n\t" // Uses top
//"mad.lo.cc.u32 %4, 0, %12, %4;\n\t" // Uses top
"mad.lo.cc.u32 %5, %10, %15, %5;\n\t"
//"madc.lo.cc.u32 %6, %8, 0, %6;\n\t" // Uses top
"addc.u32 %6,%6,0;\n\t"
//"madc.hi.u32 %7, 0, %13, %7;\n\t" // Uses top
"mad.lo.cc.u32 %5, %11, %14, %5;\n\t"
//"madc.lo.cc.u32 %6, %9, 0, %6;\n\t" // Uses top
"addc.u32 %6,%6,0;\n\t"
//"madc.hi.u32 %7, 0, %12, %7;\n\t" // Uses top
//"mad.lo.cc.u32 %5, 0, %13, %5;\n\t" // Uses top
//"madc.lo.cc.u32 %6, %10, 0, %6;\n\t" // Uses top
//"madc.lo.u32 %7, %8, 0, %7;\n\t" // Uses top
//"mad.lo.cc.u32 %5, 0, %12, %5;\n\t" // Uses top
"mad.lo.cc.u32 %6, %11, %15, %6;\n\t"
//"madc.lo.u32 %7, %9, 0, %7;\n\t" // Uses top
"addc.u32 %7,%7,0;\n\t"
//"mad.lo.cc.u32 %6, 0, %14, %6;\n\t" // Uses top
//"madc.lo.u32 %7, %10, 0, %7;\n\t" // Uses top
//"mad.lo.cc.u32 %6, 0, %13, %6;\n\t" // Uses top
//"madc.lo.u32 %7, %11, 0, %7;\n\t" // Uses top
//"mad.lo.cc.u32 %6, 0, %12, %6;\n\t" // Uses top
//"madc.lo.u32 %7, 0, %15, %7;\n\t" // Uses top
//"mad.lo.u32 %7, 0, %14, %7;\n\t" // Uses top
//"mad.lo.u32 %7, 0, %13, %7;\n\t" // Uses top
//"mad.lo.u32 %7, 0, %12, %7;\n\t" // Uses top
"}"
: "=r"(res.limbs[0]), "=r"(res.limbs[1]), "=r"(res.limbs[2]), "=r"(res.limbs[3]), "=r"(res.limbs[4]), "=r"(res.limbs[5]), "=r"(res.limbs[6]), "=r"(res.limbs[7])
: "r"(a.limbs[0]), "r"(a.limbs[1]), "r"(a.limbs[2]), "r"(a.limbs[3]),
"r"(b.limbs[0]), "r"(b.limbs[1]), "r"(b.limbs[2]), "r"(b.limbs[3]));
return res;
}
__global__ void genmpoints_on_GPU_fast (
unsigned hashsteps,
halfbigint_t c,
bigint_t point_preload,
uint32_t diff,
unsigned N,
const uint32_t* const __restrict__ indexbank,
bigint_t* const __restrict__ mpointsout
){
int ID = blockIdx.x * blockDim.x + threadIdx.x;
if(ID < N){
bigint_t points[2];
for (int isdiff = 0; isdiff <= 1 ; isdiff++)
{
points[isdiff] = point_preload;
points[isdiff].limbs[0] = indexbank[ID] + isdiff*diff;
for (int i = 0; i < hashsteps; i++)
{
bigint_t tmp;
halfbigint_t lox = {{ points[isdiff].limbs[0], points[isdiff].limbs[1], points[isdiff].limbs[2], points[isdiff].limbs[3] }};
halfbigint_t hix = {{ points[isdiff].limbs[4], points[isdiff].limbs[5], points[isdiff].limbs[6], points[isdiff].limbs[7] }};
//bigint_t loxtest = {{points[isdiff].limbs[0], points[isdiff].limbs[1], points[isdiff].limbs[2], points[isdiff].limbs[3], 0,0,0,0 }};
//bigint_t ctest = {{ c.limbs[0], c.limbs[1], c.limbs[2], c.limbs[3], 0,0,0,0 }};
// tmp=lo(x)*c;
tmp = umul_256_128in(lox, c);
//bigint_t tmp2 = umul256(loxtest, ctest);
//bool eq[8];
//for (int j = 0; j < 8; j++)
//{
// eq[j] = tmp.limbs[j] == tmp2.limbs[j];
//}
// x=tmp+hi(x)
points[isdiff] = add256(tmp, hix);
// overall: tmp=lo(x)*c; x=tmp+hi(x)
}
}
bigint_t mpoint;
for (int i = 0; i < 8; i++)
{
mpoint.limbs[i] = points[0].limbs[i] ^ points[1].limbs[i];
}
mpointsout[ID] = mpoint;
}
}
struct bigint_t_less : public thrust::binary_function<bigint_t,bigint_t,bool>
{
const unsigned len;
bigint_t_less(const unsigned lenin):len(lenin) {}
/*! Function call operator. The return value is <tt>lhs < rhs</tt>.
*/
__host__ __device__ bool operator()(const bigint_t &lhs, const bigint_t &rhs) const {
//return lhs < rhs;
for (int i = len-1; i >= 0; i--)
{
if(lhs.limbs[i] < rhs.limbs[i])
return true;
if(lhs.limbs[i] > rhs.limbs[i])
return false;
}
//all equal, so not strictly less than
return false;
}
}; // end less
struct bigint_t_less_idx : public thrust::binary_function<uint32_t,uint32_t,bool>
{
const unsigned len;
const bigint_t* const theBigint;
bigint_t_less_idx(const unsigned lenin, const bigint_t* const theBigintin):len(lenin), theBigint(theBigintin) {}
/*! Function call operator. The return value is <tt>lhs < rhs</tt>.
*/
__host__ __device__ bool operator()(const uint32_t &lhs, const uint32_t &rhs) const {
//return lhs < rhs;
for (int i = len-1; i >= 0; i--)
{
if(theBigint[lhs].limbs[i] < theBigint[rhs].limbs[i])
return true;
if(theBigint[lhs].limbs[i] > theBigint[rhs].limbs[i])
return false;
}
//all equal, so not strictly less than
return false;
}
}; // end less
int testcuda();
__global__ void gather_unzip(
const uint32_t N,
const uint32_t depth,
const uint32_t* const __restrict__ map,
const bigint_t* const __restrict__ mpointsin,
uint32_t* const __restrict__ unzipout
){
int ID = blockIdx.x * blockDim.x + threadIdx.x;
if(ID < N){
unzipout[ID] = mpointsin[map[ID]].limbs[depth];
}
}
//Diff implicit!
struct indicies
{
uint32_t parts[8];
};
struct point_idx{
bigint_t point;
indicies idx;
};
//set N = N-1 for each pass!!, npop should double
__global__ void xor_points_unconditional(
const uint32_t N,
const uint32_t depth,
const uint32_t nPopulated,
const bigint_t* const __restrict__ xorin,
bigint_t* const __restrict__ xoredout,
const indicies* const __restrict__ idxin,
indicies* const __restrict__ idxout
){
int ID = blockIdx.x * blockDim.x + threadIdx.x;
if(ID < N-1){
for (int i = 0; i < depth; i++)
{
xoredout[ID].limbs[i] = xorin[ID].limbs[i] ^ xorin[ID+1].limbs[i];
}
int j = 0;
for (int i = 0; i < nPopulated; i++)
{
idxout[ID].parts[j++] = idxin[ID].parts[i];
idxout[ID].parts[j++] = idxin[ID+1].parts[i];
}
}
}
__global__ void shove_flat_idx_into_struct(
const uint32_t N,
const uint32_t* const __restrict__ idxin,
indicies* const __restrict__ idxout
){
int ID = blockIdx.x * blockDim.x + threadIdx.x;
if(ID < N){
idxout[ID].parts[0] = idxin[ID];
}
};
namespace bitecoin{
std::pair<std::vector<bigint_t>, std::vector<uint32_t>> gensort_GPU (
const unsigned hashsteps,
const halfbigint_t c,
const bigint_t point_preload,
const uint32_t diff,
const std::vector<uint32_t> &indexbank
){
hipError_t e;
unsigned N = indexbank.size();
uint32_t* idxbankGPU, *idxbankGPUout;
if(e = hipMalloc(&idxbankGPU, N * sizeof(uint32_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
if(e = hipMalloc(&idxbankGPUout, N * sizeof(uint32_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
if(e = hipMemcpy(idxbankGPU, indexbank.data(), N * sizeof(uint32_t), hipMemcpyHostToDevice)) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
// indicies* idxa, *idxb;
// if(e = hipMalloc(&idxa, N * sizeof(indicies))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
// if(e = hipMalloc(&idxb, N * sizeof(indicies))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
// auto idxaptr = thrust::device_pointer_cast(idxa);
// auto idxbptr = thrust::device_pointer_cast(idxb);
bigint_t* mpointsGPUa, *mpointsGPUb;
if(e = hipMalloc(&mpointsGPUa, N * sizeof(bigint_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
if(e = hipMalloc(&mpointsGPUb, N * sizeof(bigint_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
auto mpointsGPUtptra = thrust::device_pointer_cast(mpointsGPUa);
auto mpointsGPUtptrb = thrust::device_pointer_cast(mpointsGPUb);
//gen
unsigned nblocks = ::ceil((double)N/128);
hipLaunchKernelGGL(( genmpoints_on_GPU_fast) , dim3(nblocks), dim3(128), 0, 0, hashsteps, c, point_preload, diff, N, idxbankGPU, mpointsGPUa);
if(e = hipGetLastError()) printf("Cuda error %d on line %d\n", e, __LINE__);
//shove_flat_idx_into_struct <<<nblocks, 128>>> (N, idxbankGPU, idxa);
//sort
uint32_t* map;
if(e = hipMalloc(&map, N * sizeof(uint32_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
auto maptptr = thrust::device_pointer_cast(map);
thrust::sequence(maptptr, maptptr+N);
uint32_t* currlimb;
if(e = hipMalloc(&currlimb, N * sizeof(uint32_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
auto currlimbptr = thrust::device_pointer_cast(currlimb);
for (int i = 0; i < 7; i++)
{
hipLaunchKernelGGL(( gather_unzip) , dim3(nblocks), dim3(128), 0, 0, N,i,map,mpointsGPUa,currlimb);
if(e = hipGetLastError()) printf("Cuda error %d on line %d\n", e, __LINE__);
thrust::stable_sort_by_key(currlimbptr, currlimbptr+N, maptptr);
}
//gather sort results
thrust::gather(maptptr, maptptr+N, mpointsGPUtptra, mpointsGPUtptrb);
auto idxbankGPUptr = thrust::device_pointer_cast(idxbankGPU);
auto idxbankGPUoutptr = thrust::device_pointer_cast(idxbankGPUout);
thrust::gather(maptptr, maptptr+N, idxbankGPUptr, idxbankGPUoutptr);
//xor_points_unconditional(N, 8, 1, mptsGPUoutvecRaw, m2pointsGPU, idxa, idxb);
//std::swap(idxa,idxb);
//std::swap(m2pointsGPU, mpointsGPU);
//DEBUG
//std::vector<uint32_t> testmap(N);
//if(e = hipMemcpy(testmap.data(), map, N * sizeof(uint32_t), hipMemcpyDeviceToHost)) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
std::vector<bigint_t> mpointsHost(N);
if(e = hipMemcpy(mpointsHost.data(), mpointsGPUb, N * sizeof(bigint_t), hipMemcpyDeviceToHost)) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
std::vector<uint32_t> idxHost(N);
if(e = hipMemcpy(idxHost.data(), idxbankGPUout, N * sizeof(uint32_t), hipMemcpyDeviceToHost)) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
hipFree(idxbankGPU);
hipFree(idxbankGPUout);
//hipFree(idxa);
//hipFree(idxb);
hipFree(mpointsGPUa);
hipFree(mpointsGPUb);
hipFree(map);
hipFree(currlimb);
return std::make_pair(mpointsHost, idxHost);
}
std::vector<wide_as_pair> genmpoints_on_GPU (
unsigned hashsteps,
const uint32_t* const c,
const uint32_t* const point_preload,
const std::vector<uint32_t> &indexbank,
unsigned diff
){
thrust::device_vector<uint32_t> indexbank_GPU = indexbank;
thrust::device_vector<wide_as_pair_GPU> output_GPU(indexbank.size());
thrust::transform(indexbank_GPU.begin(), indexbank_GPU.end(), output_GPU.begin(), genpoint(point_preload, hashsteps, c, diff));
thrust::host_vector<wide_as_pair_GPU> op_hv = output_GPU;
std::vector<wide_as_pair> output(indexbank.size());
for (int i = 0; i < indexbank.size(); i++)
{
output[i].first.first = op_hv[i].first.first;
output[i].first.second = op_hv[i].first.second;
output[i].second.first = op_hv[i].second.first;
output[i].second.second = op_hv[i].second.second;
}
return output;
}
#if 0
wide_as_pair gensortscan_on_GPU (
unsigned hashsteps,
const uint32_t* const c,
const uint32_t* const point_preload,
const std::vector<uint32_t> &indexbank
){
thrust::device_vector<uint32_t> indexbank_GPU = indexbank;
thrust::device_vector<wide_as_pair_GPU> pointbank_gpu(indexbank.size());
thrust::transform(indexbank_GPU.begin(), indexbank_GPU.end(), output_GPU.begin(), genpoint(point_preload, hashsteps, c));
wide_as_pair output;
output.first.first = op_hv.first.first;
output.first.second = op_hv.first.second;
output.second.first = op_hv.second.first;
output.second.second = op_hv.second.second;
return output;
}
#endif
} //namespace bitecoin
/////////////////////////////////////////////////////////////////////////////////////
const int N = 7;
const int blocksize = 7;
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int testcuda()
{
char a[N] = "Hello ";
int b[N] = {15, 10, 6, 0, -11, 1, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
hipFree( ad );
printf("%s\n", a);
return 0;
}
template <unsigned N>
__global__ void Clockwork(uint32_t* staticbank,
uint32_t* regbank,
uint32_t* sharedbank2,
uint32_t* sharedbank1,
//uint32_t N,
int* bestiBuff,
int* bestiBuffHead)
{
unsigned threadID = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t xstatic = staticbank[threadID];
__shared__ uint32_t sb1[N];
__shared__ uint32_t sb2[N];
for (int i = threadIdx.x; i < N; i += blockDim.x)
{
sb1[i] = sharedbank1[i];
sb2[i] = sharedbank2[i];
}
for (int rbidx = 0; rbidx < N; rbidx += 8)
{
uint32_t rb[8];
for (int i = 0; i < 8; i++)
{
rb[i] = regbank[rbidx + i];
}
for (int i = 0; i < N; i++)
{
uint32_t acc1 = xstatic ^ sb2[i];
for (int j = 0; j < N; j++)
{
uint32_t acc2 = acc1 ^ sb1[i];
for (int k = 0; k < 8; k++)
{
//Only bother checking for a perfect xor, which is the same as testing equality
if (acc2 == rb[i])
{
int storeloc = atomicAdd(bestiBuffHead, 4);
bestiBuff[storeloc] = j;
bestiBuff[storeloc+1] = i;
bestiBuff[storeloc+2] = 8*rbidx + k;
bestiBuff[storeloc+3] = threadID;
}
}
}
}
}
}
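// Illustrative sketch (not part of the original file): host-side setup for the
// hit buffer consumed by Clockwork. Every hit appends four ints via atomicAdd
// on *bestiBuffHead, so the buffer is sized for a bounded number of hits and
// the head starts at zero. The helper name and the maxHits bound are assumptions.
static void prepareClockworkBuffers(int maxHits, int** bestiBuff, int** bestiBuffHead)
{
    hipMalloc(bestiBuff, 4 * maxHits * sizeof(int));
    hipMalloc(bestiBuffHead, sizeof(int));
    hipMemset(*bestiBuffHead, 0, sizeof(int));
}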
void Clockwork_wrapper(uint32_t* staticbank,
uint32_t* regbank,
uint32_t* sharedbank2,
uint32_t* sharedbank1,
//uint32_t N,
int* bestiBuff,
int* bestiBuffHead,
int blocks,
int threadsPerBlock)
{
hipLaunchKernelGGL(( Clockwork <128>) , dim3(blocks), dim3(threadsPerBlock), 0, 0, staticbank, regbank, sharedbank2, sharedbank1, bestiBuff, bestiBuffHead);
} | c687cafcc47be01dfd3fb1b17db2fa1bb8824000.cu | // This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sort.h>
typedef std::pair<std::pair<uint64_t, uint64_t>, std::pair<uint64_t, uint64_t> >
wide_as_pair;
typedef std::pair<wide_as_pair, std::vector<uint32_t> >
point_idx_pair;
typedef thrust::pair<thrust::pair<uint64_t, uint64_t>, thrust::pair<uint64_t, uint64_t> >
wide_as_pair_GPU;
struct bigint_t
{
uint32_t limbs[8];
};
struct halfbigint_t
{
uint32_t limbs[4];
};
/*
((uint64_t)roundInfo->c[3] << 32) + roundInfo->c[2],
((uint64_t)roundInfo->c[1] << 32) + roundInfo->c[0],
std::make_pair(
std::make_pair(
((uint64_t)point_preload.limbs[7] << 32) + point_preload.limbs[6],
((uint64_t)point_preload.limbs[5] << 32) + point_preload.limbs[4]
),
std::make_pair(
((uint64_t)point_preload.limbs[3] << 32) + point_preload.limbs[2],
((uint64_t)point_preload.limbs[1] << 32) + point_preload.limbs[0]
*/
/*! Add together two n-limb numbers, returning the carry limb.
\note the output can also be one of the inputs
*/
__device__
uint32_t wide_add_GPU(unsigned n, uint32_t *res, const uint32_t *a, const uint32_t *b)
{
uint64_t carry=0;
for(unsigned i=0;i<n;i++){
uint64_t tmp=uint64_t(a[i])+b[i]+carry;
res[i]=uint32_t(tmp&0xFFFFFFFFULL);
carry=tmp>>32;
}
return carry;
}
/*! Add a single limb to an n-limb number, returning the carry limb
\note the output can also be the input
*/
__device__
uint32_t wide_add_GPU(unsigned n, uint32_t *res, const uint32_t *a, uint32_t b)
{
uint64_t carry=b;
for(unsigned i=0;i<n;i++){
uint64_t tmp=a[i]+carry;
res[i]=uint32_t(tmp&0xFFFFFFFFULL);
carry=tmp>>32;
}
return carry;
}
/*! Multiply two n-limb numbers to produce a 2n-limb result
\note All the integers must be distinct, the output cannot overlap the input */
__device__
void wide_mul_GPU(unsigned n, uint32_t *res_hi, uint32_t *res_lo, const uint32_t *a, const uint32_t *b)
{
//assert(res_hi!=a && res_hi!=b);
//assert(res_lo!=a && res_lo!=b);
uint64_t carry=0, acc=0;
for(unsigned i=0; i<n; i++){
for(unsigned j=0; j<=i; j++){
//assert( (j+(i-j))==i );
uint64_t tmp=uint64_t(a[j])*b[i-j];
acc+=tmp;
if(acc < tmp)
carry++;
//fprintf(stderr, " (%d,%d)", j,i-j);
}
res_lo[i]=uint32_t(acc&0xFFFFFFFFull);
//fprintf(stderr, "\n %d : %u\n", i, res_lo[i]);
acc= (carry<<32) | (acc>>32);
carry=carry>>32;
}
for(unsigned i=1; i<n; i++){
for(unsigned j=i; j<n; j++){
uint64_t tmp=uint64_t(a[j])*b[n-j+i-1];
acc+=tmp;
if(acc < tmp)
carry++;
//fprintf(stderr, " (%d,%d)", j,n-j+i-1);
//assert( (j+(n-j))==n+i );
}
res_hi[i-1]=uint32_t(acc&0xFFFFFFFFull);
//fprintf(stderr, "\n %d : %u\n", i+n-1, res_hi[i-1]);
acc= (carry<<32) | (acc>>32);
carry=carry>>32;
}
res_hi[n-1]=acc;
}
struct genpoint
{
uint32_t point_preload[8];
const unsigned numsteps;
const unsigned diff;
uint32_t c[4];
genpoint(const uint32_t* const ppre_in, unsigned ns_in, const uint32_t* const c_in, unsigned diff_in) :
numsteps(ns_in), diff(diff_in)
{
for (int i = 0; i < 8; i++)
{
point_preload[i] = ppre_in[i];
}
for (int i = 0; i < 4; i++)
{
c[i] = c_in[i];
}
}
__device__
wide_as_pair_GPU operator()(uint32_t idx){
uint32_t point[2][8];
for (int isdiff = 0; isdiff <= 1 ; isdiff++)
{
point[isdiff][0] = idx + isdiff*diff;
for (int i = 1; i < 8; i++)
{
point[isdiff][i] = point_preload[i];
}
for (int i = 0; i < numsteps; i++)
{
bigint_t tmp;
// tmp=lo(x)*c;
wide_mul_GPU(4, tmp.limbs+4, tmp.limbs, point[isdiff], c);
// [carry,lo(x)] = lo(tmp)+hi(x)
uint32_t carry=wide_add_GPU(4, point[isdiff], tmp.limbs, point[isdiff]+4);
// hi(x) = hi(tmp) + carry
wide_add_GPU(4, point[isdiff]+4, tmp.limbs+4, carry);
// overall: tmp=lo(x)*c; x=tmp+hi(x)
}
}
uint32_t mpoint[8];
for (int i = 0; i < 8; i++)
{
mpoint[i] = point[0][i] ^ point[1][i];
}
return thrust::make_pair(
thrust::make_pair(
((uint64_t)mpoint[7] << 32) + mpoint[6],
((uint64_t)mpoint[5] << 32) + mpoint[4]
),
thrust::make_pair(
((uint64_t)mpoint[3] << 32) + mpoint[2],
((uint64_t)mpoint[1] << 32) + mpoint[0]
));
}
};
//modified from https://devtalk.nvidia.com/default/topic/610914/cuda-programming-and-performance/modular-exponentiation-amp-biginteger/
__device__ __forceinline__ bigint_t add256 (bigint_t a, bigint_t b)
{
bigint_t res;
asm ("{\n\t"
"add.cc.u32 %0, %8, %16; \n\t"
"addc.cc.u32 %1, %9, %17; \n\t"
"addc.cc.u32 %2, %10, %18; \n\t"
"addc.cc.u32 %3, %11, %19; \n\t"
"addc.cc.u32 %4, %12, %20; \n\t"
"addc.cc.u32 %5, %13, %21; \n\t"
"addc.cc.u32 %6, %14, %22; \n\t"
"addc.u32 %7, %15, %23; \n\t"
"}"
: "=r"(res.limbs[0]), "=r"(res.limbs[1]), "=r"(res.limbs[2]), "=r"(res.limbs[3]), "=r"(res.limbs[4]), "=r"(res.limbs[5]), "=r"(res.limbs[6]), "=r"(res.limbs[7])
: "r"(a.limbs[0]), "r"(a.limbs[1]), "r"(a.limbs[2]), "r"(a.limbs[3]),"r"(a.limbs[4]), "r"(a.limbs[5]), "r"(a.limbs[6]), "r"(a.limbs[7]),
"r"(b.limbs[0]), "r"(b.limbs[1]), "r"(b.limbs[2]), "r"(b.limbs[3]),"r"(b.limbs[4]), "r"(b.limbs[5]), "r"(b.limbs[6]), "r"(b.limbs[7]));
return res;
}
//modified from https://devtalk.nvidia.com/default/topic/610914/cuda-programming-and-performance/modular-exponentiation-amp-biginteger/
__device__ __forceinline__ bigint_t add256 (bigint_t a, halfbigint_t b)
{
bigint_t res;
asm ("{\n\t"
"add.cc.u32 %0, %8, %16; \n\t"
"addc.cc.u32 %1, %9, %17; \n\t"
"addc.cc.u32 %2, %10, %18; \n\t"
"addc.cc.u32 %3, %11, %19; \n\t"
"addc.u32 %4, %12, 0; \n\t"
//"addc.cc.u32 %5, %13, %21; \n\t"
"mov.u32 %5, %13 ; \n\t"
//"addc.cc.u32 %6, %14, %22; \n\t"
"mov.u32 %6, %14 ; \n\t"
//"addc.u32 %7, %15, %23; \n\t"
"mov.u32 %7, %15 ; \n\t"
"}"
: "=r"(res.limbs[0]), "=r"(res.limbs[1]), "=r"(res.limbs[2]), "=r"(res.limbs[3]), "=r"(res.limbs[4]), "=r"(res.limbs[5]), "=r"(res.limbs[6]), "=r"(res.limbs[7])
: "r"(a.limbs[0]), "r"(a.limbs[1]), "r"(a.limbs[2]), "r"(a.limbs[3]),"r"(a.limbs[4]), "r"(a.limbs[5]), "r"(a.limbs[6]), "r"(a.limbs[7]),
"r"(b.limbs[0]), "r"(b.limbs[1]), "r"(b.limbs[2]), "r"(b.limbs[3]));
return res;
}
//modified from https://devtalk.nvidia.com/default/topic/610914/cuda-programming-and-performance/modular-exponentiation-amp-biginteger/
__device__ __forceinline__ bigint_t umul256 (bigint_t a, bigint_t b)
{
bigint_t res;
//top 12 13 14 15, 20 21 22 23
asm ("{\n\t"
"mul.lo.u32 %0, %8, %16; \n\t"
"mul.hi.u32 %1, %8, %16; \n\t"
"mad.lo.cc.u32 %1, %8, %17, %1;\n\t"
"madc.hi.u32 %2, %8, %17, 0;\n\t"
"mad.lo.cc.u32 %1, %9, %16, %1;\n\t"
"madc.hi.cc.u32 %2, %9, %16, %2;\n\t"
"madc.hi.u32 %3, %8, %18, 0;\n\t"
"mad.lo.cc.u32 %2, %8, %18, %2;\n\t"
"madc.hi.cc.u32 %3, %9, %17, %3;\n\t"
"madc.hi.u32 %4, %8, %19, 0;\n\t"
"mad.lo.cc.u32 %2, %9, %17, %2;\n\t"
"madc.hi.cc.u32 %3, %10, %16, %3;\n\t"
"madc.hi.cc.u32 %4, %9, %18, %4;\n\t"
"madc.hi.u32 %5, %8, %20, 0;\n\t" // Uses top
"mad.lo.cc.u32 %2, %10, %16, %2;\n\t"
"madc.lo.cc.u32 %3, %8, %19, %3;\n\t"
"madc.hi.cc.u32 %4, %10, %17, %4;\n\t"
"madc.hi.cc.u32 %5, %9, %19, %5;\n\t"
"madc.hi.u32 %6, %8, %21, 0;\n\t" // Uses top
"mad.lo.cc.u32 %3, %9, %18, %3;\n\t"
"madc.hi.cc.u32 %4, %11, %16, %4;\n\t"
"madc.hi.cc.u32 %5, %10, %18, %5;\n\t"
"madc.hi.cc.u32 %6, %9, %20, %6;\n\t" // Uses top
"madc.hi.u32 %7, %8, %22, 0;\n\t" // Uses top
"mad.lo.cc.u32 %3, %10, %17, %3;\n\t"
"madc.lo.cc.u32 %4, %8, %20, %4;\n\t" // Uses top
"madc.hi.cc.u32 %5, %11, %17, %5;\n\t"
"madc.hi.cc.u32 %6, %10, %19, %6;\n\t"
"madc.hi.u32 %7, %9, %21, %7;\n\t" // Uses top
"mad.lo.cc.u32 %3, %11, %16, %3;\n\t"
"madc.lo.cc.u32 %4, %9, %19, %4;\n\t"
"madc.hi.cc.u32 %5, %12, %16, %5;\n\t" // Uses top
"madc.hi.cc.u32 %6, %11, %18, %6;\n\t"
"madc.hi.u32 %7, %10, %20, %7;\n\t" // Uses top
"mad.lo.cc.u32 %4, %10, %18, %4;\n\t"
"madc.lo.cc.u32 %5, %8, %21, %5;\n\t" // Uses top
"madc.hi.cc.u32 %6, %12, %17, %6;\n\t" // Uses top
"madc.hi.u32 %7, %11, %19, %7;\n\t"
"mad.lo.cc.u32 %4, %11, %17, %4;\n\t"
"madc.lo.cc.u32 %5, %9, %20, %5;\n\t" // Uses top
"madc.hi.cc.u32 %6, %13, %16, %6;\n\t" // Uses top
"madc.hi.u32 %7, %12, %18, %7;\n\t" // Uses top
"mad.lo.cc.u32 %4, %12, %16, %4;\n\t" // Uses top
"madc.lo.cc.u32 %5, %10, %19, %5;\n\t"
"madc.lo.cc.u32 %6, %8, %22, %6;\n\t" // Uses top
"madc.hi.u32 %7, %13, %17, %7;\n\t" // Uses top
"mad.lo.cc.u32 %5, %11, %18, %5;\n\t"
"madc.lo.cc.u32 %6, %9, %21, %6;\n\t" // Uses top
"madc.hi.u32 %7, %14, %16, %7;\n\t" // Uses top
"mad.lo.cc.u32 %5, %12, %17, %5;\n\t" // Uses top
"madc.lo.cc.u32 %6, %10, %20, %6;\n\t" // Uses top
"madc.lo.u32 %7, %8, %23, %7;\n\t" // Uses top
"mad.lo.cc.u32 %5, %13, %16, %5;\n\t" // Uses top
"madc.lo.cc.u32 %6, %11, %19, %6;\n\t"
"madc.lo.u32 %7, %9, %22, %7;\n\t" // Uses top
"mad.lo.cc.u32 %6, %12, %18, %6;\n\t" // Uses top
"madc.lo.u32 %7, %10, %21, %7;\n\t" // Uses top
"mad.lo.cc.u32 %6, %13, %17, %6;\n\t" // Uses top
"madc.lo.u32 %7, %11, %20, %7;\n\t" // Uses top
"mad.lo.cc.u32 %6, %14, %16, %6;\n\t" // Uses top
"madc.lo.u32 %7, %12, %19, %7;\n\t" // Uses top
"mad.lo.u32 %7, %13, %18, %7;\n\t" // Uses top
"mad.lo.u32 %7, %14, %17, %7;\n\t" // Uses top
"mad.lo.u32 %7, %15, %16, %7;\n\t" // Uses top
"}"
: "=r"(res.limbs[0]), "=r"(res.limbs[1]), "=r"(res.limbs[2]), "=r"(res.limbs[3]), "=r"(res.limbs[4]), "=r"(res.limbs[5]), "=r"(res.limbs[6]), "=r"(res.limbs[7])
: "r"(a.limbs[0]), "r"(a.limbs[1]), "r"(a.limbs[2]), "r"(a.limbs[3]),"r"(a.limbs[4]), "r"(a.limbs[5]), "r"(a.limbs[6]), "r"(a.limbs[7]),
"r"(b.limbs[0]), "r"(b.limbs[1]), "r"(b.limbs[2]), "r"(b.limbs[3]),"r"(b.limbs[4]), "r"(b.limbs[5]), "r"(b.limbs[6]), "r"(b.limbs[7]));
return res;
}
//modified from https://devtalk.nvidia.com/default/topic/610914/cuda-programming-and-performance/modular-exponentiation-amp-biginteger/
__device__ __forceinline__ bigint_t umul_256_128in (halfbigint_t a, halfbigint_t b)
{
bigint_t res;
//old top 12 13 14 15, 20 21 22 23
asm ("{\n\t"
"mul.lo.u32 %0, %8, %12; \n\t"
"mul.hi.u32 %1, %8, %12; \n\t"
"mad.lo.cc.u32 %1, %8, %13, %1;\n\t"
"madc.hi.u32 %2, %8, %13, 0;\n\t"
"mad.lo.cc.u32 %1, %9, %12, %1;\n\t"
"madc.hi.cc.u32 %2, %9, %12, %2;\n\t"
"madc.hi.u32 %3, %8, %14, 0;\n\t"
"mad.lo.cc.u32 %2, %8, %14, %2;\n\t"
"madc.hi.cc.u32 %3, %9, %13, %3;\n\t"
"madc.hi.u32 %4, %8, %15, 0;\n\t"
"mad.lo.cc.u32 %2, %9, %13, %2;\n\t"
"madc.hi.cc.u32 %3, %10, %12, %3;\n\t"
"madc.hi.cc.u32 %4, %9, %14, %4;\n\t"
//"madc.hi.u32 %5, %8, 0, 0;\n\t" // Uses top
"addc.u32 %5,0,0;\n\t"
"mad.lo.cc.u32 %2, %10, %12, %2;\n\t"
"madc.lo.cc.u32 %3, %8, %15, %3;\n\t"
"madc.hi.cc.u32 %4, %10, %13, %4;\n\t"
"madc.hi.cc.u32 %5, %9, %15, %5;\n\t"
//"madc.hi.u32 %6, %8, 0, 0;\n\t" // Uses top
"addc.u32 %6,0,0;\n\t"
"mad.lo.cc.u32 %3, %9, %14, %3;\n\t"
"madc.hi.cc.u32 %4, %11, %12, %4;\n\t"
"madc.hi.cc.u32 %5, %10, %14, %5;\n\t"
//"madc.hi.cc.u32 %6, %9, 0, 0;\n\t" // Uses top
"addc.u32 %6,%6,0;\n\t"
//"madc.hi.u32 %7, %8, 0, 0;\n\t" // Uses top
"mad.lo.cc.u32 %3, %10, %13, %3;\n\t"
//"madc.lo.cc.u32 %4, %8, 0, %4;\n\t" // Uses top
"addc.u32 %4,%4,0;\n\t"
"mad.hi.cc.u32 %5, %11, %13, %5;\n\t"
"madc.hi.cc.u32 %6, %10, %15, %6;\n\t"
//"madc.hi.u32 %7, %9, 0, %7;\n\t" // Uses top
"addc.u32 %7,0,0;\n\t"
"mad.lo.cc.u32 %3, %11, %12, %3;\n\t"
"madc.lo.cc.u32 %4, %9, %15, %4;\n\t"
//"madc.hi.cc.u32 %5, 0, %12, %5;\n\t" // Uses top
"addc.u32 %5,%5,0;\n\t"
"mad.hi.cc.u32 %6, %11, %14, %6;\n\t"
//"madc.hi.u32 %7, %10, 0, %7;\n\t" // Uses top
"addc.u32 %7,%7,0;\n\t"
"mad.lo.cc.u32 %4, %10, %14, %4;\n\t"
//"madc.lo.cc.u32 %5, %8, 0, %5;\n\t" // Uses top
"addc.u32 %5,%5,0;\n\t"
//"madc.hi.cc.u32 %6, 0, %13, %6;\n\t" // Uses top
"mad.hi.u32 %7, %11, %15, %7;\n\t"
"mad.lo.cc.u32 %4, %11, %13, %4;\n\t"
//"madc.lo.cc.u32 %5, %9, 0, %5;\n\t" // Uses top
"addc.u32 %5,%5,0;\n\t"
//"madc.hi.cc.u32 %6, 0, %12, %6;\n\t" // Uses top
//"madc.hi.u32 %7, 0, %14, %7;\n\t" // Uses top
//"mad.lo.cc.u32 %4, 0, %12, %4;\n\t" // Uses top
"mad.lo.cc.u32 %5, %10, %15, %5;\n\t"
//"madc.lo.cc.u32 %6, %8, 0, %6;\n\t" // Uses top
"addc.u32 %6,%6,0;\n\t"
//"madc.hi.u32 %7, 0, %13, %7;\n\t" // Uses top
"mad.lo.cc.u32 %5, %11, %14, %5;\n\t"
//"madc.lo.cc.u32 %6, %9, 0, %6;\n\t" // Uses top
"addc.u32 %6,%6,0;\n\t"
//"madc.hi.u32 %7, 0, %12, %7;\n\t" // Uses top
//"mad.lo.cc.u32 %5, 0, %13, %5;\n\t" // Uses top
//"madc.lo.cc.u32 %6, %10, 0, %6;\n\t" // Uses top
//"madc.lo.u32 %7, %8, 0, %7;\n\t" // Uses top
//"mad.lo.cc.u32 %5, 0, %12, %5;\n\t" // Uses top
"mad.lo.cc.u32 %6, %11, %15, %6;\n\t"
//"madc.lo.u32 %7, %9, 0, %7;\n\t" // Uses top
"addc.u32 %7,%7,0;\n\t"
//"mad.lo.cc.u32 %6, 0, %14, %6;\n\t" // Uses top
//"madc.lo.u32 %7, %10, 0, %7;\n\t" // Uses top
//"mad.lo.cc.u32 %6, 0, %13, %6;\n\t" // Uses top
//"madc.lo.u32 %7, %11, 0, %7;\n\t" // Uses top
//"mad.lo.cc.u32 %6, 0, %12, %6;\n\t" // Uses top
//"madc.lo.u32 %7, 0, %15, %7;\n\t" // Uses top
//"mad.lo.u32 %7, 0, %14, %7;\n\t" // Uses top
//"mad.lo.u32 %7, 0, %13, %7;\n\t" // Uses top
//"mad.lo.u32 %7, 0, %12, %7;\n\t" // Uses top
"}"
: "=r"(res.limbs[0]), "=r"(res.limbs[1]), "=r"(res.limbs[2]), "=r"(res.limbs[3]), "=r"(res.limbs[4]), "=r"(res.limbs[5]), "=r"(res.limbs[6]), "=r"(res.limbs[7])
: "r"(a.limbs[0]), "r"(a.limbs[1]), "r"(a.limbs[2]), "r"(a.limbs[3]),
"r"(b.limbs[0]), "r"(b.limbs[1]), "r"(b.limbs[2]), "r"(b.limbs[3]));
return res;
}
__global__ void genmpoints_on_GPU_fast (
unsigned hashsteps,
halfbigint_t c,
bigint_t point_preload,
uint32_t diff,
unsigned N,
const uint32_t* const __restrict__ indexbank,
bigint_t* const __restrict__ mpointsout
){
int ID = blockIdx.x * blockDim.x + threadIdx.x;
if(ID < N){
bigint_t points[2];
for (int isdiff = 0; isdiff <= 1 ; isdiff++)
{
points[isdiff] = point_preload;
points[isdiff].limbs[0] = indexbank[ID] + isdiff*diff;
for (int i = 0; i < hashsteps; i++)
{
bigint_t tmp;
halfbigint_t lox = {{ points[isdiff].limbs[0], points[isdiff].limbs[1], points[isdiff].limbs[2], points[isdiff].limbs[3] }};
halfbigint_t hix = {{ points[isdiff].limbs[4], points[isdiff].limbs[5], points[isdiff].limbs[6], points[isdiff].limbs[7] }};
//bigint_t loxtest = {{points[isdiff].limbs[0], points[isdiff].limbs[1], points[isdiff].limbs[2], points[isdiff].limbs[3], 0,0,0,0 }};
//bigint_t ctest = {{ c.limbs[0], c.limbs[1], c.limbs[2], c.limbs[3], 0,0,0,0 }};
// tmp=lo(x)*c;
tmp = umul_256_128in(lox, c);
//bigint_t tmp2 = umul256(loxtest, ctest);
//bool eq[8];
//for (int j = 0; j < 8; j++)
//{
// eq[j] = tmp.limbs[j] == tmp2.limbs[j];
//}
// x=tmp+hi(x)
points[isdiff] = add256(tmp, hix);
// overall: tmp=lo(x)*c; x=tmp+hi(x)
}
}
bigint_t mpoint;
for (int i = 0; i < 8; i++)
{
mpoint.limbs[i] = points[0].limbs[i] ^ points[1].limbs[i];
}
mpointsout[ID] = mpoint;
}
}
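// Hedged sketch, not part of the original miner: one round of the recurrence used
// inside genmpoints_on_GPU_fast above (tmp = lo(x)*c; x = tmp + hi(x)), factored
// into a helper purely for illustration. The name hashstep_once_sketch is an
// assumption; the body mirrors the loop body of the kernel exactly.
__device__ static bigint_t hashstep_once_sketch( bigint_t x, halfbigint_t c )
{
	halfbigint_t lox = {{ x.limbs[0], x.limbs[1], x.limbs[2], x.limbs[3] }};
	halfbigint_t hix = {{ x.limbs[4], x.limbs[5], x.limbs[6], x.limbs[7] }};
	bigint_t tmp = umul_256_128in(lox, c); // 128-bit x 128-bit -> 256-bit product
	return add256(tmp, hix);               // fold the high half of x back in
}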
struct bigint_t_less : public thrust::binary_function<bigint_t,bigint_t,bool>
{
const unsigned len;
bigint_t_less(const unsigned lenin):len(lenin) {}
/*! Function call operator. The return value is <tt>lhs < rhs</tt>.
*/
__host__ __device__ bool operator()(const bigint_t &lhs, const bigint_t &rhs) const {
//return lhs < rhs;
for (int i = len-1; i >= 0; i--)
{
if(lhs.limbs[i] < rhs.limbs[i])
return true;
if(lhs.limbs[i] > rhs.limbs[i])
return false;
}
//all equal, so not strictly less than
return false;
}
}; // end less
struct bigint_t_less_idx : public thrust::binary_function<uint32_t,uint32_t,bool>
{
const unsigned len;
const bigint_t* const theBigint;
bigint_t_less_idx(const unsigned lenin, const bigint_t* const theBigintin):len(lenin), theBigint(theBigintin) {}
/*! Function call operator. The return value is <tt>lhs < rhs</tt>.
*/
__host__ __device__ bool operator()(const uint32_t &lhs, const uint32_t &rhs) const {
//return lhs < rhs;
for (int i = len-1; i >= 0; i--)
{
if(theBigint[lhs].limbs[i] < theBigint[rhs].limbs[i])
return true;
if(theBigint[lhs].limbs[i] > theBigint[rhs].limbs[i])
return false;
}
//all equal, so not strictly less than
return false;
}
}; // end less
int testcuda();
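// Pulls limb `depth` of the m-point selected by map[ID] into a flat key array so
// that thrust::stable_sort_by_key can reorder the permutation one limb at a time
// (see gensort_GPU below).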
__global__ void gather_unzip(
const uint32_t N,
const uint32_t depth,
const uint32_t* const __restrict__ map,
const bigint_t* const __restrict__ mpointsin,
uint32_t* const __restrict__ unzipout
){
int ID = blockIdx.x * blockDim.x + threadIdx.x;
if(ID < N){
unzipout[ID] = mpointsin[map[ID]].limbs[depth];
}
}
//Diff implicit!
struct indicies
{
uint32_t parts[8];
};
struct point_idx{
bigint_t point;
indicies idx;
};
//Per-pass contract: N shrinks by one and nPopulated doubles on each successive pass (see the sketch after this kernel).
__global__ void xor_points_unconditional(
const uint32_t N,
const uint32_t depth,
const uint32_t nPopulated,
const bigint_t* const __restrict__ xorin,
bigint_t* const __restrict__ xoredout,
const indicies* const __restrict__ idxin,
indicies* const __restrict__ idxout
){
int ID = blockIdx.x * blockDim.x + threadIdx.x;
if(ID < N-1){
for (int i = 0; i < depth; i++)
{
xoredout[ID].limbs[i] = xorin[ID].limbs[i] ^ xorin[ID+1].limbs[i];
}
int j = 0;
for (int i = 0; i < nPopulated; i++)
{
idxout[ID].parts[j++] = idxin[ID].parts[i];
idxout[ID].parts[j++] = idxin[ID+1].parts[i];
}
}
}
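// Hedged usage sketch, not present in the original source: how the per-pass
// contract noted above (N shrinks by one, nPopulated doubles) would presumably be
// driven from the host with ping-ponged buffers. The function name, parameters and
// block size are assumptions only.
static void xor_collapse_passes_sketch( uint32_t N, uint32_t depth, unsigned passes,
	bigint_t* pointsA, bigint_t* pointsB,
	indicies* idxA, indicies* idxB )
{
	uint32_t nPopulated = 1; // each point initially carries one index
	// parts[8] can hold at most 8 indices, so stop once 2*nPopulated would exceed it
	for (unsigned p = 0; p < passes && N > 1 && nPopulated <= 4; p++)
	{
		unsigned nblocks = (N + 127) / 128;
		xor_points_unconditional <<<nblocks, 128>>> (N, depth, nPopulated,
			pointsA, pointsB, idxA, idxB);
		bigint_t* tp = pointsA; pointsA = pointsB; pointsB = tp; // ping-pong points
		indicies* ti = idxA; idxA = idxB; idxB = ti;             // ping-pong indices
		N -= 1;          // the kernel writes N-1 adjacent xors
		nPopulated *= 2; // each output merges the indices of two inputs
	}
}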
__global__ void shove_flat_idx_into_struct(
const uint32_t N,
const uint32_t* const __restrict__ idxin,
indicies* const __restrict__ idxout
){
int ID = blockIdx.x * blockDim.x + threadIdx.x;
if(ID < N){
idxout[ID].parts[0] = idxin[ID];
}
};
namespace bitecoin{
std::pair<std::vector<bigint_t>, std::vector<uint32_t>> gensort_GPU (
const unsigned hashsteps,
const halfbigint_t c,
const bigint_t point_preload,
const uint32_t diff,
const std::vector<uint32_t> &indexbank
){
cudaError e;
unsigned N = indexbank.size();
uint32_t* idxbankGPU, *idxbankGPUout;
if(e = cudaMalloc(&idxbankGPU, N * sizeof(uint32_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
if(e = cudaMalloc(&idxbankGPUout, N * sizeof(uint32_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
if(e = cudaMemcpy(idxbankGPU, indexbank.data(), N * sizeof(uint32_t), cudaMemcpyHostToDevice)) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
// indicies* idxa, *idxb;
// if(e = cudaMalloc(&idxa, N * sizeof(indicies))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
// if(e = cudaMalloc(&idxb, N * sizeof(indicies))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
// auto idxaptr = thrust::device_pointer_cast(idxa);
// auto idxbptr = thrust::device_pointer_cast(idxb);
bigint_t* mpointsGPUa, *mpointsGPUb;
if(e = cudaMalloc(&mpointsGPUa, N * sizeof(bigint_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
if(e = cudaMalloc(&mpointsGPUb, N * sizeof(bigint_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
auto mpointsGPUtptra = thrust::device_pointer_cast(mpointsGPUa);
auto mpointsGPUtptrb = thrust::device_pointer_cast(mpointsGPUb);
//gen
unsigned nblocks = std::ceil((double)N/128);
genmpoints_on_GPU_fast <<<nblocks, 128>>> (hashsteps, c, point_preload, diff, N, idxbankGPU, mpointsGPUa);
if(e = cudaGetLastError()) printf("Cuda error %d on line %d\n", e, __LINE__);
//shove_flat_idx_into_struct <<<nblocks, 128>>> (N, idxbankGPU, idxa);
//sort
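	//Least-significant-limb-first radix sort of the m-points: map starts as the
	//identity permutation; each pass gathers one limb through the current map and
	//stable-sorts the map by it, so the ordering from earlier (less significant)
	//limbs is preserved.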
uint32_t* map;
if(e = cudaMalloc(&map, N * sizeof(uint32_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
auto maptptr = thrust::device_pointer_cast(map);
thrust::sequence(maptptr, maptptr+N);
uint32_t* currlimb;
if(e = cudaMalloc(&currlimb, N * sizeof(uint32_t))) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
auto currlimbptr = thrust::device_pointer_cast(currlimb);
for (int i = 0; i < 7; i++)
{
gather_unzip <<<nblocks, 128>>> (N,i,map,mpointsGPUa,currlimb);
if(e = cudaGetLastError()) printf("Cuda error %d on line %d\n", e, __LINE__);
thrust::stable_sort_by_key(currlimbptr, currlimbptr+N, maptptr);
}
//gather sort results
thrust::gather(maptptr, maptptr+N, mpointsGPUtptra, mpointsGPUtptrb);
auto idxbankGPUptr = thrust::device_pointer_cast(idxbankGPU);
auto idxbankGPUoutptr = thrust::device_pointer_cast(idxbankGPUout);
thrust::gather(maptptr, maptptr+N, idxbankGPUptr, idxbankGPUoutptr);
//xor_points_unconditional(N, 8, 1, mptsGPUoutvecRaw, m2pointsGPU, idxa, idxb);
//std::swap(idxa,idxb);
//std::swap(m2pointsGPU, mpointsGPU);
//DEBUG
//std::vector<uint32_t> testmap(N);
//if(e = cudaMemcpy(testmap.data(), map, N * sizeof(uint32_t), cudaMemcpyDeviceToHost)) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
std::vector<bigint_t> mpointsHost(N);
if(e = cudaMemcpy(mpointsHost.data(), mpointsGPUb, N * sizeof(bigint_t), cudaMemcpyDeviceToHost)) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
std::vector<uint32_t> idxHost(N);
if(e = cudaMemcpy(idxHost.data(), idxbankGPUout, N * sizeof(uint32_t), cudaMemcpyDeviceToHost)) fprintf(stderr, "Cuda error %d on line %d\n", e, __LINE__);
cudaFree(idxbankGPU);
cudaFree(idxbankGPUout);
//cudaFree(idxa);
//cudaFree(idxb);
cudaFree(mpointsGPUa);
cudaFree(mpointsGPUb);
cudaFree(map);
cudaFree(currlimb);
return std::make_pair(mpointsHost, idxHost);
}
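// Hedged usage sketch, not in the original source: driving gensort_GPU with a
// trivial index bank. The bank size is an illustrative placeholder; the hash
// parameters are simply forwarded, and the function name is an assumption.
inline std::pair<std::vector<bigint_t>, std::vector<uint32_t>> gensort_GPU_example(
	unsigned hashsteps, halfbigint_t c, bigint_t point_preload, uint32_t diff)
{
	std::vector<uint32_t> indexbank(1u << 16);
	for (uint32_t i = 0; i < indexbank.size(); i++) indexbank[i] = i; // 0,1,2,...
	// .first holds the m-points in ascending order, .second the index that produced each
	return gensort_GPU(hashsteps, c, point_preload, diff, indexbank);
}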
std::vector<wide_as_pair> genmpoints_on_GPU (
unsigned hashsteps,
const uint32_t* const c,
const uint32_t* const point_preload,
const std::vector<uint32_t> &indexbank,
unsigned diff
){
thrust::device_vector<uint32_t> indexbank_GPU = indexbank;
thrust::device_vector<wide_as_pair_GPU> output_GPU(indexbank.size());
thrust::transform(indexbank_GPU.begin(), indexbank_GPU.end(), output_GPU.begin(), genpoint(point_preload, hashsteps, c, diff));
thrust::host_vector<wide_as_pair_GPU> op_hv = output_GPU;
std::vector<wide_as_pair> output(indexbank.size());
for (int i = 0; i < indexbank.size(); i++)
{
output[i].first.first = op_hv[i].first.first;
output[i].first.second = op_hv[i].first.second;
output[i].second.first = op_hv[i].second.first;
output[i].second.second = op_hv[i].second.second;
}
return output;
}
#if 0
wide_as_pair gensortscan_on_GPU (
unsigned hashsteps,
const uint32_t* const c,
const uint32_t* const point_preload,
const std::vector<uint32_t> &indexbank
){
thrust::device_vector<uint32_t> indexbank_GPU = indexbank;
thrust::device_vector<wide_as_pair_GPU> pointbank_gpu(indexbank.size());
thrust::transform(indexbank_GPU.begin(), indexbank_GPU.end(), output_GPU.begin(), genpoint(point_preload, hashsteps, c));
wide_as_pair output;
output.first.first = op_hv.first.first;
output.first.second = op_hv.first.second;
output.second.first = op_hv.second.first;
output.second.second = op_hv.second.second;
return output;
}
#endif
} //namespace bitecoin
/////////////////////////////////////////////////////////////////////////////////////
const int N = 7;
const int blocksize = 7;
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int testcuda()
{
char a[N] = "Hello ";
int b[N] = {15, 10, 6, 0, -11, 1, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
	cudaFree( ad );
	cudaFree( bd );
printf("%s\n", a);
return 0;
}
template <unsigned N>
__global__ void Clockwork(uint32_t* staticbank,
uint32_t* regbank,
uint32_t* sharedbank2,
uint32_t* sharedbank1,
//uint32_t N,
int* bestiBuff,
int* bestiBuffHead)
{
unsigned threadID = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t xstatic = staticbank[threadID];
__shared__ uint32_t sb1[N];
__shared__ uint32_t sb2[N];
for (int i = threadIdx.x; i < N; i += blockDim.x)
{
sb1[i] = sharedbank1[i];
sb2[i] = sharedbank2[i];
	}
	__syncthreads(); // the shared banks must be fully populated before any thread reads them
for (int rbidx = 0; rbidx < N; rbidx += 8)
{
uint32_t rb[8];
for (int i = 0; i < 8; i++)
{
rb[i] = regbank[rbidx + i];
}
for (int i = 0; i < N; i++)
{
uint32_t acc1 = xstatic ^ sb2[i];
for (int j = 0; j < N; j++)
{
				uint32_t acc2 = acc1 ^ sb1[j];
for (int k = 0; k < 8; k++)
{
					//Only bother checking for a perfect xor, i.e. equality
					if (acc2 == rb[k])
{
int storeloc = atomicAdd(bestiBuffHead, 4);
bestiBuff[storeloc] = j;
bestiBuff[storeloc+1] = i;
						bestiBuff[storeloc+2] = rbidx + k; // rbidx already counts regbank elements
bestiBuff[storeloc+3] = threadID;
}
}
}
}
}
}
void Clockwork_wrapper(uint32_t* staticbank,
uint32_t* regbank,
uint32_t* sharedbank2,
uint32_t* sharedbank1,
//uint32_t N,
int* bestiBuff,
int* bestiBuffHead,
int blocks,
int threadsPerBlock)
{
Clockwork <128> <<<blocks, threadsPerBlock>>> (staticbank, regbank, sharedbank2, sharedbank1, bestiBuff, bestiBuffHead);
}
fb5725f10800bc69ee9852696ff299bcf8a31088.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BlockSize 32
#define kInfinity (1.0E37)
#define kMinExitingNormalCosine (1E-3)
typedef float G4double;
typedef float G4float;
typedef int G4int;
typedef int G4bool;
typedef long G4long;
//G4double kInfinity = 1.0E37;
// int BlockSize = 32;
int Multiplier = 4;
G4double twopi = 2.0*3.14159265358979323846264338327;
// G4double kMinExitingNormalCosine = 1E-3;
typedef enum {kOutside,kSurface,kInside} EInside;
typedef enum {kNormal,kReplica,kParameterised} EVolume;
typedef enum {kXAxis,kYAxis,kZAxis,kRho,kRadial3D,kPhi,kUndefined} EAxis;
typedef enum { kBox = 0 , kOrb, kTubs, kCons, kPolyCone, Solidcount } ESolid;
typedef struct
{
G4double x,y,z;
G4double w;
}
G4ThreeVector;
__device__
G4ThreeVector G4ThreeVector_create( G4double x, G4double y, G4double z )
{
G4ThreeVector v =
{x,y,z,0};
return v;
}
__device__
G4ThreeVector G4ThreeVector_saxpy( G4double a, G4ThreeVector x, G4ThreeVector y )
{
return G4ThreeVector_create(
a*x.x + y.x,
a*x.y + y.y,
a*x.z + y.z );
}
__device__
G4ThreeVector G4ThreeVector_sum( G4ThreeVector a, G4ThreeVector b )
{
return G4ThreeVector_create( a.x+b.x, a.y+b.y, a.z+b.z );
}
__device__
G4ThreeVector G4ThreeVector_subtract( G4ThreeVector a, G4ThreeVector b )
{
return G4ThreeVector_create( a.x-b.x, a.y-b.y, a.z-b.z );
}
__device__
G4ThreeVector G4ThreeVector_sum_assign( G4ThreeVector *This, G4ThreeVector b )
{
(*This).x += b.x;
(*This).y += b.y;
(*This).z += b.z;
return *This;
}
__device__
G4ThreeVector G4ThreeVector_subtract_assign( G4ThreeVector *This, G4ThreeVector b )
{
(*This).x -= b.x;
(*This).y -= b.y;
(*This).z -= b.z;
return *This;
}
__device__
G4ThreeVector G4ThreeVector_mult_assign( G4ThreeVector *This, G4double m )
{
(*This).x *= m;
(*This).y *= m;
(*This).z *= m;
return *This;
}
__device__
G4ThreeVector G4ThreeVector_negation( G4ThreeVector a )
{
return G4ThreeVector_create( -a.x, -a.y, -a.z );
}
__device__
G4double G4ThreeVector_mag2( G4ThreeVector v )
{
return v.x*v.x + v.y*v.y + v.z*v.z;
}
__device__
G4double G4ThreeVector_mag( G4ThreeVector v )
{
return sqrt(G4ThreeVector_mag2(v));
}
__device__
G4double G4ThreeVector_dot( G4ThreeVector a, G4ThreeVector b )
{
return a.x*b.x + a.y*b.y + a.z*b.z;
}
__device__
G4ThreeVector G4ThreeVector_cross( G4ThreeVector a, G4ThreeVector p )
{
return G4ThreeVector_create(
a.y*p.z-p.y*a.z,
a.z*p.x-p.z*a.x,
a.x*p.y-p.x*a.y );
}
__device__
G4ThreeVector G4ThreeVector_mult( G4ThreeVector a, G4double m )
{
return G4ThreeVector_create( a.x*m, a.y*m, a.z*m );
}
__device__
G4ThreeVector G4ThreeVector_unit( G4ThreeVector v )
{
G4double l = G4ThreeVector_mag(v);
if ( l > 0 )
return G4ThreeVector_mult( v, 1.0/l );
return v;
}
__device__
G4bool G4ThreeVector_equal( G4ThreeVector a, G4ThreeVector b )
{
return a.x == b.x && a.y == b.y && a.z == b.z;
}
__device__
G4double G4ThreeVector_diff2( G4ThreeVector a, G4ThreeVector b )
{
return G4ThreeVector_mag2( G4ThreeVector_subtract(a,b) );
}
__device__
G4double G4ThreeVector_coord( G4ThreeVector v, EAxis axis )
{
switch( axis )
{
case kXAxis: return v.x;
case kYAxis: return v.y;
case kZAxis: return v.z;
default:
(void)0;
return 0;
}
}
__device__
void G4ThreeVector_set_coord( G4ThreeVector *v, EAxis axis, G4double val )
{
switch( axis )
{
case kXAxis: v->x = val; break;
case kYAxis: v->y = val; break;
case kZAxis: v->z = val; break;
default:
(void)0;
break;
}
}
typedef struct
{
G4ThreeVector pos, dir;
}
StubParticle;
typedef struct
{
G4ThreeVector pos, dir;
G4double t;
}
ParticleWithLifetime;
typedef StubParticle Particle;
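// Work-efficient (Blelloch-style) exclusive prefix sum over a single thread block:
// an up-sweep/reduce phase followed by a down-sweep. Assumes `length` is a power of
// two and that one block spans the whole array, since only __syncthreads() is used
// for synchronisation.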
__device__ void Prefix_Sum ( int * input, int * output, int length)
{
int tid = (blockIdx.x * blockDim.x + threadIdx.x);
int offset = 1;
if ( tid< length)
output[tid] = input[ tid ];
for(int d = length>>1; d > 0; d >>=1)
{
__syncthreads();
if(tid<d)
{
int ai = offset*(2*tid + 1) - 1;
int bi = offset*(2*tid + 2) - 1;
output[bi] += output[ai];
}
offset *= 2;
}
if(tid == 0)
{
output[length - 1] = 0;
}
for(int d = 1; d < length ; d *= 2)
{
offset >>=1;
__syncthreads();
if(tid < d)
{
int ai = offset*(2*tid + 1) - 1;
int bi = offset*(2*tid + 2) - 1;
			int t = output[ai]; // the scan works on ints, so keep the temporary integral
output[ai] = output[bi];
output[bi] += t;
}
}
__syncthreads();
}
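// Tree reduction with logical OR over a single thread block: returns true if any
// entry of noStepArray is set. The reduction is done in place, so the contents of
// noStepArray are clobbered.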
__device__
G4bool NoStepReduction( G4bool * noStepArray, int length )
{
int tid = (blockIdx.x * blockDim.x + threadIdx.x);
int offset = 1;
for(int d = length>>1; d > 0; d >>=1)
{
__syncthreads();
if(tid<d)
{
int ai = offset*(2*tid + 1) - 1;
int bi = offset*(2*tid + 2) - 1;
noStepArray[bi] = (noStepArray[ai] || noStepArray[bi]);
}
offset *= 2;
}
G4bool result = noStepArray[ length - 1 ];
__syncthreads();
return result;
}
typedef struct
{
G4double
rxx, rxy, rxz,
ryx, ryy, ryz,
rzx, rzy, rzz;
G4double align;
}
G4RotationMatrix;
__device__
G4RotationMatrix G4RotationMatrix_create_elements
(G4double mxx, G4double mxy, G4double mxz,
G4double myx, G4double myy, G4double myz,
G4double mzx, G4double mzy, G4double mzz)
{
G4RotationMatrix r =
{ mxx,mxy,mxz, myx,myy,myz, mzx,mzy,mzz
, 0
};
return r;
}
__device__
G4ThreeVector G4RotationMatrix_apply (const G4RotationMatrix *This, G4ThreeVector p)
{
return G4ThreeVector_create(
This->rxx*p.x + This->rxy*p.y + This->rxz*p.z,
This->ryx*p.x + This->ryy*p.y + This->ryz*p.z,
This->rzx*p.x + This->rzy*p.y + This->rzz*p.z);
}
__device__
G4RotationMatrix G4RotationMatrix_mult (const G4RotationMatrix *This, const G4RotationMatrix *other)
{
return G4RotationMatrix_create_elements(
This->rxx*(*other).rxx + This->rxy*(*other).ryx + This->rxz*(*other).rzx,
This->rxx*(*other).rxy + This->rxy*(*other).ryy + This->rxz*(*other).rzy,
This->rxx*(*other).rxz + This->rxy*(*other).ryz + This->rxz*(*other).rzz,
This->ryx*(*other).rxx + This->ryy*(*other).ryx + This->ryz*(*other).rzx,
This->ryx*(*other).rxy + This->ryy*(*other).ryy + This->ryz*(*other).rzy,
This->ryx*(*other).rxz + This->ryy*(*other).ryz + This->ryz*(*other).rzz,
This->rzx*(*other).rxx + This->rzy*(*other).ryx + This->rzz*(*other).rzx,
This->rzx*(*other).rxy + This->rzy*(*other).ryy + This->rzz*(*other).rzy,
This->rzx*(*other).rxz + This->rzy*(*other).ryz + This->rzz*(*other).rzz );
}
__device__
G4RotationMatrix G4RotationMatrix_transform(G4RotationMatrix *This, const G4RotationMatrix *other)
{
*This = G4RotationMatrix_mult(other,This);
return *This;
}
__device__
G4RotationMatrix G4RotationMatrix_inverse(const G4RotationMatrix *This)
{
return G4RotationMatrix_create_elements(
This->rxx, This->ryx, This->rzx,
This->rxy, This->ryy, This->rzy,
This->rxz, This->ryz, This->rzz );
}
__device__
G4RotationMatrix G4RotationMatrix_invert(G4RotationMatrix *This)
{
return *This = G4RotationMatrix_inverse(This);
}
typedef struct
{
G4double rxx,rxy,rxz;
G4double ryx,ryy,ryz;
G4double rzx,rzy,rzz;
G4double tx,ty,tz;
}
G4AffineTransform;
__device__
void G4AffineTransform_ctor_id( G4AffineTransform *This )
{
This->rxx = 1;
This->ryy = 1;
This->rzz = 1;
This->rxy = 0;
This->rxz = 0;
This->ryx = 0;
This->ryz = 0;
This->rzx = 0;
This->rzy = 0;
This->tx = 0;
This->ty = 0;
This->tz = 0;
}
__device__
void G4AffineTransform_ctor_vector( G4AffineTransform *This, G4ThreeVector tlate)
{
G4AffineTransform_ctor_id( This );
This->tx = tlate.x;
This->ty = tlate.y;
This->tz = tlate.z;
}
__device__
void G4AffineTransform_ctor_matrix( G4AffineTransform *This, G4RotationMatrix rot)
{
G4AffineTransform_ctor_id( This );
This->rxx = rot.rxx;
This->ryy = rot.ryy;
This->rzz = rot.rzz;
This->rxy = rot.rxy;
This->rxz = rot.rxz;
This->ryx = rot.ryx;
This->ryz = rot.ryz;
This->rzx = rot.rzx;
This->rzy = rot.rzy;
}
__device__
void G4AffineTransform_ctor_full(
G4AffineTransform *This, G4RotationMatrix rot, G4ThreeVector tlate )
{
This->rxx = rot.rxx;
This->ryy = rot.ryy;
This->rzz = rot.rzz;
This->rxy = rot.rxy;
This->rxz = rot.rxz;
This->ryx = rot.ryx;
This->ryz = rot.ryz;
This->rzx = rot.rzx;
This->rzy = rot.rzy;
This->tx = tlate.x;
This->ty = tlate.y;
This->tz = tlate.z;
}
__device__
void G4AffineTransform_ctor_ptr(
G4AffineTransform *This, const G4RotationMatrix *rot, G4ThreeVector tlate )
{
if (rot) G4AffineTransform_ctor_full( This, *rot, tlate );
else G4AffineTransform_ctor_vector( This, tlate );
}
__device__
void G4AffineTransform_ctor_elements(
G4AffineTransform *This,
const G4double prxx,const G4double prxy,const G4double prxz,
const G4double pryx,const G4double pryy,const G4double pryz,
const G4double przx,const G4double przy,const G4double przz,
const G4double ptx,const G4double pty,const G4double ptz)
{
This->rxx = prxx;
This->ryy = pryy;
This->rzz = przz;
This->rxy = prxy;
This->rxz = prxz;
This->ryx = pryx;
This->ryz = pryz;
This->rzx = przx;
This->rzy = przy;
This->tx = ptx;
This->ty = pty;
This->tz = ptz;
}
__device__
G4AffineTransform G4AffineTransform_create_id(void)
{
G4AffineTransform t;
G4AffineTransform_ctor_id(&t);
return t;
}
__device__
G4AffineTransform G4AffineTransform_create_vector(G4ThreeVector tlate)
{
G4AffineTransform t;
G4AffineTransform_ctor_vector(&t,tlate);
return t;
}
__device__
G4AffineTransform G4AffineTransform_create_matrix( G4RotationMatrix rot )
{
G4AffineTransform t;
G4AffineTransform_ctor_matrix(&t,rot);
return t;
}
__device__
G4AffineTransform G4AffineTransform_create_full(
G4RotationMatrix rot, G4ThreeVector tlate )
{
G4AffineTransform t;
G4AffineTransform_ctor_full(&t,rot,tlate);
return t;
}
__device__
G4AffineTransform G4AffineTransform_create_ptr(
const G4RotationMatrix *rot, G4ThreeVector tlate )
{
G4AffineTransform t;
G4AffineTransform_ctor_ptr(&t,rot,tlate);
return t;
}
__device__
G4AffineTransform G4AffineTransform_create_elements(
const G4double prxx,const G4double prxy,const G4double prxz,
const G4double pryx,const G4double pryy,const G4double pryz,
const G4double przx,const G4double przy,const G4double przz,
const G4double ptx,const G4double pty,const G4double ptz)
{
G4AffineTransform t;
G4AffineTransform_ctor_elements(&t,
prxx,prxy,prxz,
pryx,pryy,pryz,
przx,przy,przz,
ptx,pty,ptz);
return t;
}
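// Sets *This to (*ptrtf1) * Inverse(*ptrtf2) without materialising the inverse,
// mirroring G4AffineTransform::InverseProduct; used further down when a navigation
// level is built relative to the level above it.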
__device__ G4AffineTransform
G4AffineTransform_InverseProduct(
G4AffineTransform *This,
const G4AffineTransform* ptrtf1,
const G4AffineTransform* ptrtf2)
{
G4double itf2tx = - (*ptrtf2).tx*(*ptrtf2).rxx - (*ptrtf2).ty*(*ptrtf2).rxy - (*ptrtf2).tz*(*ptrtf2).rxz;
G4double itf2ty = - (*ptrtf2).tx*(*ptrtf2).ryx - (*ptrtf2).ty*(*ptrtf2).ryy - (*ptrtf2).tz*(*ptrtf2).ryz;
G4double itf2tz = - (*ptrtf2).tx*(*ptrtf2).rzx - (*ptrtf2).ty*(*ptrtf2).rzy - (*ptrtf2).tz*(*ptrtf2).rzz;
This->rxx=(*ptrtf1).rxx*(*ptrtf2).rxx+(*ptrtf1).rxy*(*ptrtf2).rxy+(*ptrtf1).rxz*(*ptrtf2).rxz;
This->rxy=(*ptrtf1).rxx*(*ptrtf2).ryx+(*ptrtf1).rxy*(*ptrtf2).ryy+(*ptrtf1).rxz*(*ptrtf2).ryz;
This->rxz=(*ptrtf1).rxx*(*ptrtf2).rzx+(*ptrtf1).rxy*(*ptrtf2).rzy+(*ptrtf1).rxz*(*ptrtf2).rzz;
This->ryx=(*ptrtf1).ryx*(*ptrtf2).rxx+(*ptrtf1).ryy*(*ptrtf2).rxy+(*ptrtf1).ryz*(*ptrtf2).rxz;
This->ryy=(*ptrtf1).ryx*(*ptrtf2).ryx+(*ptrtf1).ryy*(*ptrtf2).ryy+(*ptrtf1).ryz*(*ptrtf2).ryz;
This->ryz=(*ptrtf1).ryx*(*ptrtf2).rzx+(*ptrtf1).ryy*(*ptrtf2).rzy+(*ptrtf1).ryz*(*ptrtf2).rzz;
This->rzx=(*ptrtf1).rzx*(*ptrtf2).rxx+(*ptrtf1).rzy*(*ptrtf2).rxy+(*ptrtf1).rzz*(*ptrtf2).rxz;
This->rzy=(*ptrtf1).rzx*(*ptrtf2).ryx+(*ptrtf1).rzy*(*ptrtf2).ryy+(*ptrtf1).rzz*(*ptrtf2).ryz;
This->rzz=(*ptrtf1).rzx*(*ptrtf2).rzx+(*ptrtf1).rzy*(*ptrtf2).rzy+(*ptrtf1).rzz*(*ptrtf2).rzz;
This->tx=(*ptrtf1).tx*(*ptrtf2).rxx+(*ptrtf1).ty*(*ptrtf2).rxy+(*ptrtf1).tz*(*ptrtf2).rxz+itf2tx;
This->ty=(*ptrtf1).tx*(*ptrtf2).ryx+(*ptrtf1).ty*(*ptrtf2).ryy+(*ptrtf1).tz*(*ptrtf2).ryz+itf2ty;
This->tz=(*ptrtf1).tx*(*ptrtf2).rzx+(*ptrtf1).ty*(*ptrtf2).rzy+(*ptrtf1).tz*(*ptrtf2).rzz+itf2tz;
return *This;
}
__device__
G4ThreeVector G4AffineTransform_TransformPoint(const G4AffineTransform *This, G4ThreeVector vec)
{
return G4ThreeVector_create(
vec.x*This->rxx + vec.y*This->ryx + vec.z*This->rzx + This->tx,
vec.x*This->rxy + vec.y*This->ryy + vec.z*This->rzy + This->ty,
vec.x*This->rxz + vec.y*This->ryz + vec.z*This->rzz + This->tz );
}
__device__
G4ThreeVector G4AffineTransform_TransformAxis(const G4AffineTransform *This, G4ThreeVector axis)
{
return G4ThreeVector_create(
axis.x*This->rxx + axis.y*This->ryx + axis.z*This->rzx,
axis.x*This->rxy + axis.y*This->ryy + axis.z*This->rzy,
axis.x*This->rxz + axis.y*This->ryz + axis.z*This->rzz );
}
__device__
G4AffineTransform G4AffineTransform_Inverse(const G4AffineTransform *This)
{
return G4AffineTransform_create_elements(
This->rxx, This->ryx, This->rzx,
This->rxy, This->ryy, This->rzy,
This->rxz, This->ryz, This->rzz,
-This->tx*This->rxx - This->ty*This->rxy - This->tz*This->rxz,
-This->tx*This->ryx - This->ty*This->ryy - This->tz*This->ryz,
-This->tx*This->rzx - This->ty*This->rzy - This->tz*This->rzz );
}
__device__
G4AffineTransform G4AffineTransform_Invert(G4AffineTransform *This)
{
G4double v1 = -This->tx*This->rxx - This->ty*This->rxy - This->tz*This->rxz;
G4double v2 = -This->tx*This->ryx - This->ty*This->ryy - This->tz*This->ryz;
G4double v3 = -This->tx*This->rzx - This->ty*This->rzy - This->tz*This->rzz;
This->tx=v1; This->ty=v2; This->tz=v3;
G4double tmp1=This->ryx; This->ryx=This->rxy; This->rxy=tmp1;
G4double tmp2=This->rzx; This->rzx=This->rxz; This->rxz=tmp2;
G4double tmp3=This->rzy; This->rzy=This->ryz; This->ryz=tmp3;
return *This;
}
__device__
G4ThreeVector G4AffineTransform_NetTranslation(const G4AffineTransform *This)
{
return G4ThreeVector_create(This->tx,This->ty,This->tz);
}
__device__
G4bool G4AffineTransform_IsRotated(const G4AffineTransform *This)
{
return (This->rxx==1.0 && This->ryy==1.0 && This->rzz==1.0) ? false : true;
}
typedef struct
{
G4double property;
}
StubMaterial;
struct G4SmartVoxelProxy;
typedef struct
{
G4double fmaxExtent;
G4double fminExtent;
struct G4SmartVoxelProxy* * fslices;
G4int fNumSlices;
G4int fminEquivalent;
G4int fmaxEquivalent;
EAxis faxis;
EAxis fparamAxis;
}
G4SmartVoxelHeader;
typedef struct
{
G4int *fcontents;
G4int fminEquivalent;
G4int fmaxEquivalent;
G4int fNumContents;
}
G4SmartVoxelNode;
typedef struct G4SmartVoxelProxy
{
G4SmartVoxelHeader* fHeader;
G4SmartVoxelNode* fNode;
}
G4SmartVoxelProxy;
__device__
void G4VoxelNode_ctor( G4SmartVoxelNode *This, G4int no )
{
This->fmaxEquivalent = no;
This->fminEquivalent = no;
This->fcontents = 0;
This->fNumContents = 0;
}
__device__ G4int
G4VoxelNode_GetNoContained( const G4SmartVoxelNode *This)
{
return This->fNumContents;
}
__device__ G4int
G4VoxelNode_GetVolume(
const G4SmartVoxelNode *This, G4int contentNo)
{
(void)0;
return This->fcontents[contentNo];
}
__device__ G4int
G4VoxelNode_GetMaxEquivalentSliceNo(
const G4SmartVoxelNode *This )
{
return This->fmaxEquivalent;
}
__device__ G4int
G4VoxelNode_GetMinEquivalentSliceNo(
const G4SmartVoxelNode *This )
{
return This->fminEquivalent;
}
__device__ G4int
G4VoxelHeader_GetMaxEquivalentSliceNo(
const G4SmartVoxelHeader *This )
{
return This->fmaxEquivalent;
}
__device__ G4int
G4VoxelHeader_GetMinEquivalentSliceNo(
const G4SmartVoxelHeader *This )
{
return This->fminEquivalent;
}
__device__ EAxis
G4VoxelHeader_GetAxis( const G4SmartVoxelHeader *This )
{
return This->faxis;
}
__device__ G4int
G4VoxelHeader_GetNoSlices( const G4SmartVoxelHeader *This )
{
return This->fNumSlices;
}
__device__ G4double
G4VoxelHeader_GetMinExtent( const G4SmartVoxelHeader *This )
{
return This->fminExtent;
}
__device__ G4double
G4VoxelHeader_GetMaxExtent( const G4SmartVoxelHeader *This )
{
return This->fmaxExtent;
}
__device__ G4SmartVoxelProxy*
G4VoxelHeader_GetSlice( const G4SmartVoxelHeader *This, G4int n )
{
(void)0;
return This->fslices[n];
}
__device__ G4bool
G4VoxelProxy_IsNode( const G4SmartVoxelProxy *This )
{
return This->fNode != 0;
}
__device__ G4bool
G4VoxelProxy_IsHeader( const G4SmartVoxelProxy *This )
{
return This->fHeader != 0;
}
__device__ G4SmartVoxelNode*
G4VoxelProxy_GetNode( const G4SmartVoxelProxy *This )
{
return This->fNode;
}
__device__ G4SmartVoxelHeader*
G4VoxelProxy_GetHeader( const G4SmartVoxelProxy *This )
{
return This->fHeader;
}
struct G4VPhysicalVolume;
struct G4VSolid;
typedef struct
{
G4int fNoDaughters;
struct G4VPhysicalVolume * *fDaughters;
int check;
StubMaterial* fMaterial;
struct G4VSolid* fSolid;
G4SmartVoxelHeader *fVoxel;
int align;
}
G4LogicalVolume;
typedef struct G4VSolid
{
ESolid type;
}
G4VSolid;
__device__
EInside G4VSolid_Inside( const G4VSolid *This, G4ThreeVector p);
__device__
G4ThreeVector G4VSolid_SurfaceNormal( const G4VSolid *This, G4ThreeVector p);
__device__
G4double G4VSolid_DistanceToIn_full(
const G4VSolid *This,
G4ThreeVector p,
G4ThreeVector v);
__device__
G4double G4VSolid_DistanceToIn( const G4VSolid *This, G4ThreeVector p);
__device__
G4double G4VSolid_DistanceToOut_full(
const G4VSolid *This,
G4ThreeVector p,
G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,
G4ThreeVector *n);
__device__
G4double G4VSolid_DistanceToOut( const G4VSolid *This, G4ThreeVector p);
typedef struct
{
G4VSolid solid;
G4double fDx,fDy,fDz;
}
G4Box;
extern "C" {
__device__ EInside G4Box_Inside( const G4Box *This, G4ThreeVector p);
__device__ G4ThreeVector G4Box_SurfaceNormal( const G4Box *This, G4ThreeVector p);
__device__ G4double G4Box_DistanceToIn_full(
const G4Box *This,
G4ThreeVector p,
G4ThreeVector v);
__device__ G4double G4Box_DistanceToIn( const G4Box *This, G4ThreeVector p);
__device__ G4double G4Box_DistanceToOut_full(
const G4Box *This,
G4ThreeVector p,
G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,
G4ThreeVector *n);
__device__ G4double G4Box_DistanceToOut( const G4Box *This, G4ThreeVector p);
__device__
G4ThreeVector G4Box_ApproxSurfaceNormal( const G4Box *This, G4ThreeVector p )
{
G4double distx, disty, distz ;
G4ThreeVector norm ;
distx = fabs(fabs(p.x) - This->fDx) ;
disty = fabs(fabs(p.y) - This->fDy) ;
distz = fabs(fabs(p.z) - This->fDz) ;
if ( distx <= disty )
{
if ( distx <= distz )
{
if ( p.x < 0 ) norm = G4ThreeVector_create(-1.0,0,0) ;
else norm = G4ThreeVector_create( 1.0,0,0) ;
}
else
{
if ( p.z < 0 ) norm = G4ThreeVector_create(0,0,-1.0) ;
else norm = G4ThreeVector_create(0,0, 1.0) ;
}
}
else
{
if ( disty <= distz )
{
if ( p.y < 0 ) norm = G4ThreeVector_create(0,-1.0,0) ;
else norm = G4ThreeVector_create(0, 1.0,0) ;
}
else
{
if ( p.z < 0 ) norm = G4ThreeVector_create(0,0,-1.0) ;
else norm = G4ThreeVector_create(0,0, 1.0) ;
}
}
return norm;
}
__device__
G4ThreeVector G4Box_SurfaceNormal( const G4Box *This, G4ThreeVector p)
{
G4double distx, disty, distz ;
G4ThreeVector norm ;
const G4double kCarTolerance = 1E-3;
distx = fabs(fabs(p.x) - This->fDx) ;
disty = fabs(fabs(p.y) - This->fDy) ;
distz = fabs(fabs(p.z) - This->fDz) ;
const G4double delta = 0.5*kCarTolerance;
const G4ThreeVector nX = G4ThreeVector_create( 1.0, 0,0 );
const G4ThreeVector nmX = G4ThreeVector_create(-1.0, 0,0 );
const G4ThreeVector nY = G4ThreeVector_create( 0, 1.0,0 );
const G4ThreeVector nmY = G4ThreeVector_create( 0,-1.0,0 );
const G4ThreeVector nZ = G4ThreeVector_create( 0, 0, 1.0);
const G4ThreeVector nmZ = G4ThreeVector_create( 0, 0,- 1.0);
G4ThreeVector
normX = G4ThreeVector_create(0.,0.,0.),
normY = G4ThreeVector_create(0.,0.,0.),
normZ = G4ThreeVector_create(0.,0.,0.);
G4ThreeVector sumnorm = G4ThreeVector_create(0., 0., 0.);
G4int noSurfaces=0;
if (distx <= delta)
{
noSurfaces ++;
if ( p.x >= 0.){
normX= nX ;
}else{
normX= nmX;
}
sumnorm= normX;
}
if (disty <= delta)
{
noSurfaces ++;
if ( p.y >= 0.){
normY= nY;
}else{
normY = nmY;
}
G4ThreeVector_sum_assign( &sumnorm, normY );
}
if (distz <= delta)
{
noSurfaces ++;
if ( p.z >= 0.){
normZ= nZ;
}else{
normZ = nmZ;
}
G4ThreeVector_sum_assign( &sumnorm, normZ );
}
const G4double invSqrt2 = 1.0 / sqrt( 2.0);
const G4double invSqrt3 = 1.0 / sqrt( 3.0);
norm= G4ThreeVector_create( 0., 0., 0.);
if( noSurfaces > 0 )
{
if( noSurfaces == 1 ){
norm= sumnorm;
}else{
if( noSurfaces == 2 ) {
norm = G4ThreeVector_mult(sumnorm, invSqrt2);
} else {
norm = G4ThreeVector_mult(sumnorm, invSqrt3);
}
}
}else{
norm = G4Box_ApproxSurfaceNormal(This, p);
}
return norm;
}
__device__
G4double G4Box_DistanceToIn_full( const G4Box *This, G4ThreeVector p,G4ThreeVector v)
{
G4double safx, safy, safz ;
G4double smin=0.0, sminy, sminz ;
G4double smax=kInfinity, smaxy, smaxz ;
G4double stmp ;
G4double sOut=kInfinity, sOuty=kInfinity, sOutz=kInfinity ;
const G4double kCarTolerance = 1E-3;
safx = fabs(p.x) - This->fDx ;
safy = fabs(p.y) - This->fDy ;
safz = fabs(p.z) - This->fDz ;
if ( ((p.x*v.x >= 0.0) && safx > -kCarTolerance*0.5)
|| ((p.y*v.y >= 0.0) && safy > -kCarTolerance*0.5)
|| ((p.z*v.z >= 0.0) && safz > -kCarTolerance*0.5) )
{
return kInfinity ;
}
if ( v.x)
{
stmp = 1.0/fabs(v.x) ;
if (safx >= 0.0)
{
smin = safx*stmp ;
smax = (This->fDx+fabs(p.x))*stmp ;
}
else
{
if (v.x > 0) sOut = (This->fDx - p.x)*stmp ;
if (v.x < 0) sOut = (This->fDx + p.x)*stmp ;
}
}
if ( v.y)
{
stmp = 1.0/fabs(v.y) ;
if (safy >= 0.0)
{
sminy = safy*stmp ;
smaxy = (This->fDy+fabs(p.y))*stmp ;
if (sminy > smin) smin=sminy ;
if (smaxy < smax) smax=smaxy ;
if (smin >= smax-kCarTolerance*0.5)
{
return kInfinity ;
}
}
else
{
if (v.y > 0) sOuty = (This->fDy - p.y)*stmp ;
if (v.y < 0) sOuty = (This->fDy + p.y)*stmp ;
if( sOuty < sOut ) sOut = sOuty ;
}
}
if ( v.z )
{
stmp = 1.0/fabs(v.z) ;
if ( safz >= 0.0)
{
sminz = safz*stmp ;
smaxz = (This->fDz+fabs(p.z))*stmp ;
if (sminz > smin) smin = sminz ;
if (smaxz < smax) smax = smaxz ;
if (smin >= smax-kCarTolerance*0.5)
{
return kInfinity ;
}
}
else
{
if (v.z > 0) sOutz = (This->fDz - p.z)*stmp ;
if (v.z < 0) sOutz = (This->fDz + p.z)*stmp ;
if( sOutz < sOut ) sOut = sOutz ;
}
}
if ( sOut <= smin + 0.5*kCarTolerance)
{
return kInfinity ;
}
if (smin < 0.5*kCarTolerance) smin = 0.0 ;
return smin ;
}
__device__
G4double G4Box_DistanceToIn( const G4Box *This, G4ThreeVector p)
{
G4double safex, safey, safez, safe = 0.0 ;
safex = fabs(p.x) - This->fDx ;
safey = fabs(p.y) - This->fDy ;
safez = fabs(p.z) - This->fDz ;
if (safex > safe) safe = safex ;
if (safey > safe) safe = safey ;
if (safez > safe) safe = safez ;
return safe ;
}
__device__
G4double G4Box_DistanceToOut_full( const G4Box *This, G4ThreeVector p,G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,G4ThreeVector *n)
{
const G4double kCarTolerance = 1E-3;
enum {kBoxUndefined,kPX,kMX,kPY,kMY,kPZ,kMZ} side = kBoxUndefined ;
G4double pdist,stmp,snxt;
if (calcNorm) *validNorm = true ;
if (v.x > 0)
{
pdist = This->fDx - p.x ;
if (pdist > kCarTolerance*0.5)
{
snxt = pdist/v.x ;
side = kPX ;
}
else
{
if (calcNorm) *n = G4ThreeVector_create(1,0,0) ;
return snxt = 0 ;
}
}
else if (v.x < 0)
{
pdist = This->fDx + p.x ;
if (pdist > kCarTolerance*0.5)
{
snxt = -pdist/v.x ;
side = kMX ;
}
else
{
if (calcNorm) *n = G4ThreeVector_create(-1,0,0) ;
return snxt = 0 ;
}
}
else snxt = kInfinity ;
if ( v.y > 0 )
{
pdist=This->fDy-p.y;
if (pdist>kCarTolerance*0.5)
{
stmp=pdist/v.y;
if (stmp<snxt)
{
snxt=stmp;
side=kPY;
}
}
else
{
if (calcNorm) *n = G4ThreeVector_create(0,1,0) ;
return snxt = 0 ;
}
}
else if ( v.y < 0 )
{
pdist = This->fDy + p.y ;
if (pdist > kCarTolerance*0.5)
{
stmp=-pdist/v.y;
if (stmp<snxt)
{
snxt=stmp;
side=kMY;
}
}
else
{
if (calcNorm) *n = G4ThreeVector_create(0,-1,0) ;
return snxt = 0 ;
}
}
if (v.z>0)
{
pdist=This->fDz-p.z;
if (pdist > kCarTolerance*0.5)
{
stmp=pdist/v.z;
if (stmp < snxt)
{
snxt=stmp;
side=kPZ;
}
}
else
{
if (calcNorm) *n = G4ThreeVector_create(0,0,1) ;
return snxt = 0 ;
}
}
else if (v.z<0)
{
pdist = This->fDz + p.z ;
if (pdist > kCarTolerance*0.5)
{
stmp=-pdist/v.z;
if (stmp < snxt)
{
snxt=stmp;
side=kMZ;
}
}
else
{
if (calcNorm) *n = G4ThreeVector_create(0,0,-1) ;
return snxt = 0 ;
}
}
if (calcNorm)
{
switch (side)
{
case kPX:
*n=G4ThreeVector_create(1,0,0);
break;
case kMX:
*n=G4ThreeVector_create(-1,0,0);
break;
case kPY:
*n=G4ThreeVector_create(0,1,0);
break;
case kMY:
*n=G4ThreeVector_create(0,-1,0);
break;
case kPZ:
*n=G4ThreeVector_create(0,0,1);
break;
case kMZ:
*n=G4ThreeVector_create(0,0,-1);
break;
default:
break;
}
}
return snxt;
}
__device__
G4double G4Box_DistanceToOut( const G4Box *This, G4ThreeVector p )
{
G4double safx1,safx2,safy1,safy2,safz1,safz2,safe=0.0;
safx1 = This->fDx - p.x ;
safx2 = This->fDx + p.x ;
safy1 = This->fDy - p.y ;
safy2 = This->fDy + p.y ;
safz1 = This->fDz - p.z ;
safz2 = This->fDz + p.z ;
if (safx2 < safx1) safe = safx2 ;
else safe = safx1 ;
if (safy1 < safe) safe = safy1 ;
if (safy2 < safe) safe = safy2 ;
if (safz1 < safe) safe = safz1 ;
if (safz2 < safe) safe = safz2 ;
if (safe < 0) safe = 0 ;
return safe ;
}
__device__
EInside G4Box_Inside( const G4Box *This, G4ThreeVector p)
{
const G4double kCarTolerance = 1E-3;
EInside in = kOutside ;
if ( fabs(p.x) <= This->fDx - kCarTolerance*0.5 )
{
if (fabs(p.y) <= This->fDy - kCarTolerance*0.5 )
{
if (fabs(p.z) <= This->fDz - kCarTolerance*0.5 ) in = kInside ;
else if (fabs(p.z) <= This->fDz + kCarTolerance*0.5 ) in = kSurface ;
}
else if (fabs(p.y) <= This->fDy + kCarTolerance*0.5 )
{
if (fabs(p.z) <= This->fDz + kCarTolerance*0.5 ) in = kSurface ;
}
}
else if (fabs(p.x) <= This->fDx + kCarTolerance*0.5 )
{
if (fabs(p.y) <= This->fDy + kCarTolerance*0.5 )
{
if (fabs(p.z) <= This->fDz + kCarTolerance*0.5) in = kSurface ;
}
}
return in ;
}
}
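// Hedged illustration, not part of the port: how the G4Box entry points combine
// when tracking a straight ray against a box. The helper name and calling pattern
// are assumptions; p is the ray origin and v a unit direction.
__device__ static G4double BoxRayStep_sketch( const G4Box* box,
                                              G4ThreeVector p, G4ThreeVector v )
{
	if ( G4Box_Inside( box, p ) == kOutside )
		return G4Box_DistanceToIn_full( box, p, v ); // distance until the ray enters
	G4bool validNorm = false;
	G4ThreeVector n = G4ThreeVector_create(0,0,0);
	return G4Box_DistanceToOut_full( box, p, v, true, &validNorm, &n ); // distance to exit
}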
typedef struct
{
G4VSolid solid;
G4double fRmax;
G4double fRmaxTolerance;
G4double align;
}
G4Orb;
extern "C" {
__device__ EInside G4Orb_Inside( const G4Orb *This, G4ThreeVector p);
__device__ G4ThreeVector G4Orb_SurfaceNormal( const G4Orb *This, G4ThreeVector p);
__device__ G4double G4Orb_DistanceToIn_full(
const G4Orb *This,
G4ThreeVector p,
G4ThreeVector v);
__device__ G4double G4Orb_DistanceToIn( const G4Orb *This, G4ThreeVector p);
__device__ G4double G4Orb_DistanceToOut_full(
const G4Orb *This,
G4ThreeVector p,
G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,
G4ThreeVector *n);
__device__ G4double G4Orb_DistanceToOut( const G4Orb *This, G4ThreeVector p);
__device__
EInside G4Orb_Inside( const G4Orb *This, G4ThreeVector p)
{
G4double rad2,tolRMax;
EInside in;
rad2 = G4ThreeVector_mag2(p);
G4double rad = sqrt(rad2);
tolRMax = This->fRmax - This->fRmaxTolerance*0.5 ;
if ( rad <= tolRMax ) { in = kInside ; }
else
{
tolRMax = This->fRmax + This->fRmaxTolerance*0.5 ;
if ( rad <= tolRMax ) { in = kSurface ; }
else { in = kOutside ; }
}
return in;
}
__device__
G4ThreeVector G4Orb_SurfaceNormal( const G4Orb *This, G4ThreeVector p)
{
(void)This;
return G4ThreeVector_unit(p);
}
__device__
G4double G4Orb_DistanceToIn_full( const G4Orb *This, G4ThreeVector p,G4ThreeVector v)
{
G4double snxt = kInfinity ;
G4double rad2, pDotV3d;
G4double c, d2, s = kInfinity ;
rad2 = G4ThreeVector_mag2(p);
pDotV3d = G4ThreeVector_dot(p,v);
G4double rad = sqrt(rad2);
c = (rad - This->fRmax)*(rad + This->fRmax);
if ( c > This->fRmaxTolerance*This->fRmax )
{
d2 = pDotV3d*pDotV3d - c ;
if ( d2 >= 0 )
{
s = -pDotV3d - sqrt(d2) ;
if ( s >= 0 )
{
return snxt = s;
}
}
else
{
return snxt = kInfinity;
}
}
else
{
if ( c > -This->fRmaxTolerance*This->fRmax )
{
d2 = pDotV3d*pDotV3d - c ;
if ( (d2 < This->fRmaxTolerance*This->fRmax) || (pDotV3d >= 0) )
{
return snxt = kInfinity;
}
else
{
return snxt = 0.;
}
}
}
return snxt;
}
__device__
G4double G4Orb_DistanceToIn( const G4Orb *This, G4ThreeVector p)
{
G4double safe = 0.0,
rad = G4ThreeVector_mag(p);
safe = rad - This->fRmax;
if( safe < 0 ) { safe = 0.; }
return safe;
}
__device__
G4double G4Orb_DistanceToOut_full( const G4Orb *This, G4ThreeVector p,G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,G4ThreeVector *n)
{
G4double snxt = kInfinity;
enum {kNull,kRMax} side = kNull;
G4double rad2,pDotV3d;
G4ThreeVector ipoint;
G4double c,d2;
rad2 = G4ThreeVector_mag2(p);
pDotV3d = G4ThreeVector_dot(p,v);
const G4double Rmax_plus = This->fRmax + This->fRmaxTolerance*0.5;
G4double rad = sqrt(rad2);
if ( rad <= Rmax_plus )
{
c = (rad - This->fRmax)*(rad + This->fRmax);
if ( c < This->fRmaxTolerance*This->fRmax )
{
d2 = pDotV3d*pDotV3d - c;
if( ( c > -This->fRmaxTolerance*This->fRmax) &&
( ( pDotV3d >= 0 ) || ( d2 < 0 )) )
{
if(calcNorm)
{
*validNorm = true ;
*n = G4ThreeVector_create(p.x/This->fRmax,p.y/This->fRmax,p.z/This->fRmax) ;
}
return snxt = 0;
}
else
{
snxt = -pDotV3d + sqrt(d2);
side = kRMax ;
}
}
}
else
{
}
if (calcNorm)
{
switch( side )
{
case kRMax:
ipoint = G4ThreeVector_saxpy(snxt,v,p);
*n=G4ThreeVector_mult(ipoint,1.0/This->fRmax);
*validNorm=true;
break;
default:
break;
}
}
return snxt;
}
__device__
G4double G4Orb_DistanceToOut( const G4Orb *This, G4ThreeVector p )
{
G4double safe=0.0,rad = G4ThreeVector_mag(p);
safe = This->fRmax - rad;
if ( safe < 0. ) safe = 0.;
return safe;
}
}
__device__
EInside G4VSolid_Inside( const G4VSolid *This, G4ThreeVector p)
{
switch(This->type)
{
case kBox:
return G4Box_Inside(( const G4Box*)This,p);
case kOrb:
return G4Orb_Inside(( const G4Orb*)This,p);
default:
(void)0;
return kOutside;
}
}
__device__
G4ThreeVector G4VSolid_SurfaceNormal( const G4VSolid *This, G4ThreeVector p)
{
switch(This->type)
{
case kBox:
return G4Box_SurfaceNormal(( const G4Box*)This,p);
case kOrb:
return G4Orb_SurfaceNormal(( const G4Orb*)This,p);
default:
(void)0;
return G4ThreeVector_create(0,0,0);
}
}
__device__
G4double G4VSolid_DistanceToIn_full(
const G4VSolid *This,
G4ThreeVector p,
G4ThreeVector v)
{
switch(This->type)
{
case kBox:
return G4Box_DistanceToIn_full(( const G4Box*)This,p,v);
case kOrb:
return G4Orb_DistanceToIn_full(( const G4Orb*)This,p,v);
default:
(void)0;
return 0;
}
}
__device__
G4double G4VSolid_DistanceToIn( const G4VSolid *This, G4ThreeVector p)
{
switch(This->type)
{
case kBox:
return G4Box_DistanceToIn(( const G4Box*)This,p);
case kOrb:
return G4Orb_DistanceToIn(( const G4Orb*)This,p);
default:
(void)0;
return 0;
}
}
__device__
G4double G4VSolid_DistanceToOut_full(
const G4VSolid *This,
G4ThreeVector p,
G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,
G4ThreeVector *n)
{
switch(This->type)
{
case kBox:
return G4Box_DistanceToOut_full(( const G4Box*)This,p,v,calcNorm,validNorm,n);
case kOrb:
return G4Orb_DistanceToOut_full(( const G4Orb*)This,p,v,calcNorm,validNorm,n);
default:
(void)0;
return 0;
}
}
__device__
G4double G4VSolid_DistanceToOut( const G4VSolid *This, G4ThreeVector p)
{
switch(This->type)
{
case kBox:
return G4Box_DistanceToOut(( const G4Box*)This,p);
case kOrb:
return G4Orb_DistanceToOut(( const G4Orb*)This,p);
default:
(void)0;
return 0;
}
}
__device__
G4SmartVoxelHeader * G4LogicalVolume_GetVoxelHeader( const G4LogicalVolume* This)
{
return This->fVoxel;
}
__device__
G4int G4LogicalVolume_GetNoDaughters( const G4LogicalVolume* This)
{
return This->fNoDaughters;
}
__device__
struct G4VPhysicalVolume* G4LogicalVolume_GetDaughter( const G4LogicalVolume* This, const G4int i)
{
return This->fDaughters[i];
}
__device__
struct G4VSolid* G4LogicalVolume_GetSolid( const G4LogicalVolume* This)
{
return This->fSolid;
}
__device__
StubMaterial* G4LogicalVolume_GetMaterial( const G4LogicalVolume* This)
{
return This->fMaterial;
}
typedef struct G4VPhysicalVolume
{
G4RotationMatrix frot;
G4ThreeVector ftrans;
int guard1;
G4LogicalVolume *flogical;
int guard2;
G4LogicalVolume *flmother;
int guard3;
int count;
int counter_shadow;
}
G4VPhysicalVolume;
__device__
G4ThreeVector G4VPhysicalVolume_GetTranslation( const G4VPhysicalVolume *This)
{
return This->ftrans;
}
__device__
G4LogicalVolume* G4VPhysicalVolume_GetLogicalVolume( const G4VPhysicalVolume *This)
{
return This->flogical;
}
__device__
G4LogicalVolume* G4VPhysicalVolume_GetMotherLogical( const G4VPhysicalVolume *This)
{
return This->flmother;
}
__device__
G4RotationMatrix G4VPhysicalVolume_GetObjectRotationValue( const G4VPhysicalVolume *This)
{
return This->frot;
}
__device__
G4ThreeVector G4VPhysicalVolume_GetObjectTranslation( const G4VPhysicalVolume *This)
{
return This->ftrans;
}
typedef struct
{
G4AffineTransform fTransform;
G4VPhysicalVolume* fPhysicalVolumePtr;
EVolume fVolumeType;
}
G4NavigationLevel;
typedef struct
{
G4NavigationLevel fNavHistory[16];
G4int fStackDepth;
int align;
}
G4NavigationHistory;
__device__
void G4NavigationLevel_ctor(
G4NavigationLevel *This,
G4VPhysicalVolume* pPhysVol,
G4AffineTransform afTransform,
EVolume volTp )
{
This->fTransform = afTransform;
This->fPhysicalVolumePtr = pPhysVol;
This->fVolumeType = volTp;
}
__device__
void G4NavigationLevel_ctor_relative(
G4NavigationLevel *This,
G4VPhysicalVolume* pPhysVol,
G4AffineTransform levelAbove,
G4AffineTransform relativeCurrent,
EVolume volTp )
{
This->fPhysicalVolumePtr = pPhysVol;
This->fVolumeType = volTp;
G4AffineTransform_InverseProduct(&(This->fTransform), &levelAbove, &relativeCurrent );
}
__device__
G4NavigationLevel G4NavigationLevel_create(
G4VPhysicalVolume* pPhysVol,
G4AffineTransform afTransform,
EVolume volTp )
{
G4NavigationLevel lev;
G4NavigationLevel_ctor( &lev, pPhysVol, afTransform, volTp );
return lev;
}
__device__
G4NavigationLevel G4NavigationLevel_create_relative(
G4VPhysicalVolume* pPhysVol,
G4AffineTransform levelAbove,
G4AffineTransform relativeCurrent,
EVolume volTp)
{
G4NavigationLevel lev;
G4NavigationLevel_ctor_relative( &lev, pPhysVol, levelAbove, relativeCurrent, volTp );
return lev;
}
__device__
G4VPhysicalVolume* G4NavigationLevel_GetPhysicalVolume(
const G4NavigationLevel *This )
{
return This->fPhysicalVolumePtr;
}
__device__
G4AffineTransform G4NavigationLevel_GetTransform(
const G4NavigationLevel *This )
{
return This->fTransform;
}
__device__
const G4AffineTransform* G4NavigationLevel_GetPtrTransform(
const G4NavigationLevel *This )
{
return &(This->fTransform);
}
__device__
EVolume G4NavigationLevel_GetVolumeType(
const G4NavigationLevel *This )
{
return This->fVolumeType;
}
__device__
void G4NavigationHistory_Reset( G4NavigationHistory *This )
{
This->fStackDepth = 0;
}
__device__
void G4NavigationHistory_Clear( G4NavigationHistory *This )
{
G4AffineTransform origin = G4AffineTransform_create_vector(G4ThreeVector_create(0.,0.,0.));
G4NavigationLevel tmpNavLevel = G4NavigationLevel_create(0, origin, kNormal) ;
G4NavigationHistory_Reset( This );
for (G4int ilev=16 -1; ilev>=0; ilev--)
{
This->fNavHistory[ilev] = tmpNavLevel;
}
}
__device__
void G4NavigationHistory_ctor( G4NavigationHistory *This )
{
This->fStackDepth = 0;
G4NavigationHistory_Clear( This );
}
__device__
void G4NavigationHistory_dtor( G4NavigationHistory *This )
{
(void)This;
}
__device__
void G4NavigationHistory_SetFirstEntry(
G4NavigationHistory *This, G4VPhysicalVolume* pVol)
{
G4ThreeVector translation = G4ThreeVector_create(0.,0.,0.);
if( pVol!=0 )
{
translation = G4VPhysicalVolume_GetTranslation( pVol );
}
This->fNavHistory[0] =
G4NavigationLevel_create( pVol, G4AffineTransform_create_vector(translation), kNormal );
}
__device__
const G4AffineTransform* G4NavigationHistory_GetPtrTopTransform(
const G4NavigationHistory *This )
{
return G4NavigationLevel_GetPtrTransform( &(This->fNavHistory[This->fStackDepth]) );
}
__device__
G4AffineTransform G4NavigationHistory_GetTopTransform(
const G4NavigationHistory *This )
{
return G4NavigationLevel_GetTransform( &(This->fNavHistory[This->fStackDepth]) );
}
__device__
EVolume G4NavigationHistory_GetTopVolumeType(
const G4NavigationHistory *This )
{
return G4NavigationLevel_GetVolumeType( &(This->fNavHistory[This->fStackDepth]) );
}
__device__
G4VPhysicalVolume* G4NavigationHistory_GetTopVolume(
const G4NavigationHistory *This )
{
return G4NavigationLevel_GetPhysicalVolume( &(This->fNavHistory[This->fStackDepth]) );
}
__device__
G4int G4NavigationHistory_GetDepth(
const G4NavigationHistory *This )
{
return This->fStackDepth;
}
__device__
G4AffineTransform
G4NavigationHistory_GetTransform(
const G4NavigationHistory *This, G4int n )
{
return G4NavigationLevel_GetTransform( &(This->fNavHistory[n]) );
}
__device__
EVolume G4NavigationHistory_GetVolumeType(
const G4NavigationHistory *This, G4int n )
{
return G4NavigationLevel_GetVolumeType( &(This->fNavHistory[n]) );
}
__device__
G4VPhysicalVolume* G4NavigationHistory_GetVolume(
const G4NavigationHistory *This, G4int n )
{
return G4NavigationLevel_GetPhysicalVolume( &(This->fNavHistory[n]) );
}
__device__
G4int G4NavigationHistory_GetMaxDepth(
const G4NavigationHistory *This )
{
(void)This;
return 16;
}
__device__
void G4NavigationHistory_BackLevel( G4NavigationHistory *This )
{
(void)0;
This->fStackDepth--;
}
__device__
void G4NavigationHistory_NewLevel(
G4NavigationHistory *This,
G4VPhysicalVolume *pNewMother,
EVolume vType )
{
This->fStackDepth++;
(void)0;
This->fNavHistory[This->fStackDepth] =
G4NavigationLevel_create_relative(
pNewMother,
G4NavigationLevel_GetTransform( &(This->fNavHistory[This->fStackDepth-1]) ),
G4AffineTransform_create_full(
G4VPhysicalVolume_GetObjectRotationValue( pNewMother ),
G4VPhysicalVolume_GetTranslation( pNewMother )),
vType );
}
typedef struct{
G4VPhysicalVolume * PVolume;
G4int trackId;
}SolidInfo;
typedef struct{
float safety;
float step;
int trackId;
G4VPhysicalVolume * PVolume;
}ResultInfo;
typedef struct{
float safety;
float step;
G4VPhysicalVolume * PVolume;
}
FinalResult;
typedef struct{
G4ThreeVector Point;
G4ThreeVector Direction;
}PointInformation;
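// Per-thread selection: scans `size` candidate results starting at offset PrevSum
// and folds the smallest step (and, independently, the smallest safety) into
// Compacter_Result[locationId], keeping the physical volume that produced the
// winning step.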
__device__ void Find_minimum ( ResultInfo * Result_For_Current_Solid, FinalResult * Compacter_Result, int PrevSum, int size)
{
int locationId = (blockIdx.x * blockDim.x + threadIdx.x);
int i, loc ;
float result_step, result_safety, Current_result_step, Current_result_safety;
float Initial_result_step = (Compacter_Result [ locationId ]).step;
float Initial_result_safety = (Compacter_Result [ locationId ]).safety;
Current_result_step = Initial_result_step;
Current_result_safety = Initial_result_safety;
for( i = 0; i < size ; i++)
{
result_step = Result_For_Current_Solid[ PrevSum + i].step;
result_safety = Result_For_Current_Solid[ PrevSum + i].safety;
if ( result_step < Current_result_step)
{
loc = PrevSum + i;
Current_result_step = result_step;
}
if ( result_safety < Current_result_safety)
{
Current_result_safety = result_safety;
}
}
if( Current_result_step != Initial_result_step)
{
FinalResult final = { Current_result_safety, Current_result_step, (Result_For_Current_Solid[ loc ].PVolume)};
Compacter_Result[ locationId ] = final;
}
__syncthreads();
}
typedef struct
{
G4double fVoxelSliceWidthStack[4];
G4SmartVoxelHeader* fVoxelHeaderStack[4];
G4int fVoxelNodeNoStack[4];
G4int fVoxelNoSlicesStack[4];
EAxis fVoxelAxisStack[4];
G4int fVoxelDepth;
G4SmartVoxelNode *fVoxelNode;
}
G4VoxelNavigation;
__device__ void G4VoxelNavigation_ctor( G4VoxelNavigation *This );
__device__ G4bool G4VoxelNavigation_LevelLocate(
G4VoxelNavigation *This,
G4NavigationHistory *history,
const G4VPhysicalVolume *blockedVol,
G4ThreeVector globalPoint,
const G4ThreeVector* globalDirection,
const G4bool pLocatedOnEdge,
G4ThreeVector *localPoint );
__device__ G4SmartVoxelNode* G4VoxelNavigation_VoxelLocate(
G4VoxelNavigation *This,
G4SmartVoxelHeader *voxelHeader,
G4ThreeVector point);
__device__
G4double
G4VoxelNavigation_ComputeStep(
G4VoxelNavigation *This,
G4ThreeVector localPoint,
G4ThreeVector localDirection,
const G4double currentProposedStepLength,
G4double *newSafety,
G4NavigationHistory *history,
G4bool *validExitNormal,
G4ThreeVector *exitNormal,
G4bool *exiting,
G4bool *entering,
G4VPhysicalVolume *(*pBlockedPhysical)
, G4double * Result
);
__device__ G4double G4VoxelNavigation_ComputeSafety(
G4VoxelNavigation *This,
G4ThreeVector localPoint,
const G4NavigationHistory *history);
typedef struct
{
G4NavigationHistory fHistory;
G4VoxelNavigation fVoxelNav;
G4ThreeVector fStepEndPoint;
G4ThreeVector fLastLocatedPointLocal;
G4ThreeVector fExitNormal;
G4ThreeVector fGrandMotherExitNormal;
G4bool fEnteredDaughter;
G4bool fExitedMother;
G4bool fWasLimitedByGeometry;
G4bool fEntering;
G4bool fExiting;
G4bool fLastStepWasZero;
G4bool fLocatedOnEdge;
G4bool fLocatedOutsideWorld;
G4bool fValidExitNormal;
G4bool fPushed;
G4int fNumberZeroSteps;
int align1;
G4double fPreviousSafety;
G4VPhysicalVolume *fBlockedPhysicalVolume;
G4VPhysicalVolume *fTopPhysical;
}
G4Navigator;
__device__ void G4Navigator_ctor( G4Navigator *This );
__device__ void G4Navigator_SetWorldVolume(
G4Navigator *This,
G4VPhysicalVolume* pWorld );
__device__ G4VPhysicalVolume* G4Navigator_LocateGlobalPointAndSetup(
G4Navigator *This,
G4ThreeVector globalPoint,
const G4ThreeVector* pGlobalDirection,
G4bool relativeSearch,
G4bool ignoreDirection,
float * Result);
__device__
G4double G4Navigator_ComputeStep(
G4Navigator *This,
G4ThreeVector pGlobalpoint,
G4ThreeVector pDirection,
const G4double pCurrentProposedStepLength,
G4double *pNewSafety
, G4bool cur_vol_local
, G4double * Result
);
__device__ void G4Navigator_SetGeometricallyLimitedStep( G4Navigator *This );
__device__ G4double G4NormalNavigation_ComputeStep(
G4ThreeVector localPoint,
G4ThreeVector localDirection,
const G4double currentProposedStepLength,
G4double *newSafety,
G4NavigationHistory *history,
G4bool *validExitNormal,
G4ThreeVector *exitNormal,
G4bool *exiting,
G4bool *entering,
G4VPhysicalVolume *(*pBlockedPhysical));
__device__ G4double G4NormalNavigation_ComputeSafety(
G4ThreeVector localPoint,
const G4NavigationHistory *history );
__device__ G4bool G4NormalNavigation_LevelLocate(
G4NavigationHistory *history,
const G4VPhysicalVolume *blockedVol,
G4ThreeVector* globalPoint,
const G4ThreeVector* globalDirection,
G4bool pLocatedOnEdge,
G4ThreeVector* localPoint );
__device__ void G4VoxelNavigation_ctor( G4VoxelNavigation *This );
__device__ void G4Navigator_ResetState( G4Navigator *This )
{
This->fWasLimitedByGeometry = false;
This->fEntering = false;
This->fExiting = false;
This->fLocatedOnEdge = false;
This->fLastStepWasZero = false;
This->fEnteredDaughter = false;
This->fExitedMother = false;
This->fPushed = false;
This->fValidExitNormal = false;
This->fExitNormal = G4ThreeVector_create(0,0,0);
This->fPreviousSafety = 0.0;
This->fNumberZeroSteps = 0;
This->fBlockedPhysicalVolume = 0;
This->fLastLocatedPointLocal = G4ThreeVector_create( 1e37, -1e37, 0.0 );
This->fLocatedOutsideWorld = false;
}
__device__
G4ThreeVector G4Navigator_ComputeLocalAxis( const G4Navigator *This, G4ThreeVector pVec)
{
G4AffineTransform t =
G4NavigationHistory_GetTopTransform( &(This->fHistory) );
return G4AffineTransform_TransformAxis(&t, pVec);
}
__device__ G4ThreeVector
G4Navigator_ComputeLocalPoint( const G4Navigator *This, G4ThreeVector pGlobalPoint)
{
G4AffineTransform t =
G4NavigationHistory_GetTopTransform( &(This->fHistory) );
return G4AffineTransform_TransformPoint(&t, pGlobalPoint);
}
__device__ void G4Navigator_SetWorldVolume( G4Navigator *This, G4VPhysicalVolume* pWorld )
{
This->fTopPhysical = pWorld;
G4NavigationHistory_SetFirstEntry( &(This->fHistory), pWorld );
}
__device__ void G4Navigator_SetGeometricallyLimitedStep( G4Navigator *This )
{
This->fWasLimitedByGeometry = true;
}
__device__
void G4Navigator_ResetStackAndState( G4Navigator *This )
{
G4NavigationHistory_Reset( &(This->fHistory) );
G4Navigator_ResetState( This );
}
__device__
EVolume G4Navigator_VolumeType( const G4Navigator *This, const G4VPhysicalVolume *pVol )
{
(void)This;
(void)pVol;
return kNormal;
}
__device__ void G4Navigator_ctor( G4Navigator *This )
{
G4NavigationHistory_ctor( &(This->fHistory) );
G4VoxelNavigation_ctor( &(This->fVoxelNav ) );
G4Navigator_ResetStackAndState( This );
This->fWasLimitedByGeometry = false;
This->fTopPhysical = 0;
This->fPushed = false;
This->fStepEndPoint = G4ThreeVector_create( kInfinity, kInfinity, kInfinity );
}
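// Locates the deepest volume containing globalPoint, rebuilding the navigation
// history from the world volume downwards; mirrors
// G4Navigator::LocateGlobalPointAndSetup. In this port the relative-search branch
// is effectively disabled: the `if ( 1 )` below always resets the stack and state.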
__device__
G4VPhysicalVolume*
G4Navigator_LocateGlobalPointAndSetup(
G4Navigator *This,
G4ThreeVector globalPoint,
const G4ThreeVector* pGlobalDirection,
G4bool relativeSearch,
G4bool ignoreDirection,
float * Result
)
{
G4bool notKnownContained=true, noResult;
G4VPhysicalVolume *targetPhysical;
G4VSolid *targetSolid = 0;
G4ThreeVector localPoint = G4ThreeVector_create(0,0,0);
G4ThreeVector globalDirection = G4ThreeVector_create(0,0,0);
EInside insideCode;
G4bool considerDirection = (!ignoreDirection) || This->fLocatedOnEdge;
if( considerDirection && pGlobalDirection != 0 )
{
globalDirection=*pGlobalDirection;
}
if ( 1 )
{
G4Navigator_ResetStackAndState( This );
}
else
{
if ( This->fWasLimitedByGeometry )
{
This->fWasLimitedByGeometry = false;
This->fEnteredDaughter = This->fEntering;
This->fExitedMother = This->fExiting;
if ( This->fExiting )
{
if ( G4NavigationHistory_GetDepth( &(This->fHistory) ) )
{
This->fBlockedPhysicalVolume = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
G4NavigationHistory_BackLevel( &(This->fHistory) );
}
else
{
This->fLastLocatedPointLocal = localPoint;
This->fLocatedOutsideWorld = true;
return 0;
}
if ( This->fLocatedOnEdge )
{
This->fExiting= false;
}
}
else
if ( This->fEntering )
{
G4NavigationHistory_NewLevel( &(This->fHistory), This->fBlockedPhysicalVolume, kNormal);
This->fEntering = false;
This->fBlockedPhysicalVolume = 0;
G4AffineTransform t = G4NavigationHistory_GetTopTransform( &(This->fHistory) );
localPoint = G4AffineTransform_TransformPoint(&t,globalPoint);
notKnownContained = false;
}
}
else
{
This->fBlockedPhysicalVolume = 0;
This->fEntering = false;
This->fEnteredDaughter = false;
This->fExiting = false;
This->fExitedMother = false;
}
}
while (notKnownContained)
{
targetSolid =
G4LogicalVolume_GetSolid(
G4VPhysicalVolume_GetLogicalVolume(
G4NavigationHistory_GetTopVolume(&(This->fHistory))));
G4AffineTransform t = G4NavigationHistory_GetTopTransform( &(This->fHistory) );
localPoint = G4AffineTransform_TransformPoint(&t,globalPoint);
insideCode = G4VSolid_Inside(targetSolid,localPoint);
if ( insideCode==kOutside )
{
if ( G4NavigationHistory_GetDepth( &(This->fHistory) ) )
{
This->fBlockedPhysicalVolume = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
G4NavigationHistory_BackLevel( &(This->fHistory) );
This->fExiting = false;
}
else
{
This->fLastLocatedPointLocal = localPoint;
This->fLocatedOutsideWorld = true;
return 0;
}
}
else
if ( insideCode==kSurface )
{
G4bool isExiting = This->fExiting;
if( (!This->fExiting)&&considerDirection )
{
G4bool directionExiting = false;
G4AffineTransform t = G4NavigationHistory_GetTopTransform( &(This->fHistory) );
G4ThreeVector localDirection =G4AffineTransform_TransformAxis(&t,globalDirection);
G4ThreeVector normal = G4VSolid_SurfaceNormal(targetSolid, localPoint);
directionExiting = G4ThreeVector_dot(normal,localDirection) > 0.0;
isExiting = isExiting || directionExiting;
}
if( isExiting )
{
if ( G4NavigationHistory_GetDepth( &(This->fHistory) ) )
{
This->fBlockedPhysicalVolume = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
G4NavigationHistory_BackLevel( &(This->fHistory) );
This->fValidExitNormal = false;
}
else
{
This->fLastLocatedPointLocal = localPoint;
This->fLocatedOutsideWorld = true;
return 0;
}
}
else
{
notKnownContained=false;
}
}
else
{
notKnownContained=false;
}
}
noResult = true;
do
{
targetPhysical = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
G4LogicalVolume *targetLogical = G4VPhysicalVolume_GetLogicalVolume(targetPhysical);
if ( G4LogicalVolume_GetVoxelHeader( targetLogical ) != 0 )
{
noResult =
G4VoxelNavigation_LevelLocate(
&(This->fVoxelNav),
&(This->fHistory),
This->fBlockedPhysicalVolume,
globalPoint,
pGlobalDirection,
considerDirection,
&localPoint);
}
else
{
noResult = G4NormalNavigation_LevelLocate(
&(This->fHistory),
This->fBlockedPhysicalVolume,
&globalPoint,
pGlobalDirection,
considerDirection,
&localPoint);
}
if ( noResult )
{
This->fBlockedPhysicalVolume = 0;
This->fEntering = false;
This->fEnteredDaughter = true;
}
} while (noResult);
This->fLastLocatedPointLocal = localPoint;
This->fLocatedOutsideWorld= false;
return targetPhysical;
}
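/* Re-locates a point known to lie inside the current volume: refreshes the cached local
   point and the voxel node (if the mother is voxelised) without touching the history. */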
__device__ void
G4Navigator_LocateGlobalPointWithinVolume( G4Navigator *This, G4ThreeVector pGlobalpoint)
{
This->fLastLocatedPointLocal = G4Navigator_ComputeLocalPoint( This, pGlobalpoint );
G4VPhysicalVolume* motherPhysical = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
G4LogicalVolume* motherLogical = G4VPhysicalVolume_GetLogicalVolume( motherPhysical );
G4SmartVoxelHeader* pVoxelHeader = G4LogicalVolume_GetVoxelHeader( motherLogical );
if ( pVoxelHeader )
{
G4VoxelNavigation_VoxelLocate( &(This->fVoxelNav), pVoxelHeader, This->fLastLocatedPointLocal );
}
This->fBlockedPhysicalVolume = 0;
This->fEntering = false;
This->fEnteredDaughter = false;
This->fExiting = false;
This->fExitedMother = false;
}
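/* Computes how far the track can travel along pDirection before the next geometric
   boundary, dispatching to the voxel or normal navigator of the mother volume and
   applying the zero-step push logic.  Returns kInfinity when the proposed step is not
   limited by geometry. */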
__device__
G4double G4Navigator_ComputeStep(
G4Navigator *This,
G4ThreeVector pGlobalpoint,
G4ThreeVector pDirection,
const G4double pCurrentProposedStepLength,
G4double *pNewSafety
, G4bool cur_vol_local
, G4double * Result
)
{
G4ThreeVector localDirection = G4Navigator_ComputeLocalAxis(This,pDirection);
G4double Step = 1e37;
G4VPhysicalVolume *motherPhysical = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
const G4double kCarTolerance = 1E-3;
G4LogicalVolume *motherLogical = G4VPhysicalVolume_GetLogicalVolume(motherPhysical);
G4ThreeVector newLocalPoint = G4Navigator_ComputeLocalPoint( This, pGlobalpoint);
if( !G4ThreeVector_equal(newLocalPoint, This->fLastLocatedPointLocal) )
{
G4ThreeVector oldLocalPoint = This->fLastLocatedPointLocal;
G4double moveLenSq = G4ThreeVector_diff2(newLocalPoint,oldLocalPoint);
if ( moveLenSq >= kCarTolerance*kCarTolerance )
{
G4Navigator_LocateGlobalPointWithinVolume( This, pGlobalpoint );
}
}
if ( G4LogicalVolume_GetVoxelHeader(motherLogical) != 0 )
{
if( cur_vol_local )
Step = G4VoxelNavigation_ComputeStep(
&(This->fVoxelNav),
This->fLastLocatedPointLocal,
localDirection,
pCurrentProposedStepLength,
pNewSafety,
&(This->fHistory),
&(This->fValidExitNormal),
&(This->fExitNormal),
&(This->fExiting),
&(This->fEntering),
&(This->fBlockedPhysicalVolume)
, Result
);
else
return 0;
}
else
{
Step = G4NormalNavigation_ComputeStep(
This->fLastLocatedPointLocal,
localDirection,
pCurrentProposedStepLength,
pNewSafety,
&(This->fHistory),
&(This->fValidExitNormal),
&(This->fExitNormal),
&(This->fExiting),
&(This->fEntering),
&(This->fBlockedPhysicalVolume));
}
This->fPreviousSafety = *pNewSafety;
This->fLocatedOnEdge = This->fLastStepWasZero && (Step==0.0);
This->fLastStepWasZero = (Step==0.0);
if (This->fPushed) This->fPushed = This->fLastStepWasZero;
if ( This->fLastStepWasZero )
{
This->fNumberZeroSteps++;
if( This->fNumberZeroSteps >= 10 ) /* after repeated zero steps, push the track by a fraction of the tolerance */
{
Step += 0.9*kCarTolerance;
This->fPushed = true;
}
if( This->fNumberZeroSteps >= 25 )
{
(void)0; /* placeholder, presumably for the stuck-track warning/abort of the original navigator */
}
}
else
{
if (!This->fPushed) This->fNumberZeroSteps = 0;
}
This->fEnteredDaughter = This->fEntering;
This->fExitedMother = This->fExiting;
if( This->fExiting )
{
if(This->fValidExitNormal)
{
This->fGrandMotherExitNormal= This->fExitNormal;
}
else
{
G4ThreeVector finalLocalPoint =
G4ThreeVector_saxpy( Step, localDirection, This->fLastLocatedPointLocal );
This->fGrandMotherExitNormal =
G4VSolid_SurfaceNormal(
G4LogicalVolume_GetSolid(motherLogical),finalLocalPoint);
G4RotationMatrix mRot = G4VPhysicalVolume_GetObjectRotationValue(motherPhysical);
G4RotationMatrix inv = G4RotationMatrix_inverse(&mRot);
This->fGrandMotherExitNormal
= G4RotationMatrix_apply(&inv,This->fGrandMotherExitNormal);
}
}
This->fStepEndPoint =
G4ThreeVector_saxpy(Step, pDirection, pGlobalpoint );
if( (Step == pCurrentProposedStepLength) && (!This->fExiting) && (!This->fEntering) )
{
Step = kInfinity;
}
return Step;
}
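/* Decides whether a point on (or inside) the surface of sampleSolid should be treated as
   entering it, optionally using the track direction when the point sits exactly on the
   surface and the track is located on an edge. */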
__device__ G4bool
G4AuxiliaryNavServices_CheckPointOnSurface(
const G4VSolid* sampleSolid,
G4ThreeVector localPoint,
const G4ThreeVector* globalDirection,
G4AffineTransform sampleTransform,
const G4bool locatedOnEdge)
{
G4ThreeVector localDirection, sampleNormal;
G4bool enter = false;
EInside insideSolid =
G4VSolid_Inside(sampleSolid, localPoint);
if ( insideSolid!=kOutside )
{
G4bool checkDirection= locatedOnEdge && (globalDirection!=0);
if( (insideSolid==kSurface) && checkDirection)
{
localDirection= G4AffineTransform_TransformAxis(&sampleTransform,*globalDirection);
sampleNormal = G4VSolid_SurfaceNormal(sampleSolid,localPoint);
if ( G4ThreeVector_dot(sampleNormal,localDirection) <= 0 )
{
if( G4ThreeVector_dot(sampleNormal,localDirection) == 0 )
{
G4double distanceToIn =
G4VSolid_DistanceToIn_full( sampleSolid, localPoint, localDirection );
if( distanceToIn != kInfinity )
{
enter = true;
}
}
else
{
enter = true;
}
}
}
else
{
enter = true;
}
}
return enter;
}
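/* Non-voxelised LevelLocate: tests every daughter of the current top volume for
   containment of the point; on success the history is descended one level and
   *localPoint is replaced by the point in the daughter's frame. */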
__device__ G4bool
G4NormalNavigation_LevelLocate(
G4NavigationHistory *history,
const G4VPhysicalVolume *blockedVol,
G4ThreeVector* globalPoint,
const G4ThreeVector* globalDirection,
G4bool pLocatedOnEdge,
G4ThreeVector* localPoint )
{
G4VPhysicalVolume *targetPhysical, *samplePhysical;
G4LogicalVolume *targetLogical;
G4VSolid *sampleSolid;
G4ThreeVector samplePoint;
G4int targetNoDaughters;
targetPhysical = G4NavigationHistory_GetTopVolume(history);
targetLogical = G4VPhysicalVolume_GetLogicalVolume(targetPhysical);
targetNoDaughters = G4LogicalVolume_GetNoDaughters(targetLogical);
if (targetNoDaughters == 0) return false;
for ( int sampleNo=targetNoDaughters-1; sampleNo>=0; sampleNo-- )
{
samplePhysical =
G4LogicalVolume_GetDaughter(targetLogical,sampleNo);
if ( samplePhysical!=blockedVol )
{
G4NavigationHistory_NewLevel(history, samplePhysical, kNormal );
sampleSolid =
G4LogicalVolume_GetSolid(
G4VPhysicalVolume_GetLogicalVolume(samplePhysical));
G4AffineTransform tf =
G4NavigationHistory_GetTopTransform(history);
samplePoint =
G4AffineTransform_TransformPoint( &tf, *globalPoint );
if( G4AuxiliaryNavServices_CheckPointOnSurface(
sampleSolid, samplePoint, globalDirection,
tf, pLocatedOnEdge) )
{
*localPoint = samplePoint;
return true;
}
else
{
G4NavigationHistory_BackLevel(history);
}
}
}
return false;
}
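/* Non-voxelised step computation: intersects the ray with every daughter and with the
   mother solid, returning the shortest distance and updating the entering / exiting
   flags, the blocked volume and the isotropic safety. */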
__device__
G4double
G4NormalNavigation_ComputeStep(
G4ThreeVector localPoint,
G4ThreeVector localDirection,
const G4double currentProposedStepLength,
G4double *newSafety,
G4NavigationHistory *history,
G4bool *validExitNormal,
G4ThreeVector *exitNormal,
G4bool *exiting,
G4bool *entering,
G4VPhysicalVolume *(*pBlockedPhysical))
{
G4VPhysicalVolume *motherPhysical, *samplePhysical, *blockedExitedVol=0;
G4LogicalVolume *motherLogical;
G4VSolid *motherSolid;
G4ThreeVector sampleDirection;
G4double ourStep=currentProposedStepLength, motherSafety, ourSafety;
G4int localNoDaughters, sampleNo;
motherPhysical = G4NavigationHistory_GetTopVolume(history);
motherLogical = G4VPhysicalVolume_GetLogicalVolume(motherPhysical);
motherSolid = G4LogicalVolume_GetSolid(motherLogical);
motherSafety = G4VSolid_DistanceToOut(motherSolid,localPoint);
ourSafety = motherSafety;
if ( *exiting && *validExitNormal )
{
if ( G4ThreeVector_dot(localDirection,*exitNormal)>=kMinExitingNormalCosine )
{
blockedExitedVol = *pBlockedPhysical;
ourSafety = 0;
}
}
*exiting = false;
*entering = false;
localNoDaughters = G4LogicalVolume_GetNoDaughters(motherLogical);
for ( sampleNo=localNoDaughters-1; sampleNo>=0; sampleNo--)
{
samplePhysical = G4LogicalVolume_GetDaughter(motherLogical,sampleNo);
if ( samplePhysical!=blockedExitedVol )
{
G4AffineTransform sampleTf =
G4AffineTransform_create_full(
G4VPhysicalVolume_GetObjectRotationValue(samplePhysical),
G4VPhysicalVolume_GetTranslation(samplePhysical));
G4AffineTransform_Invert(&sampleTf);
const G4ThreeVector samplePoint =
G4AffineTransform_TransformPoint(&sampleTf, localPoint);
const G4VSolid *sampleSolid =
G4LogicalVolume_GetSolid(
G4VPhysicalVolume_GetLogicalVolume( samplePhysical ));
const G4double sampleSafety =
G4VSolid_DistanceToIn(sampleSolid,samplePoint);
if ( sampleSafety<ourSafety )
{
ourSafety=sampleSafety;
}
if ( sampleSafety<=ourStep )
{
sampleDirection = G4AffineTransform_TransformAxis(&sampleTf, localDirection);
const G4double sampleStep =
G4VSolid_DistanceToIn_full(sampleSolid,samplePoint,sampleDirection);
if ( sampleStep<=ourStep )
{
ourStep = sampleStep;
*entering = true;
*exiting = false;
*pBlockedPhysical = samplePhysical;
}
}
}
}
if ( currentProposedStepLength<ourSafety )
{
*entering = false;
*exiting = false;
*pBlockedPhysical = 0;
ourStep = kInfinity;
}
else
{
if ( motherSafety<=ourStep )
{
G4double motherStep =
G4VSolid_DistanceToOut_full(
motherSolid,
localPoint,
localDirection,
true,
validExitNormal,
exitNormal);
if ( motherStep<=ourStep )
{
ourStep = motherStep;
*exiting = true;
*entering = false;
if ( *validExitNormal )
{
G4RotationMatrix rot = G4VPhysicalVolume_GetObjectRotationValue(motherPhysical);
G4RotationMatrix inv = G4RotationMatrix_inverse(&rot);
*exitNormal = G4RotationMatrix_apply(&inv, *exitNormal);
}
}
else
{
*validExitNormal = false;
}
}
}
*newSafety = ourSafety;
return ourStep;
}
__device__ G4bool
G4AuxiliaryNavServices_CheckPointOnSurface(
const G4VSolid* sampleSolid,
G4ThreeVector localPoint,
const G4ThreeVector* globalDirection,
G4AffineTransform sampleTransform,
const G4bool locatedOnEdge);
__device__ G4bool
G4AuxiliaryNavServices_CheckPointExiting(
const G4VSolid* sampleSolid,
G4ThreeVector localPoint,
const G4ThreeVector* globalDirection,
G4AffineTransform sampleTransform );
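/* Walks down the voxel header hierarchy until the node containing localPoint is found,
   recording the path taken in the per-depth stacks of This. */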
__device__
G4SmartVoxelNode*
G4VoxelNavigation_VoxelLocate(
G4VoxelNavigation *This,
G4SmartVoxelHeader* pHead,
G4ThreeVector localPoint )
{
G4SmartVoxelHeader *targetVoxelHeader=pHead;
G4SmartVoxelNode *targetVoxelNode = 0;
const G4SmartVoxelProxy *sampleProxy;
EAxis targetHeaderAxis;
G4double targetHeaderMin, targetHeaderNodeWidth;
G4int targetHeaderNoSlices, targetNodeNo;
This->fVoxelDepth = 0;
while ( targetVoxelNode == 0 )
{
targetHeaderAxis = G4VoxelHeader_GetAxis(targetVoxelHeader);
targetHeaderNoSlices = G4VoxelHeader_GetNoSlices(targetVoxelHeader);
targetHeaderMin = G4VoxelHeader_GetMinExtent(targetVoxelHeader);
targetHeaderNodeWidth =
(G4VoxelHeader_GetMaxExtent(targetVoxelHeader)-targetHeaderMin)
/ targetHeaderNoSlices;
targetNodeNo = (G4int)(
(G4ThreeVector_coord(localPoint,targetHeaderAxis)-targetHeaderMin)
/ targetHeaderNodeWidth);
if ( targetNodeNo<0 )
{
targetNodeNo = 0;
}
else if ( targetNodeNo>=targetHeaderNoSlices )
{
targetNodeNo = targetHeaderNoSlices-1;
}
This->fVoxelAxisStack[This->fVoxelDepth] = targetHeaderAxis;
This->fVoxelNoSlicesStack[This->fVoxelDepth] = targetHeaderNoSlices;
This->fVoxelSliceWidthStack[This->fVoxelDepth] = targetHeaderNodeWidth;
This->fVoxelNodeNoStack[This->fVoxelDepth] = targetNodeNo;
This->fVoxelHeaderStack[This->fVoxelDepth] = targetVoxelHeader;
sampleProxy = G4VoxelHeader_GetSlice(targetVoxelHeader, targetNodeNo);
if ( G4VoxelProxy_IsNode(sampleProxy) )
{
targetVoxelNode = G4VoxelProxy_GetNode(sampleProxy);
}
else
{
targetVoxelHeader = G4VoxelProxy_GetHeader(sampleProxy);
This->fVoxelDepth++;
(void)0;
}
}
This->fVoxelNode = targetVoxelNode;
return targetVoxelNode;
}
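/* Finds the voxel node that would be entered after travelling currentStep along
   localDirection; returns true when a new voxel is entered and false otherwise
   (for example when the step leaves the voxelised extent). */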
__device__
G4bool
G4VoxelNavigation_LocateNextVoxel(
G4VoxelNavigation *This,
G4ThreeVector localPoint,
G4ThreeVector localDirection,
const G4double currentStep )
{
G4SmartVoxelHeader *workHeader=0, *newHeader=0;
G4SmartVoxelProxy *newProxy=0;
G4SmartVoxelNode *newVoxelNode= 0;
G4ThreeVector targetPoint, voxelPoint;
G4double workNodeWidth, workMinExtent, workCoord;
G4double minVal, maxVal, newDistance=0.;
G4double newHeaderMin, newHeaderNodeWidth;
G4int depth=0, newDepth=0, workNodeNo=0, newNodeNo=0, newHeaderNoSlices=0;
EAxis workHeaderAxis, newHeaderAxis;
G4bool isNewVoxel=false;
G4double currentDistance = currentStep;
for (depth=0; depth<This->fVoxelDepth; depth++)
{
targetPoint =
G4ThreeVector_saxpy(currentDistance,localDirection,localPoint);
newDistance = currentDistance;
workHeader = This->fVoxelHeaderStack[depth];
workHeaderAxis = This->fVoxelAxisStack[depth];
workNodeNo = This->fVoxelNodeNoStack[depth];
workNodeWidth = This->fVoxelSliceWidthStack[depth];
workMinExtent = G4VoxelHeader_GetMinExtent(workHeader);
workCoord = G4ThreeVector_coord(targetPoint,workHeaderAxis);
minVal = workMinExtent+workNodeNo*workNodeWidth;
if ( minVal<=workCoord+1E-3*0.5 )
{
maxVal = minVal+workNodeWidth;
if ( maxVal<=workCoord-1E-3*0.5 )
{
newNodeNo = workNodeNo+1;
newHeader = workHeader;
newDistance = (maxVal-G4ThreeVector_coord(localPoint,workHeaderAxis))
/ G4ThreeVector_coord(localDirection,workHeaderAxis);
isNewVoxel = true;
newDepth = depth;
}
}
else
{
newNodeNo = workNodeNo-1;
newHeader = workHeader;
newDistance = (minVal-G4ThreeVector_coord(localPoint,workHeaderAxis))
/ G4ThreeVector_coord(localDirection,workHeaderAxis);
isNewVoxel = true;
newDepth = depth;
}
currentDistance = newDistance;
}
targetPoint =
G4ThreeVector_saxpy(currentDistance,localDirection,localPoint);
depth = This->fVoxelDepth;
{
workHeader = This->fVoxelHeaderStack[depth];
workHeaderAxis = This->fVoxelAxisStack[depth];
workNodeNo = This->fVoxelNodeNoStack[depth];
workNodeWidth = This->fVoxelSliceWidthStack[depth];
workMinExtent = G4VoxelHeader_GetMinExtent(workHeader);
workCoord = G4ThreeVector_coord(targetPoint,workHeaderAxis);
minVal = workMinExtent+G4VoxelNode_GetMinEquivalentSliceNo(This->fVoxelNode)*workNodeWidth;
if ( minVal<=workCoord+1E-3*0.5 )
{
maxVal = workMinExtent+(G4VoxelNode_GetMaxEquivalentSliceNo(This->fVoxelNode)+1)
*workNodeWidth;
if ( maxVal<=workCoord-1E-3*0.5 )
{
newNodeNo = G4VoxelNode_GetMaxEquivalentSliceNo(This->fVoxelNode)+1;
newHeader = workHeader;
newDistance = (maxVal-G4ThreeVector_coord(localPoint,workHeaderAxis))
/ G4ThreeVector_coord(localDirection,workHeaderAxis);
isNewVoxel = true;
newDepth = depth;
}
}
else
{
newNodeNo = G4VoxelNode_GetMinEquivalentSliceNo(This->fVoxelNode)-1;
newHeader = workHeader;
newDistance = (minVal-G4ThreeVector_coord(localPoint,workHeaderAxis))
/ G4ThreeVector_coord(localDirection,workHeaderAxis);
isNewVoxel = true;
newDepth = depth;
}
currentDistance = newDistance;
}
if (isNewVoxel)
{
if ( (newNodeNo<0) || (newNodeNo>=G4VoxelHeader_GetNoSlices(newHeader)))
{
isNewVoxel = false;
}
else
{
voxelPoint = G4ThreeVector_saxpy(newDistance,localDirection,localPoint);
(void)0;
This->fVoxelNodeNoStack[newDepth] = newNodeNo;
This->fVoxelDepth = newDepth;
newVoxelNode = 0;
while ( newVoxelNode == 0 )
{
newProxy = G4VoxelHeader_GetSlice(newHeader,newNodeNo);
if ( G4VoxelProxy_IsNode(newProxy) )
{
newVoxelNode = G4VoxelProxy_GetNode(newProxy);
}
else
{
This->fVoxelDepth++;
(void)0;
newHeader = G4VoxelProxy_GetHeader(newProxy);
newHeaderAxis = G4VoxelHeader_GetAxis(newHeader);
newHeaderNoSlices = G4VoxelHeader_GetNoSlices(newHeader);
newHeaderMin = G4VoxelHeader_GetMinExtent(newHeader);
newHeaderNodeWidth =
(G4VoxelHeader_GetMaxExtent(newHeader)-newHeaderMin)
/ newHeaderNoSlices;
newNodeNo = (G4int)(
(G4ThreeVector_coord(voxelPoint,newHeaderAxis)-newHeaderMin)
/ newHeaderNodeWidth );
if ( newNodeNo<0 )
{
newNodeNo=0;
}
else if ( newNodeNo>=newHeaderNoSlices )
{
newNodeNo = newHeaderNoSlices-1;
}
This->fVoxelAxisStack[This->fVoxelDepth] = newHeaderAxis;
This->fVoxelNoSlicesStack[This->fVoxelDepth] = newHeaderNoSlices;
This->fVoxelSliceWidthStack[This->fVoxelDepth] = newHeaderNodeWidth;
This->fVoxelNodeNoStack[This->fVoxelDepth] = newNodeNo;
This->fVoxelHeaderStack[This->fVoxelDepth] = newHeader;
}
}
This->fVoxelNode = newVoxelNode;
}
}
return isNewVoxel;
}
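/* Estimates the distance from localPoint to the nearest boundary of the current voxel
   stack, used as a conservative lower bound on the distance to any daughter that is not
   contained in the current voxel node. */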
__device__
G4double
G4VoxelNavigation_ComputeVoxelSafety(
const G4VoxelNavigation *This,
G4ThreeVector localPoint)
{
G4SmartVoxelHeader *curHeader;
G4double voxelSafety, curNodeWidth;
G4double curNodeOffset, minCurCommonDelta, maxCurCommonDelta;
G4int minCurNodeNoDelta, maxCurNodeNoDelta;
G4int localVoxelDepth, curNodeNo;
EAxis curHeaderAxis;
localVoxelDepth = This->fVoxelDepth;
curHeader = This->fVoxelHeaderStack[localVoxelDepth];
curHeaderAxis = This->fVoxelAxisStack[localVoxelDepth];
curNodeNo = This->fVoxelNodeNoStack[localVoxelDepth];
curNodeWidth = This->fVoxelSliceWidthStack[localVoxelDepth];
curNodeOffset = curNodeNo*curNodeWidth;
maxCurNodeNoDelta = G4VoxelNode_GetMaxEquivalentSliceNo(This->fVoxelNode)-curNodeNo;
/* assumed fix: the minimum-side delta should use the min-equivalent slice number
   (the original queried the max-equivalent slice on both sides) */
minCurNodeNoDelta = curNodeNo-G4VoxelNode_GetMinEquivalentSliceNo(This->fVoxelNode);
minCurCommonDelta = G4ThreeVector_coord(localPoint,curHeaderAxis)
- G4VoxelHeader_GetMinExtent(curHeader) - curNodeOffset;
maxCurCommonDelta = curNodeWidth-minCurCommonDelta;
if ( minCurNodeNoDelta<maxCurNodeNoDelta )
{
voxelSafety = minCurNodeNoDelta*curNodeWidth;
voxelSafety += minCurCommonDelta;
}
else if (maxCurNodeNoDelta < minCurNodeNoDelta)
{
voxelSafety = maxCurNodeNoDelta*curNodeWidth;
voxelSafety += maxCurCommonDelta;
}
else
{
voxelSafety = minCurNodeNoDelta*curNodeWidth;
voxelSafety += (((minCurCommonDelta)<(maxCurCommonDelta))?(minCurCommonDelta):(maxCurCommonDelta));
}
while ( (localVoxelDepth>0) && (voxelSafety>0) )
{
localVoxelDepth--;
curHeader = This->fVoxelHeaderStack[localVoxelDepth];
curHeaderAxis = This->fVoxelAxisStack[localVoxelDepth];
curNodeNo = This->fVoxelNodeNoStack[localVoxelDepth];
curNodeWidth = This->fVoxelSliceWidthStack[localVoxelDepth];
curNodeOffset = curNodeNo*curNodeWidth;
minCurCommonDelta = G4ThreeVector_coord(localPoint,curHeaderAxis)
- G4VoxelHeader_GetMinExtent(curHeader) - curNodeOffset;
maxCurCommonDelta = curNodeWidth-minCurCommonDelta;
if ( minCurCommonDelta<voxelSafety )
{
voxelSafety = minCurCommonDelta;
}
if ( maxCurCommonDelta<voxelSafety )
{
voxelSafety = maxCurCommonDelta;
}
}
if ( voxelSafety<0 )
{
voxelSafety = 0;
}
return voxelSafety;
}
__device__
void G4VoxelNavigation_ctor( G4VoxelNavigation *This )
{
This->fVoxelDepth = -1;
This->fVoxelNode = 0;
}
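/* Voxelised LevelLocate: only the daughters registered in the voxel node containing
   *localPoint are tested for containment. */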
__device__
G4bool
G4VoxelNavigation_LevelLocate(
G4VoxelNavigation *This,
G4NavigationHistory* history,
const G4VPhysicalVolume* blockedVol,
G4ThreeVector globalPoint,
const G4ThreeVector* globalDirection,
const G4bool pLocatedOnEdge,
G4ThreeVector *localPoint )
{
G4SmartVoxelHeader *targetVoxelHeader;
G4SmartVoxelNode *targetVoxelNode;
G4VPhysicalVolume *targetPhysical, *samplePhysical;
G4LogicalVolume *targetLogical;
G4VSolid *sampleSolid;
G4ThreeVector samplePoint;
G4int targetNoDaughters;
targetPhysical = G4NavigationHistory_GetTopVolume(history);
targetLogical = G4VPhysicalVolume_GetLogicalVolume(targetPhysical);
targetVoxelHeader = G4LogicalVolume_GetVoxelHeader(targetLogical);
targetVoxelNode =
G4VoxelNavigation_VoxelLocate(This,targetVoxelHeader,*localPoint);
targetNoDaughters=G4VoxelNode_GetNoContained(targetVoxelNode);
if ( targetNoDaughters==0 ) return false;
for ( int sampleNo=targetNoDaughters-1; sampleNo>=0; sampleNo-- )
{
samplePhysical =
G4LogicalVolume_GetDaughter( targetLogical,
G4VoxelNode_GetVolume(targetVoxelNode,sampleNo));
if ( samplePhysical!=blockedVol )
{
G4NavigationHistory_NewLevel(history, samplePhysical, kNormal);
sampleSolid =
G4LogicalVolume_GetSolid(
G4VPhysicalVolume_GetLogicalVolume( samplePhysical ));
G4AffineTransform tf = G4NavigationHistory_GetTopTransform( history );
samplePoint =
G4AffineTransform_TransformPoint( &tf, globalPoint );
if( G4AuxiliaryNavServices_CheckPointOnSurface(
sampleSolid, samplePoint, globalDirection,
tf, pLocatedOnEdge) )
{
*localPoint = samplePoint;
return true;
}
else
{
G4NavigationHistory_BackLevel( history );
}
}
}
return false;
}
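/* Voxelised step computation: candidate daughters are taken voxel by voxel along the
   ray, advancing with LocateNextVoxel until a boundary closer than the proposed step is
   found or the mother volume is exited. */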
__device__
G4double
G4VoxelNavigation_ComputeStep(
G4VoxelNavigation *This,
G4ThreeVector localPoint,
G4ThreeVector localDirection,
const G4double currentProposedStepLength,
G4double *newSafety,
G4NavigationHistory *history,
G4bool *validExitNormal,
G4ThreeVector *exitNormal,
G4bool *exiting,
G4bool *entering,
G4VPhysicalVolume *(*pBlockedPhysical)
, G4double * Result
)
{
G4VPhysicalVolume *motherPhysical, *samplePhysical,
*blockedExitedVol = 0;
G4LogicalVolume *motherLogical;
G4VSolid *motherSolid;
G4ThreeVector sampleDirection;
G4double ourStep=currentProposedStepLength, motherSafety, ourSafety;
G4int sampleNo;
G4bool initialNode, noStep;
const G4SmartVoxelNode *curVoxelNode;
G4int curNoVolumes, contentNo;
G4double voxelSafety;
motherPhysical = G4NavigationHistory_GetTopVolume( history );
motherLogical = G4VPhysicalVolume_GetLogicalVolume(motherPhysical);
motherSolid = G4LogicalVolume_GetSolid(motherLogical);
motherSafety = G4VSolid_DistanceToOut(motherSolid, localPoint);
ourSafety = motherSafety;
if ( *exiting && *validExitNormal )
{
if ( G4ThreeVector_dot(localDirection,*exitNormal)>=kMinExitingNormalCosine )
{
blockedExitedVol = *pBlockedPhysical;
ourSafety = 0;
}
}
*exiting = false;
*entering = false;
initialNode = true;
noStep = true;
while ( noStep )
{
curVoxelNode = This->fVoxelNode;
curNoVolumes = G4VoxelNode_GetNoContained(curVoxelNode);
for (contentNo=curNoVolumes-1; contentNo>=0; contentNo--)
{
sampleNo = G4VoxelNode_GetVolume( curVoxelNode, contentNo);
samplePhysical = G4LogicalVolume_GetDaughter(motherLogical,sampleNo);
if ( samplePhysical!=blockedExitedVol )
{
G4AffineTransform sampleTf =
G4AffineTransform_create_full(
G4VPhysicalVolume_GetObjectRotationValue(samplePhysical),
G4VPhysicalVolume_GetTranslation(samplePhysical));
G4AffineTransform_Invert(&sampleTf);
const G4ThreeVector samplePoint =
G4AffineTransform_TransformPoint(&sampleTf,localPoint);
const G4VSolid *sampleSolid =
G4LogicalVolume_GetSolid(
G4VPhysicalVolume_GetLogicalVolume(
samplePhysical ));
const G4double sampleSafety =
G4VSolid_DistanceToIn(sampleSolid,samplePoint);
if ( sampleSafety<ourSafety )
{
ourSafety = sampleSafety;
}
if ( sampleSafety<=ourStep )
{
sampleDirection =
G4AffineTransform_TransformAxis( &sampleTf, localDirection );
G4double sampleStep =
G4VSolid_DistanceToIn_full(sampleSolid, samplePoint, sampleDirection);
if ( sampleStep<=ourStep )
{
ourStep = sampleStep;
*entering = true;
*exiting = false;
*pBlockedPhysical = samplePhysical;
}
}
}
}
if (initialNode)
{
initialNode = false;
voxelSafety = G4VoxelNavigation_ComputeVoxelSafety(This,localPoint);
if ( voxelSafety<ourSafety )
{
ourSafety = voxelSafety;
}
if ( currentProposedStepLength<ourSafety )
{
noStep = false;
*entering = false;
*exiting = false;
*pBlockedPhysical = 0;
ourStep = kInfinity;
}
else
{
if ( motherSafety<=ourStep )
{
G4double motherStep =
G4VSolid_DistanceToOut_full( motherSolid, localPoint, localDirection,
true, validExitNormal, exitNormal);
if ( motherStep<=ourStep )
{
ourStep = motherStep;
*exiting = true;
*entering = false;
if ( *validExitNormal )
{
G4RotationMatrix rot = G4VPhysicalVolume_GetObjectRotationValue(motherPhysical);
G4RotationMatrix inv = G4RotationMatrix_inverse(&rot);
*exitNormal = G4RotationMatrix_apply( &inv, *exitNormal );
}
}
else
{
*validExitNormal = false;
}
}
}
*newSafety = ourSafety;
}
if (noStep)
{
noStep = G4VoxelNavigation_LocateNextVoxel(This, localPoint, localDirection, ourStep);
}
}
return ourStep;
}
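/* Kernel: one thread per primary particle.  Each thread repeatedly locates its current
   volume, computes the next geometric step and accumulates material 'property' times
   step length (an integrated density) until every thread in the block has left the
   world; the per-particle sum is written to 'output'. */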
__global__ void trace(
Particle *input,
G4double *output,
G4VPhysicalVolume *worldVolumeAndGeomBuffer,
G4double phys_step,
int totalSize
, G4double * Result
, SolidInfo * Solids
, ResultInfo * Result_For_Current_Solid
, FinalResult * Compacter_Result,
G4SmartVoxelNode * nullVNode
)
{
const unsigned globalIdx = (blockIdx.x * blockDim.x + threadIdx.x);
const unsigned localIdx = threadIdx.x;
const unsigned locationId = globalIdx;
if (globalIdx >= totalSize ) return;
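/* NOTE: the shared arrays below are sized BlockSize but indexed with the global thread
   id, and threads that returned above do not reach the later __syncthreads() calls, so
   this kernel is only well defined when launched as a single full block of BlockSize
   threads. */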
__shared__ int Numbers_Of_Solid[ BlockSize ];
__shared__ int Sum_Of_Solids[ BlockSize ];
__shared__ bool noStepArray [ BlockSize ];
__shared__ PointInformation LocationArray[ BlockSize ];
__shared__ G4VPhysicalVolume * info[ BlockSize ];
G4VoxelNode_ctor( nullVNode ,1 );
__shared__ bool Cur_Vol_Store [ BlockSize ];
G4Navigator navi;
G4Navigator *nav = &navi;
G4Navigator_ctor(nav);
G4Navigator_SetWorldVolume( nav, worldVolumeAndGeomBuffer );
Particle p = input[globalIdx];
if( globalIdx == 0)
{
}
const G4VPhysicalVolume * cur_vol =
G4Navigator_LocateGlobalPointAndSetup(
nav, p.pos, 0, false, true, Result );
G4bool cur_vol_local = true, cur_vol_all = true;
G4double step, safety = 0.1;
G4double integratedDensity = 0;
int temp = 0;
while ( cur_vol_all )
{
{
const G4double curDensity =
G4LogicalVolume_GetMaterial( G4VPhysicalVolume_GetLogicalVolume( cur_vol ))->property;
PointInformation NewPoint = { p.pos, p.dir };
LocationArray[ locationId ] = NewPoint;
if( temp == 1)
{
Result[ locationId ] = step;
}
step = G4Navigator_ComputeStep( nav, p.pos, p.dir, phys_step, &safety
, cur_vol_local
, Result
);
if ( step == kInfinity ) step = phys_step;
const G4double nextStepIntegratedD = curDensity * step;
integratedDensity += nextStepIntegratedD;
G4ThreeVector_sum_assign( &(p.pos), G4ThreeVector_mult( p.dir, step ) );
G4Navigator_SetGeometricallyLimitedStep( nav );
if( globalIdx == 0 ){
}
cur_vol =
G4Navigator_LocateGlobalPointAndSetup(
nav, p.pos, &(p.dir), true, false, Result );
if ( !cur_vol )
cur_vol_local = false;
}
Cur_Vol_Store[ locationId ] = cur_vol_local;
__syncthreads();
// cur_vol_all = NoStepReduction( Cur_Vol_Store, BlockSize );
{
int tid = (blockIdx.x * blockDim.x + threadIdx.x);
int offset = 1;
for(int d = BlockSize>>1; d > 0; d >>=1)
{
__syncthreads();
if(tid<d)
{
int ai = offset*(2*tid + 1) - 1;
int bi = offset*(2*tid + 2) - 1;
Cur_Vol_Store[bi] = (Cur_Vol_Store[ai] || Cur_Vol_Store[bi]);
}
offset *= 2;
}
G4bool result = Cur_Vol_Store[ BlockSize - 1 ];
__syncthreads();
cur_vol_all = result;
}
__syncthreads();
temp++;
}
output[globalIdx] = integratedDensity;
}
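/* Kernel: applies pointer fix-ups to the geometry buffer.  'ptr' holds (destination
   offset, target offset) pairs; each thread patches one slot so that it points back
   into 'buf'. */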
__global__ void relocate ( int * ptr, void * buf, int size )
{
typedef unsigned char byte;
const unsigned globalidx = (blockIdx.x * blockDim.x + threadIdx.x);
if(globalidx>=size) return;
int destoffs, targoffs;
destoffs = *(ptr + 2*globalidx);
targoffs = *(ptr + 2*globalidx+ 1);
/* assumed fix: store the full relocated pointer (the original truncated it to one byte) */
*(void**)((byte*)buf + destoffs) = (void*)((byte*)buf + targoffs);
}
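/* Debug kernel: writes the device address of the geometry buffer back to the host. */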
__global__ void check( G4VPhysicalVolume *worldVolumeAndGeomBuffer, unsigned long * result)
{
unsigned long hope = ( unsigned long )worldVolumeAndGeomBuffer; /* keep the full pointer width (the original cast truncated to unsigned int) */
*result = hope;
}
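/* Leftover debug stub: fills a small boolean pattern but performs no reduction and never
   writes 'output'. */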
__global__ void test ( bool * output, bool * input)
{
int tid = (blockIdx.x * blockDim.x + threadIdx.x);
int offset = 1;
G4bool result;
if( tid == 0)
{
input[ 0] = true;
input[ 1] = true;
input[ 2] = true;
input[ 3] = true;
input[ 4] = true;
input[ 5] = true;
input[ 6] = false;
input[ 7] = true;
}
__syncthreads();
}
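/* Debug kernel: samples a line of points through the geometry and records, per point,
   the volume's 'count' field, the offset of its logical volume inside the geometry
   buffer and its material property. */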
__global__ void checkgeom( G4VPhysicalVolume *worldVolumeAndGeomBuffer, int * result, int number_of_increments)
{
const unsigned globalid = (blockIdx.x * blockDim.x + threadIdx.x);
if(globalid>=1) return;
int i=0;
G4Navigator navi;
G4Navigator *nav = &navi;
G4Navigator_ctor(nav);
G4Navigator_SetWorldVolume( nav, worldVolumeAndGeomBuffer );
G4ThreeVector pos = G4ThreeVector_create( 0.0, 0.0, 0.0);
const G4VPhysicalVolume * cur_vol = 0;
unsigned long geom_start = ( unsigned long )worldVolumeAndGeomBuffer; /* keep the full pointer width */
pos = G4ThreeVector_create( 0.7, 1.0, 0.7);
float x_increment = 0.2, y_increment = 0.2, z_increment = 0.2;
for( i=0; i < number_of_increments*3 ; i+=3)
{
/* Assumed fix: locate the volume at the current sample point before reading it;
   the original dereferenced cur_vol without ever initialising it. */
cur_vol = G4Navigator_LocateGlobalPointAndSetup( nav, pos, 0, false, true, 0 );
result[i] = ( int ) cur_vol->count;
result[i + 1] = ( int )(( unsigned long )(cur_vol->flogical) - geom_start);
result[i + 2] = ( int ) G4LogicalVolume_GetMaterial( G4VPhysicalVolume_GetLogicalVolume( cur_vol ))->property;
pos.x+=x_increment;
pos.y+=y_increment;
pos.z+=z_increment;
}
}
struct CameraParameters
{
double
heading,
pitch,
roll,
dist,
yfov,
target_x,
target_y,
target_z;
CameraParameters()
:
heading(0), pitch(0), roll(0), dist(1),
yfov(90), target_x(0), target_y(0), target_z(0)
{}
};
struct EventOrigin
{
double x,y,z;
};
class Geometry
{
public:
typedef unsigned char byte;
virtual ~Geometry() {}
virtual void create() = 0;
virtual void relocate( void *newbegin ) = 0;
virtual int size() const = 0;
virtual int ptrs_size() const=0;
virtual void *getBuffer() = 0;
virtual double getScale() const = 0;
// virtual CameraParameters getCamera() const
// {
// return 0;
// }
virtual EventOrigin getEvent() const
{
EventOrigin e = { 0,0,0 };
return e;
}
virtual int getNumVoxelNodes() const { return 0; }
};
typedef struct { const char *err, *fn; int line, errcode; } my_cuda_err;
typedef struct { int secs; int usecs; } mytimet;
extern "C"
{
void myprint( const char *chr );
void myprint1( const char *chr, int n );
mytimet mytimer();
void myprinttdiff(mytimet a, mytimet b);
void mysleep(int n);
}
static inline int ceilDiv( int a, int d )
{
return a/d + ((a%d)?1:0);
}
Particle *gpuInput;
G4double *gpuOutput;
Geometry::byte *gpuGeom;
int numInput, numOutput, numInputPerRound;
const int WARP_SIZE = 32;
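/* Picks a grid / block configuration for 'numInput' particles, balancing warps across the
   multiprocessors using Fermi-class constants (448 cores, 14 SMs, 8 blocks per SM). */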
void createGrid( int numInput, dim3* grid, dim3* block )
{
const int MAXSIZE = 10000000;
const int NUMCORES = 448;
const int NUMMULTIPROC = 14;
const int BLOCKS_PER_MULTIPROC = 8;
const int MAX_WARPS_PER_MULTIPROC = 48;
const int MAX_DATA_PER_MULTIPROC = MAX_WARPS_PER_MULTIPROC*WARP_SIZE;
int size = numInput;
if (size > MAXSIZE) size = MAXSIZE;
int dataPerMultiproc = ceilDiv(size,NUMMULTIPROC);
if ( dataPerMultiproc > MAX_DATA_PER_MULTIPROC )
dataPerMultiproc = MAX_DATA_PER_MULTIPROC;
int blockSize = ceilDiv(dataPerMultiproc,BLOCKS_PER_MULTIPROC);
const int MAX_BLOCK_SIZE = 1024;
if (blockSize > MAX_BLOCK_SIZE) blockSize = MAX_BLOCK_SIZE;
int numBlocks = ceilDiv(size,blockSize);
int numWarps = ceilDiv(blockSize,WARP_SIZE) * numBlocks;
if (numWarps > NUMCORES)
{
blockSize = ceilDiv(blockSize,WARP_SIZE)*WARP_SIZE;
dataPerMultiproc = blockSize * BLOCKS_PER_MULTIPROC;
if ( dataPerMultiproc > MAX_DATA_PER_MULTIPROC )
blockSize -= WARP_SIZE;
}
size = blockSize*ceilDiv(size,blockSize);
if (size > MAXSIZE) size = MAXSIZE;
block->x = blockSize;
block->y = block->z = 1;
grid->x = size/blockSize;
grid->y = 1;
grid->z = 1;
}
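/* Host-side initialisation: allocates the particle / output buffers and the geometry
   image on the device, relocates the geometry pointers to their device addresses and
   uploads it.  Typical call sequence (sketch; 'geom', 'particles' and 'densities' are
   illustrative names, not part of this file):

     geom->create();
     if( cudainit( geom, N ).err ) { ... handle error ... }
     cudaexec( phys_step, N, particles, densities );
     cudafinish();
*/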
my_cuda_err cudainit( Geometry *geom, int N )
{
const mytimet t0 = mytimer();
numOutput = numInput = numInputPerRound = N;
do { hipError_t errc = hipSetDeviceFlags(0); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 157, errc }; return r; } } while(0);
do { hipError_t errc = hipMalloc( (void**)&gpuInput, sizeof(Particle)*numInput ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 159, errc }; return r; } } while(0);
do { hipError_t errc = hipMalloc( (void**)&gpuOutput, sizeof(G4double)*numOutput ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 160, errc }; return r; } } while(0);
do { hipError_t errc = hipMalloc( (void**)&gpuGeom, geom->size() ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 161, errc }; return r; } } while(0);
geom->relocate( gpuGeom );
hipFuncSetCacheConfig(trace, hipFuncCachePreferL1);
do { hipError_t errc = hipMemcpy( gpuGeom, geom->getBuffer(), geom->size(), hipMemcpyHostToDevice ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 197, errc }; return r; } } while(0);
const mytimet t1 = mytimer();
myprint("Initialization: ");
myprinttdiff(t0, t1);
my_cuda_err ok = { 0, 0, 0, hipSuccess }; return ok;
}
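/* Processes the particles in chunks of numInput: uploads each chunk, launches the trace
   kernel and copies the integrated densities back into 'output'. */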
my_cuda_err cudaexec( G4double phys_step, int totalInput, Particle *input, G4double *output )
{
G4double * Result;
SolidInfo * Solids = 0;
ResultInfo * Result_For_Current_Solid = 0;
FinalResult * Compacter_Result = 0;
G4SmartVoxelNode * nullVNode;
/* The trace kernel writes through Result[] and nullVNode, so device buffers are allocated
   for them here (the original passed all five pointers to the kernel uninitialised); the
   other three pointers are never dereferenced by the kernel and are passed as null.
   Buffer sizes are an assumption: one G4double per particle and a single voxel node. */
do { hipError_t errc = hipMalloc( (void**)&Result, sizeof(G4double)*numInput ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 0, errc }; return r; } } while(0);
do { hipError_t errc = hipMalloc( (void**)&nullVNode, sizeof(G4SmartVoxelNode) ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 0, errc }; return r; } } while(0);
for ( int i = 0; i < totalInput; i += numInput )
{
if ( i + numInput > totalInput ) numInput = totalInput-i;
do { hipError_t errc = hipMemcpy( gpuInput, input+i, sizeof(Particle)*numInput, hipMemcpyHostToDevice ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 333, errc }; return r; } } while(0);
dim3 grid, block;
createGrid( numInput, &grid, &block );
hipLaunchKernelGGL(( trace) , dim3(grid), dim3(block) , 0, 0, gpuInput, gpuOutput, (G4VPhysicalVolume*)gpuGeom, phys_step, numInput, Result, Solids, Result_For_Current_Solid, Compacter_Result, nullVNode);
do { hipError_t errc = hipGetLastError(); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 340, errc }; return r; } } while(0);
do { hipError_t errc = hipMemcpy( output+i, gpuOutput, sizeof(G4double)*numOutput, hipMemcpyDeviceToHost ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 343, errc }; return r; } } while(0);
}
do { hipError_t errc = hipFree( Result ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 0, errc }; return r; } } while(0);
do { hipError_t errc = hipFree( nullVNode ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 0, errc }; return r; } } while(0);
my_cuda_err ok = { 0, 0, 0, hipSuccess }; return ok;
}
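/* Frees the device buffers and resets the device. */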
my_cuda_err cudafinish()
{
const mytimet t0 = mytimer();
do { hipError_t errc = hipFree( gpuInput ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 356, errc }; return r; } } while(0);
do { hipError_t errc = hipFree( gpuOutput ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 357, errc }; return r; } } while(0);
do { hipError_t errc = hipFree( gpuGeom ); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 358, errc }; return r; } } while(0);
do { hipError_t errc = hipDeviceReset(); if (errc != hipSuccess) { my_cuda_err r = { hipGetErrorString(errc), "cuda.cpp", 375, errc }; return r; } } while(0);
const mytimet t1 = mytimer();
myprint("Finalization: ");
myprinttdiff(t0, t1);
my_cuda_err ok = { 0, 0, 0, hipSuccess }; return ok;
}
| fb5725f10800bc69ee9852696ff299bcf8a31088.cu | #define BlockSize 32
#define kInfinity (1.0E37)
#define kMinExitingNormalCosine (1E-3)
typedef float G4double;
typedef float G4float;
typedef int G4int;
typedef int G4bool;
typedef long G4long;
//G4double kInfinity = 1.0E37;
// int BlockSize = 32;
int Multiplier = 4;
G4double twopi = 2.0*3.14159265358979323846264338327;
// G4double kMinExitingNormalCosine = 1E-3;
typedef enum {kOutside,kSurface,kInside} EInside;
typedef enum {kNormal,kReplica,kParameterised} EVolume;
typedef enum {kXAxis,kYAxis,kZAxis,kRho,kRadial3D,kPhi,kUndefined} EAxis;
typedef enum { kBox = 0 , kOrb, kTubs, kCons, kPolyCone, Solidcount } ESolid;
typedef struct
{
G4double x,y,z;
G4double w;
}
G4ThreeVector;
__device__
G4ThreeVector G4ThreeVector_create( G4double x, G4double y, G4double z )
{
G4ThreeVector v =
{x,y,z,0};
return v;
}
__device__
G4ThreeVector G4ThreeVector_saxpy( G4double a, G4ThreeVector x, G4ThreeVector y )
{
return G4ThreeVector_create(
a*x.x + y.x,
a*x.y + y.y,
a*x.z + y.z );
}
__device__
G4ThreeVector G4ThreeVector_sum( G4ThreeVector a, G4ThreeVector b )
{
return G4ThreeVector_create( a.x+b.x, a.y+b.y, a.z+b.z );
}
__device__
G4ThreeVector G4ThreeVector_subtract( G4ThreeVector a, G4ThreeVector b )
{
return G4ThreeVector_create( a.x-b.x, a.y-b.y, a.z-b.z );
}
__device__
G4ThreeVector G4ThreeVector_sum_assign( G4ThreeVector *This, G4ThreeVector b )
{
(*This).x += b.x;
(*This).y += b.y;
(*This).z += b.z;
return *This;
}
__device__
G4ThreeVector G4ThreeVector_subtract_assign( G4ThreeVector *This, G4ThreeVector b )
{
(*This).x -= b.x;
(*This).y -= b.y;
(*This).z -= b.z;
return *This;
}
__device__
G4ThreeVector G4ThreeVector_mult_assign( G4ThreeVector *This, G4double m )
{
(*This).x *= m;
(*This).y *= m;
(*This).z *= m;
return *This;
}
__device__
G4ThreeVector G4ThreeVector_negation( G4ThreeVector a )
{
return G4ThreeVector_create( -a.x, -a.y, -a.z );
}
__device__
G4double G4ThreeVector_mag2( G4ThreeVector v )
{
return v.x*v.x + v.y*v.y + v.z*v.z;
}
__device__
G4double G4ThreeVector_mag( G4ThreeVector v )
{
return sqrt(G4ThreeVector_mag2(v));
}
__device__
G4double G4ThreeVector_dot( G4ThreeVector a, G4ThreeVector b )
{
return a.x*b.x + a.y*b.y + a.z*b.z;
}
__device__
G4ThreeVector G4ThreeVector_cross( G4ThreeVector a, G4ThreeVector p )
{
return G4ThreeVector_create(
a.y*p.z-p.y*a.z,
a.z*p.x-p.z*a.x,
a.x*p.y-p.x*a.y );
}
__device__
G4ThreeVector G4ThreeVector_mult( G4ThreeVector a, G4double m )
{
return G4ThreeVector_create( a.x*m, a.y*m, a.z*m );
}
__device__
G4ThreeVector G4ThreeVector_unit( G4ThreeVector v )
{
G4double l = G4ThreeVector_mag(v);
if ( l > 0 )
return G4ThreeVector_mult( v, 1.0/l );
return v;
}
__device__
G4bool G4ThreeVector_equal( G4ThreeVector a, G4ThreeVector b )
{
return a.x == b.x && a.y == b.y && a.z == b.z;
}
__device__
G4double G4ThreeVector_diff2( G4ThreeVector a, G4ThreeVector b )
{
return G4ThreeVector_mag2( G4ThreeVector_subtract(a,b) );
}
__device__
G4double G4ThreeVector_coord( G4ThreeVector v, EAxis axis )
{
switch( axis )
{
case kXAxis: return v.x;
case kYAxis: return v.y;
case kZAxis: return v.z;
default:
(void)0;
return 0;
}
}
__device__
void G4ThreeVector_set_coord( G4ThreeVector *v, EAxis axis, G4double val )
{
switch( axis )
{
case kXAxis: v->x = val; break;
case kYAxis: v->y = val; break;
case kZAxis: v->z = val; break;
default:
(void)0;
break;
}
}
typedef struct
{
G4ThreeVector pos, dir;
}
StubParticle;
typedef struct
{
G4ThreeVector pos, dir;
G4double t;
}
ParticleWithLifetime;
typedef StubParticle Particle;
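/* Work-efficient (Blelloch-style) exclusive prefix sum of 'input' into 'output'.  Thread
   indices are global, so correctness assumes a single cooperating block and a
   power-of-two 'length'. */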
__device__ void Prefix_Sum ( int * input, int * output, int length)
{
int tid = (blockIdx.x * blockDim.x + threadIdx.x);
int offset = 1;
if ( tid< length)
output[tid] = input[ tid ];
for(int d = length>>1; d > 0; d >>=1)
{
__syncthreads();
if(tid<d)
{
int ai = offset*(2*tid + 1) - 1;
int bi = offset*(2*tid + 2) - 1;
output[bi] += output[ai];
}
offset *= 2;
}
if(tid == 0)
{
output[length - 1] = 0;
}
for(int d = 1; d < length ; d *= 2)
{
offset >>=1;
__syncthreads();
if(tid < d)
{
int ai = offset*(2*tid + 1) - 1;
int bi = offset*(2*tid + 2) - 1;
int t = output[ai]; /* match the int element type (was a float temporary) */
output[ai] = output[bi];
output[bi] += t;
}
}
__syncthreads();
}
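/* Logical-OR reduction over 'noStepArray'; the combined flag is left in the last element
   and returned to every thread.  Same single-block assumption as Prefix_Sum above. */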
__device__
G4bool NoStepReduction( G4bool * noStepArray, int length )
{
int tid = (blockIdx.x * blockDim.x + threadIdx.x);
int offset = 1;
for(int d = length>>1; d > 0; d >>=1)
{
__syncthreads();
if(tid<d)
{
int ai = offset*(2*tid + 1) - 1;
int bi = offset*(2*tid + 2) - 1;
noStepArray[bi] = (noStepArray[ai] || noStepArray[bi]);
}
offset *= 2;
}
G4bool result = noStepArray[ length - 1 ];
__syncthreads();
return result;
}
typedef struct
{
G4double
rxx, rxy, rxz,
ryx, ryy, ryz,
rzx, rzy, rzz;
G4double align;
}
G4RotationMatrix;
__device__
G4RotationMatrix G4RotationMatrix_create_elements
(G4double mxx, G4double mxy, G4double mxz,
G4double myx, G4double myy, G4double myz,
G4double mzx, G4double mzy, G4double mzz)
{
G4RotationMatrix r =
{ mxx,mxy,mxz, myx,myy,myz, mzx,mzy,mzz
, 0
};
return r;
}
__device__
G4ThreeVector G4RotationMatrix_apply (const G4RotationMatrix *This, G4ThreeVector p)
{
return G4ThreeVector_create(
This->rxx*p.x + This->rxy*p.y + This->rxz*p.z,
This->ryx*p.x + This->ryy*p.y + This->ryz*p.z,
This->rzx*p.x + This->rzy*p.y + This->rzz*p.z);
}
__device__
G4RotationMatrix G4RotationMatrix_mult (const G4RotationMatrix *This, const G4RotationMatrix *other)
{
return G4RotationMatrix_create_elements(
This->rxx*(*other).rxx + This->rxy*(*other).ryx + This->rxz*(*other).rzx,
This->rxx*(*other).rxy + This->rxy*(*other).ryy + This->rxz*(*other).rzy,
This->rxx*(*other).rxz + This->rxy*(*other).ryz + This->rxz*(*other).rzz,
This->ryx*(*other).rxx + This->ryy*(*other).ryx + This->ryz*(*other).rzx,
This->ryx*(*other).rxy + This->ryy*(*other).ryy + This->ryz*(*other).rzy,
This->ryx*(*other).rxz + This->ryy*(*other).ryz + This->ryz*(*other).rzz,
This->rzx*(*other).rxx + This->rzy*(*other).ryx + This->rzz*(*other).rzx,
This->rzx*(*other).rxy + This->rzy*(*other).ryy + This->rzz*(*other).rzy,
This->rzx*(*other).rxz + This->rzy*(*other).ryz + This->rzz*(*other).rzz );
}
__device__
G4RotationMatrix G4RotationMatrix_transform(G4RotationMatrix *This, const G4RotationMatrix *other)
{
*This = G4RotationMatrix_mult(other,This);
return *This;
}
__device__
G4RotationMatrix G4RotationMatrix_inverse(const G4RotationMatrix *This)
{
return G4RotationMatrix_create_elements(
This->rxx, This->ryx, This->rzx,
This->rxy, This->ryy, This->rzy,
This->rxz, This->ryz, This->rzz );
}
__device__
G4RotationMatrix G4RotationMatrix_invert(G4RotationMatrix *This)
{
return *This = G4RotationMatrix_inverse(This);
}
typedef struct
{
G4double rxx,rxy,rxz;
G4double ryx,ryy,ryz;
G4double rzx,rzy,rzz;
G4double tx,ty,tz;
}
G4AffineTransform;
__device__
void G4AffineTransform_ctor_id( G4AffineTransform *This )
{
This->rxx = 1;
This->ryy = 1;
This->rzz = 1;
This->rxy = 0;
This->rxz = 0;
This->ryx = 0;
This->ryz = 0;
This->rzx = 0;
This->rzy = 0;
This->tx = 0;
This->ty = 0;
This->tz = 0;
}
__device__
void G4AffineTransform_ctor_vector( G4AffineTransform *This, G4ThreeVector tlate)
{
G4AffineTransform_ctor_id( This );
This->tx = tlate.x;
This->ty = tlate.y;
This->tz = tlate.z;
}
__device__
void G4AffineTransform_ctor_matrix( G4AffineTransform *This, G4RotationMatrix rot)
{
G4AffineTransform_ctor_id( This );
This->rxx = rot.rxx;
This->ryy = rot.ryy;
This->rzz = rot.rzz;
This->rxy = rot.rxy;
This->rxz = rot.rxz;
This->ryx = rot.ryx;
This->ryz = rot.ryz;
This->rzx = rot.rzx;
This->rzy = rot.rzy;
}
__device__
void G4AffineTransform_ctor_full(
G4AffineTransform *This, G4RotationMatrix rot, G4ThreeVector tlate )
{
This->rxx = rot.rxx;
This->ryy = rot.ryy;
This->rzz = rot.rzz;
This->rxy = rot.rxy;
This->rxz = rot.rxz;
This->ryx = rot.ryx;
This->ryz = rot.ryz;
This->rzx = rot.rzx;
This->rzy = rot.rzy;
This->tx = tlate.x;
This->ty = tlate.y;
This->tz = tlate.z;
}
__device__
void G4AffineTransform_ctor_ptr(
G4AffineTransform *This, const G4RotationMatrix *rot, G4ThreeVector tlate )
{
if (rot) G4AffineTransform_ctor_full( This, *rot, tlate );
else G4AffineTransform_ctor_vector( This, tlate );
}
__device__
void G4AffineTransform_ctor_elements(
G4AffineTransform *This,
const G4double prxx,const G4double prxy,const G4double prxz,
const G4double pryx,const G4double pryy,const G4double pryz,
const G4double przx,const G4double przy,const G4double przz,
const G4double ptx,const G4double pty,const G4double ptz)
{
This->rxx = prxx;
This->ryy = pryy;
This->rzz = przz;
This->rxy = prxy;
This->rxz = prxz;
This->ryx = pryx;
This->ryz = pryz;
This->rzx = przx;
This->rzy = przy;
This->tx = ptx;
This->ty = pty;
This->tz = ptz;
}
__device__
G4AffineTransform G4AffineTransform_create_id(void)
{
G4AffineTransform t;
G4AffineTransform_ctor_id(&t);
return t;
}
__device__
G4AffineTransform G4AffineTransform_create_vector(G4ThreeVector tlate)
{
G4AffineTransform t;
G4AffineTransform_ctor_vector(&t,tlate);
return t;
}
__device__
G4AffineTransform G4AffineTransform_create_matrix( G4RotationMatrix rot )
{
G4AffineTransform t;
G4AffineTransform_ctor_matrix(&t,rot);
return t;
}
__device__
G4AffineTransform G4AffineTransform_create_full(
G4RotationMatrix rot, G4ThreeVector tlate )
{
G4AffineTransform t;
G4AffineTransform_ctor_full(&t,rot,tlate);
return t;
}
__device__
G4AffineTransform G4AffineTransform_create_ptr(
const G4RotationMatrix *rot, G4ThreeVector tlate )
{
G4AffineTransform t;
G4AffineTransform_ctor_ptr(&t,rot,tlate);
return t;
}
__device__
G4AffineTransform G4AffineTransform_create_elements(
const G4double prxx,const G4double prxy,const G4double prxz,
const G4double pryx,const G4double pryy,const G4double pryz,
const G4double przx,const G4double przy,const G4double przz,
const G4double ptx,const G4double pty,const G4double ptz)
{
G4AffineTransform t;
G4AffineTransform_ctor_elements(&t,
prxx,prxy,prxz,
pryx,pryy,pryz,
przx,przy,przz,
ptx,pty,ptz);
return t;
}
__device__ G4AffineTransform
G4AffineTransform_InverseProduct(
G4AffineTransform *This,
const G4AffineTransform* ptrtf1,
const G4AffineTransform* ptrtf2)
{
G4double itf2tx = - (*ptrtf2).tx*(*ptrtf2).rxx - (*ptrtf2).ty*(*ptrtf2).rxy - (*ptrtf2).tz*(*ptrtf2).rxz;
G4double itf2ty = - (*ptrtf2).tx*(*ptrtf2).ryx - (*ptrtf2).ty*(*ptrtf2).ryy - (*ptrtf2).tz*(*ptrtf2).ryz;
G4double itf2tz = - (*ptrtf2).tx*(*ptrtf2).rzx - (*ptrtf2).ty*(*ptrtf2).rzy - (*ptrtf2).tz*(*ptrtf2).rzz;
This->rxx=(*ptrtf1).rxx*(*ptrtf2).rxx+(*ptrtf1).rxy*(*ptrtf2).rxy+(*ptrtf1).rxz*(*ptrtf2).rxz;
This->rxy=(*ptrtf1).rxx*(*ptrtf2).ryx+(*ptrtf1).rxy*(*ptrtf2).ryy+(*ptrtf1).rxz*(*ptrtf2).ryz;
This->rxz=(*ptrtf1).rxx*(*ptrtf2).rzx+(*ptrtf1).rxy*(*ptrtf2).rzy+(*ptrtf1).rxz*(*ptrtf2).rzz;
This->ryx=(*ptrtf1).ryx*(*ptrtf2).rxx+(*ptrtf1).ryy*(*ptrtf2).rxy+(*ptrtf1).ryz*(*ptrtf2).rxz;
This->ryy=(*ptrtf1).ryx*(*ptrtf2).ryx+(*ptrtf1).ryy*(*ptrtf2).ryy+(*ptrtf1).ryz*(*ptrtf2).ryz;
This->ryz=(*ptrtf1).ryx*(*ptrtf2).rzx+(*ptrtf1).ryy*(*ptrtf2).rzy+(*ptrtf1).ryz*(*ptrtf2).rzz;
This->rzx=(*ptrtf1).rzx*(*ptrtf2).rxx+(*ptrtf1).rzy*(*ptrtf2).rxy+(*ptrtf1).rzz*(*ptrtf2).rxz;
This->rzy=(*ptrtf1).rzx*(*ptrtf2).ryx+(*ptrtf1).rzy*(*ptrtf2).ryy+(*ptrtf1).rzz*(*ptrtf2).ryz;
This->rzz=(*ptrtf1).rzx*(*ptrtf2).rzx+(*ptrtf1).rzy*(*ptrtf2).rzy+(*ptrtf1).rzz*(*ptrtf2).rzz;
This->tx=(*ptrtf1).tx*(*ptrtf2).rxx+(*ptrtf1).ty*(*ptrtf2).rxy+(*ptrtf1).tz*(*ptrtf2).rxz+itf2tx;
This->ty=(*ptrtf1).tx*(*ptrtf2).ryx+(*ptrtf1).ty*(*ptrtf2).ryy+(*ptrtf1).tz*(*ptrtf2).ryz+itf2ty;
This->tz=(*ptrtf1).tx*(*ptrtf2).rzx+(*ptrtf1).ty*(*ptrtf2).rzy+(*ptrtf1).tz*(*ptrtf2).rzz+itf2tz;
return *This;
}
__device__
G4ThreeVector G4AffineTransform_TransformPoint(const G4AffineTransform *This, G4ThreeVector vec)
{
return G4ThreeVector_create(
vec.x*This->rxx + vec.y*This->ryx + vec.z*This->rzx + This->tx,
vec.x*This->rxy + vec.y*This->ryy + vec.z*This->rzy + This->ty,
vec.x*This->rxz + vec.y*This->ryz + vec.z*This->rzz + This->tz );
}
__device__
G4ThreeVector G4AffineTransform_TransformAxis(const G4AffineTransform *This, G4ThreeVector axis)
{
return G4ThreeVector_create(
axis.x*This->rxx + axis.y*This->ryx + axis.z*This->rzx,
axis.x*This->rxy + axis.y*This->ryy + axis.z*This->rzy,
axis.x*This->rxz + axis.y*This->ryz + axis.z*This->rzz );
}
__device__
G4AffineTransform G4AffineTransform_Inverse(const G4AffineTransform *This)
{
return G4AffineTransform_create_elements(
This->rxx, This->ryx, This->rzx,
This->rxy, This->ryy, This->rzy,
This->rxz, This->ryz, This->rzz,
-This->tx*This->rxx - This->ty*This->rxy - This->tz*This->rxz,
-This->tx*This->ryx - This->ty*This->ryy - This->tz*This->ryz,
-This->tx*This->rzx - This->ty*This->rzy - This->tz*This->rzz );
}
__device__
G4AffineTransform G4AffineTransform_Invert(G4AffineTransform *This)
{
G4double v1 = -This->tx*This->rxx - This->ty*This->rxy - This->tz*This->rxz;
G4double v2 = -This->tx*This->ryx - This->ty*This->ryy - This->tz*This->ryz;
G4double v3 = -This->tx*This->rzx - This->ty*This->rzy - This->tz*This->rzz;
This->tx=v1; This->ty=v2; This->tz=v3;
G4double tmp1=This->ryx; This->ryx=This->rxy; This->rxy=tmp1;
G4double tmp2=This->rzx; This->rzx=This->rxz; This->rxz=tmp2;
G4double tmp3=This->rzy; This->rzy=This->ryz; This->ryz=tmp3;
return *This;
}
__device__
G4ThreeVector G4AffineTransform_NetTranslation(const G4AffineTransform *This)
{
return G4ThreeVector_create(This->tx,This->ty,This->tz);
}
__device__
G4bool G4AffineTransform_IsRotated(const G4AffineTransform *This)
{
return (This->rxx==1.0 && This->ryy==1.0 && This->rzz==1.0) ? false : true;
}
typedef struct
{
G4double property;
}
StubMaterial;
struct G4SmartVoxelProxy;
typedef struct
{
G4double fmaxExtent;
G4double fminExtent;
struct G4SmartVoxelProxy* * fslices;
G4int fNumSlices;
G4int fminEquivalent;
G4int fmaxEquivalent;
EAxis faxis;
EAxis fparamAxis;
}
G4SmartVoxelHeader;
typedef struct
{
G4int *fcontents;
G4int fminEquivalent;
G4int fmaxEquivalent;
G4int fNumContents;
}
G4SmartVoxelNode;
typedef struct G4SmartVoxelProxy
{
G4SmartVoxelHeader* fHeader;
G4SmartVoxelNode* fNode;
}
G4SmartVoxelProxy;
__device__
void G4VoxelNode_ctor( G4SmartVoxelNode *This, G4int no )
{
This->fmaxEquivalent = no;
This->fminEquivalent = no;
This->fcontents = 0;
This->fNumContents = 0;
}
__device__ G4int
G4VoxelNode_GetNoContained( const G4SmartVoxelNode *This)
{
return This->fNumContents;
}
__device__ G4int
G4VoxelNode_GetVolume(
const G4SmartVoxelNode *This, G4int contentNo)
{
(void)0;
return This->fcontents[contentNo];
}
__device__ G4int
G4VoxelNode_GetMaxEquivalentSliceNo(
const G4SmartVoxelNode *This )
{
return This->fmaxEquivalent;
}
__device__ G4int
G4VoxelNode_GetMinEquivalentSliceNo(
const G4SmartVoxelNode *This )
{
return This->fminEquivalent;
}
__device__ G4int
G4VoxelHeader_GetMaxEquivalentSliceNo(
const G4SmartVoxelHeader *This )
{
return This->fmaxEquivalent;
}
__device__ G4int
G4VoxelHeader_GetMinEquivalentSliceNo(
const G4SmartVoxelHeader *This )
{
return This->fminEquivalent;
}
__device__ EAxis
G4VoxelHeader_GetAxis( const G4SmartVoxelHeader *This )
{
return This->faxis;
}
__device__ G4int
G4VoxelHeader_GetNoSlices( const G4SmartVoxelHeader *This )
{
return This->fNumSlices;
}
__device__ G4double
G4VoxelHeader_GetMinExtent( const G4SmartVoxelHeader *This )
{
return This->fminExtent;
}
__device__ G4double
G4VoxelHeader_GetMaxExtent( const G4SmartVoxelHeader *This )
{
return This->fmaxExtent;
}
__device__ G4SmartVoxelProxy*
G4VoxelHeader_GetSlice( const G4SmartVoxelHeader *This, G4int n )
{
(void)0;
return This->fslices[n];
}
__device__ G4bool
G4VoxelProxy_IsNode( const G4SmartVoxelProxy *This )
{
return This->fNode != 0;
}
__device__ G4bool
G4VoxelProxy_IsHeader( const G4SmartVoxelProxy *This )
{
return This->fHeader != 0;
}
__device__ G4SmartVoxelNode*
G4VoxelProxy_GetNode( const G4SmartVoxelProxy *This )
{
return This->fNode;
}
__device__ G4SmartVoxelHeader*
G4VoxelProxy_GetHeader( const G4SmartVoxelProxy *This )
{
return This->fHeader;
}
struct G4VPhysicalVolume;
struct G4VSolid;
typedef struct
{
G4int fNoDaughters;
struct G4VPhysicalVolume * *fDaughters;
int check;
StubMaterial* fMaterial;
struct G4VSolid* fSolid;
G4SmartVoxelHeader *fVoxel;
int align;
}
G4LogicalVolume;
typedef struct G4VSolid
{
ESolid type;
}
G4VSolid;
__device__
EInside G4VSolid_Inside( const G4VSolid *This, G4ThreeVector p);
__device__
G4ThreeVector G4VSolid_SurfaceNormal( const G4VSolid *This, G4ThreeVector p);
__device__
G4double G4VSolid_DistanceToIn_full(
const G4VSolid *This,
G4ThreeVector p,
G4ThreeVector v);
__device__
G4double G4VSolid_DistanceToIn( const G4VSolid *This, G4ThreeVector p);
__device__
G4double G4VSolid_DistanceToOut_full(
const G4VSolid *This,
G4ThreeVector p,
G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,
G4ThreeVector *n);
__device__
G4double G4VSolid_DistanceToOut( const G4VSolid *This, G4ThreeVector p);
typedef struct
{
G4VSolid solid;
G4double fDx,fDy,fDz;
}
G4Box;
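/* G4Box: axis-aligned box solid.  Inside / SurfaceNormal / DistanceToIn / DistanceToOut
   mirror the Geant4 box algorithms, with a fixed Cartesian tolerance of 1E-3. */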
extern "C" {
__device__ EInside G4Box_Inside( const G4Box *This, G4ThreeVector p);
__device__ G4ThreeVector G4Box_SurfaceNormal( const G4Box *This, G4ThreeVector p);
__device__ G4double G4Box_DistanceToIn_full(
const G4Box *This,
G4ThreeVector p,
G4ThreeVector v);
__device__ G4double G4Box_DistanceToIn( const G4Box *This, G4ThreeVector p);
__device__ G4double G4Box_DistanceToOut_full(
const G4Box *This,
G4ThreeVector p,
G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,
G4ThreeVector *n);
__device__ G4double G4Box_DistanceToOut( const G4Box *This, G4ThreeVector p);
__device__
G4ThreeVector G4Box_ApproxSurfaceNormal( const G4Box *This, G4ThreeVector p )
{
G4double distx, disty, distz ;
G4ThreeVector norm ;
distx = fabs(fabs(p.x) - This->fDx) ;
disty = fabs(fabs(p.y) - This->fDy) ;
distz = fabs(fabs(p.z) - This->fDz) ;
if ( distx <= disty )
{
if ( distx <= distz )
{
if ( p.x < 0 ) norm = G4ThreeVector_create(-1.0,0,0) ;
else norm = G4ThreeVector_create( 1.0,0,0) ;
}
else
{
if ( p.z < 0 ) norm = G4ThreeVector_create(0,0,-1.0) ;
else norm = G4ThreeVector_create(0,0, 1.0) ;
}
}
else
{
if ( disty <= distz )
{
if ( p.y < 0 ) norm = G4ThreeVector_create(0,-1.0,0) ;
else norm = G4ThreeVector_create(0, 1.0,0) ;
}
else
{
if ( p.z < 0 ) norm = G4ThreeVector_create(0,0,-1.0) ;
else norm = G4ThreeVector_create(0,0, 1.0) ;
}
}
return norm;
}
__device__
G4ThreeVector G4Box_SurfaceNormal( const G4Box *This, G4ThreeVector p)
{
G4double distx, disty, distz ;
G4ThreeVector norm ;
const G4double kCarTolerance = 1E-3;
distx = fabs(fabs(p.x) - This->fDx) ;
disty = fabs(fabs(p.y) - This->fDy) ;
distz = fabs(fabs(p.z) - This->fDz) ;
const G4double delta = 0.5*kCarTolerance;
const G4ThreeVector nX = G4ThreeVector_create( 1.0, 0,0 );
const G4ThreeVector nmX = G4ThreeVector_create(-1.0, 0,0 );
const G4ThreeVector nY = G4ThreeVector_create( 0, 1.0,0 );
const G4ThreeVector nmY = G4ThreeVector_create( 0,-1.0,0 );
const G4ThreeVector nZ = G4ThreeVector_create( 0, 0, 1.0);
const G4ThreeVector nmZ = G4ThreeVector_create( 0, 0,- 1.0);
G4ThreeVector
normX = G4ThreeVector_create(0.,0.,0.),
normY = G4ThreeVector_create(0.,0.,0.),
normZ = G4ThreeVector_create(0.,0.,0.);
G4ThreeVector sumnorm = G4ThreeVector_create(0., 0., 0.);
G4int noSurfaces=0;
if (distx <= delta)
{
noSurfaces ++;
if ( p.x >= 0.){
normX= nX ;
}else{
normX= nmX;
}
sumnorm= normX;
}
if (disty <= delta)
{
noSurfaces ++;
if ( p.y >= 0.){
normY= nY;
}else{
normY = nmY;
}
G4ThreeVector_sum_assign( &sumnorm, normY );
}
if (distz <= delta)
{
noSurfaces ++;
if ( p.z >= 0.){
normZ= nZ;
}else{
normZ = nmZ;
}
G4ThreeVector_sum_assign( &sumnorm, normZ );
}
const G4double invSqrt2 = 1.0 / sqrt( 2.0);
const G4double invSqrt3 = 1.0 / sqrt( 3.0);
norm= G4ThreeVector_create( 0., 0., 0.);
if( noSurfaces > 0 )
{
if( noSurfaces == 1 ){
norm= sumnorm;
}else{
if( noSurfaces == 2 ) {
norm = G4ThreeVector_mult(sumnorm, invSqrt2);
} else {
norm = G4ThreeVector_mult(sumnorm, invSqrt3);
}
}
}else{
norm = G4Box_ApproxSurfaceNormal(This, p);
}
return norm;
}
__device__
G4double G4Box_DistanceToIn_full( const G4Box *This, G4ThreeVector p,G4ThreeVector v)
{
G4double safx, safy, safz ;
G4double smin=0.0, sminy, sminz ;
G4double smax=kInfinity, smaxy, smaxz ;
G4double stmp ;
G4double sOut=kInfinity, sOuty=kInfinity, sOutz=kInfinity ;
const G4double kCarTolerance = 1E-3;
safx = fabs(p.x) - This->fDx ;
safy = fabs(p.y) - This->fDy ;
safz = fabs(p.z) - This->fDz ;
if ( ((p.x*v.x >= 0.0) && safx > -kCarTolerance*0.5)
|| ((p.y*v.y >= 0.0) && safy > -kCarTolerance*0.5)
|| ((p.z*v.z >= 0.0) && safz > -kCarTolerance*0.5) )
{
return kInfinity ;
}
if ( v.x)
{
stmp = 1.0/fabs(v.x) ;
if (safx >= 0.0)
{
smin = safx*stmp ;
smax = (This->fDx+fabs(p.x))*stmp ;
}
else
{
if (v.x > 0) sOut = (This->fDx - p.x)*stmp ;
if (v.x < 0) sOut = (This->fDx + p.x)*stmp ;
}
}
if ( v.y)
{
stmp = 1.0/fabs(v.y) ;
if (safy >= 0.0)
{
sminy = safy*stmp ;
smaxy = (This->fDy+fabs(p.y))*stmp ;
if (sminy > smin) smin=sminy ;
if (smaxy < smax) smax=smaxy ;
if (smin >= smax-kCarTolerance*0.5)
{
return kInfinity ;
}
}
else
{
if (v.y > 0) sOuty = (This->fDy - p.y)*stmp ;
if (v.y < 0) sOuty = (This->fDy + p.y)*stmp ;
if( sOuty < sOut ) sOut = sOuty ;
}
}
if ( v.z )
{
stmp = 1.0/fabs(v.z) ;
if ( safz >= 0.0)
{
sminz = safz*stmp ;
smaxz = (This->fDz+fabs(p.z))*stmp ;
if (sminz > smin) smin = sminz ;
if (smaxz < smax) smax = smaxz ;
if (smin >= smax-kCarTolerance*0.5)
{
return kInfinity ;
}
}
else
{
if (v.z > 0) sOutz = (This->fDz - p.z)*stmp ;
if (v.z < 0) sOutz = (This->fDz + p.z)*stmp ;
if( sOutz < sOut ) sOut = sOutz ;
}
}
if ( sOut <= smin + 0.5*kCarTolerance)
{
return kInfinity ;
}
if (smin < 0.5*kCarTolerance) smin = 0.0 ;
return smin ;
}
__device__
G4double G4Box_DistanceToIn( const G4Box *This, G4ThreeVector p)
{
G4double safex, safey, safez, safe = 0.0 ;
safex = fabs(p.x) - This->fDx ;
safey = fabs(p.y) - This->fDy ;
safez = fabs(p.z) - This->fDz ;
if (safex > safe) safe = safex ;
if (safey > safe) safe = safey ;
if (safez > safe) safe = safez ;
return safe ;
}
__device__
G4double G4Box_DistanceToOut_full( const G4Box *This, G4ThreeVector p,G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,G4ThreeVector *n)
{
const G4double kCarTolerance = 1E-3;
enum {kBoxUndefined,kPX,kMX,kPY,kMY,kPZ,kMZ} side = kBoxUndefined ;
G4double pdist,stmp,snxt;
if (calcNorm) *validNorm = true ;
if (v.x > 0)
{
pdist = This->fDx - p.x ;
if (pdist > kCarTolerance*0.5)
{
snxt = pdist/v.x ;
side = kPX ;
}
else
{
if (calcNorm) *n = G4ThreeVector_create(1,0,0) ;
return snxt = 0 ;
}
}
else if (v.x < 0)
{
pdist = This->fDx + p.x ;
if (pdist > kCarTolerance*0.5)
{
snxt = -pdist/v.x ;
side = kMX ;
}
else
{
if (calcNorm) *n = G4ThreeVector_create(-1,0,0) ;
return snxt = 0 ;
}
}
else snxt = kInfinity ;
if ( v.y > 0 )
{
pdist=This->fDy-p.y;
if (pdist>kCarTolerance*0.5)
{
stmp=pdist/v.y;
if (stmp<snxt)
{
snxt=stmp;
side=kPY;
}
}
else
{
if (calcNorm) *n = G4ThreeVector_create(0,1,0) ;
return snxt = 0 ;
}
}
else if ( v.y < 0 )
{
pdist = This->fDy + p.y ;
if (pdist > kCarTolerance*0.5)
{
stmp=-pdist/v.y;
if (stmp<snxt)
{
snxt=stmp;
side=kMY;
}
}
else
{
if (calcNorm) *n = G4ThreeVector_create(0,-1,0) ;
return snxt = 0 ;
}
}
if (v.z>0)
{
pdist=This->fDz-p.z;
if (pdist > kCarTolerance*0.5)
{
stmp=pdist/v.z;
if (stmp < snxt)
{
snxt=stmp;
side=kPZ;
}
}
else
{
if (calcNorm) *n = G4ThreeVector_create(0,0,1) ;
return snxt = 0 ;
}
}
else if (v.z<0)
{
pdist = This->fDz + p.z ;
if (pdist > kCarTolerance*0.5)
{
stmp=-pdist/v.z;
if (stmp < snxt)
{
snxt=stmp;
side=kMZ;
}
}
else
{
if (calcNorm) *n = G4ThreeVector_create(0,0,-1) ;
return snxt = 0 ;
}
}
if (calcNorm)
{
switch (side)
{
case kPX:
*n=G4ThreeVector_create(1,0,0);
break;
case kMX:
*n=G4ThreeVector_create(-1,0,0);
break;
case kPY:
*n=G4ThreeVector_create(0,1,0);
break;
case kMY:
*n=G4ThreeVector_create(0,-1,0);
break;
case kPZ:
*n=G4ThreeVector_create(0,0,1);
break;
case kMZ:
*n=G4ThreeVector_create(0,0,-1);
break;
default:
break;
}
}
return snxt;
}
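// Isotropic safety for a point inside the box: the smallest distance to any
// of the six faces, clamped to zero for points outside.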
__device__
G4double G4Box_DistanceToOut( const G4Box *This, G4ThreeVector p )
{
G4double safx1,safx2,safy1,safy2,safz1,safz2,safe=0.0;
safx1 = This->fDx - p.x ;
safx2 = This->fDx + p.x ;
safy1 = This->fDy - p.y ;
safy2 = This->fDy + p.y ;
safz1 = This->fDz - p.z ;
safz2 = This->fDz + p.z ;
if (safx2 < safx1) safe = safx2 ;
else safe = safx1 ;
if (safy1 < safe) safe = safy1 ;
if (safy2 < safe) safe = safy2 ;
if (safz1 < safe) safe = safz1 ;
if (safz2 < safe) safe = safz2 ;
if (safe < 0) safe = 0 ;
return safe ;
}
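// Classifies p as kInside, kSurface (within half a tolerance of a face) or
// kOutside.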
__device__
EInside G4Box_Inside( const G4Box *This, G4ThreeVector p)
{
const G4double kCarTolerance = 1E-3;
EInside in = kOutside ;
if ( fabs(p.x) <= This->fDx - kCarTolerance*0.5 )
{
if (fabs(p.y) <= This->fDy - kCarTolerance*0.5 )
{
if (fabs(p.z) <= This->fDz - kCarTolerance*0.5 ) in = kInside ;
else if (fabs(p.z) <= This->fDz + kCarTolerance*0.5 ) in = kSurface ;
}
else if (fabs(p.y) <= This->fDy + kCarTolerance*0.5 )
{
if (fabs(p.z) <= This->fDz + kCarTolerance*0.5 ) in = kSurface ;
}
}
else if (fabs(p.x) <= This->fDx + kCarTolerance*0.5 )
{
if (fabs(p.y) <= This->fDy + kCarTolerance*0.5 )
{
if (fabs(p.z) <= This->fDz + kCarTolerance*0.5) in = kSurface ;
}
}
return in ;
}
}
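// Solid sphere of radius fRmax centred at the local origin.  Only the fields
// needed by the GPU navigation are kept; 'align' is padding.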
typedef struct
{
G4VSolid solid;
G4double fRmax;
G4double fRmaxTolerance;
G4double align;
}
G4Orb;
extern "C" {
__device__ EInside G4Orb_Inside( const G4Orb *This, G4ThreeVector p);
__device__ G4ThreeVector G4Orb_SurfaceNormal( const G4Orb *This, G4ThreeVector p);
__device__ G4double G4Orb_DistanceToIn_full(
const G4Orb *This,
G4ThreeVector p,
G4ThreeVector v);
__device__ G4double G4Orb_DistanceToIn( const G4Orb *This, G4ThreeVector p);
__device__ G4double G4Orb_DistanceToOut_full(
const G4Orb *This,
G4ThreeVector p,
G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,
G4ThreeVector *n);
__device__ G4double G4Orb_DistanceToOut( const G4Orb *This, G4ThreeVector p);
__device__
EInside G4Orb_Inside( const G4Orb *This, G4ThreeVector p)
{
G4double rad2,tolRMax;
EInside in;
rad2 = G4ThreeVector_mag2(p);
G4double rad = sqrt(rad2);
tolRMax = This->fRmax - This->fRmaxTolerance*0.5 ;
if ( rad <= tolRMax ) { in = kInside ; }
else
{
tolRMax = This->fRmax + This->fRmaxTolerance*0.5 ;
if ( rad <= tolRMax ) { in = kSurface ; }
else { in = kOutside ; }
}
return in;
}
__device__
G4ThreeVector G4Orb_SurfaceNormal( const G4Orb *This, G4ThreeVector p)
{
(void)This;
return G4ThreeVector_unit(p);
}
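// Ray/sphere intersection for a point outside (or on) the orb: with
// c = |p|^2 - R^2 and b = p.v, the entry distance is -b - sqrt(b^2 - c);
// kInfinity is returned when the ray misses or points away from the surface.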
__device__
G4double G4Orb_DistanceToIn_full( const G4Orb *This, G4ThreeVector p,G4ThreeVector v)
{
G4double snxt = kInfinity ;
G4double rad2, pDotV3d;
G4double c, d2, s = kInfinity ;
rad2 = G4ThreeVector_mag2(p);
pDotV3d = G4ThreeVector_dot(p,v);
G4double rad = sqrt(rad2);
c = (rad - This->fRmax)*(rad + This->fRmax);
if ( c > This->fRmaxTolerance*This->fRmax )
{
d2 = pDotV3d*pDotV3d - c ;
if ( d2 >= 0 )
{
s = -pDotV3d - sqrt(d2) ;
if ( s >= 0 )
{
return snxt = s;
}
}
else
{
return snxt = kInfinity;
}
}
else
{
if ( c > -This->fRmaxTolerance*This->fRmax )
{
d2 = pDotV3d*pDotV3d - c ;
if ( (d2 < This->fRmaxTolerance*This->fRmax) || (pDotV3d >= 0) )
{
return snxt = kInfinity;
}
else
{
return snxt = 0.;
}
}
}
return snxt;
}
__device__
G4double G4Orb_DistanceToIn( const G4Orb *This, G4ThreeVector p)
{
G4double safe = 0.0,
rad = G4ThreeVector_mag(p);
safe = rad - This->fRmax;
if( safe < 0 ) { safe = 0.; }
return safe;
}
__device__
G4double G4Orb_DistanceToOut_full( const G4Orb *This, G4ThreeVector p,G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,G4ThreeVector *n)
{
G4double snxt = kInfinity;
enum {kNull,kRMax} side = kNull;
G4double rad2,pDotV3d;
G4ThreeVector ipoint;
G4double c,d2;
rad2 = G4ThreeVector_mag2(p);
pDotV3d = G4ThreeVector_dot(p,v);
const G4double Rmax_plus = This->fRmax + This->fRmaxTolerance*0.5;
G4double rad = sqrt(rad2);
if ( rad <= Rmax_plus )
{
c = (rad - This->fRmax)*(rad + This->fRmax);
if ( c < This->fRmaxTolerance*This->fRmax )
{
d2 = pDotV3d*pDotV3d - c;
if( ( c > -This->fRmaxTolerance*This->fRmax) &&
( ( pDotV3d >= 0 ) || ( d2 < 0 )) )
{
if(calcNorm)
{
*validNorm = true ;
*n = G4ThreeVector_create(p.x/This->fRmax,p.y/This->fRmax,p.z/This->fRmax) ;
}
return snxt = 0;
}
else
{
snxt = -pDotV3d + sqrt(d2);
side = kRMax ;
}
}
}
else
{
}
if (calcNorm)
{
switch( side )
{
case kRMax:
ipoint = G4ThreeVector_saxpy(snxt,v,p);
*n=G4ThreeVector_mult(ipoint,1.0/This->fRmax);
*validNorm=true;
break;
default:
break;
}
}
return snxt;
}
__device__
G4double G4Orb_DistanceToOut( const G4Orb *This, G4ThreeVector p )
{
G4double safe=0.0,rad = G4ThreeVector_mag(p);
safe = This->fRmax - rad;
if ( safe < 0. ) safe = 0.;
return safe;
}
}
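// Manual dispatch on This->type replaces C++ virtual calls for the solid
// interface, since the flattened GPU geometry buffer cannot carry vtables.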
__device__
EInside G4VSolid_Inside( const G4VSolid *This, G4ThreeVector p)
{
switch(This->type)
{
case kBox:
return G4Box_Inside(( const G4Box*)This,p);
case kOrb:
return G4Orb_Inside(( const G4Orb*)This,p);
default:
(void)0;
return kOutside;
}
}
__device__
G4ThreeVector G4VSolid_SurfaceNormal( const G4VSolid *This, G4ThreeVector p)
{
switch(This->type)
{
case kBox:
return G4Box_SurfaceNormal(( const G4Box*)This,p);
case kOrb:
return G4Orb_SurfaceNormal(( const G4Orb*)This,p);
default:
(void)0;
return G4ThreeVector_create(0,0,0);
}
}
__device__
G4double G4VSolid_DistanceToIn_full(
const G4VSolid *This,
G4ThreeVector p,
G4ThreeVector v)
{
switch(This->type)
{
case kBox:
return G4Box_DistanceToIn_full(( const G4Box*)This,p,v);
case kOrb:
return G4Orb_DistanceToIn_full(( const G4Orb*)This,p,v);
default:
(void)0;
return 0;
}
}
__device__
G4double G4VSolid_DistanceToIn( const G4VSolid *This, G4ThreeVector p)
{
switch(This->type)
{
case kBox:
return G4Box_DistanceToIn(( const G4Box*)This,p);
case kOrb:
return G4Orb_DistanceToIn(( const G4Orb*)This,p);
default:
(void)0;
return 0;
}
}
__device__
G4double G4VSolid_DistanceToOut_full(
const G4VSolid *This,
G4ThreeVector p,
G4ThreeVector v,
const G4bool calcNorm,
G4bool *validNorm,
G4ThreeVector *n)
{
switch(This->type)
{
case kBox:
return G4Box_DistanceToOut_full(( const G4Box*)This,p,v,calcNorm,validNorm,n);
case kOrb:
return G4Orb_DistanceToOut_full(( const G4Orb*)This,p,v,calcNorm,validNorm,n);
default:
(void)0;
return 0;
}
}
__device__
G4double G4VSolid_DistanceToOut( const G4VSolid *This, G4ThreeVector p)
{
switch(This->type)
{
case kBox:
return G4Box_DistanceToOut(( const G4Box*)This,p);
case kOrb:
return G4Orb_DistanceToOut(( const G4Orb*)This,p);
default:
(void)0;
return 0;
}
}
__device__
G4SmartVoxelHeader * G4LogicalVolume_GetVoxelHeader( const G4LogicalVolume* This)
{
return This->fVoxel;
}
__device__
G4int G4LogicalVolume_GetNoDaughters( const G4LogicalVolume* This)
{
return This->fNoDaughters;
}
__device__
struct G4VPhysicalVolume* G4LogicalVolume_GetDaughter( const G4LogicalVolume* This, const G4int i)
{
return This->fDaughters[i];
}
__device__
struct G4VSolid* G4LogicalVolume_GetSolid( const G4LogicalVolume* This)
{
return This->fSolid;
}
__device__
StubMaterial* G4LogicalVolume_GetMaterial( const G4LogicalVolume* This)
{
return This->fMaterial;
}
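// A placed volume: rotation and translation relative to its mother, plus
// pointers to its own and its mother's logical volume; the remaining integer
// fields are bookkeeping carried in the flattened geometry layout.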
typedef struct G4VPhysicalVolume
{
G4RotationMatrix frot;
G4ThreeVector ftrans;
int guard1;
G4LogicalVolume *flogical;
int guard2;
G4LogicalVolume *flmother;
int guard3;
int count;
int counter_shadow;
}
G4VPhysicalVolume;
__device__
G4ThreeVector G4VPhysicalVolume_GetTranslation( const G4VPhysicalVolume *This)
{
return This->ftrans;
}
__device__
G4LogicalVolume* G4VPhysicalVolume_GetLogicalVolume( const G4VPhysicalVolume *This)
{
return This->flogical;
}
__device__
G4LogicalVolume* G4VPhysicalVolume_GetMotherLogical( const G4VPhysicalVolume *This)
{
return This->flmother;
}
__device__
G4RotationMatrix G4VPhysicalVolume_GetObjectRotationValue( const G4VPhysicalVolume *This)
{
return This->frot;
}
__device__
G4ThreeVector G4VPhysicalVolume_GetObjectTranslation( const G4VPhysicalVolume *This)
{
return This->ftrans;
}
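// One level of the touchable history: a physical volume together with the
// accumulated global->local transform.  The history itself is a fixed-depth
// stack of 16 such levels.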
typedef struct
{
G4AffineTransform fTransform;
G4VPhysicalVolume* fPhysicalVolumePtr;
EVolume fVolumeType;
}
G4NavigationLevel;
typedef struct
{
G4NavigationLevel fNavHistory[16];
G4int fStackDepth;
int align;
}
G4NavigationHistory;
__device__
void G4NavigationLevel_ctor(
G4NavigationLevel *This,
G4VPhysicalVolume* pPhysVol,
G4AffineTransform afTransform,
EVolume volTp )
{
This->fTransform = afTransform;
This->fPhysicalVolumePtr = pPhysVol;
This->fVolumeType = volTp;
}
__device__
void G4NavigationLevel_ctor_relative(
G4NavigationLevel *This,
G4VPhysicalVolume* pPhysVol,
G4AffineTransform levelAbove,
G4AffineTransform relativeCurrent,
EVolume volTp )
{
This->fPhysicalVolumePtr = pPhysVol;
This->fVolumeType = volTp;
G4AffineTransform_InverseProduct(&(This->fTransform), &levelAbove, &relativeCurrent );
}
__device__
G4NavigationLevel G4NavigationLevel_create(
G4VPhysicalVolume* pPhysVol,
G4AffineTransform afTransform,
EVolume volTp )
{
G4NavigationLevel lev;
G4NavigationLevel_ctor( &lev, pPhysVol, afTransform, volTp );
return lev;
}
__device__
G4NavigationLevel G4NavigationLevel_create_relative(
G4VPhysicalVolume* pPhysVol,
G4AffineTransform levelAbove,
G4AffineTransform relativeCurrent,
EVolume volTp)
{
G4NavigationLevel lev;
G4NavigationLevel_ctor_relative( &lev, pPhysVol, levelAbove, relativeCurrent, volTp );
return lev;
}
__device__
G4VPhysicalVolume* G4NavigationLevel_GetPhysicalVolume(
const G4NavigationLevel *This )
{
return This->fPhysicalVolumePtr;
}
__device__
G4AffineTransform G4NavigationLevel_GetTransform(
const G4NavigationLevel *This )
{
return This->fTransform;
}
__device__
const G4AffineTransform* G4NavigationLevel_GetPtrTransform(
const G4NavigationLevel *This )
{
return &(This->fTransform);
}
__device__
EVolume G4NavigationLevel_GetVolumeType(
const G4NavigationLevel *This )
{
return This->fVolumeType;
}
__device__
void G4NavigationHistory_Reset( G4NavigationHistory *This )
{
This->fStackDepth = 0;
}
__device__
void G4NavigationHistory_Clear( G4NavigationHistory *This )
{
G4AffineTransform origin = G4AffineTransform_create_vector(G4ThreeVector_create(0.,0.,0.));
G4NavigationLevel tmpNavLevel = G4NavigationLevel_create(0, origin, kNormal) ;
G4NavigationHistory_Reset( This );
for (G4int ilev=16 -1; ilev>=0; ilev--)
{
This->fNavHistory[ilev] = tmpNavLevel;
}
}
__device__
void G4NavigationHistory_ctor( G4NavigationHistory *This )
{
This->fStackDepth = 0;
G4NavigationHistory_Clear( This );
}
__device__
void G4NavigationHistory_dtor( G4NavigationHistory *This )
{
(void)This;
}
__device__
void G4NavigationHistory_SetFirstEntry(
G4NavigationHistory *This, G4VPhysicalVolume* pVol)
{
G4ThreeVector translation = G4ThreeVector_create(0.,0.,0.);
if( pVol!=0 )
{
translation = G4VPhysicalVolume_GetTranslation( pVol );
}
This->fNavHistory[0] =
G4NavigationLevel_create( pVol, G4AffineTransform_create_vector(translation), kNormal );
}
__device__
const G4AffineTransform* G4NavigationHistory_GetPtrTopTransform(
const G4NavigationHistory *This )
{
return G4NavigationLevel_GetPtrTransform( &(This->fNavHistory[This->fStackDepth]) );
}
__device__
G4AffineTransform G4NavigationHistory_GetTopTransform(
const G4NavigationHistory *This )
{
return G4NavigationLevel_GetTransform( &(This->fNavHistory[This->fStackDepth]) );
}
__device__
EVolume G4NavigationHistory_GetTopVolumeType(
const G4NavigationHistory *This )
{
return G4NavigationLevel_GetVolumeType( &(This->fNavHistory[This->fStackDepth]) );
}
__device__
G4VPhysicalVolume* G4NavigationHistory_GetTopVolume(
const G4NavigationHistory *This )
{
return G4NavigationLevel_GetPhysicalVolume( &(This->fNavHistory[This->fStackDepth]) );
}
__device__
G4int G4NavigationHistory_GetDepth(
const G4NavigationHistory *This )
{
return This->fStackDepth;
}
__device__
G4AffineTransform
G4NavigationHistory_GetTransform(
const G4NavigationHistory *This, G4int n )
{
return G4NavigationLevel_GetTransform( &(This->fNavHistory[n]) );
}
__device__
EVolume G4NavigationHistory_GetVolumeType(
const G4NavigationHistory *This, G4int n )
{
return G4NavigationLevel_GetVolumeType( &(This->fNavHistory[n]) );
}
__device__
G4VPhysicalVolume* G4NavigationHistory_GetVolume(
const G4NavigationHistory *This, G4int n )
{
return G4NavigationLevel_GetPhysicalVolume( &(This->fNavHistory[n]) );
}
__device__
G4int G4NavigationHistory_GetMaxDepth(
const G4NavigationHistory *This )
{
(void)This;
return 16;
}
__device__
void G4NavigationHistory_BackLevel( G4NavigationHistory *This )
{
(void)0;
This->fStackDepth--;
}
__device__
void G4NavigationHistory_NewLevel(
G4NavigationHistory *This,
G4VPhysicalVolume *pNewMother,
EVolume vType )
{
This->fStackDepth++;
(void)0;
This->fNavHistory[This->fStackDepth] =
G4NavigationLevel_create_relative(
pNewMother,
G4NavigationLevel_GetTransform( &(This->fNavHistory[This->fStackDepth-1]) ),
G4AffineTransform_create_full(
G4VPhysicalVolume_GetObjectRotationValue( pNewMother ),
G4VPhysicalVolume_GetTranslation( pNewMother )),
vType );
}
typedef struct{
G4VPhysicalVolume * PVolume;
G4int trackId;
}SolidInfo;
typedef struct{
float safety;
float step;
int trackId;
G4VPhysicalVolume * PVolume;
}ResultInfo;
typedef struct{
float safety;
float step;
G4VPhysicalVolume * PVolume;
}
FinalResult;
typedef struct{
G4ThreeVector Point;
G4ThreeVector Direction;
}PointInformation;
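// Per-track reduction helper: scans the 'size' candidate results stored for
// this track (starting at PrevSum) and keeps the smallest step and safety in
// Compacter_Result[locationId].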
__device__ void Find_minimum ( ResultInfo * Result_For_Current_Solid, FinalResult * Compacter_Result, int PrevSum, int size)
{
int locationId = (blockIdx.x * blockDim.x + threadIdx.x);
int i, loc ;
float result_step, result_safety, Current_result_step, Current_result_safety;
float Initial_result_step = (Compacter_Result [ locationId ]).step;
float Initial_result_safety = (Compacter_Result [ locationId ]).safety;
Current_result_step = Initial_result_step;
Current_result_safety = Initial_result_safety;
for( i = 0; i < size ; i++)
{
result_step = Result_For_Current_Solid[ PrevSum + i].step;
result_safety = Result_For_Current_Solid[ PrevSum + i].safety;
if ( result_step < Current_result_step)
{
loc = PrevSum + i;
Current_result_step = result_step;
}
if ( result_safety < Current_result_safety)
{
Current_result_safety = result_safety;
}
}
if( Current_result_step != Initial_result_step)
{
FinalResult final = { Current_result_safety, Current_result_step, (Result_For_Current_Solid[ loc ].PVolume)};
Compacter_Result[ locationId ] = final;
}
__syncthreads();
}
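// Voxel-navigation state: a fixed-depth stack (4 levels) recording, for each
// smart-voxel header traversed, its axis, slice count, slice width and the
// slice number containing the current point.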
typedef struct
{
G4double fVoxelSliceWidthStack[4];
G4SmartVoxelHeader* fVoxelHeaderStack[4];
G4int fVoxelNodeNoStack[4];
G4int fVoxelNoSlicesStack[4];
EAxis fVoxelAxisStack[4];
G4int fVoxelDepth;
G4SmartVoxelNode *fVoxelNode;
}
G4VoxelNavigation;
__device__ void G4VoxelNavigation_ctor( G4VoxelNavigation *This );
__device__ G4bool G4VoxelNavigation_LevelLocate(
G4VoxelNavigation *This,
G4NavigationHistory *history,
const G4VPhysicalVolume *blockedVol,
G4ThreeVector globalPoint,
const G4ThreeVector* globalDirection,
const G4bool pLocatedOnEdge,
G4ThreeVector *localPoint );
__device__ G4SmartVoxelNode* G4VoxelNavigation_VoxelLocate(
G4VoxelNavigation *This,
G4SmartVoxelHeader *voxelHeader,
G4ThreeVector point);
__device__
G4double
G4VoxelNavigation_ComputeStep(
G4VoxelNavigation *This,
G4ThreeVector localPoint,
G4ThreeVector localDirection,
const G4double currentProposedStepLength,
G4double *newSafety,
G4NavigationHistory *history,
G4bool *validExitNormal,
G4ThreeVector *exitNormal,
G4bool *exiting,
G4bool *entering,
G4VPhysicalVolume *(*pBlockedPhysical)
, G4double * Result
);
__device__ G4double G4VoxelNavigation_ComputeSafety(
G4VoxelNavigation *This,
G4ThreeVector localPoint,
const G4NavigationHistory *history);
typedef struct
{
G4NavigationHistory fHistory;
G4VoxelNavigation fVoxelNav;
G4ThreeVector fStepEndPoint;
G4ThreeVector fLastLocatedPointLocal;
G4ThreeVector fExitNormal;
G4ThreeVector fGrandMotherExitNormal;
G4bool fEnteredDaughter;
G4bool fExitedMother;
G4bool fWasLimitedByGeometry;
G4bool fEntering;
G4bool fExiting;
G4bool fLastStepWasZero;
G4bool fLocatedOnEdge;
G4bool fLocatedOutsideWorld;
G4bool fValidExitNormal;
G4bool fPushed;
G4int fNumberZeroSteps;
int align1;
G4double fPreviousSafety;
G4VPhysicalVolume *fBlockedPhysicalVolume;
G4VPhysicalVolume *fTopPhysical;
}
G4Navigator;
__device__ void G4Navigator_ctor( G4Navigator *This );
__device__ void G4Navigator_SetWorldVolume(
G4Navigator *This,
G4VPhysicalVolume* pWorld );
__device__ G4VPhysicalVolume* G4Navigator_LocateGlobalPointAndSetup(
G4Navigator *This,
G4ThreeVector globalPoint,
const G4ThreeVector* pGlobalDirection,
G4bool relativeSearch,
G4bool ignoreDirection,
float * Result);
__device__
G4double G4Navigator_ComputeStep(
G4Navigator *This,
G4ThreeVector pGlobalpoint,
G4ThreeVector pDirection,
const G4double pCurrentProposedStepLength,
G4double *pNewSafety
, G4bool cur_vol_local
, G4double * Result
);
__device__ void G4Navigator_SetGeometricallyLimitedStep( G4Navigator *This );
__device__ G4double G4NormalNavigation_ComputeStep(
G4ThreeVector localPoint,
G4ThreeVector localDirection,
const G4double currentProposedStepLength,
G4double *newSafety,
G4NavigationHistory *history,
G4bool *validExitNormal,
G4ThreeVector *exitNormal,
G4bool *exiting,
G4bool *entering,
G4VPhysicalVolume *(*pBlockedPhysical));
__device__ G4double G4NormalNavigation_ComputeSafety(
G4ThreeVector localPoint,
const G4NavigationHistory *history );
__device__ G4bool G4NormalNavigation_LevelLocate(
G4NavigationHistory *history,
const G4VPhysicalVolume *blockedVol,
G4ThreeVector* globalPoint,
const G4ThreeVector* globalDirection,
G4bool pLocatedOnEdge,
G4ThreeVector* localPoint );
__device__ void G4VoxelNavigation_ctor( G4VoxelNavigation *This );
__device__ void G4Navigator_ResetState( G4Navigator *This )
{
This->fWasLimitedByGeometry = false;
This->fEntering = false;
This->fExiting = false;
This->fLocatedOnEdge = false;
This->fLastStepWasZero = false;
This->fEnteredDaughter = false;
This->fExitedMother = false;
This->fPushed = false;
This->fValidExitNormal = false;
This->fExitNormal = G4ThreeVector_create(0,0,0);
This->fPreviousSafety = 0.0;
This->fNumberZeroSteps = 0;
This->fBlockedPhysicalVolume = 0;
This->fLastLocatedPointLocal = G4ThreeVector_create( 1e37, -1e37, 0.0 );
This->fLocatedOutsideWorld = false;
}
__device__
G4ThreeVector G4Navigator_ComputeLocalAxis( const G4Navigator *This, G4ThreeVector pVec)
{
G4AffineTransform t =
G4NavigationHistory_GetTopTransform( &(This->fHistory) );
return G4AffineTransform_TransformAxis(&t, pVec);
}
__device__ G4ThreeVector
G4Navigator_ComputeLocalPoint( const G4Navigator *This, G4ThreeVector pGlobalPoint)
{
G4AffineTransform t =
G4NavigationHistory_GetTopTransform( &(This->fHistory) );
return G4AffineTransform_TransformPoint(&t, pGlobalPoint);
}
__device__ void G4Navigator_SetWorldVolume( G4Navigator *This, G4VPhysicalVolume* pWorld )
{
This->fTopPhysical = pWorld;
G4NavigationHistory_SetFirstEntry( &(This->fHistory), pWorld );
}
__device__ void G4Navigator_SetGeometricallyLimitedStep( G4Navigator *This )
{
This->fWasLimitedByGeometry = true;
}
__device__
void G4Navigator_ResetStackAndState( G4Navigator *This )
{
G4NavigationHistory_Reset( &(This->fHistory) );
G4Navigator_ResetState( This );
}
__device__
EVolume G4Navigator_VolumeType( const G4Navigator *This, const G4VPhysicalVolume *pVol )
{
(void)This;
(void)pVol;
return kNormal;
}
__device__ void G4Navigator_ctor( G4Navigator *This )
{
G4NavigationHistory_ctor( &(This->fHistory) );
G4VoxelNavigation_ctor( &(This->fVoxelNav ) );
G4Navigator_ResetStackAndState( This );
This->fWasLimitedByGeometry = false;
This->fTopPhysical = 0;
This->fPushed = false;
This->fStepEndPoint = G4ThreeVector_create( kInfinity, kInfinity, kInfinity );
}
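// Locates the deepest volume containing globalPoint.  Levels are popped while
// the point is outside (or leaving through the surface of) the current
// volume, then daughters are entered via voxel or normal LevelLocate.  The
// relative-search branch is disabled by the constant 'if ( 1 )', so every
// call relocates from the world volume.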
__device__
G4VPhysicalVolume*
G4Navigator_LocateGlobalPointAndSetup(
G4Navigator *This,
G4ThreeVector globalPoint,
const G4ThreeVector* pGlobalDirection,
G4bool relativeSearch,
G4bool ignoreDirection,
float * Result
)
{
G4bool notKnownContained=true, noResult;
G4VPhysicalVolume *targetPhysical;
G4VSolid *targetSolid = 0;
G4ThreeVector localPoint = G4ThreeVector_create(0,0,0);
G4ThreeVector globalDirection = G4ThreeVector_create(0,0,0);
EInside insideCode;
G4bool considerDirection = (!ignoreDirection) || This->fLocatedOnEdge;
if( considerDirection && pGlobalDirection != 0 )
{
globalDirection=*pGlobalDirection;
}
if ( 1 )
{
G4Navigator_ResetStackAndState( This );
}
else
{
if ( This->fWasLimitedByGeometry )
{
This->fWasLimitedByGeometry = false;
This->fEnteredDaughter = This->fEntering;
This->fExitedMother = This->fExiting;
if ( This->fExiting )
{
if ( G4NavigationHistory_GetDepth( &(This->fHistory) ) )
{
This->fBlockedPhysicalVolume = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
G4NavigationHistory_BackLevel( &(This->fHistory) );
}
else
{
This->fLastLocatedPointLocal = localPoint;
This->fLocatedOutsideWorld = true;
return 0;
}
if ( This->fLocatedOnEdge )
{
This->fExiting= false;
}
}
else
if ( This->fEntering )
{
G4NavigationHistory_NewLevel( &(This->fHistory), This->fBlockedPhysicalVolume, kNormal);
This->fEntering = false;
This->fBlockedPhysicalVolume = 0;
G4AffineTransform t = G4NavigationHistory_GetTopTransform( &(This->fHistory) );
localPoint = G4AffineTransform_TransformPoint(&t,globalPoint);
notKnownContained = false;
}
}
else
{
This->fBlockedPhysicalVolume = 0;
This->fEntering = false;
This->fEnteredDaughter = false;
This->fExiting = false;
This->fExitedMother = false;
}
}
while (notKnownContained)
{
targetSolid =
G4LogicalVolume_GetSolid(
G4VPhysicalVolume_GetLogicalVolume(
G4NavigationHistory_GetTopVolume(&(This->fHistory))));
G4AffineTransform t = G4NavigationHistory_GetTopTransform( &(This->fHistory) );
localPoint = G4AffineTransform_TransformPoint(&t,globalPoint);
insideCode = G4VSolid_Inside(targetSolid,localPoint);
if ( insideCode==kOutside )
{
if ( G4NavigationHistory_GetDepth( &(This->fHistory) ) )
{
This->fBlockedPhysicalVolume = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
G4NavigationHistory_BackLevel( &(This->fHistory) );
This->fExiting = false;
}
else
{
This->fLastLocatedPointLocal = localPoint;
This->fLocatedOutsideWorld = true;
return 0;
}
}
else
if ( insideCode==kSurface )
{
G4bool isExiting = This->fExiting;
if( (!This->fExiting)&&considerDirection )
{
G4bool directionExiting = false;
G4AffineTransform t = G4NavigationHistory_GetTopTransform( &(This->fHistory) );
G4ThreeVector localDirection =G4AffineTransform_TransformAxis(&t,globalDirection);
G4ThreeVector normal = G4VSolid_SurfaceNormal(targetSolid, localPoint);
directionExiting = G4ThreeVector_dot(normal,localDirection) > 0.0;
isExiting = isExiting || directionExiting;
}
if( isExiting )
{
if ( G4NavigationHistory_GetDepth( &(This->fHistory) ) )
{
This->fBlockedPhysicalVolume = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
G4NavigationHistory_BackLevel( &(This->fHistory) );
This->fValidExitNormal = false;
}
else
{
This->fLastLocatedPointLocal = localPoint;
This->fLocatedOutsideWorld = true;
return 0;
}
}
else
{
notKnownContained=false;
}
}
else
{
notKnownContained=false;
}
}
noResult = true;
do
{
targetPhysical = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
G4LogicalVolume *targetLogical = G4VPhysicalVolume_GetLogicalVolume(targetPhysical);
if ( G4LogicalVolume_GetVoxelHeader( targetLogical ) != 0 )
{
noResult =
G4VoxelNavigation_LevelLocate(
&(This->fVoxelNav),
&(This->fHistory),
This->fBlockedPhysicalVolume,
globalPoint,
pGlobalDirection,
considerDirection,
&localPoint);
}
else
{
noResult = G4NormalNavigation_LevelLocate(
&(This->fHistory),
This->fBlockedPhysicalVolume,
&globalPoint,
pGlobalDirection,
considerDirection,
&localPoint);
}
if ( noResult )
{
This->fBlockedPhysicalVolume = 0;
This->fEntering = false;
This->fEnteredDaughter = true;
}
} while (noResult);
This->fLastLocatedPointLocal = localPoint;
This->fLocatedOutsideWorld= false;
return targetPhysical;
}
__device__ void
G4Navigator_LocateGlobalPointWithinVolume( G4Navigator *This, G4ThreeVector pGlobalpoint)
{
This->fLastLocatedPointLocal = G4Navigator_ComputeLocalPoint( This, pGlobalpoint );
G4VPhysicalVolume* motherPhysical = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
G4LogicalVolume* motherLogical = G4VPhysicalVolume_GetLogicalVolume( motherPhysical );
G4SmartVoxelHeader* pVoxelHeader = G4LogicalVolume_GetVoxelHeader( motherLogical );
if ( pVoxelHeader )
{
G4VoxelNavigation_VoxelLocate( &(This->fVoxelNav), pVoxelHeader, This->fLastLocatedPointLocal );
}
This->fBlockedPhysicalVolume = 0;
This->fEntering = false;
This->fEnteredDaughter = false;
This->fExiting = false;
This->fExitedMother = false;
}
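// Computes the proposed geometric step from pGlobalpoint along pDirection:
// relocates within the mother if the point has moved, delegates to voxel or
// normal navigation, applies the zero-step push heuristic, and caches the
// mother's exit normal when the step leaves it.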
__device__
G4double G4Navigator_ComputeStep(
G4Navigator *This,
G4ThreeVector pGlobalpoint,
G4ThreeVector pDirection,
const G4double pCurrentProposedStepLength,
G4double *pNewSafety
, G4bool cur_vol_local
, G4double * Result
)
{
G4ThreeVector localDirection = G4Navigator_ComputeLocalAxis(This,pDirection);
G4double Step = 1e37;
G4VPhysicalVolume *motherPhysical = G4NavigationHistory_GetTopVolume( &(This->fHistory) );
const G4double kCarTolerance = 1E-3;
G4LogicalVolume *motherLogical = G4VPhysicalVolume_GetLogicalVolume(motherPhysical);
G4ThreeVector newLocalPoint = G4Navigator_ComputeLocalPoint( This, pGlobalpoint);
if( !G4ThreeVector_equal(newLocalPoint, This->fLastLocatedPointLocal) )
{
G4ThreeVector oldLocalPoint = This->fLastLocatedPointLocal;
G4double moveLenSq = G4ThreeVector_diff2(newLocalPoint,oldLocalPoint);
if ( moveLenSq >= kCarTolerance*kCarTolerance )
{
G4Navigator_LocateGlobalPointWithinVolume( This, pGlobalpoint );
}
}
if ( G4LogicalVolume_GetVoxelHeader(motherLogical) != 0 )
{
if( cur_vol_local )
Step = G4VoxelNavigation_ComputeStep(
&(This->fVoxelNav),
This->fLastLocatedPointLocal,
localDirection,
pCurrentProposedStepLength,
pNewSafety,
&(This->fHistory),
&(This->fValidExitNormal),
&(This->fExitNormal),
&(This->fExiting),
&(This->fEntering),
&(This->fBlockedPhysicalVolume)
, Result
);
else
return 0;
}
else
{
Step = G4NormalNavigation_ComputeStep(
This->fLastLocatedPointLocal,
localDirection,
pCurrentProposedStepLength,
pNewSafety,
&(This->fHistory),
&(This->fValidExitNormal),
&(This->fExitNormal),
&(This->fExiting),
&(This->fEntering),
&(This->fBlockedPhysicalVolume));
}
This->fPreviousSafety = *pNewSafety;
This->fLocatedOnEdge = This->fLastStepWasZero && (Step==0.0);
This->fLastStepWasZero = (Step==0.0);
if (This->fPushed) This->fPushed = This->fLastStepWasZero;
if ( This->fLastStepWasZero )
{
This->fNumberZeroSteps++;
if( This->fNumberZeroSteps > 10 -1 )
{
Step += 0.9*kCarTolerance;
This->fPushed = true;
}
if( This->fNumberZeroSteps > 25 -1 )
{
(void)0;
}
}
else
{
if (!This->fPushed) This->fNumberZeroSteps = 0;
}
This->fEnteredDaughter = This->fEntering;
This->fExitedMother = This->fExiting;
if( This->fExiting )
{
if(This->fValidExitNormal)
{
This->fGrandMotherExitNormal= This->fExitNormal;
}
else
{
G4ThreeVector finalLocalPoint =
G4ThreeVector_saxpy( Step, localDirection, This->fLastLocatedPointLocal );
This->fGrandMotherExitNormal =
G4VSolid_SurfaceNormal(
G4LogicalVolume_GetSolid(motherLogical),finalLocalPoint);
G4RotationMatrix mRot = G4VPhysicalVolume_GetObjectRotationValue(motherPhysical);
G4RotationMatrix inv = G4RotationMatrix_inverse(&mRot);
This->fGrandMotherExitNormal
= G4RotationMatrix_apply(&inv,This->fGrandMotherExitNormal);
}
}
This->fStepEndPoint =
G4ThreeVector_saxpy(Step, pDirection, pGlobalpoint );
if( (Step == pCurrentProposedStepLength) && (!This->fExiting) && (!This->fEntering) )
{
Step = kInfinity;
}
return Step;
}
__device__ G4bool
G4AuxiliaryNavServices_CheckPointOnSurface(
const G4VSolid* sampleSolid,
G4ThreeVector localPoint,
const G4ThreeVector* globalDirection,
G4AffineTransform sampleTransform,
const G4bool locatedOnEdge)
{
G4ThreeVector localDirection, sampleNormal;
G4bool enter = false;
EInside insideSolid =
G4VSolid_Inside(sampleSolid, localPoint);
if ( insideSolid!=kOutside )
{
G4bool checkDirection= locatedOnEdge && (globalDirection!=0);
if( (insideSolid==kSurface) && checkDirection)
{
localDirection= G4AffineTransform_TransformAxis(&sampleTransform,*globalDirection);
sampleNormal = G4VSolid_SurfaceNormal(sampleSolid,localPoint);
if ( G4ThreeVector_dot(sampleNormal,localDirection) <= 0 )
{
if( G4ThreeVector_dot(sampleNormal,localDirection) == 0 )
{
G4double distanceToIn =
G4VSolid_DistanceToIn_full( sampleSolid, localPoint, localDirection );
if( distanceToIn != kInfinity )
{
enter = true;
}
}
else
{
enter = true;
}
}
}
else
{
enter = true;
}
}
return enter;
}
__device__ G4bool
G4NormalNavigation_LevelLocate(
G4NavigationHistory *history,
const G4VPhysicalVolume *blockedVol,
G4ThreeVector* globalPoint,
const G4ThreeVector* globalDirection,
G4bool pLocatedOnEdge,
G4ThreeVector* localPoint )
{
G4VPhysicalVolume *targetPhysical, *samplePhysical;
G4LogicalVolume *targetLogical;
G4VSolid *sampleSolid;
G4ThreeVector samplePoint;
G4int targetNoDaughters;
targetPhysical = G4NavigationHistory_GetTopVolume(history);
targetLogical = G4VPhysicalVolume_GetLogicalVolume(targetPhysical);
targetNoDaughters = G4LogicalVolume_GetNoDaughters(targetLogical);
if (targetNoDaughters == 0) return false;
for ( int sampleNo=targetNoDaughters-1; sampleNo>=0; sampleNo-- )
{
samplePhysical =
G4LogicalVolume_GetDaughter(targetLogical,sampleNo);
if ( samplePhysical!=blockedVol )
{
G4NavigationHistory_NewLevel(history, samplePhysical, kNormal );
sampleSolid =
G4LogicalVolume_GetSolid(
G4VPhysicalVolume_GetLogicalVolume(samplePhysical));
G4AffineTransform tf =
G4NavigationHistory_GetTopTransform(history);
samplePoint =
G4AffineTransform_TransformPoint( &tf, *globalPoint );
if( G4AuxiliaryNavServices_CheckPointOnSurface(
sampleSolid, samplePoint, globalDirection,
tf, pLocatedOnEdge) )
{
*localPoint = samplePoint;
return true;
}
else
{
G4NavigationHistory_BackLevel(history);
}
}
}
return false;
}
__device__
G4double
G4NormalNavigation_ComputeStep(
G4ThreeVector localPoint,
G4ThreeVector localDirection,
const G4double currentProposedStepLength,
G4double *newSafety,
G4NavigationHistory *history,
G4bool *validExitNormal,
G4ThreeVector *exitNormal,
G4bool *exiting,
G4bool *entering,
G4VPhysicalVolume *(*pBlockedPhysical))
{
G4VPhysicalVolume *motherPhysical, *samplePhysical, *blockedExitedVol=0;
G4LogicalVolume *motherLogical;
G4VSolid *motherSolid;
G4ThreeVector sampleDirection;
G4double ourStep=currentProposedStepLength, motherSafety, ourSafety;
G4int localNoDaughters, sampleNo;
motherPhysical = G4NavigationHistory_GetTopVolume(history);
motherLogical = G4VPhysicalVolume_GetLogicalVolume(motherPhysical);
motherSolid = G4LogicalVolume_GetSolid(motherLogical);
motherSafety = G4VSolid_DistanceToOut(motherSolid,localPoint);
ourSafety = motherSafety;
if ( *exiting && *validExitNormal )
{
if ( G4ThreeVector_dot(localDirection,*exitNormal)>=kMinExitingNormalCosine )
{
blockedExitedVol =* pBlockedPhysical;
ourSafety = 0;
}
}
*exiting = false;
*entering = false;
localNoDaughters = G4LogicalVolume_GetNoDaughters(motherLogical);
for ( sampleNo=localNoDaughters-1; sampleNo>=0; sampleNo--)
{
samplePhysical = G4LogicalVolume_GetDaughter(motherLogical,sampleNo);
if ( samplePhysical!=blockedExitedVol )
{
G4AffineTransform sampleTf =
G4AffineTransform_create_full(
G4VPhysicalVolume_GetObjectRotationValue(samplePhysical),
G4VPhysicalVolume_GetTranslation(samplePhysical));
G4AffineTransform_Invert(&sampleTf);
const G4ThreeVector samplePoint =
G4AffineTransform_TransformPoint(&sampleTf, localPoint);
const G4VSolid *sampleSolid =
G4LogicalVolume_GetSolid(
G4VPhysicalVolume_GetLogicalVolume( samplePhysical ));
const G4double sampleSafety =
G4VSolid_DistanceToIn(sampleSolid,samplePoint);
if ( sampleSafety<ourSafety )
{
ourSafety=sampleSafety;
}
if ( sampleSafety<=ourStep )
{
sampleDirection = G4AffineTransform_TransformAxis(&sampleTf, localDirection);
const G4double sampleStep =
G4VSolid_DistanceToIn_full(sampleSolid,samplePoint,sampleDirection);
if ( sampleStep<=ourStep )
{
ourStep = sampleStep;
*entering = true;
*exiting = false;
*pBlockedPhysical = samplePhysical;
}
}
}
}
if ( currentProposedStepLength<ourSafety )
{
*entering = false;
*exiting = false;
*pBlockedPhysical = 0;
ourStep = kInfinity;
}
else
{
if ( motherSafety<=ourStep )
{
G4double motherStep =
G4VSolid_DistanceToOut_full(
motherSolid,
localPoint,
localDirection,
true,
validExitNormal,
exitNormal);
if ( motherStep<=ourStep )
{
ourStep = motherStep;
*exiting = true;
*entering = false;
if ( *validExitNormal )
{
G4RotationMatrix rot = G4VPhysicalVolume_GetObjectRotationValue(motherPhysical);
G4RotationMatrix inv = G4RotationMatrix_inverse(&rot);
*exitNormal = G4RotationMatrix_apply(&inv, *exitNormal);
}
}
else
{
*validExitNormal = false;
}
}
}
*newSafety = ourSafety;
return ourStep;
}
__device__ G4bool
G4AuxiliaryNavServices_CheckPointOnSurface(
const G4VSolid* sampleSolid,
G4ThreeVector localPoint,
const G4ThreeVector* globalDirection,
G4AffineTransform sampleTransform,
const G4bool locatedOnEdge);
__device__ G4bool
G4AuxiliaryNavServices_CheckPointExiting(
const G4VSolid* sampleSolid,
G4ThreeVector localPoint,
const G4ThreeVector* globalDirection,
G4AffineTransform sampleTransform );
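// Descends the smart-voxel header hierarchy, pushing axis/slice information
// onto the navigation stack at each depth, until the terminal node that
// contains localPoint is reached.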
__device__
G4SmartVoxelNode*
G4VoxelNavigation_VoxelLocate(
G4VoxelNavigation *This,
G4SmartVoxelHeader* pHead,
G4ThreeVector localPoint )
{
G4SmartVoxelHeader *targetVoxelHeader=pHead;
G4SmartVoxelNode *targetVoxelNode = 0;
const G4SmartVoxelProxy *sampleProxy;
EAxis targetHeaderAxis;
G4double targetHeaderMin, targetHeaderNodeWidth;
G4int targetHeaderNoSlices, targetNodeNo;
This->fVoxelDepth = 0;
while ( targetVoxelNode == 0 )
{
targetHeaderAxis = G4VoxelHeader_GetAxis(targetVoxelHeader);
targetHeaderNoSlices = G4VoxelHeader_GetNoSlices(targetVoxelHeader);
targetHeaderMin = G4VoxelHeader_GetMinExtent(targetVoxelHeader);
targetHeaderNodeWidth =
(G4VoxelHeader_GetMaxExtent(targetVoxelHeader)-targetHeaderMin)
/ targetHeaderNoSlices;
targetNodeNo = (G4int)(
(G4ThreeVector_coord(localPoint,targetHeaderAxis)-targetHeaderMin)
/ targetHeaderNodeWidth);
if ( targetNodeNo<0 )
{
targetNodeNo = 0;
}
else if ( targetNodeNo>=targetHeaderNoSlices )
{
targetNodeNo = targetHeaderNoSlices-1;
}
This->fVoxelAxisStack[This->fVoxelDepth] = targetHeaderAxis;
This->fVoxelNoSlicesStack[This->fVoxelDepth] = targetHeaderNoSlices;
This->fVoxelSliceWidthStack[This->fVoxelDepth] = targetHeaderNodeWidth;
This->fVoxelNodeNoStack[This->fVoxelDepth] = targetNodeNo;
This->fVoxelHeaderStack[This->fVoxelDepth] = targetVoxelHeader;
sampleProxy = G4VoxelHeader_GetSlice(targetVoxelHeader, targetNodeNo);
if ( G4VoxelProxy_IsNode(sampleProxy) )
{
targetVoxelNode = G4VoxelProxy_GetNode(sampleProxy);
}
else
{
targetVoxelHeader = G4VoxelProxy_GetHeader(sampleProxy);
This->fVoxelDepth++;
(void)0;
}
}
This->fVoxelNode = targetVoxelNode;
return targetVoxelNode;
}
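// Steps to the voxel node adjacent to the current one along the proposed
// step: finds the slice boundary crossed at each stacked depth, then
// re-descends the headers at the new slice.  Returns false when no boundary
// is crossed or the new slice lies outside the voxelised extent.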
__device__
G4bool
G4VoxelNavigation_LocateNextVoxel(
G4VoxelNavigation *This,
G4ThreeVector localPoint,
G4ThreeVector localDirection,
const G4double currentStep )
{
G4SmartVoxelHeader *workHeader=0, *newHeader=0;
G4SmartVoxelProxy *newProxy=0;
G4SmartVoxelNode *newVoxelNode= 0;
G4ThreeVector targetPoint, voxelPoint;
G4double workNodeWidth, workMinExtent, workCoord;
G4double minVal, maxVal, newDistance=0.;
G4double newHeaderMin, newHeaderNodeWidth;
G4int depth=0, newDepth=0, workNodeNo=0, newNodeNo=0, newHeaderNoSlices=0;
EAxis workHeaderAxis, newHeaderAxis;
G4bool isNewVoxel=false;
G4double currentDistance = currentStep;
for (depth=0; depth<This->fVoxelDepth; depth++)
{
targetPoint =
G4ThreeVector_saxpy(currentDistance,localDirection,localPoint);
newDistance = currentDistance;
workHeader = This->fVoxelHeaderStack[depth];
workHeaderAxis = This->fVoxelAxisStack[depth];
workNodeNo = This->fVoxelNodeNoStack[depth];
workNodeWidth = This->fVoxelSliceWidthStack[depth];
workMinExtent = G4VoxelHeader_GetMinExtent(workHeader);
workCoord = G4ThreeVector_coord(targetPoint,workHeaderAxis);
minVal = workMinExtent+workNodeNo*workNodeWidth;
if ( minVal<=workCoord+1E-3*0.5 )
{
maxVal = minVal+workNodeWidth;
if ( maxVal<=workCoord-1E-3*0.5 )
{
newNodeNo = workNodeNo+1;
newHeader = workHeader;
newDistance = (maxVal-G4ThreeVector_coord(localPoint,workHeaderAxis))
/ G4ThreeVector_coord(localDirection,workHeaderAxis);
isNewVoxel = true;
newDepth = depth;
}
}
else
{
newNodeNo = workNodeNo-1;
newHeader = workHeader;
newDistance = (minVal-G4ThreeVector_coord(localPoint,workHeaderAxis))
/ G4ThreeVector_coord(localDirection,workHeaderAxis);
isNewVoxel = true;
newDepth = depth;
}
currentDistance = newDistance;
}
targetPoint =
G4ThreeVector_saxpy(currentDistance,localDirection,localPoint);
depth = This->fVoxelDepth;
{
workHeader = This->fVoxelHeaderStack[depth];
workHeaderAxis = This->fVoxelAxisStack[depth];
workNodeNo = This->fVoxelNodeNoStack[depth];
workNodeWidth = This->fVoxelSliceWidthStack[depth];
workMinExtent = G4VoxelHeader_GetMinExtent(workHeader);
workCoord = G4ThreeVector_coord(targetPoint,workHeaderAxis);
minVal = workMinExtent+G4VoxelNode_GetMinEquivalentSliceNo(This->fVoxelNode)*workNodeWidth;
if ( minVal<=workCoord+1E-3*0.5 )
{
maxVal = workMinExtent+(G4VoxelNode_GetMaxEquivalentSliceNo(This->fVoxelNode)+1)
*workNodeWidth;
if ( maxVal<=workCoord-1E-3*0.5 )
{
newNodeNo = G4VoxelNode_GetMaxEquivalentSliceNo(This->fVoxelNode)+1;
newHeader = workHeader;
newDistance = (maxVal-G4ThreeVector_coord(localPoint,workHeaderAxis))
/ G4ThreeVector_coord(localDirection,workHeaderAxis);
isNewVoxel = true;
newDepth = depth;
}
}
else
{
newNodeNo = G4VoxelNode_GetMinEquivalentSliceNo(This->fVoxelNode)-1;
newHeader = workHeader;
newDistance = (minVal-G4ThreeVector_coord(localPoint,workHeaderAxis))
/ G4ThreeVector_coord(localDirection,workHeaderAxis);
isNewVoxel = true;
newDepth = depth;
}
currentDistance = newDistance;
}
if (isNewVoxel)
{
if ( (newNodeNo<0) || (newNodeNo>=G4VoxelHeader_GetNoSlices(newHeader)))
{
isNewVoxel = false;
}
else
{
voxelPoint = G4ThreeVector_saxpy(newDistance,localDirection,localPoint);
(void)0;
This->fVoxelNodeNoStack[newDepth] = newNodeNo;
This->fVoxelDepth = newDepth;
newVoxelNode = 0;
while ( newVoxelNode == 0 )
{
newProxy = G4VoxelHeader_GetSlice(newHeader,newNodeNo);
if ( G4VoxelProxy_IsNode(newProxy) )
{
newVoxelNode = G4VoxelProxy_GetNode(newProxy);
}
else
{
This->fVoxelDepth++;
(void)0;
newHeader = G4VoxelProxy_GetHeader(newProxy);
newHeaderAxis = G4VoxelHeader_GetAxis(newHeader);
newHeaderNoSlices = G4VoxelHeader_GetNoSlices(newHeader);
newHeaderMin = G4VoxelHeader_GetMinExtent(newHeader);
newHeaderNodeWidth =
(G4VoxelHeader_GetMaxExtent(newHeader)-newHeaderMin)
/ newHeaderNoSlices;
newNodeNo = (G4int)(
(G4ThreeVector_coord(voxelPoint,newHeaderAxis)-newHeaderMin)
/ newHeaderNodeWidth );
if ( newNodeNo<0 )
{
newNodeNo=0;
}
else if ( newNodeNo>=newHeaderNoSlices )
{
newNodeNo = newHeaderNoSlices-1;
}
This->fVoxelAxisStack[This->fVoxelDepth] = newHeaderAxis;
This->fVoxelNoSlicesStack[This->fVoxelDepth] = newHeaderNoSlices;
This->fVoxelSliceWidthStack[This->fVoxelDepth] = newHeaderNodeWidth;
This->fVoxelNodeNoStack[This->fVoxelDepth] = newNodeNo;
This->fVoxelHeaderStack[This->fVoxelDepth] = newHeader;
}
}
This->fVoxelNode = newVoxelNode;
}
}
return isNewVoxel;
}
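// Distance from localPoint to the nearest boundary of the current voxel,
// taking the equivalent-slice extent and all enclosing headers into account;
// used as a cheap lower bound on the safety.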
__device__
G4double
G4VoxelNavigation_ComputeVoxelSafety(
const G4VoxelNavigation *This,
G4ThreeVector localPoint)
{
G4SmartVoxelHeader *curHeader;
G4double voxelSafety, curNodeWidth;
G4double curNodeOffset, minCurCommonDelta, maxCurCommonDelta;
G4int minCurNodeNoDelta, maxCurNodeNoDelta;
G4int localVoxelDepth, curNodeNo;
EAxis curHeaderAxis;
localVoxelDepth = This->fVoxelDepth;
curHeader = This->fVoxelHeaderStack[localVoxelDepth];
curHeaderAxis = This->fVoxelAxisStack[localVoxelDepth];
curNodeNo = This->fVoxelNodeNoStack[localVoxelDepth];
curNodeWidth = This->fVoxelSliceWidthStack[localVoxelDepth];
curNodeOffset = curNodeNo*curNodeWidth;
maxCurNodeNoDelta = G4VoxelNode_GetMaxEquivalentSliceNo(This->fVoxelNode)-curNodeNo;
minCurNodeNoDelta = curNodeNo-G4VoxelNode_GetMaxEquivalentSliceNo(This->fVoxelNode);
minCurCommonDelta = G4ThreeVector_coord(localPoint,curHeaderAxis)
- G4VoxelHeader_GetMinExtent(curHeader) - curNodeOffset;
maxCurCommonDelta = curNodeWidth-minCurCommonDelta;
if ( minCurNodeNoDelta<maxCurNodeNoDelta )
{
voxelSafety = minCurNodeNoDelta*curNodeWidth;
voxelSafety += minCurCommonDelta;
}
else if (maxCurNodeNoDelta < minCurNodeNoDelta)
{
voxelSafety = maxCurNodeNoDelta*curNodeWidth;
voxelSafety += maxCurCommonDelta;
}
else
{
voxelSafety = minCurNodeNoDelta*curNodeWidth;
voxelSafety += (((minCurCommonDelta)<(maxCurCommonDelta))?(minCurCommonDelta):(maxCurCommonDelta));
}
while ( (localVoxelDepth>0) && (voxelSafety>0) )
{
localVoxelDepth--;
curHeader = This->fVoxelHeaderStack[localVoxelDepth];
curHeaderAxis = This->fVoxelAxisStack[localVoxelDepth];
curNodeNo = This->fVoxelNodeNoStack[localVoxelDepth];
curNodeWidth = This->fVoxelSliceWidthStack[localVoxelDepth];
curNodeOffset = curNodeNo*curNodeWidth;
minCurCommonDelta = G4ThreeVector_coord(localPoint,curHeaderAxis)
- G4VoxelHeader_GetMinExtent(curHeader) - curNodeOffset;
maxCurCommonDelta = curNodeWidth-minCurCommonDelta;
if ( minCurCommonDelta<voxelSafety )
{
voxelSafety = minCurCommonDelta;
}
if ( maxCurCommonDelta<voxelSafety )
{
voxelSafety = maxCurCommonDelta;
}
}
if ( voxelSafety<0 )
{
voxelSafety = 0;
}
return voxelSafety;
}
__device__
void G4VoxelNavigation_ctor( G4VoxelNavigation *This )
{
This->fVoxelDepth = -1;
This->fVoxelNode = 0;
}
__device__
G4bool
G4VoxelNavigation_LevelLocate(
G4VoxelNavigation *This,
G4NavigationHistory* history,
const G4VPhysicalVolume* blockedVol,
G4ThreeVector globalPoint,
const G4ThreeVector* globalDirection,
const G4bool pLocatedOnEdge,
G4ThreeVector *localPoint )
{
G4SmartVoxelHeader *targetVoxelHeader;
G4SmartVoxelNode *targetVoxelNode;
G4VPhysicalVolume *targetPhysical, *samplePhysical;
G4LogicalVolume *targetLogical;
G4VSolid *sampleSolid;
G4ThreeVector samplePoint;
G4int targetNoDaughters;
targetPhysical = G4NavigationHistory_GetTopVolume(history);
targetLogical = G4VPhysicalVolume_GetLogicalVolume(targetPhysical);
targetVoxelHeader = G4LogicalVolume_GetVoxelHeader(targetLogical);
targetVoxelNode =
G4VoxelNavigation_VoxelLocate(This,targetVoxelHeader,*localPoint);
targetNoDaughters=G4VoxelNode_GetNoContained(targetVoxelNode);
if ( targetNoDaughters==0 ) return false;
for ( int sampleNo=targetNoDaughters-1; sampleNo>=0; sampleNo-- )
{
samplePhysical =
G4LogicalVolume_GetDaughter( targetLogical,
G4VoxelNode_GetVolume(targetVoxelNode,sampleNo));
if ( samplePhysical!=blockedVol )
{
G4NavigationHistory_NewLevel(history, samplePhysical, kNormal);
sampleSolid =
G4LogicalVolume_GetSolid(
G4VPhysicalVolume_GetLogicalVolume( samplePhysical ));
G4AffineTransform tf = G4NavigationHistory_GetTopTransform( history );
samplePoint =
G4AffineTransform_TransformPoint( &tf, globalPoint );
if( G4AuxiliaryNavServices_CheckPointOnSurface(
sampleSolid, samplePoint, globalDirection,
tf, pLocatedOnEdge) )
{
*localPoint = samplePoint;
return true;
}
else
{
G4NavigationHistory_BackLevel( history );
}
}
}
return false;
}
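// Voxelised ComputeStep: only the daughters registered in the current voxel
// node are intersected; if none limits the step, the mother exit is
// considered and the search advances voxel by voxel until a step (or
// kInfinity) is found.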
__device__
G4double
G4VoxelNavigation_ComputeStep(
G4VoxelNavigation *This,
G4ThreeVector localPoint,
G4ThreeVector localDirection,
const G4double currentProposedStepLength,
G4double *newSafety,
G4NavigationHistory *history,
G4bool *validExitNormal,
G4ThreeVector *exitNormal,
G4bool *exiting,
G4bool *entering,
G4VPhysicalVolume *(*pBlockedPhysical)
, G4double * Result
)
{
G4VPhysicalVolume *motherPhysical, *samplePhysical,
*blockedExitedVol = 0;
G4LogicalVolume *motherLogical;
G4VSolid *motherSolid;
G4ThreeVector sampleDirection;
G4double ourStep=currentProposedStepLength, motherSafety, ourSafety;
G4int sampleNo;
G4bool initialNode, noStep;
const G4SmartVoxelNode *curVoxelNode;
G4int curNoVolumes, contentNo;
G4double voxelSafety;
motherPhysical = G4NavigationHistory_GetTopVolume( history );
motherLogical = G4VPhysicalVolume_GetLogicalVolume(motherPhysical);
motherSolid = G4LogicalVolume_GetSolid(motherLogical);
motherSafety = G4VSolid_DistanceToOut(motherSolid, localPoint);
ourSafety = motherSafety;
if ( *exiting && *validExitNormal )
{
if ( G4ThreeVector_dot(localDirection,*exitNormal)>=kMinExitingNormalCosine )
{
blockedExitedVol = *pBlockedPhysical;
ourSafety = 0;
}
}
*exiting = false;
*entering = false;
initialNode = true;
noStep = true;
while ( noStep )
{
curVoxelNode = This->fVoxelNode;
curNoVolumes = G4VoxelNode_GetNoContained(curVoxelNode);
for (contentNo=curNoVolumes-1; contentNo>=0; contentNo--)
{
sampleNo = G4VoxelNode_GetVolume( curVoxelNode, contentNo);
samplePhysical = G4LogicalVolume_GetDaughter(motherLogical,sampleNo);
if ( samplePhysical!=blockedExitedVol )
{
G4AffineTransform sampleTf =
G4AffineTransform_create_full(
G4VPhysicalVolume_GetObjectRotationValue(samplePhysical),
G4VPhysicalVolume_GetTranslation(samplePhysical));
G4AffineTransform_Invert(&sampleTf);
const G4ThreeVector samplePoint =
G4AffineTransform_TransformPoint(&sampleTf,localPoint);
const G4VSolid *sampleSolid =
G4LogicalVolume_GetSolid(
G4VPhysicalVolume_GetLogicalVolume(
samplePhysical ));
const G4double sampleSafety =
G4VSolid_DistanceToIn(sampleSolid,samplePoint);
if ( sampleSafety<ourSafety )
{
ourSafety = sampleSafety;
}
if ( sampleSafety<=ourStep )
{
sampleDirection =
G4AffineTransform_TransformAxis( &sampleTf, localDirection );
G4double sampleStep =
G4VSolid_DistanceToIn_full(sampleSolid, samplePoint, sampleDirection);
if ( sampleStep<=ourStep )
{
ourStep = sampleStep;
*entering = true;
*exiting = false;
*pBlockedPhysical = samplePhysical;
}
}
}
}
if (initialNode)
{
initialNode = false;
voxelSafety = G4VoxelNavigation_ComputeVoxelSafety(This,localPoint);
if ( voxelSafety<ourSafety )
{
ourSafety = voxelSafety;
}
if ( currentProposedStepLength<ourSafety )
{
noStep = false;
*entering = false;
*exiting = false;
*pBlockedPhysical = 0;
ourStep = kInfinity;
}
else
{
if ( motherSafety<=ourStep )
{
G4double motherStep =
G4VSolid_DistanceToOut_full( motherSolid, localPoint, localDirection,
true, validExitNormal, exitNormal);
if ( motherStep<=ourStep )
{
ourStep = motherStep;
*exiting = true;
*entering = false;
if ( *validExitNormal )
{
G4RotationMatrix rot = G4VPhysicalVolume_GetObjectRotationValue(motherPhysical);
G4RotationMatrix inv = G4RotationMatrix_inverse(&rot);
*exitNormal = G4RotationMatrix_apply( &inv, *exitNormal );
}
}
else
{
*validExitNormal = false;
}
}
}
*newSafety = ourSafety;
}
if (noStep)
{
noStep = G4VoxelNavigation_LocateNextVoxel(This, localPoint, localDirection, ourStep);
}
}
int locationId = (blockIdx.x * blockDim.x + threadIdx.x);
return ourStep;
}
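// One thread per primary particle: repeatedly locate the current volume,
// compute a geometric step, accumulate material-property * step-length and
// advance the track.  The shared arrays and the inlined or-reduction over
// Cur_Vol_Store assume blockDim.x == BlockSize, and the early return for
// globalIdx >= totalSize means totalSize must be a multiple of the block
// size for the __syncthreads() calls in the loop to be reached by every
// thread of a block.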
__global__ void trace(
Particle *input,
G4double *output,
G4VPhysicalVolume *worldVolumeAndGeomBuffer,
G4double phys_step,
int totalSize
, G4double * Result
, SolidInfo * Solids
, ResultInfo * Result_For_Current_Solid
, FinalResult * Compacter_Result,
G4SmartVoxelNode * nullVNode
)
{
const unsigned globalIdx = (blockIdx.x * blockDim.x + threadIdx.x);
const unsigned localIdx = threadIdx.x;
const unsigned locationId = globalIdx;
if (globalIdx >= totalSize ) return;
__shared__ int Numbers_Of_Solid[ BlockSize ];
__shared__ int Sum_Of_Solids[ BlockSize ];
__shared__ bool noStepArray [ BlockSize ];
__shared__ PointInformation LocationArray[ BlockSize ];
__shared__ G4VPhysicalVolume * info[ BlockSize ];
G4VoxelNode_ctor( nullVNode ,1 );
__shared__ bool Cur_Vol_Store [ BlockSize ];
G4Navigator navi;
G4Navigator *nav = &navi;
G4Navigator_ctor(nav);
G4Navigator_SetWorldVolume( nav, worldVolumeAndGeomBuffer );
Particle p = input[globalIdx];
if( globalIdx == 0)
{
}
const G4VPhysicalVolume * cur_vol =
G4Navigator_LocateGlobalPointAndSetup(
nav, p.pos, 0, false, true, Result );
G4bool cur_vol_local = true, cur_vol_all = true;
G4double step, safety = 0.1;
G4double integratedDensity = 0;
int temp = 0;
while ( cur_vol_all )
{
{
// A finished track keeps spinning in this loop so the block-wide reduction
// below stays in step; skip the material lookup once it has left the world
// (cur_vol == 0).
const G4double curDensity = cur_vol ?
G4LogicalVolume_GetMaterial( G4VPhysicalVolume_GetLogicalVolume( cur_vol ))->property : 0.0;
PointInformation NewPoint = { p.pos, p.dir };
LocationArray[ localIdx ] = NewPoint; // per-block shared slot, indexed by the thread's local id
if( temp == 1)
{
Result[ locationId ] = step;
}
step = G4Navigator_ComputeStep( nav, p.pos, p.dir, phys_step, &safety
, cur_vol_local
, Result
);
if ( step == kInfinity ) step = phys_step;
const G4double nextStepIntegratedD = curDensity * step;
int locationId = (blockIdx.x * blockDim.x + threadIdx.x);
integratedDensity += nextStepIntegratedD;
G4ThreeVector_sum_assign( &(p.pos), G4ThreeVector_mult( p.dir, step ) );
G4Navigator_SetGeometricallyLimitedStep( nav );
if( globalIdx == 0 ){
}
cur_vol =
G4Navigator_LocateGlobalPointAndSetup(
nav, p.pos, &(p.dir), true, false, Result );
if ( !cur_vol )
cur_vol_local = false;
}
Cur_Vol_Store[ localIdx ] = cur_vol_local; // shared array of size BlockSize: index with the local id
__syncthreads();
// cur_vol_all = NoStepReduction( Cur_Vol_Store, BlockSize );
{
int tid = threadIdx.x; // block-local index for the shared-memory reduction
int offset = 1;
for(int d = BlockSize>>1; d > 0; d >>=1)
{
__syncthreads();
if(tid<d)
{
int ai = offset*(2*tid + 1) - 1;
int bi = offset*(2*tid + 2) - 1;
Cur_Vol_Store[bi] = (Cur_Vol_Store[ai] || Cur_Vol_Store[bi]);
}
offset *= 2;
}
G4bool result = Cur_Vol_Store[ BlockSize - 1 ];
__syncthreads();
cur_vol_all = result;
}
__syncthreads();
temp++;
}
output[globalIdx] = integratedDensity;
}
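// Pointer fix-up for the flattened geometry: 'ptr' holds pairs of
// (destination offset, target offset); each thread rewrites one slot so that
// it points into the device copy of the buffer.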
__global__ void relocate ( int * ptr, void * buf, int size )
{
typedef unsigned char byte;
const unsigned globalidx = (blockIdx.x * blockDim.x + threadIdx.x);
if(globalidx>=size) return;
int destoffs, targoffs;
destoffs = *(ptr + 2*globalidx);
targoffs = *(ptr + 2*globalidx+ 1);
// Store a pointer-sized value: the slot at 'destoffs' receives the address
// of the location 'targoffs' bytes into the relocated buffer.
*(void**)((byte*)buf + destoffs) = (void*)((byte*)buf + targoffs);
}
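// Debug kernel: reports the device address of the geometry buffer back to
// the host.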
__global__ void check( G4VPhysicalVolume *worldVolumeAndGeomBuffer, unsigned long * result)
{
// Keep the full 64-bit device address; truncating to unsigned int would drop
// the upper half of the pointer.
unsigned long hope = ( unsigned long )worldVolumeAndGeomBuffer;
*result = hope;
}
__global__ void test ( bool * output, bool * input)
{
int tid = (blockIdx.x * blockDim.x + threadIdx.x);
int offset = 1;
G4bool result;
if( tid == 0)
{
input[ 0] = true;
input[ 1] = true;
input[ 2] = true;
input[ 3] = true;
input[ 4] = true;
input[ 5] = true;
input[ 6] = false;
input[ 7] = true;
}
__syncthreads();
}
__global__ void checkgeom( G4VPhysicalVolume *worldVolumeAndGeomBuffer, int * result, int number_of_increments)
{
const unsigned globalid = (blockIdx.x * blockDim.x + threadIdx.x);
if(globalid>=1) return;
int i=0;
G4Navigator navi;
G4Navigator *nav = &navi;
G4Navigator_ctor(nav);
G4Navigator_SetWorldVolume( nav, worldVolumeAndGeomBuffer );
G4ThreeVector pos = G4ThreeVector_create( 0.0, 0.0, 0.0);
const G4VPhysicalVolume * cur_vol = 0;
// Keep the buffer base in a pointer-sized integer so the offsets written to
// result[] are not truncated on a 64-bit device.
size_t geom_start = ( size_t )worldVolumeAndGeomBuffer;
pos = G4ThreeVector_create( 0.7, 1.0, 0.7);
float x_increment = 0.2, y_increment = 0.2, z_increment = 0.2;
for( i=0; i < number_of_increments*3 ; i+=3)
{
// Look up the volume containing the current sample point before reading its
// fields; the trailing Result argument is not used by the locate routine, so
// 0 is passed here.
cur_vol = G4Navigator_LocateGlobalPointAndSetup( nav, pos, 0, false, true, 0 );
if( !cur_vol ) break;
result[i] = ( int ) cur_vol->count;
result[i + 1] = ( int )(( size_t )(cur_vol->flogical) - geom_start);
result[i + 2] = ( int ) G4LogicalVolume_GetMaterial( G4VPhysicalVolume_GetLogicalVolume( cur_vol ))->property;
pos.x+=x_increment;
pos.y+=y_increment;
pos.z+=z_increment;
}
}
struct CameraParameters
{
double
heading,
pitch,
roll,
dist,
yfov,
target_x,
target_y,
target_z;
CameraParameters()
:
heading(0), pitch(0), roll(0), dist(1),
yfov(90), target_x(0), target_y(0), target_z(0)
{}
};
struct EventOrigin
{
double x,y,z;
};
class Geometry
{
public:
typedef unsigned char byte;
virtual ~Geometry() {}
virtual void create() = 0;
virtual void relocate( void *newbegin ) = 0;
virtual int size() const = 0;
virtual int ptrs_size() const=0;
virtual void *getBuffer() = 0;
virtual double getScale() const = 0;
// virtual CameraParameters getCamera() const
// {
// return 0;
// }
virtual EventOrigin getEvent() const
{
EventOrigin e = { 0,0,0 };
return e;
}
virtual int getNumVoxelNodes() const { return 0; }
};
typedef struct { const char *err, *fn; int line, errcode; } my_cuda_err;
typedef struct { int secs; int usecs; } mytimet;
extern "C"
{
void myprint( const char *chr );
void myprint1( const char *chr, int n );
mytimet mytimer();
void myprinttdiff(mytimet a, mytimet b);
void mysleep(int n);
}
static inline int ceilDiv( int a, int d )
{
return a/d + ((a%d)?1:0);
}
Particle *gpuInput;
G4double *gpuOutput;
Geometry::byte *gpuGeom;
int numInput, numOutput, numInputPerRound;
const int WARP_SIZE = 32;
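// Heuristic 1-D launch configuration: caps the data handled per
// multiprocessor, rounds the block size to whole warps and derives the grid
// size from it.  The constants describe a 14-SM, 448-core Fermi-class GPU.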
void createGrid( int numInput, dim3* grid, dim3* block )
{
const int MAXSIZE = 10000000;
const int NUMCORES = 448;
const int NUMMULTIPROC = 14;
const int BLOCKS_PER_MULTIPROC = 8;
const int MAX_WARPS_PER_MULTIPROC = 48;
const int MAX_DATA_PER_MULTIPROC = MAX_WARPS_PER_MULTIPROC*WARP_SIZE;
int size = numInput;
if (size > MAXSIZE) size = MAXSIZE;
int dataPerMultiproc = ceilDiv(size,NUMMULTIPROC);
if ( dataPerMultiproc > MAX_DATA_PER_MULTIPROC )
dataPerMultiproc = MAX_DATA_PER_MULTIPROC;
int blockSize = ceilDiv(dataPerMultiproc,BLOCKS_PER_MULTIPROC);
const int MAX_BLOCK_SIZE = 1024;
if (blockSize > MAX_BLOCK_SIZE) blockSize = MAX_BLOCK_SIZE;
int numBlocks = ceilDiv(size,blockSize);
int numWarps = ceilDiv(blockSize,WARP_SIZE) * numBlocks;
if (numWarps > NUMCORES)
{
blockSize = ceilDiv(blockSize,WARP_SIZE)*WARP_SIZE;
dataPerMultiproc = blockSize * BLOCKS_PER_MULTIPROC;
if ( dataPerMultiproc > MAX_DATA_PER_MULTIPROC )
blockSize -= WARP_SIZE;
}
size = blockSize*ceilDiv(size,blockSize);
if (size > MAXSIZE) size = MAXSIZE;
block->x = blockSize;
block->y = block->z = 1;
grid->x = size/blockSize;
grid->y = 1;
grid->z = 1;
}
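// Allocates the device buffers for particles, results and the flattened
// geometry, patches the geometry pointers for the device address and uploads
// the buffer.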
my_cuda_err cudainit( Geometry *geom, int N )
{
const mytimet t0 = mytimer();
numOutput = numInput = numInputPerRound = N;
do { cudaError_t errc = cudaSetDeviceFlags(0); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 157, errc }; return r; } } while(0);
do { cudaError_t errc = cudaMalloc( (void**)&gpuInput, sizeof(Particle)*numInput ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 159, errc }; return r; } } while(0);
do { cudaError_t errc = cudaMalloc( (void**)&gpuOutput, sizeof(G4double)*numOutput ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 160, errc }; return r; } } while(0);
do { cudaError_t errc = cudaMalloc( (void**)&gpuGeom, geom->size() ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 161, errc }; return r; } } while(0);
geom->relocate( gpuGeom );
cudaFuncSetCacheConfig(trace, cudaFuncCachePreferL1);
do { cudaError_t errc = cudaMemcpy( gpuGeom, geom->getBuffer(), geom->size(), cudaMemcpyHostToDevice ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 197, errc }; return r; } } while(0);
const mytimet t1 = mytimer();
myprint("Initialization: ");
myprinttdiff(t0, t1);
my_cuda_err ok = { 0, 0, 0, cudaSuccess }; return ok;
}
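// Processes the input in chunks of numInput particles: upload a chunk,
// launch the trace kernel and copy the integrated densities back.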
my_cuda_err cudaexec( G4double phys_step, int totalInput, Particle *input, G4double *output )
{
// Device scratch space for the trace kernel: Result receives one step value
// per track and nullVNode is constructed by every thread, so both must point
// at valid device memory.  The remaining buffers are not referenced by the
// kernel as written and stay null.
G4double * Result = 0;
SolidInfo * Solids = 0;
ResultInfo * Result_For_Current_Solid = 0;
FinalResult * Compacter_Result = 0;
G4SmartVoxelNode * nullVNode = 0;
do { cudaError_t errc = cudaMalloc( (void**)&Result, sizeof(G4double)*numInput ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 0, errc }; return r; } } while(0);
do { cudaError_t errc = cudaMalloc( (void**)&nullVNode, sizeof(G4SmartVoxelNode) ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 0, errc }; return r; } } while(0);
for ( int i = 0; i < totalInput; i += numInput )
{
if ( i + numInput > totalInput ) numInput = totalInput-i;
do { cudaError_t errc = cudaMemcpy( gpuInput, input+i, sizeof(Particle)*numInput, cudaMemcpyHostToDevice ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 333, errc }; return r; } } while(0);
dim3 grid, block;
createGrid( numInput, &grid, &block );
trace <<< grid, block >>>( gpuInput, gpuOutput, (G4VPhysicalVolume*)gpuGeom, phys_step, numInput, Result, Solids, Result_For_Current_Solid, Compacter_Result, nullVNode);
do { cudaError_t errc = cudaGetLastError(); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 340, errc }; return r; } } while(0);
do { cudaError_t errc = cudaMemcpy( output+i, gpuOutput, sizeof(G4double)*numInput, cudaMemcpyDeviceToHost ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 343, errc }; return r; } } while(0);
}
cudaFree( Result );
cudaFree( nullVNode );
my_cuda_err ok = { 0, 0, 0, cudaSuccess }; return ok;
}
my_cuda_err cudafinish()
{
const mytimet t0 = mytimer();
do { cudaError_t errc = cudaFree( gpuInput ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 356, errc }; return r; } } while(0);
do { cudaError_t errc = cudaFree( gpuOutput ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 357, errc }; return r; } } while(0);
do { cudaError_t errc = cudaFree( gpuGeom ); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 358, errc }; return r; } } while(0);
do { cudaError_t errc = cudaThreadExit(); if (errc != cudaSuccess) { my_cuda_err r = { cudaGetErrorString(errc), "cuda.cpp", 375, errc }; return r; } } while(0);
const mytimet t1 = mytimer();
myprint("Finalization: ");
myprinttdiff(t0, t1);
my_cuda_err ok = { 0, 0, 0, cudaSuccess }; return ok;
}
|
79a59b355811c9e33aa1fedce3ce5c4fef8afa82.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "customer_functions.h"
#include "cudaEGL.h"
#include <opencv2/core.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudafilters.hpp>
#include <hip/hip_runtime.h>
static void
pre_process (void **sBaseAddr,
unsigned int *smemsize,
unsigned int *swidth,
unsigned int *sheight,
unsigned int *spitch,
ColorFormat *sformat,
unsigned int nsurfcount,
void ** usrptr)
{
}
static void
post_process (void **sBaseAddr,
unsigned int *smemsize,
unsigned int *swidth,
unsigned int *sheight,
unsigned int *spitch,
ColorFormat *sformat,
unsigned int nsurfcount,
void ** usrptr)
{
}
/**
* Performs CUDA Operations on egl image.
*
* @param image : EGL image
*/
static void
gpu_process (EGLImageKHR image, void ** usrptr)
{
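// Register the EGL image as a graphics resource, map it to an eglFrame, optionally run the
// (commented-out) OpenCV CUDA filters on the pitched ABGR surface, then synchronize and
// unregister the resource.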
hipError_t status;
CUeglFrame eglFrame;
hipGraphicsResource_t pResource = NULL;
hipFree(0);
status = hipGraphicsEGLRegisterImage(&pResource, image, hipGraphicsMapFlagsNone);
if (status != hipSuccess) {
printf("hipGraphicsEGLRegisterImage failed : %d \n", status);
return;
}
status = hipGraphicsResourceGetMappedEglFrame( &eglFrame, pResource, 0, 0);
if (status != hipSuccess) {
printf ("hipGraphicsSubResourceGetMappedArray failed\n");
}
status = hipCtxSynchronize();
if (status != hipSuccess) {
printf ("hipCtxSynchronize failed \n");
}
if (eglFrame.frameType == CU_EGL_FRAME_TYPE_PITCH) {
if (eglFrame.eglColorFormat == CU_EGL_COLOR_FORMAT_ABGR) {
// // bilateral filter x 3
// cv::cuda::GpuMat gpuMat(eglFrame.height, eglFrame.width, CV_8UC4, eglFrame.frame.pPitch[0]);
// cv::cuda::bilateralFilter(gpuMat, gpuMat, 15, 30, 30);
// cv::cuda::bilateralFilter(gpuMat, gpuMat, 15, 30, 30);
// cv::cuda::bilateralFilter(gpuMat, gpuMat, 15, 30, 30);
} else {
printf ("Invalid eglcolorformat, \"RGBA\" format only.\n");
}
}
status = hipCtxSynchronize();
if (status != hipSuccess) {
printf ("hipCtxSynchronize failed after memcpy \n");
}
status = hipGraphicsUnregisterResource(pResource);
if (status != hipSuccess) {
printf("cuGraphicsEGLUnRegisterResource failed: %d \n", status);
}
}
extern "C" void
init (CustomerFunction * pFuncs)
{
pFuncs->fPreProcess = pre_process;
pFuncs->fGPUProcess = gpu_process;
pFuncs->fPostProcess = post_process;
}
extern "C" void
deinit (void)
{
/* deinitialization */
}
| 79a59b355811c9e33aa1fedce3ce5c4fef8afa82.cu | /*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include "customer_functions.h"
#include "cudaEGL.h"
#include <opencv2/core.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudafilters.hpp>
#include <cuda_runtime.h>
static void
pre_process (void **sBaseAddr,
unsigned int *smemsize,
unsigned int *swidth,
unsigned int *sheight,
unsigned int *spitch,
ColorFormat *sformat,
unsigned int nsurfcount,
void ** usrptr)
{
}
static void
post_process (void **sBaseAddr,
unsigned int *smemsize,
unsigned int *swidth,
unsigned int *sheight,
unsigned int *spitch,
ColorFormat *sformat,
unsigned int nsurfcount,
void ** usrptr)
{
}
/**
* Performs CUDA Operations on egl image.
*
* @param image : EGL image
*/
static void
gpu_process (EGLImageKHR image, void ** usrptr)
{
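// Register the EGL image as a CUDA graphics resource, map it to an eglFrame, optionally run
// the (commented-out) OpenCV CUDA filters on the pitched ABGR surface, then synchronize and
// unregister the resource.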
CUresult status;
CUeglFrame eglFrame;
CUgraphicsResource pResource = NULL;
cudaFree(0);
status = cuGraphicsEGLRegisterImage(&pResource, image, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
if (status != CUDA_SUCCESS) {
printf("cuGraphicsEGLRegisterImage failed : %d \n", status);
return;
}
status = cuGraphicsResourceGetMappedEglFrame( &eglFrame, pResource, 0, 0);
if (status != CUDA_SUCCESS) {
printf ("cuGraphicsSubResourceGetMappedArray failed\n");
}
status = cuCtxSynchronize();
if (status != CUDA_SUCCESS) {
printf ("cuCtxSynchronize failed \n");
}
if (eglFrame.frameType == CU_EGL_FRAME_TYPE_PITCH) {
if (eglFrame.eglColorFormat == CU_EGL_COLOR_FORMAT_ABGR) {
// // bilateral filter x 3
// cv::cuda::GpuMat gpuMat(eglFrame.height, eglFrame.width, CV_8UC4, eglFrame.frame.pPitch[0]);
// cv::cuda::bilateralFilter(gpuMat, gpuMat, 15, 30, 30);
// cv::cuda::bilateralFilter(gpuMat, gpuMat, 15, 30, 30);
// cv::cuda::bilateralFilter(gpuMat, gpuMat, 15, 30, 30);
} else {
printf ("Invalid eglcolorformat, \"RGBA\" format only.\n");
}
}
status = cuCtxSynchronize();
if (status != CUDA_SUCCESS) {
printf ("cuCtxSynchronize failed after memcpy \n");
}
status = cuGraphicsUnregisterResource(pResource);
if (status != CUDA_SUCCESS) {
printf("cuGraphicsEGLUnRegisterResource failed: %d \n", status);
}
}
extern "C" void
init (CustomerFunction * pFuncs)
{
pFuncs->fPreProcess = pre_process;
pFuncs->fGPUProcess = gpu_process;
pFuncs->fPostProcess = post_process;
}
extern "C" void
deinit (void)
{
/* deinitialization */
}
|
ea395845140e960a294ffa27d74ab63d7b83ee8c.hip | // !!! This is a file automatically generated by hipify!!!
#include <ctime>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/iterator/constant_iterator.h>
#include <string>
#include <iostream>
#include <stdio.h>
#include <fstream>
#include <chrono>
#include <queue>
#include <string.h>
#include <vector>
#include <algorithm>
#include <stdlib.h>
#include "HuffmanTreeBuilder.cpp"
using namespace std;
using namespace thrust;
__constant__ HuffmanTable deviceHuffmanTable[256];
__global__ void mykerneldecompres(myStringer* encode, int encodeSize,HuffmanTable* myStructure,int* myStrSize,myStringer* out,decompresserBinary* binary, int fs){
int UID= threadIdx.x + blockIdx.x * blockDim.x;
if(UID < encodeSize )
{
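// Unpack this thread's encoded bytes into a 0/1 array, most-significant bit first;
// only the low 'position' bits of the final byte are valid. The resulting bit stream
// is then matched against the Huffman table to recover the original characters.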
for(int i=0;i<=encode[UID].length ;i++)
{
if(i==encode[UID].length){
switch(encode[UID].position)
{
case 1:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 2:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 3:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 4:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 5:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x10) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 6:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x20) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x10) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 7:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x40) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x20) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x10) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 8:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x80) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x40) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x20) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x10) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
default:
printf("Fail",UID,encode[UID].position,encode[UID].length);
break;
}
break;
}
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x80) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x40) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x20) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x10) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
}
long p=0, k=0 ,j=0 ,e=0;
for ( p=0; p<binary[UID].length; p++)
{
for(k=0;k<myStrSize[0];k++)
{
for( j=0;j<myStructure[k].len;j++)
{
if (myStructure[k].code[j] == binary[UID].ch[p+j])
{
e++;
}
if(e == myStructure[k].len)
{
out[UID].ch[out[UID].length]=myStructure[k].c;
out[UID].length++;
p+=j;
if(p >= binary[UID].length)
goto jump;
goto jump;
}
if(j == myStructure[k].len-1)
e=0;
}
}
jump: e=0;
}
}
}
__global__ void kernelCompression(char* rawData,int size,int* myStrSize,myStringer* myString, int chunkSize)
{
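// Each thread compresses a chunkSize-byte slice of the input: every byte is looked up in
// the constant-memory Huffman table and its code bits are shifted into the output buffer;
// 'position' counts the bits used in the current byte and 'length' the completed bytes.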
int tableSize = myStrSize[0];
int UID= threadIdx.x + blockIdx.x * blockDim.x;
if(UID < size)
{
int o=0;
for(int j = UID * chunkSize; j<(chunkSize*UID)+chunkSize;j++)
{
if(j < size)
for(int i =0;i<tableSize;i++)
{
if(rawData[j] == deviceHuffmanTable[i].c)
{
for(int k=0;k< deviceHuffmanTable[i].len;k++)
{
if(myString[UID].position ==8)
{
o++;
myString[UID].position =0;
myString[UID].length++;
myString[UID].ch[o] = (myString[UID].ch[o] << 1) | deviceHuffmanTable[i].code[k];
myString[UID].position++;
}else{
myString[UID].ch[o] = (myString[UID].ch[o] << 1) | deviceHuffmanTable[i].code[k];
myString[UID].position++;
}
}
break;
}
}
}
}
}
int startGPUCompression(const char* inputFilename,const int BLOCK_NUMBER, const int THREAD_NUMBER,const char* outputFilename)
{
clock_t begin = clock();
int fileSize =0;
hipError_t cudaReturnValue;
char* device_rawData; //device input containing the raw data
FILE *f = fopen(inputFilename, "rb");
if(f == NULL) printf("FILE NOT FOUND");
string host_rawData;
if(f)
{
fseek(f, 0, SEEK_END);
fileSize = ftell(f);
printf("-----------------------------------------------------------------------------------------\nfile size is: %d %f\n\n", fileSize,ceil(fileSize/(float)(BLOCK_NUMBER*THREAD_NUMBER)));
host_rawData.resize(ftell(f));
rewind(f);
fread(&host_rawData[0],1,host_rawData.size(),f);
fclose(f);
}
hipMalloc(&device_rawData, host_rawData.size());
hipMemcpy(device_rawData, host_rawData.c_str(), host_rawData.size(), hipMemcpyHostToDevice);
device_vector<char> device_vectorIn(host_rawData.begin(), host_rawData.end());
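// Build the symbol histogram with Thrust: sort the bytes, then reduce_by_key over a
// constant iterator of 1 to obtain each distinct symbol and its frequency.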
thrust::sort(device_vectorIn.begin(), device_vectorIn.end());
device_vector<char> device_symbol(device_vectorIn.size());
device_vector<int> device_symbolFrequency(device_vectorIn.size());
thrust::pair<device_vector<char>::iterator, device_vector<int>::iterator> temporaryPair = reduce_by_key(device_vectorIn.begin(), device_vectorIn.end(),
make_constant_iterator(1), device_symbol.begin(), device_symbolFrequency.begin());
device_symbol.erase(temporaryPair.first, device_symbol.end());
device_symbolFrequency.erase(temporaryPair.second, device_symbolFrequency.end());
hipDeviceSynchronize();
clock_t end_thrust = clock();
int character_frequency[256];
char character[256];
thrust::copy(device_symbolFrequency.begin(),device_symbolFrequency.end(),character_frequency);
thrust::copy(device_symbol.begin(), device_symbol.end(), character);
//Build the tree and the table
buildHuffmanTree(character,character_frequency,device_symbol.size());
Node * root = myQueue.top();
myQueue.pop();
int binaryCode[256], top = 0;
buildHuffmanTable(root, binaryCode, top);
// deviceHuffmanTable resides in __constant__ memory, so no hipMalloc is needed;
// copy the host table directly to the symbol.
cudaReturnValue = hipMemcpyToSymbol (deviceHuffmanTable, hostHuffmanTable, sizeof(HuffmanTable)*myIdx );
if (cudaReturnValue != hipSuccess) printf("hipMemcpyToSymbol failed!");
int* d_myStrSize;
hipMalloc((void**) &d_myStrSize, sizeof(int));
hipMemcpy(d_myStrSize, &myIdx, sizeof(int),hipMemcpyHostToDevice);
myStringer* myString;
hipMalloc((void**)&myString, sizeof(myStringer)*THREAD_NUMBER*BLOCK_NUMBER);
hipMemset(myString,0,sizeof(myStringer)*THREAD_NUMBER*BLOCK_NUMBER);
cout<<"SIZE: : "<<host_rawData.size()<<" "<<myIdx<<endl;
int chunkSize=ceil(fileSize/(float)(BLOCK_NUMBER*THREAD_NUMBER));
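// One thread per chunk: BLOCK_NUMBER * THREAD_NUMBER threads each handle chunkSize input bytes.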
hipLaunchKernelGGL(( kernelCompression), dim3(BLOCK_NUMBER),dim3(THREAD_NUMBER), 0, 0, device_rawData,host_rawData.size(),d_myStrSize,myString,chunkSize);
ofstream myFile (outputFilename, ios::out | ios::binary);
myFile<<BLOCK_NUMBER<<" "<<THREAD_NUMBER<<endl;
myFile<<fileSize<<endl<<myIdx<<endl;
for(int i=0;i<myIdx;i++)
myFile<<(int)character[i]<<" "<<character_frequency[i]<<" ";
hipDeviceSynchronize();
cout<<"End compression\n";
clock_t end_encode = clock();
myStringer *host=(myStringer*)malloc(sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
hipMemcpy(host, myString, sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER,hipMemcpyDeviceToHost);
for( int i=0;i<BLOCK_NUMBER*THREAD_NUMBER && i< fileSize ;i++)
if(host[i].ch!=NULL)
{
myFile<<host[i].length<<" "<<host[i].position<<" ";
myFile.write (host[i].ch, host[i].length+1);
}
hipFree(device_rawData);
hipFree(myString);
hipFree(d_myStrSize);
free(host);
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
cout<<"ALL: " <<elapsed_secs<<endl;
cout<<"Thrust: "<< double(end_thrust - begin) / CLOCKS_PER_SEC<<endl;
cout<<"Encode: "<< double(end_encode - begin) / CLOCKS_PER_SEC<<endl;
cout<<"Chunk size" << chunkSize;
return 0;
}
int startGPUDecompression(const char* inputFileName, const char* outputFileName)
{
clock_t start = clock();
int sizeOfTable =0;
int fileSize = 0;
char ch[256];
int character_frequency[256];
int BLOCK_NUMBER = 256,THREAD_NUMBER = 32;
ifstream inputEncode(inputFileName,ios::in | ios::binary);
if(!inputEncode)
printf("File not found or in use\n");
inputEncode>>BLOCK_NUMBER;
inputEncode>>THREAD_NUMBER;
inputEncode>>fileSize;
inputEncode>>sizeOfTable;
printf("\n------------------------------------\n%d\n",fileSize);
myStringer *host=(myStringer*)malloc(sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
int temp;
for(int i = 0 ; i< sizeOfTable;i++)
{
inputEncode>>temp;
inputEncode>>character_frequency[i];
ch[i]=(char)temp;
}
buildHuffmanTree(ch,character_frequency,sizeOfTable);
Node * root = myQueue.top();
myQueue.pop();
int binaryCode[256], top = 0;
buildHuffmanTable(root, binaryCode, top);
char x[10];
for(int i = 0 ; i< BLOCK_NUMBER*THREAD_NUMBER && i<fileSize;i++)
{
inputEncode>>host[i].length;
inputEncode>>host[i].position;
inputEncode.read(x,1);
inputEncode.read(host[i].ch,host[i].length+1);
}
cout<<"Stuff has been read\n";
clock_t middle = clock();
HuffmanTable *d_Str;
hipMalloc((void**) &d_Str, sizeof(HuffmanTable)*myIdx);
hipMemcpy(d_Str, hostHuffmanTable, sizeof(HuffmanTable)*myIdx,hipMemcpyHostToDevice);
int* d_myStrSize;
hipMalloc((void**) &d_myStrSize, sizeof(int));
hipMemcpy(d_myStrSize, &myIdx, sizeof(int),hipMemcpyHostToDevice);
myStringer* d_encoded;
hipMalloc((void**) &d_encoded,sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
hipMemcpy(d_encoded,host,sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER, hipMemcpyHostToDevice);
myStringer* d_output;
hipMalloc((void**) &d_output,sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
hipMemset(d_output,0,sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
decompresserBinary* d_bin;
hipMalloc((void**) &d_bin,sizeof(decompresserBinary)*BLOCK_NUMBER*THREAD_NUMBER);
hipMemset(d_bin,0,sizeof(decompresserBinary)*BLOCK_NUMBER*THREAD_NUMBER);
hipLaunchKernelGGL(( mykerneldecompres), dim3(BLOCK_NUMBER),dim3(THREAD_NUMBER), 0, 0, d_encoded,BLOCK_NUMBER*THREAD_NUMBER,d_Str,d_myStrSize,d_output,d_bin,fileSize);
hipDeviceSynchronize();
myStringer *host_output=(myStringer*)malloc(sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
memset(host_output,0,(sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER));
hipMemcpy(host_output, d_output, sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER,hipMemcpyDeviceToHost);
ofstream out(outputFileName,ios::binary);
for( int i=0;i<BLOCK_NUMBER*THREAD_NUMBER ;i++)
{
if(host_output[i].ch!=NULL)
out<<host_output[i].ch;
}
clock_t end = clock();
cout<<"Middle : " << double(middle-start)/ CLOCKS_PER_SEC<<endl;
cout<<"Decompress time: "<<double(end - start) / CLOCKS_PER_SEC;
return 0;
}
| ea395845140e960a294ffa27d74ab63d7b83ee8c.cu | #include <ctime>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/iterator/constant_iterator.h>
#include <string>
#include <iostream>
#include <stdio.h>
#include <fstream>
#include <chrono>
#include <queue>
#include <string.h>
#include <vector>
#include <algorithm>
#include <stdlib.h>
#include "HuffmanTreeBuilder.cpp"
using namespace std;
using namespace thrust;
__constant__ HuffmanTable deviceHuffmanTable[256];
__global__ void mykerneldecompres(myStringer* encode, int encodeSize,HuffmanTable* myStructure,int* myStrSize,myStringer* out,decompresserBinary* binary, int fs){
int UID= threadIdx.x + blockIdx.x * blockDim.x;
if(UID < encodeSize )
{
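// Unpack this thread's encoded bytes into a 0/1 array, most-significant bit first;
// only the low 'position' bits of the final byte are valid. The resulting bit stream
// is then matched against the Huffman table to recover the original characters.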
for(int i=0;i<=encode[UID].length ;i++)
{
if(i==encode[UID].length){
switch(encode[UID].position)
{
case 1:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 2:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 3:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 4:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 5:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x10) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 6:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x20) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x10) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 7:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x40) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x20) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x10) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
case 8:
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x80) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x40) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x20) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x10) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
break;
default:
printf("Fail",UID,encode[UID].position,encode[UID].length);
break;
}
break;
}
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x80) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x40) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x20) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x10) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x08) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x04) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x02) ? 1:0;
binary[UID].length++;
binary[UID].ch[binary[UID].length] = (encode[UID].ch[i] & 0x01) ? 1:0;
binary[UID].length++;
}
long p=0, k=0 ,j=0 ,e=0;
for ( p=0; p<binary[UID].length; p++)
{
for(k=0;k<myStrSize[0];k++)
{
for( j=0;j<myStructure[k].len;j++)
{
if (myStructure[k].code[j] == binary[UID].ch[p+j])
{
e++;
}
if(e == myStructure[k].len)
{
out[UID].ch[out[UID].length]=myStructure[k].c;
out[UID].length++;
p+=j;
if(p >= binary[UID].length)
goto jump;
goto jump;
}
if(j == myStructure[k].len-1)
e=0;
}
}
jump: e=0;
}
}
}
__global__ void kernelCompression(char* rawData,int size,int* myStrSize,myStringer* myString, int chunkSize)
{
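// Each thread compresses a chunkSize-byte slice of the input: every byte is looked up in
// the constant-memory Huffman table and its code bits are shifted into the output buffer;
// 'position' counts the bits used in the current byte and 'length' the completed bytes.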
int tableSize = myStrSize[0];
int UID= threadIdx.x + blockIdx.x * blockDim.x;
if(UID < size)
{
int o=0;
for(int j = UID * chunkSize; j<(chunkSize*UID)+chunkSize;j++)
{
if(j < size)
for(int i =0;i<tableSize;i++)
{
if(rawData[j] == deviceHuffmanTable[i].c)
{
for(int k=0;k< deviceHuffmanTable[i].len;k++)
{
if(myString[UID].position ==8)
{
o++;
myString[UID].position =0;
myString[UID].length++;
myString[UID].ch[o] = (myString[UID].ch[o] << 1) | deviceHuffmanTable[i].code[k];
myString[UID].position++;
}else{
myString[UID].ch[o] = (myString[UID].ch[o] << 1) | deviceHuffmanTable[i].code[k];
myString[UID].position++;
}
}
break;
}
}
}
}
}
int startGPUCompression(const char* inputFilename,const int BLOCK_NUMBER, const int THREAD_NUMBER,const char* outputFilename)
{
clock_t begin = clock();
int fileSize =0;
cudaError_t cudaReturnValue;
char* device_rawData; //device input containing the raw data
FILE *f = fopen(inputFilename, "rb");
if(f == NULL) printf("FILE NOT FOUND");
string host_rawData;
if(f)
{
fseek(f, 0, SEEK_END);
fileSize = ftell(f);
printf("-----------------------------------------------------------------------------------------\nfile size is: %d %f\n\n", fileSize,ceil(fileSize/(float)(BLOCK_NUMBER*THREAD_NUMBER)));
host_rawData.resize(ftell(f));
rewind(f);
fread(&host_rawData[0],1,host_rawData.size(),f);
fclose(f);
}
cudaMalloc(&device_rawData, host_rawData.size());
cudaMemcpy(device_rawData, host_rawData.c_str(), host_rawData.size(), cudaMemcpyHostToDevice);
device_vector<char> device_vectorIn(host_rawData.begin(), host_rawData.end());
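// Build the symbol histogram with Thrust: sort the bytes, then reduce_by_key over a
// constant iterator of 1 to obtain each distinct symbol and its frequency.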
thrust::sort(device_vectorIn.begin(), device_vectorIn.end());
device_vector<char> device_symbol(device_vectorIn.size());
device_vector<int> device_symbolFrequency(device_vectorIn.size());
thrust::pair<device_vector<char>::iterator, device_vector<int>::iterator> temporaryPair = reduce_by_key(device_vectorIn.begin(), device_vectorIn.end(),
make_constant_iterator(1), device_symbol.begin(), device_symbolFrequency.begin());
device_symbol.erase(temporaryPair.first, device_symbol.end());
device_symbolFrequency.erase(temporaryPair.second, device_symbolFrequency.end());
cudaDeviceSynchronize();
clock_t end_thrust = clock();
int character_frequency[256];
char character[256];
thrust::copy(device_symbolFrequency.begin(),device_symbolFrequency.end(),character_frequency);
thrust::copy(device_symbol.begin(), device_symbol.end(), character);
//Build the tree and the table
buildHuffmanTree(character,character_frequency,device_symbol.size());
Node * root = myQueue.top();
myQueue.pop();
int binaryCode[256], top = 0;
buildHuffmanTable(root, binaryCode, top);
// deviceHuffmanTable resides in __constant__ memory, so no cudaMalloc is needed;
// copy the host table directly to the symbol.
cudaReturnValue = cudaMemcpyToSymbol (deviceHuffmanTable, hostHuffmanTable, sizeof(HuffmanTable)*myIdx );
if (cudaReturnValue != cudaSuccess) printf("cudaMemcpyToSymbol failed!");
int* d_myStrSize;
cudaMalloc((void**) &d_myStrSize, sizeof(int));
cudaMemcpy(d_myStrSize, &myIdx, sizeof(int),cudaMemcpyHostToDevice);
myStringer* myString;
cudaMalloc((void**)&myString, sizeof(myStringer)*THREAD_NUMBER*BLOCK_NUMBER);
cudaMemset(myString,0,sizeof(myStringer)*THREAD_NUMBER*BLOCK_NUMBER);
cout<<"SIZE: : "<<host_rawData.size()<<" "<<myIdx<<endl;
int chunkSize=ceil(fileSize/(float)(BLOCK_NUMBER*THREAD_NUMBER));
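// One thread per chunk: BLOCK_NUMBER * THREAD_NUMBER threads each handle chunkSize input bytes.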
kernelCompression<<<BLOCK_NUMBER,THREAD_NUMBER>>>(device_rawData,host_rawData.size(),d_myStrSize,myString,chunkSize);
ofstream myFile (outputFilename, ios::out | ios::binary);
myFile<<BLOCK_NUMBER<<" "<<THREAD_NUMBER<<endl;
myFile<<fileSize<<endl<<myIdx<<endl;
for(int i=0;i<myIdx;i++)
myFile<<(int)character[i]<<" "<<character_frequency[i]<<" ";
cudaDeviceSynchronize();
cout<<"End compression\n";
clock_t end_encode = clock();
myStringer *host=(myStringer*)malloc(sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
cudaMemcpy(host, myString, sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER,cudaMemcpyDeviceToHost);
for( int i=0;i<BLOCK_NUMBER*THREAD_NUMBER && i< fileSize ;i++)
if(host[i].ch!=NULL)
{
myFile<<host[i].length<<" "<<host[i].position<<" ";
myFile.write (host[i].ch, host[i].length+1);
}
cudaFree(device_rawData);
cudaFree(myString);
cudaFree(d_myStrSize);
free(host);
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
cout<<"ALL: " <<elapsed_secs<<endl;
cout<<"Thrust: "<< double(end_thrust - begin) / CLOCKS_PER_SEC<<endl;
cout<<"Encode: "<< double(end_encode - begin) / CLOCKS_PER_SEC<<endl;
cout<<"Chunk size" << chunkSize;
return 0;
}
int startGPUDecompression(const char* inputFileName, const char* outputFileName)
{
clock_t start = clock();
int sizeOfTable =0;
int fileSize = 0;
char ch[256];
int character_frequency[256];
int BLOCK_NUMBER = 256,THREAD_NUMBER = 32;
ifstream inputEncode(inputFileName,ios::in | ios::binary);
if(!inputEncode)
printf("File not found or in use\n");
inputEncode>>BLOCK_NUMBER;
inputEncode>>THREAD_NUMBER;
inputEncode>>fileSize;
inputEncode>>sizeOfTable;
printf("\n------------------------------------\n%d\n",fileSize);
myStringer *host=(myStringer*)malloc(sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
int temp;
for(int i = 0 ; i< sizeOfTable;i++)
{
inputEncode>>temp;
inputEncode>>character_frequency[i];
ch[i]=(char)temp;
}
buildHuffmanTree(ch,character_frequency,sizeOfTable);
Node * root = myQueue.top();
myQueue.pop();
int binaryCode[256], top = 0;
buildHuffmanTable(root, binaryCode, top);
char x[10];
for(int i = 0 ; i< BLOCK_NUMBER*THREAD_NUMBER && i<fileSize;i++)
{
inputEncode>>host[i].length;
inputEncode>>host[i].position;
inputEncode.read(x,1);
inputEncode.read(host[i].ch,host[i].length+1);
}
cout<<"Stuff has been read\n";
clock_t middle = clock();
HuffmanTable *d_Str;
cudaMalloc((void**) &d_Str, sizeof(HuffmanTable)*myIdx);
cudaMemcpy(d_Str, hostHuffmanTable, sizeof(HuffmanTable)*myIdx,cudaMemcpyHostToDevice);
int* d_myStrSize;
cudaMalloc((void**) &d_myStrSize, sizeof(int));
cudaMemcpy(d_myStrSize, &myIdx, sizeof(int),cudaMemcpyHostToDevice);
myStringer* d_encoded;
cudaMalloc((void**) &d_encoded,sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
cudaMemcpy(d_encoded,host,sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER, cudaMemcpyHostToDevice);
myStringer* d_output;
cudaMalloc((void**) &d_output,sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
cudaMemset(d_output,0,sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
decompresserBinary* d_bin;
cudaMalloc((void**) &d_bin,sizeof(decompresserBinary)*BLOCK_NUMBER*THREAD_NUMBER);
cudaMemset(d_bin,0,sizeof(decompresserBinary)*BLOCK_NUMBER*THREAD_NUMBER);
mykerneldecompres<<<BLOCK_NUMBER,THREAD_NUMBER>>>(d_encoded,BLOCK_NUMBER*THREAD_NUMBER,d_Str,d_myStrSize,d_output,d_bin,fileSize);
cudaDeviceSynchronize();
myStringer *host_output=(myStringer*)malloc(sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER);
memset(host_output,0,(sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER));
cudaMemcpy(host_output, d_output, sizeof(myStringer)*BLOCK_NUMBER*THREAD_NUMBER,cudaMemcpyDeviceToHost);
ofstream out(outputFileName,ios::binary);
for( int i=0;i<BLOCK_NUMBER*THREAD_NUMBER ;i++)
{
if(host_output[i].ch!=NULL)
out<<host_output[i].ch;
}
clock_t end = clock();
cout<<"Middle : " << double(middle-start)/ CLOCKS_PER_SEC<<endl;
cout<<"Decompress time: "<<double(end - start) / CLOCKS_PER_SEC;
return 0;
}
|
75073157e26705402144754db0d248ff36f9a0e6.hip | // !!! This is a file automatically generated by hipify!!!
/////////////////////////////////////////////////////////////////////////
// Parallel Computing Assignment 3
// Chris Jimenez
// 5/1/14
// This CUDA program finds the max integer in an array of random integers.
// This program DOES NOT use shared memory and DOES take thread
// divergence into consideration. The modification can be seen
// in the kernel function with the use of the WARP_SIZE defined var.
//
/////////////////////////////////////////////////////////////////////////
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//define numebr of integers...
#define NUM_OF_INTEGERS 65536
//define max integer
#define MAX 100000
#define WARP_SIZE 32
///////////////////////////////////
/*The following is dependent on whatever GPU this program is running on.
If running on the NYU GPUs, the max threads per block is 512.
Running on an NVIDIA GeForce GT 650M (on a personal machine), the max threads
per block is 1024.
*/
#define THREADS_PER_BLOCK 512
#define NUM_BLOCKS NUM_OF_INTEGERS/THREADS_PER_BLOCK
/****** Function declarations */
void fill_array();
__global__ void get_max(int *array);
/********************************/
/////////////////////////////////////////////////////////
/*******************************************************/
/* Function fills the given array a with random integers */
void fill_array(int *a){
int i;
time_t t;
/* Intializes random number generator */
srand((unsigned) time(&t));
for(i = 0; i < NUM_OF_INTEGERS; i++){
a[i] = random() % MAX;
}
}
/*******************************************************/
/* Kernel Function finds the max integer in given array by
using reduction technique. Ultimately, the largest
will be located at the 0th position of the array */
__global__ void get_max(int *array){
int temp;
int index = threadIdx.x + (blockDim.x * blockIdx.x);
int nTotalThreads = NUM_OF_INTEGERS; // Total number of active threads
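// Halve the active range until only one warp's worth (WARP_SIZE) of candidates remains;
// the final 32 values are left in array[0..31] and reduced on the host, so the branch
// below never splits a warp.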
while(nTotalThreads > WARP_SIZE)
{
int halfPoint = nTotalThreads / 2; // divide by two
// only the first half of the threads will be active.
if (index < halfPoint){
temp = array[ index + halfPoint ];
if (temp > array[ index ]) {
array[index] = temp;
}
}
__syncthreads();
nTotalThreads = nTotalThreads / 2; // divide by two.
}
// at this point...nTotalThreads == 32
// that means that array[0:31] has the top
// 32 values...
}
/*******************************************************/
//Main function.....
int main(int argc, char *argv[]){
int *h_array; //array of random integers....
int *d_array; //device copy...
int max = 0;
printf("Initializing data...\n");
//allocating space for the array on host
h_array = (int *) malloc(NUM_OF_INTEGERS * sizeof(int));
//fill in random array
fill_array(h_array);
//allocate space for array and resultmax on device
hipMalloc( (void **)&d_array, sizeof(int) * NUM_OF_INTEGERS );
//Copy array from host to device...
hipMemcpy(d_array, h_array, sizeof(int) * NUM_OF_INTEGERS, hipMemcpyHostToDevice);
//call kernel! using for loop
hipLaunchKernelGGL(( get_max), dim3(NUM_BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, d_array);
//Copy array from device to host...
hipMemcpy(h_array, d_array, sizeof(int) * NUM_OF_INTEGERS, hipMemcpyDeviceToHost);
//given the top 32 largest numbers, search through to get max...
for(int i = 0; i < WARP_SIZE; i++){
if( max < h_array[i]){
max = h_array[i];
}
}
//print max value...
printf("The max integer in the array is: %d\n", h_array[0]);
printf("Cleaning up...\n");
free(h_array);
hipFree(d_array);
return 0;
} | 75073157e26705402144754db0d248ff36f9a0e6.cu | /////////////////////////////////////////////////////////////////////////
// Parallel Computing Assignment 3
// Chris Jimenez
// 5/1/14
// This CUDA program finds the max integer in an array of random integers.
// This program DOES NOT use shared memory and DOES take thread
// divergence into consideration. The modification can be seen
// in the kernel function with the use of the WARP_SIZE defined var.
//
/////////////////////////////////////////////////////////////////////////
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//define numebr of integers...
#define NUM_OF_INTEGERS 65536
//define max integer
#define MAX 100000
#define WARP_SIZE 32
///////////////////////////////////
/*The following is dependent on whatever GPU this program is running on.
If running on the NYU GPUs, the max threads per block is 512.
Running on an NVIDIA GeForce GT 650M (on a personal machine), the max threads
per block is 1024.
*/
#define THREADS_PER_BLOCK 512
#define NUM_BLOCKS NUM_OF_INTEGERS/THREADS_PER_BLOCK
/****** Function declarations */
void fill_array();
__global__ void get_max(int *array);
/********************************/
/////////////////////////////////////////////////////////
/*******************************************************/
/* Function fills the given array a with random integers */
void fill_array(int *a){
int i;
time_t t;
/* Intializes random number generator */
srand((unsigned) time(&t));
for(i = 0; i < NUM_OF_INTEGERS; i++){
a[i] = random() % MAX;
}
}
/*******************************************************/
/* Kernel Function finds the max integer in given array by
using reduction technique. Ultimately, the largest
will be located at the 0th position of the array */
__global__ void get_max(int *array){
int temp;
int index = threadIdx.x + (blockDim.x * blockIdx.x);
int nTotalThreads = NUM_OF_INTEGERS; // Total number of active threads
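// Halve the active range until only one warp's worth (WARP_SIZE) of candidates remains;
// the final 32 values are left in array[0..31] and reduced on the host, so the branch
// below never splits a warp.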
while(nTotalThreads > WARP_SIZE)
{
int halfPoint = nTotalThreads / 2; // divide by two
// only the first half of the threads will be active.
if (index < halfPoint){
temp = array[ index + halfPoint ];
if (temp > array[ index ]) {
array[index] = temp;
}
}
__syncthreads();
nTotalThreads = nTotalThreads / 2; // divide by two.
}
// at this point...nTotalThreads == 32
// that means that array[0:31] has the top
// 32 values...
}
/*******************************************************/
//Main function.....
int main(int argc, char *argv[]){
int *h_array; //array of random integers....
int *d_array; //device copy...
int max = 0;
printf("Initializing data...\n");
//allocating space for the array on host
h_array = (int *) malloc(NUM_OF_INTEGERS * sizeof(int));
//fill in random array
fill_array(h_array);
//allocate space for array and resultmax on device
cudaMalloc( (void **)&d_array, sizeof(int) * NUM_OF_INTEGERS );
//Copy array from host to device...
cudaMemcpy(d_array, h_array, sizeof(int) * NUM_OF_INTEGERS, cudaMemcpyHostToDevice);
//call kernel! using for loop
get_max<<<NUM_BLOCKS,THREADS_PER_BLOCK>>>(d_array);
//Copy array from device to host...
cudaMemcpy(h_array, d_array, sizeof(int) * NUM_OF_INTEGERS, cudaMemcpyDeviceToHost);
//given the top 32 largest numbers, search through to get max...
for(int i = 0; i < WARP_SIZE; i++){
if( max < h_array[i]){
max = h_array[i];
}
}
//print max value...
printf("The max integer in the array is: %d\n", h_array[0]);
printf("Cleaning up...\n");
free(h_array);
cudaFree(d_array);
return 0;
} |
f13e3cacf28838efe21f3101d036d70917084bed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/audio/preemphasis_filter_op.h"
#include <vector>
#include "dali/pipeline/data/types.h"
namespace dali {
namespace detail {
template <typename OutputType, typename InputType>
struct SampleDescriptor {
const InputType *in;
OutputType *out;
float coeff;
int64_t size;
};
using BorderType = PreemphasisFilter<GPUBackend>::BorderType;
template <typename OutputType, typename InputType>
void __global__ PreemphasisFilterKernel(const SampleDescriptor<OutputType, InputType> *samples,
BorderType border_type) {
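// One audio sample per blockIdx.y; threads grid-stride over it computing
// out[k] = in[k] - coeff * in[k-1], with k == 0 handled according to the border policy.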
const auto &sample = samples[blockIdx.y];
int64_t block_size = blockDim.x;
int64_t block_start = block_size * blockIdx.x;
int64_t grid_stride = block_size * gridDim.x;
int64_t k = block_start + threadIdx.x;
if (k >= sample.size)
return;
if (k == 0) {
if (border_type == BorderType::Zero) {
sample.out[k] = sample.in[k];
} else {
// BorderType::Reflect or BorderType::Clamp
InputType border = (border_type == BorderType::Reflect) ? sample.in[1] : sample.in[0];
sample.out[k] = sample.in[k] - sample.coeff * border;
}
k += grid_stride;
}
for (; k < sample.size; k += grid_stride)
sample.out[k] = sample.in[k] - sample.coeff * sample.in[k-1];
}
} // namespace detail
class PreemphasisFilterGPU : public PreemphasisFilter<GPUBackend> {
public:
explicit PreemphasisFilterGPU(const OpSpec &spec) : PreemphasisFilter<GPUBackend>(spec) {
// void is OK here, pointer sizes are the same size
int64_t sz = max_batch_size_ * sizeof(detail::SampleDescriptor<void, void>);
scratch_mem_.Resize({sz}, DALI_UINT8);
}
void RunImpl(workspace_t<GPUBackend> &ws) override;
private:
template <typename OutputType, typename InputType>
void RunImplTyped(workspace_t<GPUBackend> &ws);
Tensor<GPUBackend> scratch_mem_;
};
template <typename OutputType, typename InputType>
void PreemphasisFilterGPU::RunImplTyped(workspace_t<GPUBackend> &ws) {
using SampleDesc = detail::SampleDescriptor<OutputType, InputType>;
const auto &input = ws.Input<GPUBackend>(0);
auto &output = ws.Output<GPUBackend>(0);
auto curr_batch_size = ws.GetInputBatchSize(0);
std::vector<SampleDesc> samples_cpu(curr_batch_size);
for (int sample_idx = 0; sample_idx < curr_batch_size; sample_idx++) {
auto &sample = samples_cpu[sample_idx];
sample.in = input.tensor<InputType>(sample_idx);
sample.out = output.mutable_tensor<OutputType>(sample_idx);
sample.size = volume(input.tensor_shape(sample_idx));
sample.coeff = preemph_coeff_[sample_idx];
}
int64_t sz = curr_batch_size * sizeof(SampleDesc);
scratch_mem_.Resize({sz}, DALI_UINT8);
auto sample_descs_gpu = reinterpret_cast<SampleDesc*>(scratch_mem_.mutable_data<uint8_t>());
auto stream = ws.stream();
CUDA_CALL(
hipMemcpyAsync(sample_descs_gpu, samples_cpu.data(), sz, hipMemcpyHostToDevice, stream));
int block = 256;
auto blocks_per_sample = std::max(32, 1024 / curr_batch_size);
dim3 grid(blocks_per_sample, curr_batch_size);
hipLaunchKernelGGL(( detail::PreemphasisFilterKernel), dim3(grid), dim3(block), 0, stream, sample_descs_gpu, border_type_);
}
void PreemphasisFilterGPU::RunImpl(workspace_t<GPUBackend> &ws) {
const auto &input = ws.template Input<GPUBackend>(0);
TYPE_SWITCH(input.type(), type2id, InputType, PREEMPH_TYPES, (
TYPE_SWITCH(output_type_, type2id, OutputType, PREEMPH_TYPES, (
RunImplTyped<OutputType, InputType>(ws);
), DALI_FAIL(make_string("Unsupported output type: ", output_type_))); // NOLINT
), DALI_FAIL(make_string("Unsupported input type: ", input.type()))); // NOLINT
}
DALI_REGISTER_OPERATOR(PreemphasisFilter, PreemphasisFilterGPU, GPU);
} // namespace dali
| f13e3cacf28838efe21f3101d036d70917084bed.cu | // Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/audio/preemphasis_filter_op.h"
#include <vector>
#include "dali/pipeline/data/types.h"
namespace dali {
namespace detail {
template <typename OutputType, typename InputType>
struct SampleDescriptor {
const InputType *in;
OutputType *out;
float coeff;
int64_t size;
};
using BorderType = PreemphasisFilter<GPUBackend>::BorderType;
template <typename OutputType, typename InputType>
void __global__ PreemphasisFilterKernel(const SampleDescriptor<OutputType, InputType> *samples,
BorderType border_type) {
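// One audio sample per blockIdx.y; threads grid-stride over it computing
// out[k] = in[k] - coeff * in[k-1], with k == 0 handled according to the border policy.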
const auto &sample = samples[blockIdx.y];
int64_t block_size = blockDim.x;
int64_t block_start = block_size * blockIdx.x;
int64_t grid_stride = block_size * gridDim.x;
int64_t k = block_start + threadIdx.x;
if (k >= sample.size)
return;
if (k == 0) {
if (border_type == BorderType::Zero) {
sample.out[k] = sample.in[k];
} else {
// BorderType::Reflect or BorderType::Clamp
InputType border = (border_type == BorderType::Reflect) ? sample.in[1] : sample.in[0];
sample.out[k] = sample.in[k] - sample.coeff * border;
}
k += grid_stride;
}
for (; k < sample.size; k += grid_stride)
sample.out[k] = sample.in[k] - sample.coeff * sample.in[k-1];
}
} // namespace detail
class PreemphasisFilterGPU : public PreemphasisFilter<GPUBackend> {
public:
explicit PreemphasisFilterGPU(const OpSpec &spec) : PreemphasisFilter<GPUBackend>(spec) {
// void is OK here, pointer sizes are the same size
int64_t sz = max_batch_size_ * sizeof(detail::SampleDescriptor<void, void>);
scratch_mem_.Resize({sz}, DALI_UINT8);
}
void RunImpl(workspace_t<GPUBackend> &ws) override;
private:
template <typename OutputType, typename InputType>
void RunImplTyped(workspace_t<GPUBackend> &ws);
Tensor<GPUBackend> scratch_mem_;
};
template <typename OutputType, typename InputType>
void PreemphasisFilterGPU::RunImplTyped(workspace_t<GPUBackend> &ws) {
using SampleDesc = detail::SampleDescriptor<OutputType, InputType>;
const auto &input = ws.Input<GPUBackend>(0);
auto &output = ws.Output<GPUBackend>(0);
auto curr_batch_size = ws.GetInputBatchSize(0);
std::vector<SampleDesc> samples_cpu(curr_batch_size);
for (int sample_idx = 0; sample_idx < curr_batch_size; sample_idx++) {
auto &sample = samples_cpu[sample_idx];
sample.in = input.tensor<InputType>(sample_idx);
sample.out = output.mutable_tensor<OutputType>(sample_idx);
sample.size = volume(input.tensor_shape(sample_idx));
sample.coeff = preemph_coeff_[sample_idx];
}
int64_t sz = curr_batch_size * sizeof(SampleDesc);
scratch_mem_.Resize({sz}, DALI_UINT8);
auto sample_descs_gpu = reinterpret_cast<SampleDesc*>(scratch_mem_.mutable_data<uint8_t>());
auto stream = ws.stream();
CUDA_CALL(
cudaMemcpyAsync(sample_descs_gpu, samples_cpu.data(), sz, cudaMemcpyHostToDevice, stream));
int block = 256;
auto blocks_per_sample = std::max(32, 1024 / curr_batch_size);
dim3 grid(blocks_per_sample, curr_batch_size);
detail::PreemphasisFilterKernel<<<grid, block, 0, stream>>>(sample_descs_gpu, border_type_);
}
void PreemphasisFilterGPU::RunImpl(workspace_t<GPUBackend> &ws) {
const auto &input = ws.template Input<GPUBackend>(0);
TYPE_SWITCH(input.type(), type2id, InputType, PREEMPH_TYPES, (
TYPE_SWITCH(output_type_, type2id, OutputType, PREEMPH_TYPES, (
RunImplTyped<OutputType, InputType>(ws);
), DALI_FAIL(make_string("Unsupported output type: ", output_type_))); // NOLINT
), DALI_FAIL(make_string("Unsupported input type: ", input.type()))); // NOLINT
}
DALI_REGISTER_OPERATOR(PreemphasisFilter, PreemphasisFilterGPU, GPU);
} // namespace dali
|
137f7cd2093f763d7503698fe343073d8896552f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Implementing fft4 algorithm
* Input is multiple float32 vectors, the number given by B
* No splitting
* It's not a complete FFT
* To be used recursively by gfft
*/
// C includes
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
// CUDA includes
#include <hip/hip_runtime.h>
#include <rocblas.h>
// Matrix and vector
#include "helper/my_vector.h"
#include "helper/my_matrix.h"
#include "helper/my_const.h"
float UPPER_BOUND = 1000.0f;
int BATCH = 4;
fft::MatrixF F4_re;
fft::MatrixF F4_im;
FFT_S init_F4()
{
F4_re.width = 4;
F4_re.height = 4;
F4_re.array = (float*)malloc(F4_re.width * F4_re.height * sizeof(float));
F4_re.element(1, 1) = 1.0f;
F4_re.element(2, 1) = 1.0f;
F4_re.element(3, 1) = 1.0f;
F4_re.element(4, 1) = 1.0f;
F4_re.element(1, 2) = 1.0f;
F4_re.element(2, 2) = 0.0f;
F4_re.element(3, 2) =-1.0f;
F4_re.element(4, 2) = 0.0f;
F4_re.element(1, 3) = 1.0f;
F4_re.element(2, 3) =-1.0f;
F4_re.element(3, 3) = 1.0f;
F4_re.element(4, 3) =-1.0f;
F4_re.element(1, 4) = 1.0f;
F4_re.element(2, 4) = 0.0f;
F4_re.element(3, 4) =-1.0f;
F4_re.element(4, 4) = 0.0f;
F4_im.width = 4;
F4_im.height = 4;
F4_im.array = (float*)malloc(F4_re.width * F4_re.height * sizeof(float));
F4_im.element(1, 1) = 0.0f;
F4_im.element(2, 1) = 0.0f;
F4_im.element(3, 1) = 0.0f;
F4_im.element(4, 1) = 0.0f;
F4_im.element(1, 2) = 0.0f;
F4_im.element(2, 2) =-1.0f;
F4_im.element(3, 2) = 0.0f;
F4_im.element(4, 2) = 1.0f;
F4_im.element(1, 3) = 0.0f;
F4_im.element(2, 3) = 0.0f;
F4_im.element(3, 3) = 0.0f;
F4_im.element(4, 3) = 0.0f;
F4_im.element(1, 4) = 0.0f;
F4_im.element(2, 4) = 1.0f;
F4_im.element(3, 4) = 0.0f;
F4_im.element(4, 4) =-1.0f;
return FFT_SUCCESS;
}
FFT_S fft4(int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im)
{
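// The complex product F4 * X is formed from two real GEMMs,
// F4_re * [X_re | X_im] and F4_im * [X_re | X_im], which are then combined with axpy:
// Re(FX) = F4_re*X_re - F4_im*X_im and Im(FX) = F4_re*X_im + F4_im*X_re.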
hipblasStatus_t status;
hipblasHandle_t handle;
float *dev_FM, *dev_input, *dev_result1, *dev_result2;
float alpha = 1.0f, beta = 0.0f;
// Initialize cublas and allocate device memory
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return FFT_FAILURE;
}
if (hipMalloc(reinterpret_cast<void **>(&dev_FM), 4 * 4 * sizeof(dev_FM[0])) !=
hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate Fourier Matrix)\n");
return FFT_FAILURE;
}
if (hipMalloc(reinterpret_cast<void **>(&dev_input), 4 * B * 2 * sizeof(dev_input[0])) !=
hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate Input Matrix)\n");
return FFT_FAILURE;
}
if (hipMalloc(reinterpret_cast<void **>(&dev_result1), 4 * B * 2 * sizeof(dev_result1[0])) !=
hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate result 1 Matrix)\n");
return FFT_FAILURE;
}
if (hipMalloc(reinterpret_cast<void **>(&dev_result2), 4 * B * 2 * sizeof(dev_result2[0])) !=
hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate result 2 Matrix)\n");
return FFT_FAILURE;
}
// F4_re * (X_re, X_im)
//// Copy host data to device
status = hipblasSetVector(4 * 4, sizeof(F4_re.array[0]), F4_re.array, 1, dev_FM, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write F4_re)\n");
return FFT_FAILURE;
}
status = hipblasSetVector(4 * B, sizeof(X_re.array[0]), X_re.array, 1, dev_input, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write X_re)\n");
return FFT_FAILURE;
}
status = hipblasSetVector(4 * B, sizeof(X_im.array[0]), X_im.array, 1, dev_input + 4 * B, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write X_im)\n");
return FFT_FAILURE;
}
//// Call cublas gemm
status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, B * 2, 4, &alpha, dev_FM,
4, dev_input, 4, &beta, dev_result1, 4);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (a * (c, d)).\n");
return FFT_FAILURE;
}
// F4_im * (X_re, X_im)
//// Copy host data to device
status = hipblasSetVector(4 * 4, sizeof(F4_im.array[0]), F4_im.array, 1, dev_FM, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write F4_im)\n");
return FFT_FAILURE;
}
//// Call cublas gemm
status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, B * 2, 4, &alpha, dev_FM,
4, dev_input, 4, &beta, dev_result2, 4);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (b * (c, d)).\n");
return FFT_FAILURE;
}
// Combine and get result, store in result1
alpha = -1.0f;
status = hipblasSaxpy(handle, 4 * B, &alpha, dev_result2 + 4 * B, 1, dev_result1, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (ac - bd).\n");
return FFT_FAILURE;
}
alpha = 1.0f;
status = hipblasSaxpy(handle, 4 * B, &alpha, dev_result2, 1, dev_result1 + 4 * B, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (ad + bc).\n");
return FFT_FAILURE;
}
// Copy device memory to host
status = hipblasGetVector(4 * B, sizeof(FX_re.array[0]), dev_result1, 1, FX_re.array, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read FX_re)\n");
return FFT_FAILURE;
}
status = hipblasGetVector(4 * B, sizeof(FX_im.array[0]), dev_result1 + 4 * B, 1, FX_im.array, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read FX_im)\n");
return FFT_FAILURE;
}
// Deallocate device memory and shutdown
if (hipFree(dev_FM) != hipSuccess) {
fprintf(stderr, "!!!! device memory free error (free Fourier Matrix)\n");
return FFT_FAILURE;
}
if (hipFree(dev_input) != hipSuccess) {
fprintf(stderr, "!!!! device memory free error (free Input Matrix)\n");
return FFT_FAILURE;
}
if (hipFree(dev_result1) != hipSuccess) {
fprintf(stderr, "!!!! device memory free error (free result 1 Matrix)\n");
return FFT_FAILURE;
}
if (hipFree(dev_result2) != hipSuccess) {
fprintf(stderr, "!!!! device memory free error (free result 2 Matrix)\n");
return FFT_FAILURE;
}
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return FFT_FAILURE;
}
return FFT_SUCCESS;
}
int main()
{
FFT_S status;
status = init_F4();
if (status != FFT_SUCCESS){
printf("Error in Fourier matrix initialization\n");
exit(1);
}
fft::MatrixF X_re;
X_re.height = 4;
X_re.width = BATCH;
X_re.array = (float*)malloc(X_re.height * X_re.width * sizeof(float));
fft::MatrixF X_im;
X_im.height = 4;
X_im.width = BATCH;
X_im.array = (float*)malloc(X_im.height * X_im.width * sizeof(float));
fft::MatrixF FX_re;
FX_re.height = 4;
FX_re.width = BATCH;
FX_re.array = (float*)malloc(FX_re.height * FX_re.width * sizeof(float));
fft::MatrixF FX_im;
FX_im.height = 4;
FX_im.width = BATCH;
FX_im.array = (float*)malloc(FX_im.height * FX_im.width * sizeof(float));
// Setting input value
srand(time(NULL));
printf("The input is: \n");
for (int j = 1; j <= BATCH; j++){
printf("Vector %d: \n", j);
for (int i = 1; i <= 4; i++){
X_re.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
X_im.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
printf("X[%d] = (%.10f, %.10f) \n", i, X_re.element(i, j), X_im.element(i, j));
}
}
status = fft4(BATCH, X_re,X_im, FX_re, FX_im);
if (status != FFT_SUCCESS){
printf("Error in running fft calculation\n");
exit(1);
}
printf("Result: \n");
for (int j = 1; j <= BATCH; j++){
printf("Resulting vector %d: \n", j);
for (int i = 1; i <= 4; i++){
printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.element(i, j), FX_im.element(i, j));
}
}
free(X_re.array);
free(X_im.array);
free(FX_re.array);
free(FX_im.array);
return 0;
}
| 137f7cd2093f763d7503698fe343073d8896552f.cu | /*
* Implementing fft4 algorithm
* Input is multiple float32 vector, number given by B
* No spliting
* It's not a complete FFT
* To be used recursively by gfft
*/
// C includes
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
// CUDA includes
#include <cuda_runtime.h>
#include <cublas_v2.h>
// Matrix and vector
#include "helper/my_vector.h"
#include "helper/my_matrix.h"
#include "helper/my_const.h"
float UPPER_BOUND = 1000.0f;
int BATCH = 4;
fft::MatrixF F4_re;
fft::MatrixF F4_im;
FFT_S init_F4()
{
F4_re.width = 4;
F4_re.height = 4;
F4_re.array = (float*)malloc(F4_re.width * F4_re.height * sizeof(float));
F4_re.element(1, 1) = 1.0f;
F4_re.element(2, 1) = 1.0f;
F4_re.element(3, 1) = 1.0f;
F4_re.element(4, 1) = 1.0f;
F4_re.element(1, 2) = 1.0f;
F4_re.element(2, 2) = 0.0f;
F4_re.element(3, 2) =-1.0f;
F4_re.element(4, 2) = 0.0f;
F4_re.element(1, 3) = 1.0f;
F4_re.element(2, 3) =-1.0f;
F4_re.element(3, 3) = 1.0f;
F4_re.element(4, 3) =-1.0f;
F4_re.element(1, 4) = 1.0f;
F4_re.element(2, 4) = 0.0f;
F4_re.element(3, 4) =-1.0f;
F4_re.element(4, 4) = 0.0f;
F4_im.width = 4;
F4_im.height = 4;
F4_im.array = (float*)malloc(F4_re.width * F4_re.height * sizeof(float));
F4_im.element(1, 1) = 0.0f;
F4_im.element(2, 1) = 0.0f;
F4_im.element(3, 1) = 0.0f;
F4_im.element(4, 1) = 0.0f;
F4_im.element(1, 2) = 0.0f;
F4_im.element(2, 2) =-1.0f;
F4_im.element(3, 2) = 0.0f;
F4_im.element(4, 2) = 1.0f;
F4_im.element(1, 3) = 0.0f;
F4_im.element(2, 3) = 0.0f;
F4_im.element(3, 3) = 0.0f;
F4_im.element(4, 3) = 0.0f;
F4_im.element(1, 4) = 0.0f;
F4_im.element(2, 4) = 1.0f;
F4_im.element(3, 4) = 0.0f;
F4_im.element(4, 4) =-1.0f;
return FFT_SUCCESS;
}
FFT_S fft4(int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im)
{
cublasStatus_t status;
cublasHandle_t handle;
float *dev_FM, *dev_input, *dev_result1, *dev_result2;
float alpha = 1.0f, beta = 0.0f;
// Initialize cublas and allocate device memory
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return FFT_FAILURE;
}
if (cudaMalloc(reinterpret_cast<void **>(&dev_FM), 4 * 4 * sizeof(dev_FM[0])) !=
cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate Fourier Matrix)\n");
return FFT_FAILURE;
}
if (cudaMalloc(reinterpret_cast<void **>(&dev_input), 4 * B * 2 * sizeof(dev_input[0])) !=
cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate Input Matrix)\n");
return FFT_FAILURE;
}
if (cudaMalloc(reinterpret_cast<void **>(&dev_result1), 4 * B * 2 * sizeof(dev_result1[0])) !=
cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate result 1 Matrix)\n");
return FFT_FAILURE;
}
if (cudaMalloc(reinterpret_cast<void **>(&dev_result2), 4 * B * 2 * sizeof(dev_result2[0])) !=
cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate result 2 Matrix)\n");
return FFT_FAILURE;
}
// F4_re * (X_re, X_im)
//// Copy host data to device
status = cublasSetVector(4 * 4, sizeof(F4_re.array[0]), F4_re.array, 1, dev_FM, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write F4_re)\n");
return FFT_FAILURE;
}
status = cublasSetVector(4 * B, sizeof(X_re.array[0]), X_re.array, 1, dev_input, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write X_re)\n");
return FFT_FAILURE;
}
status = cublasSetVector(4 * B, sizeof(X_im.array[0]), X_im.array, 1, dev_input + 4 * B, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write X_im)\n");
return FFT_FAILURE;
}
//// Call cublas gemm
status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, B * 2, 4, &alpha, dev_FM,
4, dev_input, 4, &beta, dev_result1, 4);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (a * (c, d)).\n");
return FFT_FAILURE;
}
// F4_im * (X_re, X_im)
//// Copy host data to device
status = cublasSetVector(4 * 4, sizeof(F4_im.array[0]), F4_im.array, 1, dev_FM, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write F4_im)\n");
return FFT_FAILURE;
}
//// Call cublas gemm
status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, B * 2, 4, &alpha, dev_FM,
4, dev_input, 4, &beta, dev_result2, 4);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (b * (c, d)).\n");
return FFT_FAILURE;
}
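// Complex multiply via two real GEMMs: with F = a + bi and X = c + di,
// result1 holds a*(c,d) and result2 holds b*(c,d), so FX_re = ac - bd and FX_im = ad + bc.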
// Combine and get result, store in result1
alpha = -1.0f;
status = cublasSaxpy(handle, 4 * B, &alpha, dev_result2 + 4 * B, 1, dev_result1, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (ac - bd).\n");
return FFT_FAILURE;
}
alpha = 1.0f;
status = cublasSaxpy(handle, 4 * B, &alpha, dev_result2, 1, dev_result1 + 4 * B, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (ad + bc).\n");
return FFT_FAILURE;
}
// Copy device memory to host
status = cublasGetVector(4 * B, sizeof(FX_re.array[0]), dev_result1, 1, FX_re.array, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read FX_re)\n");
return FFT_FAILURE;
}
status = cublasGetVector(4 * B, sizeof(FX_im.array[0]), dev_result1 + 4 * B, 1, FX_im.array, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read FX_im)\n");
return FFT_FAILURE;
}
// Deallocate device memory and shutdown
if (cudaFree(dev_FM) != cudaSuccess) {
fprintf(stderr, "!!!! device memory free error (free Fourier Matrix)\n");
return FFT_FAILURE;
}
if (cudaFree(dev_input) != cudaSuccess) {
fprintf(stderr, "!!!! device memory free error (free Input Matrix)\n");
return FFT_FAILURE;
}
if (cudaFree(dev_result1) != cudaSuccess) {
fprintf(stderr, "!!!! device memory free error (free result 1 Matrix)\n");
return FFT_FAILURE;
}
if (cudaFree(dev_result2) != cudaSuccess) {
fprintf(stderr, "!!!! device memory free error (free result 2 Matrix)\n");
return FFT_FAILURE;
}
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return FFT_FAILURE;
}
return FFT_SUCCESS;
}
int main()
{
FFT_S status;
status = init_F4();
if (status != FFT_SUCCESS){
printf("Error in Fourier matrix initialization\n");
exit(1);
}
fft::MatrixF X_re;
X_re.height = 4;
X_re.width = BATCH;
X_re.array = (float*)malloc(X_re.height * X_re.width * sizeof(float));
fft::MatrixF X_im;
X_im.height = 4;
X_im.width = BATCH;
X_im.array = (float*)malloc(X_im.height * X_im.width * sizeof(float));
fft::MatrixF FX_re;
FX_re.height = 4;
FX_re.width = BATCH;
FX_re.array = (float*)malloc(FX_re.height * FX_re.width * sizeof(float));
fft::MatrixF FX_im;
FX_im.height = 4;
FX_im.width = BATCH;
FX_im.array = (float*)malloc(FX_im.height * FX_im.width * sizeof(float));
// Setting input value
srand(time(NULL));
printf("The input is: \n");
for (int j = 1; j <= BATCH; j++){
printf("Vector %d: \n", j);
for (int i = 1; i <= 4; i++){
X_re.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
X_im.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
printf("X[%d] = (%.10f, %.10f) \n", i, X_re.element(i, j), X_im.element(i, j));
}
}
status = fft4(BATCH, X_re,X_im, FX_re, FX_im);
if (status != FFT_SUCCESS){
printf("Error in running fft calculation\n");
exit(1);
}
printf("Result: \n");
for (int j = 1; j <= BATCH; j++){
printf("Resulting vector %d: \n", j);
for (int i = 1; i <= 4; i++){
printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.element(i, j), FX_im.element(i, j));
}
}
free(X_re.array);
free(X_im.array);
free(FX_re.array);
free(FX_im.array);
return 0;
}
|
621d32e059e2002c6653aa133ed27de68533d635.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/convolution3d/chanwise/fwd.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./kern.cuh"
#include "kern_helper_hip.cuh"
using namespace megdnn;
using namespace cuda;
using namespace convolution3d;
using namespace chanwise;
namespace {
template <typename T, int CHL_MUL_SET, int FD_SET, int FH_SET, int FW_SET>
__global__ void kern_fwd(T* dst, const T* src, const T* flt_tot, Param param) {
// extern __shared__ of dt_float16 does not work
extern __shared__ uint8_t flt_storage[];
T* const flt = reinterpret_cast<T*>(flt_storage);
const uint32_t N = param.batch, IC = param.src_chl, ic = blockIdx.x,
ID = param.src_d, IH = param.src_h, IW = param.src_w,
CHL_MUL = CHL_MUL_SET ? CHL_MUL_SET : param.chl_mul,
FD = FD_SET ? FD_SET : param.flt_d,
FH = FH_SET ? FH_SET : param.flt_h,
FW = FW_SET ? FW_SET : param.flt_w, FSIZE = FD * FH * FW,
PD = param.pad_d, PH = param.pad_h, PW = param.pad_w,
SD = param.stride_d, SH = param.stride_h, SW = param.stride_w,
OD = param.out_d, OH = param.out_h, OW = param.out_w,
TOT_OUT = N * CHL_MUL * OD * OH * OW;
block_memcpy(flt, flt_tot + ic * FSIZE * CHL_MUL, FSIZE * CHL_MUL);
uint32_t out_idx_ = blockIdx.y * blockDim.x + threadIdx.x,
nr_out_per_launch = blockDim.x * gridDim.y;
for (; out_idx_ < TOT_OUT; out_idx_ += nr_out_per_launch) {
uint32_t out_idx = out_idx_, n, chl_mul, od, oh, ow;
out_idx = div_mod(out_idx, OW, ow);
out_idx = div_mod(out_idx, OH, oh);
out_idx = div_mod(out_idx, OD, od);
if (CHL_MUL_SET == 1) {
chl_mul = 0;
n = out_idx;
} else {
n = div_mod(out_idx, CHL_MUL, chl_mul);
}
int id = int(od * SD) - int(PD), ih = int(oh * SH) - int(PH),
iw = int(ow * SW) - int(PW);
const T* flt_base = flt + chl_mul * FSIZE;
const T* src_base = src + int((((n * IC + ic) * ID + id) * IH + ih) * IW + iw);
T sum(0);
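// Compile-time filter sizes take the fully unrolled path with per-tap bounds checks;
// otherwise fall back to runtime loop bounds clamped against the input volume.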
if (FD_SET && FH_SET && FW_SET) {
#pragma unroll
for (uint32_t fd = 0; fd < FD; ++fd) {
// fh + ih < 0 would overflow, so we do not need to check it
if (static_cast<uint32_t>(fd + id) < ID) {
#pragma unroll
for (uint32_t fh = 0; fh < FH; ++fh) {
if (static_cast<uint32_t>(fh + ih) < IH) {
#pragma unroll
for (uint32_t fw = 0; fw < FW; ++fw) {
if (static_cast<uint32_t>(fw + iw) < IW) {
sum += flt_base[fd * FH * FW + fh * FW + fw] *
src_base[fd * IH * IW + fh * IW + fw];
}
}
}
}
}
}
} else {
int fdmax = min(int(FD), int(ID - id)), fhmax = min(int(FH), int(IH - ih)),
fwmax = min(int(FW), int(IW - iw));
for (int fd = max(0, -id); fd < fdmax; ++fd) {
for (int fh = max(0, -ih); fh < fhmax; ++fh) {
for (int fw = max(0, -iw); fw < fwmax; ++fw) {
sum += flt_base[fd * FH * FW + fh * FW + fw] *
src_base[fd * IH * IW + fh * IW + fw];
}
}
}
}
dst[((((n * IC + ic) * CHL_MUL + chl_mul) * OD + od) * OH + oh) * OW + ow] =
sum;
}
}
} // anonymous namespace
template <typename T>
void chanwise::run_fwd(
T* dst, const T* src, const T* flt, const Param& param, hipStream_t stream) {
void (*kern)(T*, const T*, const T*, Param);
if (param.chl_mul == 1) {
if (param.flt_d == 2 && param.flt_h == 2 && param.flt_w == 2) {
kern = kern_fwd<T, 1, 2, 2, 2>;
} else if (param.flt_d == 3 && param.flt_h == 3 && param.flt_w == 3) {
kern = kern_fwd<T, 1, 3, 3, 3>;
} else {
kern = kern_fwd<T, 1, 0, 0, 0>;
}
} else {
kern = kern_fwd<T, 0, 0, 0, 0>;
}
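// grid.x covers one block per input channel; grid.y is sized so each thread handles
// roughly four outputs, capped at 512 blocks.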
int nr_thread = query_blocksize_for_kernel(kern),
nr_out_dimx =
param.out_d * param.out_h * param.out_w * param.batch * param.chl_mul;
dim3 nr_block(param.src_chl, ::min(512, max(nr_out_dimx / (nr_thread * 4), 1)));
uint32_t shared =
param.chl_mul * param.flt_d * param.flt_h * param.flt_w * sizeof(T);
hipLaunchKernelGGL(( kern), dim3(nr_block), dim3(nr_thread), shared, stream, dst, src, flt, param);
after_kernel_launch();
}
namespace megdnn {
namespace cuda {
namespace convolution3d {
namespace chanwise {
#define DO_INST(_ct) \
template void run_fwd(_ct*, const _ct*, const _ct*, const Param&, hipStream_t);
#define INST(_dt) DO_INST(DTypeTrait<_dt>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(INST)
#undef INST
#undef DO_INST
} // namespace chanwise
} // namespace convolution3d
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
| 621d32e059e2002c6653aa133ed27de68533d635.cu | /**
* \file dnn/src/cuda/convolution3d/chanwise/fwd.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./kern.cuh"
#include "./kern_helper.cuh"
using namespace megdnn;
using namespace cuda;
using namespace convolution3d;
using namespace chanwise;
namespace {
template <typename T, int CHL_MUL_SET, int FD_SET, int FH_SET, int FW_SET>
__global__ void kern_fwd(T* dst, const T* src, const T* flt_tot, Param param) {
// extern __shared__ of dt_float16 does not work
extern __shared__ uint8_t flt_storage[];
T* const flt = reinterpret_cast<T*>(flt_storage);
const uint32_t N = param.batch, IC = param.src_chl, ic = blockIdx.x,
ID = param.src_d, IH = param.src_h, IW = param.src_w,
CHL_MUL = CHL_MUL_SET ? CHL_MUL_SET : param.chl_mul,
FD = FD_SET ? FD_SET : param.flt_d,
FH = FH_SET ? FH_SET : param.flt_h,
FW = FW_SET ? FW_SET : param.flt_w, FSIZE = FD * FH * FW,
PD = param.pad_d, PH = param.pad_h, PW = param.pad_w,
SD = param.stride_d, SH = param.stride_h, SW = param.stride_w,
OD = param.out_d, OH = param.out_h, OW = param.out_w,
TOT_OUT = N * CHL_MUL * OD * OH * OW;
block_memcpy(flt, flt_tot + ic * FSIZE * CHL_MUL, FSIZE * CHL_MUL);
uint32_t out_idx_ = blockIdx.y * blockDim.x + threadIdx.x,
nr_out_per_launch = blockDim.x * gridDim.y;
for (; out_idx_ < TOT_OUT; out_idx_ += nr_out_per_launch) {
uint32_t out_idx = out_idx_, n, chl_mul, od, oh, ow;
out_idx = div_mod(out_idx, OW, ow);
out_idx = div_mod(out_idx, OH, oh);
out_idx = div_mod(out_idx, OD, od);
if (CHL_MUL_SET == 1) {
chl_mul = 0;
n = out_idx;
} else {
n = div_mod(out_idx, CHL_MUL, chl_mul);
}
int id = int(od * SD) - int(PD), ih = int(oh * SH) - int(PH),
iw = int(ow * SW) - int(PW);
const T* flt_base = flt + chl_mul * FSIZE;
const T* src_base = src + int((((n * IC + ic) * ID + id) * IH + ih) * IW + iw);
T sum(0);
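// Compile-time filter sizes take the fully unrolled path with per-tap bounds checks;
// otherwise fall back to runtime loop bounds clamped against the input volume.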
if (FD_SET && FH_SET && FW_SET) {
#pragma unroll
for (uint32_t fd = 0; fd < FD; ++fd) {
// fh + ih < 0 would overflow, so we do not need to check it
if (static_cast<uint32_t>(fd + id) < ID) {
#pragma unroll
for (uint32_t fh = 0; fh < FH; ++fh) {
if (static_cast<uint32_t>(fh + ih) < IH) {
#pragma unroll
for (uint32_t fw = 0; fw < FW; ++fw) {
if (static_cast<uint32_t>(fw + iw) < IW) {
sum += flt_base[fd * FH * FW + fh * FW + fw] *
src_base[fd * IH * IW + fh * IW + fw];
}
}
}
}
}
}
} else {
int fdmax = min(int(FD), int(ID - id)), fhmax = min(int(FH), int(IH - ih)),
fwmax = min(int(FW), int(IW - iw));
for (int fd = max(0, -id); fd < fdmax; ++fd) {
for (int fh = max(0, -ih); fh < fhmax; ++fh) {
for (int fw = max(0, -iw); fw < fwmax; ++fw) {
sum += flt_base[fd * FH * FW + fh * FW + fw] *
src_base[fd * IH * IW + fh * IW + fw];
}
}
}
}
dst[((((n * IC + ic) * CHL_MUL + chl_mul) * OD + od) * OH + oh) * OW + ow] =
sum;
}
}
} // anonymous namespace
template <typename T>
void chanwise::run_fwd(
T* dst, const T* src, const T* flt, const Param& param, cudaStream_t stream) {
void (*kern)(T*, const T*, const T*, Param);
if (param.chl_mul == 1) {
if (param.flt_d == 2 && param.flt_h == 2 && param.flt_w == 2) {
kern = kern_fwd<T, 1, 2, 2, 2>;
} else if (param.flt_d == 3 && param.flt_h == 3 && param.flt_w == 3) {
kern = kern_fwd<T, 1, 3, 3, 3>;
} else {
kern = kern_fwd<T, 1, 0, 0, 0>;
}
} else {
kern = kern_fwd<T, 0, 0, 0, 0>;
}
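// grid.x covers one block per input channel; grid.y is sized so each thread handles
// roughly four outputs, capped at 512 blocks.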
int nr_thread = query_blocksize_for_kernel(kern),
nr_out_dimx =
param.out_d * param.out_h * param.out_w * param.batch * param.chl_mul;
dim3 nr_block(param.src_chl, std::min(512, max(nr_out_dimx / (nr_thread * 4), 1)));
uint32_t shared =
param.chl_mul * param.flt_d * param.flt_h * param.flt_w * sizeof(T);
kern<<<nr_block, nr_thread, shared, stream>>>(dst, src, flt, param);
after_kernel_launch();
}
namespace megdnn {
namespace cuda {
namespace convolution3d {
namespace chanwise {
#define DO_INST(_ct) \
template void run_fwd(_ct*, const _ct*, const _ct*, const Param&, cudaStream_t);
#define INST(_dt) DO_INST(DTypeTrait<_dt>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(INST)
#undef INST
#undef DO_INST
} // namespace chanwise
} // namespace convolution3d
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
|
b518c36e097d4ba938eaaa9da04263701cfacb48.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive multiply operations, restricted to lanes 0-11 of each warp
if(((i%32)<=11)){
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = (float)rand() / (float)RAND_MAX;
}
}
| b518c36e097d4ba938eaaa9da04263701cfacb48.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive multiply operations, restricted to lanes 0-11 of each warp
if(((i%32)<=11)){
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = (float)rand() / (float)RAND_MAX;
}
}
|
ed2ffe5c1766ad12246dbb976829125cf13f23ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Iterator5, typename Iterator6, typename Iterator7>
__global__
void set_difference_by_key_kernel(Iterator1 keys_first1, Iterator1 keys_last1,
Iterator2 keys_first2, Iterator2 keys_last2,
Iterator3 values_first1,
Iterator4 values_first2,
Iterator5 keys_result,
Iterator6 values_result,
Iterator7 result)
{
*result = thrust::set_difference_by_key(thrust::seq,
keys_first1, keys_last1,
keys_first2, keys_last2,
values_first1,
values_first2,
keys_result,
values_result);
}
void TestSetDifferenceByKeyDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(4), b_key(5);
Vector a_val(4), b_val(5);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4; a_key[3] = 5;
a_val[0] = 0; a_val[1] = 0; a_val[2] = 0; a_val[3] = 0;
b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4; b_key[4] = 6;
b_val[0] = 1; b_val[1] = 1; b_val[2] = 1; b_val[3] = 1; b_val[4] = 1;
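// Keys 2 and 5 appear in a but not in b, so they and their values form the set difference.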
Vector ref_key(2), ref_val(2);
ref_key[0] = 2; ref_key[1] = 5;
ref_val[0] = 0; ref_val[1] = 0;
Vector result_key(2), result_val(2);
typedef thrust::pair<Iterator,Iterator> iter_pair;
thrust::device_vector<iter_pair> end_vec(1);
hipLaunchKernelGGL(( set_difference_by_key_kernel), dim3(1),dim3(1), 0, 0, a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(),
b_val.begin(),
result_key.begin(),
result_val.begin(),
end_vec.begin());
iter_pair end = end_vec.front();
ASSERT_EQUAL_QUIET(result_key.end(), end.first);
ASSERT_EQUAL_QUIET(result_val.end(), end.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
}
DECLARE_UNITTEST(TestSetDifferenceByKeyDeviceSeq);
void TestSetDifferenceByKeyCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(4), b_key(5);
Vector a_val(4), b_val(5);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4; a_key[3] = 5;
a_val[0] = 0; a_val[1] = 0; a_val[2] = 0; a_val[3] = 0;
b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4; b_key[4] = 6;
b_val[0] = 1; b_val[1] = 1; b_val[2] = 1; b_val[3] = 1; b_val[4] = 1;
Vector ref_key(2), ref_val(2);
ref_key[0] = 2; ref_key[1] = 5;
ref_val[0] = 0; ref_val[1] = 0;
Vector result_key(2), result_val(2);
hipStream_t s;
hipStreamCreate(&s);
thrust::pair<Iterator,Iterator> end =
thrust::set_difference_by_key(thrust::hip::par(s),
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(),
b_val.begin(),
result_key.begin(),
result_val.begin());
hipStreamSynchronize(s);
ASSERT_EQUAL_QUIET(result_key.end(), end.first);
ASSERT_EQUAL_QUIET(result_val.end(), end.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestSetDifferenceByKeyCudaStreams);
| ed2ffe5c1766ad12246dbb976829125cf13f23ca.cu | #include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Iterator5, typename Iterator6, typename Iterator7>
__global__
void set_difference_by_key_kernel(Iterator1 keys_first1, Iterator1 keys_last1,
Iterator2 keys_first2, Iterator2 keys_last2,
Iterator3 values_first1,
Iterator4 values_first2,
Iterator5 keys_result,
Iterator6 values_result,
Iterator7 result)
{
*result = thrust::set_difference_by_key(thrust::seq,
keys_first1, keys_last1,
keys_first2, keys_last2,
values_first1,
values_first2,
keys_result,
values_result);
}
void TestSetDifferenceByKeyDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(4), b_key(5);
Vector a_val(4), b_val(5);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4; a_key[3] = 5;
a_val[0] = 0; a_val[1] = 0; a_val[2] = 0; a_val[3] = 0;
b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4; b_key[4] = 6;
b_val[0] = 1; b_val[1] = 1; b_val[2] = 1; b_val[3] = 1; b_val[4] = 1;
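// Keys 2 and 5 appear in a but not in b, so they and their values form the set difference.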
Vector ref_key(2), ref_val(2);
ref_key[0] = 2; ref_key[1] = 5;
ref_val[0] = 0; ref_val[1] = 0;
Vector result_key(2), result_val(2);
typedef thrust::pair<Iterator,Iterator> iter_pair;
thrust::device_vector<iter_pair> end_vec(1);
set_difference_by_key_kernel<<<1,1>>>(a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(),
b_val.begin(),
result_key.begin(),
result_val.begin(),
end_vec.begin());
iter_pair end = end_vec.front();
ASSERT_EQUAL_QUIET(result_key.end(), end.first);
ASSERT_EQUAL_QUIET(result_val.end(), end.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
}
DECLARE_UNITTEST(TestSetDifferenceByKeyDeviceSeq);
void TestSetDifferenceByKeyCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(4), b_key(5);
Vector a_val(4), b_val(5);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4; a_key[3] = 5;
a_val[0] = 0; a_val[1] = 0; a_val[2] = 0; a_val[3] = 0;
b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4; b_key[4] = 6;
b_val[0] = 1; b_val[1] = 1; b_val[2] = 1; b_val[3] = 1; b_val[4] = 1;
Vector ref_key(2), ref_val(2);
ref_key[0] = 2; ref_key[1] = 5;
ref_val[0] = 0; ref_val[1] = 0;
Vector result_key(2), result_val(2);
cudaStream_t s;
cudaStreamCreate(&s);
thrust::pair<Iterator,Iterator> end =
thrust::set_difference_by_key(thrust::cuda::par(s),
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(),
b_val.begin(),
result_key.begin(),
result_val.begin());
cudaStreamSynchronize(s);
ASSERT_EQUAL_QUIET(result_key.end(), end.first);
ASSERT_EQUAL_QUIET(result_val.end(), end.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
cudaStreamDestroy(s);
}
DECLARE_UNITTEST(TestSetDifferenceByKeyCudaStreams);
|
4e8e96bf096ca63fd8a57ce1073f6b3f76d33f57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:ASSERTION_ERROR
//--blockDim=2 --gridDim=1
__global__ void f(int *c) {
if(threadIdx.x == 0) {
*c = 0;
atomicAdd(c, 1);
int x = *c;
int y = *c;
__assert(x == 0);
__assert(y == 0);
}
}
| 4e8e96bf096ca63fd8a57ce1073f6b3f76d33f57.cu | //xfail:ASSERTION_ERROR
//--blockDim=2 --gridDim=1
__global__ void f(int *c) {
if(threadIdx.x == 0) {
*c = 0;
atomicAdd(c, 1);
int x = *c;
int y = *c;
__assert(x == 0);
__assert(y == 0);
}
}
|
b71658202df298d5031a10b05d8b5e4035049a01.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define EPS 1e-8
#define N 10000000
#define MAX_ERR 1e-6
//#define nb 23814 //no. of n bodies
//#define nb 1350
//#define nb 294
//#define nb 5766
//#define p 31 //no of threads in each block
#define PI 3.14159265358979323846
#define PIx8 25.132741228718345
extern "C" __device__ double smoothfun2(double x)
{
double a = erf(x);
double b = (4.0*x*x*x*x - 14.0*x*x + 3.0);
double c = -(2.0/3.0)*x*b*exp(-1.0*x*x)*(1.0/sqrt(M_PI));
return a+c;
}
extern "C" __device__ double smoothfun1(double x)
{
double a = erf(x);
double b = (2.0*x*x - 5.0);
double c = -(2.0/3.0)*x*b*exp(-1.0*x*x)*(1.0/sqrt(M_PI));
return a+c;
}
extern "C" __device__ double3 bodyBodyInteractionStokes(double3 bi, double3 bj, double3 fj, double3 ai, double delta)
{
//TODO: add delta parameter as input reg parameter
//33 FLOP ; change this to include regularization
//fj in shared memory too, fj has SL density,pou, area element, mesh size already in it
double3 r;
//double delta = 0.1114403363930094; //0.20795502088527543; //0.38805779048337563;
//delta = 0.20795502088527543; //0.38805779048337563;
//double delta = 0.38805779048337563;
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
double distSqr_zc; //zero corrected
double distSqr = r.x *r.x + r.y*r.y + r.z*r.z;
distSqr_zc = distSqr;
if(distSqr <= 0.0) distSqr_zc = 1.0;
double dist = sqrt(distSqr);
double distSixth = distSqr_zc*distSqr_zc*distSqr_zc;
double invDistCube = 1.0/sqrt(distSixth);
double invDist = 1.0/sqrt(distSqr_zc);
double fdotr = fj.x*r.x + fj.y*r.y + fj.z*r.z;
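// Regularized Stokeslet: the contribution is f*s1 + (f.r)*r*s2, where the smoothing
// (blob) functions remove the 1/r and 1/r^3 singularities; the r = 0 case below uses
// their finite limits.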
double s1 = invDist*smoothfun1(dist/delta); //smoothing function 1
double s2 = fdotr*invDistCube*smoothfun2(dist/delta); //smoothing function 2
if(distSqr<=0.0)
{
s1 = 16.0/(3*sqrt(M_PI)*delta);
s2 = fdotr*32.0/(3*sqrt(M_PI)*delta*delta*delta);
}
//ai.x += fj.x*s1;
//ai.y += fj.y*s1;
//ai.z += fj.z*s1;
ai.x += fj.x*s1 + r.x*s2;
ai.y += fj.y*s1 + r.y*s2;
ai.z += fj.z*s1 + r.z*s2;
return ai;
}
extern "C" __global__ void calculate_forces_stokes(void *devX, void *devF, void *devA, void *delta, int nb, int p, void *bn, int blk_per_box, int last_box_threads, int points_per_box, int knn)
{
// bn is flattened box neighbor matrix kxk
// k is number of boxes, gridDim.x is equal to k
//extern __shared__ float3 shPosition[];
//extern __shared__ float3 shDensity[];
extern __shared__ double3 shPosDen[];
double3 *globalX = (double3 *)devX;
double3 *globalF = (double3 *)devF; //SL density
double3 *globalA = (double3 *)devA;
double *globalDelta = (double *)delta; //reg parameter
double *globalBoxNbr = (double *)bn; //box neighbors k^2, 0 or 1 values
double3 myPosition;
double myDelta;
int i, tile;
double3 acc = {0.0, 0.0, 0.0};
int box_num_y,extra_blocks_y;
int extra_blocks,gtid,pid,box_num; //pid is particle id
//gtid = blockIdx.x * blockDim.x + threadIdx.x;
box_num = blockIdx.x/blk_per_box;
pid = box_num*points_per_box;
extra_blocks = blockIdx.x+1-box_num*blk_per_box;
pid += (extra_blocks-1)*blockDim.x;
pid += threadIdx.x;
gtid = pid;
//int gtid;
//gtid = blockIdx.x * blockDim.x + threadIdx.x;
if(!((blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
myPosition = globalX[gtid];
myDelta = globalDelta[gtid];
}
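// Tile over every thread block: a tile is processed only if its box neighbors this box,
// and its particle positions and densities are staged in shared memory first.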
for(i=0, tile=0; tile < gridDim.x; i+=p, tile++)
{
if (globalBoxNbr[(blockIdx.x/blk_per_box)*(gridDim.x/blk_per_box)+(tile/blk_per_box)] == 1){
if(!((tile+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
box_num_y = tile/blk_per_box;
int idx = box_num_y*points_per_box;
extra_blocks_y = tile+1-box_num_y*blk_per_box;
idx += (extra_blocks_y-1)*blockDim.x;
idx += threadIdx.x;
//int idx = tile*blockDim.x + threadIdx.x;
//shPosition[threadIdx.x] = globalX[idx];
//shDensity[threadIdx.x] = globalF[idx];
shPosDen[2*threadIdx.x+0] = globalX[idx];
shPosDen[2*threadIdx.x+1] = globalF[idx];
}
__syncthreads();
//acc = tile_calculation(myPosition, acc);
if(!((blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
if(!((tile+1)%blk_per_box==0)){
#pragma unroll
for(unsigned int counter = 0; counter < blockDim.x; counter++)
{
acc = bodyBodyInteractionStokes(myPosition, shPosDen[2*counter+0], shPosDen[2*counter+1], acc, myDelta);
}
}
else{
#pragma unroll
for(unsigned int counter = 0; counter < last_box_threads; counter++)
{
acc = bodyBodyInteractionStokes(myPosition, shPosDen[2*counter+0], shPosDen[2*counter+1], acc, myDelta);
}
}
}
}
__syncthreads();
}
if(!((blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
//save result in global memory
double3 acc3 = {1.0/(8*M_PI)*acc.x, 1.0/(8*M_PI)*acc.y, 1.0/(8*M_PI)*acc.z};
globalA[gtid] = acc3;
}
}
| b71658202df298d5031a10b05d8b5e4035049a01.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define EPS 1e-8
#define N 10000000
#define MAX_ERR 1e-6
//#define nb 23814 //no. of n bodies
//#define nb 1350
//#define nb 294
//#define nb 5766
//#define p 31 //no of threads in each block
#define PI 3.14159265358979323846
#define PIx8 25.132741228718345
extern "C" __device__ double smoothfun2(double x)
{
double a = erf(x);
double b = (4.0*x*x*x*x - 14.0*x*x + 3.0);
double c = -(2.0/3.0)*x*b*exp(-1.0*x*x)*(1.0/sqrt(M_PI));
return a+c;
}
extern "C" __device__ double smoothfun1(double x)
{
double a = erf(x);
double b = (2.0*x*x - 5.0);
double c = -(2.0/3.0)*x*b*exp(-1.0*x*x)*(1.0/sqrt(M_PI));
return a+c;
}
extern "C" __device__ double3 bodyBodyInteractionStokes(double3 bi, double3 bj, double3 fj, double3 ai, double delta)
{
//TODO: add delta parameter as input reg parameter
//33 FLOP ; change this to include regularization
//fj in shared memory too, fj has SL density,pou, area element, mesh size already in it
double3 r;
//double delta = 0.1114403363930094; //0.20795502088527543; //0.38805779048337563;
//delta = 0.20795502088527543; //0.38805779048337563;
//double delta = 0.38805779048337563;
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
double distSqr_zc; //zero corrected
double distSqr = r.x *r.x + r.y*r.y + r.z*r.z;
distSqr_zc = distSqr;
if(distSqr <= 0.0) distSqr_zc = 1.0;
double dist = sqrt(distSqr);
double distSixth = distSqr_zc*distSqr_zc*distSqr_zc;
double invDistCube = 1.0/sqrt(distSixth);
double invDist = 1.0/sqrt(distSqr_zc);
double fdotr = fj.x*r.x + fj.y*r.y + fj.z*r.z;
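// Regularized Stokeslet: the contribution is f*s1 + (f.r)*r*s2, where the smoothing
// (blob) functions remove the 1/r and 1/r^3 singularities; the r = 0 case below uses
// their finite limits.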
double s1 = invDist*smoothfun1(dist/delta); //smoothing function 1
double s2 = fdotr*invDistCube*smoothfun2(dist/delta); //smoothing function 2
if(distSqr<=0.0)
{
s1 = 16.0/(3*sqrt(M_PI)*delta);
s2 = fdotr*32.0/(3*sqrt(M_PI)*delta*delta*delta);
}
//ai.x += fj.x*s1;
//ai.y += fj.y*s1;
//ai.z += fj.z*s1;
ai.x += fj.x*s1 + r.x*s2;
ai.y += fj.y*s1 + r.y*s2;
ai.z += fj.z*s1 + r.z*s2;
return ai;
}
extern "C" __global__ void calculate_forces_stokes(void *devX, void *devF, void *devA, void *delta, int nb, int p, void *bn, int blk_per_box, int last_box_threads, int points_per_box, int knn)
{
// bn is flattened box neighbor matrix kxk
// k is number of boxes, gridDim.x is equal to k
//extern __shared__ float3 shPosition[];
//extern __shared__ float3 shDensity[];
extern __shared__ double3 shPosDen[];
double3 *globalX = (double3 *)devX;
double3 *globalF = (double3 *)devF; //SL density
double3 *globalA = (double3 *)devA;
double *globalDelta = (double *)delta; //reg parameter
double *globalBoxNbr = (double *)bn; //box neighbors k^2, 0 or 1 values
double3 myPosition;
double myDelta;
int i, tile;
double3 acc = {0.0, 0.0, 0.0};
int box_num_y,extra_blocks_y;
int extra_blocks,gtid,pid,box_num; //pid is particle id
//gtid = blockIdx.x * blockDim.x + threadIdx.x;
box_num = blockIdx.x/blk_per_box;
pid = box_num*points_per_box;
extra_blocks = blockIdx.x+1-box_num*blk_per_box;
pid += (extra_blocks-1)*blockDim.x;
pid += threadIdx.x;
gtid = pid;
//int gtid;
//gtid = blockIdx.x * blockDim.x + threadIdx.x;
if(!((blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
myPosition = globalX[gtid];
myDelta = globalDelta[gtid];
}
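// Tile over every thread block: a tile is processed only if its box neighbors this box,
// and its particle positions and densities are staged in shared memory first.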
for(i=0, tile=0; tile < gridDim.x; i+=p, tile++)
{
if (globalBoxNbr[(blockIdx.x/blk_per_box)*(gridDim.x/blk_per_box)+(tile/blk_per_box)] == 1){
if(!((tile+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
box_num_y = tile/blk_per_box;
int idx = box_num_y*points_per_box;
extra_blocks_y = tile+1-box_num_y*blk_per_box;
idx += (extra_blocks_y-1)*blockDim.x;
idx += threadIdx.x;
//int idx = tile*blockDim.x + threadIdx.x;
//shPosition[threadIdx.x] = globalX[idx];
//shDensity[threadIdx.x] = globalF[idx];
shPosDen[2*threadIdx.x+0] = globalX[idx];
shPosDen[2*threadIdx.x+1] = globalF[idx];
}
__syncthreads();
//acc = tile_calculation(myPosition, acc);
if(!((blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
if(!((tile+1)%blk_per_box==0)){
#pragma unroll
for(unsigned int counter = 0; counter < blockDim.x; counter++)
{
acc = bodyBodyInteractionStokes(myPosition, shPosDen[2*counter+0], shPosDen[2*counter+1], acc, myDelta);
}
}
else{
#pragma unroll
for(unsigned int counter = 0; counter < last_box_threads; counter++)
{
acc = bodyBodyInteractionStokes(myPosition, shPosDen[2*counter+0], shPosDen[2*counter+1], acc, myDelta);
}
}
}
}
__syncthreads();
}
if(!((blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
//save result in global memory
double3 acc3 = {1.0/(8*M_PI)*acc.x, 1.0/(8*M_PI)*acc.y, 1.0/(8*M_PI)*acc.z};
globalA[gtid] = acc3;
}
}
|
978d5045d7d259fd2f023fe6cf8695c670e1b2b9.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include "CUDASolver.hpp"
#include "Discretization.cu"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define BLK_SIZE 128
#define BLK_SIZE_2D 32
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define chk(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
int get_num_blks(int size) { return (size + BLK_SIZE - 1) / BLK_SIZE; }
dim3 get_num_blks_2d(int size_x, int size_y) {
return dim3((size_x + BLK_SIZE_2D - 1) / BLK_SIZE_2D, (size_y + BLK_SIZE_2D - 1) / BLK_SIZE_2D);
}
// Allocate a single T on the device and copy val into it.
template <typename T> inline void malloc_assign(T *&dev_ptr, T val) {
chk(hipMalloc(&dev_ptr, sizeof(T)));
chk(hipMemcpy(dev_ptr, &val, sizeof(T), hipMemcpyHostToDevice));
}
__global__ void init(Real *a, Real val, int size) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
a[i] = val;
}
}
__global__ void saxpy(Real *a, Real *x, Real *y, int size) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
y[i] = *a * x[i] + y[i];
}
}
__global__ void smaxpy(Real *a, Real *x, Real *y, int size) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
y[i] = -(*a) * x[i] + y[i];
}
}
__global__ void saxpy2(Real *a, Real *x, Real *y, int size) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
y[i] = x[i] + *a * y[i];
}
}
__global__ void vec_dot_vec(Real *a, Real *b, Real *o, int size) {
__shared__ Real sdata[BLK_SIZE];
uint32_t tid = threadIdx.x;
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0) {
*o = 0;
}
if (i < size) {
sdata[tid] = a[i] * b[i];
} else {
sdata[tid] = 0;
}
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = sdata[tid] + sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) {
atomicAdd(o, sdata[0]);
}
}
// https://www.nvidia.com/docs/IO/66889/nvr-2008-004.pdf
__global__ void spmv_dia(Real *data, int *offsets, int num_rows, int num_cols, int num_diags, Real *x, Real *y) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
Real dot = 0;
y[row] = 0;
for (int n = 0; n < num_diags; n++) {
int col = row + offsets[n];
Real val = data[num_rows * n + row];
if (col >= 0 && col < num_cols) dot += val * x[col];
}
y[row] += dot;
}
}
// See https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
__global__ void reduce_abs_max(Real *input, Real *output, int size) {
extern __shared__ Real sdata[];
uint32_t tid = threadIdx.x;
uint32_t i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
Real curr_max = (i < size) ? real_abs(input[i]) : 0;
if (i + blockDim.x < size) {
curr_max = real_max(curr_max, real_abs(input[i + blockDim.x]));
}
sdata[tid] = curr_max;
__syncthreads();
for (uint32_t s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = real_max(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = sdata[0];
}
}
__global__ void reduce_min(Real *input, Real *output, int size) {
extern __shared__ Real sdata[];
uint32_t tid = threadIdx.x;
uint32_t i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
Real curr_max = (i < size) ? input[i] : 0;
if (i + blockDim.x < size) {
curr_max = real_min(curr_max, (input[i + blockDim.x]));
}
sdata[tid] = curr_max;
__syncthreads();
for (uint32_t s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = real_min(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = sdata[0];
}
}
__global__ void reduce_max(Real *input, Real *output, int size) {
extern __shared__ Real sdata[];
uint32_t tid = threadIdx.x;
uint32_t i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
Real curr_max = (i < size) ? input[i] : 0;
if (i + blockDim.x < size) {
curr_max = real_max(curr_max, (input[i + blockDim.x]));
}
sdata[tid] = curr_max;
__syncthreads();
for (uint32_t s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = real_max(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = sdata[0];
}
}
__global__ void scalar_div(Real *num, Real *denom, Real *o) { *o = *num / *denom; }
__global__ void scalar_cpy(Real *dst, Real *src) { *dst = *src; }
void solve_pcg(Real *A, int *A_offsets, int num_diag, Real *x, Real *b, Real *q, Real *d, Real *r, Real *r_dot_r_old,
Real *r_dot_r, Real *z, Real *cg_beta, Real &delta_new, Real *cg_alpha, Real *d_dot_q, int precondition,
Real *M, int *M_offsets, int m_num_diag, uint32_t &it, uint32_t max_iter, Real eps, int vec_size) {
int num_blocks = get_num_blks(vec_size);
hipMemcpy(r, b, vec_size * sizeof(Real), hipMemcpyDeviceToDevice);
hipMemset(x, 0, vec_size * sizeof(Real));
if (precondition != -1) {
hipLaunchKernelGGL(( spmv_dia), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, M, M_offsets, vec_size, vec_size, m_num_diag, r, d);
hipLaunchKernelGGL(( vec_dot_vec), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, r, d, r_dot_r, vec_size);
} else {
hipMemcpy(d, b, vec_size * sizeof(Real), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( vec_dot_vec), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, r, r, r_dot_r, vec_size);
}
hipMemcpy(&delta_new, r_dot_r, sizeof(Real), hipMemcpyDeviceToHost);
Real cond = delta_new * eps * eps;
it = 0;
while (it < max_iter && delta_new > cond) {
// q <- A * d
hipLaunchKernelGGL(( spmv_dia), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, A, A_offsets, vec_size, vec_size, num_diag, d, q);
hipLaunchKernelGGL(( vec_dot_vec), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, d, q, d_dot_q, vec_size);
// cg_alpha <- r_dot_r / d_dot_q
hipLaunchKernelGGL(( scalar_div), dim3(1), dim3(1), 0, 0, r_dot_r, d_dot_q, cg_alpha);
// x <- x - cg_alpha * d
hipLaunchKernelGGL(( smaxpy), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, cg_alpha, d, x, vec_size);
// r <- r - cg_alpha * q
hipLaunchKernelGGL(( smaxpy), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, cg_alpha, q, r, vec_size);
hipLaunchKernelGGL(( scalar_cpy), dim3(1), dim3(1), 0, 0, r_dot_r_old, r_dot_r);
if (precondition != -1) {
// z <- M * r
hipLaunchKernelGGL(( spmv_dia), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, M, M_offsets, vec_size, vec_size, m_num_diag, r, z);
hipLaunchKernelGGL(( vec_dot_vec), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, r, z, r_dot_r, vec_size);
} else {
hipLaunchKernelGGL(( vec_dot_vec), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, r, r, r_dot_r, vec_size);
}
// cg_beta <- r_dot_r / r_dot_r_old
hipLaunchKernelGGL(( scalar_div), dim3(1), dim3(1), 0, 0, r_dot_r, r_dot_r_old, cg_beta);
if (precondition != -1) {
// d <- z + cg_beta * d
hipLaunchKernelGGL(( saxpy2), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, cg_beta, z, d, vec_size);
} else {
// d <- r + cg_beta *d
hipLaunchKernelGGL(( saxpy2), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, cg_beta, r, d, vec_size);
}
it++;
hipMemcpy(&delta_new, r_dot_r, sizeof(Real), hipMemcpyDeviceToHost);
}
}
void get_abs_max(Real *input, Real *res, Real &out, int size) {
int num_blks(get_num_blks(size));
int smemsize = min(BLK_SIZE, size);
hipLaunchKernelGGL(( reduce_abs_max), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, input, res, size);
while (num_blks != 1) {
size = (int)ceil(size / Real(BLK_SIZE));
smemsize = min(BLK_SIZE, size);
num_blks = get_num_blks(size);
hipLaunchKernelGGL(( reduce_abs_max), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, res, res, size);
}
hipMemcpy(&out, res, sizeof(Real), hipMemcpyDeviceToHost);
}
void solve_pcg2(Real *A, int *A_offsets, int num_diag, Real *x, Real *b, Real *q, Real *d, Real *r, Real *rho_old,
Real *rho, Real *z, Real *cg_beta, Real *residual, Real &delta_new, Real *cg_alpha, Real *d_dot_q,
int precondition, Real *M, int *M_offsets, int m_num_diag, uint32_t &it, uint32_t max_iter, Real eps,
int vec_size) {
int num_blocks = get_num_blks(vec_size);
Real residual_out;
hipMemcpy(r, b, vec_size * sizeof(Real), hipMemcpyDeviceToDevice);
hipMemset(x, 0, vec_size * sizeof(Real));
get_abs_max(r, residual, residual_out, vec_size);
if (residual_out == 0) {
it = 0;
return;
}
if (precondition != -1) {
hipLaunchKernelGGL(( spmv_dia), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, M, M_offsets, vec_size, vec_size, m_num_diag, r, d);
hipLaunchKernelGGL(( vec_dot_vec), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, r, d, rho, vec_size);
} else {
hipMemcpy(d, b, vec_size * sizeof(Real), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( vec_dot_vec), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, r, r, rho, vec_size);
}
Real residual_0 = residual_out;
it = 0;
while (it < max_iter) {
// q <- A * d
hipMemset(q, 0, vec_size * sizeof(Real));
hipLaunchKernelGGL(( spmv_dia), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, A, A_offsets, vec_size, vec_size, num_diag, d, q);
hipLaunchKernelGGL(( vec_dot_vec), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, d, q, d_dot_q, vec_size);
// cg_alpha <- rho / d_dot_q
hipLaunchKernelGGL(( scalar_div), dim3(1), dim3(1), 0, 0, rho, d_dot_q, cg_alpha);
// x <- x - cg_alpha * d
hipLaunchKernelGGL(( smaxpy), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, cg_alpha, d, x, vec_size);
// r <- r - cg_alpha * q
hipLaunchKernelGGL(( smaxpy), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, cg_alpha, q, r, vec_size);
get_abs_max(r, residual, residual_out, vec_size);
if (residual_out <= eps) {
break;
}
hipLaunchKernelGGL(( scalar_cpy), dim3(1), dim3(1), 0, 0, rho_old, rho);
if (precondition != -1) {
// z <- M * r
hipLaunchKernelGGL(( spmv_dia), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, M, M_offsets, vec_size, vec_size, m_num_diag, r, z);
hipLaunchKernelGGL(( vec_dot_vec), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, r, z, rho, vec_size);
} else {
hipLaunchKernelGGL(( vec_dot_vec), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, r, r, rho, vec_size);
}
// cg_beta <- rho / rho_old
hipLaunchKernelGGL(( scalar_div), dim3(1), dim3(1), 0, 0, rho, rho_old, cg_beta);
if (precondition != -1) {
// d <- z + cg_beta * d
hipLaunchKernelGGL(( saxpy2), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, cg_beta, z, d, vec_size);
} else {
// d <- r + cg_beta *d
hipLaunchKernelGGL(( saxpy2), dim3(num_blocks), dim3(BLK_SIZE), 0, 0, cg_beta, r, d, vec_size);
}
it++;
}
delta_new = residual_out / residual_0;
}
__global__ void sor_iter(Real *P, Real *RS, Real coeff, int *cell_type, int imax, int jmax, Real omega, Real inv_dx,
Real inv_dy, int parity) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
if (parity == 0 && ((i + j) % 2) == 0) {
return;
} else if (parity == 1 && ((i + j) % 2) == 1) {
return;
}
Real p_stencil[4] = {at(P, i + 1, j), at(P, i - 1, j), at(P, i, j + 1), at(P, i, j - 1)};
at(P, i, j) = (1 - omega) * at(P, i, j) + coeff * (sor_helper(p_stencil, inv_dx, inv_dy) - at(RS, i, j));
}
__global__ void calc_residual(Real *P, Real *RS, int *cell_type, int imax, int jmax, Real inv_dx, Real inv_dy,
Real *residual) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real rloc = 0;
Real p_laplacian[5] = {at(P, i + 1, j), at(P, i, j), at(P, i - 1, j), at(P, i, j + 1), at(P, i, j - 1)};
Real val = laplacian_5(p_laplacian, inv_dx, inv_dy) - at(RS, i, j);
rloc += val * val;
at(residual, i, j) = rloc;
}
__global__ void reduce_residual(Real *residual, Real *o, int size) {
__shared__ Real sdata[BLK_SIZE];
uint32_t tid = threadIdx.x;
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0) {
*o = 0;
}
if (i < size) {
sdata[tid] = residual[i];
} else {
sdata[tid] = 0;
}
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = sdata[tid] + sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) {
atomicAdd(o, sdata[0]);
}
}
__global__ void negate_p(Real *p, int size) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
p[i] = -p[i];
}
}
__device__ Real vel_kernel(Real fg, Real dt, Real p[2], Real inv_dxy) { return fg - dt * inv_dxy * (p[1] - p[0]); }
__global__ void calc_vel(Real *u, Real *v, Real *p, Real *f, Real *g, int *cell_type, Real dt, int imax, int jmax,
Real dx, Real dy) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
Real p_diff_u[2] = {at(p, i, j), at(p, i + 1, j)};
Real p_diff_v[2] = {at(p, i, j), at(p, i, j + 1)};
/* at(u, i, j) = vel_kernel(at(f, i, j), *dt, p_diff_u, inv_dx);
at(v, i, j) = vel_kernel(at(g, i, j), *dt, p_diff_v, inv_dy);*/
at(u, i, j) = at(f, i, j) - dt * inv_dx * (p_diff_u[1] - p_diff_u[0]);
at(v, i, j) = at(g, i, j) - dt * inv_dy * (p_diff_v[1] - p_diff_v[0]);
}
__global__ void enforce_boundary(Real *u, int *row_start, int *col_idx, Real *mat, Real *rhs_vec, int size) {
uint32_t row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < size) {
Real sum = 0;
for (int j = row_start[row]; j < row_start[row + 1]; j++) {
sum += mat[j] * u[col_idx[j]];
}
u[row] = sum + 2 * rhs_vec[row];
}
}
void uv_boundary(Real *u, Real *v, int *row_start_u, int *row_start_v, int *col_idx_u, int *col_idx_v, Real *mat_u,
Real *mat_v, Real *rhs_vec_u, Real *rhs_vec_v, int size) {
int num_blks(get_num_blks(size));
hipLaunchKernelGGL(( enforce_boundary), dim3(num_blks), dim3(BLK_SIZE), 0, 0, u, row_start_u, col_idx_u, mat_u, rhs_vec_u, size);
hipLaunchKernelGGL(( enforce_boundary), dim3(num_blks), dim3(BLK_SIZE), 0, 0, v, row_start_v, col_idx_v, mat_v, rhs_vec_v, size);
}
void t_boundary(Real *t, int *row_start_t, int *col_idx_t, Real *mat_t, Real *rhs_vec_t, int size) {
int num_blks(get_num_blks(size));
hipLaunchKernelGGL(( enforce_boundary), dim3(num_blks), dim3(BLK_SIZE), 0, 0, t, row_start_t, col_idx_t, mat_t, rhs_vec_t, size);
}
__global__ void calc_t(Real *u, Real *v, Real dx, Real dy, Real *t_new, Real *t_old, int *cell_type, Real alpha,
Real dt, Real gamma, int imax, int jmax) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
Real inv_dx2 = inv_dx * inv_dx;
Real inv_dy2 = inv_dy * inv_dy;
Real u_stencil[2] = {at(u, i - 1, j), at(u, i, j)};
Real v_stencil[2] = {at(v, i, j - 1), at(v, i, j)};
Real t_laplacian[5] = {at(t_old, i + 1, j), at(t_old, i, j), at(t_old, i - 1, j), at(t_old, i, j + 1),
at(t_old, i, j - 1)};
at(t_new, i, j) = at(t_new, i, j) + dt * (alpha * laplacian_5(t_laplacian, inv_dx, inv_dy) -
convection_uT(u_stencil, t_laplacian, inv_dx, gamma) -
convection_vT(v_stencil, t_laplacian, inv_dy, gamma));
}
__device__ Real calculate_f1_sst(Real omega, Real dk_di, Real dw_di, Real k, Real dist, Real nu) {
Real cd_kw = real_max(2 * 0.856 * 1 / omega * dk_di * dw_di, 1e-10);
Real f1 =
real_tanh(real_pow(real_min(real_max(real_sqrt(k) / (0.09 * omega * dist), 500 * nu / (dist * dist * omega)),
4 * 0.856 * k / (cd_kw * dist * dist)),
4));
return f1;
}
__device__ Real calculate_f2_sst(Real omega, Real k, Real dist, Real nu) {
Real max_sqr = real_max(2 * real_sqrt(k) / (0.09 * omega * dist), 500 * nu / (dist * dist * omega));
return real_tanh(max_sqr * max_sqr);
}
__device__ Real calculate_sst_term(Real K[3], Real EPS[3], Real kij, Real eij, Real dist, Real inv_dx, Real inv_dy,
Real nu) {
Real dk_dx = (K[0] - K[1]) * inv_dx;
Real dw_dx = (EPS[0] - EPS[1]) * inv_dx;
Real dk_dy = (K[0] - K[2]) * inv_dy;
Real dw_dy = (EPS[0] - EPS[2]) * inv_dy;
Real f1_x = calculate_f1_sst(eij, dk_dx, dw_dx, kij, dist, nu);
Real f1_y = calculate_f1_sst(eij, dk_dy, dw_dy, kij, dist, nu);
Real res_x = 2 * (1 - f1_x) * 0.856 * 1 / eij * dk_dx * dw_dx;
Real res_y = 2 * (1 - f1_y) * 0.856 * 1 / eij * dk_dy * dw_dy;
return res_x + res_y;
}
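// Eddy viscosity per turbulence model: 1 = k-epsilon (C_mu * k^2 / eps), 2 = k-omega
// (k / omega, EPS apparently storing omega for models 2 and 3), 3 = SST-style limiter
// a1 * k / max(a1 * omega, S * F2). The molecular viscosity _nu is added on top.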
__global__ void calculate_nu_t(Real *NU_T, Real *K, Real *EPS, Real *dists, Real *S, int *cell_type, Real _nu, int imax,
int jmax, const int turb_model) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real kij = at(K, i, j);
Real epsij = at(EPS, i, j);
if (turb_model == 1) {
at(NU_T, i, j) = 0.09 * kij * kij / epsij + _nu;
} else if (turb_model == 2) {
at(NU_T, i, j) = kij / epsij + _nu;
} else if (turb_model == 3) {
const Real a1 = 5.0 / 9.0;
auto dist = at(dists, i, j);
auto f2 = calculate_f2_sst(epsij, kij, dist, _nu);
at(NU_T, i, j) = a1 * kij / (real_max(a1 * epsij, at(S, i, j) * f2)) + _nu;
}
}
__global__ void calculate_nu_ij(Real *NU_I, Real *NU_J, Real *K, Real *EPS, Real *dists, Real *S, int *cell_type,
Real _nu, int imax, int jmax, const int turb_model) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real num_i = (at(K, i, j) + at(K, i + 1, j)) / 2;
Real denom_i = (at(EPS, i, j) + at(EPS, i + 1, j)) / 2;
Real num_j = (at(K, i, j) + at(K, i, j + 1)) / 2;
Real denom_j = (at(EPS, i, j) + at(EPS, i, j + 1)) / 2;
if (turb_model == 1) {
at(NU_I, i, j) = 0.09 * num_i * num_i / denom_i;
at(NU_J, i, j) = 0.09 * num_j * num_j / denom_j;
} else if (turb_model == 2) {
at(NU_I, i, j) = 0.5 * num_i / denom_i;
at(NU_J, i, j) = 0.5 * num_j / denom_j;
} else if (turb_model == 3) {
constexpr Real a1 = 5.0 / 9.0;
auto dist = at(dists, i, j);
auto f2_1 = calculate_f2_sst(denom_i, num_i, dist, _nu);
auto f2_2 = calculate_f2_sst(denom_j, num_j, dist, _nu);
at(NU_I, i, j) = 0.85 * a1 * num_i / real_max(a1 * denom_i, at(S, i, j) * f2_1);
at(NU_J, i, j) = 0.5 * a1 * num_j / real_max(a1 * denom_j, at(S, i, j) * f2_2);
}
}
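// Explicit update of the turbulence quantities: convection and nu-weighted diffusion of k
// and eps/omega, production (k3, limited for SST), dissipation and, for SST, the
// cross-diffusion term. K_old/EPS_old hold the values of the previous time step.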
__global__ void calculate_k_and_epsilon(Real *K_old, Real *EPS_old, Real *K, Real *EPS, Real *NU_T, Real *NU_I,
Real *NU_J, Real *U, Real *V, int *cell_type, Real _nu, int imax, int jmax,
Real dt, Real inv_dx, Real inv_dy, int _turb_model, Real *S, Real *dists) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real f2_coeff = 1;
auto nut = at(NU_T, i, j);
auto kij = at(K_old, i, j);
auto eij = at(EPS_old, i, j);
Real K_stencil[5] = {at(K_old, i + 1, j), at(K_old, i, j), at(K_old, i - 1, j), at(K_old, i, j + 1),
at(K_old, i, j - 1)};
Real EPS_stencil[5] = {at(EPS_old, i + 1, j), at(EPS_old, i, j), at(EPS_old, i - 1, j), at(EPS_old, i, j + 1),
at(EPS_old, i, j - 1)};
Real U_diff[2] = {at(U, i - 1, j), at(U, i, j)};
Real V_diff[2] = {at(V, i, j - 1), at(V, i, j)};
Real NU_I_diff[2] = {at(NU_I, i, j), at(NU_I, i - 1, j)};
Real NU_J_diff[2] = {at(NU_J, i, j), at(NU_J, i, j - 1)};
Real U_stencil[6] = {at(U, i, j), at(U, i - 1, j), at(U, i, j + 1),
at(U, i - 1, j + 1), at(U, i, j - 1), at(U, i - 1, j - 1)};
Real V_stencil[6] = {at(V, i, j), at(V, i, j - 1), at(V, i + 1, j),
at(V, i + 1, j - 1), at(V, i - 1, j), at(V, i - 1, j - 1)};
auto k1_1 = convection_UKEPS(U_diff, K_stencil, inv_dx);
auto k1_2 = convection_VKEPS(V_diff, K_stencil, inv_dy);
auto e1_1 = convection_UKEPS(U_diff, EPS_stencil, inv_dx);
auto e1_2 = convection_VKEPS(V_diff, EPS_stencil, inv_dy);
auto k2 = laplacian_nu(K_stencil, NU_I_diff, NU_J_diff, inv_dx, inv_dy, _nu, 1);
auto e2 = laplacian_nu(EPS_stencil, NU_I_diff, NU_J_diff, inv_dx, inv_dy, _nu, _turb_model == 1 ? 1.3 : 1);
Real k3;
if (_turb_model != 3) {
k3 = nut * mean_strain_rate_squared(U_stencil, V_stencil, inv_dx, inv_dy);
} else {
k3 = nut * mean_strain_rate_squared_store_S(U_stencil, V_stencil, S, i, j, imax, jmax, inv_dx, inv_dy);
k3 = real_min(k3, 10 * 0.09 * kij * eij);
}
auto e3 = (_turb_model == 1 ? 1.44 : 5.0 / 9) * eij * k3 / kij;
auto e4 = _turb_model == 1 ? f2_coeff * 1.92 * eij * eij / kij : 3.0 / 40 * eij * eij;
Real eij_mul = _turb_model == 1 ? 1 : 0.09 * kij;
auto kij_new = kij + dt * (-(k1_1 + k1_2) + k2 + k3 - eij_mul * eij);
Real sst_term = 0;
if (_turb_model == 3) {
Real K_diff[3] = {at(K_old, i, j), at(K_old, i - 1, j), at(K_old, i, j - 1)};
Real EPS_diff[3] = {at(EPS_old, i, j), at(EPS_old, i - 1, j), at(EPS_old, i, j - 1)};
auto dist = at(dists, i, j);
sst_term = calculate_sst_term(K_diff, EPS_diff, kij, eij, dist, inv_dx, inv_dy, _nu);
}
auto epsij_new = eij + dt * (-(e1_1 + e1_2) + e2 + e3 - e4 + sst_term);
at(K, i, j) = kij_new;
at(EPS, i, j) = epsij_new;
}
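// Intermediate velocities F and G: explicit momentum step (viscous Laplacian minus
// donor-cell convection) without the pressure gradient. With temperature enabled, a
// Boussinesq buoyancy term based on the face-averaged temperature is subtracted.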
__global__ void calc_fg(Real *f, Real *g, Real *u, Real *v, bool calc_temp, Real dx, Real dy, Real *t, int *cell_type,
Real dt, Real gamma, Real nu, Real beta, Real gx, Real gy, int imax, int jmax) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
Real inv_dx2 = inv_dx * inv_dx;
Real inv_dy2 = inv_dy * inv_dy;
// 5-point + 1 stencil for U and V
Real u_stencil[6] = {at(u, i + 1, j), at(u, i, j), at(u, i - 1, j),
at(u, i, j + 1), at(u, i, j - 1), at(u, i - 1, j + 1)};
Real v_stencil[6] = {at(v, i + 1, j), at(v, i, j), at(v, i - 1, j),
at(v, i, j + 1), at(v, i, j - 1), at(v, i + 1, j - 1)};
// Calculate fluxes
at(f, i, j) = at(u, i, j) + dt * (nu * laplacian(u_stencil, inv_dx, inv_dy) -
convection_u(u_stencil, v_stencil, inv_dx, inv_dy, gamma));
at(g, i, j) = at(v, i, j) + dt * (nu * laplacian(v_stencil, inv_dx, inv_dy) -
convection_v(u_stencil, v_stencil, inv_dx, inv_dy, gamma));
if (calc_temp) {
Real term1 = at(t, i, j) + at(t, i + 1, j);
Real term2 = at(t, i, j) + at(t, i, j + 1);
at(f, i, j) -= beta * dt / 2 * (term1)*gx;
at(g, i, j) -= beta * dt / 2 * (term2)*gy;
}
}
__global__ void calc_fg_turbulent(Real *f, Real *g, Real *u, Real *v, Real *NU_T, bool calc_temp, Real dx, Real dy,
Real *t, int *cell_type, Real dt, Real gamma, Real nu, Real beta, Real gx, Real gy,
int imax, int jmax) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
Real inv_dx2 = inv_dx * inv_dx;
Real inv_dy2 = inv_dy * inv_dy;
// 5-point + 1 stencil for U and V
Real u_stencil[6] = {at(u, i + 1, j), at(u, i, j), at(u, i - 1, j),
at(u, i, j + 1), at(u, i, j - 1), at(u, i - 1, j + 1)};
Real v_stencil[6] = {at(v, i + 1, j), at(v, i, j), at(v, i - 1, j),
at(v, i, j + 1), at(v, i, j - 1), at(v, i + 1, j - 1)};
Real nu_term1 = (at(NU_T, i, j) + at(NU_T, i + 1, j)) / 2;
Real nu_term2 = (at(NU_T, i, j) + at(NU_T, i, j + 1)) / 2;
// Calculate fluxes
at(f, i, j) = at(u, i, j) + dt * (nu_term1 * laplacian(u_stencil, inv_dx, inv_dy) -
convection_u(u_stencil, v_stencil, inv_dx, inv_dy, gamma));
at(g, i, j) = at(v, i, j) + dt * (nu_term2 * laplacian(v_stencil, inv_dx, inv_dy) -
convection_v(u_stencil, v_stencil, inv_dx, inv_dy, gamma));
if (calc_temp) {
Real term1 = at(t, i, j) + at(t, i + 1, j);
Real term2 = at(t, i, j) + at(t, i, j + 1);
at(f, i, j) -= beta * dt / 2 * (term1)*gx;
at(g, i, j) -= beta * dt / 2 * (term2)*gy;
}
}
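// Boundary-cell neighbourhood encoding (built in CudaSolver::initialize): bits 0-3 mark a
// fluid neighbour to the right/left/top/bottom, bits 4-7 mark the corner combinations
// (right+top, right+bottom, left+top, left+bottom) and bits 8+ store the boundary type,
// where type 0 is treated as an outlet and type 1 as an inlet in the kernels below.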
__global__ void fg_boundary(Real *f, Real *g, Real *u, Real *v, int imax, int jmax, uint32_t *neighborhood,
int *cell_type) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 1) {
return;
}
uint32_t neighbors = at(neighborhood, i, j) & 0xFF;
if ((neighbors & 0x1) == 1) {
at(f, i, j) = at(u, i, j);
}
if ((neighbors & 0x2) == 2) {
at(f, i - 1, j) = at(u, i - 1, j);
}
if ((neighbors & 0x4) == 4) {
at(g, i, j) = at(v, i, j);
}
if ((neighbors & 0x8) == 8) {
at(g, i, j - 1) = at(v, i, j - 1);
}
}
__global__ void p_boundary(Real *p, int imax, int jmax, uint32_t *neighborhood, int *cell_type, Real PI) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 1) {
return;
}
uint32_t type = at(neighborhood, i, j) >> 8;
uint32_t neighbors = at(neighborhood, i, j) & 0xFF;
if (type == 0) { // Outlet
at(p, i, j) = PI;
} else {
int diag = 0;
if ((neighbors & 0x10) == 16) { // Right + top
diag = 1;
at(p, i, j) = (at(p, i + 1, j) + at(p, i, j + 1)) / 2;
}
if ((neighbors & 0x20) == 32) { // Right + bottom
diag = 1;
at(p, i, j) = (at(p, i + 1, j) + at(p, i, j - 1)) / 2;
}
if ((neighbors & 0x40) == 64) { // Left + top
diag = 1;
at(p, i, j) = (at(p, i - 1, j) + at(p, i, j + 1)) / 2;
}
if ((neighbors & 0x80) == 128) { // Left + bottom
diag = 1;
at(p, i, j) = (at(p, i - 1, j) + at(p, i, j - 1)) / 2;
}
if (!diag) {
if ((neighbors & 0x1) == 1) { // Right
at(p, i, j) = at(p, i + 1, j);
}
if ((neighbors & 0x2) == 2) { // Left
at(p, i, j) = at(p, i - 1, j);
}
if ((neighbors & 0x4) == 4) { // Top
at(p, i, j) = at(p, i, j + 1);
}
if ((neighbors & 0x8) == 8) { // Bottom
at(p, i, j) = at(p, i, j - 1);
}
}
}
}
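// Wall/inlet treatment for k and eps/omega: inlet cells mirror the prescribed values wk and
// weps, all other boundary cells copy (or average, at corners) the neighbouring fluid values
// and recompute NU_T with the model-specific formula.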
__global__ void nu_t_boundary(Real *NU_T, Real *K, Real *EPS, int imax, int jmax, uint32_t *neighborhood,
int *cell_type, Real wk, Real weps, Real _nu, const int turb_model) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 1) {
return;
}
uint32_t type = at(neighborhood, i, j) >> 8;
uint32_t neighbors = at(neighborhood, i, j) & 0xFF;
Real k = 0;
Real eps = 0;
if (type == 1) { // Inlet
int valid = 0;
if ((neighbors & 0x1) == 1) { // Right
k = 2 * wk - at(K, i + 1, j);
eps = 2 * weps - at(EPS, i + 1, j);
valid = 1;
}
if ((neighbors & 0x2) == 2) { // Left
k = 2 * wk - at(K, i - 1, j);
eps = 2 * weps - at(EPS, i - 1, j);
valid = 1;
}
if ((neighbors & 0x4) == 4) { // Top
k = 2 * wk - at(K, i, j + 1);
eps = 2 * weps - at(EPS, i, j + 1);
valid = 1;
}
if ((neighbors & 0x8) == 8) { // Bottom
k = 2 * wk - at(K, i, j - 1);
eps = 2 * weps - at(EPS, i, j - 1);
valid = 1;
}
if (valid) {
at(K, i, j) = k;
at(EPS, i, j) = eps;
// at(NU_T, i, j) = 0.09 * k * k / eps + _nu;
}
} else { // Other
int diag = 0;
int valid = 0;
if ((neighbors & 0x10) == 16) { // Right + top
diag = 1;
k = (at(K, i + 1, j) + at(K, i, j + 1)) / 2;
eps = (at(EPS, i + 1, j) + at(EPS, i, j + 1)) / 2;
}
if ((neighbors & 0x20) == 32) { // Right + bottom
diag = 1;
k = (at(K, i + 1, j) + at(K, i, j - 1)) / 2;
eps = (at(EPS, i + 1, j) + at(EPS, i, j - 1)) / 2;
}
if ((neighbors & 0x40) == 64) { // Left + top
diag = 1;
k = (at(K, i - 1, j) + at(K, i, j + 1)) / 2;
eps = (at(EPS, i - 1, j) + at(EPS, i, j + 1)) / 2;
}
if ((neighbors & 0x80) == 128) { // Left + bottom
diag = 1;
k = (at(K, i - 1, j) + at(K, i, j - 1)) / 2;
eps = (at(EPS, i - 1, j) + at(EPS, i, j - 1)) / 2;
}
if (!diag) {
if ((neighbors & 0x1) == 1) { // Right
k = at(K, i + 1, j);
eps = at(EPS, i + 1, j);
valid = 1;
}
if ((neighbors & 0x2) == 2) { // Left
k = at(K, i - 1, j);
eps = at(EPS, i - 1, j);
valid = 1;
}
if ((neighbors & 0x4) == 4) { // Top
k = at(K, i, j + 1);
eps = at(EPS, i, j + 1);
valid = 1;
}
if ((neighbors & 0x8) == 8) { // Bottom
k = at(K, i, j - 1);
eps = at(EPS, i, j - 1);
valid = 1;
}
}
if (diag || valid) {
at(K, i, j) = k;
at(EPS, i, j) = eps;
if (turb_model == 1) {
at(NU_T, i, j) = 0.09 * k * k / eps + _nu;
} else if (turb_model == 2 || turb_model == 3) {
at(NU_T, i, j) = k / eps + _nu;
}
}
}
}
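// Red-black SOR for the pressure Poisson equation with fixed relaxation factor omega = 1.7.
// Each iteration sweeps both colours, rebuilds the per-cell residuals, reduces them on the
// device, reapplies the pressure boundary values and checks the RMS residual over the fluid
// cells against the tolerance.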
void solve_sor(Real *P, Real *P_tmp, Real *P_residual, Real *P_residual_out, uint32_t *neighborhood, int imax, int jmax,
Real *RS, int *cell_type, uint32_t &it, uint32_t max_iter, Real dx, Real dy, Real PI, Real tolerance,
Real &res, int num_fluid_cells) {
it = 0;
const Real omega = 1.7;
auto grid_size = imax * jmax;
Real coeff = omega / (2 * (1 / (dx * dx) + 1 / (dy * dy)));
dim3 blk_size_2d(BLK_SIZE_2D, BLK_SIZE_2D);
dim3 num_blks_2d = get_num_blks_2d(imax, jmax);
int num_blks_1d(get_num_blks(grid_size));
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
res = REAL_MAX;
while (it < max_iter && res > tolerance) {
hipLaunchKernelGGL(( sor_iter), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, P, RS, coeff, cell_type, imax, jmax, omega, inv_dx, inv_dy, 0);
hipLaunchKernelGGL(( sor_iter), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, P, RS, coeff, cell_type, imax, jmax, omega, inv_dx, inv_dy, 1);
hipMemset(P_residual, 0, grid_size * sizeof(Real));
hipLaunchKernelGGL(( calc_residual), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, P, RS, cell_type, imax, jmax, inv_dx, inv_dy, P_residual);
hipLaunchKernelGGL(( reduce_residual), dim3(num_blks_1d), dim3(BLK_SIZE), 0, 0, P_residual, P_residual_out, grid_size);
hipLaunchKernelGGL(( p_boundary), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, P, imax, jmax, neighborhood, cell_type, PI);
hipMemcpy(&res, P_residual_out, sizeof(Real), hipMemcpyDeviceToHost);
res = std::sqrt(res / num_fluid_cells);
it++;
}
}
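// Right-hand side of the pressure Poisson equation:
// rs_ij = ((F_ij - F_{i-1,j}) / dx + (G_ij - G_{i,j-1}) / dy) / dt.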
__global__ void calc_rs(Real *f, Real *g, Real *rs, Real dx, Real dy, int imax, int jmax, Real dt, int *cell_type) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
Real f_diff[2] = {at(f, i, j), at(f, i - 1, j)};
Real g_diff[2] = {at(g, i, j), at(g, i, j - 1)};
Real df = inv_dx * (f_diff[0] - f_diff[1]);
Real dg = inv_dy * (g_diff[0] - g_diff[1]);
at(rs, i, j) = (df + dg) * 1 / dt;
}
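// Adaptive time step: device reductions provide max|u|, max|v| and, with turbulence enabled,
// min(nu_t), max(k) and max(eps); the CFL and diffusive stability limits are combined on the
// host and scaled by the safety factor tau.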
Real calculate_dt(int imax, int jmax, Real *u, Real *v, Real *u_residual, Real *v_residual, Real *nu_residual,
Real *k_residual, Real *eps_residual, Real *nu_t, Real *k, Real *eps, Real dx, Real dy, Real tau,
Real nu, Real alpha, bool calc_temp, int turb_model) {
// Calculate uv max
int size = imax * jmax;
int num_blks(get_num_blks(size));
Real u_max_abs = 0;
Real v_max_abs = 0;
Real nu_min = REAL_MAX;
    Real k_max = 0;
    Real eps_max = 0;
Real dx2 = dx * dx;
Real dy2 = dy * dy;
    // The reduction kernels index shared memory with threadIdx.x for a full block of
    // BLK_SIZE threads, so the dynamic allocation must always cover BLK_SIZE entries.
    int smemsize = BLK_SIZE;
hipLaunchKernelGGL(( reduce_abs_max), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, u, u_residual, size);
hipLaunchKernelGGL(( reduce_abs_max), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, v, v_residual, size);
if (turb_model != 0) {
hipLaunchKernelGGL(( reduce_min), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, nu_t, nu_residual, size);
hipLaunchKernelGGL(( reduce_max), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, k, k_residual, size);
hipLaunchKernelGGL(( reduce_max), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, eps, eps_residual, size);
}
while (num_blks != 1) {
        size = (int)ceil(size / Real(BLK_SIZE));
        num_blks = get_num_blks(size);
hipLaunchKernelGGL(( reduce_abs_max), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, u_residual, u_residual, size);
hipLaunchKernelGGL(( reduce_abs_max), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, v_residual, v_residual, size);
if (turb_model != 0) {
hipLaunchKernelGGL(( reduce_min), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, nu_residual, nu_residual, size);
hipLaunchKernelGGL(( reduce_max), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, k_residual, k_residual, size);
hipLaunchKernelGGL(( reduce_max), dim3(num_blks), dim3(BLK_SIZE), smemsize * sizeof(Real), 0, eps_residual, eps_residual, size);
}
}
hipMemcpy(&u_max_abs, u_residual, sizeof(Real), hipMemcpyDeviceToHost);
hipMemcpy(&v_max_abs, v_residual, sizeof(Real), hipMemcpyDeviceToHost);
if (turb_model != 0) {
hipMemcpy(&nu_min, nu_residual, sizeof(Real), hipMemcpyDeviceToHost);
hipMemcpy(&k_max, k_residual, sizeof(Real), hipMemcpyDeviceToHost);
hipMemcpy(&eps_max, eps_residual, sizeof(Real), hipMemcpyDeviceToHost);
}
Real min_cond = ::min(dx / u_max_abs, dy / v_max_abs);
nu_min = (nu_min == REAL_MAX || nu_min == 0) ? nu : nu_min;
if (nu_min != 0) {
Real cond_spatial = 1.0 / (2.0 * nu) * ((dx2 * dy2) / (dx2 + dy2));
min_cond = ::min(min_cond, cond_spatial);
}
if (calc_temp) {
Real inv_dx = 1 / dx;
Real inv_dx2 = inv_dx * inv_dx;
Real inv_dy = 1 / dy;
Real inv_dy2 = inv_dy * inv_dy;
Real cond_temp = 1 / (2 * alpha * (inv_dx2 + inv_dy2));
min_cond = ::min(min_cond, cond_temp);
}
if (turb_model != 0) {
Real cond_5 = 1 / (2 * k_max * (1 / dx2 + 1 / dy2));
Real cond_6;
if (turb_model == 1) {
cond_6 = 1 / (2 * eps_max * (1 / dx2 + 1 / dy2));
} else if (turb_model == 2 || turb_model == 3) {
cond_6 = 1 / (2 * (eps_max * 0.09 * k_max) * (1 / dx2 + 1 / dy2));
}
min_cond = ::min(min_cond, cond_5);
min_cond = ::min(min_cond, cond_6);
}
return tau * min_cond;
}
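// One-time setup: build the fixed boundary matrices (CSR) and the pressure system in diagonal
// (DIA) storage, optionally a SPAI preconditioner, encode the per-cell neighbourhood bitmask
// and allocate/upload all device buffers used during the simulation.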
void CudaSolver::initialize() {
auto grid_x = _grid.imaxb();
auto grid_y = _grid.jmaxb();
auto grid_size = grid_x * grid_y;
build_pcg_matrix(_field, _grid, _boundaries, A_pcg, U_pcg, V_pcg, T_pcg, U_RHS, V_RHS, T_RHS, U_fixed, V_fixed,
T_fixed);
// Preprocess
std::vector<int> is_fluid(_grid.imaxb() * _grid.jmaxb(), 0);
    for (const auto &current_cell : _grid.fluid_cells()) {
int i = current_cell->i();
int j = current_cell->j();
is_fluid[_grid.imaxb() * j + i] = 1;
}
std::vector<BoundaryData> neighbors(_grid.imaxb() * _grid.jmaxb());
for (const auto &boundary : _boundaries) {
uint32_t type = boundary->get_type();
auto cells = boundary->_cells;
for (auto &cell : *cells) {
int i = cell->i();
int j = cell->j();
            BoundaryData data;
            data.neighborhood |= type << 8;
// data.idx = _grid.imaxb() * j + i;
if (cell->is_border(border_position::RIGHT)) {
data.neighborhood |= 1;
}
if (cell->is_border(border_position::LEFT)) {
data.neighborhood |= 2;
}
if (cell->is_border(border_position::TOP)) {
data.neighborhood |= 4;
}
if (cell->is_border(border_position::BOTTOM)) {
data.neighborhood |= 8;
}
if (cell->is_border(border_position::RIGHT) && cell->is_border(border_position::TOP)) {
data.neighborhood |= 16;
}
if (cell->is_border(border_position::RIGHT) && cell->is_border(border_position::BOTTOM)) {
data.neighborhood |= 32;
}
if (cell->is_border(border_position::LEFT) && cell->is_border(border_position::TOP)) {
data.neighborhood |= 64;
}
if (cell->is_border(border_position::LEFT) && cell->is_border(border_position::BOTTOM)) {
data.neighborhood |= 128;
}
neighbors[j * _grid.imaxb() + i] = data;
}
}
DiagonalSparseMatrix<Real> A_matrix_diag =
create_diagonal_matrix(A_pcg, _grid.imaxb(), _grid.jmaxb(), {-_grid.imaxb(), -1, 0, 1, _grid.imaxb()});
DiagonalSparseMatrix<Real> A_precond_diag;
if (_preconditioner != -1) {
A_precond_diag = create_preconditioner_spai(A_pcg, _grid, _preconditioner);
}
std::vector<Real> dists_vec;
if (_turb_model == 3) {
dists_vec.resize(grid_size);
for (int i = 0; i < grid_size; i++) {
dists_vec[i] = _grid._cells._container[i].closest_dist;
}
}
num_offsets_a = (int)A_matrix_diag.offsets.size();
num_offsets_m = (int)A_precond_diag.offsets.size();
auto t_matrix_data = T_fixed.value.data();
auto t_matrix_size = T_fixed.value.size();
auto t_row_start_data = T_fixed.rowstart.data();
auto t_row_start_size = T_fixed.rowstart.size();
auto t_col_idx_data = T_fixed.colindex.data();
auto t_col_idx_size = T_fixed.colindex.size();
auto t_rhs_data = T_RHS.data();
auto t_rhs_size = T_RHS.size();
auto u_matrix_data = U_fixed.value.data();
auto u_matrix_size = U_fixed.value.size();
auto v_matrix_data = V_fixed.value.data();
auto v_matrix_size = V_fixed.value.size();
auto u_row_start_data = U_fixed.rowstart.data();
auto u_row_start_size = U_fixed.rowstart.size();
auto u_col_idx_data = U_fixed.colindex.data();
auto u_col_idx_size = U_fixed.colindex.size();
auto v_row_start_data = V_fixed.rowstart.data();
auto v_row_start_size = V_fixed.rowstart.size();
auto v_col_idx_data = V_fixed.colindex.data();
auto v_col_idx_size = V_fixed.colindex.size();
auto u_rhs_data = U_RHS.data();
auto u_rhs_size = U_RHS.size();
auto v_rhs_data = V_RHS.data();
auto v_rhs_size = V_RHS.size();
hipMalloc(&U, grid_size * sizeof(Real));
hipMalloc(&V, grid_size * sizeof(Real));
hipMalloc(&F, grid_size * sizeof(Real));
hipMalloc(&G, grid_size * sizeof(Real));
hipMalloc(&P, grid_size * sizeof(Real));
hipMalloc(&P_temp, grid_size * sizeof(Real));
hipMalloc(&RS, grid_size * sizeof(Real));
hipMalloc(&U_residual, grid_size * sizeof(Real));
hipMalloc(&V_residual, grid_size * sizeof(Real));
if (_turb_model != 0) {
hipMalloc(&NU_residual, grid_size * sizeof(Real));
hipMalloc(&K_residual, grid_size * sizeof(Real));
hipMalloc(&EPS_residual, grid_size * sizeof(Real));
hipMalloc(&NU_T, grid_size * sizeof(Real));
hipMalloc(&NU_I, grid_size * sizeof(Real));
hipMalloc(&NU_J, grid_size * sizeof(Real));
hipMalloc(&K, grid_size * sizeof(Real));
hipMalloc(&K_old, grid_size * sizeof(Real));
hipMalloc(&EPS, grid_size * sizeof(Real));
hipMalloc(&EPS_old, grid_size * sizeof(Real));
hipMemset(NU_residual, 0, grid_size * sizeof(Real));
hipMemset(K_residual, 0, grid_size * sizeof(Real));
hipMemset(EPS_residual, 0, grid_size * sizeof(Real));
hipMemset(NU_T, 0, grid_size * sizeof(Real));
hipMemset(NU_I, 0, grid_size * sizeof(Real));
hipMemset(NU_J, 0, grid_size * sizeof(Real));
hipMemcpy(K, _field._K._container.data(), grid_size * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy(EPS, _field._EPS._container.data(), grid_size * sizeof(Real), hipMemcpyHostToDevice);
if (_turb_model == 3) {
hipMalloc(&dists, grid_size * sizeof(Real));
hipMalloc(&S, grid_size * sizeof(Real));
hipMemcpy(dists, dists_vec.data(), grid_size * sizeof(Real), hipMemcpyHostToDevice);
}
}
hipMalloc(&P_residual, grid_size * sizeof(Real));
hipMalloc(&cell_type, grid_size * sizeof(int));
hipMalloc(&row_start_u, u_row_start_size * sizeof(int));
hipMalloc(&row_start_v, v_row_start_size * sizeof(int));
hipMalloc(&row_start_t, t_row_start_size * sizeof(int));
hipMalloc(&col_idx_u, u_col_idx_size * sizeof(int));
hipMalloc(&col_idx_v, v_col_idx_size * sizeof(int));
hipMalloc(&mat_u, u_matrix_size * sizeof(Real));
hipMalloc(&mat_v, v_matrix_size * sizeof(Real));
hipMalloc(&rhs_vec_u, u_rhs_size * sizeof(Real));
hipMalloc(&rhs_vec_v, v_rhs_size * sizeof(Real));
hipMalloc(&neighborhood, neighbors.size() * sizeof(uint32_t));
hipMalloc(&A, A_matrix_diag.data.size() * sizeof(Real));
hipMalloc(&A_offsets, A_matrix_diag.offsets.size() * sizeof(uint32_t));
if (_preconditioner != -1) {
hipMalloc(&M, A_precond_diag.data.size() * sizeof(Real));
hipMalloc(&M_offsets, A_precond_diag.offsets.size() * sizeof(uint32_t));
}
hipMalloc(&q, grid_size * sizeof(Real));
hipMalloc(&d, grid_size * sizeof(Real));
hipMalloc(&r, grid_size * sizeof(Real));
hipMalloc(&z, grid_size * sizeof(Real));
hipMalloc(&r_dot_r, sizeof(Real));
hipMalloc(&r_dot_r_old, sizeof(Real));
hipMalloc(&d_dot_q, sizeof(Real));
hipMalloc(&p_residual_out, sizeof(Real));
hipMalloc(&cg_alpha, sizeof(Real));
hipMalloc(&cg_beta, sizeof(Real));
chk(hipMemcpy(U, _field._U._container.data(), grid_size * sizeof(Real), hipMemcpyHostToDevice));
chk(hipMemcpy(V, _field._V._container.data(), grid_size * sizeof(Real), hipMemcpyHostToDevice));
chk(hipMemcpy(P, _field._P._container.data(), grid_size * sizeof(Real), hipMemcpyHostToDevice));
chk(hipMemcpy(mat_u, u_matrix_data, u_matrix_size * sizeof(Real), hipMemcpyHostToDevice));
chk(hipMemcpy(row_start_u, u_row_start_data, u_row_start_size * sizeof(int), hipMemcpyHostToDevice));
chk(hipMemcpy(col_idx_u, u_col_idx_data, u_col_idx_size * sizeof(int), hipMemcpyHostToDevice));
chk(hipMemcpy(rhs_vec_u, u_rhs_data, u_rhs_size * sizeof(Real), hipMemcpyHostToDevice));
chk(hipMemcpy(mat_v, v_matrix_data, v_matrix_size * sizeof(Real), hipMemcpyHostToDevice));
chk(hipMemcpy(row_start_v, v_row_start_data, v_row_start_size * sizeof(int), hipMemcpyHostToDevice));
chk(hipMemcpy(col_idx_v, v_col_idx_data, v_col_idx_size * sizeof(int), hipMemcpyHostToDevice));
chk(hipMemcpy(rhs_vec_v, v_rhs_data, v_rhs_size * sizeof(Real), hipMemcpyHostToDevice));
chk(hipMemcpy(neighborhood, neighbors.data(), neighbors.size() * sizeof(uint32_t), hipMemcpyHostToDevice));
chk(hipMemcpy(A, A_matrix_diag.data.data(), A_matrix_diag.data.size() * sizeof(Real), hipMemcpyHostToDevice));
chk(hipMemcpy(A_offsets, A_matrix_diag.offsets.data(), A_matrix_diag.offsets.size() * sizeof(int),
hipMemcpyHostToDevice));
if (_field.calc_temp) {
hipMalloc(&T, grid_size * sizeof(Real));
hipMalloc(&T_temp, grid_size * sizeof(Real));
hipMalloc(&mat_t, t_matrix_size * sizeof(Real));
hipMalloc(&rhs_vec_t, t_rhs_size * sizeof(Real));
hipMalloc(&col_idx_t, t_col_idx_size * sizeof(int));
hipMemcpy(mat_t, t_matrix_data, t_matrix_size * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy(row_start_t, t_row_start_data, t_row_start_size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(col_idx_t, t_col_idx_data, t_col_idx_size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(rhs_vec_t, t_rhs_data, t_rhs_size * sizeof(Real), hipMemcpyHostToDevice);
chk(hipMemcpy(T, _field._T._container.data(), grid_size * sizeof(Real), hipMemcpyHostToDevice));
}
if (_preconditioner != -1) {
chk(hipMemcpy(M, A_precond_diag.data.data(), A_precond_diag.data.size() * sizeof(Real),
hipMemcpyHostToDevice));
chk(hipMemcpy(M_offsets, A_precond_diag.offsets.data(), A_precond_diag.offsets.size() * sizeof(int),
hipMemcpyHostToDevice));
}
chk(hipMemcpy(cell_type, is_fluid.data(), is_fluid.size() * sizeof(int), hipMemcpyHostToDevice));
}
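// Pre-pressure stage: compute dt, apply the velocity (and temperature) boundary conditions,
// advance the temperature, assemble F/G with or without eddy viscosity, fix their boundary
// values and build the Poisson right-hand side.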
void CudaSolver::solve_pre_pressure(Real &dt) {
auto grid_x = _grid.imaxb();
auto grid_y = _grid.jmaxb();
auto grid_size = grid_x * grid_y;
dim3 num_blks_1d(get_num_blks(grid_size));
dim3 blk_size_2d(BLK_SIZE_2D, BLK_SIZE_2D);
dim3 num_blks_2d = get_num_blks_2d(grid_x, grid_y);
dt = calculate_dt(_grid.imaxb(), _grid.jmaxb(), U, V, U_residual, V_residual, NU_residual, K_residual, EPS_residual,
NU_T, K, EPS, _grid.dx(), _grid.dy(), _field._tau, _field._nu, _field._alpha, _field.calc_temp,
_turb_model);
_field._dt = dt;
uv_boundary(U, V, row_start_u, row_start_v, col_idx_u, col_idx_v, mat_u, mat_v, rhs_vec_u, rhs_vec_v, grid_size);
if (_field.calc_temp) {
t_boundary(T, row_start_t, col_idx_t, mat_t, rhs_vec_t, grid_size);
chk(hipMemcpy(T_temp, T, grid_size * sizeof(Real), hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( calc_t), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, U, V, _grid.dx(), _grid.dy(), T, T_temp, cell_type, _field._alpha, dt,
_discretization._gamma, _grid.imaxb(), _grid.jmaxb());
}
    if (_turb_model != 0) {
hipLaunchKernelGGL(( calc_fg_turbulent), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, F, G, U, V, NU_T, _field.calc_temp, _grid.dx(), _grid.dy(), T,
cell_type, dt, _discretization._gamma, _field._nu, _field._beta,
_field._gx, _field._gy, grid_x, grid_y);
} else {
hipLaunchKernelGGL(( calc_fg), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, F, G, U, V, _field.calc_temp, _grid.dx(), _grid.dy(), T, cell_type, dt,
_discretization._gamma, _field._nu, _field._beta, _field._gx, _field._gy,
grid_x, grid_y);
}
hipLaunchKernelGGL(( fg_boundary), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, F, G, U, V, grid_x, grid_y, neighborhood, cell_type);
hipLaunchKernelGGL(( calc_rs), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, F, G, RS, _grid.dx(), _grid.dy(), grid_x, grid_y, dt, cell_type);
}
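// Pressure solve: either (preconditioned) conjugate gradient on the DIA-stored system or
// red-black SOR; both run on the device with only scalar residuals copied back per iteration.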
void CudaSolver::solve_pressure(Real &res, uint32_t &it) {
if (solver_type == SolverType::PCG) {
auto grid_x = _grid.imaxb();
auto grid_y = _grid.jmaxb();
auto grid_size = grid_x * grid_y;
int num_blks(get_num_blks(grid_size));
constexpr int PCG_MODE = 0;
if (PCG_MODE == 0) {
solve_pcg(A, A_offsets, num_offsets_a, P, RS, q, d, r, r_dot_r_old, r_dot_r, z, cg_beta, res, cg_alpha,
d_dot_q, _preconditioner, M, M_offsets, num_offsets_m, it, _max_iter, _tolerance,
_grid.imaxb() * _grid.jmaxb());
} else {
solve_pcg2(A, A_offsets, num_offsets_a, P, RS, q, d, r, r_dot_r_old, r_dot_r, z, cg_beta, U_residual, res,
cg_alpha, d_dot_q, _preconditioner, M, M_offsets, num_offsets_m, it, _max_iter, _tolerance,
_grid.imaxb() * _grid.jmaxb());
}
} else if (solver_type == SolverType::SOR) {
solve_sor(P, P_temp, P_residual, p_residual_out, neighborhood, _grid.imaxb(), _grid.jmaxb(), RS, cell_type, it,
_max_iter, _grid.dx(), _grid.dy(), _field._PI, _tolerance, res, (int)_grid.fluid_cells().size());
}
}
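// Post-pressure stage: correct the velocities with the new pressure, update the turbulence
// quantities and their boundary values, and copy the fields back to the host when output is due.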
void CudaSolver::solve_post_pressure() {
auto grid_x = _grid.imaxb();
auto grid_y = _grid.jmaxb();
auto grid_size = grid_x * grid_y;
int num_blks(get_num_blks(grid_size));
dim3 blk_size_2d(BLK_SIZE_2D, BLK_SIZE_2D);
dim3 num_blks_2d = get_num_blks_2d(grid_x, grid_y);
hipLaunchKernelGGL(( calc_vel), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, U, V, P, F, G, cell_type, _field._dt, grid_x, grid_y, _grid.dx(),
_grid.dy());
if (_turb_model != 0) {
chk(hipMemcpy(K_old, K, grid_size * sizeof(Real), hipMemcpyDeviceToDevice));
chk(hipMemcpy(EPS_old, EPS, grid_size * sizeof(Real), hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( calculate_nu_t), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, NU_T, K, EPS, dists, S, cell_type, _field._nu, grid_x, grid_y,
_turb_model);
hipLaunchKernelGGL(( calculate_nu_ij), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, NU_I, NU_J, K, EPS, dists, S, cell_type, _field._nu, grid_x,
grid_y, _turb_model);
hipLaunchKernelGGL(( calculate_k_and_epsilon), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, K_old, EPS_old, K, EPS, NU_T, NU_I, NU_J, U, V, cell_type,
_field._nu, grid_x, grid_y, _field._dt, 1 / _grid.dx(),
1 / _grid.dy(), _turb_model, S, dists);
// TODO : Implement KIN and EPSIN
hipLaunchKernelGGL(( nu_t_boundary), dim3(num_blks_2d), dim3(blk_size_2d), 0, 0, NU_T, K, EPS, grid_x, grid_y, neighborhood, cell_type, _KIN, _EPSIN,
_field._nu, _turb_model);
}
if (_should_out) {
chk(hipMemcpy(_field._U._container.data(), U, grid_size * sizeof(Real), hipMemcpyDeviceToHost));
chk(hipMemcpy(_field._V._container.data(), V, grid_size * sizeof(Real), hipMemcpyDeviceToHost));
chk(hipMemcpy(_field._P._container.data(), P, grid_size * sizeof(Real), hipMemcpyDeviceToHost));
if (_field.calc_temp) {
chk(hipMemcpy(_field._T._container.data(), T, grid_size * sizeof(Real), hipMemcpyDeviceToHost));
}
if (_turb_model != 0) {
chk(hipMemcpy(_field._NU_T._container.data(), NU_T, grid_size * sizeof(Real), hipMemcpyDeviceToHost));
chk(hipMemcpy(_field._K._container.data(), K, grid_size * sizeof(Real), hipMemcpyDeviceToHost));
chk(hipMemcpy(_field._EPS._container.data(), EPS, grid_size * sizeof(Real), hipMemcpyDeviceToHost));
}
}
}
CudaSolver::~CudaSolver() {
hipFree(U);
hipFree(V);
hipFree(F);
hipFree(G);
hipFree(P);
hipFree(T);
hipFree(T_temp);
hipFree(RS);
hipFree(U_residual);
hipFree(V_residual);
if (_turb_model != 0) {
hipFree(NU_residual);
hipFree(K_residual);
hipFree(EPS_residual);
hipFree(NU_T);
hipFree(NU_I);
hipFree(NU_J);
hipFree(K);
hipFree(K_old);
hipFree(EPS_old);
hipFree(EPS);
if (_turb_model == 3) {
hipFree(dists);
hipFree(S);
}
}
hipFree(P_residual);
    hipFree(cell_type);
    hipFree(P_temp); // P itself was already freed above
hipFree(row_start_u);
hipFree(row_start_v);
hipFree(row_start_t);
hipFree(col_idx_u);
hipFree(col_idx_v);
hipFree(col_idx_t);
hipFree(mat_u);
hipFree(mat_v);
hipFree(mat_t);
hipFree(rhs_vec_u);
hipFree(rhs_vec_v);
hipFree(rhs_vec_t);
hipFree(neighborhood);
hipFree(A);
hipFree(A_offsets);
if (_preconditioner != -1) {
hipFree(M);
hipFree(M_offsets);
}
hipFree(q);
hipFree(d);
hipFree(r);
hipFree(z);
hipFree(r_dot_r);
hipFree(r_dot_r_old);
hipFree(d_dot_q);
hipFree(p_residual_out);
hipFree(cg_alpha);
hipFree(cg_beta);
}
| 978d5045d7d259fd2f023fe6cf8695c670e1b2b9.cu | #ifndef __CUDACC__
#define __CUDACC__
#endif
#include "CUDASolver.hpp"
#include "Discretization.cu"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define BLK_SIZE 128
#define BLK_SIZE_2D 32
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define chk(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
int get_num_blks(int size) { return (size + BLK_SIZE - 1) / BLK_SIZE; }
dim3 get_num_blks_2d(int size_x, int size_y) {
return dim3((size_x + BLK_SIZE_2D - 1) / BLK_SIZE_2D, (size_y + BLK_SIZE_2D - 1) / BLK_SIZE_2D);
}
// Allocate a single element of type T on the device and copy val into it. The pointer is
// taken by reference so the caller receives the allocated address, and the copy transfers
// sizeof(T) bytes rather than a single byte.
template <typename T> inline void malloc_assign(T *&dev_ptr, T val) {
    chk(cudaMalloc(&dev_ptr, sizeof(T)));
    chk(cudaMemcpy(dev_ptr, &val, sizeof(T), cudaMemcpyHostToDevice));
}
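// Small BLAS-like helpers used by the CG solvers: init fills a vector with a constant, and
// saxpy / smaxpy / saxpy2 compute y = a*x + y, y = -a*x + y and y = x + a*y with the scalar a
// kept in device memory so no host round-trip is needed between kernels.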
__global__ void init(Real *a, Real val, int size) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
a[i] = val;
}
}
__global__ void saxpy(Real *a, Real *x, Real *y, int size) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
y[i] = *a * x[i] + y[i];
}
}
__global__ void smaxpy(Real *a, Real *x, Real *y, int size) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
y[i] = -(*a) * x[i] + y[i];
}
}
__global__ void saxpy2(Real *a, Real *x, Real *y, int size) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
y[i] = x[i] + *a * y[i];
}
}
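// Dot product: each block reduces its partial products with a shared-memory tree reduction
// and accumulates the block result into *o with atomicAdd; *o is zeroed by global thread 0.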
__global__ void vec_dot_vec(Real *a, Real *b, Real *o, int size) {
__shared__ Real sdata[BLK_SIZE];
uint32_t tid = threadIdx.x;
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0) {
*o = 0;
}
if (i < size) {
sdata[tid] = a[i] * b[i];
} else {
sdata[tid] = 0;
}
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = sdata[tid] + sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) {
atomicAdd(o, sdata[0]);
}
}
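// Sparse matrix-vector product in DIA (diagonal) storage: data holds num_diags diagonals of
// length num_rows and offsets[n] is the column shift of diagonal n. For the 5-point pressure
// stencil the offsets are {-imax, -1, 0, 1, imax} (see CudaSolver::initialize).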
// https://www.nvidia.com/docs/IO/66889/nvr-2008-004.pdf
__global__ void spmv_dia(Real *data, int *offsets, int num_rows, int num_cols, int num_diags, Real *x, Real *y) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
Real dot = 0;
y[row] = 0;
for (int n = 0; n < num_diags; n++) {
int col = row + offsets[n];
Real val = data[num_rows * n + row];
if (col >= 0 && col < num_cols) dot += val * x[col];
}
y[row] += dot;
}
}
// See https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
__global__ void reduce_abs_max(Real *input, Real *output, int size) {
extern __shared__ Real sdata[];
uint32_t tid = threadIdx.x;
uint32_t i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
Real curr_max = (i < size) ? real_abs(input[i]) : 0;
if (i + blockDim.x < size) {
curr_max = real_max(curr_max, real_abs(input[i + blockDim.x]));
}
sdata[tid] = curr_max;
__syncthreads();
for (uint32_t s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = real_max(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = sdata[0];
}
}
__global__ void reduce_min(Real *input, Real *output, int size) {
extern __shared__ Real sdata[];
uint32_t tid = threadIdx.x;
uint32_t i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    // The neutral element of a min-reduction must not pull the result towards zero.
    // REAL_MAX is assumed to be usable in device code here (it is already used as a
    // sentinel on the host side of calculate_dt).
    Real curr_min = (i < size) ? input[i] : REAL_MAX;
    if (i + blockDim.x < size) {
        curr_min = real_min(curr_min, (input[i + blockDim.x]));
    }
    sdata[tid] = curr_min;
__syncthreads();
for (uint32_t s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = real_min(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = sdata[0];
}
}
__global__ void reduce_max(Real *input, Real *output, int size) {
extern __shared__ Real sdata[];
uint32_t tid = threadIdx.x;
uint32_t i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
Real curr_max = (i < size) ? input[i] : 0;
if (i + blockDim.x < size) {
curr_max = real_max(curr_max, (input[i + blockDim.x]));
}
sdata[tid] = curr_max;
__syncthreads();
for (uint32_t s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = real_max(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = sdata[0];
}
}
__global__ void scalar_div(Real *num, Real *denom, Real *o) { *o = *num / *denom; }
__global__ void scalar_cpy(Real *dst, Real *src) { *dst = *src; }
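// (Preconditioned) conjugate gradient for the pressure system. Per iteration: q = A*d,
// alpha = rho / (d.q), x and r are updated via smaxpy, z = M*r when a preconditioner is used,
// beta = rho_new / rho_old and d = z + beta*d. Note that x is updated with a minus sign,
// presumably matching the sign convention of the assembled system. Iteration stops when rho
// falls below eps^2 times its initial value or max_iter is reached.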
void solve_pcg(Real *A, int *A_offsets, int num_diag, Real *x, Real *b, Real *q, Real *d, Real *r, Real *r_dot_r_old,
Real *r_dot_r, Real *z, Real *cg_beta, Real &delta_new, Real *cg_alpha, Real *d_dot_q, int precondition,
Real *M, int *M_offsets, int m_num_diag, uint32_t &it, uint32_t max_iter, Real eps, int vec_size) {
int num_blocks = get_num_blks(vec_size);
cudaMemcpy(r, b, vec_size * sizeof(Real), cudaMemcpyDeviceToDevice);
cudaMemset(x, 0, vec_size * sizeof(Real));
if (precondition != -1) {
spmv_dia<<<num_blocks, BLK_SIZE>>>(M, M_offsets, vec_size, vec_size, m_num_diag, r, d);
vec_dot_vec<<<num_blocks, BLK_SIZE>>>(r, d, r_dot_r, vec_size);
} else {
cudaMemcpy(d, b, vec_size * sizeof(Real), cudaMemcpyDeviceToDevice);
vec_dot_vec<<<num_blocks, BLK_SIZE>>>(r, r, r_dot_r, vec_size);
}
cudaMemcpy(&delta_new, r_dot_r, sizeof(Real), cudaMemcpyDeviceToHost);
Real cond = delta_new * eps * eps;
it = 0;
while (it < max_iter && delta_new > cond) {
// q <- A * d
spmv_dia<<<num_blocks, BLK_SIZE>>>(A, A_offsets, vec_size, vec_size, num_diag, d, q);
vec_dot_vec<<<num_blocks, BLK_SIZE>>>(d, q, d_dot_q, vec_size);
// cg_alpha <- r_dot_r / d_dot_q
scalar_div<<<1, 1>>>(r_dot_r, d_dot_q, cg_alpha);
// x <- x - cg_alpha * d
smaxpy<<<num_blocks, BLK_SIZE>>>(cg_alpha, d, x, vec_size);
// r <- r - cg_alpha * q
smaxpy<<<num_blocks, BLK_SIZE>>>(cg_alpha, q, r, vec_size);
scalar_cpy<<<1, 1>>>(r_dot_r_old, r_dot_r);
if (precondition != -1) {
// z <- M * r
spmv_dia<<<num_blocks, BLK_SIZE>>>(M, M_offsets, vec_size, vec_size, m_num_diag, r, z);
vec_dot_vec<<<num_blocks, BLK_SIZE>>>(r, z, r_dot_r, vec_size);
} else {
vec_dot_vec<<<num_blocks, BLK_SIZE>>>(r, r, r_dot_r, vec_size);
}
// cg_beta <- r_dot_r / r_dot_r_old
scalar_div<<<1, 1>>>(r_dot_r, r_dot_r_old, cg_beta);
if (precondition != -1) {
// d <- z + cg_beta * d
saxpy2<<<num_blocks, BLK_SIZE>>>(cg_beta, z, d, vec_size);
} else {
// d <- r + cg_beta *d
saxpy2<<<num_blocks, BLK_SIZE>>>(cg_beta, r, d, vec_size);
}
it++;
cudaMemcpy(&delta_new, r_dot_r, sizeof(Real), cudaMemcpyDeviceToHost);
}
}
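// Maximum absolute value of a device vector: reduce blockwise into res, then keep reducing
// the partial results until a single value remains and copy it to the host.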
void get_abs_max(Real *input, Real *res, Real &out, int size) {
int num_blks(get_num_blks(size));
    // Shared memory must cover a full block of BLK_SIZE threads regardless of size.
    int smemsize = BLK_SIZE;
reduce_abs_max<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(input, res, size);
while (num_blks != 1) {
        size = (int)ceil(size / Real(BLK_SIZE));
        num_blks = get_num_blks(size);
reduce_abs_max<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(res, res, size);
}
cudaMemcpy(&out, res, sizeof(Real), cudaMemcpyDeviceToHost);
}
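// Variant of the CG solver that monitors the residual max-norm directly: it stops as soon as
// max|r| <= eps (absolute tolerance) and reports delta_new as the ratio of the final to the
// initial residual norm.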
void solve_pcg2(Real *A, int *A_offsets, int num_diag, Real *x, Real *b, Real *q, Real *d, Real *r, Real *rho_old,
Real *rho, Real *z, Real *cg_beta, Real *residual, Real &delta_new, Real *cg_alpha, Real *d_dot_q,
int precondition, Real *M, int *M_offsets, int m_num_diag, uint32_t &it, uint32_t max_iter, Real eps,
int vec_size) {
int num_blocks = get_num_blks(vec_size);
Real residual_out;
cudaMemcpy(r, b, vec_size * sizeof(Real), cudaMemcpyDeviceToDevice);
cudaMemset(x, 0, vec_size * sizeof(Real));
get_abs_max(r, residual, residual_out, vec_size);
if (residual_out == 0) {
it = 0;
return;
}
if (precondition != -1) {
spmv_dia<<<num_blocks, BLK_SIZE>>>(M, M_offsets, vec_size, vec_size, m_num_diag, r, d);
vec_dot_vec<<<num_blocks, BLK_SIZE>>>(r, d, rho, vec_size);
} else {
cudaMemcpy(d, b, vec_size * sizeof(Real), cudaMemcpyDeviceToDevice);
vec_dot_vec<<<num_blocks, BLK_SIZE>>>(r, r, rho, vec_size);
}
Real residual_0 = residual_out;
it = 0;
while (it < max_iter) {
// q <- A * d
cudaMemset(q, 0, vec_size * sizeof(Real));
spmv_dia<<<num_blocks, BLK_SIZE>>>(A, A_offsets, vec_size, vec_size, num_diag, d, q);
vec_dot_vec<<<num_blocks, BLK_SIZE>>>(d, q, d_dot_q, vec_size);
// cg_alpha <- rho / d_dot_q
scalar_div<<<1, 1>>>(rho, d_dot_q, cg_alpha);
// x <- x - cg_alpha * d
smaxpy<<<num_blocks, BLK_SIZE>>>(cg_alpha, d, x, vec_size);
// r <- r - cg_alpha * q
smaxpy<<<num_blocks, BLK_SIZE>>>(cg_alpha, q, r, vec_size);
get_abs_max(r, residual, residual_out, vec_size);
if (residual_out <= eps) {
break;
}
scalar_cpy<<<1, 1>>>(rho_old, rho);
if (precondition != -1) {
// z <- M * r
spmv_dia<<<num_blocks, BLK_SIZE>>>(M, M_offsets, vec_size, vec_size, m_num_diag, r, z);
vec_dot_vec<<<num_blocks, BLK_SIZE>>>(r, z, rho, vec_size);
} else {
vec_dot_vec<<<num_blocks, BLK_SIZE>>>(r, r, rho, vec_size);
}
// cg_beta <- rho / rho_old
scalar_div<<<1, 1>>>(rho, rho_old, cg_beta);
if (precondition != -1) {
// d <- z + cg_beta * d
saxpy2<<<num_blocks, BLK_SIZE>>>(cg_beta, z, d, vec_size);
} else {
// d <- r + cg_beta *d
saxpy2<<<num_blocks, BLK_SIZE>>>(cg_beta, r, d, vec_size);
}
it++;
}
delta_new = residual_out / residual_0;
}
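// One red-black SOR sweep: only cells of one (i + j) parity are relaxed per launch, towards
// the weighted neighbour sum minus the right-hand side, with relaxation factor omega.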
__global__ void sor_iter(Real *P, Real *RS, Real coeff, int *cell_type, int imax, int jmax, Real omega, Real inv_dx,
Real inv_dy, int parity) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
if (parity == 0 && ((i + j) % 2) == 0) {
return;
} else if (parity == 1 && ((i + j) % 2) == 1) {
return;
}
Real p_stencil[4] = {at(P, i + 1, j), at(P, i - 1, j), at(P, i, j + 1), at(P, i, j - 1)};
at(P, i, j) = (1 - omega) * at(P, i, j) + coeff * (sor_helper(p_stencil, inv_dx, inv_dy) - at(RS, i, j));
}
__global__ void calc_residual(Real *P, Real *RS, int *cell_type, int imax, int jmax, Real inv_dx, Real inv_dy,
Real *residual) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real rloc = 0;
Real p_laplacian[5] = {at(P, i + 1, j), at(P, i, j), at(P, i - 1, j), at(P, i, j + 1), at(P, i, j - 1)};
Real val = laplacian_5(p_laplacian, inv_dx, inv_dy) - at(RS, i, j);
rloc += val * val;
at(residual, i, j) = rloc;
}
__global__ void reduce_residual(Real *residual, Real *o, int size) {
__shared__ Real sdata[BLK_SIZE];
uint32_t tid = threadIdx.x;
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0) {
*o = 0;
}
if (i < size) {
sdata[tid] = residual[i];
} else {
sdata[tid] = 0;
}
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = sdata[tid] + sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) {
atomicAdd(o, sdata[0]);
}
}
__global__ void negate_p(Real *p, int size) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
p[i] = -p[i];
}
}
__device__ Real vel_kernel(Real fg, Real dt, Real p[2], Real inv_dxy) { return fg - dt * inv_dxy * (p[1] - p[0]); }
__global__ void calc_vel(Real *u, Real *v, Real *p, Real *f, Real *g, int *cell_type, Real dt, int imax, int jmax,
Real dx, Real dy) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
Real p_diff_u[2] = {at(p, i, j), at(p, i + 1, j)};
Real p_diff_v[2] = {at(p, i, j), at(p, i, j + 1)};
/* at(u, i, j) = vel_kernel(at(f, i, j), *dt, p_diff_u, inv_dx);
at(v, i, j) = vel_kernel(at(g, i, j), *dt, p_diff_v, inv_dy);*/
at(u, i, j) = at(f, i, j) - dt * inv_dx * (p_diff_u[1] - p_diff_u[0]);
at(v, i, j) = at(g, i, j) - dt * inv_dy * (p_diff_v[1] - p_diff_v[0]);
}
__global__ void enforce_boundary(Real *u, int *row_start, int *col_idx, Real *mat, Real *rhs_vec, int size) {
uint32_t row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < size) {
Real sum = 0;
for (int j = row_start[row]; j < row_start[row + 1]; j++) {
sum += mat[j] * u[col_idx[j]];
}
u[row] = sum + 2 * rhs_vec[row];
}
}
void uv_boundary(Real *u, Real *v, int *row_start_u, int *row_start_v, int *col_idx_u, int *col_idx_v, Real *mat_u,
Real *mat_v, Real *rhs_vec_u, Real *rhs_vec_v, int size) {
int num_blks(get_num_blks(size));
enforce_boundary<<<num_blks, BLK_SIZE>>>(u, row_start_u, col_idx_u, mat_u, rhs_vec_u, size);
enforce_boundary<<<num_blks, BLK_SIZE>>>(v, row_start_v, col_idx_v, mat_v, rhs_vec_v, size);
}
void t_boundary(Real *t, int *row_start_t, int *col_idx_t, Real *mat_t, Real *rhs_vec_t, int size) {
int num_blks(get_num_blks(size));
enforce_boundary<<<num_blks, BLK_SIZE>>>(t, row_start_t, col_idx_t, mat_t, rhs_vec_t, size);
}
__global__ void calc_t(Real *u, Real *v, Real dx, Real dy, Real *t_new, Real *t_old, int *cell_type, Real alpha,
Real dt, Real gamma, int imax, int jmax) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
Real inv_dx2 = inv_dx * inv_dx;
Real inv_dy2 = inv_dy * inv_dy;
Real u_stencil[2] = {at(u, i - 1, j), at(u, i, j)};
Real v_stencil[2] = {at(v, i, j - 1), at(v, i, j)};
Real t_laplacian[5] = {at(t_old, i + 1, j), at(t_old, i, j), at(t_old, i - 1, j), at(t_old, i, j + 1),
at(t_old, i, j - 1)};
at(t_new, i, j) = at(t_new, i, j) + dt * (alpha * laplacian_5(t_laplacian, inv_dx, inv_dy) -
convection_uT(u_stencil, t_laplacian, inv_dx, gamma) -
convection_vT(v_stencil, t_laplacian, inv_dy, gamma));
}
__device__ Real calculate_f1_sst(Real omega, Real dk_di, Real dw_di, Real k, Real dist, Real nu) {
Real cd_kw = real_max(2 * 0.856 * 1 / omega * dk_di * dw_di, 1e-10);
Real f1 =
real_tanh(real_pow(real_min(real_max(real_sqrt(k) / (0.09 * omega * dist), 500 * nu / (dist * dist * omega)),
4 * 0.856 * k / (cd_kw * dist * dist)),
4));
return f1;
}
__device__ Real calculate_f2_sst(Real omega, Real k, Real dist, Real nu) {
Real max_sqr = real_max(2 * real_sqrt(k) / (0.09 * omega * dist), 500 * nu / (dist * dist * omega));
return real_tanh(max_sqr * max_sqr);
}
__device__ Real calculate_sst_term(Real K[3], Real EPS[3], Real kij, Real eij, Real dist, Real inv_dx, Real inv_dy,
Real nu) {
Real dk_dx = (K[0] - K[1]) * inv_dx;
Real dw_dx = (EPS[0] - EPS[1]) * inv_dx;
Real dk_dy = (K[0] - K[2]) * inv_dy;
Real dw_dy = (EPS[0] - EPS[2]) * inv_dy;
Real f1_x = calculate_f1_sst(eij, dk_dx, dw_dx, kij, dist, nu);
Real f1_y = calculate_f1_sst(eij, dk_dy, dw_dy, kij, dist, nu);
Real res_x = 2 * (1 - f1_x) * 0.856 * 1 / eij * dk_dx * dw_dx;
Real res_y = 2 * (1 - f1_y) * 0.856 * 1 / eij * dk_dy * dw_dy;
return res_x + res_y;
}
__global__ void calculate_nu_t(Real *NU_T, Real *K, Real *EPS, Real *dists, Real *S, int *cell_type, Real _nu, int imax,
int jmax, const int turb_model) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real kij = at(K, i, j);
Real epsij = at(EPS, i, j);
if (turb_model == 1) {
at(NU_T, i, j) = 0.09 * kij * kij / epsij + _nu;
} else if (turb_model == 2) {
at(NU_T, i, j) = kij / epsij + _nu;
} else if (turb_model == 3) {
const Real a1 = 5.0 / 9.0;
auto dist = at(dists, i, j);
auto f2 = calculate_f2_sst(epsij, kij, dist, _nu);
at(NU_T, i, j) = a1 * kij / (real_max(a1 * epsij, at(S, i, j) * f2)) + _nu;
}
}
__global__ void calculate_nu_ij(Real *NU_I, Real *NU_J, Real *K, Real *EPS, Real *dists, Real *S, int *cell_type,
Real _nu, int imax, int jmax, const int turb_model) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real num_i = (at(K, i, j) + at(K, i + 1, j)) / 2;
Real denom_i = (at(EPS, i, j) + at(EPS, i + 1, j)) / 2;
Real num_j = (at(K, i, j) + at(K, i, j + 1)) / 2;
Real denom_j = (at(EPS, i, j) + at(EPS, i, j + 1)) / 2;
if (turb_model == 1) {
at(NU_I, i, j) = 0.09 * num_i * num_i / denom_i;
at(NU_J, i, j) = 0.09 * num_j * num_j / denom_j;
} else if (turb_model == 2) {
at(NU_I, i, j) = 0.5 * num_i / denom_i;
at(NU_J, i, j) = 0.5 * num_j / denom_j;
} else if (turb_model == 3) {
constexpr Real a1 = 5.0 / 9.0;
auto dist = at(dists, i, j);
auto f2_1 = calculate_f2_sst(denom_i, num_i, dist, _nu);
auto f2_2 = calculate_f2_sst(denom_j, num_j, dist, _nu);
at(NU_I, i, j) = 0.85 * a1 * num_i / real_max(a1 * denom_i, at(S, i, j) * f2_1);
at(NU_J, i, j) = 0.5 * a1 * num_j / real_max(a1 * denom_j, at(S, i, j) * f2_2);
}
}
__global__ void calculate_k_and_epsilon(Real *K_old, Real *EPS_old, Real *K, Real *EPS, Real *NU_T, Real *NU_I,
Real *NU_J, Real *U, Real *V, int *cell_type, Real _nu, int imax, int jmax,
Real dt, Real inv_dx, Real inv_dy, int _turb_model, Real *S, Real *dists) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real f2_coeff = 1;
auto nut = at(NU_T, i, j);
auto kij = at(K_old, i, j);
auto eij = at(EPS_old, i, j);
Real K_stencil[5] = {at(K_old, i + 1, j), at(K_old, i, j), at(K_old, i - 1, j), at(K_old, i, j + 1),
at(K_old, i, j - 1)};
Real EPS_stencil[5] = {at(EPS_old, i + 1, j), at(EPS_old, i, j), at(EPS_old, i - 1, j), at(EPS_old, i, j + 1),
at(EPS_old, i, j - 1)};
Real U_diff[2] = {at(U, i - 1, j), at(U, i, j)};
Real V_diff[2] = {at(V, i, j - 1), at(V, i, j)};
Real NU_I_diff[2] = {at(NU_I, i, j), at(NU_I, i - 1, j)};
Real NU_J_diff[2] = {at(NU_J, i, j), at(NU_J, i, j - 1)};
Real U_stencil[6] = {at(U, i, j), at(U, i - 1, j), at(U, i, j + 1),
at(U, i - 1, j + 1), at(U, i, j - 1), at(U, i - 1, j - 1)};
Real V_stencil[6] = {at(V, i, j), at(V, i, j - 1), at(V, i + 1, j),
at(V, i + 1, j - 1), at(V, i - 1, j), at(V, i - 1, j - 1)};
auto k1_1 = convection_UKEPS(U_diff, K_stencil, inv_dx);
auto k1_2 = convection_VKEPS(V_diff, K_stencil, inv_dy);
auto e1_1 = convection_UKEPS(U_diff, EPS_stencil, inv_dx);
auto e1_2 = convection_VKEPS(V_diff, EPS_stencil, inv_dy);
auto k2 = laplacian_nu(K_stencil, NU_I_diff, NU_J_diff, inv_dx, inv_dy, _nu, 1);
auto e2 = laplacian_nu(EPS_stencil, NU_I_diff, NU_J_diff, inv_dx, inv_dy, _nu, _turb_model == 1 ? 1.3 : 1);
Real k3;
if (_turb_model != 3) {
k3 = nut * mean_strain_rate_squared(U_stencil, V_stencil, inv_dx, inv_dy);
} else {
k3 = nut * mean_strain_rate_squared_store_S(U_stencil, V_stencil, S, i, j, imax, jmax, inv_dx, inv_dy);
k3 = real_min(k3, 10 * 0.09 * kij * eij);
}
auto e3 = (_turb_model == 1 ? 1.44 : 5.0 / 9) * eij * k3 / kij;
auto e4 = _turb_model == 1 ? f2_coeff * 1.92 * eij * eij / kij : 3.0 / 40 * eij * eij;
Real eij_mul = _turb_model == 1 ? 1 : 0.09 * kij;
auto kij_new = kij + dt * (-(k1_1 + k1_2) + k2 + k3 - eij_mul * eij);
Real sst_term = 0;
if (_turb_model == 3) {
Real K_diff[3] = {at(K_old, i, j), at(K_old, i - 1, j), at(K_old, i, j - 1)};
Real EPS_diff[3] = {at(EPS_old, i, j), at(EPS_old, i - 1, j), at(EPS_old, i, j - 1)};
auto dist = at(dists, i, j);
sst_term = calculate_sst_term(K_diff, EPS_diff, kij, eij, dist, inv_dx, inv_dy, _nu);
}
auto epsij_new = eij + dt * (-(e1_1 + e1_2) + e2 + e3 - e4 + sst_term);
at(K, i, j) = kij_new;
at(EPS, i, j) = epsij_new;
}
__global__ void calc_fg(Real *f, Real *g, Real *u, Real *v, bool calc_temp, Real dx, Real dy, Real *t, int *cell_type,
Real dt, Real gamma, Real nu, Real beta, Real gx, Real gy, int imax, int jmax) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
Real inv_dx2 = inv_dx * inv_dx;
Real inv_dy2 = inv_dy * inv_dy;
// 5-point + 1 stencil for U and V
Real u_stencil[6] = {at(u, i + 1, j), at(u, i, j), at(u, i - 1, j),
at(u, i, j + 1), at(u, i, j - 1), at(u, i - 1, j + 1)};
Real v_stencil[6] = {at(v, i + 1, j), at(v, i, j), at(v, i - 1, j),
at(v, i, j + 1), at(v, i, j - 1), at(v, i + 1, j - 1)};
// Calculate fluxes
at(f, i, j) = at(u, i, j) + dt * (nu * laplacian(u_stencil, inv_dx, inv_dy) -
convection_u(u_stencil, v_stencil, inv_dx, inv_dy, gamma));
at(g, i, j) = at(v, i, j) + dt * (nu * laplacian(v_stencil, inv_dx, inv_dy) -
convection_v(u_stencil, v_stencil, inv_dx, inv_dy, gamma));
if (calc_temp) {
Real term1 = at(t, i, j) + at(t, i + 1, j);
Real term2 = at(t, i, j) + at(t, i, j + 1);
at(f, i, j) -= beta * dt / 2 * (term1)*gx;
at(g, i, j) -= beta * dt / 2 * (term2)*gy;
}
}
__global__ void calc_fg_turbulent(Real *f, Real *g, Real *u, Real *v, Real *NU_T, bool calc_temp, Real dx, Real dy,
Real *t, int *cell_type, Real dt, Real gamma, Real nu, Real beta, Real gx, Real gy,
int imax, int jmax) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
Real inv_dx2 = inv_dx * inv_dx;
Real inv_dy2 = inv_dy * inv_dy;
// 5-point + 1 stencil for U and V
Real u_stencil[6] = {at(u, i + 1, j), at(u, i, j), at(u, i - 1, j),
at(u, i, j + 1), at(u, i, j - 1), at(u, i - 1, j + 1)};
Real v_stencil[6] = {at(v, i + 1, j), at(v, i, j), at(v, i - 1, j),
at(v, i, j + 1), at(v, i, j - 1), at(v, i + 1, j - 1)};
Real nu_term1 = (at(NU_T, i, j) + at(NU_T, i + 1, j)) / 2;
Real nu_term2 = (at(NU_T, i, j) + at(NU_T, i, j + 1)) / 2;
// Calculate fluxes
at(f, i, j) = at(u, i, j) + dt * (nu_term1 * laplacian(u_stencil, inv_dx, inv_dy) -
convection_u(u_stencil, v_stencil, inv_dx, inv_dy, gamma));
at(g, i, j) = at(v, i, j) + dt * (nu_term2 * laplacian(v_stencil, inv_dx, inv_dy) -
convection_v(u_stencil, v_stencil, inv_dx, inv_dy, gamma));
if (calc_temp) {
Real term1 = at(t, i, j) + at(t, i + 1, j);
Real term2 = at(t, i, j) + at(t, i, j + 1);
at(f, i, j) -= beta * dt / 2 * (term1)*gx;
at(g, i, j) -= beta * dt / 2 * (term2)*gy;
}
}
__global__ void fg_boundary(Real *f, Real *g, Real *u, Real *v, int imax, int jmax, uint32_t *neighborhood,
int *cell_type) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 1) {
return;
}
uint32_t neighbors = at(neighborhood, i, j) & 0xFF;
if ((neighbors & 0x1) == 1) {
at(f, i, j) = at(u, i, j);
}
if ((neighbors & 0x2) == 2) {
at(f, i - 1, j) = at(u, i - 1, j);
}
if ((neighbors & 0x4) == 4) {
at(g, i, j) = at(v, i, j);
}
if ((neighbors & 0x8) == 8) {
at(g, i, j - 1) = at(v, i, j - 1);
}
}
__global__ void p_boundary(Real *p, int imax, int jmax, uint32_t *neighborhood, int *cell_type, Real PI) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 1) {
return;
}
uint32_t type = at(neighborhood, i, j) >> 8;
uint32_t neighbors = at(neighborhood, i, j) & 0xFF;
if (type == 0) { // Outlet
at(p, i, j) = PI;
} else {
int diag = 0;
if ((neighbors & 0x10) == 16) { // Right + top
diag = 1;
at(p, i, j) = (at(p, i + 1, j) + at(p, i, j + 1)) / 2;
}
if ((neighbors & 0x20) == 32) { // Right + bottom
diag = 1;
at(p, i, j) = (at(p, i + 1, j) + at(p, i, j - 1)) / 2;
}
if ((neighbors & 0x40) == 64) { // Left + top
diag = 1;
at(p, i, j) = (at(p, i - 1, j) + at(p, i, j + 1)) / 2;
}
if ((neighbors & 0x80) == 128) { // Left + bottom
diag = 1;
at(p, i, j) = (at(p, i - 1, j) + at(p, i, j - 1)) / 2;
}
if (!diag) {
if ((neighbors & 0x1) == 1) { // Right
at(p, i, j) = at(p, i + 1, j);
}
if ((neighbors & 0x2) == 2) { // Left
at(p, i, j) = at(p, i - 1, j);
}
if ((neighbors & 0x4) == 4) { // Top
at(p, i, j) = at(p, i, j + 1);
}
if ((neighbors & 0x8) == 8) { // Bottom
at(p, i, j) = at(p, i, j - 1);
}
}
}
}
__global__ void nu_t_boundary(Real *NU_T, Real *K, Real *EPS, int imax, int jmax, uint32_t *neighborhood,
int *cell_type, Real wk, Real weps, Real _nu, const int turb_model) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 1) {
return;
}
uint32_t type = at(neighborhood, i, j) >> 8;
uint32_t neighbors = at(neighborhood, i, j) & 0xFF;
Real k = 0;
Real eps = 0;
if (type == 1) { // Inlet
int valid = 0;
if ((neighbors & 0x1) == 1) { // Right
k = 2 * wk - at(K, i + 1, j);
eps = 2 * weps - at(EPS, i + 1, j);
valid = 1;
}
if ((neighbors & 0x2) == 2) { // Left
k = 2 * wk - at(K, i - 1, j);
eps = 2 * weps - at(EPS, i - 1, j);
valid = 1;
}
if ((neighbors & 0x4) == 4) { // Top
k = 2 * wk - at(K, i, j + 1);
eps = 2 * weps - at(EPS, i, j + 1);
valid = 1;
}
if ((neighbors & 0x8) == 8) { // Bottom
k = 2 * wk - at(K, i, j - 1);
eps = 2 * weps - at(EPS, i, j - 1);
valid = 1;
}
if (valid) {
at(K, i, j) = k;
at(EPS, i, j) = eps;
// at(NU_T, i, j) = 0.09 * k * k / eps + _nu;
}
} else { // Other
int diag = 0;
int valid = 0;
if ((neighbors & 0x10) == 16) { // Right + top
diag = 1;
k = (at(K, i + 1, j) + at(K, i, j + 1)) / 2;
eps = (at(EPS, i + 1, j) + at(EPS, i, j + 1)) / 2;
}
if ((neighbors & 0x20) == 32) { // Right + bottom
diag = 1;
k = (at(K, i + 1, j) + at(K, i, j - 1)) / 2;
eps = (at(EPS, i + 1, j) + at(EPS, i, j - 1)) / 2;
}
if ((neighbors & 0x40) == 64) { // Left + top
diag = 1;
k = (at(K, i - 1, j) + at(K, i, j + 1)) / 2;
eps = (at(EPS, i - 1, j) + at(EPS, i, j + 1)) / 2;
}
if ((neighbors & 0x80) == 128) { // Left + bottom
diag = 1;
k = (at(K, i - 1, j) + at(K, i, j - 1)) / 2;
eps = (at(EPS, i - 1, j) + at(EPS, i, j - 1)) / 2;
}
if (!diag) {
if ((neighbors & 0x1) == 1) { // Right
k = at(K, i + 1, j);
eps = at(EPS, i + 1, j);
valid = 1;
}
if ((neighbors & 0x2) == 2) { // Left
k = at(K, i - 1, j);
eps = at(EPS, i - 1, j);
valid = 1;
}
if ((neighbors & 0x4) == 4) { // Top
k = at(K, i, j + 1);
eps = at(EPS, i, j + 1);
valid = 1;
}
if ((neighbors & 0x8) == 8) { // Bottom
k = at(K, i, j - 1);
eps = at(EPS, i, j - 1);
valid = 1;
}
}
if (diag || valid) {
at(K, i, j) = k;
at(EPS, i, j) = eps;
if (turb_model == 1) {
at(NU_T, i, j) = 0.09 * k * k / eps + _nu;
} else if (turb_model == 2 || turb_model == 3) {
at(NU_T, i, j) = k / eps + _nu;
}
}
}
}
void solve_sor(Real *P, Real *P_tmp, Real *P_residual, Real *P_residual_out, uint32_t *neighborhood, int imax, int jmax,
Real *RS, int *cell_type, uint32_t &it, uint32_t max_iter, Real dx, Real dy, Real PI, Real tolerance,
Real &res, int num_fluid_cells) {
it = 0;
const Real omega = 1.7;
auto grid_size = imax * jmax;
Real coeff = omega / (2 * (1 / (dx * dx) + 1 / (dy * dy)));
dim3 blk_size_2d(BLK_SIZE_2D, BLK_SIZE_2D);
dim3 num_blks_2d = get_num_blks_2d(imax, jmax);
int num_blks_1d(get_num_blks(grid_size));
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
res = REAL_MAX;
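    // Each iteration: two sor_iter passes (trailing argument 0, then 1), recompute and reduce
    // the per-cell residuals on the device, re-apply the pressure boundary condition, then copy
    // the reduced residual to the host and form the RMS residual over the fluid cells.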
while (it < max_iter && res > tolerance) {
sor_iter<<<num_blks_2d, blk_size_2d>>>(P, RS, coeff, cell_type, imax, jmax, omega, inv_dx, inv_dy, 0);
sor_iter<<<num_blks_2d, blk_size_2d>>>(P, RS, coeff, cell_type, imax, jmax, omega, inv_dx, inv_dy, 1);
cudaMemset(P_residual, 0, grid_size * sizeof(Real));
calc_residual<<<num_blks_2d, blk_size_2d>>>(P, RS, cell_type, imax, jmax, inv_dx, inv_dy, P_residual);
reduce_residual<<<num_blks_1d, BLK_SIZE>>>(P_residual, P_residual_out, grid_size);
p_boundary<<<num_blks_2d, blk_size_2d>>>(P, imax, jmax, neighborhood, cell_type, PI);
cudaMemcpy(&res, P_residual_out, sizeof(Real), cudaMemcpyDeviceToHost);
res = std::sqrt(res / num_fluid_cells);
it++;
}
}
__global__ void calc_rs(Real *f, Real *g, Real *rs, Real dx, Real dy, int imax, int jmax, Real dt, int *cell_type) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
int is_fluid = at(cell_type, i, j);
if (i >= imax || j >= jmax || is_fluid == 0) {
return;
}
Real inv_dx = 1 / dx;
Real inv_dy = 1 / dy;
Real f_diff[2] = {at(f, i, j), at(f, i - 1, j)};
Real g_diff[2] = {at(g, i, j), at(g, i, j - 1)};
Real df = inv_dx * (f_diff[0] - f_diff[1]);
Real dg = inv_dy * (g_diff[0] - g_diff[1]);
    at(rs, i, j) = (df + dg) / dt;
}
Real calculate_dt(int imax, int jmax, Real *u, Real *v, Real *u_residual, Real *v_residual, Real *nu_residual,
Real *k_residual, Real *eps_residual, Real *nu_t, Real *k, Real *eps, Real dx, Real dy, Real tau,
Real nu, Real alpha, bool calc_temp, int turb_model) {
// Calculate uv max
int size = imax * jmax;
int num_blks(get_num_blks(size));
Real u_max_abs = 0;
Real v_max_abs = 0;
Real nu_min = REAL_MAX;
Real k_max;
Real eps_max;
Real dx2 = dx * dx;
Real dy2 = dy * dy;
int smemsize = min(BLK_SIZE, size);
std::vector<Real> ucpu(imax * jmax);
std::vector<Real> vcpu(imax * jmax);
std::vector<Real> ures(imax * jmax);
std::vector<Real> vres(imax * jmax);
reduce_abs_max<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(u, u_residual, size);
reduce_abs_max<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(v, v_residual, size);
if (turb_model != 0) {
reduce_min<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(nu_t, nu_residual, size);
reduce_max<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(k, k_residual, size);
reduce_max<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(eps, eps_residual, size);
}
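    // Repeatedly reduce the per-block partial results in place until a single value is left at
    // the front of each residual buffer.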
while (num_blks != 1) {
size = (int)ceil(size / Real(BLK_SIZE));
smemsize = min(BLK_SIZE, size);
num_blks = get_num_blks(size);
reduce_abs_max<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(u_residual, u_residual, size);
reduce_abs_max<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(v_residual, v_residual, size);
if (turb_model != 0) {
reduce_min<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(nu_residual, nu_residual, size);
reduce_max<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(k_residual, k_residual, size);
reduce_max<<<num_blks, BLK_SIZE, smemsize * sizeof(Real)>>>(eps_residual, eps_residual, size);
}
}
cudaMemcpy(&u_max_abs, u_residual, sizeof(Real), cudaMemcpyDeviceToHost);
cudaMemcpy(&v_max_abs, v_residual, sizeof(Real), cudaMemcpyDeviceToHost);
if (turb_model != 0) {
cudaMemcpy(&nu_min, nu_residual, sizeof(Real), cudaMemcpyDeviceToHost);
cudaMemcpy(&k_max, k_residual, sizeof(Real), cudaMemcpyDeviceToHost);
cudaMemcpy(&eps_max, eps_residual, sizeof(Real), cudaMemcpyDeviceToHost);
}
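    // Time-step restrictions: convective limit from the maximum velocities, viscous/diffusive
    // limit, optional thermal limit, and limits on the turbulence quantities; the smallest one,
    // scaled by the safety factor tau, is returned.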
Real min_cond = std::min(dx / u_max_abs, dy / v_max_abs);
nu_min = (nu_min == REAL_MAX || nu_min == 0) ? nu : nu_min;
if (nu_min != 0) {
Real cond_spatial = 1.0 / (2.0 * nu) * ((dx2 * dy2) / (dx2 + dy2));
min_cond = std::min(min_cond, cond_spatial);
}
if (calc_temp) {
Real inv_dx = 1 / dx;
Real inv_dx2 = inv_dx * inv_dx;
Real inv_dy = 1 / dy;
Real inv_dy2 = inv_dy * inv_dy;
Real cond_temp = 1 / (2 * alpha * (inv_dx2 + inv_dy2));
min_cond = std::min(min_cond, cond_temp);
}
if (turb_model != 0) {
Real cond_5 = 1 / (2 * k_max * (1 / dx2 + 1 / dy2));
Real cond_6;
if (turb_model == 1) {
cond_6 = 1 / (2 * eps_max * (1 / dx2 + 1 / dy2));
} else if (turb_model == 2 || turb_model == 3) {
cond_6 = 1 / (2 * (eps_max * 0.09 * k_max) * (1 / dx2 + 1 / dy2));
}
min_cond = std::min(min_cond, cond_5);
min_cond = std::min(min_cond, cond_6);
}
return tau * min_cond;
}
void CudaSolver::initialize() {
auto grid_x = _grid.imaxb();
auto grid_y = _grid.jmaxb();
auto grid_size = grid_x * grid_y;
build_pcg_matrix(_field, _grid, _boundaries, A_pcg, U_pcg, V_pcg, T_pcg, U_RHS, V_RHS, T_RHS, U_fixed, V_fixed,
T_fixed);
// Preprocess
std::vector<int> is_fluid(_grid.imaxb() * _grid.jmaxb(), 0);
    for (const auto &current_cell : _grid.fluid_cells()) {
int i = current_cell->i();
int j = current_cell->j();
is_fluid[_grid.imaxb() * j + i] = 1;
}
std::vector<BoundaryData> neighbors(_grid.imaxb() * _grid.jmaxb());
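    // For every boundary cell, pack the boundary type into bits 8+ and the neighbor flags
    // (plus their diagonal combinations) into the low byte of a BoundaryData word.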
for (const auto &boundary : _boundaries) {
uint32_t type = boundary->get_type();
auto cells = boundary->_cells;
for (auto &cell : *cells) {
int i = cell->i();
int j = cell->j();
BoundaryData data;
data.neighborhood |= type << 8;
// data.idx = _grid.imaxb() * j + i;
if (cell->is_border(border_position::RIGHT)) {
data.neighborhood |= 1;
}
if (cell->is_border(border_position::LEFT)) {
data.neighborhood |= 2;
}
if (cell->is_border(border_position::TOP)) {
data.neighborhood |= 4;
}
if (cell->is_border(border_position::BOTTOM)) {
data.neighborhood |= 8;
}
if (cell->is_border(border_position::RIGHT) && cell->is_border(border_position::TOP)) {
data.neighborhood |= 16;
}
if (cell->is_border(border_position::RIGHT) && cell->is_border(border_position::BOTTOM)) {
data.neighborhood |= 32;
}
if (cell->is_border(border_position::LEFT) && cell->is_border(border_position::TOP)) {
data.neighborhood |= 64;
}
if (cell->is_border(border_position::LEFT) && cell->is_border(border_position::BOTTOM)) {
data.neighborhood |= 128;
}
neighbors[j * _grid.imaxb() + i] = data;
}
}
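    // Pressure system matrix in diagonal (DIA) storage: the offsets {-imaxb, -1, 0, 1, imaxb}
    // correspond to the 5-point stencil (bottom, left, center, right, top neighbors).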
DiagonalSparseMatrix<Real> A_matrix_diag =
create_diagonal_matrix(A_pcg, _grid.imaxb(), _grid.jmaxb(), {-_grid.imaxb(), -1, 0, 1, _grid.imaxb()});
DiagonalSparseMatrix<Real> A_precond_diag;
if (_preconditioner != -1) {
A_precond_diag = create_preconditioner_spai(A_pcg, _grid, _preconditioner);
}
std::vector<Real> dists_vec;
if (_turb_model == 3) {
dists_vec.resize(grid_size);
for (int i = 0; i < grid_size; i++) {
dists_vec[i] = _grid._cells._container[i].closest_dist;
}
}
num_offsets_a = (int)A_matrix_diag.offsets.size();
num_offsets_m = (int)A_precond_diag.offsets.size();
auto t_matrix_data = T_fixed.value.data();
auto t_matrix_size = T_fixed.value.size();
auto t_row_start_data = T_fixed.rowstart.data();
auto t_row_start_size = T_fixed.rowstart.size();
auto t_col_idx_data = T_fixed.colindex.data();
auto t_col_idx_size = T_fixed.colindex.size();
auto t_rhs_data = T_RHS.data();
auto t_rhs_size = T_RHS.size();
auto u_matrix_data = U_fixed.value.data();
auto u_matrix_size = U_fixed.value.size();
auto v_matrix_data = V_fixed.value.data();
auto v_matrix_size = V_fixed.value.size();
auto u_row_start_data = U_fixed.rowstart.data();
auto u_row_start_size = U_fixed.rowstart.size();
auto u_col_idx_data = U_fixed.colindex.data();
auto u_col_idx_size = U_fixed.colindex.size();
auto v_row_start_data = V_fixed.rowstart.data();
auto v_row_start_size = V_fixed.rowstart.size();
auto v_col_idx_data = V_fixed.colindex.data();
auto v_col_idx_size = V_fixed.colindex.size();
auto u_rhs_data = U_RHS.data();
auto u_rhs_size = U_RHS.size();
auto v_rhs_data = V_RHS.data();
auto v_rhs_size = V_RHS.size();
cudaMalloc(&U, grid_size * sizeof(Real));
cudaMalloc(&V, grid_size * sizeof(Real));
cudaMalloc(&F, grid_size * sizeof(Real));
cudaMalloc(&G, grid_size * sizeof(Real));
cudaMalloc(&P, grid_size * sizeof(Real));
cudaMalloc(&P_temp, grid_size * sizeof(Real));
cudaMalloc(&RS, grid_size * sizeof(Real));
cudaMalloc(&U_residual, grid_size * sizeof(Real));
cudaMalloc(&V_residual, grid_size * sizeof(Real));
if (_turb_model != 0) {
cudaMalloc(&NU_residual, grid_size * sizeof(Real));
cudaMalloc(&K_residual, grid_size * sizeof(Real));
cudaMalloc(&EPS_residual, grid_size * sizeof(Real));
cudaMalloc(&NU_T, grid_size * sizeof(Real));
cudaMalloc(&NU_I, grid_size * sizeof(Real));
cudaMalloc(&NU_J, grid_size * sizeof(Real));
cudaMalloc(&K, grid_size * sizeof(Real));
cudaMalloc(&K_old, grid_size * sizeof(Real));
cudaMalloc(&EPS, grid_size * sizeof(Real));
cudaMalloc(&EPS_old, grid_size * sizeof(Real));
cudaMemset(NU_residual, 0, grid_size * sizeof(Real));
cudaMemset(K_residual, 0, grid_size * sizeof(Real));
cudaMemset(EPS_residual, 0, grid_size * sizeof(Real));
cudaMemset(NU_T, 0, grid_size * sizeof(Real));
cudaMemset(NU_I, 0, grid_size * sizeof(Real));
cudaMemset(NU_J, 0, grid_size * sizeof(Real));
cudaMemcpy(K, _field._K._container.data(), grid_size * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy(EPS, _field._EPS._container.data(), grid_size * sizeof(Real), cudaMemcpyHostToDevice);
if (_turb_model == 3) {
cudaMalloc(&dists, grid_size * sizeof(Real));
cudaMalloc(&S, grid_size * sizeof(Real));
cudaMemcpy(dists, dists_vec.data(), grid_size * sizeof(Real), cudaMemcpyHostToDevice);
}
}
cudaMalloc(&P_residual, grid_size * sizeof(Real));
cudaMalloc(&cell_type, grid_size * sizeof(int));
cudaMalloc(&row_start_u, u_row_start_size * sizeof(int));
cudaMalloc(&row_start_v, v_row_start_size * sizeof(int));
cudaMalloc(&row_start_t, t_row_start_size * sizeof(int));
cudaMalloc(&col_idx_u, u_col_idx_size * sizeof(int));
cudaMalloc(&col_idx_v, v_col_idx_size * sizeof(int));
cudaMalloc(&mat_u, u_matrix_size * sizeof(Real));
cudaMalloc(&mat_v, v_matrix_size * sizeof(Real));
cudaMalloc(&rhs_vec_u, u_rhs_size * sizeof(Real));
cudaMalloc(&rhs_vec_v, v_rhs_size * sizeof(Real));
cudaMalloc(&neighborhood, neighbors.size() * sizeof(uint32_t));
cudaMalloc(&A, A_matrix_diag.data.size() * sizeof(Real));
cudaMalloc(&A_offsets, A_matrix_diag.offsets.size() * sizeof(uint32_t));
if (_preconditioner != -1) {
cudaMalloc(&M, A_precond_diag.data.size() * sizeof(Real));
cudaMalloc(&M_offsets, A_precond_diag.offsets.size() * sizeof(uint32_t));
}
cudaMalloc(&q, grid_size * sizeof(Real));
cudaMalloc(&d, grid_size * sizeof(Real));
cudaMalloc(&r, grid_size * sizeof(Real));
cudaMalloc(&z, grid_size * sizeof(Real));
cudaMalloc(&r_dot_r, sizeof(Real));
cudaMalloc(&r_dot_r_old, sizeof(Real));
cudaMalloc(&d_dot_q, sizeof(Real));
cudaMalloc(&p_residual_out, sizeof(Real));
cudaMalloc(&cg_alpha, sizeof(Real));
cudaMalloc(&cg_beta, sizeof(Real));
chk(cudaMemcpy(U, _field._U._container.data(), grid_size * sizeof(Real), cudaMemcpyHostToDevice));
chk(cudaMemcpy(V, _field._V._container.data(), grid_size * sizeof(Real), cudaMemcpyHostToDevice));
chk(cudaMemcpy(P, _field._P._container.data(), grid_size * sizeof(Real), cudaMemcpyHostToDevice));
chk(cudaMemcpy(mat_u, u_matrix_data, u_matrix_size * sizeof(Real), cudaMemcpyHostToDevice));
chk(cudaMemcpy(row_start_u, u_row_start_data, u_row_start_size * sizeof(int), cudaMemcpyHostToDevice));
chk(cudaMemcpy(col_idx_u, u_col_idx_data, u_col_idx_size * sizeof(int), cudaMemcpyHostToDevice));
chk(cudaMemcpy(rhs_vec_u, u_rhs_data, u_rhs_size * sizeof(Real), cudaMemcpyHostToDevice));
chk(cudaMemcpy(mat_v, v_matrix_data, v_matrix_size * sizeof(Real), cudaMemcpyHostToDevice));
chk(cudaMemcpy(row_start_v, v_row_start_data, v_row_start_size * sizeof(int), cudaMemcpyHostToDevice));
chk(cudaMemcpy(col_idx_v, v_col_idx_data, v_col_idx_size * sizeof(int), cudaMemcpyHostToDevice));
chk(cudaMemcpy(rhs_vec_v, v_rhs_data, v_rhs_size * sizeof(Real), cudaMemcpyHostToDevice));
chk(cudaMemcpy(neighborhood, neighbors.data(), neighbors.size() * sizeof(uint32_t), cudaMemcpyHostToDevice));
chk(cudaMemcpy(A, A_matrix_diag.data.data(), A_matrix_diag.data.size() * sizeof(Real), cudaMemcpyHostToDevice));
chk(cudaMemcpy(A_offsets, A_matrix_diag.offsets.data(), A_matrix_diag.offsets.size() * sizeof(int),
cudaMemcpyHostToDevice));
if (_field.calc_temp) {
cudaMalloc(&T, grid_size * sizeof(Real));
cudaMalloc(&T_temp, grid_size * sizeof(Real));
cudaMalloc(&mat_t, t_matrix_size * sizeof(Real));
cudaMalloc(&rhs_vec_t, t_rhs_size * sizeof(Real));
cudaMalloc(&col_idx_t, t_col_idx_size * sizeof(int));
cudaMemcpy(mat_t, t_matrix_data, t_matrix_size * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy(row_start_t, t_row_start_data, t_row_start_size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(col_idx_t, t_col_idx_data, t_col_idx_size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(rhs_vec_t, t_rhs_data, t_rhs_size * sizeof(Real), cudaMemcpyHostToDevice);
chk(cudaMemcpy(T, _field._T._container.data(), grid_size * sizeof(Real), cudaMemcpyHostToDevice));
}
if (_preconditioner != -1) {
chk(cudaMemcpy(M, A_precond_diag.data.data(), A_precond_diag.data.size() * sizeof(Real),
cudaMemcpyHostToDevice));
chk(cudaMemcpy(M_offsets, A_precond_diag.offsets.data(), A_precond_diag.offsets.size() * sizeof(int),
cudaMemcpyHostToDevice));
}
chk(cudaMemcpy(cell_type, is_fluid.data(), is_fluid.size() * sizeof(int), cudaMemcpyHostToDevice));
}
void CudaSolver::solve_pre_pressure(Real &dt) {
auto grid_x = _grid.imaxb();
auto grid_y = _grid.jmaxb();
auto grid_size = grid_x * grid_y;
dim3 num_blks_1d(get_num_blks(grid_size));
dim3 blk_size_2d(BLK_SIZE_2D, BLK_SIZE_2D);
dim3 num_blks_2d = get_num_blks_2d(grid_x, grid_y);
dt = calculate_dt(_grid.imaxb(), _grid.jmaxb(), U, V, U_residual, V_residual, NU_residual, K_residual, EPS_residual,
NU_T, K, EPS, _grid.dx(), _grid.dy(), _field._tau, _field._nu, _field._alpha, _field.calc_temp,
_turb_model);
_field._dt = dt;
uv_boundary(U, V, row_start_u, row_start_v, col_idx_u, col_idx_v, mat_u, mat_v, rhs_vec_u, rhs_vec_v, grid_size);
if (_field.calc_temp) {
t_boundary(T, row_start_t, col_idx_t, mat_t, rhs_vec_t, grid_size);
chk(cudaMemcpy(T_temp, T, grid_size * sizeof(Real), cudaMemcpyDeviceToDevice));
calc_t<<<num_blks_2d, blk_size_2d>>>(U, V, _grid.dx(), _grid.dy(), T, T_temp, cell_type, _field._alpha, dt,
_discretization._gamma, _grid.imaxb(), _grid.jmaxb());
}
std::vector<Real> fcpu(grid_size);
std::vector<Real> gcpu(grid_size);
if (_turb_model != 0) {
calc_fg_turbulent<<<num_blks_2d, blk_size_2d>>>(F, G, U, V, NU_T, _field.calc_temp, _grid.dx(), _grid.dy(), T,
cell_type, dt, _discretization._gamma, _field._nu, _field._beta,
_field._gx, _field._gy, grid_x, grid_y);
} else {
calc_fg<<<num_blks_2d, blk_size_2d>>>(F, G, U, V, _field.calc_temp, _grid.dx(), _grid.dy(), T, cell_type, dt,
_discretization._gamma, _field._nu, _field._beta, _field._gx, _field._gy,
grid_x, grid_y);
}
fg_boundary<<<num_blks_2d, blk_size_2d>>>(F, G, U, V, grid_x, grid_y, neighborhood, cell_type);
calc_rs<<<num_blks_2d, blk_size_2d>>>(F, G, RS, _grid.dx(), _grid.dy(), grid_x, grid_y, dt, cell_type);
}
void CudaSolver::solve_pressure(Real &res, uint32_t &it) {
if (solver_type == SolverType::PCG) {
auto grid_x = _grid.imaxb();
auto grid_y = _grid.jmaxb();
auto grid_size = grid_x * grid_y;
int num_blks(get_num_blks(grid_size));
constexpr int PCG_MODE = 0;
if (PCG_MODE == 0) {
solve_pcg(A, A_offsets, num_offsets_a, P, RS, q, d, r, r_dot_r_old, r_dot_r, z, cg_beta, res, cg_alpha,
d_dot_q, _preconditioner, M, M_offsets, num_offsets_m, it, _max_iter, _tolerance,
_grid.imaxb() * _grid.jmaxb());
} else {
solve_pcg2(A, A_offsets, num_offsets_a, P, RS, q, d, r, r_dot_r_old, r_dot_r, z, cg_beta, U_residual, res,
cg_alpha, d_dot_q, _preconditioner, M, M_offsets, num_offsets_m, it, _max_iter, _tolerance,
_grid.imaxb() * _grid.jmaxb());
}
} else if (solver_type == SolverType::SOR) {
solve_sor(P, P_temp, P_residual, p_residual_out, neighborhood, _grid.imaxb(), _grid.jmaxb(), RS, cell_type, it,
_max_iter, _grid.dx(), _grid.dy(), _field._PI, _tolerance, res, (int)_grid.fluid_cells().size());
}
}
void CudaSolver::solve_post_pressure() {
auto grid_x = _grid.imaxb();
auto grid_y = _grid.jmaxb();
auto grid_size = grid_x * grid_y;
int num_blks(get_num_blks(grid_size));
dim3 blk_size_2d(BLK_SIZE_2D, BLK_SIZE_2D);
dim3 num_blks_2d = get_num_blks_2d(grid_x, grid_y);
calc_vel<<<num_blks_2d, blk_size_2d>>>(U, V, P, F, G, cell_type, _field._dt, grid_x, grid_y, _grid.dx(),
_grid.dy());
if (_turb_model != 0) {
chk(cudaMemcpy(K_old, K, grid_size * sizeof(Real), cudaMemcpyDeviceToDevice));
chk(cudaMemcpy(EPS_old, EPS, grid_size * sizeof(Real), cudaMemcpyDeviceToDevice));
calculate_nu_t<<<num_blks_2d, blk_size_2d>>>(NU_T, K, EPS, dists, S, cell_type, _field._nu, grid_x, grid_y,
_turb_model);
calculate_nu_ij<<<num_blks_2d, blk_size_2d>>>(NU_I, NU_J, K, EPS, dists, S, cell_type, _field._nu, grid_x,
grid_y, _turb_model);
calculate_k_and_epsilon<<<num_blks_2d, blk_size_2d>>>(K_old, EPS_old, K, EPS, NU_T, NU_I, NU_J, U, V, cell_type,
_field._nu, grid_x, grid_y, _field._dt, 1 / _grid.dx(),
1 / _grid.dy(), _turb_model, S, dists);
// TODO : Implement KIN and EPSIN
nu_t_boundary<<<num_blks_2d, blk_size_2d>>>(NU_T, K, EPS, grid_x, grid_y, neighborhood, cell_type, _KIN, _EPSIN,
_field._nu, _turb_model);
}
if (_should_out) {
chk(cudaMemcpy(_field._U._container.data(), U, grid_size * sizeof(Real), cudaMemcpyDeviceToHost));
chk(cudaMemcpy(_field._V._container.data(), V, grid_size * sizeof(Real), cudaMemcpyDeviceToHost));
chk(cudaMemcpy(_field._P._container.data(), P, grid_size * sizeof(Real), cudaMemcpyDeviceToHost));
if (_field.calc_temp) {
chk(cudaMemcpy(_field._T._container.data(), T, grid_size * sizeof(Real), cudaMemcpyDeviceToHost));
}
if (_turb_model != 0) {
chk(cudaMemcpy(_field._NU_T._container.data(), NU_T, grid_size * sizeof(Real), cudaMemcpyDeviceToHost));
chk(cudaMemcpy(_field._K._container.data(), K, grid_size * sizeof(Real), cudaMemcpyDeviceToHost));
chk(cudaMemcpy(_field._EPS._container.data(), EPS, grid_size * sizeof(Real), cudaMemcpyDeviceToHost));
}
}
}
CudaSolver::~CudaSolver() {
cudaFree(U);
cudaFree(V);
cudaFree(F);
cudaFree(G);
cudaFree(P);
cudaFree(T);
cudaFree(T_temp);
cudaFree(RS);
cudaFree(U_residual);
cudaFree(V_residual);
if (_turb_model != 0) {
cudaFree(NU_residual);
cudaFree(K_residual);
cudaFree(EPS_residual);
cudaFree(NU_T);
cudaFree(NU_I);
cudaFree(NU_J);
cudaFree(K);
cudaFree(K_old);
cudaFree(EPS_old);
cudaFree(EPS);
if (_turb_model == 3) {
cudaFree(dists);
cudaFree(S);
}
}
cudaFree(P_residual);
cudaFree(cell_type);
cudaFree(P_temp);
cudaFree(row_start_u);
cudaFree(row_start_v);
cudaFree(row_start_t);
cudaFree(col_idx_u);
cudaFree(col_idx_v);
cudaFree(col_idx_t);
cudaFree(mat_u);
cudaFree(mat_v);
cudaFree(mat_t);
cudaFree(rhs_vec_u);
cudaFree(rhs_vec_v);
cudaFree(rhs_vec_t);
cudaFree(neighborhood);
cudaFree(A);
cudaFree(A_offsets);
if (_preconditioner != -1) {
cudaFree(M);
cudaFree(M_offsets);
}
cudaFree(q);
cudaFree(d);
cudaFree(r);
cudaFree(z);
cudaFree(r_dot_r);
cudaFree(r_dot_r_old);
cudaFree(d_dot_q);
cudaFree(p_residual_out);
cudaFree(cg_alpha);
cudaFree(cg_beta);
}
|
8dbcd144b360d92a3ff8aad1ebb8b50b80b6f4da.hip | // !!! This is a file automatically generated by hipify!!!
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdlib.h>
#include <math.h>
#define BLOCK_SIZE 16
#define STR_SIZE 256
#define block_x_ 128
#define block_y_ 2
#define block_z_ 1
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
#include "../timing.h"
/*timing globals */
float time_pre = 0;
float time_post = 0;
float time_serial = 0;
float time_copy_in = 0;
float time_copy_out = 0;
float time_kernel = 0;
float time_malloc = 0;
float time_free = 0;
#include "opt1.cu"
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016; /* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void fatal(const char *s) {
fprintf(stderr, "Error: %s\n", s);
}
void readinput(float *vect, int grid_rows, int grid_cols, int layers, char *file) {
int i, j, k;
FILE *fp;
char str[STR_SIZE];
float val;
if ((fp = fopen(file, "r")) == 0)
fatal("The file was not opened");
for (i = 0; i <= grid_rows - 1; i++)
for (j = 0; j <= grid_cols - 1; j++)
for (k = 0; k <= layers - 1; k++) {
if (fgets(str, STR_SIZE, fp) == NULL)
fatal("Error reading file\n");
if (feof(fp))
fatal("not enough lines in file");
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i * grid_cols + j + k * grid_rows * grid_cols] = val;
}
fclose(fp);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, int layers, char *file) {
int i, j, k, index = 0;
FILE *fp;
char str[STR_SIZE];
if ((fp = fopen(file, "w")) == 0)
printf("The file was not opened\n");
for (i = 0; i < grid_rows; i++)
for (j = 0; j < grid_cols; j++)
for (k = 0; k < layers; k++) {
sprintf(str, "%d\t%g\n", index, vect[i * grid_cols + j + k * grid_rows * grid_cols]);
fputs(str, fp);
index++;
}
fclose(fp);
}
void computeTempCPU(float *pIn, float* tIn, float *tOut, int nx, int ny, int nz, float Cap,
float Rx, float Ry, float Rz, float dt, int numiter) {
float ce, cw, cn, cs, ct, cb, cc;
float stepDivCap = dt / Cap;
ce = cw = stepDivCap / Rx;
cn = cs = stepDivCap / Ry;
ct = cb = stepDivCap / Rz;
cc = 1.0 - (2.0 * ce + 2.0 * cn + 3.0 * ct);
int c, w, e, n, s, b, t;
int x, y, z;
int i = 0;
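  // Explicit time stepping with double buffering: each pass reads tIn and writes tOut, then
  // the two pointers are swapped for the next iteration.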
do {
for (z = 0; z < nz; z++)
for (y = 0; y < ny; y++)
for (x = 0; x < nx; x++) {
c = x + y * nx + z * nx * ny;
w = (x == 0) ? c : c - 1;
e = (x == nx - 1) ? c : c + 1;
n = (y == 0) ? c : c - nx;
s = (y == ny - 1) ? c : c + nx;
b = (z == 0) ? c : c - nx * ny;
t = (z == nz - 1) ? c : c + nx * ny;
tOut[c] = tIn[c] * cc + tIn[n] * cn + tIn[s] * cs + tIn[e] * ce + tIn[w] * cw
+ tIn[t] * ct + tIn[b] * cb + (dt / Cap) * pIn[c] + ct * amb_temp;
}
float *temp = tIn;
tIn = tOut;
tOut = temp;
i++;
} while (i < numiter);
}
float accuracy(float *arr1, float *arr2, int len) {
float err = 0.0;
int i;
for (i = 0; i < len; i++) {
err += (arr1[i] - arr2[i]) * (arr1[i] - arr2[i]);
}
return (float) sqrt(err / len);
}
void usage(int argc, char **argv) {
fprintf(stderr,
"Usage: %s <rows/cols> <layers> <iterations> <powerFile> <tempFile> <outputFile>\n", argv[0]);
fprintf(stderr, "\t<rows/cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<layers> - number of layers in the grid (positive integer)\n");
fprintf(stderr, "\t<iteration> - number of iterations\n");
fprintf(stderr,
"\t<powerFile> - name of the file containing the initial power values of each cell\n");
fprintf(stderr,
"\t<tempFile> - name of the file containing the initial temperature values of each cell\n");
  fprintf(stderr, "\t<outputFile> - output file\n");
fprintf(stderr, "\t<optional unified flag> - unified memory\n");
exit(1);
}
int main(int argc, char** argv) {
if (argc < 7) {
usage(argc, argv);
}
char *pfile, *tfile, *ofile;
int iterations = atoi(argv[3]);
pfile = argv[4];
tfile = argv[5];
ofile = argv[6];
bool unified = argc == 8;
int numCols = atoi(argv[1]);
int numRows = atoi(argv[1]);
int layers = atoi(argv[2]);
/* calculating parameters*/
TIMESTAMP(t0);
float dx = chip_height / numRows;
float dy = chip_width / numCols;
float dz = t_chip / layers;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * dx * dy;
float Rx = dy / (2.0 * K_SI * t_chip * dx);
float Ry = dx / (2.0 * K_SI * t_chip * dy);
float Rz = dz / (K_SI * dx * dy);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float dt = PRECISION / max_slope;
float *powerIn, *tempOut, *tempIn, *tempCopy;
int size = numCols * numRows * layers;
TIMESTAMP(t1);
time_pre += ELAPSED(t0, t1);
if (unified) {
hipMallocManaged(&powerIn, size * sizeof(float));
hipMallocManaged(&tempIn, size * sizeof(float));
hipMallocManaged(&tempOut, size * sizeof(float));
} else {
powerIn = (float*) calloc(size, sizeof(float));
tempIn = (float*) calloc(size, sizeof(float));
tempOut = (float*) calloc(size, sizeof(float));
}
tempCopy = (float*) malloc(size * sizeof(float));
float* answer = (float*) calloc(size, sizeof(float));
TIMESTAMP(t2);
time_malloc += ELAPSED(t1, t2);
TIMESTAMP(t3);
readinput(powerIn, numRows, numCols, layers, pfile);
readinput(tempIn, numRows, numCols, layers, tfile);
memcpy(tempCopy, tempIn, size * sizeof(float));
TIMESTAMP(t4);
time_pre += ELAPSED(t3, t4);
hotspot_opt1(powerIn, tempIn, tempOut, numCols, numRows, layers, Cap, Rx, Ry, Rz, dt, iterations,
unified);
TIMESTAMP(t5);
computeTempCPU(powerIn, tempCopy, answer, numCols, numRows, layers, Cap, Rx, Ry, Rz, dt,
iterations);
TIMESTAMP(t6);
time_serial += ELAPSED(t5, t6);
float acc = accuracy(tempOut, answer, numRows * numCols * layers);
printf("Accuracy: %e\n", acc);
writeoutput(tempOut, numRows, numCols, layers, ofile);
TIMESTAMP(t7);
time_post += ELAPSED(t6, t7);
if (unified) {
hipFree(tempIn);
hipFree(tempOut);
hipFree(powerIn);
} else {
free(tempIn);
free(tempOut);
free(powerIn);
}
TIMESTAMP(t8);
time_free += ELAPSED(t7, t8);
printf("====Timing info====\n");
printf("time malloc = %f ms\n", time_malloc);
printf("time pre = %f ms\n", time_pre);
printf("time copyIn = %f ms\n", time_copy_in);
printf("time kernel = %f ms\n", time_kernel);
printf("time serial = %f ms\n", time_serial);
printf("time copyOut = %f ms\n", time_copy_out);
printf("time post = %f ms\n", time_post);
printf("time free = %f ms\n", time_free);
printf("time end-to-end = %f ms\n", ELAPSED(t0, t8));
exit(EXIT_SUCCESS);
}
| 8dbcd144b360d92a3ff8aad1ebb8b50b80b6f4da.cu | #include <errno.h>
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdlib.h>
#include <math.h>
#define BLOCK_SIZE 16
#define STR_SIZE 256
#define block_x_ 128
#define block_y_ 2
#define block_z_ 1
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
#include "../timing.h"
/*timing globals */
float time_pre = 0;
float time_post = 0;
float time_serial = 0;
float time_copy_in = 0;
float time_copy_out = 0;
float time_kernel = 0;
float time_malloc = 0;
float time_free = 0;
#include "opt1.cu"
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016; /* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void fatal(const char *s) {
fprintf(stderr, "Error: %s\n", s);
}
void readinput(float *vect, int grid_rows, int grid_cols, int layers, char *file) {
int i, j, k;
FILE *fp;
char str[STR_SIZE];
float val;
if ((fp = fopen(file, "r")) == 0)
fatal("The file was not opened");
for (i = 0; i <= grid_rows - 1; i++)
for (j = 0; j <= grid_cols - 1; j++)
for (k = 0; k <= layers - 1; k++) {
if (fgets(str, STR_SIZE, fp) == NULL)
fatal("Error reading file\n");
if (feof(fp))
fatal("not enough lines in file");
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i * grid_cols + j + k * grid_rows * grid_cols] = val;
}
fclose(fp);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, int layers, char *file) {
int i, j, k, index = 0;
FILE *fp;
char str[STR_SIZE];
if ((fp = fopen(file, "w")) == 0)
printf("The file was not opened\n");
for (i = 0; i < grid_rows; i++)
for (j = 0; j < grid_cols; j++)
for (k = 0; k < layers; k++) {
sprintf(str, "%d\t%g\n", index, vect[i * grid_cols + j + k * grid_rows * grid_cols]);
fputs(str, fp);
index++;
}
fclose(fp);
}
void computeTempCPU(float *pIn, float* tIn, float *tOut, int nx, int ny, int nz, float Cap,
float Rx, float Ry, float Rz, float dt, int numiter) {
float ce, cw, cn, cs, ct, cb, cc;
float stepDivCap = dt / Cap;
ce = cw = stepDivCap / Rx;
cn = cs = stepDivCap / Ry;
ct = cb = stepDivCap / Rz;
cc = 1.0 - (2.0 * ce + 2.0 * cn + 3.0 * ct);
int c, w, e, n, s, b, t;
int x, y, z;
int i = 0;
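  // Explicit time stepping with double buffering: each pass reads tIn and writes tOut, then
  // the two pointers are swapped for the next iteration.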
do {
for (z = 0; z < nz; z++)
for (y = 0; y < ny; y++)
for (x = 0; x < nx; x++) {
c = x + y * nx + z * nx * ny;
w = (x == 0) ? c : c - 1;
e = (x == nx - 1) ? c : c + 1;
n = (y == 0) ? c : c - nx;
s = (y == ny - 1) ? c : c + nx;
b = (z == 0) ? c : c - nx * ny;
t = (z == nz - 1) ? c : c + nx * ny;
tOut[c] = tIn[c] * cc + tIn[n] * cn + tIn[s] * cs + tIn[e] * ce + tIn[w] * cw
+ tIn[t] * ct + tIn[b] * cb + (dt / Cap) * pIn[c] + ct * amb_temp;
}
float *temp = tIn;
tIn = tOut;
tOut = temp;
i++;
} while (i < numiter);
}
float accuracy(float *arr1, float *arr2, int len) {
float err = 0.0;
int i;
for (i = 0; i < len; i++) {
err += (arr1[i] - arr2[i]) * (arr1[i] - arr2[i]);
}
return (float) sqrt(err / len);
}
void usage(int argc, char **argv) {
fprintf(stderr,
"Usage: %s <rows/cols> <layers> <iterations> <powerFile> <tempFile> <outputFile>\n", argv[0]);
fprintf(stderr, "\t<rows/cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<layers> - number of layers in the grid (positive integer)\n");
fprintf(stderr, "\t<iteration> - number of iterations\n");
fprintf(stderr,
"\t<powerFile> - name of the file containing the initial power values of each cell\n");
fprintf(stderr,
"\t<tempFile> - name of the file containing the initial temperature values of each cell\n");
  fprintf(stderr, "\t<outputFile> - output file\n");
fprintf(stderr, "\t<optional unified flag> - unified memory\n");
exit(1);
}
int main(int argc, char** argv) {
if (argc < 7) {
usage(argc, argv);
}
char *pfile, *tfile, *ofile;
int iterations = atoi(argv[3]);
pfile = argv[4];
tfile = argv[5];
ofile = argv[6];
bool unified = argc == 8;
int numCols = atoi(argv[1]);
int numRows = atoi(argv[1]);
int layers = atoi(argv[2]);
/* calculating parameters*/
TIMESTAMP(t0);
float dx = chip_height / numRows;
float dy = chip_width / numCols;
float dz = t_chip / layers;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * dx * dy;
float Rx = dy / (2.0 * K_SI * t_chip * dx);
float Ry = dx / (2.0 * K_SI * t_chip * dy);
float Rz = dz / (K_SI * dx * dy);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float dt = PRECISION / max_slope;
float *powerIn, *tempOut, *tempIn, *tempCopy;
int size = numCols * numRows * layers;
TIMESTAMP(t1);
time_pre += ELAPSED(t0, t1);
if (unified) {
cudaMallocManaged(&powerIn, size * sizeof(float));
cudaMallocManaged(&tempIn, size * sizeof(float));
cudaMallocManaged(&tempOut, size * sizeof(float));
} else {
powerIn = (float*) calloc(size, sizeof(float));
tempIn = (float*) calloc(size, sizeof(float));
tempOut = (float*) calloc(size, sizeof(float));
}
tempCopy = (float*) malloc(size * sizeof(float));
float* answer = (float*) calloc(size, sizeof(float));
TIMESTAMP(t2);
time_malloc += ELAPSED(t1, t2);
TIMESTAMP(t3);
readinput(powerIn, numRows, numCols, layers, pfile);
readinput(tempIn, numRows, numCols, layers, tfile);
memcpy(tempCopy, tempIn, size * sizeof(float));
TIMESTAMP(t4);
time_pre += ELAPSED(t3, t4);
hotspot_opt1(powerIn, tempIn, tempOut, numCols, numRows, layers, Cap, Rx, Ry, Rz, dt, iterations,
unified);
TIMESTAMP(t5);
computeTempCPU(powerIn, tempCopy, answer, numCols, numRows, layers, Cap, Rx, Ry, Rz, dt,
iterations);
TIMESTAMP(t6);
time_serial += ELAPSED(t5, t6);
float acc = accuracy(tempOut, answer, numRows * numCols * layers);
printf("Accuracy: %e\n", acc);
writeoutput(tempOut, numRows, numCols, layers, ofile);
TIMESTAMP(t7);
time_post += ELAPSED(t6, t7);
if (unified) {
cudaFree(tempIn);
cudaFree(tempOut);
cudaFree(powerIn);
} else {
free(tempIn);
free(tempOut);
free(powerIn);
}
TIMESTAMP(t8);
time_free += ELAPSED(t7, t8);
printf("====Timing info====\n");
printf("time malloc = %f ms\n", time_malloc);
printf("time pre = %f ms\n", time_pre);
printf("time copyIn = %f ms\n", time_copy_in);
printf("time kernel = %f ms\n", time_kernel);
printf("time serial = %f ms\n", time_serial);
printf("time copyOut = %f ms\n", time_copy_out);
printf("time post = %f ms\n", time_post);
printf("time free = %f ms\n", time_free);
printf("time end-to-end = %f ms\n", ELAPSED(t0, t8));
exit(EXIT_SUCCESS);
}
|
571b3b0cbd721b4479cf05f812422db26199fe5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zgemm_reduce.cu, normal z -> s, Sun Nov 20 20:20:28 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_sgemm_reduce,
// because it depends on the CUDA architecture at runtime.
/******************************************************************************/
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void sgemm_reduce_kernel(
int m, int n, int k,
float alpha,
const float* __restrict__ dA, int lda,
const float* __restrict__ dB, int ldb,
float beta,
float * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ float sum[BLK_K][BLK_M+1][BLK_N+1];
float lsum;
        /* each thread accumulates a strided partial dot product of one column of A with one column of B */
lsum = MAGMA_S_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_S_CONJ( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
        /* C(i,j) := alpha * (A^T * B)(i,j) + beta * C(i,j) */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_S_EQUAL(beta, MAGMA_S_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
/***************************************************************************//**
Purpose
-------
SGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_sgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dB, magma_int_t lddb,
float beta,
magmaFloat_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
hipLaunchKernelGGL(( sgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
hipLaunchKernelGGL(( sgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
| 571b3b0cbd721b4479cf05f812422db26199fe5e.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zgemm_reduce.cu, normal z -> s, Sun Nov 20 20:20:28 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_sgemm_reduce,
// because it depends on the CUDA architecture at runtime.
/******************************************************************************/
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void sgemm_reduce_kernel(
int m, int n, int k,
float alpha,
const float* __restrict__ dA, int lda,
const float* __restrict__ dB, int ldb,
float beta,
float * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ float sum[BLK_K][BLK_M+1][BLK_N+1];
float lsum;
        /* each thread accumulates a strided partial dot product of one column of A with one column of B */
lsum = MAGMA_S_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_S_CONJ( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
        /* C(i,j) := alpha * (A^T * B)(i,j) + beta * C(i,j) */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_S_EQUAL(beta, MAGMA_S_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
/***************************************************************************//**
Purpose
-------
SGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_sgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dB, magma_int_t lddb,
float beta,
magmaFloat_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
sgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
sgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
|