hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M) |
---|---|---|---|
7aaeff53ec56e86a6faff65e8119fd8581ec3278.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <vector>
#include <cmath>
#define BLOCK_SIZE 1024
__global__ void reduceSum(int *ada, int *gabrys){
__shared__ int partialSum[2 * BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * BLOCK_SIZE;
if(start + t < N){
partialSum[t] = ada[start + t];
}
else{
partialSum[t] = 0;
}
if (start + BLOCK_SIZE + t < N){
partialSum[BLOCK_SIZE + t] = ada[start + BLOCK_SIZE + t];
}
else{
partialSum[BLOCK_SIZE + t] = 0;
}
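// Interleaved-addressing tree reduction over the 2*BLOCK_SIZE shared elements:
// on each pass, threads whose index is a multiple of the current stride fold
// the value `stride` slots to the right into slot 2*t.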
for(unsigned int stride = 1; stride <= BLOCK_SIZE ; stride *= 2){
__syncthreads();
if (t % stride == 0 ){
partialSum[2*t] += partialSum[2*t + stride];
}
}
if(t == 0){
gabrys[blockIdx.x] = partialSum[0];
}
__syncthreads();
}
int main(void){
int * ada, * gabrys;
hipMallocManaged(&ada, N * sizeof(int));
hipMallocManaged(&gabrys, N * sizeof(int));
for(int i = 0; i < N; i++){
ada[i] = 1;
}
dim3 threadsPerBlock(BLOCK_SIZE);
dim3 blocksPerGrid((N + BLOCK_SIZE - 1)/BLOCK_SIZE);
hipLaunchKernelGGL(( reduceSum), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, ada,gabrys);
hipDeviceSynchronize();
int count = log(N);
for(int i = 0; i<count; i++){
hipLaunchKernelGGL(( reduceSum), dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, gabrys,gabrys);
hipDeviceSynchronize();
}
std::cout<<gabrys[0]<<std::endl;
}
| 7aaeff53ec56e86a6faff65e8119fd8581ec3278.cu | #include <cuda_runtime.h>
#include <iostream>
#include <stdio.h>
#include <vector>
#include <cmath>
#define BLOCK_SIZE 1024
__global__ void reduceSum(int *ada, int *gabrys){
__shared__ int partialSum[2 * BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * BLOCK_SIZE;
if(start + t < N){
partialSum[t] = ada[start + t];
}
else{
partialSum[t] = 0;
}
if (start + BLOCK_SIZE + t < N){
partialSum[BLOCK_SIZE + t] = ada[start + BLOCK_SIZE + t];
}
else{
partialSum[BLOCK_SIZE + t] = 0;
}
for(unsigned int stride = 1; stride <= BLOCK_SIZE ; stride *= 2){
__syncthreads();
if (t % stride == 0 ){
partialSum[2*t] += partialSum[2*t + stride];
}
}
if(t == 0){
gabrys[blockIdx.x] = partialSum[0];
}
__syncthreads();
}
int main(void){
int * ada, * gabrys;
cudaMallocManaged(&ada, N * sizeof(int));
cudaMallocManaged(&gabrys, N * sizeof(int));
for(int i = 0; i < N; i++){
ada[i] = 1;
}
dim3 threadsPerBlock(BLOCK_SIZE);
dim3 blocksPerGrid((N + BLOCK_SIZE - 1)/BLOCK_SIZE);
reduceSum<<<blocksPerGrid, threadsPerBlock>>>(ada,gabrys);
cudaDeviceSynchronize();
int count = log(N);
for(int i = 0; i<count; i++){
reduceSum<<<blocksPerGrid, BLOCK_SIZE>>>(gabrys,gabrys);
cudaDeviceSynchronize();
}
std::cout<<gabrys[0]<<std::endl;
}
|
c9308c6f26dd03e21cfeb4473238b846cd3b6e99.hip | // !!! This is a file automatically generated by hipify!!!
//nvcc -arch sm_21 -o test -run --ptxas-options="-v" -lcufft cudatest.cu
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include </usr/include/hipfft.h>
__global__ void cuda_print() {
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int bx = blockIdx.x;
int by = blockIdx.y;
int bz = blockIdx.z;
int bdx = blockDim.x;
int bdy = blockDim.y;
int bdz = blockDim.z;
int gdx = gridDim.x;
int gdy = gridDim.y;
int gdz = gridDim.z;
printf("Thread id (%d, %d, %d) \n", tx, ty,tz);
//printf("blockdim (%d, %d, %d) \n", bdx, bdy, bdz);
printf("Block idx (%d, %d, %d) \n", bx, by, bz);
//printf("griddim (%d, %d, %d) \n", gdx, gdy, gdz);
printf("=======================\n");
}
int main(int argc, char** argv) {
// do some cuda testing
hipError_t res;
printf("entered \n");
/*cuda_print<<<dim3(2,2,2), dim3(1,1,1)>>>();
res = hipDeviceSynchronize();*/
/*cuda_print<<<dim3(1,1,1), dim3(2,2,2)>>>();
res = hipDeviceSynchronize();*/
/*cuda_print<<<dim3(2,2), dim3(2,2)>>>();
res = hipDeviceSynchronize();
cuda_print<<<dim3(2,2,1), dim3(1,2,3)>>>();
res = hipDeviceSynchronize();*/
int n = 4;
float* mat = (float*) malloc(n*n*n*sizeof(float));
int i,j,k;
for (i=0; i < n*n*n; i++) {
*(mat+i) = i;//%n;
}
for (i=0; i<n; i++) {
printf("======= x sheet %d =====\n", i);
for (j=0; j<n; j++) {
for (k=0; k<n; k++) {
printf("%f ", *(mat+n*n*i+n*j+k));
}
printf("\n\n");
}
}
float* gpu_in;
hipfftComplex* gpu_out;
hipMalloc(&gpu_in, n*n*n*sizeof(float));
hipMalloc(&gpu_out, n*n*(n/2+1)*sizeof(hipfftComplex));
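// A real-to-complex transform has Hermitian symmetry, so only n*n*(n/2+1)
// complex coefficients (last dimension halved, plus one) need to be stored.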
hipfftComplex* fft_out = (hipfftComplex*) malloc(n*n*(n/2+1)*sizeof(hipfftComplex));
hipMemcpy(gpu_in, mat, n*n*n*sizeof(float), hipMemcpyHostToDevice);
hipfftHandle plan;
hipfftPlan3d(&plan, n,n,n, HIPFFT_R2C);
hipfftExecR2C(plan, gpu_in, gpu_out);
//hipfftDestroy(plan);
hipMemcpy(fft_out, gpu_out, n*n*(n/2+1)*sizeof(hipfftComplex), hipMemcpyDeviceToHost);
for (i=0; i<n; i++) {
printf("======= x sheet %d =====\n", i);
for (j=0; j<n; j++) {
for (k=0; k<n/2+1; k++) {
printf("%f ", ((hipfftComplex)*(fft_out+n*n*i+n*j+k)).x);
printf("+%fi ", ((hipfftComplex)*(fft_out+n*n*i+n*j+k)).y);
}
printf("\n\n");
}
}
hipfftPlan3d(&plan, n,n,n, HIPFFT_C2R);
hipfftExecC2R(plan, gpu_out, gpu_in);
hipMemcpy(mat, gpu_in, n*n*n*sizeof(float), hipMemcpyDeviceToHost);
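// The C2R inverse transform is unnormalized, so the print loop below divides
// each value by n*n*n to recover the original input scale.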
for (i=0; i<n; i++) {
printf("======= x sheet %d =====\n", i);
for (j=0; j<n; j++) {
for (k=0; k<n; k++) {
printf("%f ", (*(mat+i*n*n+j*n+k))/(n*n*n));
}
printf("\n\n");
}
}
}
| c9308c6f26dd03e21cfeb4473238b846cd3b6e99.cu | //nvcc -arch sm_21 -o test -run --ptxas-options="-v" -lcufft cudatest.cu
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include </usr/include/cufft.h>
__global__ void cuda_print() {
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int bx = blockIdx.x;
int by = blockIdx.y;
int bz = blockIdx.z;
int bdx = blockDim.x;
int bdy = blockDim.y;
int bdz = blockDim.z;
int gdx = gridDim.x;
int gdy = gridDim.y;
int gdz = gridDim.z;
printf("Thread id (%d, %d, %d) \n", tx, ty,tz);
//printf("blockdim (%d, %d, %d) \n", bdx, bdy, bdz);
printf("Block idx (%d, %d, %d) \n", bx, by, bz);
//printf("griddim (%d, %d, %d) \n", gdx, gdy, gdz);
printf("=======================\n");
}
int main(int argc, char** argv) {
// do some cuda testing
cudaError_t res;
printf("entered \n");
/*cuda_print<<<dim3(2,2,2), dim3(1,1,1)>>>();
res = cudaDeviceSynchronize();*/
/*cuda_print<<<dim3(1,1,1), dim3(2,2,2)>>>();
res = cudaDeviceSynchronize();*/
/*cuda_print<<<dim3(2,2), dim3(2,2)>>>();
res = cudaDeviceSynchronize();
cuda_print<<<dim3(2,2,1), dim3(1,2,3)>>>();
res = cudaDeviceSynchronize();*/
int n = 4;
float* mat = (float*) malloc(n*n*n*sizeof(float));
int i,j,k;
for (i=0; i < n*n*n; i++) {
*(mat+i) = i;//%n;
}
for (i=0; i<n; i++) {
printf("======= x sheet %d =====\n", i);
for (j=0; j<n; j++) {
for (k=0; k<n; k++) {
printf("%f ", *(mat+n*n*i+n*j+k));
}
printf("\n\n");
}
}
float* gpu_in;
cufftComplex* gpu_out;
cudaMalloc(&gpu_in, n*n*n*sizeof(float));
cudaMalloc(&gpu_out, n*n*(n/2+1)*sizeof(cufftComplex));
cufftComplex* fft_out = (cufftComplex*) malloc(n*n*(n/2+1)*sizeof(cufftComplex));
cudaMemcpy(gpu_in, mat, n*n*n*sizeof(float), cudaMemcpyHostToDevice);
cufftHandle plan;
cufftPlan3d(&plan, n,n,n, CUFFT_R2C);
cufftExecR2C(plan, gpu_in, gpu_out);
//cufftDestroy(plan);
cudaMemcpy(fft_out, gpu_out, n*n*(n/2+1)*sizeof(cufftComplex), cudaMemcpyDeviceToHost);
for (i=0; i<n; i++) {
printf("======= x sheet %d =====\n", i);
for (j=0; j<n; j++) {
for (k=0; k<n/2+1; k++) {
printf("%f ", ((cufftComplex)*(fft_out+n*n*i+n*j+k)).x);
printf("+%fi ", ((cufftComplex)*(fft_out+n*n*i+n*j+k)).y);
}
printf("\n\n");
}
}
cufftPlan3d(&plan, n,n,n, CUFFT_C2R);
cufftExecC2R(plan, gpu_out, gpu_in);
cudaMemcpy(mat, gpu_in, n*n*n*sizeof(float), cudaMemcpyDeviceToHost);
for (i=0; i<n; i++) {
printf("======= x sheet %d =====\n", i);
for (j=0; j<n; j++) {
for (k=0; k<n; k++) {
printf("%f ", (*(mat+i*n*n+j*n+k))/(n*n*n));
}
printf("\n\n");
}
}
}
|
338eca20ec43f9032d3b7ecc33d23fdd36a7eab2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <cassert>
#include <iostream>
#include <chrono>
#include <random>
using namespace std;
//=========================== prototypes des fonctions ========================================
__global__ void vectorAdd(int *a, int *b, int *c, int N) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = j*gridDim.x * blockDim.x + i;
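// Flatten a 2-D thread index into a 1-D element index; with the 1-D launch
// used below, j is always 0 and k reduces to i.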
if (k < N) c[k] = a[k] + b[k];
}
auto get_time() { return chrono::high_resolution_clock::now(); }
//=========================== fuction main ===================================================
int main() {
const int N = 1000 << 16;
size_t bytes = N * sizeof(int);
int BLOCK_SIZE = 1 << 10;
int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
int *a, *b, *c;
hipMallocManaged(&a, bytes);
hipMallocManaged(&b, bytes);
hipMallocManaged(&c, bytes);
// initialisation les vacteurs a ,b
for (int i = 0; i < N; i++) {
a[i] = rand() % 100;
b[i] = rand() % 100;
}
auto start = get_time();
hipLaunchKernelGGL(( vectorAdd), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, a, b, c, N);
hipDeviceSynchronize();
auto finish = get_time();
auto duration =
chrono::duration_cast<std::chrono::milliseconds>(finish - start);
cout << "temps coul en kernel = " << duration.count() << " ms\n";
// vrification
for (int i = 0; i < N; i++) {
assert(c[i] == a[i] + b[i]);
}
hipFree(a);
hipFree(b);
hipFree(c);
cout << "termin avec succs!"<<endl;
return 0;
}
| 338eca20ec43f9032d3b7ecc33d23fdd36a7eab2.cu | #include <stdio.h>
#include <cassert>
#include <iostream>
#include <chrono>
#include <random>
using namespace std;
//=========================== prototypes des fonctions ========================================
__global__ void vectorAdd(int *a, int *b, int *c, int N) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = j*gridDim.x * blockDim.x + i;
if (k < N) c[k] = a[k] + b[k];
}
auto get_time() { return chrono::high_resolution_clock::now(); }
//=========================== fuction main ===================================================
int main() {
const int N = 1000 << 16;
size_t bytes = N * sizeof(int);
int BLOCK_SIZE = 1 << 10;
int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
int *a, *b, *c;
cudaMallocManaged(&a, bytes);
cudaMallocManaged(&b, bytes);
cudaMallocManaged(&c, bytes);
// initialisation les vacteurs a ,b
for (int i = 0; i < N; i++) {
a[i] = rand() % 100;
b[i] = rand() % 100;
}
auto start = get_time();
vectorAdd<<<GRID_SIZE, BLOCK_SIZE>>>(a, b, c, N);
cudaDeviceSynchronize();
auto finish = get_time();
auto duration =
chrono::duration_cast<std::chrono::milliseconds>(finish - start);
cout << "temps écoulé en kernel = " << duration.count() << " ms\n";
// vérification
for (int i = 0; i < N; i++) {
assert(c[i] == a[i] + b[i]);
}
cudaFree(a);
cudaFree(b);
cudaFree(c);
cout << "terminé avec succès!"<<endl;
return 0;
}
|
d90ad0e68af15c9e8556b687730a45f2877615c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CUB_STDERR
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/elementwise_op.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
#define CUDA_FUNCTOR(name, op, input_type, output_type) \
template <int b_is_scalar, typename T, typename R> \
__global__ void name##Kernel(const T* a, const T* b, R* out, int n) { \
CUDA_1D_KERNEL_LOOP(i, n) { \
out[i] = op(a[i], b[b_is_scalar ? 0 : i]); \
} \
} \
template <typename T, typename R> \
__global__ void name##BroadcastKernel( \
const T* a, const T* b, R* out, int pre, int n) { \
CUDA_1D_KERNEL_LOOP(i, pre * n) { \
out[i] = op(a[i], b[i % n]); \
} \
} \
template <typename T, typename R> \
__global__ void name##Broadcast2Kernel( \
const T* a, const T* b, R* out, int pre, int n, int post) { \
CUDA_1D_KERNEL_LOOP(i, pre * n * post) { \
out[i] = op(a[i], b[(i / post) % n]); \
} \
} \
\
struct Cuda##name##Functor { \
template <bool b_is_scalar, typename T, typename R> \
inline void Run( \
size_t n, const T* a, const T* b, R* out, CUDAContext* context) { \
hipLaunchKernelGGL(( name##Kernel<b_is_scalar, T, R>), dim3(CAFFE_GET_BLOCKS(n)), \
CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream(), \
a, b, out, n); \
} \
template <typename T, typename R> \
void RunWithBroadcast( \
const T* a, const T* b, R* out, size_t pre, size_t n, \
CUDAContext* context) { \
hipLaunchKernelGGL(( name##BroadcastKernel<T, R>), dim3(CAFFE_GET_BLOCKS(pre * n)), \
CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream(), \
a, b, out, pre, n); \
} \
template <typename T, typename R> \
void RunWithBroadcast2( \
const T* a, const T* b, R* out, size_t pre, size_t n, size_t post, \
CUDAContext* context) { \
hipLaunchKernelGGL(( name##Broadcast2Kernel<T, R>), dim3(CAFFE_GET_BLOCKS(pre * n * post)), \
CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream(), \
a, b, out, pre, n, post); \
} \
}; \
REGISTER_CUDA_OPERATOR( \
name, BinaryElementwiseOp< \
input_type, CUDAContext, Cuda##name##Functor, output_type>)
#define CUDA_SUB(x, y) ((x) - (y))
CUDA_FUNCTOR(Sub, CUDA_SUB, NumericTypes, SameTypeAsInput);
#undef CUDA_SUB
#define CUDA_MUL(x, y) ((x) * (y))
CUDA_FUNCTOR(Mul, CUDA_MUL, NumericTypes, SameTypeAsInput);
#undef CUDA_MUL
#define CUDA_DIV(x, y) ((x) / (y))
CUDA_FUNCTOR(Div, CUDA_DIV, NumericTypes, SameTypeAsInput);
#undef CUDA_DIV
#define CUDA_LT(x, y) ((x) < (y))
CUDA_FUNCTOR(LT, CUDA_LT, NumericTypes, FixedType<bool>);
#undef CUDA_LT
#define CUDA_LE(x, y) ((x) <= (y))
CUDA_FUNCTOR(LE, CUDA_LE, NumericTypes, FixedType<bool>);
#undef CUDA_LE
#define CUDA_GT(x, y) ((x) > (y))
CUDA_FUNCTOR(GT, CUDA_GT, NumericTypes, FixedType<bool>);
#undef CUDA_GT
#define CUDA_GE(x, y) ((x) >= (y))
CUDA_FUNCTOR(GE, CUDA_GE, NumericTypes, FixedType<bool>);
#undef CUDA_GE
#define CUDA_EQ(x, y) ((x) == (y))
CUDA_FUNCTOR(EQ, CUDA_EQ, IntTypes, FixedType<bool>);
#undef CUDA_EQ
#define CUDA_AND(x, y) ((x) & (y))
CUDA_FUNCTOR(And, CUDA_AND, BoolTypes, FixedType<bool>);
#undef CUDA_AND
#define CUDA_OR(x, y) ((x) | (y))
CUDA_FUNCTOR(Or, CUDA_OR, BoolTypes, FixedType<bool>);
#undef CUDA_OR
#define CUDA_XOR(x, y) ((x) ^ (y))
CUDA_FUNCTOR(Xor, CUDA_XOR, BoolTypes, FixedType<bool>);
#undef CUDA_XOR
__global__ void NotKernel(const int n, const bool* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = !x[i];
}
}
struct CudaNotFunctor {
inline void operator()(
const int n, const bool* x, bool* y, CUDAContext* context) {
hipLaunchKernelGGL(( NotKernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(), n, x, y);
}
};
REGISTER_CUDA_OPERATOR(
Not,
UnaryElementwiseOp<BoolTypes, CUDAContext, CudaNotFunctor>);
__global__ void DivKernel(const int n, float *dXdata, float *dYdata,
const float *dZdata, const float *Ydata,
const float *Zdata) {
CUDA_1D_KERNEL_LOOP(i, n) {
dXdata[i] = dZdata[i] / Ydata[i];
dYdata[i] = - (dZdata[i] * Zdata[i]) / Ydata[i];
}
}
void ElementWiseDivide(
CUDAContext& context,
const int n,
float* dXdata,
float* dYdata,
const float* dZdata,
const float* Ydata,
const float* Zdata) {
hipLaunchKernelGGL(( DivKernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context.cuda_stream(), n, dXdata, dYdata, dZdata, Ydata, Zdata);
}
REGISTER_CUDA_OPERATOR(DivGradient, DivGradientOp<CUDAContext>);
namespace {
template <typename T>
__global__ void
reduce_sum_like_post1(const T* g_idata, T* g_odata, int pre, int N) {
int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= N) {
return;
}
T sum = (T)0;
for (int i = 0; i < pre; ++i) {
sum += g_idata[i * N + n];
}
g_odata[n] = sum;
}
template <typename T>
void device_reduce(
const T* d_in,
T* d_out,
int N,
Tensor<CUDAContext>* buffer,
CUDAContext* context) {
// Determine temporary device storage requirements
size_t temp_storage_bytes = 0;
hipcub::DeviceReduce::Sum(NULL, temp_storage_bytes, d_in, d_out, N);
auto buffer_size = temp_storage_bytes / sizeof(T);
buffer_size += temp_storage_bytes % sizeof(T) != 0 ? 1 : 0;
buffer->Resize(buffer_size);
void* d_temp_storage = static_cast<void*>(buffer->template mutable_data<T>());
// Run sum-reduction
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, N);
}
template <typename T, int BLOCK_THREADS>
__global__ void
reduce_sum_like(const T* g_idata, T* g_odata, int pre, int N, int post) {
int n = blockIdx.x;
T sum = (T)0;
int limit = pre * post;
for (int i = threadIdx.x; i < limit; i += blockDim.x) {
int curPre = i / post;
int curPost = i % post;
sum += g_idata[curPre * N * post + n * post + curPost];
}
// uses a shared memory reduction within block
typedef hipcub::BlockReduce<T, BLOCK_THREADS> BlockReduceT;
// Shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
T aggregate = BlockReduceT(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
g_odata[n] = aggregate;
}
}
} // namespace
template <>
template <typename T>
bool SumReduceLikeOp<CUDAContext>::DoRunWithType() {
const auto& A = Input(0);
const auto& B = Input(1);
auto* C = Output(0);
auto count = A.size();
CAFFE_ENFORCE(&B != C, "In-place is not allowed.");
C->ResizeLike(B);
const T* Adata = A.template data<T>();
auto* Cdata = C->template mutable_data<T>();
if (B.size() == 1) {
device_reduce<T>(Adata, Cdata, count, &sum_buffer_, &context_);
} else {
CAFFE_ENFORCE_GT(
A.ndim(),
B.ndim(),
"If you are doing ReduceSumLike, input1 should have "
"a smaller number of dimensions.");
const int axis = (axis_ == -1 ? A.ndim() - B.ndim() : axis_);
CAFFE_ENFORCE(
axis >= 0 && axis < A.ndim(),
"ReduceSum axis should be in the range of the number "
"of dimensions of the first input.");
size_t pre = 1, n = 1, post = 1;
for (int i = 0; i < axis; ++i) {
pre *= A.dim(i);
}
for (int i = 0; i < B.ndim(); ++i) {
CAFFE_ENFORCE_EQ(
A.dim(i + axis), B.dim(i), "Broadcast dimension mismatch.");
n *= B.dim(i);
}
for (int i = axis + B.ndim(); i < A.ndim(); ++i) {
post *= A.dim(i);
}
// because we check shape(B) \in shape(A) before,
// post and pre cannot be 1 at same time
if (post == 1) {
hipLaunchKernelGGL(( reduce_sum_like_post1<T>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), Adata, Cdata, pre, n);
} else {
if (post >= 128) {
hipLaunchKernelGGL(( reduce_sum_like<T, 512>)
, dim3(n), dim3(512), 0, context_.cuda_stream(), Adata, Cdata, pre, n, post);
} else if (post >= 64) {
hipLaunchKernelGGL(( reduce_sum_like<T, 128>)
, dim3(n), dim3(128), 0, context_.cuda_stream(), Adata, Cdata, pre, n, post);
} else if (post >= 32) {
hipLaunchKernelGGL(( reduce_sum_like<T, 64>)
, dim3(n), dim3(64), 0, context_.cuda_stream(), Adata, Cdata, pre, n, post);
} else {
hipLaunchKernelGGL(( reduce_sum_like<T, 32>)
, dim3(n), dim3(32), 0, context_.cuda_stream(), Adata, Cdata, pre, n, post);
}
}
}
return true;
}
REGISTER_CUDA_OPERATOR(SumReduceLike, SumReduceLikeOp<CUDAContext>);
namespace {
template <bool is_scaler, typename T, typename M>
__global__ void binary_add_kernel(const int N, const T* a, const T* b, T* r) {
CUDA_1D_KERNEL_LOOP(idx, N) {
r[idx] = convert::To<M, T>(
convert::To<T, M>(a[idx]) +
convert::To<T, M>(is_scaler ? b[0] : b[idx]));
}
}
template <bool no_post, typename T, typename M>
__global__ void binary_add_kernel_broadcast(
const T* a,
const T* b,
T* r,
const int pre,
const int post,
const int n) {
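// Broadcast indexing over a flattened [pre, n, post] shape: when post == 1
// (no_post) b is indexed by idx % n, otherwise by (idx / post) % n.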
CUDA_1D_KERNEL_LOOP(idx, no_post ? pre * n : pre * post * n) {
r[idx] = convert::To<M, T>(
convert::To<T, M>(a[idx]) +
convert::To<T, M>(no_post ? b[idx % n] : b[(idx / post) % n]));
}
}
} // namespace
// Actual Add operator, because the above macros are read-only.
class CUDAAddOp final : public Operator<CUDAContext> {
public:
CUDAAddOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CUDAContext>(operator_def, ws),
OP_SINGLE_ARG(bool, "broadcast", enable_broadcast_, 0),
OP_SINGLE_ARG(int, "axis", axis_, -1),
OP_SINGLE_ARG(string, "axis_str", axis_str_, ""),
OP_SINGLE_ARG(string, "order", order_, "NCHW") {
// Figure out the correct axis to use.
if (enable_broadcast_) {
if (axis_ != -1) {
// Get axis from an explicit axis argument.
CAFFE_ENFORCE_EQ(
axis_str_.size(),
0,
"Args axis and axis_str cannot be used simultaneously.");
} else if (axis_str_.size()) {
// Get the axis index semantically.
CAFFE_ENFORCE_EQ(
axis_str_.size(), 1, "Unsupported axis string", axis_str_);
size_t semantic_axis_ = order_.find(axis_str_);
CAFFE_ENFORCE_NE(
semantic_axis_,
string::npos,
"Unrecognizable axis string ",
axis_str_,
" from order string ",
order_);
axis_ = semantic_axis_;
}
} else {
CAFFE_ENFORCE(
axis_ == -1 && axis_str_.size() == 0,
"Do not specify axis or axis_str if broadcast is not enabled.");
}
}
~CUDAAddOp() {}
template <typename T, typename M>
bool DoRunWithType() {
auto& X0 = Input(0);
auto& X1 = Input(1);
auto* output = Output(0);
output->ResizeLike(X0);
const T* X0data = X0.template data<T>();
const T* X1data = X1.template data<T>();
T* outputData = output->template mutable_data<T>();
if (!enable_broadcast_) {
CAFFE_ENFORCE_EQ(
X0.dims(),
X1.dims(),
"Dimension mismatch - did you forget to set broadcast=1?");
hipLaunchKernelGGL(( binary_add_kernel<false, T, M>),
dim3(CAFFE_GET_BLOCKS(X0.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), X0.size(), X0data, X1data, outputData);
} else if (X1.size() == 1) {
hipLaunchKernelGGL(( binary_add_kernel<true, T, M>),
dim3(CAFFE_GET_BLOCKS(X0.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), X0.size(), X0data, X1data, outputData);
} else {
CAFFE_ENFORCE_GT(
X0.ndim(),
X1.ndim(),
"If you are doing broadcasting, input1 should have "
"a smaller number of dimensions.");
const int axis = (axis_ == -1 ? X0.ndim() - X1.ndim() : axis_);
CAFFE_ENFORCE(
axis >= 0 && axis < X0.ndim(),
"Broadcast axis should be in the range of the number "
"of dimensions of the first input.");
size_t pre = 1, n = 1, post = 1;
for (int i = 0; i < axis; ++i) {
pre *= X0.dim(i);
}
for (int i = 0; i < X1.ndim(); ++i) {
CAFFE_ENFORCE_EQ(
X0.dim(i + axis), X1.dim(i), "Broadcast dimension mismatch.");
n *= X1.dim(i);
}
for (int i = axis + X1.ndim(); i < X0.ndim(); ++i) {
post *= X0.dim(i);
}
if (post == 1) {
hipLaunchKernelGGL(( binary_add_kernel_broadcast<true, T, M>),
dim3(CAFFE_GET_BLOCKS(pre * n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), X0data, X1data, outputData, pre, post, n);
} else {
hipLaunchKernelGGL(( binary_add_kernel_broadcast<false, T, M>),
dim3(CAFFE_GET_BLOCKS(pre * post * n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), X0data, X1data, outputData, pre, post, n);
}
}
return true;
}
bool RunOnDevice() override {
if (Input(0).IsType<float>()) {
return DoRunWithType<float, float>();
} else if (Input(0).IsType<float16>()) {
return DoRunWithType<float16, float>();
} else if (Input(0).IsType<int32_t>()) {
return DoRunWithType<int32_t, int32_t>();
} else if (Input(0).IsType<int64_t>()) {
return DoRunWithType<int64_t, int64_t>();
} else {
return false;
}
}
private:
bool enable_broadcast_;
int axis_;
string axis_str_;
string order_;
};
namespace {
REGISTER_CUDA_OPERATOR(Add, CUDAAddOp);
} // namespace
} // namespace caffe2
| d90ad0e68af15c9e8556b687730a45f2877615c4.cu | #define CUB_STDERR
#include <cub/block/block_load.cuh>
#include <cub/block/block_reduce.cuh>
#include <cub/device/device_reduce.cuh>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/elementwise_op.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
#define CUDA_FUNCTOR(name, op, input_type, output_type) \
template <int b_is_scalar, typename T, typename R> \
__global__ void name##Kernel(const T* a, const T* b, R* out, int n) { \
CUDA_1D_KERNEL_LOOP(i, n) { \
out[i] = op(a[i], b[b_is_scalar ? 0 : i]); \
} \
} \
template <typename T, typename R> \
__global__ void name##BroadcastKernel( \
const T* a, const T* b, R* out, int pre, int n) { \
CUDA_1D_KERNEL_LOOP(i, pre * n) { \
out[i] = op(a[i], b[i % n]); \
} \
} \
template <typename T, typename R> \
__global__ void name##Broadcast2Kernel( \
const T* a, const T* b, R* out, int pre, int n, int post) { \
CUDA_1D_KERNEL_LOOP(i, pre * n * post) { \
out[i] = op(a[i], b[(i / post) % n]); \
} \
} \
\
struct Cuda##name##Functor { \
template <bool b_is_scalar, typename T, typename R> \
inline void Run( \
size_t n, const T* a, const T* b, R* out, CUDAContext* context) { \
name##Kernel<b_is_scalar, T, R><<<CAFFE_GET_BLOCKS(n), \
CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>( \
a, b, out, n); \
} \
template <typename T, typename R> \
void RunWithBroadcast( \
const T* a, const T* b, R* out, size_t pre, size_t n, \
CUDAContext* context) { \
name##BroadcastKernel<T, R><<<CAFFE_GET_BLOCKS(pre * n), \
CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>( \
a, b, out, pre, n); \
} \
template <typename T, typename R> \
void RunWithBroadcast2( \
const T* a, const T* b, R* out, size_t pre, size_t n, size_t post, \
CUDAContext* context) { \
name##Broadcast2Kernel<T, R><<<CAFFE_GET_BLOCKS(pre * n * post), \
CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>( \
a, b, out, pre, n, post); \
} \
}; \
REGISTER_CUDA_OPERATOR( \
name, BinaryElementwiseOp< \
input_type, CUDAContext, Cuda##name##Functor, output_type>)
#define CUDA_SUB(x, y) ((x) - (y))
CUDA_FUNCTOR(Sub, CUDA_SUB, NumericTypes, SameTypeAsInput);
#undef CUDA_SUB
#define CUDA_MUL(x, y) ((x) * (y))
CUDA_FUNCTOR(Mul, CUDA_MUL, NumericTypes, SameTypeAsInput);
#undef CUDA_MUL
#define CUDA_DIV(x, y) ((x) / (y))
CUDA_FUNCTOR(Div, CUDA_DIV, NumericTypes, SameTypeAsInput);
#undef CUDA_DIV
#define CUDA_LT(x, y) ((x) < (y))
CUDA_FUNCTOR(LT, CUDA_LT, NumericTypes, FixedType<bool>);
#undef CUDA_LT
#define CUDA_LE(x, y) ((x) <= (y))
CUDA_FUNCTOR(LE, CUDA_LE, NumericTypes, FixedType<bool>);
#undef CUDA_LE
#define CUDA_GT(x, y) ((x) > (y))
CUDA_FUNCTOR(GT, CUDA_GT, NumericTypes, FixedType<bool>);
#undef CUDA_GT
#define CUDA_GE(x, y) ((x) >= (y))
CUDA_FUNCTOR(GE, CUDA_GE, NumericTypes, FixedType<bool>);
#undef CUDA_GE
#define CUDA_EQ(x, y) ((x) == (y))
CUDA_FUNCTOR(EQ, CUDA_EQ, IntTypes, FixedType<bool>);
#undef CUDA_EQ
#define CUDA_AND(x, y) ((x) & (y))
CUDA_FUNCTOR(And, CUDA_AND, BoolTypes, FixedType<bool>);
#undef CUDA_AND
#define CUDA_OR(x, y) ((x) | (y))
CUDA_FUNCTOR(Or, CUDA_OR, BoolTypes, FixedType<bool>);
#undef CUDA_OR
#define CUDA_XOR(x, y) ((x) ^ (y))
CUDA_FUNCTOR(Xor, CUDA_XOR, BoolTypes, FixedType<bool>);
#undef CUDA_XOR
__global__ void NotKernel(const int n, const bool* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = !x[i];
}
}
struct CudaNotFunctor {
inline void operator()(
const int n, const bool* x, bool* y, CUDAContext* context) {
NotKernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(n, x, y);
}
};
REGISTER_CUDA_OPERATOR(
Not,
UnaryElementwiseOp<BoolTypes, CUDAContext, CudaNotFunctor>);
__global__ void DivKernel(const int n, float *dXdata, float *dYdata,
const float *dZdata, const float *Ydata,
const float *Zdata) {
CUDA_1D_KERNEL_LOOP(i, n) {
dXdata[i] = dZdata[i] / Ydata[i];
dYdata[i] = - (dZdata[i] * Zdata[i]) / Ydata[i];
}
}
void ElementWiseDivide(
CUDAContext& context,
const int n,
float* dXdata,
float* dYdata,
const float* dZdata,
const float* Ydata,
const float* Zdata) {
DivKernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0,
context.cuda_stream()>>>(n, dXdata, dYdata, dZdata, Ydata, Zdata);
}
REGISTER_CUDA_OPERATOR(DivGradient, DivGradientOp<CUDAContext>);
namespace {
template <typename T>
__global__ void
reduce_sum_like_post1(const T* g_idata, T* g_odata, int pre, int N) {
int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= N) {
return;
}
T sum = (T)0;
for (int i = 0; i < pre; ++i) {
sum += g_idata[i * N + n];
}
g_odata[n] = sum;
}
template <typename T>
void device_reduce(
const T* d_in,
T* d_out,
int N,
Tensor<CUDAContext>* buffer,
CUDAContext* context) {
// Determine temporary device storage requirements
size_t temp_storage_bytes = 0;
cub::DeviceReduce::Sum(NULL, temp_storage_bytes, d_in, d_out, N);
auto buffer_size = temp_storage_bytes / sizeof(T);
buffer_size += temp_storage_bytes % sizeof(T) != 0 ? 1 : 0;
buffer->Resize(buffer_size);
void* d_temp_storage = static_cast<void*>(buffer->template mutable_data<T>());
// Run sum-reduction
cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, N);
}
template <typename T, int BLOCK_THREADS>
__global__ void
reduce_sum_like(const T* g_idata, T* g_odata, int pre, int N, int post) {
int n = blockIdx.x;
T sum = (T)0;
int limit = pre * post;
for (int i = threadIdx.x; i < limit; i += blockDim.x) {
int curPre = i / post;
int curPost = i % post;
sum += g_idata[curPre * N * post + n * post + curPost];
}
// uses a shared memory reduction within block
typedef cub::BlockReduce<T, BLOCK_THREADS> BlockReduceT;
// Shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
T aggregate = BlockReduceT(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
g_odata[n] = aggregate;
}
}
} // namespace
template <>
template <typename T>
bool SumReduceLikeOp<CUDAContext>::DoRunWithType() {
const auto& A = Input(0);
const auto& B = Input(1);
auto* C = Output(0);
auto count = A.size();
CAFFE_ENFORCE(&B != C, "In-place is not allowed.");
C->ResizeLike(B);
const T* Adata = A.template data<T>();
auto* Cdata = C->template mutable_data<T>();
if (B.size() == 1) {
device_reduce<T>(Adata, Cdata, count, &sum_buffer_, &context_);
} else {
CAFFE_ENFORCE_GT(
A.ndim(),
B.ndim(),
"If you are doing ReduceSumLike, input1 should have "
"a smaller number of dimensions.");
const int axis = (axis_ == -1 ? A.ndim() - B.ndim() : axis_);
CAFFE_ENFORCE(
axis >= 0 && axis < A.ndim(),
"ReduceSum axis should be in the range of the number "
"of dimensions of the first input.");
size_t pre = 1, n = 1, post = 1;
for (int i = 0; i < axis; ++i) {
pre *= A.dim(i);
}
for (int i = 0; i < B.ndim(); ++i) {
CAFFE_ENFORCE_EQ(
A.dim(i + axis), B.dim(i), "Broadcast dimension mismatch.");
n *= B.dim(i);
}
for (int i = axis + B.ndim(); i < A.ndim(); ++i) {
post *= A.dim(i);
}
// because we check shape(B) \in shape(A) before,
// post and pre cannot be 1 at same time
if (post == 1) {
reduce_sum_like_post1<T>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(Adata, Cdata, pre, n);
} else {
if (post >= 128) {
reduce_sum_like<T, 512>
<<<n, 512, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post);
} else if (post >= 64) {
reduce_sum_like<T, 128>
<<<n, 128, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post);
} else if (post >= 32) {
reduce_sum_like<T, 64>
<<<n, 64, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post);
} else {
reduce_sum_like<T, 32>
<<<n, 32, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post);
}
}
}
return true;
}
REGISTER_CUDA_OPERATOR(SumReduceLike, SumReduceLikeOp<CUDAContext>);
namespace {
template <bool is_scaler, typename T, typename M>
__global__ void binary_add_kernel(const int N, const T* a, const T* b, T* r) {
CUDA_1D_KERNEL_LOOP(idx, N) {
r[idx] = convert::To<M, T>(
convert::To<T, M>(a[idx]) +
convert::To<T, M>(is_scaler ? b[0] : b[idx]));
}
}
template <bool no_post, typename T, typename M>
__global__ void binary_add_kernel_broadcast(
const T* a,
const T* b,
T* r,
const int pre,
const int post,
const int n) {
CUDA_1D_KERNEL_LOOP(idx, no_post ? pre * n : pre * post * n) {
r[idx] = convert::To<M, T>(
convert::To<T, M>(a[idx]) +
convert::To<T, M>(no_post ? b[idx % n] : b[(idx / post) % n]));
}
}
} // namespace
// Actual Add operator, because the above macros are read-only.
class CUDAAddOp final : public Operator<CUDAContext> {
public:
CUDAAddOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CUDAContext>(operator_def, ws),
OP_SINGLE_ARG(bool, "broadcast", enable_broadcast_, 0),
OP_SINGLE_ARG(int, "axis", axis_, -1),
OP_SINGLE_ARG(string, "axis_str", axis_str_, ""),
OP_SINGLE_ARG(string, "order", order_, "NCHW") {
// Figure out the correct axis to use.
if (enable_broadcast_) {
if (axis_ != -1) {
// Get axis from an explicit axis argument.
CAFFE_ENFORCE_EQ(
axis_str_.size(),
0,
"Args axis and axis_str cannot be used simultaneously.");
} else if (axis_str_.size()) {
// Get the axis index semantically.
CAFFE_ENFORCE_EQ(
axis_str_.size(), 1, "Unsupported axis string", axis_str_);
size_t semantic_axis_ = order_.find(axis_str_);
CAFFE_ENFORCE_NE(
semantic_axis_,
string::npos,
"Unrecognizable axis string ",
axis_str_,
" from order string ",
order_);
axis_ = semantic_axis_;
}
} else {
CAFFE_ENFORCE(
axis_ == -1 && axis_str_.size() == 0,
"Do not specify axis or axis_str if broadcast is not enabled.");
}
}
~CUDAAddOp() {}
template <typename T, typename M>
bool DoRunWithType() {
auto& X0 = Input(0);
auto& X1 = Input(1);
auto* output = Output(0);
output->ResizeLike(X0);
const T* X0data = X0.template data<T>();
const T* X1data = X1.template data<T>();
T* outputData = output->template mutable_data<T>();
if (!enable_broadcast_) {
CAFFE_ENFORCE_EQ(
X0.dims(),
X1.dims(),
"Dimension mismatch - did you forget to set broadcast=1?");
binary_add_kernel<false, T, M><<<
CAFFE_GET_BLOCKS(X0.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(X0.size(), X0data, X1data, outputData);
} else if (X1.size() == 1) {
binary_add_kernel<true, T, M><<<
CAFFE_GET_BLOCKS(X0.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(X0.size(), X0data, X1data, outputData);
} else {
CAFFE_ENFORCE_GT(
X0.ndim(),
X1.ndim(),
"If you are doing broadcasting, input1 should have "
"a smaller number of dimensions.");
const int axis = (axis_ == -1 ? X0.ndim() - X1.ndim() : axis_);
CAFFE_ENFORCE(
axis >= 0 && axis < X0.ndim(),
"Broadcast axis should be in the range of the number "
"of dimensions of the first input.");
size_t pre = 1, n = 1, post = 1;
for (int i = 0; i < axis; ++i) {
pre *= X0.dim(i);
}
for (int i = 0; i < X1.ndim(); ++i) {
CAFFE_ENFORCE_EQ(
X0.dim(i + axis), X1.dim(i), "Broadcast dimension mismatch.");
n *= X1.dim(i);
}
for (int i = axis + X1.ndim(); i < X0.ndim(); ++i) {
post *= X0.dim(i);
}
if (post == 1) {
binary_add_kernel_broadcast<true, T, M><<<
CAFFE_GET_BLOCKS(pre * n),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(X0data, X1data, outputData, pre, post, n);
} else {
binary_add_kernel_broadcast<false, T, M><<<
CAFFE_GET_BLOCKS(pre * post * n),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(X0data, X1data, outputData, pre, post, n);
}
}
return true;
}
bool RunOnDevice() override {
if (Input(0).IsType<float>()) {
return DoRunWithType<float, float>();
} else if (Input(0).IsType<float16>()) {
return DoRunWithType<float16, float>();
} else if (Input(0).IsType<int32_t>()) {
return DoRunWithType<int32_t, int32_t>();
} else if (Input(0).IsType<int64_t>()) {
return DoRunWithType<int64_t, int64_t>();
} else {
return false;
}
}
private:
bool enable_broadcast_;
int axis_;
string axis_str_;
string order_;
};
namespace {
REGISTER_CUDA_OPERATOR(Add, CUDAAddOp);
} // namespace
} // namespace caffe2
|
c6ff5036358cd8d4d9dea2e06a84d412a625e314.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
void checkCudaError(const char* msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "ERROR: %s: %s.\n", msg, hipGetErrorString(err));
exit(0);
}
}
void CustomReverse(float* vec, int size) {
float* res = new float[size];
for (int i = 0; i < size; ++i) {
res[i] = vec[size - i - 1];
}
}
__global__ void Reverse(float* res, float* vec, int size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = gridDim.x * blockDim.x;
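// Grid-stride loop: each thread reverses every `offset`-th element, so the
// kernel handles sizes larger than the number of launched threads.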
while (idx < size)
{
res[idx] = vec[size - idx - 1];
idx += offset;
}
}
int main(int argc, const char* argv[])
{
int size;
std::cin >> size;
const int MAX = 33554432;
const int MIN = 0;
if (size < MIN && size > MAX)
{
std::cerr << "ERROR: Incorrect size!\n";
exit(0);
}
float *hostVec = new float[size];
for (int i = 0; i < size; ++i)
{
hostVec[i] = i;
}
std::cout << "vector size = " << size << std::endl;
clock_t time;
time = clock();
CustomReverse(hostVec, size);
time = clock() - time;
std::cout << "CPU" << std::endl;
std::cout << "time = " << (double)time/CLOCKS_PER_SEC << std::endl;
float *deviceVec, *deviceRes;
// allocate memory for the device copies
hipMalloc((void**) &deviceVec, sizeof(float) * size);
hipMalloc((void**) &deviceRes, sizeof(float) * size);
// copy the input to the device
hipMemcpy(deviceVec, hostVec, sizeof(float) * size, hipMemcpyHostToDevice);
int blockCount = 32;
int threadsCount = 32;
hipEvent_t start, end;
hipEventCreate(&start);
checkCudaError("hipEventCreate");
hipEventCreate(&end);
checkCudaError("hipEventCreate");
hipEventRecord(start);
checkCudaError("hipEventRecord");
// launch the kernel
hipLaunchKernelGGL(( Reverse), dim3(blockCount), dim3(threadsCount), 0, 0, deviceRes, deviceVec, size);
checkCudaError("Kernel invocation");
hipEventRecord(end);
checkCudaError("hipEventRecord");
hipEventSynchronize(end);
checkCudaError("hipEventSynchronize");
float t;
hipEventElapsedTime(&t, start, end);
checkCudaError("hipEventElapsedTime");
hipEventDestroy(start);
checkCudaError("hipEventDestroy");
hipEventDestroy(end);
checkCudaError("hipEventDestroy");
printf("GPU\n");
printf("time = %f\n", t);
printf("blocks = %d\n", blockCount);
printf("threads = %d\n", threadsCount);
hipMemcpy(hostVec, deviceRes, sizeof(float) * size, hipMemcpyDeviceToHost);
checkCudaError("Memcpy");
//const int accuracy = 10;
//for (int i = 0; i < size - 1; ++i)
//{
//std::cout << std::scientific << std::setprecision(accuracy) << hostVec[i] << " ";
//}
//std::cout << std::scientific << std::setprecision(accuracy) << hostVec[size - 1];
hipFree(deviceVec);
checkCudaError("Free");
hipFree(deviceRes);
checkCudaError("Free");
delete[] hostVec;
return 0;
} | c6ff5036358cd8d4d9dea2e06a84d412a625e314.cu | #include <iostream>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
void checkCudaError(const char* msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(0);
}
}
void CustomReverse(float* vec, int size) {
float* res = new float[size];
for (int i = 0; i < size; ++i) {
res[i] = vec[size - i - 1];
}
}
__global__ void Reverse(float* res, float* vec, int size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = gridDim.x * blockDim.x;
while (idx < size)
{
res[idx] = vec[size - idx - 1];
idx += offset;
}
}
int main(int argc, const char* argv[])
{
int size;
std::cin >> size;
const int MAX = 33554432;
const int MIN = 0;
if (size < MIN && size > MAX)
{
std::cerr << "ERROR: Incorrect size!\n";
exit(0);
}
float *hostVec = new float[size];
for (int i = 0; i < size; ++i)
{
hostVec[i] = i;
}
std::cout << "vector size = " << size << std::endl;
clock_t time;
time = clock();
CustomReverse(hostVec, size);
time = clock() - time;
std::cout << "CPU" << std::endl;
std::cout << "time = " << (double)time/CLOCKS_PER_SEC << std::endl;
float *deviceVec, *deviceRes;
// Выделяем память для device копий
cudaMalloc((void**) &deviceVec, sizeof(float) * size);
cudaMalloc((void**) &deviceRes, sizeof(float) * size);
// Копируем ввод на device
cudaMemcpy(deviceVec, hostVec, sizeof(float) * size, cudaMemcpyHostToDevice);
int blockCount = 32;
int threadsCount = 32;
cudaEvent_t start, end;
cudaEventCreate(&start);
checkCudaError("cudaEventCreate");
cudaEventCreate(&end);
checkCudaError("cudaEventCreate");
cudaEventRecord(start);
checkCudaError("cudaEventRecord");
// Запускаем kernel
Reverse<<<blockCount, threadsCount>>>(deviceRes, deviceVec, size);
checkCudaError("Kernel invocation");
cudaEventRecord(end);
checkCudaError("cudaEventRecord");
cudaEventSynchronize(end);
checkCudaError("cudaEventSynchronize");
float t;
cudaEventElapsedTime(&t, start, end);
checkCudaError("cudaEventElapsedTime");
cudaEventDestroy(start);
checkCudaError("cudaEventDestroy");
cudaEventDestroy(end);
checkCudaError("cudaEventDestroy");
printf("GPU\n");
printf("time = %f\n", t);
printf("blocks = %d\n", blockCount);
printf("threads = %d\n", threadsCount);
cudaMemcpy(hostVec, deviceRes, sizeof(float) * size, cudaMemcpyDeviceToHost);
checkCudaError("Memcpy");
//const int accuracy = 10;
//for (int i = 0; i < size - 1; ++i)
//{
//std::cout << std::scientific << std::setprecision(accuracy) << hostVec[i] << " ";
//}
//std::cout << std::scientific << std::setprecision(accuracy) << hostVec[size - 1];
cudaFree(deviceVec);
checkCudaError("Free");
cudaFree(deviceRes);
checkCudaError("Free");
delete[] hostVec;
return 0;
} |
b1fafb8ae9902cf31323a8a197a713aeaf953ec8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zgemvmdot.cu, normal z -> s, Tue Aug 30 09:38:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_s
// initialize arrays with zero
__global__ void
magma_sgpumemzero(
float * d,
int n,
int k )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
for( int j=0; j<k; j++)
d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 );
}
}
// dot product
__global__ void
magma_sdot_kernel(
int Gs,
int n,
float * v,
float * r,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_S_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
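// Last 32 partial sums: the code relies on (legacy) implicit warp-synchronous
// execution, using a volatile pointer so shared-memory updates stay visible
// between lanes without further __syncthreads().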
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// dot product for multiple vectors
__global__ void
magma_sblockdot_kernel(
int Gs,
int n,
int k,
float * v,
float * r,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i)
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else {
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = MAGMA_S_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
__global__ void
magma_sblockreduce_kernel(
int Gs,
int n,
int k,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_S_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
__global__ void
magma_sreduce_kernel_fast( int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated block reduction for multiple vectors
__global__ void
magma_sblockreduce_kernel_fast(
int Gs,
int n,
int k,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
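/*
   Illustrative call (the surrounding setup, sizes, and queue here are
   assumptions, not part of this file): with v holding k column vectors of
   length n (v_j starting at v + j*n) and d1, d2 being n*k float workspaces,
       magma_smdotc( n, k, v, r, d1, d2, skp, queue );
   leaves skp[j] = <v_j, r> for j = 0..k-1 in device memory.
*/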
extern "C" magma_int_t
magma_smdotc(
magma_int_t n,
magma_int_t k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( float ); // k vecs
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
if (k>1) {
hipLaunchKernelGGL(( magma_sblockdot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, k, v, r, d1 );
}
else {
hipLaunchKernelGGL(( magma_sdot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_sgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 );
magma_sgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 );
//magmablas_slaset( MagmaFull, n, k, d1, n, UNKNOWN );
//magmablas_slaset( MagmaFull, n, k, d2, n, UNKNOWN );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
magma_sblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_scopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN );
}
*/
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_sblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_sreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
magma_scopyvector_async( k, aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vecor product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
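/*
   The loop below splits the k input vectors into panels of chunk_size
   columns and calls magma_smdotc on each panel, so the shared memory used
   per block (chunk_size * 256 floats) stays small regardless of k.
*/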
extern "C" magma_int_t
magma_sgemvmdot(
magma_int_t n,
magma_int_t k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int rows_left = k;
int offset = 0;
int chunk_size = 4;
// process in chunks of 10 - has to be adapted to hardware and precision
while( rows_left > (chunk_size) ) {
magma_smdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
offset = offset + chunk_size;
rows_left = rows_left-chunk_size;
}
// process rest
magma_smdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
return MAGMA_SUCCESS;
}
| b1fafb8ae9902cf31323a8a197a713aeaf953ec8.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zgemvmdot.cu, normal z -> s, Tue Aug 30 09:38:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_s
// initialize arrays with zero
__global__ void
magma_sgpumemzero(
float * d,
int n,
int k )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
for( int j=0; j<k; j++)
d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 );
}
}
// dot product
__global__ void
magma_sdot_kernel(
int Gs,
int n,
float * v,
float * r,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_S_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// dot product for multiple vectors
__global__ void
magma_sblockdot_kernel(
int Gs,
int n,
int k,
float * v,
float * r,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i)
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else {
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = MAGMA_S_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
__global__ void
magma_sblockreduce_kernel(
int Gs,
int n,
int k,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_S_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
__global__ void
magma_sreduce_kernel_fast( int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated block reduction for multiple vectors
__global__ void
magma_sblockreduce_kernel_fast(
int Gs,
int n,
int k,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc(
magma_int_t n,
magma_int_t k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( float ); // k vecs
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
if (k>1) {
magma_sblockdot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, k, v, r, d1 );
}
else {
magma_sdot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_sgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 );
magma_sgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 );
//magmablas_slaset( MagmaFull, n, k, d1, n, UNKNOWN );
//magmablas_slaset( MagmaFull, n, k, d2, n, UNKNOWN );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
magma_sblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_scopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN );
}
*/
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_sblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_sreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
magma_scopyvector_async( k, aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
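
// Illustrative call sequence (the buffer names are placeholders, not MAGMA API):
// v, r, d1, d2 and skp are device arrays; d1 and d2 are scratch buffers large enough
// to hold the per-block partial sums for all k vectors (on the order of n * k floats each).
//
//     magma_smdotc( n, k, v, r, d1, d2, skp, queue );
//     // skp[0..k-1] now holds the k scalar products v_i' * r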
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
    It is equivalent to a matrix vector product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sgemvmdot(
magma_int_t n,
magma_int_t k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int rows_left = k;
int offset = 0;
int chunk_size = 4;
    // process in chunks of chunk_size columns - has to be adapted to hardware and precision
while( rows_left > (chunk_size) ) {
magma_smdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
offset = offset + chunk_size;
rows_left = rows_left-chunk_size;
}
// process rest
magma_smdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
return MAGMA_SUCCESS;
}
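
// For illustration: with k = 10 and chunk_size = 4, the loop above calls magma_smdotc
// for columns 0-3 and then 4-7, and the final call handles the remaining two columns
// (8-9), so every v_i is processed exactly once while each chunk stays small enough
// to remain cache resident.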
|
e4600b36ad4ff644e5c398d5110152657a0712ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/header.h"
using namespace std;
/**
 * \brief The function `stdDev` computes the `standard deviation` of a given vector allocated on the CUDA device.
*
* The following function computes the `standard deviation` of a given input vector.
* \param *data Input vector
* \param n Size of the vector
* \param *avg `Mean` computed on the input vector
* \return `Standard deviation` computed on the input vector
*/
__device__ float stdDev(float *data, int n, float *avg) {
printf("N_SAMPLE: %d\n", n);
  printf("DATA_SIZE: %d\n", (int)sizeof(data)); // note: sizeof(data) is the size of the pointer, not of the buffer
float mean = 0.0, sum_deviation = 0.0;
int i;
for (i = 0; i < n; ++i) {
mean += data[i];
}
mean = mean / n;
*avg = mean;
for (i = 0; i < n; ++i)
sum_deviation += (data[i] - mean) * (data[i] - mean);
return sqrt(sum_deviation / (n - 1));
}
/**
* \brief The kernel function `MD_DTW_D` computes the `Dependent-Multi Dimensional Dynamic Time Warping` distance (D-MDDTW).
*
 * The following kernel function computes the D-MDDTW taking advantage of the GPU, by using a specific number of threads per block.
 It considers the comparison of many Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the single unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
For more information about how it's computed, refer to the following link: http://stats.stackexchange.com/questions/184977/multivariate-time-series-euclidean-distance
* \param *S Unrolled vector containing `trainSize` number of MTS
* \param *T Unrolled vector representing the second time Series to compare against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
* \param window_size Length of the two given MTS
* \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
* \param task Integer discriminating the task to perform (e.g., 0: CLASSIFICATION, 1:SUBSEQUENCE SEARCH)
* \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
template<int WS>
__global__ void MD_DTW_D(float *S, float *T, int trainSize, int window_size, int dimensions,
float *data_out, int task, int gm) {
long long int k, l, g;
long long int i, j, p;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float min_nb = 0;
float array[WS][2];
if (gm == 0) {
// query timeseries
extern __shared__ float T2[];
int t, offset;
if (task == 0) {
offset = window_size;
int wind = dimensions * window_size;
t = idx * wind;
if ((idx * wind) + wind > trainSize * wind)
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
} else {
offset = trainSize;
t = idx;
if ((idx + window_size) > trainSize)
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
}
k = 0;
l = 1;
      // computing first row (instance versus query)
for (i = 0; i < window_size; i++) {
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
if (i == 0)
array[i][k] += pow((S[t + p * offset] - T2[p * window_size]), 2);
else
array[i][k] += pow((S[t + p * offset] - T2[p * window_size + i]), 2);
}
if (i != 0)
array[i][k] += array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * offset + j] - T2[p * window_size + i]), 2);
array[i][k] += array[i][l];
for (i = 1; i < window_size; i++) {
array[i][k] = 0.0;
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * offset + j] - T2[p * window_size + i]), 2);
array[i][k] += min_nb;
}
g = k;
k = l;
l = g;
}
data_out[idx] = array[window_size - 1][g];
} else {
int t, offset;
if (task == 0) {
offset = window_size;
int wind = dimensions * window_size;
t = idx * wind;
if ((idx * wind) + wind > trainSize * wind)
return;
} else {
offset = trainSize;
t = idx;
if ((idx + window_size) > trainSize)
return;
}
k = 0;
l = 1;
      // computing first row (instance versus query)
for (i = 0; i < window_size; i++) {
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
if (i == 0)
array[i][k] += pow((S[t + p * offset] - T[p * window_size]), 2);
else
array[i][k] += pow((S[t + p * offset] - T[p * window_size + i]), 2);
}
if (i != 0)
array[i][k] += array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * offset + j] - T[p * window_size + i]), 2);
array[i][k] += array[i][l];
for (i = 1; i < window_size; i++) {
array[i][k] = 0.0;
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * offset + j] - T[p * window_size + i]), 2);
array[i][k] += min_nb;
}
g = k;
k = l;
l = g;
}
data_out[idx] = array[window_size - 1][g];
}
}
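
// Minimal launch sketch for the classification task (task = 0) with the query kept in
// shared memory (gm = 0). The buffer names (d_train, d_query, d_out) and the block size
// are hypothetical; WS must be a compile-time constant >= window_size, since every
// thread keeps a private WS x 2 cost buffer.
//
//   int blockSize = 512;
//   dim3 grid((trainSize + blockSize - 1) / blockSize);
//   size_t sh_mem = n_feat * window_size * sizeof(float);   // room for the query T
//   hipLaunchKernelGGL((MD_DTW_D<WS>), grid, dim3(blockSize), sh_mem, 0,
//                      d_train, d_query, trainSize, window_size, n_feat,
//                      d_out, 0 /* task */, 0 /* gm */);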
/**
 * \brief The kernel function `MD_DTW_I` computes the `Independent-Multi Dimensional Dynamic Time Warping` distance (I-MDDTW).
*
 * The following kernel function computes the I-MDDTW taking advantage of the GPU, by using a specific number of threads per block.
 It considers the comparison of many Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the single unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
For more information about how it's computed, refer to the following link: http://stats.stackexchange.com/questions/184977/multivariate-time-series-euclidean-distance
* \param *S Unrolled vector containing `trainSize` number of MTS
* \param *T Unrolled vector representing the second time Series to compare against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
* \param window_size Length of the two given MTS
* \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
 * \param task Integer discriminating the task to perform (e.g., 0: CLASSIFICATION, 1:SUBSEQUENCE SEARCH)
 * \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
template<int WS>
__global__ void MD_DTW_I(float *S, float *T, int trainSize, int window_size, int dimensions,
float *data_out, int task, int gm) {
int idx, offset_x;
long long int i, j;
long long int k, l, g;
float min_nb = 0;
float array[WS][2];
// float *T2 = 0;
// float *DTW_single_dim = 0;
if(gm == 0){
extern __shared__ float sh_mem[];
float *T2 = (float *)sh_mem;
float *DTW_single_dim =
(float *)&sh_mem[dimensions *
window_size]; // offset on the shared memory for the segment T2
if (task == 0) {
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x = ((blockDim.x * blockDim.y * window_size) * blockIdx.x) + idx * window_size;
if (((blockDim.x * blockDim.y * blockIdx.x) + idx) >=
trainSize * dimensions) // 120=train_size
return;
} else { // SUBSEQ_SEARCH
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x =
(blockDim.x * blockIdx.x) +
((threadIdx.y * trainSize) +
threadIdx.x); // use blockIdx and other measure to set well the offset
if ((idx + window_size) > trainSize)
return;
}
if (idx == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
*(T2 + (window_size * i + j)) = T[window_size * i + j];
}
__syncthreads();
k = 0;
l = 1;
for (i = 0; i < window_size; i++) {
if (i == 0)
array[i][k] = pow((S[offset_x] - T2[window_size * threadIdx.y]), 2);
else
array[i][k] =
pow((S[offset_x] - T2[window_size * threadIdx.y + i]), 2) + array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] =
pow((S[offset_x + j] - T2[window_size * threadIdx.y + i]), 2) + array[i][l];
for (i = 1; i < window_size; i++) {
double a = array[i - 1][l];
double b = array[i][l];
double c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
array[i][k] =
pow((S[offset_x + j] - T2[window_size * threadIdx.y + i]), 2) + min_nb;
}
g = k;
k = l;
l = g;
}
DTW_single_dim[idx] = array[window_size - 1][g];
__syncthreads();
if (idx == 0) {
for (i = 0; i < blockDim.x; i++) {
data_out[(blockIdx.x * blockDim.x) + i] = 0.0;
for (j = 0; j < blockDim.y; j++) {
data_out[(blockIdx.x * blockDim.x) + i] +=
DTW_single_dim[i * dimensions + j];
}
}
}
}
else
{
extern __shared__ float DTW_single_dim[];
if (task == 0) {
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x = ((blockDim.x * blockDim.y * window_size) * blockIdx.x) + idx * window_size;
if (((blockDim.x * blockDim.y * blockIdx.x) + idx) >=
trainSize * dimensions) // 120=train_size
return;
} else { // SUBSEQ_SEARCH
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x =
(blockDim.x * blockIdx.x) +
((threadIdx.y * trainSize) +
threadIdx.x); // use blockIdx and other measure to set well the offset
if ((idx + window_size) > trainSize)
return;
}
k = 0;
l = 1;
for (i = 0; i < window_size; i++) {
if (i == 0)
array[i][k] = pow((S[offset_x] - T[window_size * threadIdx.y]), 2);
else
array[i][k] =
pow((S[offset_x] - T[window_size * threadIdx.y + i]), 2) + array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] =
pow((S[offset_x + j] - T[window_size * threadIdx.y + i]), 2) + array[i][l];
for (i = 1; i < window_size; i++) {
double a = array[i - 1][l];
double b = array[i][l];
double c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
array[i][k] =
pow((S[offset_x + j] - T[window_size * threadIdx.y + i]), 2) + min_nb;
}
g = k;
k = l;
l = g;
}
DTW_single_dim[idx] = array[window_size - 1][g];
__syncthreads();
if (idx == 0) {
for (i = 0; i < blockDim.x; i++) {
data_out[(blockIdx.x * blockDim.x) + i] = 0.0;
for (j = 0; j < blockDim.y; j++) {
data_out[(blockIdx.x * blockDim.x) + i] +=
DTW_single_dim[i * dimensions + j];
}
}
}
}
}
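
// Note on the thread layout assumed by MD_DTW_I: threads are organised in 2-D blocks with
// blockDim.y equal to the number of dimensions, so each (threadIdx.x, threadIdx.y) pair
// scores one variable of one MTS and thread 0 of the block sums the per-variable DTW
// values. A hypothetical configuration for the classification task could be
//
//   dim3 block(instances_per_block, n_feat);
//   size_t sh_mem = (n_feat * window_size + instances_per_block * n_feat) * sizeof(float);
//
// where the first term holds the query T (needed only when gm == 0) and the second the
// per-dimension partial results.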
/**
* \brief The kernel function `rMD_DTW_D` computes the `Rotation Dependent-Multi Dimensional Dynamic Time Warping` distance (rD-MDDTW).
*
 * The following kernel function computes the rD-MDDTW taking advantage of the GPU, by using a specific number of threads per block.
 It considers the comparison of all the possible `punctual rotations` of the Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the single unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
* \param *S Unrolled vector containing `trainSize` number of MTS
* \param *T Unrolled vector representing the second time Series to compare against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
* \param window_size Length of the two given MTS
 * \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
* \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
template<int WS>
__global__ void rMD_DTW_D(float *S, float *T, int trainSize, int window_size, int dimensions,
float *data_out, int gm) {
long long int k, l, g;
long long int i, j, p;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float min_nb = 0;
float array[WS][2];
if (gm == 0) {
extern __shared__ float T2[];
// offset training set
int s = dimensions * 2 * window_size * (idx / window_size);
int t = s + idx % window_size;
if (idx >= (trainSize * window_size)) //
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
k = 0;
l = 1;
for (i = 0; i < window_size; i++) {
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
if (i == 0)
array[i][k] += pow((S[t + p * 2 * window_size] - T2[p * window_size]), 2);
else
array[i][k] += pow((S[t + p * 2 * window_size] - T2[p * window_size + i]), 2);
}
if (i != 0)
array[i][k] += array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
array[i][k] += pow((S[t + p * 2 * window_size + j] - T2[p * window_size + i]), 2);
}
array[i][k] += array[i][l];
for (i = 1; i < window_size; i++) {
array[i][k] = 0.0;
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * 2 * window_size + j] - T2[p * window_size + i]), 2);
array[i][k] += min_nb;
}
g = k;
k = l;
l = g;
}
data_out[idx] = array[window_size - 1][g];
} else {
// offset training set
int s = dimensions * 2 * window_size * (idx / window_size);
int t = s + idx % window_size;
if (idx >= (trainSize * window_size)) //
return;
k = 0;
l = 1;
      // computing first row (instance versus query)
for (i = 0; i < window_size; i++) {
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
if (i == 0)
array[i][k] += pow((S[t + p * 2 * window_size] - T[p * window_size]), 2);
else
array[i][k] += pow((S[t + p * 2 * window_size] - T[p * window_size + i]), 2);
}
if (i != 0)
array[i][k] += array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
array[i][k] += pow((S[t + p * 2 * window_size + j] - T[p * window_size + i]), 2);
}
array[i][k] += array[i][l];
for (i = 1; i < window_size; i++) {
array[i][k] = 0.0;
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * 2 * window_size + j] - T[p * window_size + i]), 2);
array[i][k] += min_nb;
}
g = k;
k = l;
l = g;
}
data_out[idx] = array[window_size - 1][g];
}
}
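
// Layout note for the rotation-invariant kernels: each training MTS is expected to be
// stored twice back to back per dimension (hence the "2 * window_size" stride), so every
// one of the window_size circular rotations of the series is available as a contiguous
// window starting at offset idx % window_size. One thread therefore scores one rotation
// of one training instance against the query.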
/**
* \brief The kernel function `MD_ED_D` computes the `Dependent-Multi Dimensional Euclidean` distance (D-MDE).
*
 * The following kernel function computes the D-MDE taking advantage of the GPU, by using a specific number of threads per block.
 It considers the comparison of many Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the single unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
For more information about how it's computed, refer to the following link: http://stats.stackexchange.com/questions/184977/multivariate-time-series-euclidean-distance
* \param *S Unrolled vector containing `trainSize` number of MTS
* \param *T Unrolled vector representing the second time Series to compare against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
* \param window_size Length of the two given MTS
* \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
* \param task Integer discriminating the task to perform (e.g., 0: CLASSIFICATION, 1:SUBSEQUENCE SEARCH)
* \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
__global__ void MD_ED_D(float *S, float *T, int trainSize, int window_size, int dimensions,
float *data_out, int task, int gm) {
long long int i, j, p;
float sumErr = 0, dd = 0;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (gm == 0) {
extern __shared__ float T2[];
int t, offset;
if (task == 0) {
offset = window_size;
int wind = dimensions * window_size;
t = idx * wind;
if ((idx * wind) + wind >
trainSize * wind) // CHANGE FORMULA 120=train_size
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
} else {
// in this case 'trainSize' is the number of subsequence to search 'nss',
// that is, the length of dataset to perform on
offset = trainSize;
t = idx;
if ((idx + window_size) > trainSize)
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
}
for (j = 0; j < window_size; j++) {
dd = 0;
for (p = 0; p < dimensions; p++)
dd += (S[(t + p * offset) + j] - T2[(p * window_size) + j]) *
(S[(t + p * offset) + j] - T2[(p * window_size) + j]);
sumErr += dd;
}
data_out[idx] = sqrt(sumErr);
} else {
int t, offset;
if (task == 0) {
offset = window_size;
int wind = dimensions * window_size;
t = idx * wind;
if ((idx * wind) + wind > trainSize * wind)
return;
} else {
// in this case 'trainSize' is the number of subsequence to search 'nss',
// that is, the length of dataset to perform on
offset = trainSize;
t = idx;
if ((idx + window_size) > trainSize)
return;
}
for (j = 0; j < window_size; j++) {
dd = 0;
for (p = 0; p < dimensions; p++)
dd += (S[(t + p * offset) + j] - T[(p * window_size) + j]) *
(S[(t + p * offset) + j] - T[(p * window_size) + j]);
sumErr += dd;
}
data_out[idx] = sqrt(sumErr);
}
}
/**
* \brief The kernel function `MD_ED_I` computes the `Independent-Multi Dimensional Euclidean` distance (I-MDE).
*
 * The following kernel function computes the I-MDE taking advantage of the GPU, by using a specific number of threads per block.
 It considers the comparison of many Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the single unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
For more information about how it's computed, refer to the following link: http://stats.stackexchange.com/questions/184977/multivariate-time-series-euclidean-distance
* \param *S Unrolled vector containing `trainSize` number of MTS
* \param *T Unrolled vector representing the second time Series to compare against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
* \param window_size Length of the two given MTS
 * \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
 * \param task Integer discriminating the task to perform (e.g., 0: CLASSIFICATION, 1:SUBSEQUENCE SEARCH)
 * \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
__global__ void MD_ED_I(float *S, float *T, int trainSize, int window_size, int dimensions,
float *data_out, int task, int gm) {
int idx, offset_x;
float sumErr = 0;
long long int i, j;
if(gm == 0){
extern __shared__ float sh_mem[];
float *T2 = (float *)sh_mem;
float *DTW_single_dim =
(float *)&sh_mem[dimensions * window_size]; // offset on the shared memory
// for the segment T2
if (task == 0) {
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x = ((blockDim.x * blockDim.y * window_size) * blockIdx.x) +
idx * window_size;
if (((blockDim.x * blockDim.y * blockIdx.x) + idx) >=
trainSize * dimensions) // 120=train_size
return;
} else { // SUBSEQ_SEARCH
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x =
(blockDim.x * blockIdx.x) +
((threadIdx.y * trainSize) +
threadIdx.x); // use blockIdx and other measure to set well the offset
if ((idx + window_size) > trainSize)
return;
}
if (idx == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
*(T2 + (window_size * i + j)) = T[window_size * i + j];
}
__syncthreads();
for (j = 0; j < window_size; j++)
sumErr += (S[offset_x + j] - T2[window_size * threadIdx.y + j]) *
(S[offset_x + j] - T2[window_size * threadIdx.y + j]);
DTW_single_dim[idx] = sqrt(sumErr);
__syncthreads();
if (idx == 0) {
for (i = 0; i < blockDim.x; i++) {
data_out[(blockIdx.x * blockDim.x) + i] = 0.0;
for (j = 0; j < blockDim.y; j++) {
data_out[(blockIdx.x * blockDim.x) + i] +=
            DTW_single_dim[i * dimensions + j]; // review this formula
}
}
}
}
else {
extern __shared__ float DTW_single_dim[];
if (task == 0) {
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x = ((blockDim.x * blockDim.y * window_size) * blockIdx.x) +
idx * window_size;
if (((blockDim.x * blockDim.y * blockIdx.x) + idx) >=
trainSize * dimensions) // 120=train_size
return;
} else { // SUBSEQ_SEARCH
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x =
(blockDim.x * blockIdx.x) +
((threadIdx.y * trainSize) +
threadIdx.x); // use blockIdx and other measure to set well the offset
if ((idx + window_size) > trainSize)
return;
}
// if (idx == 0) {
// for (i = 0; i < dimensions; i++)
// for (j = 0; j < window_size; j++)
// *(T2 + (window_size * i + j)) = T[window_size * i + j];
// }
// __syncthreads();
for (j = 0; j < window_size; j++)
sumErr += (S[offset_x + j] - T[window_size * threadIdx.y + j]) *
(S[offset_x + j] - T[window_size * threadIdx.y + j]);
DTW_single_dim[idx] = sqrt(sumErr);
__syncthreads();
if (idx == 0) {
for (i = 0; i < blockDim.x; i++) {
data_out[(blockIdx.x * blockDim.x) + i] = 0.0;
for (j = 0; j < blockDim.y; j++) {
data_out[(blockIdx.x * blockDim.x) + i] +=
            DTW_single_dim[i * dimensions + j]; // review this formula
}
}
}
}
}
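
// For reference, the two Euclidean variants above differ only in where the square root is
// taken (p indexes the variables, t the time steps):
//
//   D-MDE(S, T) = sqrt( sum_t sum_p (S_p[t] - T_p[t])^2 )      (MD_ED_D)
//   I-MDE(S, T) = sum_p sqrt( sum_t (S_p[t] - T_p[t])^2 )      (MD_ED_I)
//
// The dependent version treats each time step as one multi-dimensional point, while the
// independent version sums one mono-dimensional distance per variable.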
/**
* \brief The kernel function `rMD_ED_D` computes the `Rotation Dependent-Multi Dimensional Euclidean` distance (rD-MDE).
*
 * The following kernel function computes the rD-MDE taking advantage of the GPU, by using a specific number of threads per block.
 It considers the comparison of all the possible `punctual rotations` of the Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the single unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
* \param *S Unrolled vector containing `trainSize` number of MTS
* \param *T Unrolled vector representing the second time Series to compare against `*S`
* \param window_size Length of the two given MTS
* \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
* \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
__global__ void rMD_ED_D(float *S, float *T, int window_size, int dimensions,
float *data_out, int trainSize, int gm) {
long long int i, j, p;
float sumErr = 0, dd = 0;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (gm == 0) {
extern __shared__ float T2[];
// offset training set
int s = dimensions * 2 * window_size * (idx / window_size);
int t = s + idx % window_size;
if (idx >= (trainSize * window_size)) //
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
for (j = 0; j < window_size; j++) {
dd = 0;
for (p = 0; p < dimensions; p++)
dd += (S[(t + p * 2 * window_size) + j] - T2[(p * window_size) + j]) *
(S[(t + p * 2 * window_size) + j] - T2[(p * window_size) + j]);
sumErr += dd;
}
data_out[idx] = sqrt(sumErr);
} else {
int s = dimensions * 2 * window_size * (idx / window_size);
int t = s + idx % window_size;
if (idx >= (trainSize * window_size))
return;
for (j = 0; j < window_size; j++) {
dd = 0;
for (p = 0; p < dimensions; p++)
dd += (S[(t + p * 2 * window_size) + j] - T[(p * window_size) + j]) *
(S[(t + p * 2 * window_size) + j] - T[(p * window_size) + j]);
sumErr += dd;
}
data_out[idx] = sqrt(sumErr);
}
}
/**
 * \brief The function `checkFlagOpts` checks the correctness of the parameters for a given flag.
 *
 * The following function checks the correctness of the parameters for a given flag by counting the number of parameters.
* \param **input_args Vector containing all the command line parameters passed to the program
* \param num_args Vector containing the number of arguments passed to the program
* \param ind Current index parsed on `**input_args`
* \param num_opts Number of parameters to parse for the current flag stored into input_args[ind]
 * \return Integer (0,1) indicating the correctness of the number of parameters for the current flag
*/
__host__ int checkFlagOpts(char **input_args, int num_args, int ind,
int num_opts) {
int count = 0;
char *pch = NULL;
if (ind + num_opts < num_args) { // it means a wrong number of options params
// and that there's no other flag option
while (pch == NULL && count <= num_opts) {
pch = strchr(input_args[ind], '-');
ind++;
count++;
}
if (count - 1 != num_opts)
return 0;
else
return 1;
} else if (ind + num_opts > num_args)
return 0;
else
return 1;
}
/**
 * \brief The function `readFileSubSeq` allows reading several file formats for the `SUBSEQUENCE SEARCH` task.
 *
 * The following function allows reading several file formats for the `SUBSEQUENCE SEARCH` task, given several input parameters.
* \param **file_name Vector containing the absolute paths for the files to read
* \param *ind_files Vector containing parsed indices for the file to read
 * \param n_file Number of files to read
* \param *t_series Vector that will contain the time series `*t`
* \param *q_series Vector that will contain the time series `*q`
 * \param window_size Length of both time series
* \param n_feat Number of variables for both time series
* \param read_mode Integer for handling different input file formats (for more information, refer to README)
*/
__host__ void readFileSubSeq(char **file_name, int *ind_files, int n_file,
float *t_series, int t_size, float *q_series,
int window_size, int n_feat, int read_mode) {
int i, k;
FILE **inputFile = NULL;
inputFile = (FILE **)malloc(n_file * sizeof(FILE *));
for (i = 0; i < n_file; i++) {
char *curr_file = file_name[ind_files[i]];
inputFile[i] = fopen(curr_file, "r");
    if ( inputFile[i] == NULL ) {
fprintf(stderr, "Failed to open file: %s\n", curr_file);
exit(2);
}
}
float *tmp;
// dimension on x axis (columns) and time on y axis (rows)
if (read_mode == 0) {
tmp = (float *)malloc(n_feat * sizeof(float));
// reading t_series file
for (i = 0; i < t_size; i++) {
for (k = 0; k < n_feat; k++) {
if( fscanf(inputFile[0], "%f", &tmp[k]) < 1 ){
fprintf(stderr, "File reading error!\n");
exit(2);
}
t_series[(k * t_size) + i] = tmp[k];
}
}
// reading q_series file
for (i = 0; i < window_size; i++) {
for (k = 0; k < n_feat; k++) {
if( fscanf(inputFile[1], "%f", &tmp[k]) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
q_series[(k * window_size) + i] = tmp[k];
}
}
}
// time on x axis (row) and dimensions on y axis (columns)
else if (read_mode == 1) {
tmp = (float *)malloc(t_size * sizeof(float));
for (k = 0; k < n_feat; k++) {
for (i = 0; i < t_size; i++) {
        if( fscanf(inputFile[0], "%f", &tmp[i]) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
        t_series[(k * t_size) + i] = tmp[i]; // each dimension of t_series holds t_size samples
}
}
free(tmp);
tmp = (float *)malloc(window_size * sizeof(float));
for (k = 0; k < n_feat; k++) {
for (i = 0; i < window_size; i++) {
        if( fscanf(inputFile[1], "%f", &tmp[i]) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
q_series[(k * window_size) + i] = tmp[i];
}
}
}
}
/**
* \brief The function `readFile` allows to read several file formats for the `CLASSIFICATION` task.
*
* The following function allows to read several file format for the `CLASSIFICATION` task by providing in input several parameters.
* \param **file_name Vector containing the absolute paths for the files to read
* \param *ind_files Vector containing parsed indices for the file to read
* \param read_mode Integer for handling different input file formats (for more information, refer to README)
* \param *data Vector for storing all the data read contained in the file
* \param data_struct Struct containing some information about the data (e.g., dataset size, train size, ect.)
* \param window_size Length for the time series to be stored into `*data`
* \param *dataLabels Vector for storing all the label information contained in the file
* \param n_feat Number of variables for both time series
 * \param class_alg Integer for handling different reading modes, which depend on the type of algorithm picked
*/
__host__ void readFile(char **file_name, int *ind_files, int n_file,
int read_mode, float *data, struct data data_struct,
int window_size, int *dataLabels, int n_feat,
int class_alg) {
FILE **inputFile = NULL;
inputFile = (FILE **)malloc(n_file * sizeof(FILE *));
for (int i = 0; i < n_file; i++) {
char *curr_file = file_name[ind_files[i]];
inputFile[i] = fopen(curr_file, "r");
    if ( inputFile[i] == NULL ) {
      fprintf(stderr, "Failed to open file: %s\n", curr_file);
exit(2);
}
}
int i, j, k;
float label = 0;
// reading data from 1 big file
if (read_mode == 0) { // read_mode=0
// checking order input file
fseek(inputFile[0], 0L, SEEK_END);
int sz_file0 = ftell(inputFile[0]);
fseek(inputFile[0], 0L, SEEK_SET);
fseek(inputFile[1], 0L, SEEK_END);
int sz_file1 = ftell(inputFile[1]);
fseek(inputFile[1], 0L, SEEK_SET);
// obtaining indices on the basis of the files order
int lab_ind, data_ind;
if (sz_file0 > sz_file1) {
lab_ind = 1;
data_ind = 0;
} else {
lab_ind = 0;
data_ind = 1;
}
float tmp = 0;
// DIMENSION ON THE ROWS AND LENGTH ON COLS
for (i = 0; i < data_struct.tot_size; i++) {
if( fscanf(inputFile[lab_ind], "%f", &label) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
dataLabels[i] = (int)label;
for (k = 0; k < n_feat; k++) {
for (j = 0; j < window_size; j++) {
if( fscanf(inputFile[data_ind], "%f", &tmp) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
// MDT_D or MDT_I
if (class_alg < 2)
data[(n_feat * i * window_size) + (k * window_size) + j] = tmp;
else {
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) + j] =
tmp;
data[(n_feat * 2 * i * window_size) +
((2 * k * window_size) + window_size) + j] = tmp;
}
}
}
}
}
// reading from k-files
else if (read_mode == 1) {
float *tmp = (float *)malloc(n_feat * sizeof(float));
for (i = 0; i < data_struct.tot_size; i++) {
// reading labels
for (k = 0; k < n_feat; k++)
if( fscanf(inputFile[k], "%f", &label) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
dataLabels[i] = (int)label;
for (j = 0; j < window_size; j++) {
for (k = 0; k < n_feat; k++)
if( fscanf(inputFile[k], "%f", &tmp[k]) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
for (k = 0; k < n_feat; k++) {
// MDT_D or MDT_I
if (class_alg < 2)
data[(n_feat * i * window_size) + (k * window_size) + j] = tmp[k];
else {
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) + j] =
tmp[k];
data[(n_feat * 2 * i * window_size) +
((2 * k * window_size) + window_size) + j] = tmp[k];
}
}
}
}
} else {
float *tmp = (float *)malloc(window_size * sizeof(float));
int i = 0;
int size_arr[2] = {data_struct.train_size, data_struct.test_size};
for (int ll = 0; ll < n_file; ll++) {
for (int inn = 0; inn < size_arr[ll]; inn++) {
// reading data
for (k = 0; k < n_feat; k++) {
// reading labels from either train or test set
if( fscanf(inputFile[ll], "%f", &label) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
dataLabels[i] = (int)label;
for (j = 0; j < window_size; j++) {
if( fscanf(inputFile[ll], "%f", &tmp[j]) < 1){ // fd=0 data descript
fprintf(stderr, "File reading error!\n");
exit(2);
}
// MDT_D or MDT_I
if (class_alg < 2)
data[(n_feat * i * window_size) + (k * window_size) + j] = tmp[j];
else {
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) + j] =
tmp[j];
data[(n_feat * 2 * i * window_size) +
((2 * k * window_size) + window_size) + j] = tmp[j];
}
}
}
i++;
}
}
} // END ELSE
// Closing and deallocatin all files
for (k = 0; k < n_file; k++)
fclose(inputFile[k]);
free(inputFile);
}
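
// Memory layout produced by readFile (and consumed by the kernels above): the dataset is
// unrolled into a single float array where sample i, variable k and time step j live at
//
//   data[n_feat * i * window_size + k * window_size + j]            // MD-DTW / MD-E
//   data[n_feat * 2 * i * window_size + 2 * k * window_size + j]    // rotation-invariant
//
// with the rotation-invariant variants (class_alg >= 2) storing each variable twice in a
// row to expose all circular rotations.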
/**
* \brief The function `createTrainingTestingSet` splits the dataset information into random train and test subsets.
*
* The following function splits the `data` and `label` information into random two different train and test subsets.
* \param *data Vector containing the data
* \param *dataLabels Vector containing the label
* \param dataSize Number of time series stored in the '*data'
 * \param window_size Length for the time series stored into '*data'
* \param n_feat Number of variables for the time series stored into '*data'
* \param *h_train Vector containing the data for the train set
* \param *trainLabels Vector containing the labels for the train set
* \param trainSize Number of time series to be stored in the train set
* \param *h_test Vector containing the data for the test set
* \param *testLabels Vector containing the labels for the test set
* \param testSize Number of time series to be stored in the test set
* \param *tInd Vector providing train and test indices to split data in train test sets
 * \param k_th_fold Index of the fold to be used as the test set
 * \param class_mode Integer for handling different reading modes, which depend on the type of algorithm picked.
*/
__host__ void createTrainingTestingSet(
float *data, int *dataLabels, int dataSize, int window_size, int n_feat,
float *h_train, int *trainLabels, int trainSize, float *h_test,
int *testLabels, int testSize, int *tInd, int k_th_fold, int class_mode) {
int i, j, k, i_train = 0, i_test = 0;
if (tInd != NULL) {
/* Creating Training and Testing set */
for (i = 0; i < dataSize; i++) {
// training set
if (tInd[i] != k_th_fold) {
trainLabels[i_train] = dataLabels[i];
for (j = 0; j < window_size; j++) {
for (k = 0; k < n_feat; k++) {
if (class_mode < 2) {
h_train[(n_feat * i_train * window_size) + (k * window_size) +
j] =
data[(n_feat * i * window_size) + (k * window_size) + j];
} else {
h_train[(n_feat * 2 * i_train * window_size) +
(2 * k * window_size) + j] =
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) +
j];
h_train[(n_feat * 2 * i_train * window_size) +
((2 * k * window_size) + window_size) + j] =
data[(n_feat * 2 * i * window_size) +
((2 * k * window_size) + window_size) + j];
}
}
}
i_train++;
}
// testing set
else {
testLabels[i_test] = dataLabels[i];
for (j = 0; j < window_size; j++) {
for (k = 0; k < n_feat; k++) {
if (class_mode < 2)
h_test[(window_size * n_feat * i_test) + window_size * k + j] =
data[(n_feat * i * window_size) + (k * window_size) + j];
else
h_test[(window_size * n_feat * i_test) + window_size * k + j] =
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) +
j];
}
}
i_test++;
}
}
} else {
    i = 0;
    for (i_train = 0; i < trainSize; i++) {
trainLabels[i_train] = dataLabels[i];
for (j = 0; j < window_size; j++) {
// reading data
for (k = 0; k < n_feat; k++) {
if (class_mode < 2)
h_train[(n_feat * i_train * window_size) + (k * window_size) + j] =
data[(n_feat * i * window_size) + (k * window_size) + j];
else {
h_train[(n_feat * 2 * i_train * window_size) +
(2 * k * window_size) + j] =
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) +
j];
h_train[(n_feat * 2 * i_train * window_size) +
((2 * k * window_size) + window_size) + j] =
data[(n_feat * 2 * i * window_size) +
((2 * k * window_size) + window_size) + j];
}
}
}
i_train++;
}
    for (i_test = 0; i_test < testSize; i++) {
testLabels[i_test] = dataLabels[i];
for (j = 0; j < window_size; j++) {
for (k = 0; k < n_feat; k++) {
if (class_mode < 2)
h_test[(window_size * n_feat * i_test) + window_size * k + j] =
data[(n_feat * i * window_size) + (k * window_size) + j];
else
h_test[(window_size * n_feat * i_test) + window_size * k + j] =
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) +
j];
}
}
i_test++;
}
}
}
/**
 * \brief The function `cmpfunc` is a utility function for sorting vector values
* \param *a Integer value
* \param *b Integer value
* \return Difference betwen `*a` and `*b`
*/
__host__ int cmpfunc(const void *a, const void *b) {
return (*(int *)a - *(int *)b);
}
/**
 * \brief The function `generateArray` fills an input array from a desired starting point.
* \param size Size of the vector
* \param *arrayG Vector to fill
* \param *offset Offset from where to start to fill `*arrayG`
*/
__host__ void generateArray(int size, int *arrayG, int offset) {
int i, j = 0;
if(offset > size - 1){
printf("The offset has to be smaller than the size of the array\n");
exit(-1);
}
for (i = offset; size > 0; i++) {
arrayG[j++] = i;
size--;
}
}
/**
 * \brief The function `findInd` fills an array with the indices at which a desired value occurs in an input array.
* \param *array Vector where to search into
* \param size Size of the vector
* \param *arrayG Vector to fill with incremental value
* \param g Value to find in the `*array`
*/
__host__ void findInd(int *array, int size, int *arrayG, int g) {
int i, j = 0;
for (i = 0; i < size; i++) {
if (array[i] == g) {
arrayG[j++] = i;
}
}
}
/**
 * \brief The function `unique_val` looks for unique values in an array
 * \param *array Vector where to search into
 * \param size Size of the vector
 * \return Number of unique values found in `*array`
*/
__host__ int unique_val(int *array, int size) {
int i;
qsort(array, size, sizeof(int), cmpfunc);
int unique = 1; // incase we have only one element; it is unique!
for (i = 0;
i < size - 1 /*since we don't want to compare last element with junk*/;
i++) {
if (array[i] == array[i + 1]) {
continue;
} else {
unique++;
}
}
return unique;
}
/**
 * \brief The function `accumarray` is a utility function for the k-fold cross validation.
* \param *array Vector where to search into
* \param size Size of the vector
* \param *val Value to find
* \return Utility array
*/
__host__ int *accumarray(int *array, int size, int *val) {
int i, j = 0;
int u_val = unique_val(array, size);
int *nS = (int *)calloc(u_val, sizeof(int));
// memset(nS, 0, u_val * sizeof(int));
for (i = 0; i < size; i++) {
    if (i < size - 1 && array[i] == array[i + 1]) { // guard against reading past the last element
nS[j]++;
continue;
} else {
val[j] = array[i];
nS[j]++;
j++;
}
}
return nS;
}
/**
 * \brief The function `shuffle` is a function for shuffling the data contained in an array.
* \param *array Vector to shuffle
* \param array_size Size of the vector
* \param shuff_size Shuffle factor size
*/
__host__ void shuffle(int *array, size_t array_size, size_t shuff_size) {
if (array_size > 1) {
size_t i;
for (i = 0; i < shuff_size - 1; i++) {
size_t j = i + rand() / (RAND_MAX / (array_size - i) + 1);
int t = array[j];
array[j] = array[i];
array[i] = t;
}
}
}
/**
 * \brief The function `idAssign` is a utility function for the k-fold cross validation.
* \param *perm Vector of permutations
* \param size_perm Size of the permutations
* \param *group Support vector
* \param size_group Size of the support vector
* \param *rand_ind Vector of random value
 * \param *h Support vector
* \param *tInd Vector of indices values for splitting the dataset into train and test set
*/
__host__ void idAssign(int *perm, int size_perm, int *group, int size_group,
int *rand_ind, int *h, int *tInd) {
int i;
int group_perm;
for (i = 0; i < size_group; i++) {
group_perm = perm[group[i]];
tInd[h[rand_ind[i]]] = group_perm;
}
}
/**
* \brief The function `checkCUDAError` display on the standard output more information about a type of CUDA error.
* \param *msg Message to display along with the error information provided by CUDA
*/
__host__ void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Cuda error: %s %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
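
// Typical usage (illustrative, with hypothetical buffer and grid names): call right after
// a kernel launch to surface launch or configuration failures.
//
//   hipLaunchKernelGGL(MD_ED_D, grid, block, 0, 0, d_train, d_query,
//                      trainSize, window_size, n_feat, d_out, 0 /* task */, 1 /* gm */);
//   checkCUDAError("MD_ED_D kernel launch");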
/**
* \brief The function `crossvalind_Kfold` generates Cross-Validation indices for splitting the dataset into train and test set.
* \param *label Vector of labels
* \param N Size of the vector `*label`
* \param K Number of fold to generate
 * \param flag_shuffle Flag enabling the shuffling of the observations before the fold assignment
 * \return Vector of fold indices (values in [0, K-1]) assigning each observation to one of the K folds.
*/
__host__ int *crossvalind_Kfold(int *label, int N, int K, int flag_shuffle) {
int *label_copy = (int *)malloc(N * sizeof(int));
memcpy(label_copy, label, N * sizeof(int));
// output
int *tInd = (int *)calloc(N, sizeof(int));
// memset(tInd, 0, N * sizeof(int));
int ul = unique_val(label_copy, N);
int *arr_val = (int *)malloc(ul * sizeof(int));
int *nS = accumarray(label_copy, N, arr_val);
int i, j;
int *pq = (int *)malloc(K * sizeof(int));
generateArray(K, pq, 0);
for (i = 0; i < ul; i++) {
int *randInd = (int *)malloc(nS[i] * sizeof(int));
generateArray(nS[i], randInd, 0);
int *q = (int *)malloc(nS[i] * sizeof(int));
int *h = (int *)malloc(nS[i] * sizeof(int));
findInd(label, N, h, arr_val[i]);
for (j = 0; j < nS[i]; j++) {
float val = (float)(K * (j + 1)) / nS[i]; // j+1 because we need no zero
// values; MATLAB: q =
// ceil(K*(1:nS(g))/nS(g));
q[j] = (int)ceil(val) - 1; // C indices start from 0
}
if (flag_shuffle == 1) {
shuffle(pq, K, K);
shuffle(randInd, nS[i], nS[i]);
}
idAssign(pq, K, q, nS[i], randInd, h, tInd);
free(randInd);
free(q);
free(h);
}
return tInd;
}
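
// Illustrative use of the K-fold indices (variable names are hypothetical, and trainSize /
// testSize are assumed to be sized for the current fold): tInd assigns every observation
// to one of K folds, and iterating k_th_fold over 0..K-1 yields K train/test splits.
//
//   int K = 10;
//   int *tInd = crossvalind_Kfold(dataLabels, dataSize, K, 1 /* shuffle */);
//   for (int fold = 0; fold < K; fold++) {
//     createTrainingTestingSet(data, dataLabels, dataSize, window_size, n_feat,
//                              h_train, trainLabels, trainSize, h_test, testLabels,
//                              testSize, tInd, fold, class_mode);
//     // ... classify the test fold against the training fold ...
//   }
//   free(tInd);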
/**
 * \brief The function `countVal` counts the number of occurrences of a desired value stored in an array.
 * \param *data Vector where to search
 * \param N Size of the vector `*data`
 * \param key Desired value to search for in `*data`
 * \return Number of occurrences of `key` found in `*data`
*/
__host__ int countVal(int *data, int N, int key) {
int i, cnt = 0;
for (i = 0; i < N; i++) {
if (data[i] == key)
cnt++;
}
return cnt;
}
/**
 * \brief The function `standard_deviation` computes the `standard deviation` of a given vector.
*
* The following function computes the `standard deviation` of a given input vector.
* \param *data Input vector
* \param n Size of the vector
* \param *avg `Mean` computed on the input vector
* \return `Standard deviation` computed on the input vector
*/
__host__ float standard_deviation(float *data, int n, float *avg) {
float mean = 0.0, sum_deviation = 0.0;
int i;
for (i = 0; i < n; ++i) {
mean += data[i];
}
mean = mean / n;
*avg = mean;
for (i = 0; i < n; ++i)
sum_deviation += (data[i] - mean) * (data[i] - mean);
return sqrt(sum_deviation / (n - 1));
}
/**
 * \brief The function `z_normalize2D` z-normalizes each row of an input matrix.
 *
 * The following function calculates the z-score of each value in a row, relative to the row's sample mean and standard deviation.
* \param *M Input matrix
* \param nrow number of rows
* \param ncol number of columns
*/
__host__ void z_normalize2D(float *M, int nrow, int ncol) {
int i;
float std_dev = 0;
float *mean = (float *)malloc(sizeof(float));
for (i = 0; i < nrow; i++) {
std_dev = 0;
*mean = 0;
std_dev = standard_deviation(&M[i * ncol], ncol, mean);
for (int k = 0; k < ncol; k++)
M[i * ncol + k] = (M[i * ncol + k] - (*mean)) / std_dev;
}
free(mean);
}
/**
* \brief The function `short_ed_c` computes the `mono-dimensional Euclidean` distance.
*
 * It considers the calculation of the Euclidean distance for two mono-dimensional time series stored, respectively, into the vectors `*T` and `*S`
* \param *S Vector containing the first time series
* \param *T Vector containing the second time series
* \param window_size Length of the two given time series
* \return ED distance among the two time series
*/
__host__ float short_ed_c(float *T, float *S, int window_size) {
float sumErr = 0;
for (int i = 0; i < window_size; i++)
sumErr += (T[i] - S[i]) * (T[i] - S[i]);
return sqrt(sumErr);
}
/**
* \brief The function `short_dtw_c` computes the `mono-dimensional Dynamic Time Warping` distance (DTW).
*
 * It considers the calculation of the DTW distance for two mono-dimensional time series stored, respectively, into the vectors `*instance` and `*query`
* \param *S instance containing the first time series
* \param *query Vector containing the time series to compare against `*instance`
* \param ns Length of the `*instance`
* \param nt Length of the `*query`
* \return DTW distance among the two time series
*/
__host__ float short_dtw_c(float *instance, float *query, int ns, int nt) {
int k = 0, l = 0, g = 0;
long long int i, j;
float **array;
float min_nb;
// create array
array = (float **)malloc((nt) * sizeof(float *));
for (i = 0; i < nt; i++) {
array[i] = (float *)malloc((2) * sizeof(float));
}
k = 0;
l = 1;
for (i = 0; i < nt; i++) {
if (i == 0)
array[i][k] = pow((instance[0] - query[i]),
2); // squared difference (ins[0]-query[0])^2
else
array[i][k] = pow((instance[0] - query[i]), 2) + array[i - 1][k];
}
k = 1;
l = 0;
// computing DTW
for (j = 1; j < ns; j++) {
i = 0;
array[i][k] = pow((instance[j] - query[i]), 2) + array[i][l];
for (i = 1; i < nt; i++) {
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
array[i][k] = pow((instance[j] - query[i]), 2) + min_nb;
}
g = k;
k = l;
l = g;
}
float min = array[nt - 1][g];
  for (i = 0; i < nt; i++) // free all nt rows allocated above
free(array[i]);
free(array);
return min;
}
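/* Example usage (illustrative sketch; the series values are hypothetical):
 * the warping path absorbs the one-step shift between the two series, so the
 * returned cost (about 1.0 here) is much smaller than the pointwise
 * squared-difference sum (4.0).
 *
 *   float x[5] = {0, 1, 2, 1, 0};
 *   float y[5] = {0, 0, 1, 2, 1};
 *   float d = short_dtw_c(x, y, 5, 5);
 */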
/**
* \brief The function `short_md_ed_c` computes the `Multi-Dimensional Euclidean` distance (MD-E).
*
 * It considers the calculation of the MD-E distance for two multivariate time series (MTS) stored, respectively, in the vectors `*T` and `*S`
* \param *S Vector containing the first time series
* \param *T Vector containing the second time series
* \param window_size Length of the two given time series
* \param dimensions Number of variables for the two MTS
* \param offset Integer used for computing the rotation invariant euclidean distance (It's usually equal to window_size)
* \return Euclidean distance among the two MTS
*/
__host__ float short_md_ed_c(float *T, float *S, int window_size,
int dimensions, int offset) {
float sumErr = 0, dd = 0;
for (int i = 0; i < window_size; i++) {
dd = 0;
for (int p = 0; p < dimensions; p++)
dd += (T[(p * offset) + i] - S[(p * window_size) + i]) *
(T[(p * offset) + i] - S[(p * window_size) + i]);
sumErr += dd;
}
return sqrt(sumErr);
}
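/* Example usage (illustrative sketch; the values are hypothetical): the
 * variables of an MTS are stored one after the other, so with
 * window_size = offset = 3 and dimensions = 2 the layout is [x0 x1 x2 | y0 y1 y2].
 *
 *   float T[6] = {0, 1, 2,  5, 5, 5};
 *   float S[6] = {0, 1, 2,  5, 5, 6};
 *   float d = short_md_ed_c(T, S, 3, 2, 3);   // sqrt(1.0)
 */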
/**
* \brief The function `short_md_dtw_c` computes the `Multi-Dimensional Dynamic Time Warping` distance (MD-DTW).
*
 * It considers the calculation of the MD-DTW distance for two multivariate time series (MTS) stored, respectively, in the vectors `*S` and `*T`
* \param *S instance containing the first time series
* \param *T Vector containing the time series to compare against `*instance`
* \param ns Length of the `*instance`
 * \param nt Length of the `*query`
* \param dim Number of variables for the two MTS
* \param offset Integer used for computing the rotation invariant euclidean distance (It's usually equal to window_size)
* \return Dynamic Time Warping distance among the two MTS
*/
__host__ float short_md_dtw_c(float *S, float *T, int ns, int nt, int dim,
int offset) {
int k = 0, l = 0, g = 0;
long long int i, j;
float **array;
float min_nb;
array = (float **)malloc((nt) * sizeof(float *));
for (i = 0; i < nt; i++) {
array[i] = (float *)malloc((2) * sizeof(float));
}
k = 0;
l = 1;
for (i = 0; i < nt; i++) {
array[i][k] = 0.0;
for (int p = 0; p < dim; p++) {
if (i == 0)
array[i][k] += pow((S[p * offset + i] - T[p * nt + i]), 2);
else
array[i][k] += pow((S[p * offset + 0] - T[p * nt + i]), 2);
}
if (i != 0)
array[i][k] += array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < ns; j++) {
i = 0;
array[i][k] = 0.0;
for (int p = 0; p < dim; p++)
array[i][k] += pow((S[p * offset + j] - T[p * nt + i]), 2);
array[i][k] += array[i][l];
for (i = 1; i < nt; i++) {
array[i][k] = 0.0;
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
for (int p = 0; p < dim; p++)
array[i][k] += pow((S[p * offset + j] - T[p * nt + i]), 2);
array[i][k] += min_nb;
}
g = k;
k = l;
l = g;
}
  float min = array[nt - 1][g];
  // free the cost-matrix rows allocated above
  for (i = 0; i < nt; i++)
    free(array[i]);
  free(array);
  return min;
}
/**
 * \brief The function `print_help` prints on the standard output several pieces of information about the input parameters to feed to the software.
*/
__host__ void print_help(void) {
  fprintf(stderr,
          "\nUsage: MTSS [OPTIONS]\n"
          "Multivariate Time Series Software (MTSS) using Multivariate Dynamic "
          "Time Warping\n"
          "\n"
          "OPTIONS:\n"
          "-t Task \t\tParameters\n"
          "String value \t\tThis param. represents the kind of task "
          "you want to perform (CLASSIFICATION or SUBSEQ_SEARCH)\n\n"
          "-i Input \t\tParameters\n"
          "String value \t\tThis param. is used to select the CPU "
          "or GPU version\n"
          "Integer value \t\tThis param. represents the "
          "dimensionality of MTS (TS) (e.g., 1,2,3, etc.)\n"
          "Integer values \t\tThe second/third argument (depending on "
          "the first param.) represents either the desired number of threads "
          "with which the kernel will be executed (e.g., 64,128,...,1024) or "
          "the read mode. For more information refer to the README.\n\n"
          "-f Files \t\tParameter\n"
          "String value \t\tFollowed by two or more text files "
          "representing the data format (for more information about the "
          "structure of these files see the README file provided with the "
          "software)\n\n"
          "-k Cross Validation \t\tParameter\n"
          "Integer value \t\tThis param. specifies the number of folds "
          "to use in the K-fold cross-validation step\n\n"
          "Integer value \t\tSetting this param. to 1 does not allow "
          "the reproducibility of the results on the same dataset between the "
          "GPU and CPU versions\n\n"
          "-o Option Parameters \t\tParameter.\n"
          "Integer value \t\tThis param. represents the size of the "
          "dataset (number of samples)\n"
          "Integer value \t\tThis param. represents the window size "
          "of the MTS\n\n"
          "-m Algorithm Mode \t\tParameters\n"
          "Integer value \t\tThis param. represents the type of MTSS "
          "algorithm to use in the tasks (for more information see the README "
          "file)\n\n"
          "-d Device Choice \t\tParameters\n"
          "Integer value \t\tThis param. specifies the GPU device (on "
          "your machine) you want to use to execute the MTSS\n\n"
          "-v Verbose Mode \t\tParameters\n"
          "Integer value \t\tThis param. specifies the verbosity "
          "of the software output. The value 0 means no verbosity\n\n"
"--version \t\tDisplay version information.\n"
"--help \t\tDisplay help information.\n"
"\n"
"e.g.\n"
"./mdtwObj -t CLASSIFICATION -i CPU 3 1 -f "
"data/classification/rm_1/X_MAT data/classification/rm_1/Y_MAT "
"data/classification/rm_1/Z_MAT -k 10 0 -o 1000 152 -m 0 DTW\n"
"./mdtwObj -t CLASSIFICATION -i GPU 3 512 0 -f "
"data/classification/rm_0/DATA data/classification/rm_0/LABEL -k 10 "
"0 -o 1000 152 -m 0 DTW -d 0\n"
"./mdtwObj -t SUBSEQ_SEARCH -i CPU 1 0 -f ECGseries ECGquery -o 3907 "
"421 -m 0 -d 0\n"
"./mdtwObj -t SUBSEQ_SEARCH -i GPU 3 512 0 -f "
"data/subseq_search/T_series data/subseq_search/Q_series -o 3907 421 "
"-m 1 DTW -d 0\n");
exit(0);
}
/**
 * \brief The function `print_version` prints on the standard output the software version.
*/
__host__ void print_version(void) {
fprintf(stderr, "Multivariate Time Series Software version 1.0.0\n"
"Copyright (C) 2016 Davide Nardone <[email protected]>\n"
"Originally inspired by Doruk Sart et al\n"
"See the README file for license information.\n");
exit(0);
}
/**
 * \brief The function `infoDev` prints on the standard output several pieces of information about the available GPUs.
*/
__host__ void infoDev() {
int deviceCount;
hipGetDeviceCount(&deviceCount);
printf("Number of device: %d\n", deviceCount);
int device;
hipDeviceProp_t deviceProp;
// retrieving all devices
for (device = 0; device < deviceCount; ++device) {
// getting information about i-th device
hipGetDeviceProperties(&deviceProp, device);
// printing information about i-th device
printf("\n\n>>>>>>>>>>>>>>>>>>\nSelected device:%d\n<<<<<<<<<<<<<<<<<<\n\n",
device);
printf("\ndevice %d : %s\n", device, deviceProp.name);
printf("major/minor : %d.%d compute capability\n", deviceProp.major,
deviceProp.minor);
printf("Total global mem : %lu bytes\n", deviceProp.totalGlobalMem);
printf("Shared block mem : %lu bytes\n", deviceProp.sharedMemPerBlock);
printf("Max memory pitch : %lu bytes\n", deviceProp.memPitch);
printf("RegsPerBlock : %d \n", deviceProp.regsPerBlock);
printf("WarpSize : %d \n", deviceProp.warpSize);
printf("MaxThreadsPerBlock : %d \n", deviceProp.maxThreadsPerBlock);
printf("TotalConstMem : %lu bytes\n", deviceProp.totalConstMem);
printf("ClockRate : %d (kHz)\n", deviceProp.clockRate);
printf("deviceOverlap : %d \n", deviceProp.deviceOverlap);
printf("MultiProcessorCount: %d \n", deviceProp.multiProcessorCount);
printf("\n");
}
exit(-1);
}
/**
 * \brief The function `getDevProp` returns an object `deviceProp` containing all the information about a specific GPU device.
 * \param device Index of the GPU device to query
 * \return `deviceProp` CUDA object containing several pieces of information about its own device.
*/
__host__ hipDeviceProp_t getDevProp(int device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
return deviceProp;
}
/**
 * \brief The function `checkGPU_prop` checks whether a requested GPU property value is valid for the selected device.
 * \param *compution_type String used to trigger the check only for GPU execution
* \param *deviceProp CUDA object containing several information about its own device
* \param *prop_in GPU property to check
* \param *prop_GPU_in GPU property value to check
*/
__host__ void checkGPU_prop(char *compution_type, hipDeviceProp_t deviceProp, const char *prop_in, int prop_GPU_in){
if (strcmp(compution_type, "GPU") == 0) {
if ( (strcmp(prop_in, "maxThreadsPerBlock") == 0) && (prop_GPU_in < 0 || prop_GPU_in > deviceProp.maxThreadsPerBlock) ) {
      printf(" %d is an invalid number of threads per block for the device %s.\n The number of threads "
        "per block has to be included in [0, %d]\n", prop_GPU_in, deviceProp.name, deviceProp.maxThreadsPerBlock);
exit(-2);
}
}
}
/**
* \brief The function `initializeArray` fills an input array with random values.
* \param *array Vector to fill
* \param n Size of the vector
*/
__host__ void initializeArray(float *array, int n) {
int i;
for (i = 0; i < n; i++)
array[i] = ((float)rand()) / (float)RAND_MAX;
}
__host__ void initializeArray(int *array, int n) {
int i;
for (i = 0; i < n; i++)
array[i] = ((int)rand()) / (int)RAND_MAX;
}
/**
* \brief The function `initializeMatrix` fills an input matrix with random values.
* \param *matrix Matrix to fill
* \param M Number of rows
* \param N Number of columns
*/
__host__ void initializeMatrix(float *matrix, int M, int N) {
int i, j;
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
matrix[i * N + j] = ((float)rand()) / (float)RAND_MAX;
}
/**
 * \brief The function `printArray` prints on the standard output an input array of float values.
 * \param *array Array to print
* \param n Size of the vector
*/
__host__ void printArray(float *array, int n) {
int i;
for (i = 0; i < n; i++)
printf("val[%d]: %f\n", i, array[i]);
printf("\n");
}
/**
 * \brief The function `printArrayI` prints on the standard output an input array of integer values.
 * \param *array Array to print
* \param n Size of the vector
*/
__host__ void printArrayI(int *array, int n) {
int i;
for (i = 0; i < n; i++)
printf("val[%d]: %d\n", i, array[i]);
printf("\n");
}
/**
 * \brief The function `printMatrix` prints on the standard output an input matrix of float values.
 * \param *matrix Matrix to print
* \param M Number of rows
* \param N Number of columns
*/
__host__ void printMatrix(float *matrix, int M, int N) {
int i, j;
for (i = 0; i < M; i++) {
for (j = 0; j < N; j++)
printf("%f\n", matrix[i * N + j]);
printf("\n");
}
}
/**
 * \brief The function `equalArray` checks whether the host and device results are the same
* \param *a array host
* \param *b array device
 * \param n Size of the two vectors
*/
__host__ void equalArray(float *a, float *b, int n) {
int i = 0;
while (a[i] == b[i])
i++;
if (i < n) {
    printf("The host and device results differ\n");
printf("CPU[%d]: %f, GPU[%d]: %f \n", i, a[i], i, b[i]);
} else
    printf("The host and device results match\n");
}
/**
 * \brief The function `compareArray` prints on the standard output the entries where the host and device arrays differ
* \param *a array host
* \param *b array device
 * \param n Size of the two vectors
*/
__host__ void compareArray(float *a, float *b, int n) {
int i = 0;
for (i = 0; i < n; ++i) {
if (a[i] != b[i])
printf("CPU[%d]: %f, GPU[%d]: %f \n", i, a[i], i, b[i]);
}
}
/**
* \brief The function `min_arr` computes the minimum value of an input array.
* \param *arr array
 * \param n Size of the vector
* \param *ind Index of the minimum value found into the array `*arr`
* \return minimum value found into the array `*arr`
*/
__host__ float min_arr(float *arr, int n, int *ind) {
float min = FLT_MAX;
*ind = -1;
for (int i = 0; i < n; ++i) {
if (arr[i] < min) {
min = arr[i];
*ind = i;
}
}
return min;
}
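/* Example usage (illustrative sketch; the distances are hypothetical):
 *
 *   float dists[4] = {3.2f, 0.5f, 7.1f, 0.9f};
 *   int ind;
 *   float best = min_arr(dists, 4, &ind);   // best == 0.5f, ind == 1
 */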
/**
* \brief The function `max_arr` computes the maximum value of an input array.
* \param *arr array
 * \param n Size of the vector
* \param *ind Index of the maximum value found into the array `*arr`
* \return maximum value found into the array `*arr`
*/
__host__ float max_arr(float *arr, int n, int *ind) {
  float max = -FLT_MAX; // most negative float, so any array value can become the maximum
*ind = -1;
for (int i = 0; i < n; ++i) {
if (arr[i] > max) {
max = arr[i];
*ind = i;
}
}
return max;
}
/**
 * \brief The function `timedifference_msec` computes the time difference between `t0` and `t1`.
 * \param t0 Structure containing the time taken at `t0`
 * \param t1 Structure containing the time taken at `t1`
 * \return Elapsed time, in milliseconds, between `t0` and `t1`
*/
float timedifference_msec(struct timeval t0, struct timeval t1) {
return (t1.tv_sec - t0.tv_sec) * 1000.0f +
(t1.tv_usec - t0.tv_usec) / 1000.0f;
}
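/* Example usage (illustrative sketch, assuming <sys/time.h> is available, as it
 * already is for `struct timeval` above):
 *
 *   struct timeval t0, t1;
 *   gettimeofday(&t0, NULL);
 *   // ... code to be measured ...
 *   gettimeofday(&t1, NULL);
 *   printf("elapsed: %f ms\n", timedifference_msec(t0, t1));
 */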
/**
 * \brief The function `foldit` maps a window size onto the index used by the kernel template switch statements.
 * \param ws Window size (length of the time series)
 * \return Index of the switch case handling `ws`, or 999 to trigger the default case
*/
__host__ int foldit (int ws) {
if (ws <= 0) return -1;
else if (ws > 0 and ws <= 64) return 0;
else if (ws > 64 and ws <= 128) return 1;
else if (ws > 128 and ws <= 256) return 2;
else if (ws > 256 and ws <= 512) return 3;
else if (ws > 512 and ws <= 1024) return 4;
else if (ws > 1024 and ws <= 2048) return 5;
else if (ws > 2048 and ws <= 4096) return 6;
else if (ws > 4096 and ws <= 8192) return 7;
else if (ws > 8192 and ws <= 16384) return 8;
else return 999; // triggers the default part of the switch
}
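/* Illustrative mapping: foldit() returns the index of the smallest template
 * length that can hold the given window size, e.g.
 *
 *   foldit(100)   -> 1    // served by the <128> kernel instantiation
 *   foldit(512)   -> 3    // served by the <512> kernel instantiation
 *   foldit(20000) -> 999  // no instantiation available, triggers the default case
 */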
/**
 * \brief The function `MDD_SIM_MES_CPU` is a wrapper function used for computing the CPU dependent multidimensional similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
 * \param window_size Length of the time series stored into both train and test sets
* \param n_feat Number of variables for the time series stored into both train and test set
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \return The number of misclassifications
*/
__host__ float MDD_SIM_MES_CPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, int window_size, int n_feat, char *distance_type, int verbose_mode){
int *minI = (int *)malloc(sizeof(int));
float *h_Out = (float *)malloc(trainSize * sizeof(float));
int err = 0;
float min = 0;
for (int k = 0; k < testSize; k++) {
for (int j = 0; j < trainSize; j++) {
if (strcmp(distance_type, "DTW") == 0) // DTW distance
h_Out[j] = short_md_dtw_c(&h_train[j * n_feat * window_size],
&h_test[k * n_feat * window_size],
window_size, window_size, n_feat,
window_size);
else // Euclidean Distance
h_Out[j] = short_md_ed_c(&h_train[j * n_feat * window_size],
&h_test[k * n_feat * window_size],
window_size, n_feat, window_size);
}
min = min_arr(h_Out, trainSize, minI);
if (trainLabels[*minI] != testLabels[k])
err++;
if (verbose_mode > 0 && verbose_mode < testSize) {
if (k % verbose_mode == 0)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
else if (k == testSize-1)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
}
}
free(minI);
return err;
}
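/* Example usage (illustrative sketch; `trainLabels`, `testLabels`, `h_train` and
 * `h_test` are assumed to be already loaded): classifying 20 test MTS against
 * 100 training MTS, each with 3 variables of length 152, using DTW.
 *
 *   int errors = (int)MDD_SIM_MES_CPU(100, 20, trainLabels, testLabels,
 *                                     h_train, h_test, 152, 3, (char *)"DTW", 0);
 *   printf("error rate: %f\n", (float)errors / 20);
 */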
/**
 * \brief The function `MDD_SIM_MES_CPU` is a wrapper function used for computing the CPU dependent multidimensional similarity measure for the sub-sequence similarity search task.
* \param nss Number of sub-sequences to search
* \param *t_series Vector containing the first time series
 * \param *q_series Vector containing the time series to compare against `*t_series`
* \param t_size Length of the time series `*t_series`
* \param q_size Length of the time series `*q_series`
* \param n_feat Number of variables for the two MTS
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *owp Support vector containing all the computed distances
* \param *ind_min_val Index containing the minimum value obtained by comparing `*q_series` over `*t_series`
* \return minimum value obtained by comparing `*q_series` over `*t_series`
*/
__host__ float MDD_SIM_MES_CPU(int nss, float *t_series, float *q_series, int t_size, int q_size, int n_feat, char *distance_type, int verbose_mode, float *owp, int *ind_min_val){
float min = 9999.99, dist;
for (int i = 0; i < nss; i++) {
dist = 0.0;
if (strcmp(distance_type, "DTW") == 0) // DTW distance
dist = short_md_dtw_c(&t_series[i], q_series, q_size,
q_size, n_feat, t_size);
else
dist = short_md_ed_c(&t_series[i], q_series, q_size, n_feat,
t_size);
owp[i] = dist;
if (verbose_mode > 0 && verbose_mode < nss) {
if (i % verbose_mode == 0)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
      else if (i == nss - 1)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
}
}
// computing minimum value
min = min_arr(owp, nss, ind_min_val);
return min;
}
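/* Example usage (illustrative sketch; `t_series` and `q_series` are assumed to
 * be already loaded): searching a query of length 421 inside a 1-dimensional
 * series of length 3907 yields nss = 3907 - 421 + 1 candidate alignments.
 *
 *   int nss = 3907 - 421 + 1;
 *   float *owp = (float *)malloc(nss * sizeof(float));
 *   int ind_min;
 *   float best = MDD_SIM_MES_CPU(nss, t_series, q_series, 3907, 421, 1,
 *                                (char *)"DTW", 0, owp, &ind_min);
 */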
/**
 * \brief The function `MDI_SIM_MES_CPU` is a wrapper function used for computing the CPU independent multidimensional similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
 * \param window_size Length of the time series stored into both train and test sets
* \param n_feat Number of variables for the time series stored into both train and test set
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \return The number of misclassifications
*/
__host__ float MDI_SIM_MES_CPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, int window_size, int n_feat, char *distance_type, int verbose_mode){
int *minI = (int *)malloc(sizeof(int));
float *h_Out = (float *)malloc(trainSize * window_size * sizeof(float));
int err = 0;
float min = 0, dtw_curr = 0, cum_sum = 0;
for (int k = 0; k < testSize; k++) {
for (int j = 0; j < trainSize; j++) {
cum_sum = 0.0;
for (int d = 0; d < n_feat; d++) {
if (strcmp(distance_type, "DTW") == 0) // DTW distance
dtw_curr = short_dtw_c(
&h_train[(d * window_size) + (j * n_feat * window_size)],
&h_test[(k * n_feat * window_size) + (d * window_size)],
window_size, window_size);
else
dtw_curr = short_ed_c(
&h_train[(d * window_size) + (j * n_feat * window_size)],
&h_test[(k * n_feat * window_size) + (d * window_size)],
window_size);
cum_sum += dtw_curr;
}
h_Out[j] = cum_sum;
}
min = min_arr(h_Out, trainSize, minI);
if (trainLabels[*minI] != testLabels[k])
err++;
if (verbose_mode > 0 && verbose_mode < testSize) {
if (k % verbose_mode == 0)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
else if (k == testSize-1)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
}
}
free(minI);
return err;
}
/**
 * \brief The function `MDI_SIM_MES_CPU` is a wrapper function used for computing the CPU independent multidimensional similarity measure for the sub-sequence similarity search task.
* \param nss Number of sub-sequences to search
* \param *t_series Vector containing the first time series
 * \param *q_series Vector containing the time series to compare against `*t_series`
* \param t_size Length of the time series `*t_series`
* \param q_size Length of the time series `*q_series`
* \param n_feat Number of variables for the two MTS
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *owp Support vector containing all the computed distances
* \param *ind_min_val Index containing the minimum value obtained by comparing `*q_series` over `*t_series`
* \return minimum value obtained by comparing `*q_series` over `*t_series`
*/
__host__ float MDI_SIM_MES_CPU(int nss, float *t_series, float *q_series, int t_size, int q_size, int n_feat, char *distance_type, int verbose_mode, float *owp, int *ind_min_val){
float min = 9999.99, dist, val_curr;
for (int i = 0; i < nss; i++) {
dist = 0.0;
for (int k = 0; k < n_feat; k++) {
if (strcmp(distance_type, "DTW") == 0) // DTW distance
val_curr = short_dtw_c(&t_series[(k * t_size) + i],
&q_series[(k * q_size)], q_size,
q_size);
else
val_curr = short_ed_c(&t_series[(k * t_size) + i],
&q_series[(k * q_size)], q_size);
dist += val_curr;
}
owp[i] = dist;
if (verbose_mode > 0 && verbose_mode < nss) {
if (i % verbose_mode == 0)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
      else if (i == nss - 1)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
}
}
min = min_arr(owp, nss, ind_min_val);
return min;
}
/**
 * \brief The function `MDR_SIM_MES_CPU` is a wrapper function used for computing the CPU multidimensional rotation-invariant similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
 * \param window_size Length of the time series stored into both train and test sets
* \param n_feat Number of variables for the time series stored into both train and test set
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *err The number of misclassifications using the basic similarity measure
 * \param *errNR The number of misclassifications using the rotation-invariant similarity measure
*/
__host__ void MDR_SIM_MES_CPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, int window_size, int n_feat, char *distance_type, int verbose_mode, int *err, int *errNR){
float *h_Out = (float *)malloc(trainSize * window_size * sizeof(float));
float minNR = 0.0, min = 0.0;
int minINR = 0, minI = 0;
for (int i = 0; i < testSize; i++) {
for (int j = 0; j < trainSize; j++) {
for (int k = 0; k < window_size; k++) {
if (strcmp(distance_type, "DTW") == 0) // DTW distance
h_Out[(j * window_size) + k] = short_md_dtw_c(
&h_train[(2 * j * n_feat * window_size) + k],
&h_test[i * n_feat * window_size], window_size,
window_size, n_feat, 2 * window_size);
else
h_Out[(j * window_size) + k] = short_md_ed_c(
&h_train[(2 * j * n_feat * window_size) + k],
&h_test[i * n_feat * window_size], window_size, n_feat,
2 * window_size);
}
}
min = 9999999999.99;
minI = -1;
minINR = -1;
minNR = 99999999999.99;
for (int m = 0; m < trainSize; m++) {
if (h_Out[m * window_size] < minNR) {
minNR = h_Out[m * window_size];
minINR = m;
}
for (int p = 0; p < window_size; p++) {
int t = m * window_size + p;
if (h_Out[t] < min) {
min = h_Out[t];
minI = m;
}
}
}
if (trainLabels[minI] != testLabels[i])
(*err)++;
if (trainLabels[minINR] != testLabels[i])
(*errNR)++;
if (verbose_mode > 0 && verbose_mode < testSize) {
if (i % verbose_mode == 0)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f \t\t NRI: %d\t%3.6f\n", i,
testLabels[i], trainLabels[minI], min,
trainLabels[minINR], minNR);
else if (i == testSize-1)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f \t\t NRI: %d\t%3.6f\n", i,
testLabels[i], trainLabels[minI], min,
trainLabels[minINR], minNR);
}
}
}
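/* Note on the rotation-invariant layout (an interpretation of the indexing
 * above): each training MTS is expected to be stored twice back to back, hence
 * the 2 * window_size strides, so that every circular shift of the series can
 * be read as a contiguous window. For one variable of length 4:
 *
 *   h_train row:  a0 a1 a2 a3 a0 a1 a2 a3
 *                 [a0 a1 a2 a3]             shift k = 0
 *                    [a1 a2 a3 a0]          shift k = 1
 */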
/**
 * \brief The function `MDD_SIM_MES_GPU` is a wrapper function used for computing the GPU dependent multidimensional similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
* \param *d_train Vector containing the data for the train set stored in the GPU device
* \param *d_test Vector containing the data for the test set stored in the GPU device
 * \param *d_Out Vector containing temporary results on the device
 * \param *h_Out Vector containing temporary results on the host
 * \param window_size Length of the time series stored into both train and test sets
* \param n_feat Number of variables for the time series stored into both train and test set
* \param blockSize Number of threads to use for comparing the MTS
* \param deviceProp CUDA object containing several information about its own device
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \return The number of misclassifications
*/
__host__ float MDD_SIM_MES_GPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, float *d_train, float *d_test, float *d_Out, float *h_Out, int window_size, int n_feat, int blockSize, hipDeviceProp_t deviceProp, char *distance_type, int verbose_mode){
float grid_size, min = 9999.99;
dim3 grid;
dim3 threads;
int *minI = (int *)malloc(sizeof(int));
int err = 0;
float T2 = (n_feat * window_size) * sizeof(float);
int gm = 0;
if (T2 > deviceProp.sharedMemPerBlock) {
    printf("\tWarning: The T2 test time series (%f bytes) doesn't fit into the shared "
           "memory (%lu bytes), so it will be allocated into the global "
"memory\n",
T2, deviceProp.sharedMemPerBlock);
gm = 1;
T2 = 0;
} else
gm = 0;
grid_size = ceil((float)trainSize / blockSize);
// number of blocks (x,y) for a grid
grid.x = grid_size;
grid.y = 1;
// number of threads (x,y) for each block
threads.x = blockSize;
threads.y = 1;
if(verbose_mode > 0){
printf("\tGrid_size_x: %d, number_of_threads_x: %d \n", grid.x,
threads.x);
printf("\tGrid_size_y: %d, number_of_threads_y: %d \n\n", grid.y,
threads.y);
}
for (int k = 0; k < testSize; k++) {
hipMemset(d_test, 0, n_feat * window_size * sizeof(float));
hipMemcpy(d_test, h_test + k * (n_feat * window_size),
n_feat * window_size * sizeof(float),
hipMemcpyHostToDevice);
if (strcmp(distance_type, "DTW") == 0){ // DTW distance
switch (foldit(window_size)) {
case 0:hipLaunchKernelGGL(( MD_DTW_D<64>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 1:hipLaunchKernelGGL(( MD_DTW_D<128>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 2:hipLaunchKernelGGL(( MD_DTW_D<256>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 3:hipLaunchKernelGGL(( MD_DTW_D<512>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 4:hipLaunchKernelGGL(( MD_DTW_D<1024>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 5:hipLaunchKernelGGL(( MD_DTW_D<2048>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 6:hipLaunchKernelGGL(( MD_DTW_D<4096>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 7:hipLaunchKernelGGL(( MD_DTW_D<8192>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 8:hipLaunchKernelGGL(( MD_DTW_D<16384>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
default: printf("No kernel exists for %d window_size\n", window_size); break;
}
}
else
hipLaunchKernelGGL(( MD_ED_D) , dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
// hipDeviceSynchronize(); // it may be avoided if there's not printf
// in the kernel function
hipMemcpy(h_Out, d_Out, trainSize * sizeof(float),
hipMemcpyDeviceToHost);
min = min_arr(h_Out, trainSize, minI);
if (trainLabels[*minI] != testLabels[k])
err++;
if (verbose_mode > 0 && verbose_mode < testSize) {
if (k % verbose_mode == 0)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
else if (k == testSize-1)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
}
}
free(minI);
return err;
}
/**
 * \brief The function `MDD_SIM_MES_GPU` is a wrapper function used for computing the GPU dependent multidimensional similarity measure for the sub-sequence similarity search task.
* \param nss Number of sub-sequences to search
* \param *d_t_series Device vector containing the first time series
* \param *d_q_series Device vector containing the time series to compare against `*instance`
* \param t_size Length of the time series `*t_series`
* \param q_size Length of the time series `*q_series`
* \param n_feat Number of variables for the two MTS
* \param blockSize Number of threads to use for comparing the MTS
* \param deviceProp CUDA object containing several information about its own device
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *owp Support vector containing all the computed distances
* \param *d_owp Device support vector containing all the comparing
* \param *ind_min_val Index containing the minimum value obtained by comparing `*q_series` over `*t_series`
* \return minimum value obtained by comparing `*q_series` over `*t_series`
*/
__host__ float MDD_SIM_MES_GPU(int nss, float *d_t_series, float *d_q_series, int t_size, int q_size, int n_feat, int blockSize, hipDeviceProp_t deviceProp, char *distance_type, int verbose_mode, float *owp, float *d_owp, int *ind_min_val){
float grid_size, min = 9999.99;
dim3 grid;
dim3 threads;
// Setting CUDA variables and structure
grid_size = ceil((double)nss / blockSize);
// number of blocks (x,y) for a grid
grid.x = grid_size;
grid.y = 1;
// number of threads (x,y) for each block
threads.x = blockSize;
threads.y = 1;
float T2 = (n_feat * q_size) * sizeof(float);
int gm = 0;
if (T2 > deviceProp.sharedMemPerBlock) {
    printf("\tWarning: The T2 test time series (%f bytes) doesn't fit into the shared "
           "memory (%lu bytes), so it will be allocated into the global "
"memory\n",
T2, deviceProp.sharedMemPerBlock);
gm = 1;
T2 = 0;
} else
gm = 0;
if(verbose_mode > 0){
printf("\tGrid_size_x: %d, number_of_threads_x: %d \n", grid.x,
threads.x);
printf("\tGrid_size_y: %d, number_of_threads_y: %d \n\n", grid.y,
threads.y);
}
if (strcmp(distance_type, "DTW") == 0){ // DTW distance
switch (foldit(q_size)) {
case 0:hipLaunchKernelGGL(( MD_DTW_D<64>), dim3(grid), dim3(threads), T2, 0, d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 1:hipLaunchKernelGGL(( MD_DTW_D<128>), dim3(grid), dim3(threads), T2, 0, d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 2:hipLaunchKernelGGL(( MD_DTW_D<256>), dim3(grid), dim3(threads), T2, 0, d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 3:hipLaunchKernelGGL(( MD_DTW_D<512>), dim3(grid), dim3(threads), T2, 0, d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 4:hipLaunchKernelGGL(( MD_DTW_D<1024>), dim3(grid), dim3(threads), T2, 0, d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 5:hipLaunchKernelGGL(( MD_DTW_D<2048>), dim3(grid), dim3(threads), T2, 0, d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 6:hipLaunchKernelGGL(( MD_DTW_D<4096>), dim3(grid), dim3(threads), T2, 0, d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 7:hipLaunchKernelGGL(( MD_DTW_D<8192>), dim3(grid), dim3(threads), T2, 0, d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 8:hipLaunchKernelGGL(( MD_DTW_D<16384>), dim3(grid), dim3(threads), T2, 0, d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
}
}
else
MD_ED_D << <grid, threads, T2>>> (d_t_series, d_q_series, t_size, q_size,
n_feat, d_owp, 1, gm);
hipMemcpy(owp, d_owp, nss * sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < nss; ++i) {
if (verbose_mode > 0 && verbose_mode < nss) {
if (i % verbose_mode == 0)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
      else if (i == nss - 1)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
}
}
min = min_arr(owp, nss, ind_min_val);
return min;
}
/**
 * \brief The function `MDI_SIM_MES_GPU` is a wrapper function used for computing the GPU independent multidimensional similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
* \param *d_train Vector containing the data for the train set stored in the GPU device
* \param *d_test Vector containing the data for the test set stored in the GPU device
 * \param *d_Out Vector containing temporary results on the device
 * \param *h_Out Vector containing temporary results on the host
 * \param window_size Length of the time series stored into both train and test sets
* \param n_feat Number of variables for the time series stored into both train and test set
* \param blockSize Number of threads to use for comparing the MTS
* \param deviceProp CUDA object containing several information about its own device
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \return The number of misclassifications
*/
__host__ float MDI_SIM_MES_GPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, float *d_train, float *d_test, float *d_Out, float *h_Out, int window_size, int n_feat, int blockSize, hipDeviceProp_t deviceProp, char *distance_type, int verbose_mode){
float grid_size, min = 9999.99;
dim3 grid;
dim3 threads;
int *minI = (int *)malloc(sizeof(int));
int err = 0;
grid_size = ceil((float)(trainSize * n_feat) / blockSize);
  //the way to compute this measure can eventually be changed
  //according to the logic implemented in the MD_DTW_I function
float dim_row = floor((float)blockSize / n_feat);
float dim_col = n_feat;
//block_size < n_feat
if (dim_row == 0){
    printf("Warning: The computed number of thread rows per block is %f! Note: it has been set to 1 by default so that the execution does not fail. Please increase the number of threads!\n", dim_row);
dim_row = 1;
}
// number of blocks (x,y) for a grid
grid.x = grid_size;
grid.y = 1;
// number of threads (x,y) for each block
threads.x = dim_row;
threads.y = dim_col;
float T2 = ((threads.x * threads.y) + (n_feat * window_size)) *
sizeof(float);
int gm = 0;
if (T2 > deviceProp.sharedMemPerBlock) {
    printf("\tWarning: The T2 test time series (%f bytes) doesn't fit into the shared "
           "memory (%lu bytes), so it will be allocated into the global "
"memory\n",
T2, deviceProp.sharedMemPerBlock);
gm = 1;
T2 = 0;
T2 = (threads.x * threads.y) * sizeof(float);
} else
gm = 0;
if(verbose_mode > 0){
printf("\tGrid_size_x: %d, number_of_threads_x: %d \n", grid.x,
threads.x);
printf("\tGrid_size_y: %d, number_of_threads_y: %d \n\n", grid.y,
threads.y);
}
/*
float sh_mem = ((threads.x * threads.y) + (n_feat * window_size)) *
sizeof(float);*/
for (int k = 0; k < testSize; k++) {
hipMemcpy(d_test, h_test + k * (n_feat * window_size),
n_feat * window_size * sizeof(float),
hipMemcpyHostToDevice);
if (strcmp(distance_type, "DTW") == 0){ // DTW distance
switch (foldit(window_size)) {
case 0:hipLaunchKernelGGL(( MD_DTW_I<64>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 1:hipLaunchKernelGGL(( MD_DTW_I<128>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 2:hipLaunchKernelGGL(( MD_DTW_I<256>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 3:hipLaunchKernelGGL(( MD_DTW_I<512>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 4:hipLaunchKernelGGL(( MD_DTW_I<1024>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 5:hipLaunchKernelGGL(( MD_DTW_I<2048>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 6:hipLaunchKernelGGL(( MD_DTW_I<4096>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 7:hipLaunchKernelGGL(( MD_DTW_I<8192>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 8:hipLaunchKernelGGL(( MD_DTW_I<16384>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
}
}
else
MD_ED_I << <grid, threads, T2>>>
(d_train, d_test, trainSize, window_size, n_feat, d_Out, 0, gm);
hipDeviceSynchronize();
hipMemcpy(h_Out, d_Out, trainSize * sizeof(float),
hipMemcpyDeviceToHost);
min = min_arr(h_Out, trainSize, minI);
if (trainLabels[*minI] != testLabels[k])
err++;
if (verbose_mode > 0 && verbose_mode < testSize) {
if (k % verbose_mode == 0)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
else if (k == testSize-1)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
}
}
free(minI);
return err;
}
/**
 * \brief The function `MDI_SIM_MES_GPU` is a wrapper function used for computing the GPU independent multidimensional similarity measure for the sub-sequence similarity search task.
* \param nss Number of sub-sequences to search
* \param *d_t_series Device vector containing the first time series
* \param *d_q_series Device vector containing the time series to compare against `*instance`
* \param t_size Length of the time series `*t_series`
* \param q_size Length of the time series `*q_series`
* \param n_feat Number of variables for the two MTS
* \param blockSize Number of threads to use for comparing the MTS
* \param deviceProp CUDA object containing several information about its own device
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *owp Support vector containing all the computed distances
* \param *d_owp Device support vector containing all the comparing
* \param *ind_min_val Index containing the minimum value obtained by comparing `*q_series` over `*t_series`
* \return minimum value obtained by comparing `*q_series` over `*t_series`
*/
__host__ float MDI_SIM_MES_GPU(int nss, float *d_t_series, float *d_q_series, int t_size, int q_size, int n_feat, int blockSize, hipDeviceProp_t deviceProp, char *distance_type, int verbose_mode, float *owp, float *d_owp, int *ind_min_val){
float grid_size, min = 9999.99;
dim3 grid;
dim3 threads;
// Setting CUDA variables and structure
grid_size = ceil((float)(nss * n_feat) / blockSize);
float dim_row = floor((float)blockSize / n_feat);
float dim_col = n_feat;
// number of blocks (x,y) for a grid
grid.x = grid_size;
grid.y = 1;
// number of threads (x,y) for each block
threads.x = dim_row;
threads.y = dim_col;
int gm = 0;
printf("\tGrid_size_x: %d, number_of_threads_x: %d \n", grid.x,
threads.x);
printf("\tGrid_size_y: %d, number_of_threads_y: %d \n\n", grid.y,
threads.y);
  float sh_mem = ((threads.x * threads.y) + (n_feat * q_size)) * sizeof(float); // shared memory holds the query (q_size elements per variable), matching the kernel
if (strcmp(distance_type, "DTW") == 0){ // DTW distance
switch (foldit(q_size)) {
case 0:hipLaunchKernelGGL(( MD_DTW_I<64>), dim3(grid), dim3(threads), sh_mem, 0, d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 1:hipLaunchKernelGGL(( MD_DTW_I<128>), dim3(grid), dim3(threads), sh_mem, 0, d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 2:hipLaunchKernelGGL(( MD_DTW_I<256>), dim3(grid), dim3(threads), sh_mem, 0, d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 3:hipLaunchKernelGGL(( MD_DTW_I<512>), dim3(grid), dim3(threads), sh_mem, 0, d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 4:hipLaunchKernelGGL(( MD_DTW_I<1024>), dim3(grid), dim3(threads), sh_mem, 0, d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 5:hipLaunchKernelGGL(( MD_DTW_I<2048>), dim3(grid), dim3(threads), sh_mem, 0, d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 6:hipLaunchKernelGGL(( MD_DTW_I<4096>), dim3(grid), dim3(threads), sh_mem, 0, d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 7:hipLaunchKernelGGL(( MD_DTW_I<8192>), dim3(grid), dim3(threads), sh_mem, 0, d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 8:hipLaunchKernelGGL(( MD_DTW_I<16384>), dim3(grid), dim3(threads), sh_mem, 0, d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
}
}
else
MD_ED_I << <grid, threads, sh_mem>>>
(d_t_series, d_q_series, t_size, q_size, n_feat, d_owp, 1, gm);
hipMemcpy(owp, d_owp, nss * sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < nss; ++i) {
if (verbose_mode > 0 && verbose_mode < nss) {
if (i % verbose_mode == 0)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
      else if (i == nss - 1)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
}
}
min = min_arr(owp, nss, ind_min_val);
return min;
}
/**
 * \brief The function `MDR_SIM_MES_GPU` is a wrapper function used for computing the GPU multidimensional rotation-invariant similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
* \param *d_train Vector containing the data for the train set stored in the GPU device
* \param *d_test Vector containing the data for the test set stored in the GPU device
 * \param *d_Out Vector containing temporary results on the device
 * \param *h_Out Vector containing temporary results on the host
 * \param window_size Length of the time series stored into both train and test sets
* \param n_feat Number of variables for the time series stored into both train and test set
* \param blockSize Number of threads to use for comparing the MTS
* \param deviceProp CUDA object containing several information about its own device
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *err The number of misclassifications using the basic similarity measure
 * \param *errNR The number of misclassifications using the rotation-invariant similarity measure
*/
__host__ void MDR_SIM_MES_GPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, float *d_train, float *d_test, float *d_Out, float *h_Out, int window_size, int n_feat, int blockSize, hipDeviceProp_t deviceProp, char *distance_type, int verbose_mode, int *err, int *errNR){
float grid_size, min = 9999.99,minNR = 99999.99;
dim3 grid;
dim3 threads;
int minINR = 0, minI = 0;
float T2 = (n_feat * window_size) * sizeof(float);
int gm = 0;
if (T2 > deviceProp.sharedMemPerBlock) {
    printf("\tWarning: The T2 test time series (%f bytes) doesn't fit into the shared "
           "memory (%lu bytes), so it will be allocated into the global "
"memory\n",
T2, deviceProp.sharedMemPerBlock);
gm = 1;
T2 = 0;
} else
gm = 0;
grid_size = ceil((float)trainSize * window_size / blockSize);
// number of blocks (x,y) for a grid
grid.x = grid_size;
grid.y = 1;
// number of threads (x,y) for each block
threads.x = blockSize;
threads.y = 1;
if(verbose_mode > 0){
printf("\tGrid_size_x: %d, number_of_threads_x: %d \n", grid.x,
threads.x);
printf("\tGrid_size_y: %d, number_of_threads_y: %d \n\n", grid.y,
threads.y);
}
for (int k = 0; k < testSize; k++) {
hipMemcpy(d_test, h_test + (k * n_feat * window_size),
n_feat * window_size * sizeof(float),
hipMemcpyHostToDevice);
if (strcmp(distance_type, "DTW") == 0){ // DTW distance
switch (foldit(window_size)) {
case 0:hipLaunchKernelGGL(( rMD_DTW_D<64>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 1:hipLaunchKernelGGL(( rMD_DTW_D<128>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 2:hipLaunchKernelGGL(( rMD_DTW_D<256>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 3:hipLaunchKernelGGL(( rMD_DTW_D<512>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 4:hipLaunchKernelGGL(( rMD_DTW_D<1024>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 5:hipLaunchKernelGGL(( rMD_DTW_D<2048>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 6:hipLaunchKernelGGL(( rMD_DTW_D<4096>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 7:hipLaunchKernelGGL(( rMD_DTW_D<8192>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 8:hipLaunchKernelGGL(( rMD_DTW_D<16384>), dim3(grid), dim3(threads), T2, 0, d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
}
}
else
rMD_ED_D << <grid, threads, T2>>>
(d_train, d_test, window_size, n_feat, d_Out, trainSize, gm);
hipDeviceSynchronize();
hipMemcpy(h_Out, d_Out, trainSize * window_size * sizeof(float),
hipMemcpyDeviceToHost);
min = 9999999999.99;
minI = -1;
minINR = -1;
minNR = 99999999999.99;
int i = 0;
for (int j = 0; j < trainSize; j++) {
if (h_Out[j * window_size] < minNR) {
minNR = h_Out[j * window_size];
minINR = j;
}
for (i = 0; i < window_size; i++) {
int t = j * window_size + i;
if (h_Out[t] < min) {
min = h_Out[t];
minI = j;
}
}
}
if (trainLabels[minI] != testLabels[k])
(*err)++;
if (trainLabels[minINR] != testLabels[k])
(*errNR)++;
if (verbose_mode > 0 && verbose_mode < testSize) {
      if (k % verbose_mode == 0)
        printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f \t\t NRI: %d\t%3.6f\n", k,
               testLabels[k], trainLabels[minI], min,
               trainLabels[minINR], minNR);
      else if (k == testSize-1)
        printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f \t\t NRI: %d\t%3.6f\n", k,
               testLabels[k], trainLabels[minI], min,
               trainLabels[minINR], minNR);
}
}
} | e4600b36ad4ff644e5c398d5110152657a0712ea.cu | #include "../include/header.h"
using namespace std;
/**
 * \brief The function `stdDev` computes the `standard deviation` of a given vector allocated on the CUDA device.
*
* The following function computes the `standard deviation` of a given input vector.
* \param *data Input vector
* \param n Size of the vector
* \param *avg `Mean` computed on the input vector
* \return `Standard deviation` computed on the input vector
*/
__device__ float stdDev(float *data, int n, float *avg) {
printf("N_SAMPLE: %d\n", n);
printf("DATA_SIZE: %d\n", sizeof(data));
float mean = 0.0, sum_deviation = 0.0;
int i;
for (i = 0; i < n; ++i) {
mean += data[i];
}
mean = mean / n;
*avg = mean;
for (i = 0; i < n; ++i)
sum_deviation += (data[i] - mean) * (data[i] - mean);
return sqrt(sum_deviation / (n - 1));
}
/**
* \brief The kernel function `MD_DTW_D` computes the `Dependent-Multi Dimensional Dynamic Time Warping` distance (D-MDDTW).
*
* The following kernel function computes the D-MDDTW taking advantage of the GPU, by using a specific number of threads for block.
It considers the comparison of many Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the only unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
For more information about how it's computed, refer to the following link: http://stats.stackexchange.com/questions/184977/multivariate-time-series-euclidean-distance
* \param *S Unrolled vector containing `trainSize` number of MTS
* \param *T Unrolled vector representing the second time Series to compare against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
* \param window_size Length of the two given MTS
* \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
* \param task Integer discriminating the task to perform (e.g., 0: CLASSIFICATION, 1:SUBSEQUENCE SEARCH)
* \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
template<int WS>
__global__ void MD_DTW_D(float *S, float *T, int trainSize, int window_size, int dimensions,
float *data_out, int task, int gm) {
long long int k, l, g;
long long int i, j, p;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float min_nb = 0;
float array[WS][2];
if (gm == 0) {
// query timeseries
extern __shared__ float T2[];
int t, offset;
if (task == 0) {
offset = window_size;
int wind = dimensions * window_size;
t = idx * wind;
if ((idx * wind) + wind > trainSize * wind)
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
} else {
offset = trainSize;
t = idx;
if ((idx + window_size) > trainSize)
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
}
k = 0;
l = 1;
    // computing first row (instance versus query)
for (i = 0; i < window_size; i++) {
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
if (i == 0)
array[i][k] += pow((S[t + p * offset] - T2[p * window_size]), 2);
else
array[i][k] += pow((S[t + p * offset] - T2[p * window_size + i]), 2);
}
if (i != 0)
array[i][k] += array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * offset + j] - T2[p * window_size + i]), 2);
array[i][k] += array[i][l];
for (i = 1; i < window_size; i++) {
array[i][k] = 0.0;
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * offset + j] - T2[p * window_size + i]), 2);
array[i][k] += min_nb;
}
g = k;
k = l;
l = g;
}
data_out[idx] = array[window_size - 1][g];
} else {
int t, offset;
if (task == 0) {
offset = window_size;
int wind = dimensions * window_size;
t = idx * wind;
if ((idx * wind) + wind > trainSize * wind)
return;
} else {
offset = trainSize;
t = idx;
if ((idx + window_size) > trainSize)
return;
}
k = 0;
l = 1;
    // computing first row (instance versus query)
for (i = 0; i < window_size; i++) {
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
if (i == 0)
array[i][k] += pow((S[t + p * offset] - T[p * window_size]), 2);
else
array[i][k] += pow((S[t + p * offset] - T[p * window_size + i]), 2);
}
if (i != 0)
array[i][k] += array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * offset + j] - T[p * window_size + i]), 2);
array[i][k] += array[i][l];
for (i = 1; i < window_size; i++) {
array[i][k] = 0.0;
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * offset + j] - T[p * window_size + i]), 2);
array[i][k] += min_nb;
}
g = k;
k = l;
l = g;
}
data_out[idx] = array[window_size - 1][g];
}
}
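/* Illustrative launch sketch (hypothetical host code, CLASSIFICATION task):
 * one thread scores one training MTS, the <512> instantiation assumes
 * window_size <= 512, and the dynamic shared memory holds the whole query
 * when gm == 0. `d_train`, `d_test` and `d_out` are device buffers.
 *
 *   dim3 threads(512, 1);
 *   dim3 grid((trainSize + threads.x - 1) / threads.x, 1);
 *   size_t sh_mem = n_feat * window_size * sizeof(float);
 *   MD_DTW_D<512><<<grid, threads, sh_mem>>>(d_train, d_test, trainSize,
 *                                            window_size, n_feat, d_out, 0, 0);
 */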
/**
 * \brief The kernel function `MD_DTW_I` computes the `Independent Multi Dimensional-Dynamic Time Warping` distance (I-MDDTW).
*
* The following kernel function computes the I-MDDTW taking advantage of the GPU, by using a specific number of threads for block.
It considers the comparison of many Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the only unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
For more information about how it's computed, refer to the following link: http://stats.stackexchange.com/questions/184977/multivariate-time-series-euclidean-distance
* \param *S Unrolled vector containing `trainSize` number of MTS
* \param *T Unrolled vector representing the second time Series to compare against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
* \param window_size Length of the two given MTS
* \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
 * \param task Integer discriminating the task to perform (e.g., 0: CLASSIFICATION, 1:SUBSEQUENCE SEARCH)
 * \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
template<int WS>
__global__ void MD_DTW_I(float *S, float *T, int trainSize, int window_size, int dimensions,
float *data_out, int task, int gm) {
int idx, offset_x;
long long int i, j;
long long int k, l, g;
float min_nb = 0;
float array[WS][2];
// float *T2 = 0;
// float *DTW_single_dim = 0;
if(gm == 0){
extern __shared__ float sh_mem[];
float *T2 = (float *)sh_mem;
float *DTW_single_dim =
(float *)&sh_mem[dimensions *
window_size]; // offset on the shared memory for the segment T2
if (task == 0) {
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x = ((blockDim.x * blockDim.y * window_size) * blockIdx.x) + idx * window_size;
if (((blockDim.x * blockDim.y * blockIdx.x) + idx) >=
trainSize * dimensions) // 120=train_size
return;
} else { // SUBSEQ_SEARCH
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x =
(blockDim.x * blockIdx.x) +
((threadIdx.y * trainSize) +
threadIdx.x); // use blockIdx and other measure to set well the offset
if ((idx + window_size) > trainSize)
return;
}
if (idx == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
*(T2 + (window_size * i + j)) = T[window_size * i + j];
}
__syncthreads();
k = 0;
l = 1;
for (i = 0; i < window_size; i++) {
if (i == 0)
array[i][k] = pow((S[offset_x] - T2[window_size * threadIdx.y]), 2);
else
array[i][k] =
pow((S[offset_x] - T2[window_size * threadIdx.y + i]), 2) + array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] =
pow((S[offset_x + j] - T2[window_size * threadIdx.y + i]), 2) + array[i][l];
for (i = 1; i < window_size; i++) {
double a = array[i - 1][l];
double b = array[i][l];
double c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
array[i][k] =
pow((S[offset_x + j] - T2[window_size * threadIdx.y + i]), 2) + min_nb;
}
g = k;
k = l;
l = g;
}
DTW_single_dim[idx] = array[window_size - 1][g];
__syncthreads();
if (idx == 0) {
for (i = 0; i < blockDim.x; i++) {
data_out[(blockIdx.x * blockDim.x) + i] = 0.0;
for (j = 0; j < blockDim.y; j++) {
data_out[(blockIdx.x * blockDim.x) + i] +=
DTW_single_dim[i * dimensions + j];
}
}
}
}
else
{
extern __shared__ float DTW_single_dim[];
if (task == 0) {
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x = ((blockDim.x * blockDim.y * window_size) * blockIdx.x) + idx * window_size;
if (((blockDim.x * blockDim.y * blockIdx.x) + idx) >=
trainSize * dimensions) // 120=train_size
return;
} else { // SUBSEQ_SEARCH
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x =
(blockDim.x * blockIdx.x) +
((threadIdx.y * trainSize) +
threadIdx.x); // use blockIdx and other measure to set well the offset
if ((idx + window_size) > trainSize)
return;
}
k = 0;
l = 1;
for (i = 0; i < window_size; i++) {
if (i == 0)
array[i][k] = pow((S[offset_x] - T[window_size * threadIdx.y]), 2);
else
array[i][k] =
pow((S[offset_x] - T[window_size * threadIdx.y + i]), 2) + array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] =
pow((S[offset_x + j] - T[window_size * threadIdx.y + i]), 2) + array[i][l];
for (i = 1; i < window_size; i++) {
double a = array[i - 1][l];
double b = array[i][l];
double c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
array[i][k] =
pow((S[offset_x + j] - T[window_size * threadIdx.y + i]), 2) + min_nb;
}
g = k;
k = l;
l = g;
}
DTW_single_dim[idx] = array[window_size - 1][g];
__syncthreads();
if (idx == 0) {
for (i = 0; i < blockDim.x; i++) {
data_out[(blockIdx.x * blockDim.x) + i] = 0.0;
for (j = 0; j < blockDim.y; j++) {
data_out[(blockIdx.x * blockDim.x) + i] +=
DTW_single_dim[i * dimensions + j];
}
}
}
}
}
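/*
 * Illustrative host-side launch sketch for `MD_DTW_I` (not part of the original
 * source; `blockSize`, `d_train`, `d_query` and `d_Out` are assumed names).
 * The kernel expects a 2D block in which threadIdx.x indexes the MTS handled by
 * the block and threadIdx.y indexes its variables, while the shared memory holds
 * the query plus one partial distance per thread. The template parameter must be
 * large enough to hold `window_size` (see `foldit` further below).
 *
 *   dim3 threads(blockSize / n_feat, n_feat);
 *   dim3 grid((trainSize + threads.x - 1) / threads.x, 1);
 *   size_t sh_mem = (n_feat * window_size + threads.x * threads.y) * sizeof(float);
 *   MD_DTW_I<512><<<grid, threads, sh_mem>>>(d_train, d_query, trainSize,
 *                                            window_size, n_feat, d_Out,
 *                                            0, 0);   // task = 0 (CLASSIFICATION), gm = 0 (shared memory)
 *   checkCUDAError("MD_DTW_I");
 */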
/**
* \brief The kernel function `rMD_DTW_D` computes the `Rotation Dependent-Multi Dimensional Dynamic Time Warping` distance (rD-MDDTW).
*
* The following kernel function computes the rD-MDDTW taking advantage of the GPU, by using a specific number of threads for block.
It considers the comparison of all the possible `punctual rotation` of the Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the only unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
* \param *S Unrolled vector containing `trainSize` number of MTS
 * \param *T Unrolled vector representing the second time series to compare against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
 * \param window_size Length of the two given MTS
 * \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
* \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
template<int WS>
__global__ void rMD_DTW_D(float *S, float *T, int trainSize, int window_size, int dimensions,
float *data_out, int gm) {
long long int k, l, g;
long long int i, j, p;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float min_nb = 0;
float array[WS][2];
if (gm == 0) {
extern __shared__ float T2[];
// offset training set
int s = dimensions * 2 * window_size * (idx / window_size);
int t = s + idx % window_size;
if (idx >= (trainSize * window_size)) //
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
k = 0;
l = 1;
for (i = 0; i < window_size; i++) {
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
if (i == 0)
array[i][k] += pow((S[t + p * 2 * window_size] - T2[p * window_size]), 2);
else
array[i][k] += pow((S[t + p * 2 * window_size] - T2[p * window_size + i]), 2);
}
if (i != 0)
array[i][k] += array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
array[i][k] += pow((S[t + p * 2 * window_size + j] - T2[p * window_size + i]), 2);
}
array[i][k] += array[i][l];
for (i = 1; i < window_size; i++) {
array[i][k] = 0.0;
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * 2 * window_size + j] - T2[p * window_size + i]), 2);
array[i][k] += min_nb;
}
g = k;
k = l;
l = g;
}
data_out[idx] = array[window_size - 1][g];
} else {
// offset training set
int s = dimensions * 2 * window_size * (idx / window_size);
int t = s + idx % window_size;
if (idx >= (trainSize * window_size)) //
return;
k = 0;
l = 1;
// computing first row (instace versus query)
for (i = 0; i < window_size; i++) {
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
if (i == 0)
array[i][k] += pow((S[t + p * 2 * window_size] - T[p * window_size]), 2);
else
array[i][k] += pow((S[t + p * 2 * window_size] - T[p * window_size + i]), 2);
}
if (i != 0)
array[i][k] += array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < window_size; j++) {
i = 0;
array[i][k] = 0.0;
for (p = 0; p < dimensions; p++) {
array[i][k] += pow((S[t + p * 2 * window_size + j] - T[p * window_size + i]), 2);
}
array[i][k] += array[i][l];
for (i = 1; i < window_size; i++) {
array[i][k] = 0.0;
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
for (p = 0; p < dimensions; p++)
array[i][k] += pow((S[t + p * 2 * window_size + j] - T[p * window_size + i]), 2);
array[i][k] += min_nb;
}
g = k;
k = l;
l = g;
}
data_out[idx] = array[window_size - 1][g];
}
}
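/*
 * Illustrative host-side launch sketch for `rMD_DTW_D` (not part of the original
 * source; variable names are assumptions). One thread is spawned per punctual
 * rotation, i.e. trainSize * window_size threads in total, and the train set is
 * expected to store each MTS twice per dimension (2 * window_size samples) so
 * that every rotation can be read with a simple offset. `d_Out` must therefore
 * hold trainSize * window_size distances; the rotation-invariant distance of a
 * training MTS is the minimum over its window_size entries.
 *
 *   dim3 threads(blockSize);
 *   dim3 grid((trainSize * window_size + blockSize - 1) / blockSize);
 *   size_t sh_mem = n_feat * window_size * sizeof(float);   // query T2 when gm == 0
 *   rMD_DTW_D<512><<<grid, threads, sh_mem>>>(d_train, d_query, trainSize,
 *                                             window_size, n_feat, d_Out, 0);
 *   checkCUDAError("rMD_DTW_D");
 */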
/**
* \brief The kernel function `MD_ED_D` computes the `Dependent-Multi Dimensional Euclidean` distance (D-MDE).
*
* The following kernel function computes the D-MDE taking advantage of the GPU, by using a specific number of threads for block.
It considers the comparison of many Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the only unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
For more information about how it's computed, refer to the following link: http://stats.stackexchange.com/questions/184977/multivariate-time-series-euclidean-distance
* \param *S Unrolled vector containing `trainSize` number of MTS
 * \param *T Unrolled vector representing the second time series to compare against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
* \param window_size Length of the two given MTS
* \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
* \param task Integer discriminating the task to perform (e.g., 0: CLASSIFICATION, 1:SUBSEQUENCE SEARCH)
* \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
__global__ void MD_ED_D(float *S, float *T, int trainSize, int window_size, int dimensions,
float *data_out, int task, int gm) {
long long int i, j, p;
float sumErr = 0, dd = 0;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (gm == 0) {
extern __shared__ float T2[];
int t, offset;
if (task == 0) {
offset = window_size;
int wind = dimensions * window_size;
t = idx * wind;
if ((idx * wind) + wind >
trainSize * wind) // CHANGE FORMULA 120=train_size
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
} else {
      // in this case 'trainSize' is the number of subsequences to search ('nss'),
      // i.e., the length of the dataset to scan
offset = trainSize;
t = idx;
if ((idx + window_size) > trainSize)
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
}
for (j = 0; j < window_size; j++) {
dd = 0;
for (p = 0; p < dimensions; p++)
dd += (S[(t + p * offset) + j] - T2[(p * window_size) + j]) *
(S[(t + p * offset) + j] - T2[(p * window_size) + j]);
sumErr += dd;
}
data_out[idx] = sqrt(sumErr);
} else {
int t, offset;
if (task == 0) {
offset = window_size;
int wind = dimensions * window_size;
t = idx * wind;
if ((idx * wind) + wind > trainSize * wind)
return;
} else {
      // in this case 'trainSize' is the number of subsequences to search ('nss'),
      // i.e., the length of the dataset to scan
offset = trainSize;
t = idx;
if ((idx + window_size) > trainSize)
return;
}
for (j = 0; j < window_size; j++) {
dd = 0;
for (p = 0; p < dimensions; p++)
dd += (S[(t + p * offset) + j] - T[(p * window_size) + j]) *
(S[(t + p * offset) + j] - T[(p * window_size) + j]);
sumErr += dd;
}
data_out[idx] = sqrt(sumErr);
}
}
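/*
 * Illustrative host-side launch sketch for `MD_ED_D` in the CLASSIFICATION task
 * (not part of the original source; variable names are assumptions). One thread
 * compares one training MTS against the query, so a 1D grid covering trainSize
 * threads is enough; when gm == 0 the query is staged in shared memory.
 *
 *   dim3 threads(blockSize);
 *   dim3 grid((trainSize + blockSize - 1) / blockSize);
 *   size_t sh_mem = n_feat * window_size * sizeof(float);
 *   MD_ED_D<<<grid, threads, sh_mem>>>(d_train, d_query, trainSize, window_size,
 *                                      n_feat, d_Out, 0, 0);   // task = 0, gm = 0
 *   checkCUDAError("MD_ED_D");
 */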
/**
* \brief The kernel function `MD_ED_I` computes the `Independent-Multi Dimensional Euclidean` distance (I-MDE).
*
* The following kernel function computes the I-MDE taking advantage of the GPU, by using a specific number of threads for block.
It considers the comparison of many Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the only unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
For more information about how it's computed, refer to the following link: http://stats.stackexchange.com/questions/184977/multivariate-time-series-euclidean-distance
* \param *S Unrolled vector containing `trainSize` number of MTS
 * \param *T Unrolled vector representing the second time series to compare against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
 * \param window_size Length of the two given MTS
 * \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
* \param task Integer discriminating the task to perform (e.g., 0: CLASSIFICATION, 1:SUBSEQUENCE SEARCH)
*/
__global__ void MD_ED_I(float *S, float *T, int trainSize, int window_size, int dimensions,
float *data_out, int task, int gm) {
int idx, offset_x;
float sumErr = 0;
long long int i, j;
if(gm == 0){
extern __shared__ float sh_mem[];
float *T2 = (float *)sh_mem;
float *DTW_single_dim =
(float *)&sh_mem[dimensions * window_size]; // offset on the shared memory
// for the segment T2
if (task == 0) {
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x = ((blockDim.x * blockDim.y * window_size) * blockIdx.x) +
idx * window_size;
if (((blockDim.x * blockDim.y * blockIdx.x) + idx) >=
trainSize * dimensions) // 120=train_size
return;
} else { // SUBSEQ_SEARCH
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x =
(blockDim.x * blockIdx.x) +
((threadIdx.y * trainSize) +
threadIdx.x); // use blockIdx and other measure to set well the offset
if ((idx + window_size) > trainSize)
return;
}
if (idx == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
*(T2 + (window_size * i + j)) = T[window_size * i + j];
}
__syncthreads();
for (j = 0; j < window_size; j++)
sumErr += (S[offset_x + j] - T2[window_size * threadIdx.y + j]) *
(S[offset_x + j] - T2[window_size * threadIdx.y + j]);
DTW_single_dim[idx] = sqrt(sumErr);
__syncthreads();
if (idx == 0) {
for (i = 0; i < blockDim.x; i++) {
data_out[(blockIdx.x * blockDim.x) + i] = 0.0;
for (j = 0; j < blockDim.y; j++) {
data_out[(blockIdx.x * blockDim.x) + i] +=
              DTW_single_dim[i * dimensions + j]; // review this formula
}
}
}
}
else {
extern __shared__ float DTW_single_dim[];
if (task == 0) {
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x = ((blockDim.x * blockDim.y * window_size) * blockIdx.x) +
idx * window_size;
if (((blockDim.x * blockDim.y * blockIdx.x) + idx) >=
trainSize * dimensions) // 120=train_size
return;
} else { // SUBSEQ_SEARCH
idx = threadIdx.x * dimensions + threadIdx.y;
offset_x =
(blockDim.x * blockIdx.x) +
((threadIdx.y * trainSize) +
threadIdx.x); // use blockIdx and other measure to set well the offset
if ((idx + window_size) > trainSize)
return;
}
// if (idx == 0) {
// for (i = 0; i < dimensions; i++)
// for (j = 0; j < window_size; j++)
// *(T2 + (window_size * i + j)) = T[window_size * i + j];
// }
// __syncthreads();
for (j = 0; j < window_size; j++)
sumErr += (S[offset_x + j] - T[window_size * threadIdx.y + j]) *
(S[offset_x + j] - T[window_size * threadIdx.y + j]);
DTW_single_dim[idx] = sqrt(sumErr);
__syncthreads();
if (idx == 0) {
for (i = 0; i < blockDim.x; i++) {
data_out[(blockIdx.x * blockDim.x) + i] = 0.0;
for (j = 0; j < blockDim.y; j++) {
data_out[(blockIdx.x * blockDim.x) + i] +=
              DTW_single_dim[i * dimensions + j]; // review this formula
}
}
}
}
}
/**
* \brief The kernel function `rMD_ED_D` computes the `Rotation Dependent-Multi Dimensional Euclidean` distance (rD-MDE).
*
* The following kernel function computes the rD-MDE taking advantage of the GPU, by using a specific number of threads for block.
It considers the comparison of all the possible `punctual rotation` of the Multivariate Time Series (MTS) stored into the unrolled vector `*S` against the only unrolled vector `*T`.
By exploiting the CUDA threads, this computation can be done very fast.
* \param *S Unrolled vector containing `trainSize` number of MTS
 * \param *T Unrolled vector representing the second time series to compare against `*S`
* \param window_size Length of the two given MTS
* \param dimensions Number of variables for the two MTS
* \param *data_out Vector containing the results achieved by comparing `*T` against `*S`
 * \param trainSize Number of MTS contained in the vector `*S`
* \param gm Integer indicating where to store the unrolled vector `*T` (e.g., 0:shared memory, 1: global memory)
*/
__global__ void rMD_ED_D(float *S, float *T, int window_size, int dimensions,
float *data_out, int trainSize, int gm) {
long long int i, j, p;
float sumErr = 0, dd = 0;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (gm == 0) {
extern __shared__ float T2[];
// offset training set
int s = dimensions * 2 * window_size * (idx / window_size);
int t = s + idx % window_size;
if (idx >= (trainSize * window_size)) //
return;
if (threadIdx.x == 0) {
for (i = 0; i < dimensions; i++)
for (j = 0; j < window_size; j++)
T2[window_size * i + j] = T[window_size * i + j];
}
__syncthreads();
for (j = 0; j < window_size; j++) {
dd = 0;
for (p = 0; p < dimensions; p++)
dd += (S[(t + p * 2 * window_size) + j] - T2[(p * window_size) + j]) *
(S[(t + p * 2 * window_size) + j] - T2[(p * window_size) + j]);
sumErr += dd;
}
data_out[idx] = sqrt(sumErr);
} else {
int s = dimensions * 2 * window_size * (idx / window_size);
int t = s + idx % window_size;
if (idx >= (trainSize * window_size))
return;
for (j = 0; j < window_size; j++) {
dd = 0;
for (p = 0; p < dimensions; p++)
dd += (S[(t + p * 2 * window_size) + j] - T[(p * window_size) + j]) *
(S[(t + p * 2 * window_size) + j] - T[(p * window_size) + j]);
sumErr += dd;
}
data_out[idx] = sqrt(sumErr);
}
}
/**
* \brief The function `checkFlagOpts` check out the correctness of the parameters for a given flag.
*
* The following function check out the correctness of the parameters for a given flag by counting the number of parameters.
* \param **input_args Vector containing all the command line parameters passed to the program
* \param num_args Vector containing the number of arguments passed to the program
* \param ind Current index parsed on `**input_args`
* \param num_opts Number of parameters to parse for the current flag stored into input_args[ind]
* \return Integer (0,1) indicating the corretness of the number of parameters for the current flag
*/
__host__ int checkFlagOpts(char **input_args, int num_args, int ind,
int num_opts) {
int count = 0;
char *pch = NULL;
if (ind + num_opts < num_args) { // it means a wrong number of options params
// and that there's no other flag option
while (pch == NULL && count <= num_opts) {
pch = strchr(input_args[ind], '-');
ind++;
count++;
}
if (count - 1 != num_opts)
return 0;
else
return 1;
} else if (ind + num_opts > num_args)
return 0;
else
return 1;
}
/**
* \brief The function `readFileSubSeq` allows to read several file formats for the `SUBSEQUENCE SEARCH` task.
*
* The following function allows to read several file format for the `SUBSEQUENCE SEARCH` task by providing in input several parameters.
* \param **file_name Vector containing the absolute paths for the files to read
* \param *ind_files Vector containing parsed indices for the file to read
* \param n_file Number of file to read
 * \param *t_series Vector that will contain the time series `*t`
 * \param t_size Length of the time series `*t_series`
 * \param *q_series Vector that will contain the time series `*q`
 * \param window_size Length of the query time series `*q_series`
* \param n_feat Number of variables for both time series
* \param read_mode Integer for handling different input file formats (for more information, refer to README)
*/
__host__ void readFileSubSeq(char **file_name, int *ind_files, int n_file,
float *t_series, int t_size, float *q_series,
int window_size, int n_feat, int read_mode) {
int i, k;
FILE **inputFile = NULL;
inputFile = (FILE **)malloc(n_file * sizeof(FILE *));
for (i = 0; i < n_file; i++) {
char *curr_file = file_name[ind_files[i]];
inputFile[i] = fopen(curr_file, "r");
if ( access(curr_file, F_OK ) == -1 ) {
fprintf(stderr, "Failed to open file: %s\n", curr_file);
exit(2);
}
}
float *tmp;
// dimension on x axis (columns) and time on y axis (rows)
if (read_mode == 0) {
tmp = (float *)malloc(n_feat * sizeof(float));
// reading t_series file
for (i = 0; i < t_size; i++) {
for (k = 0; k < n_feat; k++) {
if( fscanf(inputFile[0], "%f", &tmp[k]) < 1 ){
fprintf(stderr, "File reading error!\n");
exit(2);
}
t_series[(k * t_size) + i] = tmp[k];
}
}
// reading q_series file
for (i = 0; i < window_size; i++) {
for (k = 0; k < n_feat; k++) {
if( fscanf(inputFile[1], "%f", &tmp[k]) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
q_series[(k * window_size) + i] = tmp[k];
}
}
}
// time on x axis (row) and dimensions on y axis (columns)
else if (read_mode == 1) {
tmp = (float *)malloc(t_size * sizeof(float));
for (k = 0; k < n_feat; k++) {
for (i = 0; i < t_size; i++) {
        if( fscanf(inputFile[0], "%f", &tmp[i]) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
        t_series[(k * t_size) + i] = tmp[i];
}
}
free(tmp);
tmp = (float *)malloc(window_size * sizeof(float));
for (k = 0; k < n_feat; k++) {
for (i = 0; i < window_size; i++) {
        if( fscanf(inputFile[1], "%f", &tmp[i]) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
q_series[(k * window_size) + i] = tmp[i];
}
}
}
}
/**
* \brief The function `readFile` allows to read several file formats for the `CLASSIFICATION` task.
*
* The following function allows to read several file format for the `CLASSIFICATION` task by providing in input several parameters.
* \param **file_name Vector containing the absolute paths for the files to read
* \param *ind_files Vector containing parsed indices for the file to read
* \param read_mode Integer for handling different input file formats (for more information, refer to README)
* \param *data Vector for storing all the data read contained in the file
 * \param data_struct Struct containing some information about the data (e.g., dataset size, train size, etc.)
* \param window_size Length for the time series to be stored into `*data`
* \param *dataLabels Vector for storing all the label information contained in the file
* \param n_feat Number of variables for both time series
 * \param class_alg Integer for handling different reading modes which depends on the type of algorithm picked
*/
__host__ void readFile(char **file_name, int *ind_files, int n_file,
int read_mode, float *data, struct data data_struct,
int window_size, int *dataLabels, int n_feat,
int class_alg) {
FILE **inputFile = NULL;
inputFile = (FILE **)malloc(n_file * sizeof(FILE *));
for (int i = 0; i < n_file; i++) {
char *curr_file = file_name[ind_files[i]];
inputFile[i] = fopen(curr_file, "r");
if ( access(curr_file, F_OK ) == -1 ) {
      fprintf(stderr, "Failed to open file: %s\n", curr_file);
exit(2);
}
}
int i, j, k;
float label = 0;
// reading data from 1 big file
if (read_mode == 0) { // read_mode=0
// checking order input file
fseek(inputFile[0], 0L, SEEK_END);
int sz_file0 = ftell(inputFile[0]);
fseek(inputFile[0], 0L, SEEK_SET);
fseek(inputFile[1], 0L, SEEK_END);
int sz_file1 = ftell(inputFile[1]);
fseek(inputFile[1], 0L, SEEK_SET);
// obtaining indices on the basis of the files order
int lab_ind, data_ind;
if (sz_file0 > sz_file1) {
lab_ind = 1;
data_ind = 0;
} else {
lab_ind = 0;
data_ind = 1;
}
float tmp = 0;
// DIMENSION ON THE ROWS AND LENGTH ON COLS
for (i = 0; i < data_struct.tot_size; i++) {
if( fscanf(inputFile[lab_ind], "%f", &label) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
dataLabels[i] = (int)label;
for (k = 0; k < n_feat; k++) {
for (j = 0; j < window_size; j++) {
if( fscanf(inputFile[data_ind], "%f", &tmp) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
// MDT_D or MDT_I
if (class_alg < 2)
data[(n_feat * i * window_size) + (k * window_size) + j] = tmp;
else {
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) + j] =
tmp;
data[(n_feat * 2 * i * window_size) +
((2 * k * window_size) + window_size) + j] = tmp;
}
}
}
}
}
// reading from k-files
else if (read_mode == 1) {
float *tmp = (float *)malloc(n_feat * sizeof(float));
for (i = 0; i < data_struct.tot_size; i++) {
// reading labels
for (k = 0; k < n_feat; k++)
if( fscanf(inputFile[k], "%f", &label) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
dataLabels[i] = (int)label;
for (j = 0; j < window_size; j++) {
for (k = 0; k < n_feat; k++)
if( fscanf(inputFile[k], "%f", &tmp[k]) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
for (k = 0; k < n_feat; k++) {
// MDT_D or MDT_I
if (class_alg < 2)
data[(n_feat * i * window_size) + (k * window_size) + j] = tmp[k];
else {
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) + j] =
tmp[k];
data[(n_feat * 2 * i * window_size) +
((2 * k * window_size) + window_size) + j] = tmp[k];
}
}
}
}
} else {
float *tmp = (float *)malloc(window_size * sizeof(float));
int i = 0;
int size_arr[2] = {data_struct.train_size, data_struct.test_size};
for (int ll = 0; ll < n_file; ll++) {
for (int inn = 0; inn < size_arr[ll]; inn++) {
// reading data
for (k = 0; k < n_feat; k++) {
// reading labels from either train or test set
if( fscanf(inputFile[ll], "%f", &label) < 1){
fprintf(stderr, "File reading error!\n");
exit(2);
}
dataLabels[i] = (int)label;
for (j = 0; j < window_size; j++) {
if( fscanf(inputFile[ll], "%f", &tmp[j]) < 1){ // fd=0 data descript
fprintf(stderr, "File reading error!\n");
exit(2);
}
// MDT_D or MDT_I
if (class_alg < 2)
data[(n_feat * i * window_size) + (k * window_size) + j] = tmp[j];
else {
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) + j] =
tmp[j];
data[(n_feat * 2 * i * window_size) +
((2 * k * window_size) + window_size) + j] = tmp[j];
}
}
}
i++;
}
}
} // END ELSE
// Closing and deallocatin all files
for (k = 0; k < n_file; k++)
fclose(inputFile[k]);
free(inputFile);
}
/**
* \brief The function `createTrainingTestingSet` splits the dataset information into random train and test subsets.
*
* The following function splits the `data` and `label` information into random two different train and test subsets.
* \param *data Vector containing the data
* \param *dataLabels Vector containing the label
* \param dataSize Number of time series stored in the '*data'
 * \param window_size Length for the time series stored into '*data'
* \param n_feat Number of variables for the time series stored into '*data'
* \param *h_train Vector containing the data for the train set
* \param *trainLabels Vector containing the labels for the train set
* \param trainSize Number of time series to be stored in the train set
* \param *h_test Vector containing the data for the test set
* \param *testLabels Vector containing the labels for the test set
* \param testSize Number of time series to be stored in the test set
* \param *tInd Vector providing train and test indices to split data in train test sets
 * \param k_th_fold Index of the fold to use as the test set
 * \param class_mode Integer for handling different reading modes which depends on the type of algorithm picked.
*/
__host__ void createTrainingTestingSet(
float *data, int *dataLabels, int dataSize, int window_size, int n_feat,
float *h_train, int *trainLabels, int trainSize, float *h_test,
int *testLabels, int testSize, int *tInd, int k_th_fold, int class_mode) {
int i, j, k, i_train = 0, i_test = 0;
if (tInd != NULL) {
/* Creating Training and Testing set */
for (i = 0; i < dataSize; i++) {
// training set
if (tInd[i] != k_th_fold) {
trainLabels[i_train] = dataLabels[i];
for (j = 0; j < window_size; j++) {
for (k = 0; k < n_feat; k++) {
if (class_mode < 2) {
h_train[(n_feat * i_train * window_size) + (k * window_size) +
j] =
data[(n_feat * i * window_size) + (k * window_size) + j];
} else {
h_train[(n_feat * 2 * i_train * window_size) +
(2 * k * window_size) + j] =
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) +
j];
h_train[(n_feat * 2 * i_train * window_size) +
((2 * k * window_size) + window_size) + j] =
data[(n_feat * 2 * i * window_size) +
((2 * k * window_size) + window_size) + j];
}
}
}
i_train++;
}
// testing set
else {
testLabels[i_test] = dataLabels[i];
for (j = 0; j < window_size; j++) {
for (k = 0; k < n_feat; k++) {
if (class_mode < 2)
h_test[(window_size * n_feat * i_test) + window_size * k + j] =
data[(n_feat * i * window_size) + (k * window_size) + j];
else
h_test[(window_size * n_feat * i_test) + window_size * k + j] =
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) +
j];
}
}
i_test++;
}
}
} else {
int i = 0;
for (int i_train = 0; i < trainSize; i++) {
trainLabels[i_train] = dataLabels[i];
for (j = 0; j < window_size; j++) {
// reading data
for (k = 0; k < n_feat; k++) {
if (class_mode < 2)
h_train[(n_feat * i_train * window_size) + (k * window_size) + j] =
data[(n_feat * i * window_size) + (k * window_size) + j];
else {
h_train[(n_feat * 2 * i_train * window_size) +
(2 * k * window_size) + j] =
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) +
j];
h_train[(n_feat * 2 * i_train * window_size) +
((2 * k * window_size) + window_size) + j] =
data[(n_feat * 2 * i * window_size) +
((2 * k * window_size) + window_size) + j];
}
}
}
i_train++;
}
for (int i_test = 0; i_test < testSize; i++) {
testLabels[i_test] = dataLabels[i];
for (j = 0; j < window_size; j++) {
for (k = 0; k < n_feat; k++) {
if (class_mode < 2)
h_test[(window_size * n_feat * i_test) + window_size * k + j] =
data[(n_feat * i * window_size) + (k * window_size) + j];
else
h_test[(window_size * n_feat * i_test) + window_size * k + j] =
data[(n_feat * 2 * i * window_size) + (2 * k * window_size) +
j];
}
}
i_test++;
}
}
}
/**
 * \brief The function `cmpfunc` is a utility function for sorting vector values
 * \param *a Integer value
 * \param *b Integer value
 * \return Difference between `*a` and `*b`
*/
__host__ int cmpfunc(const void *a, const void *b) {
return (*(int *)a - *(int *)b);
}
/**
 * \brief The function `generateArray` fills an input array from a desired starting point.
 * \param size Size of the vector
 * \param *arrayG Vector to fill
 * \param offset Offset from where to start to fill `*arrayG`
*/
__host__ void generateArray(int size, int *arrayG, int offset) {
int i, j = 0;
if(offset > size - 1){
printf("The offset has to be smaller than the size of the array\n");
exit(-1);
}
for (i = offset; size > 0; i++) {
arrayG[j++] = i;
size--;
}
}
/**
 * \brief The function `findInd` fills an array with the indices at which a desired value occurs in an input array.
 * \param *array Vector where to search into
 * \param size Size of the vector
 * \param *arrayG Vector to fill with the indices of the matching positions
* \param g Value to find in the `*array`
*/
__host__ void findInd(int *array, int size, int *arrayG, int g) {
int i, j = 0;
for (i = 0; i < size; i++) {
if (array[i] == g) {
arrayG[j++] = i;
}
}
}
/**
 * \brief The function `unique_val` counts the number of unique values in an array
 * \param *array Vector where to search into
 * \param size Size of the vector
 * \return Number of unique values found into `*array`
*/
__host__ int unique_val(int *array, int size) {
int i;
qsort(array, size, sizeof(int), cmpfunc);
  int unique = 1; // in case we have only one element; it is unique!
for (i = 0;
i < size - 1 /*since we don't want to compare last element with junk*/;
i++) {
if (array[i] == array[i + 1]) {
continue;
} else {
unique++;
}
}
return unique;
}
/**
 * \brief The function `accumarray` is a utility function for the k-fold cross validation.
 * \param *array Vector where to search into
 * \param size Size of the vector
 * \param *val Output vector filled with the unique values found in `*array`
 * \return Vector of counts, one per unique value found in `*array`
*/
__host__ int *accumarray(int *array, int size, int *val) {
int i, j = 0;
int u_val = unique_val(array, size);
int *nS = (int *)calloc(u_val, sizeof(int));
// memset(nS, 0, u_val * sizeof(int));
for (i = 0; i < size; i++) {
    if (i < size - 1 && array[i] == array[i + 1]) {
nS[j]++;
continue;
} else {
val[j] = array[i];
nS[j]++;
j++;
}
}
return nS;
}
/**
 * \brief The function `shuffle` is a function for shuffling the data contained into an array.
* \param *array Vector to shuffle
* \param array_size Size of the vector
* \param shuff_size Shuffle factor size
*/
__host__ void shuffle(int *array, size_t array_size, size_t shuff_size) {
if (array_size > 1) {
size_t i;
for (i = 0; i < shuff_size - 1; i++) {
size_t j = i + rand() / (RAND_MAX / (array_size - i) + 1);
int t = array[j];
array[j] = array[i];
array[i] = t;
}
}
}
/**
 * \brief The function `idAssign` is a utility function for the k-fold cross validation.
* \param *perm Vector of permutations
* \param size_perm Size of the permutations
* \param *group Support vector
* \param size_group Size of the support vector
* \param *rand_ind Vector of random value
 * \param *h Support vector
* \param *tInd Vector of indices values for splitting the dataset into train and test set
*/
__host__ void idAssign(int *perm, int size_perm, int *group, int size_group,
int *rand_ind, int *h, int *tInd) {
int i;
int group_perm;
for (i = 0; i < size_group; i++) {
group_perm = perm[group[i]];
tInd[h[rand_ind[i]]] = group_perm;
}
}
/**
* \brief The function `checkCUDAError` display on the standard output more information about a type of CUDA error.
* \param *msg Message to display along with the error information provided by CUDA
*/
__host__ void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Cuda error: %s %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
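/*
 * Typical usage sketch (illustrative only): the check is placed right after a
 * kernel launch, optionally preceded by a cudaDeviceSynchronize() so that
 * asynchronous launch failures are also reported.
 *
 *   some_kernel<<<grid, threads>>>(...);   // hypothetical kernel
 *   cudaDeviceSynchronize();
 *   checkCUDAError("some_kernel launch");
 */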
/**
* \brief The function `crossvalind_Kfold` generates Cross-Validation indices for splitting the dataset into train and test set.
* \param *label Vector of labels
* \param N Size of the vector `*label`
* \param K Number of fold to generate
 * \param flag_shuffle Flag enabling the shuffling of the fold assignments
 * \return Vector of fold indices (values in [0, K-1]) assigning each observation to one of the K folds.
*/
__host__ int *crossvalind_Kfold(int *label, int N, int K, int flag_shuffle) {
int *label_copy = (int *)malloc(N * sizeof(int));
memcpy(label_copy, label, N * sizeof(int));
// output
int *tInd = (int *)calloc(N, sizeof(int));
// memset(tInd, 0, N * sizeof(int));
int ul = unique_val(label_copy, N);
int *arr_val = (int *)malloc(ul * sizeof(int));
int *nS = accumarray(label_copy, N, arr_val);
int i, j;
int *pq = (int *)malloc(K * sizeof(int));
generateArray(K, pq, 0);
for (i = 0; i < ul; i++) {
int *randInd = (int *)malloc(nS[i] * sizeof(int));
generateArray(nS[i], randInd, 0);
int *q = (int *)malloc(nS[i] * sizeof(int));
int *h = (int *)malloc(nS[i] * sizeof(int));
findInd(label, N, h, arr_val[i]);
for (j = 0; j < nS[i]; j++) {
float val = (float)(K * (j + 1)) / nS[i]; // j+1 because we need no zero
// values; MATLAB: q =
// ceil(K*(1:nS(g))/nS(g));
q[j] = (int)ceil(val) - 1; // C indices start from 0
}
if (flag_shuffle == 1) {
shuffle(pq, K, K);
shuffle(randInd, nS[i], nS[i]);
}
idAssign(pq, K, q, nS[i], randInd, h, tInd);
free(randInd);
free(q);
free(h);
}
return tInd;
}
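/*
 * Illustrative k-fold usage sketch (variable names are assumptions): the fold
 * indices returned here feed `createTrainingTestingSet`, while `countVal` gives
 * the size of the test split for each fold.
 *
 *   int *tInd = crossvalind_Kfold(dataLabels, dataSize, k_folds, 1);
 *   for (int f = 0; f < k_folds; f++) {
 *     int testSize  = countVal(tInd, dataSize, f);
 *     int trainSize = dataSize - testSize;
 *     // allocate h_train/h_test and their label vectors, then:
 *     createTrainingTestingSet(data, dataLabels, dataSize, window_size, n_feat,
 *                              h_train, trainLabels, trainSize,
 *                              h_test, testLabels, testSize, tInd, f, class_mode);
 *   }
 *   free(tInd);
 */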
/**
 * \brief The function `countVal` counts the number of occurrences of a desired value stored into an array.
 * \param *data Vector where to search
 * \param N Size of the vector `*data`
 * \param key Desired value to search for into `*data`
 * \return Number of occurrences found for the `key` into `*data`
*/
__host__ int countVal(int *data, int N, int key) {
int i, cnt = 0;
for (i = 0; i < N; i++) {
if (data[i] == key)
cnt++;
}
return cnt;
}
/**
 * \brief The function `standard_deviation` computes the `standard deviation` of a given vector.
*
* The following function computes the `standard deviation` of a given input vector.
* \param *data Input vector
* \param n Size of the vector
* \param *avg `Mean` computed on the input vector
* \return `Standard deviation` computed on the input vector
*/
__host__ float standard_deviation(float *data, int n, float *avg) {
float mean = 0.0, sum_deviation = 0.0;
int i;
for (i = 0; i < n; ++i) {
mean += data[i];
}
mean = mean / n;
*avg = mean;
for (i = 0; i < n; ++i)
sum_deviation += (data[i] - mean) * (data[i] - mean);
return sqrt(sum_deviation / (n - 1));
}
/**
 * \brief The function `z_normalize2D` z-normalizes an input matrix row by row.
 *
 * The following function calculates the z-score of each value of a matrix row, relative to that row's sample mean and standard deviation.
 * \param *M Input matrix
 * \param nrow Number of rows
 * \param ncol Number of columns
*/
__host__ void z_normalize2D(float *M, int nrow, int ncol) {
int i;
float std_dev = 0;
float *mean = (float *)malloc(sizeof(float));
for (i = 0; i < nrow; i++) {
std_dev = 0;
*mean = 0;
std_dev = standard_deviation(&M[i * ncol], ncol, mean);
for (int k = 0; k < ncol; k++)
M[i * ncol + k] = (M[i * ncol + k] - (*mean)) / std_dev;
}
free(mean);
}
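/*
 * Illustrative usage sketch (assumed buffer layout): treating each variable of
 * the i-th MTS as one row of length `window_size`, the call below z-normalizes
 * all of its dimensions in place.
 *
 *   z_normalize2D(&h_train[i * n_feat * window_size], n_feat, window_size);
 */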
/**
* \brief The function `short_ed_c` computes the `mono-dimensional Euclidean` distance.
*
 * It considers the calculation of the Euclidean distance for two mono-dimensional time series stored, respectively, into the vectors `*T` and `*S`
* \param *S Vector containing the first time series
* \param *T Vector containing the second time series
* \param window_size Length of the two given time series
* \return ED distance among the two time series
*/
__host__ float short_ed_c(float *T, float *S, int window_size) {
float sumErr = 0;
for (int i = 0; i < window_size; i++)
sumErr += (T[i] - S[i]) * (T[i] - S[i]);
return sqrt(sumErr);
}
/**
* \brief The function `short_dtw_c` computes the `mono-dimensional Dynamic Time Warping` distance (DTW).
*
 * It considers the calculation of the DTW distance for two mono-dimensional time series stored, respectively, into the vectors `*instance` and `*query`
 * \param *instance Vector containing the first time series
* \param *query Vector containing the time series to compare against `*instance`
* \param ns Length of the `*instance`
* \param nt Length of the `*query`
* \return DTW distance among the two time series
*/
__host__ float short_dtw_c(float *instance, float *query, int ns, int nt) {
int k = 0, l = 0, g = 0;
long long int i, j;
float **array;
float min_nb;
// create array
array = (float **)malloc((nt) * sizeof(float *));
for (i = 0; i < nt; i++) {
array[i] = (float *)malloc((2) * sizeof(float));
}
k = 0;
l = 1;
for (i = 0; i < nt; i++) {
if (i == 0)
array[i][k] = pow((instance[0] - query[i]),
2); // squared difference (ins[0]-query[0])^2
else
array[i][k] = pow((instance[0] - query[i]), 2) + array[i - 1][k];
}
k = 1;
l = 0;
// computing DTW
for (j = 1; j < ns; j++) {
i = 0;
array[i][k] = pow((instance[j] - query[i]), 2) + array[i][l];
for (i = 1; i < nt; i++) {
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
array[i][k] = pow((instance[j] - query[i]), 2) + min_nb;
}
g = k;
k = l;
l = g;
}
float min = array[nt - 1][g];
  for (i = 0; i < nt; i++)
free(array[i]);
free(array);
return min;
}
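/*
 * Illustrative usage sketch (hypothetical buffers): comparing two univariate
 * series of length `window_size` returns the cumulative squared-difference
 * DTW cost between them.
 *
 *   float d = short_dtw_c(seriesA, seriesB, window_size, window_size);
 */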
/**
* \brief The function `short_md_ed_c` computes the `Multi-Dimensional Euclidean` distance (MD-E).
*
 * It considers the calculation of the MD-E distance for two multivariate time series (MTS) stored, respectively, into the vectors `*T` and `*S`
* \param *S Vector containing the first time series
* \param *T Vector containing the second time series
* \param window_size Length of the two given time series
* \param dimensions Number of variables for the two MTS
* \param offset Integer used for computing the rotation invariant euclidean distance (It's usually equal to window_size)
* \return Euclidean distance among the two MTS
*/
__host__ float short_md_ed_c(float *T, float *S, int window_size,
int dimensions, int offset) {
float sumErr = 0, dd = 0;
for (int i = 0; i < window_size; i++) {
dd = 0;
for (int p = 0; p < dimensions; p++)
dd += (T[(p * offset) + i] - S[(p * window_size) + i]) *
(T[(p * offset) + i] - S[(p * window_size) + i]);
sumErr += dd;
}
return sqrt(sumErr);
}
/**
* \brief The function `short_md_dtw_c` computes the `Multi-Dimensional Dynamic Time Warping` distance (MD-DTW).
*
 * It considers the calculation of the MD-DTW distance for two multivariate time series (MTS) stored, respectively, into the vectors `*S` and `*T`
 * \param *S Vector containing the first MTS
 * \param *T Vector containing the MTS to compare against `*S`
 * \param ns Length of `*S`
 * \param nt Length of `*T`
* \param dim Number of variables for the two MTS
* \param offset Integer used for computing the rotation invariant euclidean distance (It's usually equal to window_size)
* \return Dynamic Time Warping distance among the two MTS
*/
__host__ float short_md_dtw_c(float *S, float *T, int ns, int nt, int dim,
int offset) {
int k = 0, l = 0, g = 0;
long long int i, j;
float **array;
float min_nb;
array = (float **)malloc((nt) * sizeof(float *));
for (i = 0; i < nt; i++) {
array[i] = (float *)malloc((2) * sizeof(float));
}
k = 0;
l = 1;
for (i = 0; i < nt; i++) {
array[i][k] = 0.0;
for (int p = 0; p < dim; p++) {
if (i == 0)
array[i][k] += pow((S[p * offset + i] - T[p * nt + i]), 2);
else
array[i][k] += pow((S[p * offset + 0] - T[p * nt + i]), 2);
}
if (i != 0)
array[i][k] += array[i - 1][k];
}
k = 1;
l = 0;
for (j = 1; j < ns; j++) {
i = 0;
array[i][k] = 0.0;
for (int p = 0; p < dim; p++)
array[i][k] += pow((S[p * offset + j] - T[p * nt + i]), 2);
array[i][k] += array[i][l];
for (i = 1; i < nt; i++) {
array[i][k] = 0.0;
float a = array[i - 1][l];
float b = array[i][l];
float c = array[i - 1][k];
min_nb = fminf(a, b);
min_nb = fminf(c, min_nb);
for (int p = 0; p < dim; p++)
array[i][k] += pow((S[p * offset + j] - T[p * nt + i]), 2);
array[i][k] += min_nb;
}
g = k;
k = l;
l = g;
}
  float min = array[nt - 1][g];

  for (i = 0; i < nt; i++)
    free(array[i]);
  free(array);

  return min;
}
/**
* \brief The function `print_help` print on the standard output several information about the input parameters to feed to the software.
*/
__host__ void print_help(void) {
  fprintf(stderr,
          "\nUsage: MTSS [OPTIONS]\n"
          "Multivariate Time Series Software (MTSS) using Multivariate Dynamic "
          "Time Warping\n"
          "\n"
          "OPTIONS:\n"
          "-t Task \t\tParameters\n"
          "String value \t\tThis param. represents the kind of task "
          "you want to perform (CLASSIFICATION or SUBSEQ_SEARCH)\n\n"
          "-i Input \t\tParameters\n"
          "String value \t\tThis param. is used to select the CPU "
          "or GPU version\n"
          "Integer value \t\tThis param. represents the "
          "dimensionality of MTS (TS) (e.g., 1,2,3, etc.)\n"
          "Integer values \t\tThe second/third argument (depending on "
          "the first param.) represents either the desired number of threads "
          "with whom the kernel will be executed (e.g., 64,128,...,1024) or "
          "the read mode. For more information refer to the README.\n\n"
          "-f Files \t\tParameter\n"
          "String value \t\tFollowed by two or more text files "
          "representing the data format (for more information about the "
          "structure of these files see the README file provided with the "
          "software)\n\n"
          "-k Cross Validation \t\tParameter\n"
          "Integer value \t\tThis param. specifies the number of folds "
          "to use in the K-fold cross-validation step\n\n"
          "Integer value \t\tSetting this param. to 1 does not allow "
          "the reproducibility of the results on the same dataset among the "
          "GPU and CPU versions\n\n"
          "-o Option Parameters \t\tParameter.\n"
          "Integer value \t\tThis param. represents the size of the "
          "dataset (number of samples)\n"
          "Integer value \t\tThis param. represents the window size "
          "of the MTS\n\n"
          "-m Algorithm Mode \t\tParameters\n"
          "Integer value \t\tThis param. represents the type of MTSS "
          "algorithm to use in the tasks (for more information see the README "
          "file)\n\n"
          "-d Device Choice \t\tParameters\n"
          "Integer value \t\tThis param. specifies the GPU device (on "
          "your machine) you want to use to execute the MTSS\n\n"
          "-v Verbose Mode \t\tParameters\n"
          "Integer value \t\tThis param. specifies the verbosity "
          "outputs for the software. The value 0 means no verbosity\n\n"
          "--version \t\tDisplay version information.\n"
          "--help \t\tDisplay help information.\n"
          "\n"
          "e.g.\n"
          "./mdtwObj -t CLASSIFICATION -i CPU 3 1 -f "
          "data/classification/rm_1/X_MAT data/classification/rm_1/Y_MAT "
          "data/classification/rm_1/Z_MAT -k 10 0 -o 1000 152 -m 0 DTW\n"
          "./mdtwObj -t CLASSIFICATION -i GPU 3 512 0 -f "
          "data/classification/rm_0/DATA data/classification/rm_0/LABEL -k 10 "
          "0 -o 1000 152 -m 0 DTW -d 0\n"
          "./mdtwObj -t SUBSEQ_SEARCH -i CPU 1 0 -f ECGseries ECGquery -o 3907 "
          "421 -m 0 -d 0\n"
          "./mdtwObj -t SUBSEQ_SEARCH -i GPU 3 512 0 -f "
          "data/subseq_search/T_series data/subseq_search/Q_series -o 3907 421 "
          "-m 1 DTW -d 0\n");
exit(0);
}
/**
* \brief The function `print_version` print on the standard output the software version.
*/
__host__ void print_version(void) {
fprintf(stderr, "Multivariate Time Series Software version 1.0.0\n"
"Copyright (C) 2016 Davide Nardone <[email protected]>\n"
"Originally inspired by Doruk Sart et al\n"
"See the README file for license information.\n");
exit(0);
}
/**
 * \brief The function `infoDev` prints on the standard output information about the available GPUs.
*/
__host__ void infoDev() {
int deviceCount;
cudaGetDeviceCount(&deviceCount);
printf("Number of device: %d\n", deviceCount);
int device;
cudaDeviceProp deviceProp;
// retrieving all devices
for (device = 0; device < deviceCount; ++device) {
// getting information about i-th device
cudaGetDeviceProperties(&deviceProp, device);
// printing information about i-th device
printf("\n\n>>>>>>>>>>>>>>>>>>\nSelected device:%d\n<<<<<<<<<<<<<<<<<<\n\n",
device);
printf("\ndevice %d : %s\n", device, deviceProp.name);
printf("major/minor : %d.%d compute capability\n", deviceProp.major,
deviceProp.minor);
printf("Total global mem : %lu bytes\n", deviceProp.totalGlobalMem);
printf("Shared block mem : %lu bytes\n", deviceProp.sharedMemPerBlock);
printf("Max memory pitch : %lu bytes\n", deviceProp.memPitch);
printf("RegsPerBlock : %d \n", deviceProp.regsPerBlock);
printf("WarpSize : %d \n", deviceProp.warpSize);
printf("MaxThreadsPerBlock : %d \n", deviceProp.maxThreadsPerBlock);
printf("TotalConstMem : %lu bytes\n", deviceProp.totalConstMem);
printf("ClockRate : %d (kHz)\n", deviceProp.clockRate);
    printf("deviceOverlap : %d \n", deviceProp.deviceOverlap);
printf("MultiProcessorCount: %d \n", deviceProp.multiProcessorCount);
printf("\n");
}
exit(-1);
}
/**
 * \brief The function `getDevProp` returns an object `deviceProp` containing all the information about a specific GPU device.
 * \param device Index of the GPU device to query
 * \return `deviceProp` CUDA object containing information about the requested device.
*/
__host__ cudaDeviceProp getDevProp(int device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
return deviceProp;
}
/**
* \brief The function `checkGPU_prop` check whether a GPU property for its own device is correct.
 * \param *compution_type String used to trigger the check only for GPU execution
* \param *deviceProp CUDA object containing several information about its own device
* \param *prop_in GPU property to check
* \param *prop_GPU_in GPU property value to check
*/
__host__ void checkGPU_prop(char *compution_type, cudaDeviceProp deviceProp, const char *prop_in, int prop_GPU_in){
if (strcmp(compution_type, "GPU") == 0) {
if ( (strcmp(prop_in, "maxThreadsPerBlock") == 0) && (prop_GPU_in < 0 || prop_GPU_in > deviceProp.maxThreadsPerBlock) ) {
printf(" %d is an irregular #threads for block for the device %s.\n The number of threads "
"for block has to be included in [0, %d]\n", prop_GPU_in, deviceProp.name, deviceProp.maxThreadsPerBlock);
exit(-2);
}
}
}
/**
* \brief The function `initializeArray` fills an input array with random values.
* \param *array Vector to fill
* \param n Size of the vector
*/
__host__ void initializeArray(float *array, int n) {
int i;
for (i = 0; i < n; i++)
array[i] = ((float)rand()) / (float)RAND_MAX;
}
__host__ void initializeArray(int *array, int n) {
int i;
for (i = 0; i < n; i++)
array[i] = ((int)rand()) / (int)RAND_MAX;
}
/**
* \brief The function `initializeMatrix` fills an input matrix with random values.
* \param *matrix Matrix to fill
* \param M Number of rows
* \param N Number of columns
*/
__host__ void initializeMatrix(float *matrix, int M, int N) {
int i, j;
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
matrix[i * N + j] = ((float)rand()) / (float)RAND_MAX;
}
/**
 * \brief The function `printArray` prints on the standard output an input array of float values.
* \param *array array
* \param n Size of the vector
*/
__host__ void printArray(float *array, int n) {
int i;
for (i = 0; i < n; i++)
printf("val[%d]: %f\n", i, array[i]);
printf("\n");
}
/**
 * \brief The function `printArrayI` prints on the standard output an input array of integer values.
* \param *array array
* \param n Size of the vector
*/
__host__ void printArrayI(int *array, int n) {
int i;
for (i = 0; i < n; i++)
printf("val[%d]: %d\n", i, array[i]);
printf("\n");
}
/**
 * \brief The function `printMatrix` prints on the standard output an input matrix of float values.
 * \param *matrix Matrix to print
* \param M Number of rows
* \param N Number of columns
*/
__host__ void printMatrix(float *matrix, int M, int N) {
int i, j;
for (i = 0; i < M; i++) {
for (j = 0; j < N; j++)
printf("%f\n", matrix[i * N + j]);
printf("\n");
}
}
/**
 * \brief The function `equalArray` checks whether the host and device results are the same
 * \param *a Host array
 * \param *b Device array
 * \param n Size of the two vectors
*/
__host__ void equalArray(float *a, float *b, int n) {
int i = 0;
  while (i < n && a[i] == b[i])
    i++;

  if (i < n) {
    printf("Host and device results differ\n");
    printf("CPU[%d]: %f, GPU[%d]: %f \n", i, a[i], i, b[i]);
  } else
    printf("Host and device results match\n");
}
/**
 * \brief The function `compareArray` prints on the standard output the entries where the host and device arrays differ
 * \param *a Host array
 * \param *b Device array
 * \param n Size of the two vectors
*/
__host__ void compareArray(float *a, float *b, int n) {
int i = 0;
for (i = 0; i < n; ++i) {
if (a[i] != b[i])
printf("CPU[%d]: %f, GPU[%d]: %f \n", i, a[i], i, b[i]);
}
}
/**
* \brief The function `min_arr` computes the minimum value of an input array.
* \param *arr array
 * \param n Size of the vector
* \param *ind Index of the minimum value found into the array `*arr`
* \return minimum value found into the array `*arr`
*/
__host__ float min_arr(float *arr, int n, int *ind) {
float min = FLT_MAX;
*ind = -1;
for (int i = 0; i < n; ++i) {
if (arr[i] < min) {
min = arr[i];
*ind = i;
}
}
return min;
}
/**
* \brief The function `max_arr` computes the maximum value of an input array.
* \param *arr array
 * \param n Size of the vector
* \param *ind Index of the maximum value found into the array `*arr`
* \return maximum value found into the array `*arr`
*/
__host__ float max_arr(float *arr, int n, int *ind) {
  float max = -FLT_MAX;
*ind = -1;
for (int i = 0; i < n; ++i) {
if (arr[i] > max) {
max = arr[i];
*ind = i;
}
}
return max;
}
/**
* \brief The function `timedifference_msec` computes the time difference among `t0` and `t1`.
 * \param t0 Structure containing the time taken at `t0`
 * \param t1 Structure containing the time taken at `t1`
 * \return Elapsed time (in milliseconds) between `t0` and `t1`
*/
float timedifference_msec(struct timeval t0, struct timeval t1) {
return (t1.tv_sec - t0.tv_sec) * 1000.0f +
(t1.tv_usec - t0.tv_usec) / 1000.0f;
}
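/*
 * Illustrative usage sketch with gettimeofday(2):
 *
 *   struct timeval t0, t1;
 *   gettimeofday(&t0, NULL);
 *   // ... work to be timed ...
 *   gettimeofday(&t1, NULL);
 *   printf("Elapsed: %f ms\n", timedifference_msec(t0, t1));
 */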
/**
* \brief The function `foldit` implements the switch statement for a range of values.
* \param ws Length for both the time series
*/
__host__ int foldit (int ws) {
if (ws <= 0) return -1;
else if (ws > 0 and ws <= 64) return 0;
else if (ws > 64 and ws <= 128) return 1;
else if (ws > 128 and ws <= 256) return 2;
else if (ws > 256 and ws <= 512) return 3;
else if (ws > 512 and ws <= 1024) return 4;
else if (ws > 1024 and ws <= 2048) return 5;
else if (ws > 2048 and ws <= 4096) return 6;
else if (ws > 4096 and ws <= 8192) return 7;
else if (ws > 8192 and ws <= 16384) return 8;
else return 999; // triggers the default part of the switch
}
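/*
 * Illustrative sketch of how the fold returned by `foldit` is meant to select
 * the template instantiation of the DTW kernels, whose per-thread buffer
 * `array[WS][2]` must be able to hold `window_size` columns (the launch
 * parameters used below are assumed names):
 *
 *   switch (foldit(window_size)) {
 *     case 0: MD_DTW_I<64>  <<<grid, threads, sh_mem>>>(d_train, d_query, trainSize, window_size, n_feat, d_Out, 0, 0); break;
 *     case 1: MD_DTW_I<128> <<<grid, threads, sh_mem>>>(d_train, d_query, trainSize, window_size, n_feat, d_Out, 0, 0); break;
 *     case 2: MD_DTW_I<256> <<<grid, threads, sh_mem>>>(d_train, d_query, trainSize, window_size, n_feat, d_Out, 0, 0); break;
 *     // ... up to case 8 for WS = 16384 ...
 *     default: printf("Unsupported window size\n"); exit(-1);
 *   }
 */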
/**
 * \brief The function `MDD_SIM_MES_CPU` is a wrapper function used for computing the CPU dependent multidimensional similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
* \param window_size Length for the time series to be stored into `*data`
* \param n_feat Number of variables for the time series stored into both train and test set
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \return The number of misclassifications
*/
__host__ float MDD_SIM_MES_CPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, int window_size, int n_feat, char *distance_type, int verbose_mode){
int *minI = (int *)malloc(sizeof(int));
float *h_Out = (float *)malloc(trainSize * sizeof(float));
int err = 0;
float min = 0;
for (int k = 0; k < testSize; k++) {
for (int j = 0; j < trainSize; j++) {
if (strcmp(distance_type, "DTW") == 0) // DTW distance
h_Out[j] = short_md_dtw_c(&h_train[j * n_feat * window_size],
&h_test[k * n_feat * window_size],
window_size, window_size, n_feat,
window_size);
else // Euclidean Distance
h_Out[j] = short_md_ed_c(&h_train[j * n_feat * window_size],
&h_test[k * n_feat * window_size],
window_size, n_feat, window_size);
}
min = min_arr(h_Out, trainSize, minI);
if (trainLabels[*minI] != testLabels[k])
err++;
if (verbose_mode > 0 && verbose_mode < testSize) {
if (k % verbose_mode == 0)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
else if (k == testSize-1)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
}
}
free(minI);
return err;
}
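/*
 * Illustrative usage sketch (variable names are assumptions): classifying a
 * test set on the CPU with the dependent DTW measure and reporting the error
 * rate over the test set.
 *
 *   float errors = MDD_SIM_MES_CPU(trainSize, testSize, trainLabels, testLabels,
 *                                  h_train, h_test, window_size, n_feat,
 *                                  (char *)"DTW", verbose_mode);
 *   printf("Error rate: %f\n", errors / testSize);
 */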
/**
 * \brief The function `MDD_SIM_MES_CPU` is a wrapper function used for computing the CPU multidimensional similarity measure for the sub-sequence similarity search task.
* \param nss Number of sub-sequences to search
* \param *t_series Vector containing the first time series
* \param *q_series Vector containing the time series to compare against `*instance`
* \param t_size Length of the time series `*t_series`
* \param q_size Length of the time series `*q_series`
* \param n_feat Number of variables for the two MTS
 * \param *distance_type Type of similarity measure to adopt for performing the sub-sequence similarity search task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *owp Support vector containing all the computed distances
* \param *ind_min_val Index containing the minimum value obtained by comparing `*q_series` over `*t_series`
* \return minimum value obtained by comparing `*q_series` over `*t_series`
*/
__host__ float MDD_SIM_MES_CPU(int nss, float *t_series, float *q_series, int t_size, int q_size, int n_feat, char *distance_type, int verbose_mode, float *owp, int *ind_min_val){
float min = 9999.99, dist;
for (int i = 0; i < nss; i++) {
dist = 0.0;
if (strcmp(distance_type, "DTW") == 0) // DTW distance
dist = short_md_dtw_c(&t_series[i], q_series, q_size,
q_size, n_feat, t_size);
else
dist = short_md_ed_c(&t_series[i], q_series, q_size, n_feat,
t_size);
owp[i] = dist;
if (verbose_mode > 0 && verbose_mode < nss) {
if (i % verbose_mode == 0)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
      else if (i == nss - 1)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
}
}
// computing minimum value
min = min_arr(owp, nss, ind_min_val);
return min;
}
/**
 * \brief The function `MDI_SIM_MES_CPU` is a wrapper function used for computing the CPU independent multidimensional similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
* \param window_size Length for the time series to be stored into `*data`
* \param n_feat Number of variables for the time series stored into both train and test set
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \return The number of misclassifications
*/
__host__ float MDI_SIM_MES_CPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, int window_size, int n_feat, char *distance_type, int verbose_mode){
int *minI = (int *)malloc(sizeof(int));
float *h_Out = (float *)malloc(trainSize * window_size * sizeof(float));
int err = 0;
float min = 0, dtw_curr = 0, cum_sum = 0;
for (int k = 0; k < testSize; k++) {
for (int j = 0; j < trainSize; j++) {
cum_sum = 0.0;
for (int d = 0; d < n_feat; d++) {
if (strcmp(distance_type, "DTW") == 0) // DTW distance
dtw_curr = short_dtw_c(
&h_train[(d * window_size) + (j * n_feat * window_size)],
&h_test[(k * n_feat * window_size) + (d * window_size)],
window_size, window_size);
else
dtw_curr = short_ed_c(
&h_train[(d * window_size) + (j * n_feat * window_size)],
&h_test[(k * n_feat * window_size) + (d * window_size)],
window_size);
cum_sum += dtw_curr;
}
h_Out[j] = cum_sum;
}
min = min_arr(h_Out, trainSize, minI);
if (trainLabels[*minI] != testLabels[k])
err++;
if (verbose_mode > 0 && verbose_mode < testSize) {
if (k % verbose_mode == 0)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
else if (k == testSize-1)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
}
}
free(minI);
return err;
}
/**
 * \brief The function `MDI_SIM_MES_CPU` is a wrapper function used for computing the CPU multidimensional similarity measure for the sub-sequence similarity search task.
* \param nss Number of sub-sequences to search
* \param *t_series Vector containing the first time series
* \param *q_series Vector containing the time series to compare against `*instance`
* \param t_size Length of the time series `*t_series`
* \param q_size Length of the time series `*q_series`
* \param n_feat Number of variables for the two MTS
 * \param *distance_type Type of similarity measure to adopt for performing the sub-sequence similarity search task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *owp Support vector containing all the computed distances
* \param *ind_min_val Index containing the minimum value obtained by comparing `*q_series` over `*t_series`
* \return minimum value obtained by comparing `*q_series` over `*t_series`
*/
__host__ float MDI_SIM_MES_CPU(int nss, float *t_series, float *q_series, int t_size, int q_size, int n_feat, char *distance_type, int verbose_mode, float *owp, int *ind_min_val){
float min = 9999.99, dist, val_curr;
for (int i = 0; i < nss; i++) {
dist = 0.0;
for (int k = 0; k < n_feat; k++) {
if (strcmp(distance_type, "DTW") == 0) // DTW distance
val_curr = short_dtw_c(&t_series[(k * t_size) + i],
&q_series[(k * q_size)], q_size,
q_size);
else
val_curr = short_ed_c(&t_series[(k * t_size) + i],
&q_series[(k * q_size)], q_size);
dist += val_curr;
}
owp[i] = dist;
if (verbose_mode > 0 && verbose_mode < nss) {
if (i % verbose_mode == 0)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
      else if (i == nss - 1)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
}
}
min = min_arr(owp, nss, ind_min_val);
return min;
}
/**
 * \brief The function `MDR_SIM_MES_CPU` is a wrapper function used for computing the CPU multidimensional rotation similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
* \param window_size Length for the time series to be stored into `*data`
* \param n_feat Number of variables for the time series stored into both train and test set
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *err The number of misclassifications using the basic similarity measure
 * \param *errNR The number of misclassifications using the rotation similarity measure
*/
__host__ void MDR_SIM_MES_CPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, int window_size, int n_feat, char *distance_type, int verbose_mode, int *err, int *errNR){
float *h_Out = (float *)malloc(trainSize * window_size * sizeof(float));
float minNR = 0.0, min = 0.0;
int minINR = 0, minI = 0;
for (int i = 0; i < testSize; i++) {
for (int j = 0; j < trainSize; j++) {
for (int k = 0; k < window_size; k++) {
if (strcmp(distance_type, "DTW") == 0) // DTW distance
h_Out[(j * window_size) + k] = short_md_dtw_c(
&h_train[(2 * j * n_feat * window_size) + k],
&h_test[i * n_feat * window_size], window_size,
window_size, n_feat, 2 * window_size);
else
h_Out[(j * window_size) + k] = short_md_ed_c(
&h_train[(2 * j * n_feat * window_size) + k],
&h_test[i * n_feat * window_size], window_size, n_feat,
2 * window_size);
}
}
min = 9999999999.99;
minI = -1;
minINR = -1;
minNR = 99999999999.99;
for (int m = 0; m < trainSize; m++) {
if (h_Out[m * window_size] < minNR) {
minNR = h_Out[m * window_size];
minINR = m;
}
for (int p = 0; p < window_size; p++) {
int t = m * window_size + p;
if (h_Out[t] < min) {
min = h_Out[t];
minI = m;
}
}
}
if (trainLabels[minI] != testLabels[i])
(*err)++;
if (trainLabels[minINR] != testLabels[i])
(*errNR)++;
if (verbose_mode > 0 && verbose_mode < testSize) {
if (i % verbose_mode == 0)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f \t\t NRI: %d\t%3.6f\n", i,
testLabels[i], trainLabels[minI], min,
trainLabels[minINR], minNR);
else if (i == testSize-1)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f \t\t NRI: %d\t%3.6f\n", i,
testLabels[i], trainLabels[minI], min,
trainLabels[minINR], minNR);
}
}
}
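/* Note on the rotation-invariant scheme above: for every train/test pair,
   h_Out holds one row of window_size scores, one per circular shift of the
   training MTS (the train set is stored with a 2 * window_size stride per
   variable, presumably so the shifted windows can be read without wrapping).
   Column 0 is the unshifted alignment and feeds minNR/errNR, while the minimum
   over the whole row gives the rotation-invariant decision counted in min/err. */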
/**
 * \brief The function `MDD_SIM_MES_GPU` is a wrapper function used for computing the GPU dependent multidimensional similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
* \param *d_train Vector containing the data for the train set stored in the GPU device
* \param *d_test Vector containing the data for the test set stored in the GPU device
 * \param *d_Out Vector containing the temporary results on the device
 * \param *h_Out Vector containing the temporary results on the host
* \param window_size Length for the time series to be stored into `*data`
* \param n_feat Number of variables for the time series stored into both train and test set
* \param blockSize Number of threads to use for comparing the MTS
* \param deviceProp CUDA object containing several information about its own device
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \return the number of misclassifications
*/
__host__ float MDD_SIM_MES_GPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, float *d_train, float *d_test, float *d_Out, float *h_Out, int window_size, int n_feat, int blockSize, cudaDeviceProp deviceProp, char *distance_type, int verbose_mode){
float grid_size, min = 9999.99;
dim3 grid;
dim3 threads;
int *minI = (int *)malloc(sizeof(int));
int err = 0;
float T2 = (n_feat * window_size) * sizeof(float);
int gm = 0;
if (T2 > deviceProp.sharedMemPerBlock) {
printf("\tWarning: The T2 test timeserie: %f doesn't fit into the shared "
"memory: %lu, so it will be allocated into the global "
"memory\n",
T2, deviceProp.sharedMemPerBlock);
gm = 1;
T2 = 0;
} else
gm = 0;
grid_size = ceil((float)trainSize / blockSize);
// number of blocks (x,y) for a grid
grid.x = grid_size;
grid.y = 1;
// number of threads (x,y) for each block
threads.x = blockSize;
threads.y = 1;
if(verbose_mode > 0){
printf("\tGrid_size_x: %d, number_of_threads_x: %d \n", grid.x,
threads.x);
printf("\tGrid_size_y: %d, number_of_threads_y: %d \n\n", grid.y,
threads.y);
}
for (int k = 0; k < testSize; k++) {
cudaMemset(d_test, 0, n_feat * window_size * sizeof(float));
cudaMemcpy(d_test, h_test + k * (n_feat * window_size),
n_feat * window_size * sizeof(float),
cudaMemcpyHostToDevice);
if (strcmp(distance_type, "DTW") == 0){ // DTW distance
switch (foldit(window_size)) {
case 0: MD_DTW_D<64><<<grid, threads, T2>>>(d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 1: MD_DTW_D<128><<<grid, threads, T2>>>(d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 2: MD_DTW_D<256><<<grid, threads, T2>>>(d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 3: MD_DTW_D<512><<<grid, threads, T2>>>(d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 4: MD_DTW_D<1024><<<grid, threads, T2>>>(d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 5: MD_DTW_D<2048><<<grid, threads, T2>>>(d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 6: MD_DTW_D<4096><<<grid, threads, T2>>>(d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 7: MD_DTW_D<8192><<<grid, threads, T2>>>(d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
case 8: MD_DTW_D<16384><<<grid, threads, T2>>>(d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
break;
default: printf("No kernel exists for %d window_size\n", window_size); break;
}
}
else
MD_ED_D <<<grid, threads, T2>>> (d_train, d_test, trainSize, window_size,
n_feat, d_Out, 0, gm);
// cudaDeviceSynchronize(); // it may be avoided if there's not printf
// in the kernel function
cudaMemcpy(h_Out, d_Out, trainSize * sizeof(float),
cudaMemcpyDeviceToHost);
min = min_arr(h_Out, trainSize, minI);
if (trainLabels[*minI] != testLabels[k])
err++;
if (verbose_mode > 0 && verbose_mode < testSize) {
if (k % verbose_mode == 0)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
else if (k == testSize-1)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
}
}
free(minI);
return err;
}
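/* Note (assumption): foldit() is not shown in this excerpt; judging from the
   switch above it maps window_size to the index of the smallest power-of-two
   template bound that fits it (0 for sizes up to 64, 1 for up to 128, ...,
   8 for up to 16384), selecting the MD_DTW_D instantiation. The gm flag
   selects the fallback path: when the query (n_feat * window_size floats)
   does not fit into shared memory, T2 is set to 0 and the kernel reads the
   query from global memory instead. */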
/**
 * \brief The function `MDD_SIM_MES_GPU` is a wrapper function used for computing the GPU dependent multidimensional similarity measure for the sub-sequence similarity search task.
* \param nss Number of sub-sequences to search
* \param *d_t_series Device vector containing the first time series
* \param *d_q_series Device vector containing the time series to compare against `*instance`
* \param t_size Length of the time series `*t_series`
* \param q_size Length of the time series `*q_series`
* \param n_feat Number of variables for the two MTS
* \param blockSize Number of threads to use for comparing the MTS
* \param deviceProp CUDA object containing several information about its own device
 * \param *distance_type Type of similarity measure to adopt for performing the sub-sequence similarity search task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *owp Support vector containing all the comparison values
* \param *d_owp Device support vector containing all the comparing
* \param *ind_min_val Index containing the minimum value obtained by comparing `*q_series` over `*t_series`
* \return minimum value obtained by comparing `*q_series` over `*t_series`
*/
__host__ float MDD_SIM_MES_GPU(int nss, float *d_t_series, float *d_q_series, int t_size, int q_size, int n_feat, int blockSize, cudaDeviceProp deviceProp, char *distance_type, int verbose_mode, float *owp, float *d_owp, int *ind_min_val){
float grid_size, min = 9999.99;
dim3 grid;
dim3 threads;
// Setting CUDA variables and structure
grid_size = ceil((double)nss / blockSize);
// number of blocks (x,y) for a grid
grid.x = grid_size;
grid.y = 1;
// number of threads (x,y) for each block
threads.x = blockSize;
threads.y = 1;
float T2 = (n_feat * q_size) * sizeof(float);
int gm = 0;
if (T2 > deviceProp.sharedMemPerBlock) {
printf("\tWarning: The T2 test timeserie: %f doesn't fit into the shared "
"memory: %lu, so it will be allocated into the global "
"memory\n",
T2, deviceProp.sharedMemPerBlock);
gm = 1;
T2 = 0;
} else
gm = 0;
if(verbose_mode > 0){
printf("\tGrid_size_x: %d, number_of_threads_x: %d \n", grid.x,
threads.x);
printf("\tGrid_size_y: %d, number_of_threads_y: %d \n\n", grid.y,
threads.y);
}
if (strcmp(distance_type, "DTW") == 0){ // DTW distance
switch (foldit(q_size)) {
case 0: MD_DTW_D<64><<<grid, threads, T2>>>(d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 1: MD_DTW_D<128><<<grid, threads, T2>>>(d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 2: MD_DTW_D<256><<<grid, threads, T2>>>(d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 3: MD_DTW_D<512><<<grid, threads, T2>>>(d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 4: MD_DTW_D<1024><<<grid, threads, T2>>>(d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 5: MD_DTW_D<2048><<<grid, threads, T2>>>(d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 6: MD_DTW_D<4096><<<grid, threads, T2>>>(d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 7: MD_DTW_D<8192><<<grid, threads, T2>>>(d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
case 8: MD_DTW_D<16384><<<grid, threads, T2>>>(d_t_series, d_q_series, t_size,
q_size, n_feat, d_owp, 1, gm);
break;
}
}
else
MD_ED_D << <grid, threads, T2>>> (d_t_series, d_q_series, t_size, q_size,
n_feat, d_owp, 1, gm);
cudaMemcpy(owp, d_owp, nss * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < nss; ++i) {
if (verbose_mode > 0 && verbose_mode < nss) {
if (i % verbose_mode == 0)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
      else if (i == nss-1)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
}
}
min = min_arr(owp, nss, ind_min_val);
return min;
}
/**
 * \brief The function `MDI_SIM_MES_GPU` is a wrapper function used for computing the GPU independent multidimensional similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
* \param *d_train Vector containing the data for the train set stored in the GPU device
* \param *d_test Vector containing the data for the test set stored in the GPU device
 * \param *d_Out Vector containing the temporary results on the device
 * \param *h_Out Vector containing the temporary results on the host
* \param window_size Length for the time series to be stored into `*data`
* \param n_feat Number of variables for the time series stored into both train and test set
* \param blockSize Number of threads to use for comparing the MTS
* \param deviceProp CUDA object containing several information about its own device
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \return the number of misclassifications
*/
__host__ float MDI_SIM_MES_GPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, float *d_train, float *d_test, float *d_Out, float *h_Out, int window_size, int n_feat, int blockSize, cudaDeviceProp deviceProp, char *distance_type, int verbose_mode){
float grid_size, min = 9999.99;
dim3 grid;
dim3 threads;
int *minI = (int *)malloc(sizeof(int));
int err = 0;
grid_size = ceil((float)(trainSize * n_feat) / blockSize);
  // the way this measure is computed can eventually be changed
  // according to the logic implemented in the MD_DTW_I function
float dim_row = floor((float)blockSize / n_feat);
float dim_col = n_feat;
//block_size < n_feat
if (dim_row == 0){
printf("Warning: The number of threads for each grid is %f! Note: the number of threads for grid has been set at 1 by default to let the execution don't fail. Please increase the number of threads!\n", dim_row);
dim_row = 1;
}
// number of blocks (x,y) for a grid
grid.x = grid_size;
grid.y = 1;
// number of threads (x,y) for each block
threads.x = dim_row;
threads.y = dim_col;
float T2 = ((threads.x * threads.y) + (n_feat * window_size)) *
sizeof(float);
int gm = 0;
if (T2 > deviceProp.sharedMemPerBlock) {
printf("\tWarning: The T2 test timeserie: %f doesn't fit into the shared "
"memory: %lu, so it will be allocated into the global "
"memory\n",
T2, deviceProp.sharedMemPerBlock);
gm = 1;
T2 = 0;
T2 = (threads.x * threads.y) * sizeof(float);
} else
gm = 0;
if(verbose_mode > 0){
printf("\tGrid_size_x: %d, number_of_threads_x: %d \n", grid.x,
threads.x);
printf("\tGrid_size_y: %d, number_of_threads_y: %d \n\n", grid.y,
threads.y);
}
/*
float sh_mem = ((threads.x * threads.y) + (n_feat * window_size)) *
sizeof(float);*/
for (int k = 0; k < testSize; k++) {
cudaMemcpy(d_test, h_test + k * (n_feat * window_size),
n_feat * window_size * sizeof(float),
cudaMemcpyHostToDevice);
if (strcmp(distance_type, "DTW") == 0){ // DTW distance
switch (foldit(window_size)) {
case 0: MD_DTW_I<64><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 1: MD_DTW_I<128><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 2: MD_DTW_I<256><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 3: MD_DTW_I<512><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 4: MD_DTW_I<1024><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 5: MD_DTW_I<2048><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 6: MD_DTW_I<4096><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 7: MD_DTW_I<8192><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
case 8: MD_DTW_I<16384><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, 0, gm);
break;
}
}
else
MD_ED_I << <grid, threads, T2>>>
(d_train, d_test, trainSize, window_size, n_feat, d_Out, 0, gm);
    cudaDeviceSynchronize();
cudaMemcpy(h_Out, d_Out, trainSize * sizeof(float),
cudaMemcpyDeviceToHost);
min = min_arr(h_Out, trainSize, minI);
if (trainLabels[*minI] != testLabels[k])
err++;
if (verbose_mode > 0 && verbose_mode < testSize) {
if (k % verbose_mode == 0)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
else if (k == testSize-1)
printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f\n", k, testLabels[k],
trainLabels[*minI], min);
}
}
free(minI);
return err;
}
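/* Note on the "independent" layout above: each block is organised as a
   dim_row x n_feat grid of threads, threads.y spanning the n_feat variables
   and threads.x packing floor(blockSize / n_feat) comparisons into the block.
   For example, blockSize = 512 with n_feat = 3 gives threads.x = 170 and
   threads.y = 3, i.e. 510 active threads per block. */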
/**
 * \brief The function `MDI_SIM_MES_GPU` is a wrapper function used for computing the GPU independent multidimensional similarity measure for the sub-sequence similarity search task.
* \param nss Number of sub-sequences to search
* \param *d_t_series Device vector containing the first time series
* \param *d_q_series Device vector containing the time series to compare against `*instance`
* \param t_size Length of the time series `*t_series`
* \param q_size Length of the time series `*q_series`
* \param n_feat Number of variables for the two MTS
* \param blockSize Number of threads to use for comparing the MTS
* \param deviceProp CUDA object containing several information about its own device
 * \param *distance_type Type of similarity measure to adopt for performing the sub-sequence similarity search task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *owp Support vector containing all the comparison values
* \param *d_owp Device support vector containing all the comparing
* \param *ind_min_val Index containing the minimum value obtained by comparing `*q_series` over `*t_series`
* \return minimum value obtained by comparing `*q_series` over `*t_series`
*/
__host__ float MDI_SIM_MES_GPU(int nss, float *d_t_series, float *d_q_series, int t_size, int q_size, int n_feat, int blockSize, cudaDeviceProp deviceProp, char *distance_type, int verbose_mode, float *owp, float *d_owp, int *ind_min_val){
float grid_size, min = 9999.99;
dim3 grid;
dim3 threads;
// Setting CUDA variables and structure
grid_size = ceil((float)(nss * n_feat) / blockSize);
float dim_row = floor((float)blockSize / n_feat);
float dim_col = n_feat;
// number of blocks (x,y) for a grid
grid.x = grid_size;
grid.y = 1;
// number of threads (x,y) for each block
threads.x = dim_row;
threads.y = dim_col;
int gm = 0;
printf("\tGrid_size_x: %d, number_of_threads_x: %d \n", grid.x,
threads.x);
printf("\tGrid_size_y: %d, number_of_threads_y: %d \n\n", grid.y,
threads.y);
float sh_mem = ((threads.x * threads.y) + (n_feat * t_size)) * sizeof(float);
if (strcmp(distance_type, "DTW") == 0){ // DTW distance
switch (foldit(q_size)) {
case 0: MD_DTW_I<64><<<grid, threads, sh_mem>>> (d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 1: MD_DTW_I<128><<<grid, threads, sh_mem>>> (d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 2: MD_DTW_I<256><<<grid, threads, sh_mem>>> (d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 3: MD_DTW_I<512><<<grid, threads, sh_mem>>> (d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 4: MD_DTW_I<1024><<<grid, threads, sh_mem>>> (d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 5: MD_DTW_I<2048><<<grid, threads, sh_mem>>> (d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 6: MD_DTW_I<4096><<<grid, threads, sh_mem>>> (d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 7: MD_DTW_I<8192><<<grid, threads, sh_mem>>> (d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
case 8: MD_DTW_I<16384><<<grid, threads, sh_mem>>> (d_t_series, d_q_series,
t_size, q_size, n_feat, d_owp, 1, gm);
break;
}
}
else
MD_ED_I << <grid, threads, sh_mem>>>
(d_t_series, d_q_series, t_size, q_size, n_feat, d_owp, 1, gm);
cudaMemcpy(owp, d_owp, nss * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < nss; ++i) {
if (verbose_mode > 0 && verbose_mode < nss) {
if (i % verbose_mode == 0)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
      else if (i == nss-1)
printf("\tCurr val diff. [%d]: %f\n", i, owp[i]);
}
}
min = min_arr(owp, nss, ind_min_val);
return min;
}
/**
 * \brief The function `MDR_SIM_MES_GPU` is a wrapper function used for computing the GPU multidimensional rotation similarity measure for the classification task.
* \param trainSize Number of MTS contained into the train set
* \param testSize Number of MTS contained into the test set
* \param *trainLabels Vector containing the labels for the train set
* \param *testLabels Vector containing the labels for the test set
* \param *h_train Vector containing the data for the train set
* \param *h_test Vector containing the data for the test set
* \param *d_train Vector containing the data for the train set stored in the GPU device
* \param *d_test Vector containing the data for the test set stored in the GPU device
 * \param *d_Out Vector containing the temporary results on the device
 * \param *h_Out Vector containing the temporary results on the host
* \param window_size Length for the time series to be stored into `*data`
* \param n_feat Number of variables for the time series stored into both train and test set
* \param blockSize Number of threads to use for comparing the MTS
* \param deviceProp CUDA object containing several information about its own device
* \param *distance_type Type of similarity measure to adopt for performing the classification task
* \param verbose_mode Flag used to increase/reduce the verbosity of the output results
 * \param *err The number of misclassifications using the basic similarity measure
 * \param *errNR The number of misclassifications using the rotation similarity measure
*/
__host__ void MDR_SIM_MES_GPU(int trainSize, int testSize, int *trainLabels, int *testLabels, float *h_train, float *h_test, float *d_train, float *d_test, float *d_Out, float *h_Out, int window_size, int n_feat, int blockSize, cudaDeviceProp deviceProp, char *distance_type, int verbose_mode, int *err, int *errNR){
float grid_size, min = 9999.99,minNR = 99999.99;
dim3 grid;
dim3 threads;
int minINR = 0, minI = 0;
float T2 = (n_feat * window_size) * sizeof(float);
int gm = 0;
if (T2 > deviceProp.sharedMemPerBlock) {
printf("\tWarning: The T2 test timeserie: %f doesn't fit into the shared "
"memory: %lu, so it will be allocated into the global "
"memory\n",
T2, deviceProp.sharedMemPerBlock);
gm = 1;
T2 = 0;
} else
gm = 0;
grid_size = ceil((float)trainSize * window_size / blockSize);
// number of blocks (x,y) for a grid
grid.x = grid_size;
grid.y = 1;
// number of threads (x,y) for each block
threads.x = blockSize;
threads.y = 1;
if(verbose_mode > 0){
printf("\tGrid_size_x: %d, number_of_threads_x: %d \n", grid.x,
threads.x);
printf("\tGrid_size_y: %d, number_of_threads_y: %d \n\n", grid.y,
threads.y);
}
for (int k = 0; k < testSize; k++) {
cudaMemcpy(d_test, h_test + (k * n_feat * window_size),
n_feat * window_size * sizeof(float),
cudaMemcpyHostToDevice);
if (strcmp(distance_type, "DTW") == 0){ // DTW distance
switch (foldit(window_size)) {
case 0: rMD_DTW_D<64><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 1: rMD_DTW_D<128><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 2: rMD_DTW_D<256><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 3: rMD_DTW_D<512><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 4: rMD_DTW_D<1024><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 5: rMD_DTW_D<2048><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 6: rMD_DTW_D<4096><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 7: rMD_DTW_D<8192><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
case 8: rMD_DTW_D<16384><<<grid, threads, T2>>>(d_train, d_test, trainSize,
window_size, n_feat, d_Out, gm);
break;
}
}
else
rMD_ED_D << <grid, threads, T2>>>
(d_train, d_test, window_size, n_feat, d_Out, trainSize, gm);
    cudaDeviceSynchronize();
cudaMemcpy(h_Out, d_Out, trainSize * window_size * sizeof(float),
cudaMemcpyDeviceToHost);
min = 9999999999.99;
minI = -1;
minINR = -1;
minNR = 99999999999.99;
int i = 0;
for (int j = 0; j < trainSize; j++) {
if (h_Out[j * window_size] < minNR) {
minNR = h_Out[j * window_size];
minINR = j;
}
for (i = 0; i < window_size; i++) {
int t = j * window_size + i;
if (h_Out[t] < min) {
min = h_Out[t];
minI = j;
}
}
}
if (trainLabels[minI] != testLabels[k])
(*err)++;
if (trainLabels[minINR] != testLabels[k])
(*errNR)++;
if (verbose_mode > 0 && verbose_mode < testSize) {
      if (k % verbose_mode == 0)
        printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f \t\t NRI: %d\t%3.6f\n", k,
               testLabels[k], trainLabels[minI], min,
               trainLabels[minINR], minNR);
      else if (k == testSize-1)
        printf("\t%d\t gt: %d\t\tRI: %d\t%3.6f \t\t NRI: %d\t%3.6f\n", k,
               testLabels[k], trainLabels[minI], min,
               trainLabels[minINR], minNR);
}
}
} |
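/* Note: as in the CPU variant, the two counters accumulated above can be turned
   into error rates by the caller, for example (illustrative only):
     float err_rate   = 100.0f * (*err)   / testSize;  // rotation-invariant rule
     float errNR_rate = 100.0f * (*errNR) / testSize;  // unshifted rule
*/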
056f72f805e13800ce3e4725b07141507c060bac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math_functions.h"
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
const unsigned int NUM = 12;
const unsigned int TARGRT_NUM = 30;
const unsigned int GENERATE_NUM = 1024;
typedef struct {
double x;
double y;
}Point;
__constant__ const Point target[TARGRT_NUM] = {
{ 600, -58 },{ 600, -54 },{ 600, -50 },{ 600, -46 },{ 600, -42 },{ 600, -38 },{ 600, -34 },{ 600, -30 },{ 600, -26 },{ 600, -22 },
{ 600, -18 },{ 600, -14 },{ 600, -10 },{ 600, -6 },{ 600, -2 },{ 600, 2 },{ 600, 6 },{ 600, 10 },{ 600, 14 },{ 600, 18 },
{ 600, 22 },{ 600, 26 },{ 600, 30 },{ 600, 34 },{ 600, 38 },{ 600, 42 },{ 600, 46 },{ 600, 50 },{ 600, 54 },{ 600, 58 }
};
typedef struct {
double x;
double y;
double dir;
double vx;
double vy;
double vr;
}OurPlayer;
typedef struct {
double x;
double y;
double dir;
}TheirPlayer;
__constant__ OurPlayer dev_ourplayer[NUM];
__constant__ TheirPlayer dev_theirplayer[NUM];
__constant__ Point dev_ball;
const int CONST_MEM_SIZE = NUM * sizeof(OurPlayer) + NUM * sizeof(TheirPlayer) + (TARGRT_NUM + 1) * sizeof(Point);
void getInf(const OurPlayer *ourplayer, const TheirPlayer *theirplayer, const Point ball) {
hipMemcpyToSymbol(dev_ourplayer, ourplayer, NUM * sizeof(OurPlayer));
hipMemcpyToSymbol(dev_theirplayer, theirplayer, NUM * sizeof(TheirPlayer));
hipMemcpyToSymbol(&dev_ball, &ball, sizeof(Point));
}
// Pass in the current point, the shoot target point and the blocking point; returns true if the shot can be made, false otherwise
bool calcShootRange(Point source, Point target, Point object) {
double A = target.y - source.y;
double B = source.x - target.x;
double C = target.x * source.y - source.x * target.y;
double D = sqrt(A*A + B*B);
if (D < 0.01) return false;
double dist = abs(A*object.x + B*object.y + C) / D;
if (dist > 9.0) return true;
return false;
}
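// Note: the test above is the standard point-to-line distance. With the line
// through source and target written as A*x + B*y + C = 0, the distance of
// object from it is |A*x0 + B*y0 + C| / sqrt(A*A + B*B); the shot is treated
// as clear when this clearance exceeds 9.0 (presumably the obstacle radius in
// field units; the threshold is not documented here, so this is an assumption).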
__device__ void calcShootRangeList(Point source, TheirPlayer *object) {
bool dev_shootList[TARGRT_NUM];
for (int i = 0; i < TARGRT_NUM; i++) {
  dev_shootList[i] = true;  // initialization
double A = target[i].y - source.y;
double B = source.x - target[i].x;
double C = target[i].x * source.y - source.x * target[i].y;
double D = sqrt(A*A + B*B);
  // D might be equal to 0?
if (D < 1) {
dev_shootList[i] = false;
continue;
}
for (int j = 0; j < NUM; j++) {
double dist = abs(A*object[j].x + B*object[j].y + C) / D;
if (dist < 9.0) {//
dev_shootList[i] = false;
break;
}
else continue;
}
}
}
// generate 1024 candidate shoot points
void generatePoint(Point *shootPoint, unsigned int size) {
for (int i = 0; i < size; i++) {
  shootPoint[i].x = rand() % 600;  // generates an integer in 0~600
  shootPoint[i].y = (rand() % 900) - 450;  // generates an integer in -450~450
//printf("Point %d: (%.1f, %.1f)\n", i, shootPoint[i].x, shootPoint[i].y);
}
}
__global__ void kernel(Point *dev_shootPoint){
int i = threadIdx.x;
bool *dev_shootList = 0;
//hipMalloc((void**)&dev_shootPoint, TARGRT_NUM * sizeof(bool));
calcShootRangeList(dev_shootPoint[i], dev_theirplayer);
}
int main() {
OurPlayer ourplayer[NUM] = {0};
TheirPlayer theirplay[NUM] = {0};
Point ball = {0};
 // generate the candidate shoot points
Point shootPoint[GENERATE_NUM] = {0};
generatePoint(shootPoint, GENERATE_NUM);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
/*******************************************************/
 // fetch the on-field information
getInf(ourplayer, theirplay, ball);
 // obtain the shoot points
Point *dev_shootPoint = 0;
hipMalloc((void**)&dev_shootPoint, GENERATE_NUM * sizeof(Point));
hipMemcpy(dev_shootPoint, shootPoint, GENERATE_NUM * sizeof(Point), hipMemcpyHostToDevice);
//hipMalloc((void**)&dev_shootPoint, GENERATE_NUM * TARGRT_NUM * sizeof(bool));
 // launch the CUDA computation
//kernel <<<1, GENERATE_NUM, CONST_MEM_SIZE >>>(dev_shootPoint);
/*******************************************************/
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapseTime;
hipEventElapsedTime(&elapseTime, start, stop);
printf("Time for I/O : %.5f ms\n", elapseTime);
system("pause");
} | 056f72f805e13800ce3e4725b07141507c060bac.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math_functions.h"
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
const unsigned int NUM = 12;
const unsigned int TARGRT_NUM = 30;
const unsigned int GENERATE_NUM = 1024;
typedef struct {
double x;
double y;
}Point;
__constant__ const Point target[TARGRT_NUM] = {
{ 600, -58 },{ 600, -54 },{ 600, -50 },{ 600, -46 },{ 600, -42 },{ 600, -38 },{ 600, -34 },{ 600, -30 },{ 600, -26 },{ 600, -22 },
{ 600, -18 },{ 600, -14 },{ 600, -10 },{ 600, -6 },{ 600, -2 },{ 600, 2 },{ 600, 6 },{ 600, 10 },{ 600, 14 },{ 600, 18 },
{ 600, 22 },{ 600, 26 },{ 600, 30 },{ 600, 34 },{ 600, 38 },{ 600, 42 },{ 600, 46 },{ 600, 50 },{ 600, 54 },{ 600, 58 }
};
typedef struct {
double x;
double y;
double dir;
double vx;
double vy;
double vr;
}OurPlayer;
typedef struct {
double x;
double y;
double dir;
}TheirPlayer;
__constant__ OurPlayer dev_ourplayer[NUM];
__constant__ TheirPlayer dev_theirplayer[NUM];
__constant__ Point dev_ball;
const int CONST_MEM_SIZE = NUM * sizeof(OurPlayer) + NUM * sizeof(TheirPlayer) + (TARGRT_NUM + 1) * sizeof(Point);
void getInf(const OurPlayer *ourplayer, const TheirPlayer *theirplayer, const Point ball) {
cudaMemcpyToSymbol(dev_ourplayer, ourplayer, NUM * sizeof(OurPlayer));
cudaMemcpyToSymbol(dev_theirplayer, theirplayer, NUM * sizeof(TheirPlayer));
cudaMemcpyToSymbol(&dev_ball, &ball, sizeof(Point));
}
// Pass in the current point, the shoot target point and the blocking point; returns true if the shot can be made, false otherwise
bool calcShootRange(Point source, Point target, Point object) {
double A = target.y - source.y;
double B = source.x - target.x;
double C = target.x * source.y - source.x * target.y;
double D = sqrt(A*A + B*B);
if (D < 0.01) return false;
double dist = abs(A*object.x + B*object.y + C) / D;
if (dist > 9.0) return true;
return false;
}
__device__ void calcShootRangeList(Point source, TheirPlayer *object) {
bool dev_shootList[TARGRT_NUM];
for (int i = 0; i < TARGRT_NUM; i++) {
  dev_shootList[i] = true;  // initialization
double A = target[i].y - source.y;
double B = source.x - target[i].x;
double C = target[i].x * source.y - source.x * target[i].y;
double D = sqrt(A*A + B*B);
  // D might be equal to 0?
if (D < 1) {
dev_shootList[i] = false;
continue;
}
for (int j = 0; j < NUM; j++) {
double dist = abs(A*object[j].x + B*object[j].y + C) / D;
if (dist < 9.0) {//有阻挡
dev_shootList[i] = false;
break;
}
else continue;
}
}
}
// generate 1024 candidate shoot points
void generatePoint(Point *shootPoint, unsigned int size) {
for (int i = 0; i < size; i++) {
  shootPoint[i].x = rand() % 600;  // generates an integer in 0~600
  shootPoint[i].y = (rand() % 900) - 450;  // generates an integer in -450~450
//printf("Point %d: (%.1f, %.1f)\n", i, shootPoint[i].x, shootPoint[i].y);
}
}
__global__ void kernel(Point *dev_shootPoint){
int i = threadIdx.x;
bool *dev_shootList = 0;
//cudaMalloc((void**)&dev_shootPoint, TARGRT_NUM * sizeof(bool));
calcShootRangeList(dev_shootPoint[i], dev_theirplayer);
}
int main() {
OurPlayer ourplayer[NUM] = {0};
TheirPlayer theirplay[NUM] = {0};
Point ball = {0};
 // generate the candidate shoot points
Point shootPoint[GENERATE_NUM] = {0};
generatePoint(shootPoint, GENERATE_NUM);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
/*******************************************************/
 // fetch the on-field information
getInf(ourplayer, theirplay, ball);
 // obtain the shoot points
Point *dev_shootPoint = 0;
cudaMalloc((void**)&dev_shootPoint, GENERATE_NUM * sizeof(Point));
cudaMemcpy(dev_shootPoint, shootPoint, GENERATE_NUM * sizeof(Point), cudaMemcpyHostToDevice);
//cudaMalloc((void**)&dev_shootPoint, GENERATE_NUM * TARGRT_NUM * sizeof(bool));
 // launch the CUDA computation
//kernel <<<1, GENERATE_NUM, CONST_MEM_SIZE >>>(dev_shootPoint);
/*******************************************************/
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapseTime;
cudaEventElapsedTime(&elapseTime, start, stop);
printf("Time for I/O : %.5f ms\n", elapseTime);
system("pause");
} |
4530229a9876f5393fe7541654a3d67ecdd8c694.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <Eigen/Dense>
#include "cupoch/geometry/geometry_utils.h"
#include "cupoch/utility/console.h"
namespace cupoch {
namespace geometry {
namespace {
template <int Dim>
struct transform_points_functor {
transform_points_functor(
const Eigen::Matrix<float, Dim + 1, Dim + 1> &transform)
: transform_(transform){};
const Eigen::Matrix<float, Dim + 1, Dim + 1> transform_;
__device__ void operator()(Eigen::Matrix<float, Dim, 1> &pt) {
pt = transform_.template block<Dim, Dim>(0, 0) * pt + transform_.template block<Dim, 1>(0, Dim);
}
};
struct transform_normals_functor {
transform_normals_functor(const Eigen::Matrix4f &transform)
: transform_(transform){};
const Eigen::Matrix4f transform_;
__device__ void operator()(Eigen::Vector3f &nl) {
nl = transform_.block<3, 3>(0, 0) * nl;
}
};
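// Note: transform_points_functor applies the affine part of the homogeneous
// matrix, pt' = M.block<Dim,Dim>(0,0) * pt + M.block<Dim,1>(0,Dim), while
// transform_normals_functor applies only the linear block, which is correct
// for rigid transforms (a general affine map would require the
// inverse-transpose of that block instead).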
} // namespace
template <int Dim, typename FuncT>
Eigen::Matrix<float, Dim, 1> ComputeBound(
hipStream_t stream,
const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
if (points.empty()) return Eigen::Matrix<float, Dim, 1>::Zero();
Eigen::Matrix<float, Dim, 1> init = points[0];
return thrust::reduce(
utility::exec_policy(stream)->on(stream), points.begin(),
points.end(), init, FuncT());
}
template <int Dim>
Eigen::Matrix<float, Dim, 1> ComputeMinBound(
const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
return ComputeBound<Dim, thrust::elementwise_minimum<Eigen::Matrix<float, Dim, 1>>>(0, points);
}
template <int Dim>
Eigen::Matrix<float, Dim, 1> ComputeMaxBound(
const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
return ComputeBound<Dim, thrust::elementwise_maximum<Eigen::Matrix<float, Dim, 1>>>(0, points);
}
template <int Dim>
Eigen::Matrix<float, Dim, 1> ComputeMaxBound(
hipStream_t stream,
const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
if (points.empty()) return Eigen::Matrix<float, Dim, 1>::Zero();
Eigen::Matrix<float, Dim, 1> init = points[0];
return thrust::reduce(
utility::exec_policy(stream)->on(stream), points.begin(),
points.end(), init,
thrust::elementwise_maximum<Eigen::Matrix<float, Dim, 1>>());
}
template <int Dim>
Eigen::Matrix<float, Dim, 1> ComputeCenter(
const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
Eigen::Matrix<float, Dim, 1> init = Eigen::Matrix<float, Dim, 1>::Zero();
if (points.empty()) return init;
Eigen::Matrix<float, Dim, 1> sum =
thrust::reduce(points.begin(), points.end(), init,
thrust::plus<Eigen::Matrix<float, Dim, 1>>());
return sum / points.size();
}
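// Note: ComputeBound reduces the point set with an element-wise min/max
// functor seeded from points[0] (empty inputs short-circuit to zero),
// ComputeMinBound/ComputeMaxBound are thin wrappers on the default stream, and
// ComputeCenter returns the arithmetic mean of the points.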
template Eigen::Matrix<float, 2, 1> ComputeBound<2, thrust::elementwise_minimum<Eigen::Matrix<float, 2, 1>>>(
hipStream_t stream,
const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points);
template Eigen::Matrix<float, 3, 1> ComputeBound<3, thrust::elementwise_minimum<Eigen::Matrix<float, 3, 1>>>(
hipStream_t stream,
const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points);
template Eigen::Matrix<float, 2, 1> ComputeBound<2, thrust::elementwise_maximum<Eigen::Matrix<float, 2, 1>>>(
hipStream_t stream,
const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points);
template Eigen::Matrix<float, 3, 1> ComputeBound<3, thrust::elementwise_maximum<Eigen::Matrix<float, 3, 1>>>(
hipStream_t stream,
const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points);
template Eigen::Matrix<float, 2, 1> ComputeMinBound<2>(
const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points);
template Eigen::Matrix<float, 3, 1> ComputeMinBound<3>(
const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points);
template Eigen::Matrix<float, 2, 1> ComputeMaxBound<2>(
const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points);
template Eigen::Matrix<float, 3, 1> ComputeMaxBound<3>(
const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points);
template Eigen::Matrix<float, 2, 1> ComputeCenter<2>(
const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points);
template Eigen::Matrix<float, 3, 1> ComputeCenter<3>(
const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points);
void ResizeAndPaintUniformColor(utility::device_vector<Eigen::Vector3f> &colors,
const size_t size,
const Eigen::Vector3f &color) {
colors.resize(size);
Eigen::Vector3f clipped_color = color;
if (color.minCoeff() < 0 || color.maxCoeff() > 1) {
utility::LogWarning(
"invalid color in PaintUniformColor, clipping to [0, 1]");
clipped_color = clipped_color.array()
.max(Eigen::Vector3f(0, 0, 0).array())
.matrix();
clipped_color = clipped_color.array()
.min(Eigen::Vector3f(1, 1, 1).array())
.matrix();
}
thrust::fill(colors.begin(), colors.end(), clipped_color);
}
template <int Dim>
void TransformPoints(
const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
TransformPoints<Dim>(0, transformation, points);
}
template <int Dim>
void TransformPoints(
hipStream_t stream,
const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
transform_points_functor<Dim> func(transformation);
thrust::for_each(utility::exec_policy(stream)->on(stream), points.begin(),
points.end(), func);
}
template void TransformPoints<2>(const Eigen::Matrix3f &transformation,
utility::device_vector<Eigen::Vector2f> &points);
template void TransformPoints<2>(hipStream_t stream,
const Eigen::Matrix3f &transformation,
utility::device_vector<Eigen::Vector2f> &points);
template void TransformPoints<3>(const Eigen::Matrix4f &transformation,
utility::device_vector<Eigen::Vector3f> &points);
template void TransformPoints<3>(hipStream_t stream,
const Eigen::Matrix4f &transformation,
utility::device_vector<Eigen::Vector3f> &points);
void TransformNormals(const Eigen::Matrix4f &transformation,
utility::device_vector<Eigen::Vector3f> &normals) {
TransformNormals(0, transformation, normals);
}
void TransformNormals(hipStream_t stream,
const Eigen::Matrix4f &transformation,
utility::device_vector<Eigen::Vector3f> &normals) {
transform_normals_functor func(transformation);
thrust::for_each(utility::exec_policy(stream)->on(stream), normals.begin(),
normals.end(), func);
}
template <int Dim>
void TranslatePoints(
const Eigen::Matrix<float, Dim, 1> &translation,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points,
bool relative) {
Eigen::Matrix<float, Dim, 1> transform = translation;
if (!relative) {
transform -= ComputeCenter<Dim>(points);
}
thrust::for_each(points.begin(), points.end(),
[=] __device__(Eigen::Matrix<float, Dim, 1> & pt) {
pt += transform;
});
}
template <int Dim>
void ScalePoints(const float scale,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points,
bool center) {
Eigen::Matrix<float, Dim, 1> points_center =
Eigen::Matrix<float, Dim, 1>::Zero();
if (center && !points.empty()) {
points_center = ComputeCenter<Dim>(points);
}
thrust::for_each(points.begin(), points.end(),
[=] __device__(Eigen::Matrix<float, Dim, 1> & pt) {
pt = (pt - points_center) * scale + points_center;
});
}
template void TranslatePoints<2>(const Eigen::Vector2f &translation,
utility::device_vector<Eigen::Vector2f> &points,
bool relative);
template void TranslatePoints<3>(const Eigen::Vector3f &translation,
utility::device_vector<Eigen::Vector3f> &points,
bool relative);
template void ScalePoints<2>(const float scale,
utility::device_vector<Eigen::Vector2f> &points,
bool center);
template void ScalePoints<3>(const float scale,
utility::device_vector<Eigen::Vector3f> &points,
bool center);
template <int Dim>
void RotatePoints(const Eigen::Matrix<float, Dim, Dim> &R,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points,
bool center) {
RotatePoints<Dim>(0, R, points, center);
}
template <int Dim>
void RotatePoints(hipStream_t stream,
const Eigen::Matrix<float, Dim, Dim> &R,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points,
bool center) {
Eigen::Matrix<float, Dim, 1> points_center =
Eigen::Matrix<float, Dim, 1>::Zero();
if (center && !points.empty()) {
points_center = ComputeCenter<Dim>(points);
}
thrust::for_each(utility::exec_policy(stream)->on(stream), points.begin(),
points.end(),
[=] __device__(Eigen::Matrix<float, Dim, 1> & pt) {
pt = R * (pt - points_center) + points_center;
});
}
template void RotatePoints<2>(const Eigen::Matrix2f &R,
utility::device_vector<Eigen::Vector2f> &points,
bool center);
template void RotatePoints<3>(const Eigen::Matrix3f &R,
utility::device_vector<Eigen::Vector3f> &points,
bool center);
template void RotatePoints<2>(hipStream_t stream,
const Eigen::Matrix2f &R,
utility::device_vector<Eigen::Vector2f> &points,
bool center);
template void RotatePoints<3>(hipStream_t stream,
const Eigen::Matrix3f &R,
utility::device_vector<Eigen::Vector3f> &points,
bool center);
void RotateNormals(const Eigen::Matrix3f &R,
utility::device_vector<Eigen::Vector3f> &normals) {
RotateNormals(0, R, normals);
}
void RotateNormals(hipStream_t stream,
const Eigen::Matrix3f &R,
utility::device_vector<Eigen::Vector3f> &normals) {
thrust::for_each(utility::exec_policy(stream)->on(stream), normals.begin(),
normals.end(), [=] __device__(Eigen::Vector3f & normal) {
normal = R * normal;
});
}
Eigen::Matrix3f GetRotationMatrixFromXYZ(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixX(rotation(0)) *
cupoch::utility::RotationMatrixY(rotation(1)) *
cupoch::utility::RotationMatrixZ(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromYZX(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixY(rotation(0)) *
cupoch::utility::RotationMatrixZ(rotation(1)) *
cupoch::utility::RotationMatrixX(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromZXY(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixZ(rotation(0)) *
cupoch::utility::RotationMatrixX(rotation(1)) *
cupoch::utility::RotationMatrixY(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromXZY(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixX(rotation(0)) *
cupoch::utility::RotationMatrixZ(rotation(1)) *
cupoch::utility::RotationMatrixY(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromZYX(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixZ(rotation(0)) *
cupoch::utility::RotationMatrixY(rotation(1)) *
cupoch::utility::RotationMatrixX(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromYXZ(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixY(rotation(0)) *
cupoch::utility::RotationMatrixX(rotation(1)) *
cupoch::utility::RotationMatrixZ(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromAxisAngle(
const Eigen::Vector3f &rotation) {
const float phi = rotation.norm();
return Eigen::AngleAxisf(phi, rotation / phi).toRotationMatrix();
}
Eigen::Matrix3f GetRotationMatrixFromQuaternion(
const Eigen::Vector4f &rotation) {
return Eigen::Quaternionf(rotation(0), rotation(1), rotation(2),
rotation(3))
.normalized()
.toRotationMatrix();
}
} // namespace geometry
} // namespace cupoch | 4530229a9876f5393fe7541654a3d67ecdd8c694.cu | /**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <Eigen/Dense>
#include "cupoch/geometry/geometry_utils.h"
#include "cupoch/utility/console.h"
namespace cupoch {
namespace geometry {
namespace {
template <int Dim>
struct transform_points_functor {
transform_points_functor(
const Eigen::Matrix<float, Dim + 1, Dim + 1> &transform)
: transform_(transform){};
const Eigen::Matrix<float, Dim + 1, Dim + 1> transform_;
__device__ void operator()(Eigen::Matrix<float, Dim, 1> &pt) {
pt = transform_.template block<Dim, Dim>(0, 0) * pt + transform_.template block<Dim, 1>(0, Dim);
}
};
struct transform_normals_functor {
transform_normals_functor(const Eigen::Matrix4f &transform)
: transform_(transform){};
const Eigen::Matrix4f transform_;
__device__ void operator()(Eigen::Vector3f &nl) {
nl = transform_.block<3, 3>(0, 0) * nl;
}
};
} // namespace
template <int Dim, typename FuncT>
Eigen::Matrix<float, Dim, 1> ComputeBound(
cudaStream_t stream,
const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
if (points.empty()) return Eigen::Matrix<float, Dim, 1>::Zero();
Eigen::Matrix<float, Dim, 1> init = points[0];
return thrust::reduce(
utility::exec_policy(stream)->on(stream), points.begin(),
points.end(), init, FuncT());
}
template <int Dim>
Eigen::Matrix<float, Dim, 1> ComputeMinBound(
const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
return ComputeBound<Dim, thrust::elementwise_minimum<Eigen::Matrix<float, Dim, 1>>>(0, points);
}
template <int Dim>
Eigen::Matrix<float, Dim, 1> ComputeMaxBound(
const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
return ComputeBound<Dim, thrust::elementwise_maximum<Eigen::Matrix<float, Dim, 1>>>(0, points);
}
template <int Dim>
Eigen::Matrix<float, Dim, 1> ComputeMaxBound(
cudaStream_t stream,
const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
if (points.empty()) return Eigen::Matrix<float, Dim, 1>::Zero();
Eigen::Matrix<float, Dim, 1> init = points[0];
return thrust::reduce(
utility::exec_policy(stream)->on(stream), points.begin(),
points.end(), init,
thrust::elementwise_maximum<Eigen::Matrix<float, Dim, 1>>());
}
template <int Dim>
Eigen::Matrix<float, Dim, 1> ComputeCenter(
const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
Eigen::Matrix<float, Dim, 1> init = Eigen::Matrix<float, Dim, 1>::Zero();
if (points.empty()) return init;
Eigen::Matrix<float, Dim, 1> sum =
thrust::reduce(points.begin(), points.end(), init,
thrust::plus<Eigen::Matrix<float, Dim, 1>>());
return sum / points.size();
}
template Eigen::Matrix<float, 2, 1> ComputeBound<2, thrust::elementwise_minimum<Eigen::Matrix<float, 2, 1>>>(
cudaStream_t stream,
const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points);
template Eigen::Matrix<float, 3, 1> ComputeBound<3, thrust::elementwise_minimum<Eigen::Matrix<float, 3, 1>>>(
cudaStream_t stream,
const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points);
template Eigen::Matrix<float, 2, 1> ComputeBound<2, thrust::elementwise_maximum<Eigen::Matrix<float, 2, 1>>>(
cudaStream_t stream,
const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points);
template Eigen::Matrix<float, 3, 1> ComputeBound<3, thrust::elementwise_maximum<Eigen::Matrix<float, 3, 1>>>(
cudaStream_t stream,
const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points);
template Eigen::Matrix<float, 2, 1> ComputeMinBound<2>(
const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points);
template Eigen::Matrix<float, 3, 1> ComputeMinBound<3>(
const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points);
template Eigen::Matrix<float, 2, 1> ComputeMaxBound<2>(
const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points);
template Eigen::Matrix<float, 3, 1> ComputeMaxBound<3>(
const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points);
template Eigen::Matrix<float, 2, 1> ComputeCenter<2>(
const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points);
template Eigen::Matrix<float, 3, 1> ComputeCenter<3>(
const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points);
void ResizeAndPaintUniformColor(utility::device_vector<Eigen::Vector3f> &colors,
const size_t size,
const Eigen::Vector3f &color) {
colors.resize(size);
Eigen::Vector3f clipped_color = color;
if (color.minCoeff() < 0 || color.maxCoeff() > 1) {
utility::LogWarning(
"invalid color in PaintUniformColor, clipping to [0, 1]");
clipped_color = clipped_color.array()
.max(Eigen::Vector3f(0, 0, 0).array())
.matrix();
clipped_color = clipped_color.array()
.min(Eigen::Vector3f(1, 1, 1).array())
.matrix();
}
thrust::fill(colors.begin(), colors.end(), clipped_color);
}
template <int Dim>
void TransformPoints(
const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
TransformPoints<Dim>(0, transformation, points);
}
template <int Dim>
void TransformPoints(
cudaStream_t stream,
const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) {
transform_points_functor<Dim> func(transformation);
thrust::for_each(utility::exec_policy(stream)->on(stream), points.begin(),
points.end(), func);
}
template void TransformPoints<2>(const Eigen::Matrix3f &transformation,
utility::device_vector<Eigen::Vector2f> &points);
template void TransformPoints<2>(cudaStream_t stream,
const Eigen::Matrix3f &transformation,
utility::device_vector<Eigen::Vector2f> &points);
template void TransformPoints<3>(const Eigen::Matrix4f &transformation,
utility::device_vector<Eigen::Vector3f> &points);
template void TransformPoints<3>(cudaStream_t stream,
const Eigen::Matrix4f &transformation,
utility::device_vector<Eigen::Vector3f> &points);
void TransformNormals(const Eigen::Matrix4f &transformation,
utility::device_vector<Eigen::Vector3f> &normals) {
TransformNormals(0, transformation, normals);
}
void TransformNormals(cudaStream_t stream,
const Eigen::Matrix4f &transformation,
utility::device_vector<Eigen::Vector3f> &normals) {
transform_normals_functor func(transformation);
thrust::for_each(utility::exec_policy(stream)->on(stream), normals.begin(),
normals.end(), func);
}
template <int Dim>
void TranslatePoints(
const Eigen::Matrix<float, Dim, 1> &translation,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points,
bool relative) {
Eigen::Matrix<float, Dim, 1> transform = translation;
if (!relative) {
transform -= ComputeCenter<Dim>(points);
}
thrust::for_each(points.begin(), points.end(),
[=] __device__(Eigen::Matrix<float, Dim, 1> & pt) {
pt += transform;
});
}
template <int Dim>
void ScalePoints(const float scale,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points,
bool center) {
Eigen::Matrix<float, Dim, 1> points_center =
Eigen::Matrix<float, Dim, 1>::Zero();
if (center && !points.empty()) {
points_center = ComputeCenter<Dim>(points);
}
thrust::for_each(points.begin(), points.end(),
[=] __device__(Eigen::Matrix<float, Dim, 1> & pt) {
pt = (pt - points_center) * scale + points_center;
});
}
template void TranslatePoints<2>(const Eigen::Vector2f &translation,
utility::device_vector<Eigen::Vector2f> &points,
bool relative);
template void TranslatePoints<3>(const Eigen::Vector3f &translation,
utility::device_vector<Eigen::Vector3f> &points,
bool relative);
template void ScalePoints<2>(const float scale,
utility::device_vector<Eigen::Vector2f> &points,
bool center);
template void ScalePoints<3>(const float scale,
utility::device_vector<Eigen::Vector3f> &points,
bool center);
template <int Dim>
void RotatePoints(const Eigen::Matrix<float, Dim, Dim> &R,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points,
bool center) {
RotatePoints<Dim>(0, R, points, center);
}
template <int Dim>
void RotatePoints(cudaStream_t stream,
const Eigen::Matrix<float, Dim, Dim> &R,
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points,
bool center) {
Eigen::Matrix<float, Dim, 1> points_center =
Eigen::Matrix<float, Dim, 1>::Zero();
if (center && !points.empty()) {
points_center = ComputeCenter<Dim>(points);
}
thrust::for_each(utility::exec_policy(stream)->on(stream), points.begin(),
points.end(),
[=] __device__(Eigen::Matrix<float, Dim, 1> & pt) {
pt = R * (pt - points_center) + points_center;
});
}
template void RotatePoints<2>(const Eigen::Matrix2f &R,
utility::device_vector<Eigen::Vector2f> &points,
bool center);
template void RotatePoints<3>(const Eigen::Matrix3f &R,
utility::device_vector<Eigen::Vector3f> &points,
bool center);
template void RotatePoints<2>(cudaStream_t stream,
const Eigen::Matrix2f &R,
utility::device_vector<Eigen::Vector2f> &points,
bool center);
template void RotatePoints<3>(cudaStream_t stream,
const Eigen::Matrix3f &R,
utility::device_vector<Eigen::Vector3f> &points,
bool center);
void RotateNormals(const Eigen::Matrix3f &R,
utility::device_vector<Eigen::Vector3f> &normals) {
RotateNormals(0, R, normals);
}
void RotateNormals(cudaStream_t stream,
const Eigen::Matrix3f &R,
utility::device_vector<Eigen::Vector3f> &normals) {
thrust::for_each(utility::exec_policy(stream)->on(stream), normals.begin(),
normals.end(), [=] __device__(Eigen::Vector3f & normal) {
normal = R * normal;
});
}
Eigen::Matrix3f GetRotationMatrixFromXYZ(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixX(rotation(0)) *
cupoch::utility::RotationMatrixY(rotation(1)) *
cupoch::utility::RotationMatrixZ(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromYZX(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixY(rotation(0)) *
cupoch::utility::RotationMatrixZ(rotation(1)) *
cupoch::utility::RotationMatrixX(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromZXY(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixZ(rotation(0)) *
cupoch::utility::RotationMatrixX(rotation(1)) *
cupoch::utility::RotationMatrixY(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromXZY(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixX(rotation(0)) *
cupoch::utility::RotationMatrixZ(rotation(1)) *
cupoch::utility::RotationMatrixY(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromZYX(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixZ(rotation(0)) *
cupoch::utility::RotationMatrixY(rotation(1)) *
cupoch::utility::RotationMatrixX(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromYXZ(const Eigen::Vector3f &rotation) {
return cupoch::utility::RotationMatrixY(rotation(0)) *
cupoch::utility::RotationMatrixX(rotation(1)) *
cupoch::utility::RotationMatrixZ(rotation(2));
}
Eigen::Matrix3f GetRotationMatrixFromAxisAngle(
const Eigen::Vector3f &rotation) {
const float phi = rotation.norm();
return Eigen::AngleAxisf(phi, rotation / phi).toRotationMatrix();
}
Eigen::Matrix3f GetRotationMatrixFromQuaternion(
const Eigen::Vector4f &rotation) {
return Eigen::Quaternionf(rotation(0), rotation(1), rotation(2),
rotation(3))
.normalized()
.toRotationMatrix();
}
} // namespace geometry
} // namespace cupoch |
0b957ae9ba563647a70a199890e552968c2e5a82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// headers in STL
#include <iostream>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h"
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/preprocess_points_cuda.h"
namespace apollo {
namespace perception {
namespace lidar {
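// One thread per input point: locate the pillar (grid cell) containing the
// point, atomically bump that pillar's point count, and copy the raw point
// features into the per-pillar scratch buffer (at most max_points_per_pillar
// per pillar); points outside the grid are dropped.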
__global__ void make_pillar_histo_kernel(
const float* dev_points, float* dev_pillar_point_feature_in_coors,
int* pillar_count_histo, const int num_points,
const int max_points_per_pillar, const int grid_x_size,
const int grid_y_size, const int grid_z_size, const float min_x_range,
const float min_y_range, const float min_z_range, const float pillar_x_size,
const float pillar_y_size, const float pillar_z_size,
const int num_point_feature) {
int th_i = threadIdx.x + blockIdx.x * blockDim.x;
if (th_i >= num_points) {
return;
}
int y_coor = floor((dev_points[th_i * num_point_feature + 1] - min_y_range) /
pillar_y_size);
int x_coor = floor((dev_points[th_i * num_point_feature + 0] - min_x_range) /
pillar_x_size);
int z_coor = floor((dev_points[th_i * num_point_feature + 2] - min_z_range) /
pillar_z_size);
if (x_coor >= 0 && x_coor < grid_x_size && y_coor >= 0 &&
y_coor < grid_y_size && z_coor >= 0 && z_coor < grid_z_size) {
int count =
atomicAdd(&pillar_count_histo[y_coor * grid_x_size + x_coor], 1);
if (count < max_points_per_pillar) {
int ind =
y_coor * grid_x_size * max_points_per_pillar * num_point_feature +
x_coor * max_points_per_pillar * num_point_feature +
count * num_point_feature;
for (int i = 0; i < num_point_feature; ++i) {
dev_pillar_point_feature_in_coors[ind + i] =
dev_points[th_i * num_point_feature + i];
}
}
}
}
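// One thread per grid cell: each non-empty pillar claims a slot through an
// atomic counter (keeping at most max_pillars), records its x/y cell
// coordinates and clamped point count, and flags the cell in the sparse
// pillar map.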
__global__ void make_pillar_index_kernel(
int* dev_pillar_count_histo, int* dev_counter, int* dev_pillar_count,
int* dev_x_coors, int* dev_y_coors, float* dev_num_points_per_pillar,
int* dev_sparse_pillar_map, const int max_pillars,
const int max_points_per_pillar, const int grid_x_size,
const int num_inds_for_scan) {
int x = blockIdx.x;
int y = threadIdx.x;
int num_points_at_this_pillar = dev_pillar_count_histo[y * grid_x_size + x];
if (num_points_at_this_pillar == 0) {
return;
}
int count = atomicAdd(dev_counter, 1);
if (count < max_pillars) {
atomicAdd(dev_pillar_count, 1);
if (num_points_at_this_pillar >= max_points_per_pillar) {
dev_num_points_per_pillar[count] = max_points_per_pillar;
} else {
dev_num_points_per_pillar[count] = num_points_at_this_pillar;
}
dev_x_coors[count] = x;
dev_y_coors[count] = y;
dev_sparse_pillar_map[y * num_inds_for_scan + x] = 1;
}
}
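// One block per selected pillar, one thread per point: gathers the pillar's
// point features from the coordinate-ordered scratch buffer into the dense
// per-pillar feature tensor and writes the pillar's (batch, z, y, x)
// coordinates.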
__global__ void make_pillar_feature_kernel(
float* dev_pillar_point_feature_in_coors, float* dev_pillar_point_feature,
float* dev_pillar_coors, int* dev_x_coors, int* dev_y_coors,
float* dev_num_points_per_pillar, const int max_points,
const int num_point_feature, const int grid_x_size) {
int ith_pillar = blockIdx.x;
int num_points_at_this_pillar = dev_num_points_per_pillar[ith_pillar];
int ith_point = threadIdx.x;
if (ith_point >= num_points_at_this_pillar) {
return;
}
int x_ind = dev_x_coors[ith_pillar];
int y_ind = dev_y_coors[ith_pillar];
int pillar_ind = ith_pillar * max_points * num_point_feature +
ith_point * num_point_feature;
int coors_ind = y_ind * grid_x_size * max_points * num_point_feature +
x_ind * max_points * num_point_feature +
ith_point * num_point_feature;
for (int i = 0; i < num_point_feature; ++i) {
dev_pillar_point_feature[pillar_ind + i] =
dev_pillar_point_feature_in_coors[coors_ind + i];
}
float coor_x = static_cast<float>(x_ind);
float coor_y = static_cast<float>(y_ind);
dev_pillar_coors[ith_pillar * 4 + 0] = 0; // batch idx
dev_pillar_coors[ith_pillar * 4 + 1] = 0; // z
dev_pillar_coors[ith_pillar * 4 + 2] = coor_y;
dev_pillar_coors[ith_pillar * 4 + 3] = coor_x;
}
PreprocessPointsCuda::PreprocessPointsCuda(
const int num_threads, const int max_num_pillars,
const int max_points_per_pillar, const int num_point_feature,
const int num_inds_for_scan, const int grid_x_size, const int grid_y_size,
const int grid_z_size, const float pillar_x_size, const float pillar_y_size,
const float pillar_z_size, const float min_x_range, const float min_y_range,
const float min_z_range)
: num_threads_(num_threads),
max_num_pillars_(max_num_pillars),
max_num_points_per_pillar_(max_points_per_pillar),
num_point_feature_(num_point_feature),
num_inds_for_scan_(num_inds_for_scan),
grid_x_size_(grid_x_size),
grid_y_size_(grid_y_size),
grid_z_size_(grid_z_size),
pillar_x_size_(pillar_x_size),
pillar_y_size_(pillar_y_size),
pillar_z_size_(pillar_z_size),
min_x_range_(min_x_range),
min_y_range_(min_y_range),
min_z_range_(min_z_range) {
GPU_CHECK(
hipMalloc(reinterpret_cast<void**>(&dev_pillar_point_feature_in_coors_),
grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_ *
num_point_feature_ * sizeof(float)));
GPU_CHECK(hipMalloc(reinterpret_cast<void**>(&dev_pillar_count_histo_),
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(hipMalloc(reinterpret_cast<void**>(&dev_counter_), sizeof(int)));
GPU_CHECK(
hipMalloc(reinterpret_cast<void**>(&dev_pillar_count_), sizeof(int)));
}
PreprocessPointsCuda::~PreprocessPointsCuda() {
GPU_CHECK(hipFree(dev_pillar_point_feature_in_coors_));
GPU_CHECK(hipFree(dev_pillar_count_histo_));
GPU_CHECK(hipFree(dev_counter_));
GPU_CHECK(hipFree(dev_pillar_count_));
}
void PreprocessPointsCuda::DoPreprocessPointsCuda(
const float* dev_points, const int in_num_points, int* dev_x_coors,
int* dev_y_coors, float* dev_num_points_per_pillar,
float* dev_pillar_point_feature, float* dev_pillar_coors,
int* dev_sparse_pillar_map, int* host_pillar_count) {
GPU_CHECK(hipMemset(dev_pillar_count_histo_, 0,
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(hipMemset(dev_counter_, 0, sizeof(int)));
GPU_CHECK(hipMemset(dev_pillar_count_, 0, sizeof(int)));
int num_block = DIVUP(in_num_points, num_threads_);
hipLaunchKernelGGL(( make_pillar_histo_kernel), dim3(num_block), dim3(num_threads_), 0, 0,
dev_points, dev_pillar_point_feature_in_coors_, dev_pillar_count_histo_,
in_num_points, max_num_points_per_pillar_, grid_x_size_, grid_y_size_,
grid_z_size_, min_x_range_, min_y_range_, min_z_range_, pillar_x_size_,
pillar_y_size_, pillar_z_size_, num_point_feature_);
hipLaunchKernelGGL(( make_pillar_index_kernel), dim3(grid_x_size_), dim3(grid_y_size_), 0, 0,
dev_pillar_count_histo_, dev_counter_, dev_pillar_count_, dev_x_coors,
dev_y_coors, dev_num_points_per_pillar, dev_sparse_pillar_map,
max_num_pillars_, max_num_points_per_pillar_, grid_x_size_,
num_inds_for_scan_);
GPU_CHECK(hipMemcpy(host_pillar_count, dev_pillar_count_, sizeof(int),
hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( make_pillar_feature_kernel), dim3(host_pillar_count[0]),
dim3(max_num_points_per_pillar_), 0, 0,
dev_pillar_point_feature_in_coors_, dev_pillar_point_feature,
dev_pillar_coors, dev_x_coors, dev_y_coors, dev_num_points_per_pillar,
max_num_points_per_pillar_, num_point_feature_, grid_x_size_);
}
} // namespace lidar
} // namespace perception
} // namespace apollo
| 0b957ae9ba563647a70a199890e552968c2e5a82.cu | /******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// headers in STL
#include <iostream>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h"
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/preprocess_points_cuda.h"
namespace apollo {
namespace perception {
namespace lidar {
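// One thread per input point: locate the pillar (grid cell) containing the
// point, atomically bump that pillar's point count, and copy the raw point
// features into the per-pillar scratch buffer (at most max_points_per_pillar
// per pillar); points outside the grid are dropped.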
__global__ void make_pillar_histo_kernel(
const float* dev_points, float* dev_pillar_point_feature_in_coors,
int* pillar_count_histo, const int num_points,
const int max_points_per_pillar, const int grid_x_size,
const int grid_y_size, const int grid_z_size, const float min_x_range,
const float min_y_range, const float min_z_range, const float pillar_x_size,
const float pillar_y_size, const float pillar_z_size,
const int num_point_feature) {
int th_i = threadIdx.x + blockIdx.x * blockDim.x;
if (th_i >= num_points) {
return;
}
int y_coor = floor((dev_points[th_i * num_point_feature + 1] - min_y_range) /
pillar_y_size);
int x_coor = floor((dev_points[th_i * num_point_feature + 0] - min_x_range) /
pillar_x_size);
int z_coor = floor((dev_points[th_i * num_point_feature + 2] - min_z_range) /
pillar_z_size);
if (x_coor >= 0 && x_coor < grid_x_size && y_coor >= 0 &&
y_coor < grid_y_size && z_coor >= 0 && z_coor < grid_z_size) {
int count =
atomicAdd(&pillar_count_histo[y_coor * grid_x_size + x_coor], 1);
if (count < max_points_per_pillar) {
int ind =
y_coor * grid_x_size * max_points_per_pillar * num_point_feature +
x_coor * max_points_per_pillar * num_point_feature +
count * num_point_feature;
for (int i = 0; i < num_point_feature; ++i) {
dev_pillar_point_feature_in_coors[ind + i] =
dev_points[th_i * num_point_feature + i];
}
}
}
}
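// One thread per grid cell: each non-empty pillar claims a slot through an
// atomic counter (keeping at most max_pillars), records its x/y cell
// coordinates and clamped point count, and flags the cell in the sparse
// pillar map.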
__global__ void make_pillar_index_kernel(
int* dev_pillar_count_histo, int* dev_counter, int* dev_pillar_count,
int* dev_x_coors, int* dev_y_coors, float* dev_num_points_per_pillar,
int* dev_sparse_pillar_map, const int max_pillars,
const int max_points_per_pillar, const int grid_x_size,
const int num_inds_for_scan) {
int x = blockIdx.x;
int y = threadIdx.x;
int num_points_at_this_pillar = dev_pillar_count_histo[y * grid_x_size + x];
if (num_points_at_this_pillar == 0) {
return;
}
int count = atomicAdd(dev_counter, 1);
if (count < max_pillars) {
atomicAdd(dev_pillar_count, 1);
if (num_points_at_this_pillar >= max_points_per_pillar) {
dev_num_points_per_pillar[count] = max_points_per_pillar;
} else {
dev_num_points_per_pillar[count] = num_points_at_this_pillar;
}
dev_x_coors[count] = x;
dev_y_coors[count] = y;
dev_sparse_pillar_map[y * num_inds_for_scan + x] = 1;
}
}
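// One block per selected pillar, one thread per point: gathers the pillar's
// point features from the coordinate-ordered scratch buffer into the dense
// per-pillar feature tensor and writes the pillar's (batch, z, y, x)
// coordinates.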
__global__ void make_pillar_feature_kernel(
float* dev_pillar_point_feature_in_coors, float* dev_pillar_point_feature,
float* dev_pillar_coors, int* dev_x_coors, int* dev_y_coors,
float* dev_num_points_per_pillar, const int max_points,
const int num_point_feature, const int grid_x_size) {
int ith_pillar = blockIdx.x;
int num_points_at_this_pillar = dev_num_points_per_pillar[ith_pillar];
int ith_point = threadIdx.x;
if (ith_point >= num_points_at_this_pillar) {
return;
}
int x_ind = dev_x_coors[ith_pillar];
int y_ind = dev_y_coors[ith_pillar];
int pillar_ind = ith_pillar * max_points * num_point_feature +
ith_point * num_point_feature;
int coors_ind = y_ind * grid_x_size * max_points * num_point_feature +
x_ind * max_points * num_point_feature +
ith_point * num_point_feature;
for (int i = 0; i < num_point_feature; ++i) {
dev_pillar_point_feature[pillar_ind + i] =
dev_pillar_point_feature_in_coors[coors_ind + i];
}
float coor_x = static_cast<float>(x_ind);
float coor_y = static_cast<float>(y_ind);
dev_pillar_coors[ith_pillar * 4 + 0] = 0; // batch idx
dev_pillar_coors[ith_pillar * 4 + 1] = 0; // z
dev_pillar_coors[ith_pillar * 4 + 2] = coor_y;
dev_pillar_coors[ith_pillar * 4 + 3] = coor_x;
}
PreprocessPointsCuda::PreprocessPointsCuda(
const int num_threads, const int max_num_pillars,
const int max_points_per_pillar, const int num_point_feature,
const int num_inds_for_scan, const int grid_x_size, const int grid_y_size,
const int grid_z_size, const float pillar_x_size, const float pillar_y_size,
const float pillar_z_size, const float min_x_range, const float min_y_range,
const float min_z_range)
: num_threads_(num_threads),
max_num_pillars_(max_num_pillars),
max_num_points_per_pillar_(max_points_per_pillar),
num_point_feature_(num_point_feature),
num_inds_for_scan_(num_inds_for_scan),
grid_x_size_(grid_x_size),
grid_y_size_(grid_y_size),
grid_z_size_(grid_z_size),
pillar_x_size_(pillar_x_size),
pillar_y_size_(pillar_y_size),
pillar_z_size_(pillar_z_size),
min_x_range_(min_x_range),
min_y_range_(min_y_range),
min_z_range_(min_z_range) {
GPU_CHECK(
cudaMalloc(reinterpret_cast<void**>(&dev_pillar_point_feature_in_coors_),
grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_ *
num_point_feature_ * sizeof(float)));
GPU_CHECK(cudaMalloc(reinterpret_cast<void**>(&dev_pillar_count_histo_),
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(cudaMalloc(reinterpret_cast<void**>(&dev_counter_), sizeof(int)));
GPU_CHECK(
cudaMalloc(reinterpret_cast<void**>(&dev_pillar_count_), sizeof(int)));
}
PreprocessPointsCuda::~PreprocessPointsCuda() {
GPU_CHECK(cudaFree(dev_pillar_point_feature_in_coors_));
GPU_CHECK(cudaFree(dev_pillar_count_histo_));
GPU_CHECK(cudaFree(dev_counter_));
GPU_CHECK(cudaFree(dev_pillar_count_));
}
void PreprocessPointsCuda::DoPreprocessPointsCuda(
const float* dev_points, const int in_num_points, int* dev_x_coors,
int* dev_y_coors, float* dev_num_points_per_pillar,
float* dev_pillar_point_feature, float* dev_pillar_coors,
int* dev_sparse_pillar_map, int* host_pillar_count) {
GPU_CHECK(cudaMemset(dev_pillar_count_histo_, 0,
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(cudaMemset(dev_counter_, 0, sizeof(int)));
GPU_CHECK(cudaMemset(dev_pillar_count_, 0, sizeof(int)));
int num_block = DIVUP(in_num_points, num_threads_);
make_pillar_histo_kernel<<<num_block, num_threads_>>>(
dev_points, dev_pillar_point_feature_in_coors_, dev_pillar_count_histo_,
in_num_points, max_num_points_per_pillar_, grid_x_size_, grid_y_size_,
grid_z_size_, min_x_range_, min_y_range_, min_z_range_, pillar_x_size_,
pillar_y_size_, pillar_z_size_, num_point_feature_);
make_pillar_index_kernel<<<grid_x_size_, grid_y_size_>>>(
dev_pillar_count_histo_, dev_counter_, dev_pillar_count_, dev_x_coors,
dev_y_coors, dev_num_points_per_pillar, dev_sparse_pillar_map,
max_num_pillars_, max_num_points_per_pillar_, grid_x_size_,
num_inds_for_scan_);
GPU_CHECK(cudaMemcpy(host_pillar_count, dev_pillar_count_, sizeof(int),
cudaMemcpyDeviceToHost));
make_pillar_feature_kernel<<<host_pillar_count[0],
max_num_points_per_pillar_>>>(
dev_pillar_point_feature_in_coors_, dev_pillar_point_feature,
dev_pillar_coors, dev_x_coors, dev_y_coors, dev_num_points_per_pillar,
max_num_points_per_pillar_, num_point_feature_, grid_x_size_);
}
} // namespace lidar
} // namespace perception
} // namespace apollo
|
dacec50c85bf18760290f2c2267d1852a611acda.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "someDefinitions.h"
#include <hip/hip_runtime.h>
#include <stdlib.h>
// Error handling macro
#define CUDA_CHECK(call) \
if((call) != hipSuccess) { \
hipError_t err = hipGetLastError(); \
printf("CUDA error calling, code is %d\n", err);}
__global__ void matrixMulKernel(double *d_a, double *d_b, double *d_c, int height, int width_a, int width_b) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < height && col < width_b) {
double p_result = 0;
for (int k = 0; k < width_a; k++) {
p_result += d_a[row * width_a + k] * d_b[k * width_b + col];
}
d_c[row * width_b + col] = p_result;
}
}
void cudaCall(int f1, int c1, int f2, double* M1, double* M2, double* M3){
double *d_M, *d_N, *d_P;
hipMalloc(&d_M, sizeof(double) * f1 * c1);
hipMalloc(&d_N, sizeof(double) * c1 * f2);
hipMalloc(&d_P, sizeof(double) * f1 * f2);
hipError_t error = hipSuccess;
error = hipMemcpy(d_M, M1, f1 * c1 * sizeof(double), hipMemcpyHostToDevice);
if(error != hipSuccess){
printf("Error copiando datos a d_M");
exit(0);
}
error = hipMemcpy(d_N, M2, c1 * f2 * sizeof(double), hipMemcpyHostToDevice);
if(error != hipSuccess){
printf("Error copiando datos a d_N");
exit(0);
}
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(f2/double(blockSize)),ceil(f1/double(blockSize)),1); // grid.x spans the f2 output columns, grid.y the f1 rows
hipLaunchKernelGGL(( matrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_M,d_N,d_P, f1, c1, f2);
hipDeviceSynchronize();
hipMemcpy(M3,d_P, f1 * f2 * sizeof(double),hipMemcpyDeviceToHost);
hipFree(d_M);
hipFree(d_N);
hipFree(d_P);
// End of GPU matrix multiplication
}
| dacec50c85bf18760290f2c2267d1852a611acda.cu | #include <stdio.h>
#include <stdlib.h>
#include "someDefinitions.h"
#include <cuda.h>
#include <stdlib.h>
// Error handling macro
#define CUDA_CHECK(call) \
if((call) != cudaSuccess) { \
cudaError_t err = cudaGetLastError(); \
printf("CUDA error calling, code is %d\n", err);}
__global__ void matrixMulKernel(double *d_a, double *d_b, double *d_c, int height, int width_a, int width_b) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < height && col < width_b) {
double p_result = 0;
for (int k = 0; k < width_a; k++) {
p_result += d_a[row * width_a + k] * d_b[k * width_b + col];
}
d_c[row * width_b + col] = p_result;
}
}
void cudaCall(int f1, int c1, int f2, double* M1, double* M2, double* M3){
double *d_M, *d_N, *d_P;
cudaMalloc(&d_M, sizeof(double) * f1 * c1);
cudaMalloc(&d_N, sizeof(double) * c1 * f2);
cudaMalloc(&d_P, sizeof(double) * f1 * f2);
cudaError_t error = cudaSuccess;
error = cudaMemcpy(d_M, M1, f1 * c1 * sizeof(double), cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando datos a d_M");
exit(0);
}
error = cudaMemcpy(d_N, M2, c1 * f2 * sizeof(double), cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando datos a d_N");
exit(0);
}
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(f2/double(blockSize)),ceil(f1/double(blockSize)),1); // grid.x spans the f2 output columns, grid.y the f1 rows
matrixMulKernel<<<dimGrid,dimBlock>>>(d_M,d_N,d_P, f1, c1, f2);
cudaDeviceSynchronize();
cudaMemcpy(M3,d_P, f1 * f2 * sizeof(double),cudaMemcpyDeviceToHost);
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
// End of GPU matrix multiplication
}
|
57da24b9724132ccecc880dbeba61e499895e753.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) "2019, by Stanford University
// Developer: Mario Di Renzo
// Affiliation: Center for Turbulence Research, Stanford University
// URL: https://ctr.stanford.edu
// Citation: Di Renzo, M., Lin, F., and Urzay, J. (2020).
// HTR solver: An open-source exascale-oriented task-based
// multi-GPU high-order code for hypersonic aerothermodynamics.
// Computer Physics Communications 255, 107262"
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "prometeo_bc.hpp"
#include "cuda_utils.hpp"
// Declare a constant memory that will hold the Mixture struct (initialized in prometeo_mixture.cu)
extern __device__ __constant__ Mix mix;
//-----------------------------------------------------------------------------
// KERNELS FOR AddRecycleAverageTask
//-----------------------------------------------------------------------------
__global__
void AddRecycleAverageTask_kernel(const AccessorRO<double, 3> dcsi_d,
const AccessorRO<double, 3> deta_d,
const AccessorRO<double, 3> dzet_d,
const AccessorRO<VecNSp, 3> MolarFracs_profile,
const AccessorRO<double, 3> temperature_profile,
const AccessorRO< Vec3, 3> velocity_profile,
const AccessorSumRD<VecNSp, 1> avg_MolarFracs,
const AccessorSumRD< Vec3, 1> avg_velocity,
const AccessorSumRD<double, 1> avg_temperature,
const AccessorSumRD<double, 1> avg_rho,
const double Pbc,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
AddRecycleAverageTask::collectAverages(dcsi_d, deta_d, dzet_d,
MolarFracs_profile, temperature_profile, velocity_profile,
avg_MolarFracs, avg_velocity, avg_temperature,
avg_rho, Pbc, p, mix);
}
}
__host__
void AddRecycleAverageTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(futures.size() == 0);
// Accessors for metrics
const AccessorRO<double, 3> acc_dcsi_d (regions[0], FID_dcsi_d);
const AccessorRO<double, 3> acc_deta_d (regions[0], FID_deta_d);
const AccessorRO<double, 3> acc_dzet_d (regions[0], FID_dzet_d);
// Accessors for profile variables
const AccessorRO<VecNSp, 3> acc_MolarFracs_profile (regions[0], FID_MolarFracs_profile);
const AccessorRO<double, 3> acc_temperature_profile (regions[0], FID_temperature_profile);
const AccessorRO< Vec3, 3> acc_velocity_profile (regions[0], FID_velocity_profile);
// Accessors for averages
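// (sum-reduction accessors: Legion combines contributions made to the same
// average entries by different instances of this task)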
const AccessorSumRD<double, 1> acc_avg_rho (regions[1], RA_FID_rho, LEGION_REDOP_SUM_FLOAT64);
const AccessorSumRD<double, 1> acc_avg_temperature (regions[1], RA_FID_temperature, LEGION_REDOP_SUM_FLOAT64);
const AccessorSumRD<VecNSp, 1> acc_avg_MolarFracs (regions[2], RA_FID_MolarFracs, REGENT_REDOP_SUM_VECNSP);
const AccessorSumRD< Vec3, 1> acc_avg_velocity (regions[3], RA_FID_velocity, REGENT_REDOP_SUM_VEC3);
// Extract execution domain
const Rect<3> r_plane = runtime->get_index_space_domain(ctx,
regions[0].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_plane);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_plane) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_plane) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_plane) + (TPB_3d.z - 1)) / TPB_3d.z);
hipLaunchKernelGGL(( AddRecycleAverageTask_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0, acc_dcsi_d, acc_deta_d, acc_dzet_d,
acc_MolarFracs_profile, acc_temperature_profile, acc_velocity_profile,
acc_avg_MolarFracs, acc_avg_velocity, acc_avg_temperature, acc_avg_rho,
args.Pbc, r_plane,
getSize<Xdir>(r_plane), getSize<Ydir>(r_plane), getSize<Zdir>(r_plane));
}
//-----------------------------------------------------------------------------
// KERNELS FOR SetNSCBC_InflowBC
//-----------------------------------------------------------------------------
template<direction dir>
__global__
void SetNSCBC_InflowBC_kernel(const AccessorRO<VecNEq, 3> Conserved,
const AccessorRO<double, 3> SoS,
const AccessorRO<VecNSp, 3> MolarFracs_profile,
const AccessorRO<double, 3> temperature_profile,
const AccessorRO< Vec3, 3> velocity_profile,
const AccessorWO<double, 3> pressure,
const AccessorWO<double, 3> temperature,
const AccessorWO<VecNSp, 3> MolarFracs,
const AccessorWO< Vec3, 3> velocity,
const double Pbc,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
// Index of normal direction
constexpr int iN = normalIndex(dir);
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
MolarFracs[p] = MolarFracs_profile[p];
temperature[p] = temperature_profile[p];
velocity[p] = velocity_profile[p];
if (fabs(velocity_profile[p][iN]) >= SoS[p])
// It is supersonic, everything is imposed by the BC
pressure[p] = Pbc;
else
// Compute pressure from NSCBC conservation equations
SetNSCBC_InflowBCTask<dir>::setInflowPressure(
Conserved, MolarFracs_profile, temperature_profile,
pressure, p, mix);
}
}
template<direction dir>
__host__
void SetNSCBC_InflowBCTask<dir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessor for conserved variables
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
// Accessor for speed of sound
const AccessorRO<double, 3> acc_SoS (regions[0], FID_SoS);
// Accessors for profile variables
const AccessorRO<VecNSp, 3> acc_MolarFracs_profile (regions[0], FID_MolarFracs_profile);
const AccessorRO<double, 3> acc_temperature_profile (regions[0], FID_temperature_profile);
const AccessorRO< Vec3, 3> acc_velocity_profile (regions[0], FID_velocity_profile);
// Accessors for primitive variables
const AccessorWO<double, 3> acc_pressure (regions[1], FID_pressure);
const AccessorWO<double, 3> acc_temperature (regions[1], FID_temperature);
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
const AccessorWO< Vec3, 3> acc_velocity (regions[1], FID_velocity);
// Extract execution domain
const Rect<3> r_BC = runtime->get_index_space_domain(ctx,
regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_BC);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_BC) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_BC) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_BC) + (TPB_3d.z - 1)) / TPB_3d.z);
hipLaunchKernelGGL(( SetNSCBC_InflowBC_kernel<dir>), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0, acc_Conserved, acc_SoS,
acc_MolarFracs_profile, acc_temperature_profile, acc_velocity_profile,
acc_pressure, acc_temperature, acc_MolarFracs, acc_velocity,
args.Pbc, r_BC,
getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
}
template void SetNSCBC_InflowBCTask<Xdir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void SetNSCBC_InflowBCTask<Ydir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void SetNSCBC_InflowBCTask<Zdir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
//-----------------------------------------------------------------------------
// KERNELS FOR SetNSCBC_OutflowBC
//-----------------------------------------------------------------------------
__global__
void SetNSCBC_OutflowBC_kernel(const AccessorRO<VecNEq, 3> Conserved,
const AccessorRW<double, 3> temperature,
const AccessorWO<double, 3> pressure,
const AccessorWO<VecNSp, 3> MolarFracs,
const AccessorWO< Vec3, 3> velocity,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
UpdatePrimitiveFromConservedTask::UpdatePrimitive(
Conserved, temperature, pressure,
MolarFracs, velocity,
p, mix);
}
}
__host__
void SetNSCBC_OutflowBCTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessors for conserved variables
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
// Accessors for temperature
const AccessorRW<double, 3> acc_temperature (regions[1], FID_temperature);
// Accessors for primitive variables
const AccessorWO<double, 3> acc_pressure (regions[1], FID_pressure);
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
const AccessorWO< Vec3, 3> acc_velocity (regions[1], FID_velocity);
// Extract execution domain
const Rect<3> r_BC = runtime->get_index_space_domain(ctx,
regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_BC);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_BC) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_BC) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_BC) + (TPB_3d.z - 1)) / TPB_3d.z);
hipLaunchKernelGGL(( SetNSCBC_OutflowBC_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0,
acc_Conserved, acc_temperature, acc_pressure,
acc_MolarFracs, acc_velocity, r_BC,
getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
}
//-----------------------------------------------------------------------------
// KERNELS FOR SetIncomingShockBCTask
//-----------------------------------------------------------------------------
__global__
void SetIncomingShockBC_kernel(const AccessorRO<VecNEq, 3> Conserved,
const AccessorRW<double, 3> temperature,
const AccessorWO<double, 3> pressure,
const AccessorWO<VecNSp, 3> MolarFracs,
const AccessorWO< Vec3, 3> velocity,
const Vec3 velocity0,
const double temperature0,
const double pressure0,
const Vec3 velocity1,
const double temperature1,
const double pressure1,
const VecNSp MolarFracs0,
const int iShock,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
if ((p.x < iShock - 1) or
(p.x > iShock + 1)) {
// Treat as an outflow
UpdatePrimitiveFromConservedTask::UpdatePrimitive(
Conserved, temperature, pressure,
MolarFracs, velocity,
p, mix);
// Inject the shock over three points
} else if (p.x == iShock - 1) {
MolarFracs[p] = MolarFracs0;
velocity[p] = 0.75*velocity0 + 0.25*velocity1;
temperature[p] = 0.75*temperature0 + 0.25*temperature1;
pressure[p] = 0.75*pressure0 + 0.25*pressure1;
} else if (p.x == iShock) {
MolarFracs[p] = MolarFracs0;
velocity[p] = 0.25*velocity0 + 0.75*velocity1;
temperature[p] = 0.25*temperature0 + 0.75*temperature1;
pressure[p] = 0.25*pressure0 + 0.75*pressure1;
} else if (p.x == iShock + 1) {
MolarFracs[p] = MolarFracs0;
velocity[p] = velocity1;
temperature[p] = temperature1;
pressure[p] = pressure1;
}
}
}
__host__
void SetIncomingShockBCTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessor for conserved variables
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
// Accessors for temperature
const AccessorRW<double, 3> acc_temperature (regions[1], FID_temperature);
// Accessors for primitive variables
const AccessorWO<double, 3> acc_pressure (regions[1], FID_pressure);
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
const AccessorWO< Vec3, 3> acc_velocity (regions[1], FID_velocity);
// Extract execution domain
const Rect<3> r_BC = runtime->get_index_space_domain(ctx,
regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_BC);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_BC) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_BC) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_BC) + (TPB_3d.z - 1)) / TPB_3d.z);
hipLaunchKernelGGL(( SetIncomingShockBC_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0,
acc_Conserved,
acc_temperature, acc_pressure, acc_MolarFracs, acc_velocity,
Vec3(args.params.velocity0), args.params.temperature0, args.params.pressure0,
Vec3(args.params.velocity1), args.params.temperature1, args.params.pressure1,
VecNSp(args.params.MolarFracs), args.params.iShock,
r_BC, getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
}
//-----------------------------------------------------------------------------
// KERNELS FOR SetRecycleRescalingBCTask
//-----------------------------------------------------------------------------
// Workaround for Legion issue #879
struct SetRecycleRescalingBC_kernelArgs {
const AccessorRO< Vec3, 3> centerCoordinates;
const AccessorRO<VecNEq, 3> Conserved;
const AccessorRO<double, 3> SoS;
const AccessorWO<double, 3> temperature;
const AccessorWO<double, 3> pressure;
const AccessorWO<VecNSp, 3> MolarFracs;
const AccessorWO< Vec3, 3> velocity;
const AccessorRO<double, 3> temperature_recycle;
const AccessorRO< Vec3, 3> velocity_recycle;
const AccessorRO<VecNSp, 3> MolarFracs_recycle;
const AccessorRO<double, 3> temperature_profile;
const AccessorRO< Vec3, 3> velocity_profile;
const AccessorRO<VecNSp, 3> MolarFracs_profile;
const AccessorRO<double, 1> avg_y;
const AccessorRO< float, 1> FI_xloc;
const AccessorRO< float, 1> FI_iloc;
};
__global__
#ifdef LEGION_BOUNDS_CHECKS
void SetRecycleRescalingBC_kernel(const DeferredBuffer<SetRecycleRescalingBC_kernelArgs, 1> buffer,
const FastInterpData FIdata,
const double Pbc,
const double yInnFact,
const double yOutFact,
const double uInnFact,
const double uOutFact,
const double idelta99Inl,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
#else
void SetRecycleRescalingBC_kernel(const SetRecycleRescalingBC_kernelArgs a,
const FastInterpData FIdata,
const double Pbc,
const double yInnFact,
const double yOutFact,
const double uInnFact,
const double uOutFact,
const double idelta99Inl,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
#endif
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
#ifdef LEGION_BOUNDS_CHECKS
SetRecycleRescalingBC_kernelArgs a = buffer[0];
#endif
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
// Compute the rescaled primitive quantities
double temperatureR; Vec3 velocityR; VecNSp MolarFracsR;
SetRecycleRescalingBCTask::GetRescaled(
temperatureR, velocityR, MolarFracsR, a.centerCoordinates,
a.temperature_recycle, a.velocity_recycle, a.MolarFracs_recycle,
a.temperature_profile, a.velocity_profile, a.MolarFracs_profile,
a.avg_y, a.FI_xloc, a.FI_iloc, FIdata, p,
yInnFact, yOutFact, uInnFact, uOutFact, idelta99Inl);
a.MolarFracs[p] = MolarFracsR;
a.temperature[p] = temperatureR;
a.velocity[p] = velocityR;
if (fabs(velocityR[0]) >= a.SoS[p])
// It is supersonic, everything is imposed by the BC
a.pressure[p] = Pbc;
else
// Compute pressure from NSCBC conservation equations
a.pressure[p] = SetRecycleRescalingBCTask::setPressure(a.Conserved, temperatureR, MolarFracsR, p, mix);
}
}
__host__
void SetRecycleRescalingBCTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 5);
assert(futures.size() == 1);
// Accessor for cell-center coordinates
const AccessorRO< Vec3, 3> acc_centerCoordinates (regions[0], FID_centerCoordinates);
// Accessor for conserved variables
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
// Accessor for speed of sound
const AccessorRO<double, 3> acc_SoS (regions[0], FID_SoS);
// Accessors for profile variables
const AccessorRO<VecNSp, 3> acc_MolarFracs_profile (regions[0], FID_MolarFracs_profile);
const AccessorRO<double, 3> acc_temperature_profile (regions[0], FID_temperature_profile);
const AccessorRO< Vec3, 3> acc_velocity_profile (regions[0], FID_velocity_profile);
// Accessors for primitive variables
const AccessorWO<double, 3> acc_pressure (regions[1], FID_pressure);
const AccessorWO<double, 3> acc_temperature (regions[1], FID_temperature);
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
const AccessorWO< Vec3, 3> acc_velocity (regions[1], FID_velocity);
// Accessors for avg wall-normal coordinate
const AccessorRO<double, 1> acc_avg_y (regions[2], RA_FID_y);
// Accessors for recycle plane variables
const AccessorRO<VecNSp, 3> acc_MolarFracs_recycle (regions[3], FID_MolarFracs_recycle);
const AccessorRO<double, 3> acc_temperature_recycle (regions[3], FID_temperature_recycle);
const AccessorRO< Vec3, 3> acc_velocity_recycle (regions[3], FID_velocity_recycle);
// Accessors for fast interpolation region
const AccessorRO< float, 1> acc_FI_xloc (regions[4], FI_FID_xloc);
const AccessorRO< float, 1> acc_FI_iloc (regions[4], FI_FID_iloc);
// Extract execution domain
const Rect<3> r_BC = runtime->get_index_space_domain(ctx,
regions[1].get_logical_region().get_index_space());
// Compute rescaling coefficients
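// (judging by the member names, yInnFact/uInnFact rescale in viscous wall
// units via deltaNu, uTau and rhow, while yOutFact/uOutFact rescale by the
// van Driest boundary-layer thickness delta99VD)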
const RescalingDataType RdataRe = futures[0].get_result<RescalingDataType>();
const double yInnFact = RdataRe.deltaNu /args.RdataIn.deltaNu;
const double yOutFact = RdataRe.delta99VD/args.RdataIn.delta99VD;
const double uInnFact = args.RdataIn.uTau/RdataRe.uTau;
const double uOutFact = uInnFact*sqrt(args.RdataIn.rhow/RdataRe.rhow);
const double idelta99Inl = 1.0/args.RdataIn.delta99VD;
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_BC);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_BC) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_BC) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_BC) + (TPB_3d.z - 1)) / TPB_3d.z);
struct SetRecycleRescalingBC_kernelArgs kArgs = {
.centerCoordinates = acc_centerCoordinates,
.Conserved = acc_Conserved,
.SoS = acc_SoS,
.temperature = acc_temperature,
.pressure = acc_pressure,
.MolarFracs = acc_MolarFracs,
.velocity = acc_velocity,
.temperature_recycle = acc_temperature_recycle,
.velocity_recycle = acc_velocity_recycle,
.MolarFracs_recycle = acc_MolarFracs_recycle,
.temperature_profile = acc_temperature_profile,
.velocity_profile = acc_velocity_profile,
.MolarFracs_profile = acc_MolarFracs_profile,
.avg_y = acc_avg_y,
.FI_xloc = acc_FI_xloc,
.FI_iloc = acc_FI_iloc,
};
#ifdef LEGION_BOUNDS_CHECKS
DeferredBuffer<SetRecycleRescalingBC_kernelArgs, 1>
buffer(Rect<1>(Point<1>(0), Point<1>(1)), Memory::Z_COPY_MEM, &kArgs);
hipLaunchKernelGGL(( SetRecycleRescalingBC_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0,
buffer,
args.FIdata, args.Pbc,
yInnFact, yOutFact, uInnFact, uOutFact, idelta99Inl,
r_BC, getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
#else
hipLaunchKernelGGL(( SetRecycleRescalingBC_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0,
kArgs,
args.FIdata, args.Pbc,
yInnFact, yOutFact, uInnFact, uOutFact, idelta99Inl,
r_BC, getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
#endif
}
#if (defined(ELECTRIC_FIELD) && (nIons > 0))
//-----------------------------------------------------------------------------
// KERNELS FOR CorrectIonsBCTask
//-----------------------------------------------------------------------------
template<direction dir, side s>
__global__
void CorrectIonsBC_kernel(const AccessorRO<double, 3> ePotInt,
const AccessorRO<double, 3> ePot,
const AccessorRO<VecNSp, 3> MolarFracsInt,
const AccessorWO<VecNSp, 3> MolarFracs,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
const Point<3> pInt = getPIntBC<dir, s>(p);
const double dPhi = ePotInt[pInt] - ePot[p];
__UNROLL__
for (int i = 0; i < nIons; i++) {
int ind = mix.ions[i];
if (mix.GetSpeciesChargeNumber(ind)*dPhi > 0)
// the ion is flowing into the BC
MolarFracs[p][ind] = MolarFracsInt[pInt][ind];
else
// the ion is repelled by the BC
MolarFracs[p][ind] = 1e-60;
}
}
}
template<direction dir, side s>
__host__
void CorrectIonsBCTask<dir, s>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 3);
assert(futures.size() == 0);
// Accessor for BC electric potential
const AccessorRO<double, 3> acc_ePot (regions[0], FID_electricPotential);
// Accessors for primitive variables
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
// Accessor for internal electric potential and molar fractions
const AccessorRO<double, 3> acc_ePotInt (regions[2], FID_electricPotential);
const AccessorRO<VecNSp, 3> acc_MolarFracsInt(regions[2], FID_MolarFracs);
// Extract execution domain
const Rect<3> r_BC = runtime->get_index_space_domain(ctx,
regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_BC);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_BC) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_BC) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_BC) + (TPB_3d.z - 1)) / TPB_3d.z);
hipLaunchKernelGGL(( CorrectIonsBC_kernel<dir, s>), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0,
acc_ePotInt, acc_ePot, acc_MolarFracsInt, acc_MolarFracs,
r_BC, getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
};
template void CorrectIonsBCTask<Xdir, Minus>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void CorrectIonsBCTask<Xdir, Plus >::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void CorrectIonsBCTask<Ydir, Minus>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void CorrectIonsBCTask<Ydir, Plus >::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void CorrectIonsBCTask<Zdir, Minus>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void CorrectIonsBCTask<Zdir, Plus >::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
#endif
| 57da24b9724132ccecc880dbeba61e499895e753.cu | // Copyright (c) "2019, by Stanford University
// Developer: Mario Di Renzo
// Affiliation: Center for Turbulence Research, Stanford University
// URL: https://ctr.stanford.edu
// Citation: Di Renzo, M., Lin, F., and Urzay, J. (2020).
// HTR solver: An open-source exascale-oriented task-based
// multi-GPU high-order code for hypersonic aerothermodynamics.
// Computer Physics Communications 255, 107262"
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "prometeo_bc.hpp"
#include "cuda_utils.hpp"
// Declare a constant memory that will hold the Mixture struct (initialized in prometeo_mixture.cu)
extern __device__ __constant__ Mix mix;
//-----------------------------------------------------------------------------
// KERNELS FOR AddRecycleAverageTask
//-----------------------------------------------------------------------------
__global__
void AddRecycleAverageTask_kernel(const AccessorRO<double, 3> dcsi_d,
const AccessorRO<double, 3> deta_d,
const AccessorRO<double, 3> dzet_d,
const AccessorRO<VecNSp, 3> MolarFracs_profile,
const AccessorRO<double, 3> temperature_profile,
const AccessorRO< Vec3, 3> velocity_profile,
const AccessorSumRD<VecNSp, 1> avg_MolarFracs,
const AccessorSumRD< Vec3, 1> avg_velocity,
const AccessorSumRD<double, 1> avg_temperature,
const AccessorSumRD<double, 1> avg_rho,
const double Pbc,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
AddRecycleAverageTask::collectAverages(dcsi_d, deta_d, dzet_d,
MolarFracs_profile, temperature_profile, velocity_profile,
avg_MolarFracs, avg_velocity, avg_temperature,
avg_rho, Pbc, p, mix);
}
}
__host__
void AddRecycleAverageTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(futures.size() == 0);
// Accessors for metrics
const AccessorRO<double, 3> acc_dcsi_d (regions[0], FID_dcsi_d);
const AccessorRO<double, 3> acc_deta_d (regions[0], FID_deta_d);
const AccessorRO<double, 3> acc_dzet_d (regions[0], FID_dzet_d);
// Accessors for profile variables
const AccessorRO<VecNSp, 3> acc_MolarFracs_profile (regions[0], FID_MolarFracs_profile);
const AccessorRO<double, 3> acc_temperature_profile (regions[0], FID_temperature_profile);
const AccessorRO< Vec3, 3> acc_velocity_profile (regions[0], FID_velocity_profile);
// Accessors for averages
const AccessorSumRD<double, 1> acc_avg_rho (regions[1], RA_FID_rho, LEGION_REDOP_SUM_FLOAT64);
const AccessorSumRD<double, 1> acc_avg_temperature (regions[1], RA_FID_temperature, LEGION_REDOP_SUM_FLOAT64);
const AccessorSumRD<VecNSp, 1> acc_avg_MolarFracs (regions[2], RA_FID_MolarFracs, REGENT_REDOP_SUM_VECNSP);
const AccessorSumRD< Vec3, 1> acc_avg_velocity (regions[3], RA_FID_velocity, REGENT_REDOP_SUM_VEC3);
// Extract execution domain
const Rect<3> r_plane = runtime->get_index_space_domain(ctx,
regions[0].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_plane);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_plane) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_plane) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_plane) + (TPB_3d.z - 1)) / TPB_3d.z);
AddRecycleAverageTask_kernel<<<num_blocks_3d, TPB_3d>>>(acc_dcsi_d, acc_deta_d, acc_dzet_d,
acc_MolarFracs_profile, acc_temperature_profile, acc_velocity_profile,
acc_avg_MolarFracs, acc_avg_velocity, acc_avg_temperature, acc_avg_rho,
args.Pbc, r_plane,
getSize<Xdir>(r_plane), getSize<Ydir>(r_plane), getSize<Zdir>(r_plane));
}
//-----------------------------------------------------------------------------
// KERNELS FOR SetNSCBC_InflowBC
//-----------------------------------------------------------------------------
template<direction dir>
__global__
void SetNSCBC_InflowBC_kernel(const AccessorRO<VecNEq, 3> Conserved,
const AccessorRO<double, 3> SoS,
const AccessorRO<VecNSp, 3> MolarFracs_profile,
const AccessorRO<double, 3> temperature_profile,
const AccessorRO< Vec3, 3> velocity_profile,
const AccessorWO<double, 3> pressure,
const AccessorWO<double, 3> temperature,
const AccessorWO<VecNSp, 3> MolarFracs,
const AccessorWO< Vec3, 3> velocity,
const double Pbc,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
// Index of normal direction
constexpr int iN = normalIndex(dir);
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
MolarFracs[p] = MolarFracs_profile[p];
temperature[p] = temperature_profile[p];
velocity[p] = velocity_profile[p];
if (fabs(velocity_profile[p][iN]) >= SoS[p])
// It is supersonic, everything is imposed by the BC
pressure[p] = Pbc;
else
// Compute pressure from NSCBC conservation equations
SetNSCBC_InflowBCTask<dir>::setInflowPressure(
Conserved, MolarFracs_profile, temperature_profile,
pressure, p, mix);
}
}
template<direction dir>
__host__
void SetNSCBC_InflowBCTask<dir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessor for conserved variables
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
// Accessor for speed of sound
const AccessorRO<double, 3> acc_SoS (regions[0], FID_SoS);
// Accessors for profile variables
const AccessorRO<VecNSp, 3> acc_MolarFracs_profile (regions[0], FID_MolarFracs_profile);
const AccessorRO<double, 3> acc_temperature_profile (regions[0], FID_temperature_profile);
const AccessorRO< Vec3, 3> acc_velocity_profile (regions[0], FID_velocity_profile);
// Accessors for primitive variables
const AccessorWO<double, 3> acc_pressure (regions[1], FID_pressure);
const AccessorWO<double, 3> acc_temperature (regions[1], FID_temperature);
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
const AccessorWO< Vec3, 3> acc_velocity (regions[1], FID_velocity);
// Extract execution domain
const Rect<3> r_BC = runtime->get_index_space_domain(ctx,
regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_BC);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_BC) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_BC) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_BC) + (TPB_3d.z - 1)) / TPB_3d.z);
SetNSCBC_InflowBC_kernel<dir><<<num_blocks_3d, TPB_3d>>>(acc_Conserved, acc_SoS,
acc_MolarFracs_profile, acc_temperature_profile, acc_velocity_profile,
acc_pressure, acc_temperature, acc_MolarFracs, acc_velocity,
args.Pbc, r_BC,
getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
}
template void SetNSCBC_InflowBCTask<Xdir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void SetNSCBC_InflowBCTask<Ydir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void SetNSCBC_InflowBCTask<Zdir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
//-----------------------------------------------------------------------------
// KERNELS FOR SetNSCBC_OutflowBC
//-----------------------------------------------------------------------------
__global__
void SetNSCBC_OutflowBC_kernel(const AccessorRO<VecNEq, 3> Conserved,
const AccessorRW<double, 3> temperature,
const AccessorWO<double, 3> pressure,
const AccessorWO<VecNSp, 3> MolarFracs,
const AccessorWO< Vec3, 3> velocity,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
UpdatePrimitiveFromConservedTask::UpdatePrimitive(
Conserved, temperature, pressure,
MolarFracs, velocity,
p, mix);
}
}
__host__
void SetNSCBC_OutflowBCTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessors for conserved variables
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
// Accessors for temperature
const AccessorRW<double, 3> acc_temperature (regions[1], FID_temperature);
// Accessors for primitive variables
const AccessorWO<double, 3> acc_pressure (regions[1], FID_pressure);
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
const AccessorWO< Vec3, 3> acc_velocity (regions[1], FID_velocity);
// Extract execution domain
const Rect<3> r_BC = runtime->get_index_space_domain(ctx,
regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_BC);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_BC) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_BC) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_BC) + (TPB_3d.z - 1)) / TPB_3d.z);
SetNSCBC_OutflowBC_kernel<<<num_blocks_3d, TPB_3d>>>(
acc_Conserved, acc_temperature, acc_pressure,
acc_MolarFracs, acc_velocity, r_BC,
getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
}
//-----------------------------------------------------------------------------
// KERNELS FOR SetIncomingShockBCTask
//-----------------------------------------------------------------------------
__global__
void SetIncomingShockBC_kernel(const AccessorRO<VecNEq, 3> Conserved,
const AccessorRW<double, 3> temperature,
const AccessorWO<double, 3> pressure,
const AccessorWO<VecNSp, 3> MolarFracs,
const AccessorWO< Vec3, 3> velocity,
const Vec3 velocity0,
const double temperature0,
const double pressure0,
const Vec3 velocity1,
const double temperature1,
const double pressure1,
const VecNSp MolarFracs0,
const int iShock,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
if ((p.x < iShock - 1) or
(p.x > iShock + 1)) {
// Treat as an outflow
UpdatePrimitiveFromConservedTask::UpdatePrimitive(
Conserved, temperature, pressure,
MolarFracs, velocity,
p, mix);
// Inject the shock over three points
} else if (p.x == iShock - 1) {
MolarFracs[p] = MolarFracs0;
velocity[p] = 0.75*velocity0 + 0.25*velocity1;
temperature[p] = 0.75*temperature0 + 0.25*temperature1;
pressure[p] = 0.75*pressure0 + 0.25*pressure1;
} else if (p.x == iShock) {
MolarFracs[p] = MolarFracs0;
velocity[p] = 0.25*velocity0 + 0.75*velocity1;
temperature[p] = 0.25*temperature0 + 0.75*temperature1;
pressure[p] = 0.25*pressure0 + 0.75*pressure1;
} else if (p.x == iShock + 1) {
MolarFracs[p] = MolarFracs0;
velocity[p] = velocity1;
temperature[p] = temperature1;
pressure[p] = pressure1;
}
}
}
__host__
void SetIncomingShockBCTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessor for conserved variables
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
// Accessors for temperature
const AccessorRW<double, 3> acc_temperature (regions[1], FID_temperature);
// Accessors for primitive variables
const AccessorWO<double, 3> acc_pressure (regions[1], FID_pressure);
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
const AccessorWO< Vec3, 3> acc_velocity (regions[1], FID_velocity);
// Extract execution domain
const Rect<3> r_BC = runtime->get_index_space_domain(ctx,
regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_BC);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_BC) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_BC) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_BC) + (TPB_3d.z - 1)) / TPB_3d.z);
SetIncomingShockBC_kernel<<<num_blocks_3d, TPB_3d>>>(
acc_Conserved,
acc_temperature, acc_pressure, acc_MolarFracs, acc_velocity,
Vec3(args.params.velocity0), args.params.temperature0, args.params.pressure0,
Vec3(args.params.velocity1), args.params.temperature1, args.params.pressure1,
VecNSp(args.params.MolarFracs), args.params.iShock,
r_BC, getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
}
//-----------------------------------------------------------------------------
// KERNELS FOR SetRecycleRescalingBCTask
//-----------------------------------------------------------------------------
// Workaround for Legion issue #879
struct SetRecycleRescalingBC_kernelArgs {
const AccessorRO< Vec3, 3> centerCoordinates;
const AccessorRO<VecNEq, 3> Conserved;
const AccessorRO<double, 3> SoS;
const AccessorWO<double, 3> temperature;
const AccessorWO<double, 3> pressure;
const AccessorWO<VecNSp, 3> MolarFracs;
const AccessorWO< Vec3, 3> velocity;
const AccessorRO<double, 3> temperature_recycle;
const AccessorRO< Vec3, 3> velocity_recycle;
const AccessorRO<VecNSp, 3> MolarFracs_recycle;
const AccessorRO<double, 3> temperature_profile;
const AccessorRO< Vec3, 3> velocity_profile;
const AccessorRO<VecNSp, 3> MolarFracs_profile;
const AccessorRO<double, 1> avg_y;
const AccessorRO< float, 1> FI_xloc;
const AccessorRO< float, 1> FI_iloc;
};
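// Note: when LEGION_BOUNDS_CHECKS is defined the accessor struct above is handed to the kernel
// through a DeferredBuffer placed in zero-copy memory (see gpu_base_impl below) instead of being
// passed by value; this is presumably what works around the Legion issue referenced above.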
__global__
#ifdef LEGION_BOUNDS_CHECKS
void SetRecycleRescalingBC_kernel(const DeferredBuffer<SetRecycleRescalingBC_kernelArgs, 1> buffer,
const FastInterpData FIdata,
const double Pbc,
const double yInnFact,
const double yOutFact,
const double uInnFact,
const double uOutFact,
const double idelta99Inl,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
#else
void SetRecycleRescalingBC_kernel(const SetRecycleRescalingBC_kernelArgs a,
const FastInterpData FIdata,
const double Pbc,
const double yInnFact,
const double yOutFact,
const double uInnFact,
const double uOutFact,
const double idelta99Inl,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
#endif
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
#ifdef LEGION_BOUNDS_CHECKS
SetRecycleRescalingBC_kernelArgs a = buffer[0];
#endif
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
// Compute the rescaled primitive quantities
double temperatureR; Vec3 velocityR; VecNSp MolarFracsR;
SetRecycleRescalingBCTask::GetRescaled(
temperatureR, velocityR, MolarFracsR, a.centerCoordinates,
a.temperature_recycle, a.velocity_recycle, a.MolarFracs_recycle,
a.temperature_profile, a.velocity_profile, a.MolarFracs_profile,
a.avg_y, a.FI_xloc, a.FI_iloc, FIdata, p,
yInnFact, yOutFact, uInnFact, uOutFact, idelta99Inl);
a.MolarFracs[p] = MolarFracsR;
a.temperature[p] = temperatureR;
a.velocity[p] = velocityR;
if (fabs(velocityR[0]) >= a.SoS[p])
// It is supersonic, everything is imposed by the BC
a.pressure[p] = Pbc;
else
// Compute pressure from NSCBC conservation equations
a.pressure[p] = SetRecycleRescalingBCTask::setPressure(a.Conserved, temperatureR, MolarFracsR, p, mix);
}
}
__host__
void SetRecycleRescalingBCTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 5);
assert(futures.size() == 1);
   // Accessor for cell center coordinates
const AccessorRO< Vec3, 3> acc_centerCoordinates (regions[0], FID_centerCoordinates);
// Accessor for conserved variables
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
// Accessor for speed of sound
const AccessorRO<double, 3> acc_SoS (regions[0], FID_SoS);
// Accessors for profile variables
const AccessorRO<VecNSp, 3> acc_MolarFracs_profile (regions[0], FID_MolarFracs_profile);
const AccessorRO<double, 3> acc_temperature_profile (regions[0], FID_temperature_profile);
const AccessorRO< Vec3, 3> acc_velocity_profile (regions[0], FID_velocity_profile);
// Accessors for primitive variables
const AccessorWO<double, 3> acc_pressure (regions[1], FID_pressure);
const AccessorWO<double, 3> acc_temperature (regions[1], FID_temperature);
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
const AccessorWO< Vec3, 3> acc_velocity (regions[1], FID_velocity);
// Accessors for avg wall-normal coordinate
const AccessorRO<double, 1> acc_avg_y (regions[2], RA_FID_y);
// Accessors for recycle plane variables
const AccessorRO<VecNSp, 3> acc_MolarFracs_recycle (regions[3], FID_MolarFracs_recycle);
const AccessorRO<double, 3> acc_temperature_recycle (regions[3], FID_temperature_recycle);
const AccessorRO< Vec3, 3> acc_velocity_recycle (regions[3], FID_velocity_recycle);
// Accessors for fast interpolation region
const AccessorRO< float, 1> acc_FI_xloc (regions[4], FI_FID_xloc);
const AccessorRO< float, 1> acc_FI_iloc (regions[4], FI_FID_iloc);
// Extract execution domain
const Rect<3> r_BC = runtime->get_index_space_domain(ctx,
regions[1].get_logical_region().get_index_space());
// Compute rescaling coefficients
const RescalingDataType RdataRe = futures[0].get_result<RescalingDataType>();
const double yInnFact = RdataRe.deltaNu /args.RdataIn.deltaNu;
const double yOutFact = RdataRe.delta99VD/args.RdataIn.delta99VD;
const double uInnFact = args.RdataIn.uTau/RdataRe.uTau;
const double uOutFact = uInnFact*sqrt(args.RdataIn.rhow/RdataRe.rhow);
const double idelta99Inl = 1.0/args.RdataIn.delta99VD;
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_BC);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_BC) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_BC) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_BC) + (TPB_3d.z - 1)) / TPB_3d.z);
struct SetRecycleRescalingBC_kernelArgs kArgs = {
.centerCoordinates = acc_centerCoordinates,
.Conserved = acc_Conserved,
.SoS = acc_SoS,
.temperature = acc_temperature,
.pressure = acc_pressure,
.MolarFracs = acc_MolarFracs,
.velocity = acc_velocity,
.temperature_recycle = acc_temperature_recycle,
.velocity_recycle = acc_velocity_recycle,
.MolarFracs_recycle = acc_MolarFracs_recycle,
.temperature_profile = acc_temperature_profile,
.velocity_profile = acc_velocity_profile,
.MolarFracs_profile = acc_MolarFracs_profile,
.avg_y = acc_avg_y,
.FI_xloc = acc_FI_xloc,
.FI_iloc = acc_FI_iloc,
};
#ifdef LEGION_BOUNDS_CHECKS
DeferredBuffer<SetRecycleRescalingBC_kernelArgs, 1>
buffer(Rect<1>(Point<1>(0), Point<1>(1)), Memory::Z_COPY_MEM, &kArgs);
SetRecycleRescalingBC_kernel<<<num_blocks_3d, TPB_3d>>>(
buffer,
args.FIdata, args.Pbc,
yInnFact, yOutFact, uInnFact, uOutFact, idelta99Inl,
r_BC, getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
#else
SetRecycleRescalingBC_kernel<<<num_blocks_3d, TPB_3d>>>(
kArgs,
args.FIdata, args.Pbc,
yInnFact, yOutFact, uInnFact, uOutFact, idelta99Inl,
r_BC, getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
#endif
}
#if (defined(ELECTRIC_FIELD) && (nIons > 0))
//-----------------------------------------------------------------------------
// KERNELS FOR CorrectIonsBCTask
//-----------------------------------------------------------------------------
template<direction dir, side s>
__global__
void CorrectIonsBC_kernel(const AccessorRO<double, 3> ePotInt,
const AccessorRO<double, 3> ePot,
const AccessorRO<VecNSp, 3> MolarFracsInt,
const AccessorWO<VecNSp, 3> MolarFracs,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
const Point<3> pInt = getPIntBC<dir, s>(p);
const double dPhi = ePotInt[pInt] - ePot[p];
__UNROLL__
for (int i = 0; i < nIons; i++) {
int ind = mix.ions[i];
if (mix.GetSpeciesChargeNumber(ind)*dPhi > 0)
// the ion is flowing into the BC
MolarFracs[p][ind] = MolarFracsInt[pInt][ind];
else
// the ion is repelled by the BC
MolarFracs[p][ind] = 1e-60;
}
}
}
template<direction dir, side s>
__host__
void CorrectIonsBCTask<dir, s>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 3);
assert(futures.size() == 0);
// Accessor for BC electric potential
const AccessorRO<double, 3> acc_ePot (regions[0], FID_electricPotential);
// Accessors for primitive variables
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
// Accessor for internal electric potential and molar fractions
const AccessorRO<double, 3> acc_ePotInt (regions[2], FID_electricPotential);
const AccessorRO<VecNSp, 3> acc_MolarFracsInt(regions[2], FID_MolarFracs);
// Extract execution domain
const Rect<3> r_BC = runtime->get_index_space_domain(ctx,
regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_BC);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_BC) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_BC) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_BC) + (TPB_3d.z - 1)) / TPB_3d.z);
CorrectIonsBC_kernel<dir, s><<<num_blocks_3d, TPB_3d>>>(
acc_ePotInt, acc_ePot, acc_MolarFracsInt, acc_MolarFracs,
r_BC, getSize<Xdir>(r_BC), getSize<Ydir>(r_BC), getSize<Zdir>(r_BC));
};
template void CorrectIonsBCTask<Xdir, Minus>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void CorrectIonsBCTask<Xdir, Plus >::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void CorrectIonsBCTask<Ydir, Minus>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void CorrectIonsBCTask<Ydir, Plus >::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void CorrectIonsBCTask<Zdir, Minus>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void CorrectIonsBCTask<Zdir, Plus >::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
#endif
|
dac611fa19140ae8c69a28e1960147864f4b75eb.hip | // !!! This is a file automatically generated by hipify!!!
/*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: CudaBinaryThresholdImageFilterKernel.cu
Language: CUDA
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
/** \class CudaBinaryThresholdImageFilterKernel.cu
* \brief Cuda kernel code
* \author Phillip Ward, Luke Parkinson, Daniel Micevski, Christopher
* Share, Victorian Partnership for Advanced Computing (VPAC).
* Richard Beare, Monash University
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
template <class T, class S>
__global__ void binaryThreshold(S *output, T lower, T upper, S inside, S outside, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
T A = output[idx];
if ( lower <= A && A <= upper )
{
output[idx] = inside;
}
else
{
output[idx] = outside;
}
}
}
template <class T, class S>
__global__ void binaryThreshold(S *output, const T *input, T lower, T upper, S inside, S outside, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
T A = input[idx];
if ( lower <= A && A <= upper )
{
output[idx] = inside;
}
else
{
output[idx] = outside;
}
}
}
template <class T, class S>
void BinaryThresholdImageKernelFunction(const T* input, S* output, T m_LowerThreshold,
T m_UpperThreshold, S m_InsideValue, S m_OutsideValue, unsigned int N)
{
// Compute execution configuration
int blockSize = 128;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Call kernel
if (output == input)
hipLaunchKernelGGL(( binaryThreshold) , dim3(nBlocks), dim3(blockSize) , 0, 0, output, m_LowerThreshold, m_UpperThreshold, m_InsideValue, m_OutsideValue, N);
else
hipLaunchKernelGGL(( binaryThreshold) , dim3(nBlocks), dim3(blockSize) , 0, 0, output, input, m_LowerThreshold, m_UpperThreshold, m_InsideValue, m_OutsideValue, N);
}
// versions we wish to compile
#define THISTYPE float
template void BinaryThresholdImageKernelFunction<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, THISTYPE m_LowerThreshold, THISTYPE m_UpperThreshold, THISTYPE m_InsideValue, THISTYPE m_OutsideValue, unsigned int N);
#undef THISTYPE
#define THISTYPE int
template void BinaryThresholdImageKernelFunction<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, THISTYPE m_LowerThreshold, THISTYPE m_UpperThreshold, THISTYPE m_InsideValue, THISTYPE m_OutsideValue, unsigned int N);
#undef THISTYPE
#define THISTYPE short
template void BinaryThresholdImageKernelFunction<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, THISTYPE m_LowerThreshold, THISTYPE m_UpperThreshold, THISTYPE m_InsideValue, THISTYPE m_OutsideValue, unsigned int N);
#undef THISTYPE
#define THISTYPE unsigned char
template void BinaryThresholdImageKernelFunction<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, THISTYPE m_LowerThreshold, THISTYPE m_UpperThreshold, THISTYPE m_InsideValue, THISTYPE m_OutsideValue, unsigned int N);
#undef THISTYPE
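// Usage sketch (illustrative only; buffer name, size and threshold values below are placeholders):
//
//   float *d_img;
//   hipMalloc(&d_img, N * sizeof(float));
//   // ... copy the input image into d_img ...
//   // In-place thresholding: pixels in [0.1, 0.9] -> 1, everything else -> 0
//   BinaryThresholdImageKernelFunction<float, float>(d_img, d_img, 0.1f, 0.9f, 1.0f, 0.0f, N);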
| dac611fa19140ae8c69a28e1960147864f4b75eb.cu | /*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: CudaBinaryThresholdImageFilterKernel.cu
Language: CUDA
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
/** \class CudaBinaryThresholdImageFilterKernel.cu
* \brief Cuda kernel code
* \author Phillip Ward, Luke Parkinson, Daniel Micevski, Christopher
* Share, Victorian Partnership for Advanced Computing (VPAC).
* Richard Beare, Monash University
*/
#include <stdio.h>
#include <cuda.h>
#include <cutil.h>
template <class T, class S>
__global__ void binaryThreshold(S *output, T lower, T upper, S inside, S outside, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
T A = output[idx];
if ( lower <= A && A <= upper )
{
output[idx] = inside;
}
else
{
output[idx] = outside;
}
}
}
template <class T, class S>
__global__ void binaryThreshold(S *output, const T *input, T lower, T upper, S inside, S outside, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
T A = input[idx];
if ( lower <= A && A <= upper )
{
output[idx] = inside;
}
else
{
output[idx] = outside;
}
}
}
template <class T, class S>
void BinaryThresholdImageKernelFunction(const T* input, S* output, T m_LowerThreshold,
T m_UpperThreshold, S m_InsideValue, S m_OutsideValue, unsigned int N)
{
// Compute execution configuration
int blockSize = 128;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Call kernel
if (output == input)
binaryThreshold <<< nBlocks, blockSize >>> (output, m_LowerThreshold, m_UpperThreshold, m_InsideValue, m_OutsideValue, N);
else
binaryThreshold <<< nBlocks, blockSize >>> (output, input, m_LowerThreshold, m_UpperThreshold, m_InsideValue, m_OutsideValue, N);
}
// versions we wish to compile
#define THISTYPE float
template void BinaryThresholdImageKernelFunction<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, THISTYPE m_LowerThreshold, THISTYPE m_UpperThreshold, THISTYPE m_InsideValue, THISTYPE m_OutsideValue, unsigned int N);
#undef THISTYPE
#define THISTYPE int
template void BinaryThresholdImageKernelFunction<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, THISTYPE m_LowerThreshold, THISTYPE m_UpperThreshold, THISTYPE m_InsideValue, THISTYPE m_OutsideValue, unsigned int N);
#undef THISTYPE
#define THISTYPE short
template void BinaryThresholdImageKernelFunction<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, THISTYPE m_LowerThreshold, THISTYPE m_UpperThreshold, THISTYPE m_InsideValue, THISTYPE m_OutsideValue, unsigned int N);
#undef THISTYPE
#define THISTYPE unsigned char
template void BinaryThresholdImageKernelFunction<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, THISTYPE m_LowerThreshold, THISTYPE m_UpperThreshold, THISTYPE m_InsideValue, THISTYPE m_OutsideValue, unsigned int N);
#undef THISTYPE
|
b0049770fad8448399902958379bdc4bead86798.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hist.cuh"
#include "point_hist_half_byte_template.cuh"
#include "compute_hist_loop_one_stat.cuh"
using namespace cooperative_groups;
namespace NKernel {
template<int BlockSize>
struct TPointHistBinary: public TPointHistHalfByteBase<BlockSize, TPointHistBinary<BlockSize>> {
using TPointHistHalfByteBase<BlockSize, TPointHistBinary<BlockSize>>::Histogram;
__forceinline__ __device__ TPointHistBinary(float* buff)
: TPointHistHalfByteBase<BlockSize,TPointHistBinary<BlockSize>>(buff) {
}
static constexpr int Unroll(ECIndexLoadType) {
#if __CUDA_ARCH__ < 500
return 4;
#elif __CUDA_ARCH__ < 700
return 1;
#else
return 2;
#endif
}
__forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount,
const TFeatureInBlock* features,
int fCount,
int leafId, int leafCount,
float* binSums) {
const int fid = threadIdx.x;
const int fold = 0;
if (fid < fCount ) {
TFeatureInBlock group = features[fid];
if (group.Folds) {
const int deviceOffset = group.GroupOffset * statCount * leafCount;
const int entriesPerLeaf = statCount * group.GroupSize;
float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize +
group.FoldOffsetInGroup;
const int groupId = fid / 4;
const int fMask = 1 << (3 - (fid & 3));
float val = 0.f;
                #pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
val += Histogram[8 * i + groupId];
}
}
if (abs(val) > 1e-20f) {
if (blockCount > 1) {
atomicAdd(dst + fold, val);
} else {
dst[fold] = val;
}
}
}
}
}
};
using THist = TPointHistBinary<768>;
void ComputeHistBinary(const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.z = numStats;
numBlocks.y = partCount;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();
numBlocks.x = (fCount + 31) / 32;
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));
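        // Inflate the x-dimension so that at least ~maxActiveBlocks blocks are launched in total
        // (an occupancy heuristic; CeilDivide is assumed to round the ratio up).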
hipLaunchKernelGGL(( ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 32>), dim3(numBlocks), dim3(blockSize), 0, stream, features,
fCount,
bins,
binsLineSize,
stats,
statLineSize,
parts,
partIds,
histograms);
}
void ComputeHistBinary(const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.z = numStats;
numBlocks.y = partCount;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();
numBlocks.x = (fCount + 31) / 32;
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));
hipLaunchKernelGGL(( ComputeSplitPropertiesGatherImpl<THist, blockSize, 32>), dim3(numBlocks), dim3(blockSize), 0, stream, features,
fCount,
cindex,
indices,
stats,
statLineSize,
parts,
partIds,
histograms);
}
}
| b0049770fad8448399902958379bdc4bead86798.cu | #include "hist.cuh"
#include "point_hist_half_byte_template.cuh"
#include "compute_hist_loop_one_stat.cuh"
using namespace cooperative_groups;
namespace NKernel {
template<int BlockSize>
struct TPointHistBinary: public TPointHistHalfByteBase<BlockSize, TPointHistBinary<BlockSize>> {
using TPointHistHalfByteBase<BlockSize, TPointHistBinary<BlockSize>>::Histogram;
__forceinline__ __device__ TPointHistBinary(float* buff)
: TPointHistHalfByteBase<BlockSize,TPointHistBinary<BlockSize>>(buff) {
}
static constexpr int Unroll(ECIndexLoadType) {
#if __CUDA_ARCH__ < 500
return 4;
#elif __CUDA_ARCH__ < 700
return 1;
#else
return 2;
#endif
}
__forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount,
const TFeatureInBlock* features,
int fCount,
int leafId, int leafCount,
float* binSums) {
const int fid = threadIdx.x;
const int fold = 0;
if (fid < fCount ) {
TFeatureInBlock group = features[fid];
if (group.Folds) {
const int deviceOffset = group.GroupOffset * statCount * leafCount;
const int entriesPerLeaf = statCount * group.GroupSize;
float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize +
group.FoldOffsetInGroup;
const int groupId = fid / 4;
const int fMask = 1 << (3 - (fid & 3));
float val = 0.f;
                #pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
val += Histogram[8 * i + groupId];
}
}
if (abs(val) > 1e-20f) {
if (blockCount > 1) {
atomicAdd(dst + fold, val);
} else {
dst[fold] = val;
}
}
}
}
}
};
using THist = TPointHistBinary<768>;
void ComputeHistBinary(const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.z = numStats;
numBlocks.y = partCount;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();
numBlocks.x = (fCount + 31) / 32;
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));
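        // Inflate the x-dimension so that at least ~maxActiveBlocks blocks are launched in total
        // (an occupancy heuristic; CeilDivide is assumed to round the ratio up).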
ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 32><<<numBlocks, blockSize, 0, stream>>>(features,
fCount,
bins,
binsLineSize,
stats,
statLineSize,
parts,
partIds,
histograms);
}
void ComputeHistBinary(const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.z = numStats;
numBlocks.y = partCount;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();
numBlocks.x = (fCount + 31) / 32;
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));
ComputeSplitPropertiesGatherImpl<THist, blockSize, 32><<<numBlocks, blockSize, 0, stream>>>(features,
fCount,
cindex,
indices,
stats,
statLineSize,
parts,
partIds,
histograms);
}
}
|
93c6a442657f33024367f4874db6ae60153b3994.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BornShotsGpuFunctions.h"
#include <iostream>
#include "varDeclare.h"
#include <vector>
#include <algorithm>
#include <math.h>
#include "kernelsGpu.cu"
#include "cudaErrors.cu"
#include <stdio.h>
#include <assert.h>
/******************************************************************************/
/************************ Set GPU propagation parameters **********************/
/******************************************************************************/
bool getGpuInfo(std::vector<int> gpuList, int info, int deviceNumberInfo){
int nDevice, driver;
hipGetDeviceCount(&nDevice);
if (info == 1){
std::cout << " " << std::endl;
std::cout << "-------------------------------------------------------------------" << std::endl;
std::cout << "---------------------------- INFO FOR GPU# " << deviceNumberInfo << " ----------------------" << std::endl;
std::cout << "-------------------------------------------------------------------" << std::endl;
// Number of devices
std::cout << "Number of requested GPUs: " << gpuList.size() << std::endl;
std::cout << "Number of available GPUs: " << nDevice << std::endl;
std::cout << "Id of requested GPUs: ";
for (int iGpu=0; iGpu<gpuList.size(); iGpu++){
if (iGpu<gpuList.size()-1){std::cout << gpuList[iGpu] << ", ";}
else{ std::cout << gpuList[iGpu] << std::endl;}
}
// Driver version
std::cout << "Cuda driver version: " << hipDriverGetVersion(&driver) << std::endl; // Driver
// Get properties
hipDeviceProp_t dprop;
hipGetDeviceProperties(&dprop,deviceNumberInfo);
// Display
std::cout << "Name: " << dprop.name << std::endl;
std::cout << "Total global memory: " << dprop.totalGlobalMem/(1024*1024*1024) << " [GB] " << std::endl;
std::cout << "Shared memory per block: " << dprop.sharedMemPerBlock/1024 << " [kB]" << std::endl;
std::cout << "Number of register per block: " << dprop.regsPerBlock << std::endl;
std::cout << "Warp size: " << dprop.warpSize << " [threads]" << std::endl;
std::cout << "Maximum pitch allowed for memory copies in bytes: " << dprop.memPitch/(1024*1024*1024) << " [GB]" << std::endl;
std::cout << "Maximum threads per block: " << dprop.maxThreadsPerBlock << std::endl;
std::cout << "Maximum block dimensions: " << "(" << dprop.maxThreadsDim[0] << ", " << dprop.maxThreadsDim[1] << ", " << dprop.maxThreadsDim[2] << ")" << std::endl;
std::cout << "Maximum grid dimensions: " << "(" << dprop.maxGridSize[0] << ", " << dprop.maxGridSize[1] << ", " << dprop.maxGridSize[2] << ")" << std::endl;
std::cout << "Total constant memory: " << dprop.totalConstMem/1024 << " [kB]" << std::endl;
std::cout << "Number of streaming multiprocessors on device: " << dprop.multiProcessorCount << std::endl;
if (dprop.deviceOverlap == 1) {std::cout << "Device can simultaneously perform a hipMemcpy() and kernel execution" << std::endl;}
if (dprop.deviceOverlap != 1) {std::cout << "Device cannot simultaneously perform a hipMemcpy() and kernel execution" << std::endl;}
if (dprop.canMapHostMemory == 1) { std::cout << "Device can map host memory" << std::endl; }
if (dprop.canMapHostMemory != 1) { std::cout << "Device cannot map host memory" << std::endl; }
if (dprop.concurrentKernels == 1) {std::cout << "Device can support concurrent kernel" << std::endl;}
if (dprop.concurrentKernels != 1) {std::cout << "Device cannot support concurrent kernel execution" << std::endl;}
std::cout << "-------------------------------------------------------------------" << std::endl;
std::cout << " " << std::endl;
}
// Check that the number of requested GPU is less or equal to the total number of available GPUs
if (gpuList.size()>nDevice) {
std::cout << "**** ERROR [getGpuInfo]: Number of requested GPU greater than available GPUs ****" << std::endl;
return false;
}
// Check that the GPU numbers in the list are between 0 and nGpu-1
for (int iGpu=0; iGpu<gpuList.size(); iGpu++){
if (gpuList[iGpu]<0 || gpuList[iGpu]>nDevice-1){
std::cout << "**** ERROR [getGpuInfo]: One of the element of the GPU Id list is not a valid GPU Id number ****" << std::endl;
return false;
}
}
return true;
}
void initBornGpu(double dz, double dx, int nz, int nx, int nts, double dts, int sub, int minPad, int blockSize, double alphaCos, int nGpu, int iGpuId, int iGpuAlloc){
// Set GPU number
hipSetDevice(iGpuId);
host_nz = nz;
host_nx = nx;
host_dz = dz;
host_dx = dx;
host_nts = nts;
host_sub = sub;
host_ntw = (nts - 1) * sub + 1;
/**************************** ALLOCATE ARRAYS OF ARRAYS *****************************/
// Only one GPU will perform the following
if (iGpuId == iGpuAlloc) {
// Time slices for FD stepping
dev_p0 = new double*[nGpu];
dev_p1 = new double*[nGpu];
dev_temp1 = new double*[nGpu];
dev_ssLeft = new double*[nGpu];
dev_ssRight = new double*[nGpu];
dev_ssTemp1 = new double*[nGpu];
// Data
dev_dataRegDts = new double*[nGpu];
// Source and receivers
dev_sourcesPositionReg = new int*[nGpu];
dev_receiversPositionReg = new int*[nGpu];
// Sources signal
dev_sourcesSignals = new double*[nGpu];
// Scaled velocity
dev_vel2Dtw2 = new double*[nGpu];
// Reflectivity scaling
dev_reflectivityScale = new double*[nGpu];
// Reflectivity
dev_modelBorn = new double*[nGpu];
// Source wavefields
dev_BornSrcWavefield = new double*[nGpu];
}
/**************************** COMPUTE LAPLACIAN COEFFICIENTS ************************/
double zCoeff[COEFF_SIZE];
double xCoeff[COEFF_SIZE];
zCoeff[0] = -2.927222222 / (dz * dz);
zCoeff[1] = 1.666666667 / (dz * dz);
zCoeff[2] = -0.238095238 / (dz * dz);
zCoeff[3] = 0.039682539 / (dz * dz);
zCoeff[4] = -0.004960317 / (dz * dz);
zCoeff[5] = 0.000317460 / (dz * dz);
xCoeff[0] = -2.927222222 / (dx * dx);
xCoeff[1] = 1.666666667 / (dx * dx);
xCoeff[2] = -0.238095238 / (dx * dx);
xCoeff[3] = 0.039682539 / (dx * dx);
xCoeff[4] = -0.004960317 / (dx * dx);
xCoeff[5] = 0.000317460 / (dx * dx);
/**************************** COMPUTE TIME-INTERPOLATION FILTER *********************/
// Time interpolation filter length/half length
int hInterpFilter = sub + 1;
int nInterpFilter = 2 * hInterpFilter;
// Check the subsampling coefficient is smaller than the maximum allowed
if (sub>SUB_MAX){
std::cout << "**** ERROR: Subsampling parameter is too high ****" << std::endl;
throw std::runtime_error("");
}
// Allocate and fill interpolation filter
double interpFilter[nInterpFilter];
for (int iFilter = 0; iFilter < hInterpFilter; iFilter++){
interpFilter[iFilter] = 1.0 - 1.0 * iFilter/host_sub;
interpFilter[iFilter+hInterpFilter] = 1.0 - interpFilter[iFilter];
interpFilter[iFilter] = interpFilter[iFilter] * (1.0 / sqrt(double(host_ntw)/double(host_nts)));
interpFilter[iFilter+hInterpFilter] = interpFilter[iFilter+hInterpFilter] * (1.0 / sqrt(double(host_ntw)/double(host_nts)));
}
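	// Note: the two halves of interpFilter hold complementary linear weights (1 - i/sub and i/sub)
	// used to blend consecutive coarse time samples at fine step i; the 1/sqrt(ntw/nts) factor
	// normalizes amplitudes between the fine and coarse time axes.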
/************************* COMPUTE COSINE DAMPING COEFFICIENTS **********************/
// Check that the minimum padding is smaller than the max allowed
if (minPad>PAD_MAX){
std::cout << "**** ERROR: Padding value is too high ****" << std::endl;
throw std::runtime_error("");
}
double cosDampingCoeff[minPad];
// Cosine padding
for (int iFilter=FAT; iFilter<FAT+minPad; iFilter++){
double arg = M_PI / (1.0 * minPad) * 1.0 * (minPad-iFilter+FAT);
arg = alphaCos + (1.0-alphaCos) * cos(arg);
cosDampingCoeff[iFilter-FAT] = arg;
}
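	// Note: these coefficients form a raised-cosine sponge over the minPad-point absorbing layer,
	// varying smoothly between 2*alphaCos-1 and ~1; they are presumably applied by the
	// dampCosineEdge kernels at every time step.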
// Check that the block size is consistent between parfile and "varDeclare.h"
if (blockSize != BLOCK_SIZE) {
std::cout << "**** ERROR: Block size for time stepper is not consistent with parfile ****" << std::endl;
throw std::runtime_error("");
}
/**************************** COPY TO CONSTANT MEMORY *******************************/
// Laplacian coefficients
cuda_call(hipMemcpyToSymbol(dev_zCoeff, zCoeff, COEFF_SIZE*sizeof(double), 0, hipMemcpyHostToDevice)); // Copy Laplacian coefficients to device
cuda_call(hipMemcpyToSymbol(dev_xCoeff, xCoeff, COEFF_SIZE*sizeof(double), 0, hipMemcpyHostToDevice));
// Time interpolation filter
cuda_call(hipMemcpyToSymbol(dev_nInterpFilter, &nInterpFilter, sizeof(int), 0, hipMemcpyHostToDevice)); // Filter length
cuda_call(hipMemcpyToSymbol(dev_hInterpFilter, &hInterpFilter, sizeof(int), 0, hipMemcpyHostToDevice)); // Filter half-length
cuda_call(hipMemcpyToSymbol(dev_interpFilter, interpFilter, nInterpFilter*sizeof(double), 0, hipMemcpyHostToDevice)); // Filter
// Cosine damping parameters
cuda_call(hipMemcpyToSymbol(dev_cosDampingCoeff, &cosDampingCoeff, minPad*sizeof(double), 0, hipMemcpyHostToDevice)); // Array for damping
cuda_call(hipMemcpyToSymbol(dev_alphaCos, &alphaCos, sizeof(double), 0, hipMemcpyHostToDevice)); // Coefficient in the damping formula
cuda_call(hipMemcpyToSymbol(dev_minPad, &minPad, sizeof(int), 0, hipMemcpyHostToDevice)); // min (zPadMinus, zPadPlus, xPadMinus, xPadPlus)
// FD parameters
cuda_call(hipMemcpyToSymbol(dev_nz, &nz, sizeof(int), 0, hipMemcpyHostToDevice)); // Copy model size to device
cuda_call(hipMemcpyToSymbol(dev_nx, &nx, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMemcpyToSymbol(dev_nts, &nts, sizeof(int), 0, hipMemcpyHostToDevice)); // Copy number of coarse time parameters to device
cuda_call(hipMemcpyToSymbol(dev_sub, &sub, sizeof(int), 0, hipMemcpyHostToDevice));
	cuda_call(hipMemcpyToSymbol(dev_ntw, &host_ntw, sizeof(int), 0, hipMemcpyHostToDevice)); // Copy number of fine time samples to device
}
void allocateBornShotsGpu(double *vel2Dtw2, double *reflectivityScale, int iGpu, int iGpuId){
// Set GPU number
hipSetDevice(iGpuId);
	// Scaled velocity
cuda_call(hipMalloc((void**) &dev_vel2Dtw2[iGpu], host_nz*host_nx*sizeof(double))); // Allocate scaled velocity model on device
cuda_call(hipMemcpy(dev_vel2Dtw2[iGpu], vel2Dtw2, host_nz*host_nx*sizeof(double), hipMemcpyHostToDevice)); //
	// Reflectivity scale
cuda_call(hipMalloc((void**) &dev_reflectivityScale[iGpu], host_nz*host_nx*sizeof(double))); // Allocate scaling for reflectivity
cuda_call(hipMemcpy(dev_reflectivityScale[iGpu], reflectivityScale, host_nz*host_nx*sizeof(double), hipMemcpyHostToDevice)); //
// Allocate time slices
cuda_call(hipMalloc((void**) &dev_p0[iGpu], host_nz*host_nx*sizeof(double)));
cuda_call(hipMalloc((void**) &dev_p1[iGpu], host_nz*host_nx*sizeof(double)));
cuda_call(hipMalloc((void**) &dev_ssLeft[iGpu], host_nz*host_nx*sizeof(double)));
cuda_call(hipMalloc((void**) &dev_ssRight[iGpu], host_nz*host_nx*sizeof(double)));
// Allocate non-extended model
cuda_call(hipMalloc((void**) &dev_modelBorn[iGpu], host_nz*host_nx*sizeof(double)));
// Allocate source wavefield
cuda_call(hipMalloc((void**) &dev_BornSrcWavefield[iGpu], host_nz*host_nx*host_nts*sizeof(double))); // Allocate on device
}
void deallocateBornShotsGpu(int iGpu, int iGpuId){
// Set device number on GPU cluster
hipSetDevice(iGpuId);
	// Deallocate all device arrays for this GPU
cuda_call(hipFree(dev_vel2Dtw2[iGpu]));
cuda_call(hipFree(dev_reflectivityScale[iGpu]));
cuda_call(hipFree(dev_p0[iGpu]));
cuda_call(hipFree(dev_p1[iGpu]));
cuda_call(hipFree(dev_ssLeft[iGpu]));
cuda_call(hipFree(dev_ssRight[iGpu]));
cuda_call(hipFree(dev_BornSrcWavefield[iGpu]));
cuda_call(hipFree(dev_modelBorn[iGpu]));
}
/******************************************************************************/
/****************************** Born forward **********************************/
/******************************************************************************/
/********************************** Normal ************************************/
void BornShotsFwdGpu(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *scatWavefieldDts, int iGpu, int iGpuId){
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
hipSetDevice(iGpuId);
// Sources geometry
cuda_call(hipMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(hipMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), hipMemcpyHostToDevice));
// Sources signals
cuda_call(hipMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(hipMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), hipMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(hipMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(hipMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), hipMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(hipMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
hipLaunchKernelGGL(( kernel_exec(injectSource), dim3(1), dim3(nSourcesReg), 0, 0, dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdge), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
hipLaunchKernelGGL(( kernel_exec(interpWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Scattered wavefield computation *************************/
// Initialize time slices on device
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Copy model to device
cuda_call(hipMemcpy(dev_modelBorn[iGpu], model, host_nz*host_nx*sizeof(double), hipMemcpyHostToDevice)); // Copy model (reflectivity) on device
// Allocate and initialize data
cuda_call(hipMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data at coarse time-sampling on device
cuda_call(hipMemset(dev_dataRegDts[iGpu], 0, nReceiversReg*host_nts*sizeof(double))); // Initialize data on device
// Apply both scalings to reflectivity: (1) 2.0*1/v^3 (2) v^2*dtw^2
hipLaunchKernelGGL(( kernel_exec(scaleReflectivity), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Compute secondary source for first coarse time index (its = 0)
hipLaunchKernelGGL(( kernel_exec(imagingFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssLeft[iGpu], 0, dev_BornSrcWavefield[iGpu]));
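	// Note: dev_ssLeft/dev_ssRight hold the scattered (secondary) source at coarse times its and its+1;
	// injectSecondarySource is assumed to blend them linearly at each fine step it2, and the two
	// buffers are swapped at the end of every coarse step.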
// Start propagating scattered wavefield
for (int its = 0; its < host_nts-1; its++){
// Compute secondary source for first coarse time index (its+1)
hipLaunchKernelGGL(( kernel_exec(imagingFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], its+1, dev_BornSrcWavefield[iGpu]));
for (int it2 = 1; it2 < host_sub+1; it2++){
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject secondary source sample itw-1
hipLaunchKernelGGL(( kernel_exec(injectSecondarySource), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2-1));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdge), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Extract data
hipLaunchKernelGGL(( kernel_exec(recordInterpData), dim3(nblockData), dim3(BLOCK_SIZE_DATA), 0, 0, dev_p0[iGpu], dev_dataRegDts[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Switch pointers for secondary source
dev_ssTemp1[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssTemp1[iGpu];
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
}
// Copy data back to host
cuda_call(hipMemcpy(dataRegDts, dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double), hipMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(hipFree(dev_dataRegDts[iGpu]));
cuda_call(hipFree(dev_sourcesPositionReg[iGpu]));
cuda_call(hipFree(dev_sourcesSignals[iGpu]));
cuda_call(hipFree(dev_receiversPositionReg[iGpu]));
}
void BornShotsFwdGpuWavefield(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *scatWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (FORWARD)
// The source wavelet/signals already contain the second time derivative
// Set device number
hipSetDevice(iGpuId);
// Sources geometry
cuda_call(hipMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(hipMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), hipMemcpyHostToDevice));
// Sources signals
cuda_call(hipMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(hipMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), hipMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(hipMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(hipMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), hipMemcpyHostToDevice));
// Allocate and initialize data
cuda_call(hipMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data at coarse time-sampling on device
cuda_call(hipMemset(dev_dataRegDts[iGpu], 0, nReceiversReg*host_nts*sizeof(double))); // Initialize data on device
// Initialize source wavefield on device
cuda_call(hipMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
hipLaunchKernelGGL(( kernel_exec(injectSource), dim3(1), dim3(nSourcesReg), 0, 0, dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdge), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
hipLaunchKernelGGL(( kernel_exec(interpWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
// Copy wavefield back to host
cuda_call(hipMemcpy(srcWavefieldDts, dev_BornSrcWavefield[iGpu], host_nz*host_nx*host_nts*sizeof(double), hipMemcpyDeviceToHost));
/************************** Scattered wavefield computation *************************/
// Initialize time slices on device
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Allocate and copy model
cuda_call(hipMemcpy(dev_modelBorn[iGpu], model, host_nz*host_nx*sizeof(double), hipMemcpyHostToDevice)); // Copy model (reflectivity) on device
// Allocate and initialize scattered wavefield on device
cuda_call(hipMalloc((void**) &dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double))); // Allocate on device
cuda_call(hipMemset(dev_BornSecWavefield, 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Apply both scalings to reflectivity:
hipLaunchKernelGGL(( kernel_exec(scaleReflectivity), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Compute secondary source for first coarse time index (its = 0)
hipLaunchKernelGGL(( kernel_exec(imagingFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssLeft[iGpu], 0, dev_BornSrcWavefield[iGpu]));
// Start propagating scattered wavefield
for (int its = 0; its < host_nts-1; its++){
// Compute secondary source for first coarse time index (its+1)
hipLaunchKernelGGL(( kernel_exec(imagingFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], its+1, dev_BornSrcWavefield[iGpu]));
for (int it2 = 1; it2 < host_sub+1; it2++){
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject secondary source sample itw-1
hipLaunchKernelGGL(( kernel_exec(injectSecondarySource), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2-1));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdge), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Record wavefield
hipLaunchKernelGGL(( kernel_exec(interpWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSecWavefield, dev_p0[iGpu], its, it2));
// Extract data
hipLaunchKernelGGL(( kernel_exec(recordInterpData), dim3(nblockData), dim3(BLOCK_SIZE_DATA), 0, 0, dev_p0[iGpu], dev_dataRegDts[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Switch pointers for secondary source
dev_ssTemp1[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssTemp1[iGpu];
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
}
// Copy data back to host
cuda_call(hipMemcpy(dataRegDts, dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double), hipMemcpyDeviceToHost));
// Copy scattered wavefield back to host
cuda_call(hipMemcpy(scatWavefieldDts, dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double), hipMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(hipFree(dev_dataRegDts[iGpu]));
cuda_call(hipFree(dev_sourcesPositionReg[iGpu]));
cuda_call(hipFree(dev_sourcesSignals[iGpu]));
cuda_call(hipFree(dev_receiversPositionReg[iGpu]));
cuda_call(hipFree(dev_BornSecWavefield));
}
/****************************** Free surface **********************************/
void BornShotsFwdFsGpu(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *scatWavefieldDts, int iGpu, int iGpuId){
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
hipSetDevice(iGpuId);
// Sources geometry
cuda_call(hipMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(hipMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), hipMemcpyHostToDevice));
// Sources signals
cuda_call(hipMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(hipMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), hipMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(hipMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(hipMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), hipMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(hipMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Apply free surface condition for Laplacian
hipLaunchKernelGGL(( kernel_exec(setFsConditionFwdGpu), dim3(nblocky), dim3(BLOCK_SIZE), 0, 0, dev_p1[iGpu]));
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
hipLaunchKernelGGL(( kernel_exec(injectSource), dim3(1), dim3(nSourcesReg), 0, 0, dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdgeFs), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
hipLaunchKernelGGL(( kernel_exec(interpWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Scattered wavefield computation *************************/
// Initialize time slices on device
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Copy model to device
cuda_call(hipMemcpy(dev_modelBorn[iGpu], model, host_nz*host_nx*sizeof(double), hipMemcpyHostToDevice)); // Copy model (reflectivity) on device
// Allocate and initialize data
cuda_call(hipMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data at coarse time-sampling on device
cuda_call(hipMemset(dev_dataRegDts[iGpu], 0, nReceiversReg*host_nts*sizeof(double))); // Initialize data on device
// Apply both scalings to reflectivity: (1) 2.0*1/v^3 (2) v^2*dtw^2
hipLaunchKernelGGL(( kernel_exec(scaleReflectivity), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Compute secondary source for first coarse time index (its = 0)
hipLaunchKernelGGL(( kernel_exec(imagingFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssLeft[iGpu], 0, dev_BornSrcWavefield[iGpu]));
// Start propagating scattered wavefield
for (int its = 0; its < host_nts-1; its++){
// Compute secondary source for first coarse time index (its+1)
hipLaunchKernelGGL(( kernel_exec(imagingFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], its+1, dev_BornSrcWavefield[iGpu]));
for (int it2 = 1; it2 < host_sub+1; it2++){
// Apply free surface condition for Laplacian
hipLaunchKernelGGL(( kernel_exec(setFsConditionFwdGpu), dim3(nblocky), dim3(BLOCK_SIZE), 0, 0, dev_p1[iGpu]));
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject secondary source sample itw-1
hipLaunchKernelGGL(( kernel_exec(injectSecondarySource), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2-1));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdgeFs), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Extract data
hipLaunchKernelGGL(( kernel_exec(recordInterpData), dim3(nblockData), dim3(BLOCK_SIZE_DATA), 0, 0, dev_p0[iGpu], dev_dataRegDts[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Switch pointers for secondary source
dev_ssTemp1[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssTemp1[iGpu];
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
}
// Copy data back to host
cuda_call(hipMemcpy(dataRegDts, dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double), hipMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(hipFree(dev_dataRegDts[iGpu]));
cuda_call(hipFree(dev_sourcesPositionReg[iGpu]));
cuda_call(hipFree(dev_sourcesSignals[iGpu]));
cuda_call(hipFree(dev_receiversPositionReg[iGpu]));
}
void BornShotsFwdFsGpuWavefield(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *scatWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (FORWARD)
// The source wavelet/signals already contain the second time derivative
// Set device number
hipSetDevice(iGpuId);
// Sources geometry
cuda_call(hipMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(hipMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), hipMemcpyHostToDevice));
// Sources signals
cuda_call(hipMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(hipMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), hipMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(hipMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(hipMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), hipMemcpyHostToDevice));
// Allocate and initialize data
cuda_call(hipMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data at coarse time-sampling on device
cuda_call(hipMemset(dev_dataRegDts[iGpu], 0, nReceiversReg*host_nts*sizeof(double))); // Initialize data on device
// Initialize source wavefield on device
cuda_call(hipMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
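// Note: the integer divisions above assume (host_nz-2*FAT) and (host_nx-2*FAT) are exact
// multiples of BLOCK_SIZE; otherwise the trailing interior points would not be covered by the grid.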
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
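// itw spans 1 to (host_nts-1)*host_sub = host_ntw-1 over the whole double loop;
// the source sample injected below is read at fine index itw-1.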
// Apply free surface condition for Laplacian
hipLaunchKernelGGL(( kernel_exec(setFsConditionFwdGpu), dim3(nblocky), dim3(BLOCK_SIZE), 0, 0, dev_p1[iGpu]));
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
hipLaunchKernelGGL(( kernel_exec(injectSource), dim3(1), dim3(nSourcesReg), 0, 0, dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdgeFs), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
hipLaunchKernelGGL(( kernel_exec(interpWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
// Copy wavefield back to host
cuda_call(hipMemcpy(srcWavefieldDts, dev_BornSrcWavefield[iGpu], host_nz*host_nx*host_nts*sizeof(double), hipMemcpyDeviceToHost));
/************************** Scattered wavefield computation *************************/
// Initialize time slices on device
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Copy model to device
cuda_call(hipMemcpy(dev_modelBorn[iGpu], model, host_nz*host_nx*sizeof(double), hipMemcpyHostToDevice)); // Copy model (reflectivity) on device
// Allocate and initialize scattered wavefield on device
cuda_call(hipMalloc((void**) &dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double))); // Allocate on device
cuda_call(hipMemset(dev_BornSecWavefield, 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Apply both scalings to reflectivity: (1) 2.0*1/v^3 (2) v^2*dtw^2
hipLaunchKernelGGL(( kernel_exec(scaleReflectivity), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Compute secondary source for first coarse time index (its = 0)
hipLaunchKernelGGL(( kernel_exec(imagingFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssLeft[iGpu], 0, dev_BornSrcWavefield[iGpu]));
// Start propagating scattered wavefield
for (int its = 0; its < host_nts-1; its++){
// Compute secondary source for the next coarse time index (its+1)
hipLaunchKernelGGL(( kernel_exec(imagingFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], its+1, dev_BornSrcWavefield[iGpu]));
for (int it2 = 1; it2 < host_sub+1; it2++){
// Apply free surface condition for Laplacian
hipLaunchKernelGGL(( kernel_exec(setFsConditionFwdGpu), dim3(nblocky), dim3(BLOCK_SIZE), 0, 0, dev_p1[iGpu]));
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject secondary source sample itw-1
hipLaunchKernelGGL(( kernel_exec(injectSecondarySource), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2-1));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdgeFs), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Record wavefield
hipLaunchKernelGGL(( kernel_exec(interpWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSecWavefield, dev_p0[iGpu], its, it2));
// Extract data
hipLaunchKernelGGL(( kernel_exec(recordInterpData), dim3(nblockData), dim3(BLOCK_SIZE_DATA), 0, 0, dev_p0[iGpu], dev_dataRegDts[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Switch pointers for secondary source
dev_ssTemp1[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssTemp1[iGpu];
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
}
// Copy data back to host
cuda_call(hipMemcpy(dataRegDts, dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double), hipMemcpyDeviceToHost));
// Copy scattered wavefield back to host
cuda_call(hipMemcpy(scatWavefieldDts, dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double), hipMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(hipFree(dev_dataRegDts[iGpu]));
cuda_call(hipFree(dev_sourcesPositionReg[iGpu]));
cuda_call(hipFree(dev_sourcesSignals[iGpu]));
cuda_call(hipFree(dev_receiversPositionReg[iGpu]));
cuda_call(hipFree(dev_BornSecWavefield));
}
/******************************************************************************/
/****************************** Born adjoint **********************************/
/******************************************************************************/
/********************************** Normal ************************************/
void BornShotsAdjGpu(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *recWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (ADJOINT)
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
hipSetDevice(iGpuId);
// Sources geometry
cuda_call(hipMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(hipMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), hipMemcpyHostToDevice));
// Sources signals
cuda_call(hipMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(hipMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), hipMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(hipMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(hipMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), hipMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(hipMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
hipLaunchKernelGGL(( kernel_exec(injectSource), dim3(1), dim3(nSourcesReg), 0, 0, dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdge), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
hipLaunchKernelGGL(( kernel_exec(interpWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Receiver wavefield computation **************************/
// Initialize time slices on device
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Model
cuda_call(hipMemset(dev_modelBorn[iGpu], 0, host_nz*host_nx*sizeof(double))); // Initialize model on device
// Data
cuda_call(hipMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data on device
cuda_call(hipMemcpy(dev_dataRegDts[iGpu], dataRegDts, nReceiversReg*host_nts*sizeof(double), hipMemcpyHostToDevice)); // Copy data on device
// Main loop
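// The receiver wavefield is built by propagating backward in time: the recorded data act as an
// adjoint source (interpInjectData), the field is stepped in reverse (stepAdjGpu), the fine steps
// are accumulated into the coarse slices ssLeft/ssRight, and the imaging condition after the
// inner loop correlates ssRight with the stored source wavefield (presumably a zero-lag
// cross-correlation) to update the reflectivity model.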
for (int its = host_nts-2; its > -1; its--){
for (int it2 = host_sub-1; it2 > -1; it2--){
// Step adjoint in time
hipLaunchKernelGGL(( kernel_exec(stepAdjGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject data
hipLaunchKernelGGL(( kernel_exec(interpInjectData), dim3(nblockData), dim3(BLOCK_SIZE_DATA), 0, 0, dev_dataRegDts[iGpu], dev_p0[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Damp wavefield
hipLaunchKernelGGL(( kernel_exec(dampCosineEdge), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Interpolate and record time slices of receiver wavefield at coarse sampling (no scaling applied yet)
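// extractInterpAdjointWavefield presumably spreads the fine-time slice dev_p0 into the two
// bracketing coarse slices with the same linear weights used on the forward side, i.e. the
// adjoint of the coarse-to-fine time interpolation.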
hipLaunchKernelGGL(( extractInterpAdjointWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2);
// Switch pointers for time slices at fine time-sampling
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Apply imaging condition for its+1
hipLaunchKernelGGL(( kernel_exec(imagingAdjGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], its+1));
// Switch pointers for receiver wavefield before imaging time derivative
dev_ssTemp1[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssTemp1[iGpu];
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double))); // Reinitialize slice for coarse time-sampling before time derivative
} // Finished main loop - we still have to compute imaging condition for its=0
// Apply imaging condition for its=0
hipLaunchKernelGGL(( kernel_exec(imagingAdjGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], 0));
// Scale model for finite-difference and secondary source coefficient
hipLaunchKernelGGL(( kernel_exec(scaleReflectivity), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Copy model back to host
cuda_call(hipMemcpy(model, dev_modelBorn[iGpu], host_nz*host_nx*sizeof(double), hipMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(hipFree(dev_dataRegDts[iGpu]));
cuda_call(hipFree(dev_sourcesPositionReg[iGpu]));
cuda_call(hipFree(dev_sourcesSignals[iGpu]));
cuda_call(hipFree(dev_receiversPositionReg[iGpu]));
}
void BornShotsAdjGpuWavefield(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *recWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (ADJOINT)
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
hipSetDevice(iGpuId);
// Sources geometry
cuda_call(hipMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(hipMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), hipMemcpyHostToDevice));
// Sources signals
cuda_call(hipMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(hipMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), hipMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(hipMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(hipMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), hipMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(hipMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
hipLaunchKernelGGL(( kernel_exec(injectSource), dim3(1), dim3(nSourcesReg), 0, 0, dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdge), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
hipLaunchKernelGGL(( kernel_exec(interpWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Receiver wavefield computation **************************/
// Initialize time slices on device
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Allocate and initialize receiver wavefield on device
cuda_call(hipMalloc((void**) &dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double))); // Allocate on device
cuda_call(hipMemset(dev_BornSecWavefield, 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Model
cuda_call(hipMemset(dev_modelBorn[iGpu], 0, host_nz*host_nx*sizeof(double))); // Initialize model on device
// Data
cuda_call(hipMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data on device
cuda_call(hipMemcpy(dev_dataRegDts[iGpu], dataRegDts, nReceiversReg*host_nts*sizeof(double), hipMemcpyHostToDevice)); // Copy data on device
// Main loop
for (int its = host_nts-2; its > -1; its--){
for (int it2 = host_sub-1; it2 > -1; it2--){
// Step adjoint in time
hipLaunchKernelGGL(( kernel_exec(stepAdjGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject data
hipLaunchKernelGGL(( kernel_exec(interpInjectData), dim3(nblockData), dim3(BLOCK_SIZE_DATA), 0, 0, dev_dataRegDts[iGpu], dev_p0[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Damp wavefield
hipLaunchKernelGGL(( kernel_exec(dampCosineEdge), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Interpolate and record time slices of receiver wavefield at coarse sampling (no scaling applied yet)
hipLaunchKernelGGL(( extractInterpAdjointWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2);
// Switch pointers for time slices at fine time-sampling
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Apply imaging condition for its+1
hipLaunchKernelGGL(( kernel_exec(imagingAdjGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], its+1));
// Record and scale receiver wavefield at coarse sampling for its+1
hipLaunchKernelGGL(( kernel_exec(recordScaleWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSecWavefield, dev_ssRight[iGpu], its+1, dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Switch pointers for receiver wavefield before imaging time derivative
dev_ssTemp1[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssTemp1[iGpu];
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double))); // Reinitialize slice for coarse time-sampling before time derivative
} // Finished main loop - we still have to compute imaging condition for its=0
// Save receiver wavefield at its=0
hipLaunchKernelGGL(( kernel_exec(recordScaleWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSecWavefield, dev_ssRight[iGpu], 0, dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Apply imaging condition for its=0
hipLaunchKernelGGL(( kernel_exec(imagingAdjGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], 0));
// Scale model for finite-difference and secondary source coefficient
// It is cheaper to apply this scaling once here than at every time step
hipLaunchKernelGGL(( kernel_exec(scaleReflectivity), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Copy model back to host
cuda_call(hipMemcpy(model, dev_modelBorn[iGpu], host_nz*host_nx*sizeof(double), hipMemcpyDeviceToHost));
// Copy scattered wavefield back to host
cuda_call(hipMemcpy(recWavefieldDts, dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double), hipMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(hipFree(dev_dataRegDts[iGpu]));
cuda_call(hipFree(dev_sourcesPositionReg[iGpu]));
cuda_call(hipFree(dev_sourcesSignals[iGpu]));
cuda_call(hipFree(dev_receiversPositionReg[iGpu]));
cuda_call(hipFree(dev_BornSecWavefield));
}
/****************************** Free surface **********************************/
void BornShotsAdjFsGpu(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *recWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (ADJOINT)
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
hipSetDevice(iGpuId);
// Sources geometry
cuda_call(hipMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(hipMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), hipMemcpyHostToDevice));
// Sources signals
cuda_call(hipMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(hipMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), hipMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(hipMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(hipMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), hipMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(hipMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Apply free surface condition for Laplacian
hipLaunchKernelGGL(( kernel_exec(setFsConditionFwdGpu), dim3(nblocky), dim3(BLOCK_SIZE), 0, 0, dev_p1[iGpu]));
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
hipLaunchKernelGGL(( kernel_exec(injectSource), dim3(1), dim3(nSourcesReg), 0, 0, dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdgeFs), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
hipLaunchKernelGGL(( kernel_exec(interpWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Receiver wavefield computation **************************/
// Initialize time slices on device
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Model
cuda_call(hipMemset(dev_modelBorn[iGpu], 0, host_nz*host_nx*sizeof(double))); // Initialize model on device
// Data
cuda_call(hipMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data on device
cuda_call(hipMemcpy(dev_dataRegDts[iGpu], dataRegDts, nReceiversReg*host_nts*sizeof(double), hipMemcpyHostToDevice)); // Copy data on device
// Main loop
for (int its = host_nts-2; its > -1; its--){
for (int it2 = host_sub-1; it2 > -1; it2--){
// Step adjoint in time
hipLaunchKernelGGL(( kernel_exec(stepAdjFsGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject data
hipLaunchKernelGGL(( kernel_exec(interpInjectData), dim3(nblockData), dim3(BLOCK_SIZE_DATA), 0, 0, dev_dataRegDts[iGpu], dev_p0[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Damp wavefield
hipLaunchKernelGGL(( kernel_exec(dampCosineEdgeFs), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Interpolate and record time slices of receiver wavefield at coarse sampling (no scaling applied yet)
hipLaunchKernelGGL(( extractInterpAdjointWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2);
// Switch pointers for time slices at fine time-sampling
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Apply imaging condition for its+1
hipLaunchKernelGGL(( kernel_exec(imagingAdjGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], its+1));
// Switch pointers for receiver wavefield before imaging time derivative
dev_ssTemp1[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssTemp1[iGpu];
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double))); // Reinitialize slice for coarse time-sampling before time derivative
} // Finished main loop - we still have to compute imaging condition for its=0
// Apply imaging condition for its=0
hipLaunchKernelGGL(( kernel_exec(imagingAdjGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], 0));
// Scale model for finite-difference and secondary source coefficient
hipLaunchKernelGGL(( kernel_exec(scaleReflectivity), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Copy model back to host
cuda_call(hipMemcpy(model, dev_modelBorn[iGpu], host_nz*host_nx*sizeof(double), hipMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(hipFree(dev_dataRegDts[iGpu]));
cuda_call(hipFree(dev_sourcesPositionReg[iGpu]));
cuda_call(hipFree(dev_sourcesSignals[iGpu]));
cuda_call(hipFree(dev_receiversPositionReg[iGpu]));
}
void BornShotsAdjFsGpuWavefield(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *recWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (ADJOINT)
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
hipSetDevice(iGpuId);
// Sources geometry
cuda_call(hipMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(hipMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), hipMemcpyHostToDevice));
// Sources signals
cuda_call(hipMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(hipMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), hipMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(hipMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, hipMemcpyHostToDevice));
cuda_call(hipMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(hipMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), hipMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(hipMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Apply free surface condition for Laplacian
hipLaunchKernelGGL(( kernel_exec(setFsConditionFwdGpu), dim3(nblocky), dim3(BLOCK_SIZE), 0, 0, dev_p1[iGpu]));
// Step forward
hipLaunchKernelGGL(( kernel_exec(stepFwdGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
hipLaunchKernelGGL(( kernel_exec(injectSource), dim3(1), dim3(nSourcesReg), 0, 0, dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
hipLaunchKernelGGL(( kernel_exec(dampCosineEdgeFs), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
hipLaunchKernelGGL(( kernel_exec(interpWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Receiver wavefield computation **************************/
// Initialize time slices on device
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(hipMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Allocate and initialize receiver wavefield on device
cuda_call(hipMalloc((void**) &dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double))); // Allocate on device
cuda_call(hipMemset(dev_BornSecWavefield, 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Model
cuda_call(hipMemset(dev_modelBorn[iGpu], 0, host_nz*host_nx*sizeof(double))); // Initialize model on device
// Data
cuda_call(hipMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data on device
cuda_call(hipMemcpy(dev_dataRegDts[iGpu], dataRegDts, nReceiversReg*host_nts*sizeof(double), hipMemcpyHostToDevice)); // Copy data on device
// Main loop
for (int its = host_nts-2; its > -1; its--){
for (int it2 = host_sub-1; it2 > -1; it2--){
// Step adjoint in time
hipLaunchKernelGGL(( kernel_exec(stepAdjFsGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject data
hipLaunchKernelGGL(( kernel_exec(interpInjectData), dim3(nblockData), dim3(BLOCK_SIZE_DATA), 0, 0, dev_dataRegDts[iGpu], dev_p0[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Damp wavefield
hipLaunchKernelGGL(( kernel_exec(dampCosineEdgeFs), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_p0[iGpu], dev_p1[iGpu]));
// Interpolate and record time slices of receiver wavefield at coarse sampling (no scaling applied yet)
hipLaunchKernelGGL(( extractInterpAdjointWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2);
// Switch pointers for time slices at fine time-sampling
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Apply imaging condition for its+1
hipLaunchKernelGGL(( kernel_exec(imagingAdjGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], its+1));
// Record and scale receiver wavefield at coarse sampling for its+1
hipLaunchKernelGGL(( kernel_exec(recordScaleWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSecWavefield, dev_ssRight[iGpu], its+1, dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Switch pointers for receiver wavefield before imaging time derivative
dev_ssTemp1[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssTemp1[iGpu];
cuda_call(hipMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double))); // Reinitialize slice for coarse time-sampling before time derivative
} // Finished main loop - we still have to compute imaging condition for its=0
// Save receiver wavefield at its=0
hipLaunchKernelGGL(( kernel_exec(recordScaleWavefield), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_BornSecWavefield, dev_ssRight[iGpu], 0, dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Apply imaging condition for its=0
hipLaunchKernelGGL(( kernel_exec(imagingAdjGpu), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], 0));
// Scale model for finite-difference and secondary source coefficient
// It is cheaper to apply this scaling once here than at every time step
hipLaunchKernelGGL(( kernel_exec(scaleReflectivity), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Copy model back to host
cuda_call(hipMemcpy(model, dev_modelBorn[iGpu], host_nz*host_nx*sizeof(double), hipMemcpyDeviceToHost));
// Copy scattered wavefield back to host
cuda_call(hipMemcpy(recWavefieldDts, dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double), hipMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(hipFree(dev_dataRegDts[iGpu]));
cuda_call(hipFree(dev_sourcesPositionReg[iGpu]));
cuda_call(hipFree(dev_sourcesSignals[iGpu]));
cuda_call(hipFree(dev_receiversPositionReg[iGpu]));
cuda_call(hipFree(dev_BornSecWavefield));
}
| 93c6a442657f33024367f4874db6ae60153b3994.cu | #include "BornShotsGpuFunctions.h"
#include <iostream>
#include "varDeclare.h"
#include <vector>
#include <algorithm>
#include <math.h>
#include "kernelsGpu.cu"
#include "cudaErrors.cu"
#include <stdio.h>
#include <assert.h>
/******************************************************************************/
/************************ Set GPU propagation parameters **********************/
/******************************************************************************/
bool getGpuInfo(std::vector<int> gpuList, int info, int deviceNumberInfo){
int nDevice, driver;
cudaGetDeviceCount(&nDevice);
if (info == 1){
std::cout << " " << std::endl;
std::cout << "-------------------------------------------------------------------" << std::endl;
std::cout << "---------------------------- INFO FOR GPU# " << deviceNumberInfo << " ----------------------" << std::endl;
std::cout << "-------------------------------------------------------------------" << std::endl;
// Number of devices
std::cout << "Number of requested GPUs: " << gpuList.size() << std::endl;
std::cout << "Number of available GPUs: " << nDevice << std::endl;
std::cout << "Id of requested GPUs: ";
for (int iGpu=0; iGpu<gpuList.size(); iGpu++){
if (iGpu<gpuList.size()-1){std::cout << gpuList[iGpu] << ", ";}
else{ std::cout << gpuList[iGpu] << std::endl;}
}
// Driver version
std::cout << "Cuda driver version: " << cudaDriverGetVersion(&driver) << std::endl; // Driver
// Get properties
cudaDeviceProp dprop;
cudaGetDeviceProperties(&dprop,deviceNumberInfo);
// Display
std::cout << "Name: " << dprop.name << std::endl;
std::cout << "Total global memory: " << dprop.totalGlobalMem/(1024*1024*1024) << " [GB] " << std::endl;
std::cout << "Shared memory per block: " << dprop.sharedMemPerBlock/1024 << " [kB]" << std::endl;
std::cout << "Number of register per block: " << dprop.regsPerBlock << std::endl;
std::cout << "Warp size: " << dprop.warpSize << " [threads]" << std::endl;
std::cout << "Maximum pitch allowed for memory copies in bytes: " << dprop.memPitch/(1024*1024*1024) << " [GB]" << std::endl;
std::cout << "Maximum threads per block: " << dprop.maxThreadsPerBlock << std::endl;
std::cout << "Maximum block dimensions: " << "(" << dprop.maxThreadsDim[0] << ", " << dprop.maxThreadsDim[1] << ", " << dprop.maxThreadsDim[2] << ")" << std::endl;
std::cout << "Maximum grid dimensions: " << "(" << dprop.maxGridSize[0] << ", " << dprop.maxGridSize[1] << ", " << dprop.maxGridSize[2] << ")" << std::endl;
std::cout << "Total constant memory: " << dprop.totalConstMem/1024 << " [kB]" << std::endl;
std::cout << "Number of streaming multiprocessors on device: " << dprop.multiProcessorCount << std::endl;
if (dprop.deviceOverlap == 1) {std::cout << "Device can simultaneously perform a cudaMemcpy() and kernel execution" << std::endl;}
if (dprop.deviceOverlap != 1) {std::cout << "Device cannot simultaneously perform a cudaMemcpy() and kernel execution" << std::endl;}
if (dprop.canMapHostMemory == 1) { std::cout << "Device can map host memory" << std::endl; }
if (dprop.canMapHostMemory != 1) { std::cout << "Device cannot map host memory" << std::endl; }
if (dprop.concurrentKernels == 1) {std::cout << "Device can support concurrent kernel execution" << std::endl;}
if (dprop.concurrentKernels != 1) {std::cout << "Device cannot support concurrent kernel execution" << std::endl;}
std::cout << "-------------------------------------------------------------------" << std::endl;
std::cout << " " << std::endl;
}
// Check that the number of requested GPU is less or equal to the total number of available GPUs
if (gpuList.size()>nDevice) {
std::cout << "**** ERROR [getGpuInfo]: Number of requested GPU greater than available GPUs ****" << std::endl;
return false;
}
// Check that the GPU numbers in the list are between 0 and nGpu-1
for (int iGpu=0; iGpu<gpuList.size(); iGpu++){
if (gpuList[iGpu]<0 || gpuList[iGpu]>nDevice-1){
std::cout << "**** ERROR [getGpuInfo]: One of the element of the GPU Id list is not a valid GPU Id number ****" << std::endl;
return false;
}
}
return true;
}
void initBornGpu(double dz, double dx, int nz, int nx, int nts, double dts, int sub, int minPad, int blockSize, double alphaCos, int nGpu, int iGpuId, int iGpuAlloc){
// Set GPU number
cudaSetDevice(iGpuId);
host_nz = nz;
host_nx = nx;
host_dz = dz;
host_dx = dx;
host_nts = nts;
host_sub = sub;
host_ntw = (nts - 1) * sub + 1;
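// Fine time axis: ntw = (nts-1)*sub + 1 samples for nts coarse samples
// (for example, nts = 4 with sub = 3 gives ntw = 10).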
/**************************** ALLOCATE ARRAYS OF ARRAYS *****************************/
// Only one GPU will perform the following
if (iGpuId == iGpuAlloc) {
// Time slices for FD stepping
dev_p0 = new double*[nGpu];
dev_p1 = new double*[nGpu];
dev_temp1 = new double*[nGpu];
dev_ssLeft = new double*[nGpu];
dev_ssRight = new double*[nGpu];
dev_ssTemp1 = new double*[nGpu];
// Data
dev_dataRegDts = new double*[nGpu];
// Source and receivers
dev_sourcesPositionReg = new int*[nGpu];
dev_receiversPositionReg = new int*[nGpu];
// Sources signal
dev_sourcesSignals = new double*[nGpu];
// Scaled velocity
dev_vel2Dtw2 = new double*[nGpu];
// Reflectivity scaling
dev_reflectivityScale = new double*[nGpu];
// Reflectivity
dev_modelBorn = new double*[nGpu];
// Source wavefields
dev_BornSrcWavefield = new double*[nGpu];
}
/**************************** COMPUTE LAPLACIAN COEFFICIENTS ************************/
double zCoeff[COEFF_SIZE];
double xCoeff[COEFF_SIZE];
zCoeff[0] = -2.927222222 / (dz * dz);
zCoeff[1] = 1.666666667 / (dz * dz);
zCoeff[2] = -0.238095238 / (dz * dz);
zCoeff[3] = 0.039682539 / (dz * dz);
zCoeff[4] = -0.004960317 / (dz * dz);
zCoeff[5] = 0.000317460 / (dz * dz);
xCoeff[0] = -2.927222222 / (dx * dx);
xCoeff[1] = 1.666666667 / (dx * dx);
xCoeff[2] = -0.238095238 / (dx * dx);
xCoeff[3] = 0.039682539 / (dx * dx);
xCoeff[4] = -0.004960317 / (dx * dx);
xCoeff[5] = 0.000317460 / (dx * dx);
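// These are the standard 10th-order centered finite-difference coefficients for the second
// derivative, pre-divided by dz^2 (resp. dx^2); the symmetric stencil sums to ~0, i.e.
// zCoeff[0] + 2*(zCoeff[1]+...+zCoeff[5]) ~ 0, as required for a derivative stencil.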
/**************************** COMPUTE TIME-INTERPOLATION FILTER *********************/
// Time interpolation filter length/half length
int hInterpFilter = sub + 1;
int nInterpFilter = 2 * hInterpFilter;
// Check the subsampling coefficient is smaller than the maximum allowed
if (sub>SUB_MAX){
std::cout << "**** ERROR: Subsampling parameter is too high ****" << std::endl;
throw std::runtime_error("");
}
// Allocate and fill interpolation filter
double interpFilter[nInterpFilter];
for (int iFilter = 0; iFilter < hInterpFilter; iFilter++){
interpFilter[iFilter] = 1.0 - 1.0 * iFilter/host_sub;
interpFilter[iFilter+hInterpFilter] = 1.0 - interpFilter[iFilter];
interpFilter[iFilter] = interpFilter[iFilter] * (1.0 / sqrt(double(host_ntw)/double(host_nts)));
interpFilter[iFilter+hInterpFilter] = interpFilter[iFilter+hInterpFilter] * (1.0 / sqrt(double(host_ntw)/double(host_nts)));
}
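// interpFilter stores the linear interpolation weights between two neighbouring coarse samples:
// (1 - i/sub) for the left sample and i/sub for the right one, each scaled by
// 1/sqrt(ntw/nts) = sqrt(nts/ntw), apparently to compensate for the fine/coarse sampling ratio
// in the interpolation/extraction pair.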
/************************* COMPUTE COSINE DAMPING COEFFICIENTS **********************/
// Check that the minimum padding is smaller than the max allowed
if (minPad>PAD_MAX){
std::cout << "**** ERROR: Padding value is too high ****" << std::endl;
throw std::runtime_error("");
}
double cosDampingCoeff[minPad];
// Cosine padding
for (int iFilter=FAT; iFilter<FAT+minPad; iFilter++){
double arg = M_PI / (1.0 * minPad) * 1.0 * (minPad-iFilter+FAT);
arg = alphaCos + (1.0-alphaCos) * cos(arg);
cosDampingCoeff[iFilter-FAT] = arg;
}
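// cos(arg) runs from -1 at iFilter = FAT to nearly +1 at iFilter = FAT+minPad-1, so the stored
// coefficients ramp from 2*alphaCos - 1 up to ~1; the dampCosineEdge kernels presumably apply
// them multiplicatively inside the absorbing pad at every fine time step.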
// Check that the block size is consistent between parfile and "varDeclare.h"
if (blockSize != BLOCK_SIZE) {
std::cout << "**** ERROR: Block size for time stepper is not consistent with parfile ****" << std::endl;
throw std::runtime_error("");
}
/**************************** COPY TO CONSTANT MEMORY *******************************/
// Laplacian coefficients
cuda_call(cudaMemcpyToSymbol(dev_zCoeff, zCoeff, COEFF_SIZE*sizeof(double), 0, cudaMemcpyHostToDevice)); // Copy Laplacian coefficients to device
cuda_call(cudaMemcpyToSymbol(dev_xCoeff, xCoeff, COEFF_SIZE*sizeof(double), 0, cudaMemcpyHostToDevice));
// Time interpolation filter
cuda_call(cudaMemcpyToSymbol(dev_nInterpFilter, &nInterpFilter, sizeof(int), 0, cudaMemcpyHostToDevice)); // Filter length
cuda_call(cudaMemcpyToSymbol(dev_hInterpFilter, &hInterpFilter, sizeof(int), 0, cudaMemcpyHostToDevice)); // Filter half-length
cuda_call(cudaMemcpyToSymbol(dev_interpFilter, interpFilter, nInterpFilter*sizeof(double), 0, cudaMemcpyHostToDevice)); // Filter
// Cosine damping parameters
cuda_call(cudaMemcpyToSymbol(dev_cosDampingCoeff, &cosDampingCoeff, minPad*sizeof(double), 0, cudaMemcpyHostToDevice)); // Array for damping
cuda_call(cudaMemcpyToSymbol(dev_alphaCos, &alphaCos, sizeof(double), 0, cudaMemcpyHostToDevice)); // Coefficient in the damping formula
cuda_call(cudaMemcpyToSymbol(dev_minPad, &minPad, sizeof(int), 0, cudaMemcpyHostToDevice)); // min (zPadMinus, zPadPlus, xPadMinus, xPadPlus)
// FD parameters
cuda_call(cudaMemcpyToSymbol(dev_nz, &nz, sizeof(int), 0, cudaMemcpyHostToDevice)); // Copy model size to device
cuda_call(cudaMemcpyToSymbol(dev_nx, &nx, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMemcpyToSymbol(dev_nts, &nts, sizeof(int), 0, cudaMemcpyHostToDevice)); // Copy number of coarse time parameters to device
cuda_call(cudaMemcpyToSymbol(dev_sub, &sub, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMemcpyToSymbol(dev_ntw, &host_ntw, sizeof(int), 0, cudaMemcpyHostToDevice)); // Copy number of coarse time parameters to device
}
void allocateBornShotsGpu(double *vel2Dtw2, double *reflectivityScale, int iGpu, int iGpuId){
// Set GPU number
cudaSetDevice(iGpuId);
// Scaled velocity
cuda_call(cudaMalloc((void**) &dev_vel2Dtw2[iGpu], host_nz*host_nx*sizeof(double))); // Allocate scaled velocity model on device
cuda_call(cudaMemcpy(dev_vel2Dtw2[iGpu], vel2Dtw2, host_nz*host_nx*sizeof(double), cudaMemcpyHostToDevice)); // Copy scaled velocity model to device
// Reflectivity scale
cuda_call(cudaMalloc((void**) &dev_reflectivityScale[iGpu], host_nz*host_nx*sizeof(double))); // Allocate scaling for reflectivity
cuda_call(cudaMemcpy(dev_reflectivityScale[iGpu], reflectivityScale, host_nz*host_nx*sizeof(double), cudaMemcpyHostToDevice)); // Copy reflectivity scaling to device
// Allocate time slices
cuda_call(cudaMalloc((void**) &dev_p0[iGpu], host_nz*host_nx*sizeof(double)));
cuda_call(cudaMalloc((void**) &dev_p1[iGpu], host_nz*host_nx*sizeof(double)));
cuda_call(cudaMalloc((void**) &dev_ssLeft[iGpu], host_nz*host_nx*sizeof(double)));
cuda_call(cudaMalloc((void**) &dev_ssRight[iGpu], host_nz*host_nx*sizeof(double)));
// Allocate non-extended model
cuda_call(cudaMalloc((void**) &dev_modelBorn[iGpu], host_nz*host_nx*sizeof(double)));
// Allocate source wavefield
cuda_call(cudaMalloc((void**) &dev_BornSrcWavefield[iGpu], host_nz*host_nx*host_nts*sizeof(double))); // Allocate on device
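// Note: this wavefield buffer holds nz*nx*nts doubles and dominates GPU memory
// (for example, nz = nx = 1000 with nts = 2000 would already require ~16 GB).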
}
void deallocateBornShotsGpu(int iGpu, int iGpuId){
// Set device number on GPU cluster
cudaSetDevice(iGpuId);
// Deallocate all device arrays
cuda_call(cudaFree(dev_vel2Dtw2[iGpu]));
cuda_call(cudaFree(dev_reflectivityScale[iGpu]));
cuda_call(cudaFree(dev_p0[iGpu]));
cuda_call(cudaFree(dev_p1[iGpu]));
cuda_call(cudaFree(dev_ssLeft[iGpu]));
cuda_call(cudaFree(dev_ssRight[iGpu]));
cuda_call(cudaFree(dev_BornSrcWavefield[iGpu]));
cuda_call(cudaFree(dev_modelBorn[iGpu]));
}
/******************************************************************************/
/****************************** Born forward **********************************/
/******************************************************************************/
/********************************** Normal ************************************/
void BornShotsFwdGpu(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *scatWavefieldDts, int iGpu, int iGpuId){
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
cudaSetDevice(iGpuId);
// Sources geometry
cuda_call(cudaMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), cudaMemcpyHostToDevice));
// Sources signals
cuda_call(cudaMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(cudaMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), cudaMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(cudaMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), cudaMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(cudaMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
kernel_exec(injectSource<<<1, nSourcesReg>>>(dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
kernel_exec(dampCosineEdge<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
kernel_exec(interpWavefield<<<dimGrid, dimBlock>>>(dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Scattered wavefield computation *************************/
// Initialize time slices on device
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Copy model to device
cuda_call(cudaMemcpy(dev_modelBorn[iGpu], model, host_nz*host_nx*sizeof(double), cudaMemcpyHostToDevice)); // Copy model (reflectivity) on device
// Allocate and initialize data
cuda_call(cudaMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data at coarse time-sampling on device
cuda_call(cudaMemset(dev_dataRegDts[iGpu], 0, nReceiversReg*host_nts*sizeof(double))); // Initialize data on device
// Apply both scalings to reflectivity: (1) 2.0*1/v^3 (2) v^2*dtw^2
kernel_exec(scaleReflectivity<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Compute secondary source for first coarse time index (its = 0)
kernel_exec(imagingFwdGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssLeft[iGpu], 0, dev_BornSrcWavefield[iGpu]));
// Start propagating scattered wavefield
for (int its = 0; its < host_nts-1; its++){
// Compute secondary source for the next coarse time index (its+1)
kernel_exec(imagingFwdGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], its+1, dev_BornSrcWavefield[iGpu]));
for (int it2 = 1; it2 < host_sub+1; it2++){
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject secondary source sample itw-1
kernel_exec(injectSecondarySource<<<dimGrid, dimBlock>>>(dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2-1));
// Damp wavefields
kernel_exec(dampCosineEdge<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Extract data
kernel_exec(recordInterpData<<<nblockData, BLOCK_SIZE_DATA>>>(dev_p0[iGpu], dev_dataRegDts[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Switch pointers for secondary source
dev_ssTemp1[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssTemp1[iGpu];
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
}
// Copy data back to host
cuda_call(cudaMemcpy(dataRegDts, dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double), cudaMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(cudaFree(dev_dataRegDts[iGpu]));
cuda_call(cudaFree(dev_sourcesPositionReg[iGpu]));
cuda_call(cudaFree(dev_sourcesSignals[iGpu]));
cuda_call(cudaFree(dev_receiversPositionReg[iGpu]));
}
void BornShotsFwdGpuWavefield(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *scatWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (FORWARD)
// The source wavelet/signals already contain the second time derivative
// Set device number
cudaSetDevice(iGpuId);
// Sources geometry
cuda_call(cudaMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), cudaMemcpyHostToDevice));
// Sources signals
cuda_call(cudaMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(cudaMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), cudaMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(cudaMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), cudaMemcpyHostToDevice));
// Allocate and initialize data
cuda_call(cudaMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data at coarse time-sampling on device
cuda_call(cudaMemset(dev_dataRegDts[iGpu], 0, nReceiversReg*host_nts*sizeof(double))); // Initialize data on device
// Initialize source wavefield on device
cuda_call(cudaMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
kernel_exec(injectSource<<<1, nSourcesReg>>>(dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
kernel_exec(dampCosineEdge<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
kernel_exec(interpWavefield<<<dimGrid, dimBlock>>>(dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
// Copy wavefield back to host
cuda_call(cudaMemcpy(srcWavefieldDts, dev_BornSrcWavefield[iGpu], host_nz*host_nx*host_nts*sizeof(double), cudaMemcpyDeviceToHost));
/************************** Scattered wavefield computation *************************/
// Initialize time slices on device
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Copy model to device
cuda_call(cudaMemcpy(dev_modelBorn[iGpu], model, host_nz*host_nx*sizeof(double), cudaMemcpyHostToDevice)); // Copy model (reflectivity) on device
// Allocate and initialize scattered wavefield on device
cuda_call(cudaMalloc((void**) &dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double))); // Allocate on device
cuda_call(cudaMemset(dev_BornSecWavefield, 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Apply both scalings to reflectivity: (1) 2.0*1/v^3 (2) v^2*dtw^2
kernel_exec(scaleReflectivity<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Compute secondary source for first coarse time index (its = 0)
kernel_exec(imagingFwdGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssLeft[iGpu], 0, dev_BornSrcWavefield[iGpu]));
// Start propagating scattered wavefield
for (int its = 0; its < host_nts-1; its++){
		// Compute secondary source for next coarse time index (its+1)
kernel_exec(imagingFwdGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], its+1, dev_BornSrcWavefield[iGpu]));
for (int it2 = 1; it2 < host_sub+1; it2++){
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject secondary source sample itw-1
kernel_exec(injectSecondarySource<<<dimGrid, dimBlock>>>(dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2-1));
// Damp wavefields
kernel_exec(dampCosineEdge<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Record wavefield
kernel_exec(interpWavefield<<<dimGrid, dimBlock>>>(dev_BornSecWavefield, dev_p0[iGpu], its, it2));
// Extract data
kernel_exec(recordInterpData<<<nblockData, BLOCK_SIZE_DATA>>>(dev_p0[iGpu], dev_dataRegDts[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Switch pointers for secondary source
dev_ssTemp1[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssTemp1[iGpu];
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
}
// Copy data back to host
cuda_call(cudaMemcpy(dataRegDts, dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double), cudaMemcpyDeviceToHost));
// Copy scattered wavefield back to host
cuda_call(cudaMemcpy(scatWavefieldDts, dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double), cudaMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(cudaFree(dev_dataRegDts[iGpu]));
cuda_call(cudaFree(dev_sourcesPositionReg[iGpu]));
cuda_call(cudaFree(dev_sourcesSignals[iGpu]));
cuda_call(cudaFree(dev_receiversPositionReg[iGpu]));
cuda_call(cudaFree(dev_BornSecWavefield));
}
/****************************** Free surface **********************************/
void BornShotsFwdFsGpu(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *scatWavefieldDts, int iGpu, int iGpuId){
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
cudaSetDevice(iGpuId);
// Sources geometry
cuda_call(cudaMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), cudaMemcpyHostToDevice));
// Sources signals
cuda_call(cudaMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(cudaMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), cudaMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(cudaMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), cudaMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(cudaMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Apply free surface condition for Laplacian
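			// (launched with nblocky blocks of BLOCK_SIZE threads, i.e. one thread per interior x-column,
			// to impose the free-surface values at the top of the model before the Laplacian is applied)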
kernel_exec(setFsConditionFwdGpu<<<nblocky, BLOCK_SIZE>>>(dev_p1[iGpu]));
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
kernel_exec(injectSource<<<1, nSourcesReg>>>(dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
kernel_exec(dampCosineEdgeFs<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
kernel_exec(interpWavefield<<<dimGrid, dimBlock>>>(dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Scattered wavefield computation *************************/
// Initialize time slices on device
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Copy model to device
cuda_call(cudaMemcpy(dev_modelBorn[iGpu], model, host_nz*host_nx*sizeof(double), cudaMemcpyHostToDevice)); // Copy model (reflectivity) on device
// Allocate and initialize data
cuda_call(cudaMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data at coarse time-sampling on device
cuda_call(cudaMemset(dev_dataRegDts[iGpu], 0, nReceiversReg*host_nts*sizeof(double))); // Initialize data on device
// Apply both scalings to reflectivity: (1) 2.0*1/v^3 (2) v^2*dtw^2
kernel_exec(scaleReflectivity<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Compute secondary source for first coarse time index (its = 0)
kernel_exec(imagingFwdGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssLeft[iGpu], 0, dev_BornSrcWavefield[iGpu]));
// Start propagating scattered wavefield
for (int its = 0; its < host_nts-1; its++){
		// Compute secondary source for next coarse time index (its+1)
kernel_exec(imagingFwdGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], its+1, dev_BornSrcWavefield[iGpu]));
for (int it2 = 1; it2 < host_sub+1; it2++){
// Apply free surface condition for Laplacian
kernel_exec(setFsConditionFwdGpu<<<nblocky, BLOCK_SIZE>>>(dev_p1[iGpu]));
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject secondary source sample itw-1
kernel_exec(injectSecondarySource<<<dimGrid, dimBlock>>>(dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2-1));
// Damp wavefields
kernel_exec(dampCosineEdgeFs<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Extract data
kernel_exec(recordInterpData<<<nblockData, BLOCK_SIZE_DATA>>>(dev_p0[iGpu], dev_dataRegDts[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Switch pointers for secondary source
dev_ssTemp1[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssTemp1[iGpu];
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
}
// Copy data back to host
cuda_call(cudaMemcpy(dataRegDts, dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double), cudaMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(cudaFree(dev_dataRegDts[iGpu]));
cuda_call(cudaFree(dev_sourcesPositionReg[iGpu]));
cuda_call(cudaFree(dev_sourcesSignals[iGpu]));
cuda_call(cudaFree(dev_receiversPositionReg[iGpu]));
}
void BornShotsFwdFsGpuWavefield(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *scatWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (FORWARD)
// The source wavelet/signals already contain the second time derivative
// Set device number
cudaSetDevice(iGpuId);
// Sources geometry
cuda_call(cudaMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), cudaMemcpyHostToDevice));
// Sources signals
cuda_call(cudaMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(cudaMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), cudaMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(cudaMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), cudaMemcpyHostToDevice));
// Allocate and initialize data
cuda_call(cudaMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data at coarse time-sampling on device
cuda_call(cudaMemset(dev_dataRegDts[iGpu], 0, nReceiversReg*host_nts*sizeof(double))); // Initialize data on device
// Initialize source wavefield on device
cuda_call(cudaMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Apply free surface condition for Laplacian
kernel_exec(setFsConditionFwdGpu<<<nblocky, BLOCK_SIZE>>>(dev_p1[iGpu]));
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
kernel_exec(injectSource<<<1, nSourcesReg>>>(dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
kernel_exec(dampCosineEdgeFs<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
kernel_exec(interpWavefield<<<dimGrid, dimBlock>>>(dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
// Copy wavefield back to host
cuda_call(cudaMemcpy(srcWavefieldDts, dev_BornSrcWavefield[iGpu], host_nz*host_nx*host_nts*sizeof(double), cudaMemcpyDeviceToHost));
/************************** Scattered wavefield computation *************************/
// Initialize time slices on device
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Allocate and copy model
cuda_call(cudaMemcpy(dev_modelBorn[iGpu], model, host_nz*host_nx*sizeof(double), cudaMemcpyHostToDevice)); // Copy model (reflectivity) on device
// Allocate and initialize scattered wavefield on device
cuda_call(cudaMalloc((void**) &dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double))); // Allocate on device
cuda_call(cudaMemset(dev_BornSecWavefield, 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
	// Apply both scalings to reflectivity: (1) 2.0*1/v^3 (2) v^2*dtw^2
kernel_exec(scaleReflectivity<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Compute secondary source for first coarse time index (its = 0)
kernel_exec(imagingFwdGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssLeft[iGpu], 0, dev_BornSrcWavefield[iGpu]));
// Start propagating scattered wavefield
for (int its = 0; its < host_nts-1; its++){
		// Compute secondary source for next coarse time index (its+1)
kernel_exec(imagingFwdGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], its+1, dev_BornSrcWavefield[iGpu]));
for (int it2 = 1; it2 < host_sub+1; it2++){
// Apply free surface condition for Laplacian
kernel_exec(setFsConditionFwdGpu<<<nblocky, BLOCK_SIZE>>>(dev_p1[iGpu]));
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject secondary source sample itw-1
kernel_exec(injectSecondarySource<<<dimGrid, dimBlock>>>(dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2-1));
// Damp wavefields
kernel_exec(dampCosineEdgeFs<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Record wavefield
kernel_exec(interpWavefield<<<dimGrid, dimBlock>>>(dev_BornSecWavefield, dev_p0[iGpu], its, it2));
// Extract data
kernel_exec(recordInterpData<<<nblockData, BLOCK_SIZE_DATA>>>(dev_p0[iGpu], dev_dataRegDts[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Switch pointers for secondary source
dev_ssTemp1[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssTemp1[iGpu];
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
}
// Copy data back to host
cuda_call(cudaMemcpy(dataRegDts, dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double), cudaMemcpyDeviceToHost));
// Copy scattered wavefield back to host
cuda_call(cudaMemcpy(scatWavefieldDts, dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double), cudaMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(cudaFree(dev_dataRegDts[iGpu]));
cuda_call(cudaFree(dev_sourcesPositionReg[iGpu]));
cuda_call(cudaFree(dev_sourcesSignals[iGpu]));
cuda_call(cudaFree(dev_receiversPositionReg[iGpu]));
cuda_call(cudaFree(dev_BornSecWavefield));
}
/******************************************************************************/
/****************************** Born adjoint **********************************/
/******************************************************************************/
/********************************** Normal ************************************/
void BornShotsAdjGpu(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *recWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (ADJOINT)
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
cudaSetDevice(iGpuId);
// Sources geometry
cuda_call(cudaMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), cudaMemcpyHostToDevice));
// Sources signals
cuda_call(cudaMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(cudaMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), cudaMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(cudaMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), cudaMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(cudaMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
kernel_exec(injectSource<<<1, nSourcesReg>>>(dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
kernel_exec(dampCosineEdge<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
kernel_exec(interpWavefield<<<dimGrid, dimBlock>>>(dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Receiver wavefield computation **************************/
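	// Adjoint scheme: the recorded data are injected at the receiver positions while the field is stepped
	// backward in time; extractInterpAdjointWavefield resamples the receiver wavefield onto the coarse time
	// axis (dev_ssLeft/dev_ssRight) and imagingAdjGpu correlates it with the stored source wavefield to
	// build the image. The finite-difference and secondary-source scalings are applied once at the end.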
// Initialize time slices on device
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Model
cuda_call(cudaMemset(dev_modelBorn[iGpu], 0, host_nz*host_nx*sizeof(double))); // Initialize model on device
// Data
cuda_call(cudaMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data on device
cuda_call(cudaMemcpy(dev_dataRegDts[iGpu], dataRegDts, nReceiversReg*host_nts*sizeof(double), cudaMemcpyHostToDevice)); // Copy data on device
// Main loop
for (int its = host_nts-2; its > -1; its--){
for (int it2 = host_sub-1; it2 > -1; it2--){
// Step adjoint in time
kernel_exec(stepAdjGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject data
kernel_exec(interpInjectData<<<nblockData, BLOCK_SIZE_DATA>>>(dev_dataRegDts[iGpu], dev_p0[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Damp wavefield
kernel_exec(dampCosineEdge<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Interpolate and record time slices of receiver wavefield at coarse sampling (no scaling applied yet)
			kernel_exec(extractInterpAdjointWavefield<<<dimGrid, dimBlock>>>(dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2));
// Switch pointers for time slices at fine time-sampling
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Apply imaging condition for its+1
kernel_exec(imagingAdjGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], its+1));
// Switch pointers for receiver wavefield before imaging time derivative
dev_ssTemp1[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssTemp1[iGpu];
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double))); // Reinitialize slice for coarse time-sampling before time derivative
} // Finished main loop - we still have to compute imaging condition for its=0
// Apply imaging condition for its=0
kernel_exec(imagingAdjGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], 0));
// Scale model for finite-difference and secondary source coefficient
kernel_exec(scaleReflectivity<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Copy model back to host
cuda_call(cudaMemcpy(model, dev_modelBorn[iGpu], host_nz*host_nx*sizeof(double), cudaMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(cudaFree(dev_dataRegDts[iGpu]));
cuda_call(cudaFree(dev_sourcesPositionReg[iGpu]));
cuda_call(cudaFree(dev_sourcesSignals[iGpu]));
cuda_call(cudaFree(dev_receiversPositionReg[iGpu]));
}
void BornShotsAdjGpuWavefield(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *recWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (ADJOINT)
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
cudaSetDevice(iGpuId);
// Sources geometry
cuda_call(cudaMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), cudaMemcpyHostToDevice));
// Sources signals
cuda_call(cudaMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(cudaMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), cudaMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(cudaMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), cudaMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(cudaMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
kernel_exec(injectSource<<<1, nSourcesReg>>>(dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
kernel_exec(dampCosineEdge<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
kernel_exec(interpWavefield<<<dimGrid, dimBlock>>>(dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Receiver wavefield computation **************************/
// Initialize time slices on device
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Allocate and initialize receiver wavefield on device
cuda_call(cudaMalloc((void**) &dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double))); // Allocate on device
cuda_call(cudaMemset(dev_BornSecWavefield, 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Model
cuda_call(cudaMemset(dev_modelBorn[iGpu], 0, host_nz*host_nx*sizeof(double))); // Initialize model on device
// Data
cuda_call(cudaMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data on device
cuda_call(cudaMemcpy(dev_dataRegDts[iGpu], dataRegDts, nReceiversReg*host_nts*sizeof(double), cudaMemcpyHostToDevice)); // Copy data on device
// Main loop
for (int its = host_nts-2; its > -1; its--){
for (int it2 = host_sub-1; it2 > -1; it2--){
// Step adjoint in time
kernel_exec(stepAdjGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject data
kernel_exec(interpInjectData<<<nblockData, BLOCK_SIZE_DATA>>>(dev_dataRegDts[iGpu], dev_p0[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Damp wavefield
kernel_exec(dampCosineEdge<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Interpolate and record time slices of receiver wavefield at coarse sampling (no scaling applied yet)
			kernel_exec(extractInterpAdjointWavefield<<<dimGrid, dimBlock>>>(dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2));
// Switch pointers for time slices at fine time-sampling
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Apply imaging condition for its+1
kernel_exec(imagingAdjGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], its+1));
// Record and scale receiver wavefield at coarse sampling for its+1
kernel_exec(recordScaleWavefield<<<dimGrid, dimBlock>>>(dev_BornSecWavefield, dev_ssRight[iGpu], its+1, dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Switch pointers for receiver wavefield before imaging time derivative
dev_ssTemp1[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssTemp1[iGpu];
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double))); // Reinitialize slice for coarse time-sampling before time derivative
} // Finished main loop - we still have to compute imaging condition for its=0
// Save receiver wavefield at its=0
kernel_exec(recordScaleWavefield<<<dimGrid, dimBlock>>>(dev_BornSecWavefield, dev_ssRight[iGpu], 0, dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Apply imaging condition for its=0
kernel_exec(imagingAdjGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], 0));
// Scale model for finite-difference and secondary source coefficient
	// It's better to apply it once and for all than at every time step
kernel_exec(scaleReflectivity<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Copy model back to host
cuda_call(cudaMemcpy(model, dev_modelBorn[iGpu], host_nz*host_nx*sizeof(double), cudaMemcpyDeviceToHost));
// Copy scattered wavefield back to host
cuda_call(cudaMemcpy(recWavefieldDts, dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double), cudaMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(cudaFree(dev_dataRegDts[iGpu]));
cuda_call(cudaFree(dev_sourcesPositionReg[iGpu]));
cuda_call(cudaFree(dev_sourcesSignals[iGpu]));
cuda_call(cudaFree(dev_receiversPositionReg[iGpu]));
cuda_call(cudaFree(dev_BornSecWavefield));
}
/****************************** Free surface **********************************/
void BornShotsAdjFsGpu(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *recWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (ADJOINT)
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
cudaSetDevice(iGpuId);
// Sources geometry
cuda_call(cudaMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), cudaMemcpyHostToDevice));
// Sources signals
cuda_call(cudaMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(cudaMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), cudaMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(cudaMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), cudaMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(cudaMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Apply free surface condition for Laplacian
kernel_exec(setFsConditionFwdGpu<<<nblocky, BLOCK_SIZE>>>(dev_p1[iGpu]));
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
kernel_exec(injectSource<<<1, nSourcesReg>>>(dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
kernel_exec(dampCosineEdgeFs<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
kernel_exec(interpWavefield<<<dimGrid, dimBlock>>>(dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Receiver wavefield computation **************************/
// Initialize time slices on device
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Model
cuda_call(cudaMemset(dev_modelBorn[iGpu], 0, host_nz*host_nx*sizeof(double))); // Initialize model on device
// Data
cuda_call(cudaMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data on device
cuda_call(cudaMemcpy(dev_dataRegDts[iGpu], dataRegDts, nReceiversReg*host_nts*sizeof(double), cudaMemcpyHostToDevice)); // Copy data on device
// Main loop
for (int its = host_nts-2; its > -1; its--){
for (int it2 = host_sub-1; it2 > -1; it2--){
// Step adjoint in time
kernel_exec(stepAdjFsGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject data
kernel_exec(interpInjectData<<<nblockData, BLOCK_SIZE_DATA>>>(dev_dataRegDts[iGpu], dev_p0[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Damp wavefield
kernel_exec(dampCosineEdgeFs<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Interpolate and record time slices of receiver wavefield at coarse sampling (no scaling applied yet)
			kernel_exec(extractInterpAdjointWavefield<<<dimGrid, dimBlock>>>(dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2));
// Switch pointers for time slices at fine time-sampling
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Apply imaging condition for its+1
kernel_exec(imagingAdjGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], its+1));
// Switch pointers for receiver wavefield before imaging time derivative
dev_ssTemp1[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssTemp1[iGpu];
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double))); // Reinitialize slice for coarse time-sampling before time derivative
} // Finished main loop - we still have to compute imaging condition for its=0
// Apply imaging condition for its=0
kernel_exec(imagingAdjGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], 0));
// Scale model for finite-difference and secondary source coefficient
kernel_exec(scaleReflectivity<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Copy model back to host
cuda_call(cudaMemcpy(model, dev_modelBorn[iGpu], host_nz*host_nx*sizeof(double), cudaMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(cudaFree(dev_dataRegDts[iGpu]));
cuda_call(cudaFree(dev_sourcesPositionReg[iGpu]));
cuda_call(cudaFree(dev_sourcesSignals[iGpu]));
cuda_call(cudaFree(dev_receiversPositionReg[iGpu]));
}
void BornShotsAdjFsGpuWavefield(double *model, double *dataRegDts, double *sourcesSignals, int *sourcesPositionReg, int nSourcesReg, int *receiversPositionReg, int nReceiversReg, double *srcWavefieldDts, double *recWavefieldDts, int iGpu, int iGpuId){
// Non-extended Born modeling operator (ADJOINT)
// We assume the source wavelet/signals already contain the second time derivative
// Set device number
cudaSetDevice(iGpuId);
// Sources geometry
cuda_call(cudaMemcpyToSymbol(dev_nSourcesReg, &nSourcesReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_sourcesPositionReg[iGpu], nSourcesReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_sourcesPositionReg[iGpu], sourcesPositionReg, nSourcesReg*sizeof(int), cudaMemcpyHostToDevice));
// Sources signals
cuda_call(cudaMalloc((void**) &dev_sourcesSignals[iGpu], nSourcesReg*host_ntw*sizeof(double))); // Allocate sources signals on device
cuda_call(cudaMemcpy(dev_sourcesSignals[iGpu], sourcesSignals, nSourcesReg*host_ntw*sizeof(double), cudaMemcpyHostToDevice)); // Copy sources signals on device
// Receivers geometry
cuda_call(cudaMemcpyToSymbol(dev_nReceiversReg, &nReceiversReg, sizeof(int), 0, cudaMemcpyHostToDevice));
cuda_call(cudaMalloc((void**) &dev_receiversPositionReg[iGpu], nReceiversReg*sizeof(int)));
cuda_call(cudaMemcpy(dev_receiversPositionReg[iGpu], receiversPositionReg, nReceiversReg*sizeof(int), cudaMemcpyHostToDevice));
// Initialize source wavefield on device
cuda_call(cudaMemset(dev_BornSrcWavefield[iGpu], 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Initialize time-slices for time-stepping
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Kernel parameters
int nblockx = (host_nz-2*FAT) / BLOCK_SIZE;
int nblocky = (host_nx-2*FAT) / BLOCK_SIZE;
int nblockData = (nReceiversReg+BLOCK_SIZE_DATA-1) / BLOCK_SIZE_DATA;
dim3 dimGrid(nblockx, nblocky);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/************************** Source wavefield computation ****************************/
for (int its = 0; its < host_nts-1; its++){
for (int it2 = 1; it2 < host_sub+1; it2++){
// Compute fine time-step index
int itw = its * host_sub + it2;
// Apply free surface condition for Laplacian
kernel_exec(setFsConditionFwdGpu<<<nblocky, BLOCK_SIZE>>>(dev_p1[iGpu]));
// Step forward
kernel_exec(stepFwdGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject source
kernel_exec(injectSource<<<1, nSourcesReg>>>(dev_sourcesSignals[iGpu], dev_p0[iGpu], itw-1, dev_sourcesPositionReg[iGpu]));
// Damp wavefields
kernel_exec(dampCosineEdgeFs<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Extract wavefield
kernel_exec(interpWavefield<<<dimGrid, dimBlock>>>(dev_BornSrcWavefield[iGpu], dev_p0[iGpu], its, it2));
// Switch pointers
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
}
/************************** Receiver wavefield computation **************************/
// Initialize time slices on device
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_ssRight[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p0[iGpu], 0, host_nz*host_nx*sizeof(double)));
cuda_call(cudaMemset(dev_p1[iGpu], 0, host_nz*host_nx*sizeof(double)));
// Allocate and initialize receiver wavefield on device
cuda_call(cudaMalloc((void**) &dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double))); // Allocate on device
cuda_call(cudaMemset(dev_BornSecWavefield, 0, host_nz*host_nx*host_nts*sizeof(double))); // Initialize wavefield on device
// Model
cuda_call(cudaMemset(dev_modelBorn[iGpu], 0, host_nz*host_nx*sizeof(double))); // Initialize model on device
// Data
cuda_call(cudaMalloc((void**) &dev_dataRegDts[iGpu], nReceiversReg*host_nts*sizeof(double))); // Allocate data on device
cuda_call(cudaMemcpy(dev_dataRegDts[iGpu], dataRegDts, nReceiversReg*host_nts*sizeof(double), cudaMemcpyHostToDevice)); // Copy data on device
// Main loop
for (int its = host_nts-2; its > -1; its--){
for (int it2 = host_sub-1; it2 > -1; it2--){
// Step adjoint in time
kernel_exec(stepAdjFsGpu<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu], dev_p0[iGpu], dev_vel2Dtw2[iGpu]));
// Inject data
kernel_exec(interpInjectData<<<nblockData, BLOCK_SIZE_DATA>>>(dev_dataRegDts[iGpu], dev_p0[iGpu], its, it2, dev_receiversPositionReg[iGpu]));
// Damp wavefield
kernel_exec(dampCosineEdgeFs<<<dimGrid, dimBlock>>>(dev_p0[iGpu], dev_p1[iGpu]));
// Interpolate and record time slices of receiver wavefield at coarse sampling (no scaling applied yet)
			kernel_exec(extractInterpAdjointWavefield<<<dimGrid, dimBlock>>>(dev_ssLeft[iGpu], dev_ssRight[iGpu], dev_p0[iGpu], it2));
// Switch pointers for time slices at fine time-sampling
dev_temp1[iGpu] = dev_p0[iGpu];
dev_p0[iGpu] = dev_p1[iGpu];
dev_p1[iGpu] = dev_temp1[iGpu];
dev_temp1[iGpu] = NULL;
}
// Apply imaging condition for its+1
kernel_exec(imagingAdjGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], its+1));
// Record and scale receiver wavefield at coarse sampling for its+1
kernel_exec(recordScaleWavefield<<<dimGrid, dimBlock>>>(dev_BornSecWavefield, dev_ssRight[iGpu], its+1, dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Switch pointers for receiver wavefield before imaging time derivative
dev_ssTemp1[iGpu] = dev_ssRight[iGpu];
dev_ssRight[iGpu] = dev_ssLeft[iGpu];
dev_ssLeft[iGpu] = dev_ssTemp1[iGpu];
cuda_call(cudaMemset(dev_ssLeft[iGpu], 0, host_nz*host_nx*sizeof(double))); // Reinitialize slice for coarse time-sampling before time derivative
} // Finished main loop - we still have to compute imaging condition for its=0
// Save receiver wavefield at its=0
kernel_exec(recordScaleWavefield<<<dimGrid, dimBlock>>>(dev_BornSecWavefield, dev_ssRight[iGpu], 0, dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Apply imaging condition for its=0
kernel_exec(imagingAdjGpu<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_ssRight[iGpu], dev_BornSrcWavefield[iGpu], 0));
// Scale model for finite-difference and secondary source coefficient
	// It's better to apply it once and for all than at every time step
kernel_exec(scaleReflectivity<<<dimGrid, dimBlock>>>(dev_modelBorn[iGpu], dev_reflectivityScale[iGpu], dev_vel2Dtw2[iGpu]));
// Copy model back to host
cuda_call(cudaMemcpy(model, dev_modelBorn[iGpu], host_nz*host_nx*sizeof(double), cudaMemcpyDeviceToHost));
// Copy scattered wavefield back to host
cuda_call(cudaMemcpy(recWavefieldDts, dev_BornSecWavefield, host_nz*host_nx*host_nts*sizeof(double), cudaMemcpyDeviceToHost));
/******************************* Deallocation ***************************************/
// Deallocate all slices
cuda_call(cudaFree(dev_dataRegDts[iGpu]));
cuda_call(cudaFree(dev_sourcesPositionReg[iGpu]));
cuda_call(cudaFree(dev_sourcesSignals[iGpu]));
cuda_call(cudaFree(dev_receiversPositionReg[iGpu]));
cuda_call(cudaFree(dev_BornSecWavefield));
}
|
f9f9b42b9dff93d4f6281edc5416e118bde29c6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
The implementation of this file is based on skipLayerNorm plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Modifications: Add SkipLayerNormKernelVec to
// leverage vectorized load/write.
// and templatize ComputeSkipLayerNorm for different
// data types.
// Copyright (c) Advanced Micro Devices, Inc. All rights reserved.
// Licensed under the MIT License.
#include "contrib_ops/cuda/bert/layer_norm.cuh"
#include "contrib_ops/cuda/bert/skip_layer_norm_impl.h"
#include <hip/hip_fp16.h>
namespace onnxruntime {
namespace contrib {
namespace cuda {
template<typename T>
T maybe2half(float x);
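// maybe2half converts a host-side float scalar (here epsilon) to the kernel compute type T:
// a pass-through for float, a round-to-nearest conversion for half.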
template<>
float maybe2half(float x) {
return x;
}
template<>
half maybe2half(float x) {
return __float2half_rn(x);
}
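// SkipLayerNormKernel: one thread block per row of length ld. Each thread accumulates the pair
// (x/ld, x*x/ld) over its strided elements, where x = input + skip (+ bias), and writes the
// unnormalized x to output; LayerNorm (from layer_norm.cuh) then reduces the pairs across the
// block and applies the gamma/beta normalization to the row.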
template <typename T, unsigned TPB>
__global__ void SkipLayerNormKernel(
const int ld, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias,
const T epsilon, T* output) {
const T reverse_ld = T(1.f / ld);
const int offset = blockIdx.x * ld;
KeyValuePairSum pair_sum;
// reduce x and x^2
hipcub::KeyValuePair<T, T> thread_data(0, 0);
for (int i = threadIdx.x; i < ld; i += TPB) {
const int idx = offset + i;
const T val = (bias == nullptr) ? input[idx] + skip[idx] : input[idx] + skip[idx] + bias[i];
const T rldval = reverse_ld * val;
thread_data = pair_sum(thread_data, hipcub::KeyValuePair<T, T>(rldval, rldval * val));
output[idx] = val;
}
LayerNorm<T, TPB>(thread_data, ld, offset, beta, gamma, epsilon, output);
}
// Vectorized kernel
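// Each thread loads ILP contiguous elements through an aligned_vector load, so the launch side pairs
// block_size and ILP such that block_size*ILP covers the row length handled by that branch; threads
// with ILP*threadIdx.x >= ld skip the accumulation, but the vector loads themselves are unguarded, so
// the pairing below is what keeps idx in bounds.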
template <typename T, unsigned TPB, int ILP>
__global__ void SkipLayerNormKernelSmall(
const int ld, const T* input, const T* skip, const T* beta, const T* gamma,
const T* bias, const T epsilon, T* output, bool hasBias) {
const T rld = T(1.f / ld);
const int idx = blockIdx.x * ld + threadIdx.x * ILP; // grid_size = n / ld
using VecT = aligned_vector<T, ILP>;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
T input_v[ILP], skip_v[ILP], bias_v[ILP], output_v[ILP];
VecT* input_val = reinterpret_cast<VecT*>(&input_v);
*input_val = *reinterpret_cast<const VecT*>(&input[idx]);
VecT* skip_val = reinterpret_cast<VecT*>(&skip_v);
*skip_val = *reinterpret_cast<const VecT*>(&skip[idx]);
if (hasBias) {
VecT* bias_val = reinterpret_cast<VecT*>(&bias_v);
*bias_val = *reinterpret_cast<const VecT*>(&bias[threadIdx.x * ILP]);
}
hipcub::KeyValuePair<T, T> thread_data(T(0.f), T(0.f));
if (ILP * threadIdx.x < ld) {
T rldval_sum = T(0.f);
T rldvalsq_sum = T(0.f);
#pragma unroll
for (int i = 0; i < ILP; i++) {
input_v[i] += hasBias ? skip_v[i] + bias_v[i]: skip_v[i];
const T rldval = rld * input_v[i];
rldval_sum += rldval;
rldvalsq_sum += rldval * input_v[i];
}
thread_data = hipcub::KeyValuePair<T, T>(rldval_sum, rldvalsq_sum);
}
LayerNormSmall<T, TPB, ILP>(input_v, thread_data, ld, idx, beta, gamma, epsilon, output);
}
template <typename T>
bool LaunchSkipLayerNormKernel(
hipStream_t stream, T* output, const T* input, const T* skip, const T* gamma,
const T* beta, const T* bias, float epsilon, const int ld, const int element_count,
size_t element_size) {
// this must be true because n is the total size of the tensor
assert(element_count % ld == 0);
bool hasBias = (bias == nullptr) ? false : true;
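  // Dispatch: rows whose length is a multiple of 4 use the vectorized kernels (ILP up to 4); other
  // lengths fall back to the ILP=1 small-row kernels for a few fixed sizes, with the generic
  // 256-thread SkipLayerNormKernel as the catch-all in both branches.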
if (0 == (ld % 4)) {
const int grid_size = element_count / ld;
if (ld <= 32) {
constexpr int block_size = 32;
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, 1>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 64) {
constexpr int block_size = 64 / 2;
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, 2>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 128) {
constexpr int block_size = 128 / 4;
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, 4>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 384) {
constexpr int block_size = 384 / 4;
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, 4>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 768) {
constexpr int block_size = 768 / 4;
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, 4>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 1024) {
constexpr int block_size = 1024 / 4;
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, 4>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else {
constexpr int block_size = 256;
hipLaunchKernelGGL(( SkipLayerNormKernel<T, block_size>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output);
}
} else {
const int grid_size = element_count / ld;
if (ld <= 32) {
constexpr int block_size = 32;
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, 1>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 64) {
constexpr int block_size = 64;
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, 1>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 128) {
constexpr int block_size = 128;
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, 1>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld == 384) {
constexpr int block_size = 384;
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, 1>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else {
constexpr int block_size = 256;
hipLaunchKernelGGL(( SkipLayerNormKernel<T, block_size>)
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output);
}
}
return CUDA_CALL(hipPeekAtLastError());
}
template bool LaunchSkipLayerNormKernel<float>(hipStream_t stream, float* output, const float* input,
const float* skip, const float* gamma, const float* beta,
const float* bias, float epsilon, const int ld,
const int element_count, size_t element_size);
template bool LaunchSkipLayerNormKernel<half>(hipStream_t stream, half* output, const half* input,
const half* skip, const half* gamma, const half* beta,
const half* bias, float epsilon, const int ld,
const int element_count, size_t element_size);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| f9f9b42b9dff93d4f6281edc5416e118bde29c6a.cu | /*
The implementation of this file is based on skipLayerNorm plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Modifications: Add SkipLayerNormKernelVec to
// leverage vectorized load/write.
// and templatize ComputeSkipLayerNorm for different
// data types.
// Copyright (c) Advanced Micro Devices, Inc. All rights reserved.
// Licensed under the MIT License.
#include "contrib_ops/cuda/bert/layer_norm.cuh"
#include "contrib_ops/cuda/bert/skip_layer_norm_impl.h"
#include <cuda_fp16.h>
namespace onnxruntime {
namespace contrib {
namespace cuda {
template<typename T>
T maybe2half(float x);
template<>
float maybe2half(float x) {
return x;
}
template<>
half maybe2half(float x) {
return __float2half_rn(x);
}
template <typename T, unsigned TPB>
__global__ void SkipLayerNormKernel(
const int ld, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias,
const T epsilon, T* output) {
const T reverse_ld = T(1.f / ld);
const int offset = blockIdx.x * ld;
KeyValuePairSum pair_sum;
// reduce x and x^2
cub::KeyValuePair<T, T> thread_data(0, 0);
for (int i = threadIdx.x; i < ld; i += TPB) {
const int idx = offset + i;
const T val = (bias == nullptr) ? input[idx] + skip[idx] : input[idx] + skip[idx] + bias[i];
const T rldval = reverse_ld * val;
thread_data = pair_sum(thread_data, cub::KeyValuePair<T, T>(rldval, rldval * val));
output[idx] = val;
}
LayerNorm<T, TPB>(thread_data, ld, offset, beta, gamma, epsilon, output);
}
// Vectorized kernel
template <typename T, unsigned TPB, int ILP>
__global__ void SkipLayerNormKernelSmall(
const int ld, const T* input, const T* skip, const T* beta, const T* gamma,
const T* bias, const T epsilon, T* output, bool hasBias) {
const T rld = T(1.f / ld);
const int idx = blockIdx.x * ld + threadIdx.x * ILP; // grid_size = n / ld
using VecT = aligned_vector<T, ILP>;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
T input_v[ILP], skip_v[ILP], bias_v[ILP], output_v[ILP];
VecT* input_val = reinterpret_cast<VecT*>(&input_v);
*input_val = *reinterpret_cast<const VecT*>(&input[idx]);
VecT* skip_val = reinterpret_cast<VecT*>(&skip_v);
*skip_val = *reinterpret_cast<const VecT*>(&skip[idx]);
if (hasBias) {
VecT* bias_val = reinterpret_cast<VecT*>(&bias_v);
*bias_val = *reinterpret_cast<const VecT*>(&bias[threadIdx.x * ILP]);
}
cub::KeyValuePair<T, T> thread_data(T(0.f), T(0.f));
if (ILP * threadIdx.x < ld) {
T rldval_sum = T(0.f);
T rldvalsq_sum = T(0.f);
#pragma unroll
for (int i = 0; i < ILP; i++) {
input_v[i] += hasBias ? skip_v[i] + bias_v[i]: skip_v[i];
const T rldval = rld * input_v[i];
rldval_sum += rldval;
rldvalsq_sum += rldval * input_v[i];
}
thread_data = cub::KeyValuePair<T, T>(rldval_sum, rldvalsq_sum);
}
LayerNormSmall<T, TPB, ILP>(input_v, thread_data, ld, idx, beta, gamma, epsilon, output);
}
template <typename T>
bool LaunchSkipLayerNormKernel(
cudaStream_t stream, T* output, const T* input, const T* skip, const T* gamma,
const T* beta, const T* bias, float epsilon, const int ld, const int element_count,
size_t element_size) {
// this must be true because n is the total size of the tensor
assert(element_count % ld == 0);
bool hasBias = (bias == nullptr) ? false : true;
if (0 == (ld % 4)) {
const int grid_size = element_count / ld;
if (ld <= 32) {
constexpr int block_size = 32;
SkipLayerNormKernelSmall<T, block_size, 1>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 64) {
constexpr int block_size = 64 / 2;
SkipLayerNormKernelSmall<T, block_size, 2>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 128) {
constexpr int block_size = 128 / 4;
SkipLayerNormKernelSmall<T, block_size, 4>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 384) {
constexpr int block_size = 384 / 4;
SkipLayerNormKernelSmall<T, block_size, 4>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 768) {
constexpr int block_size = 768 / 4;
SkipLayerNormKernelSmall<T, block_size, 4>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 1024) {
constexpr int block_size = 1024 / 4;
SkipLayerNormKernelSmall<T, block_size, 4>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else {
constexpr int block_size = 256;
SkipLayerNormKernel<T, block_size>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output);
}
} else {
const int grid_size = element_count / ld;
if (ld <= 32) {
constexpr int block_size = 32;
SkipLayerNormKernelSmall<T, block_size, 1>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 64) {
constexpr int block_size = 64;
SkipLayerNormKernelSmall<T, block_size, 1>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld <= 128) {
constexpr int block_size = 128;
SkipLayerNormKernelSmall<T, block_size, 1>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else if (ld == 384) {
constexpr int block_size = 384;
SkipLayerNormKernelSmall<T, block_size, 1>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output, hasBias);
} else {
constexpr int block_size = 256;
SkipLayerNormKernel<T, block_size>
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias,
maybe2half<T>(epsilon), output);
}
}
return CUDA_CALL(cudaPeekAtLastError());
}
template bool LaunchSkipLayerNormKernel<float>(cudaStream_t stream, float* output, const float* input,
const float* skip, const float* gamma, const float* beta,
const float* bias, float epsilon, const int ld,
const int element_count, size_t element_size);
template bool LaunchSkipLayerNormKernel<half>(cudaStream_t stream, half* output, const half* input,
const half* skip, const half* gamma, const half* beta,
const half* bias, float epsilon, const int ld,
const int element_count, size_t element_size);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
298667cabae90d52fc0c331144a2551a9ac4b6b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "EuclideanDistanceCalculatorGPU.h"
#include "ImageMatchPair.h"
__global__ void calculate_euclidean_distance_kernel(ImageData *imageData1, ImageData *imageData2, uint16_t *deviceCandidateList, uint8_t *deviceCandidateCntList,int *deviceMatchingPoints, int imageIndex){
int index = blockIdx.x;
int tid = threadIdx.x;
if (index < imageData1->cntPoint) {
double candidateDistListTop[kCntCandidateTopMin];
int cntCandidateFound = deviceCandidateCntList[index];
for (int candidateIndex = 0; candidateIndex < cntCandidateFound; candidateIndex++) {
__shared__ double distEuclid[kDimSiftData];
__shared__ double diff[kDimSiftData];
distEuclid[tid] = 0.0f;
int dataIndex_2 = deviceCandidateList[index * kCntCandidateTopMin + candidateIndex];
diff[tid] = imageData1->deviceSiftDataPtrList[index * kDimSiftData + tid] - imageData2->deviceSiftDataPtrList[dataIndex_2 * kDimSiftData + tid];
__syncthreads();
distEuclid[tid] = diff[tid] * diff[tid];
__syncthreads();
for (int stride = kDimSiftData / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
distEuclid[tid] += distEuclid[tid + stride];
}
__syncthreads();
}
if (tid == 0) {
candidateDistListTop[candidateIndex] = distEuclid[tid];
}
}
if (tid == 0) {
deviceMatchingPoints[index] = findMinValIndex_device(index, cntCandidateFound, deviceCandidateList, &candidateDistListTop[0]);
}
}
}
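// Editorial note: the stride loop above is the classic shared-memory tree reduction --
// each pass halves the number of active threads, so after log2(kDimSiftData) passes
// distEuclid[0] holds the full sum of squared differences for this candidate.
// A minimal stand-alone sketch of the same pattern (illustrative names only, assuming a
// power-of-two block size of 128) would look like:
#if 0
__global__ void blockSumSketch(const double* in, double* out) {
    __shared__ double buf[128];                      // assumes blockDim.x == 128 (a power of two)
    unsigned int tid = threadIdx.x;
    buf[tid] = in[blockIdx.x * blockDim.x + tid];
    __syncthreads();
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) buf[tid] += buf[tid + stride];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = buf[0];          // one partial sum per block
}
#endif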
void compute_euclidean_distance_GPU(ImageData *imageData1, ImageData *imageData2, int siftCount, uint16_t *deviceCandidateList, uint8_t *deviceCandidateCntList, int *deviceMatchingPoints,int imageIndex, hipStream_t *stream) {
dim3 block(kDimSiftData);
dim3 grid((siftCount * block.x + block.x - 1) / block.x);
hipLaunchKernelGGL(( calculate_euclidean_distance_kernel), dim3(grid), dim3(block), 0, 0, imageData1, imageData2, deviceCandidateList, deviceCandidateCntList, deviceMatchingPoints, imageIndex);
//hipDeviceSynchronize();
}
__device__ int findMinValIndex_device(int data_index, int cntCandidateFound, uint16_t* hostCandidateList, double* candidateDistListTop) {
double minVal_1 = 0.0;
int minValInd_1 = -1;
double minVal_2 = 0.0;
int minValInd_2 = -1;
for (int candidateIndex = 0; candidateIndex < cntCandidateFound; candidateIndex++) {
if (minValInd_2 == -1 || minVal_2 > candidateDistListTop[candidateIndex]) {
minVal_2 = candidateDistListTop[candidateIndex];
minValInd_2 = hostCandidateList[data_index * kCntCandidateTopMin + candidateIndex];
}
if (minValInd_1 == -1 || minVal_1 > minVal_2) {
minVal_1 = minVal_1 + minVal_2;
minVal_2 = minVal_1 - minVal_2;
minVal_1 = minVal_1 - minVal_2;
minValInd_1 = minValInd_1 + minValInd_2;
minValInd_2 = minValInd_1 - minValInd_2;
minValInd_1 = minValInd_1 - minValInd_2;
}
}
if (minVal_1 < minVal_2 * matchThreshold) {
return minValInd_1;
}
else
return -1;
}
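// Editorial note: the add/subtract triplets above are just an in-place swap (no temporary),
// keeping (minVal_1, minValInd_1) as the best distance seen so far and (minVal_2, minValInd_2)
// as the runner-up; the final comparison is a Lowe-style ratio test that accepts the match only
// if best < matchThreshold * secondBest. A plainer equivalent sketch (same inputs, the function
// name here is illustrative only):
#if 0
__device__ int findMinValIndex_sketch(int data_index, int cntCandidateFound,
    uint16_t* candidateList, double* candidateDistListTop) {
    double best = 0.0, second = 0.0;
    int bestInd = -1, secondInd = -1;
    for (int c = 0; c < cntCandidateFound; c++) {
        if (secondInd == -1 || second > candidateDistListTop[c]) {
            second = candidateDistListTop[c];
            secondInd = candidateList[data_index * kCntCandidateTopMin + c];
        }
        if (bestInd == -1 || best > second) {
            double td = best; best = second; second = td;        // ordinary swap with a temporary
            int ti = bestInd; bestInd = secondInd; secondInd = ti;
        }
    }
    return (best < second * matchThreshold) ? bestInd : -1;
}
#endif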
| 298667cabae90d52fc0c331144a2551a9ac4b6b9.cu | #include <stdio.h>
#include "EuclideanDistanceCalculatorGPU.h"
#include "ImageMatchPair.h"
__global__ void calculate_euclidean_distance_kernel(ImageData *imageData1, ImageData *imageData2, uint16_t *deviceCandidateList, uint8_t *deviceCandidateCntList,int *deviceMatchingPoints, int imageIndex){
int index = blockIdx.x;
int tid = threadIdx.x;
if (index < imageData1->cntPoint) {
double candidateDistListTop[kCntCandidateTopMin];
int cntCandidateFound = deviceCandidateCntList[index];
for (int candidateIndex = 0; candidateIndex < cntCandidateFound; candidateIndex++) {
__shared__ double distEuclid[kDimSiftData];
__shared__ double diff[kDimSiftData];
distEuclid[tid] = 0.0f;
int dataIndex_2 = deviceCandidateList[index * kCntCandidateTopMin + candidateIndex];
diff[tid] = imageData1->deviceSiftDataPtrList[index * kDimSiftData + tid] - imageData2->deviceSiftDataPtrList[dataIndex_2 * kDimSiftData + tid];
__syncthreads();
distEuclid[tid] = diff[tid] * diff[tid];
__syncthreads();
for (int stride = kDimSiftData / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
distEuclid[tid] += distEuclid[tid + stride];
}
__syncthreads();
}
if (tid == 0) {
candidateDistListTop[candidateIndex] = distEuclid[tid];
}
}
if (tid == 0) {
deviceMatchingPoints[index] = findMinValIndex_device(index, cntCandidateFound, deviceCandidateList, &candidateDistListTop[0]);
}
}
}
void compute_euclidean_distance_GPU(ImageData *imageData1, ImageData *imageData2, int siftCount, uint16_t *deviceCandidateList, uint8_t *deviceCandidateCntList, int *deviceMatchingPoints,int imageIndex, cudaStream_t *stream) {
dim3 block(kDimSiftData);
dim3 grid((siftCount * block.x + block.x - 1) / block.x);
calculate_euclidean_distance_kernel<<<grid, block, 0>>>(imageData1, imageData2, deviceCandidateList, deviceCandidateCntList, deviceMatchingPoints, imageIndex);
//cudaDeviceSynchronize();
}
__device__ int findMinValIndex_device(int data_index, int cntCandidateFound, uint16_t* hostCandidateList, double* candidateDistListTop) {
double minVal_1 = 0.0;
int minValInd_1 = -1;
double minVal_2 = 0.0;
int minValInd_2 = -1;
for (int candidateIndex = 0; candidateIndex < cntCandidateFound; candidateIndex++) {
if (minValInd_2 == -1 || minVal_2 > candidateDistListTop[candidateIndex]) {
minVal_2 = candidateDistListTop[candidateIndex];
minValInd_2 = hostCandidateList[data_index * kCntCandidateTopMin + candidateIndex];
}
if (minValInd_1 == -1 || minVal_1 > minVal_2) {
minVal_1 = minVal_1 + minVal_2;
minVal_2 = minVal_1 - minVal_2;
minVal_1 = minVal_1 - minVal_2;
minValInd_1 = minValInd_1 + minValInd_2;
minValInd_2 = minValInd_1 - minValInd_2;
minValInd_1 = minValInd_1 - minValInd_2;
}
}
if (minVal_1 < minVal_2 * matchThreshold) {
return minValInd_1;
}
else
return -1;
}
|
685ea60f402e66008421556e4860bdea22847e79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MyCudaUtils.h"
// A region of interest which contains a rectangle (and is constructed with a rectangle).
// Turns out this is really only used for the keypoint near border removal, as a quick check of
// std::remove_if in the runByImageBorder below says that a predicate is a required input.
// The predicate is a function (or in this case the operator() of the object) which returns
// boolean true/false when called with a single element from the vector as to whether it
// should be removed or not. remove_if internally calls the RoiPredicate operator on
// each element of the keypoint vector; the predicate returns true when the keypoint
// falls outside the allowed rectangle. remove_if shifts the keypoints to keep to the
// front and returns an iterator to the new logical end, which std::vector::erase then
// uses to trim off the unwanted tail (a tiny standalone sketch of this idiom follows runByImageBorder below). Nifty
struct RoiPredicate
{
RoiPredicate(const cv::Rect& _r) : r(_r)
{}
bool operator()(const cv::KeyPoint& keyPt) const
{
return !r.contains(keyPt.pt);
}
cv::Rect r;
};
// Function removes keypoints near the boarder of the image within a particular border size
// This shouldn't be too much of a problem, I believe that STAR doesn't find them too close either
void runByImageBorder(std::vector<cv::KeyPoint>& keypoints, cv::Size imageSize, int borderSize)
{
if (borderSize > 0)
{
if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
keypoints.clear();
else
keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
RoiPredicate(cv::Rect(cv::Point(borderSize, borderSize),
cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
keypoints.end());
}
}
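// Editorial sketch of the erase-remove idiom described in the comment above RoiPredicate,
// shown on a plain vector of ints (standalone illustration, not part of the original file):
#if 0
#include <algorithm>
#include <vector>
static bool tooBig(int x) { return x > 5; }
void eraseRemoveSketch() {
    std::vector<int> v;
    v.push_back(1); v.push_back(7); v.push_back(3); v.push_back(9); v.push_back(2);
    // remove_if moves the elements to keep to the front and returns the new logical end;
    // erase then trims everything from that iterator onwards.
    v.erase(std::remove_if(v.begin(), v.end(), tooBig), v.end());
    // v now holds 1, 3, 2
}
#endif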
// Device equivalent function for computing the smoothed function
// [in] Ptr to the integral image
// [in] The x and y coordinates of the keypoint the current CUDA thread is operating on
// [in] The relative position around this keypoint we are querying as a point for our Brief binary descriptor
// [in] The stride of the integral image so we can access elements without having nice row, column matrix
__device__ int smoothedSumDevice(int* intImage, int pty, int ptx, int rely, int relx, int stride) {
const int KERNEL_SIZE = 9;
const int HALF_KERNEL = KERNEL_SIZE / 2;
int img_y = (int)(pty + 0.5) + rely;
int img_x = (int)(ptx + 0.5) + relx;
return intImage[(img_y + HALF_KERNEL + 1)*stride + img_x + HALF_KERNEL + 1]
- intImage[(img_y + HALF_KERNEL + 1)*stride + img_x - HALF_KERNEL]
- intImage[(img_y - HALF_KERNEL)*stride + img_x + HALF_KERNEL + 1]
+ intImage[(img_y - HALF_KERNEL)*stride + img_x - HALF_KERNEL];
// Smooths by computing a box filter - average of all the surrounding pixels
// which of course requires their sum. We compute this efficiently by constructing the
// integral image and taking the four corner points around our kernel. Here we use a
// kernel size of 9 - i.e. we smooth over a window 9 pix across to get the intensity value
}
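// Editorial note: the four-corner lookup above is the standard integral-image identity --
// the sum over the window [x0, x1) x [y0, y1) equals I(y1, x1) - I(y1, x0) - I(y0, x1) + I(y0, x0),
// where I is the (rows+1) x (cols+1) integral image that cv::integral produces (extra zero row
// and column, exclusive upper bounds). A tiny host-side sketch of the same identity
// (illustrative only):
#if 0
int boxSumSketch(const cv::Mat& integralI, int y0, int x0, int y1, int x1) {
    // integralI is CV_32S with one extra row/column of zeros, as cv::integral produces.
    return integralI.at<int>(y1, x1) - integralI.at<int>(y1, x0)
         - integralI.at<int>(y0, x1) + integralI.at<int>(y0, x0);
}
#endif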
// Kernel for computing the descriptor for a single keypoint.
// We could probably be more efficient with global memory accesses because there's a lot per thread
// [in] A pointer to the row-major stored integral image data
// [in] The stride of the integral image in pixels (also bytes for a uchar image)
// [in] The set of keypoints laid out with all x-coords in first row, all y-coords in second row
// [in] The number of points N in the set of keypoints
// [out] The 256bit descriptor array, of size 32*N bytes
__global__ void pixelTest32Kernel(int* intImage, int imStride, float* kps, int num_pts, unsigned char* descriptors) {
int pt_idx = threadIdx.x + blockDim.x * blockIdx.x;
if (!(pt_idx < num_pts)) // Check thread index is valid
return;
int ptx = kps[pt_idx]; // X-coords on first row of matrix
int pty = kps[pt_idx + num_pts]; // Y-coords on second row
unsigned char* desc = descriptors + (32 * pt_idx); // Where to put the descriptor for this keypoint in output array
#define DEVSMOOTHED(y,x) smoothedSumDevice(intImage, pty, ptx, y, x, imStride)
desc[0] = (uchar)(((DEVSMOOTHED(-2, -1) < DEVSMOOTHED(7, -1)) << 7) + ((DEVSMOOTHED(-14, -1) < DEVSMOOTHED(-3, 3)) << 6) + ((DEVSMOOTHED(1, -2) < DEVSMOOTHED(11, 2)) << 5) + ((DEVSMOOTHED(1, 6) < DEVSMOOTHED(-10, -7)) << 4) + ((DEVSMOOTHED(13, 2) < DEVSMOOTHED(-1, 0)) << 3) + ((DEVSMOOTHED(-14, 5) < DEVSMOOTHED(5, -3)) << 2) + ((DEVSMOOTHED(-2, 8) < DEVSMOOTHED(2, 4)) << 1) + ((DEVSMOOTHED(-11, 8) < DEVSMOOTHED(-15, 5)) << 0));
desc[1] = (uchar)(((DEVSMOOTHED(-6, -23) < DEVSMOOTHED(8, -9)) << 7) + ((DEVSMOOTHED(-12, 6) < DEVSMOOTHED(-10, 8)) << 6) + ((DEVSMOOTHED(-3, -1) < DEVSMOOTHED(8, 1)) << 5) + ((DEVSMOOTHED(3, 6) < DEVSMOOTHED(5, 6)) << 4) + ((DEVSMOOTHED(-7, -6) < DEVSMOOTHED(5, -5)) << 3) + ((DEVSMOOTHED(22, -2) < DEVSMOOTHED(-11, -8)) << 2) + ((DEVSMOOTHED(14, 7) < DEVSMOOTHED(8, 5)) << 1) + ((DEVSMOOTHED(-1, 14) < DEVSMOOTHED(-5, -14)) << 0));
desc[2] = (uchar)(((DEVSMOOTHED(-14, 9) < DEVSMOOTHED(2, 0)) << 7) + ((DEVSMOOTHED(7, -3) < DEVSMOOTHED(22, 6)) << 6) + ((DEVSMOOTHED(-6, 6) < DEVSMOOTHED(-8, -5)) << 5) + ((DEVSMOOTHED(-5, 9) < DEVSMOOTHED(7, -1)) << 4) + ((DEVSMOOTHED(-3, -7) < DEVSMOOTHED(-10, -18)) << 3) + ((DEVSMOOTHED(4, -5) < DEVSMOOTHED(0, 11)) << 2) + ((DEVSMOOTHED(2, 3) < DEVSMOOTHED(9, 10)) << 1) + ((DEVSMOOTHED(-10, 3) < DEVSMOOTHED(4, 9)) << 0));
desc[3] = (uchar)(((DEVSMOOTHED(0, 12) < DEVSMOOTHED(-3, 19)) << 7) + ((DEVSMOOTHED(1, 15) < DEVSMOOTHED(-11, -5)) << 6) + ((DEVSMOOTHED(14, -1) < DEVSMOOTHED(7, 8)) << 5) + ((DEVSMOOTHED(7, -23) < DEVSMOOTHED(-5, 5)) << 4) + ((DEVSMOOTHED(0, -6) < DEVSMOOTHED(-10, 17)) << 3) + ((DEVSMOOTHED(13, -4) < DEVSMOOTHED(-3, -4)) << 2) + ((DEVSMOOTHED(-12, 1) < DEVSMOOTHED(-12, 2)) << 1) + ((DEVSMOOTHED(0, 8) < DEVSMOOTHED(3, 22)) << 0));
desc[4] = (uchar)(((DEVSMOOTHED(-13, 13) < DEVSMOOTHED(3, -1)) << 7) + ((DEVSMOOTHED(-16, 17) < DEVSMOOTHED(6, 10)) << 6) + ((DEVSMOOTHED(7, 15) < DEVSMOOTHED(-5, 0)) << 5) + ((DEVSMOOTHED(2, -12) < DEVSMOOTHED(19, -2)) << 4) + ((DEVSMOOTHED(3, -6) < DEVSMOOTHED(-4, -15)) << 3) + ((DEVSMOOTHED(8, 3) < DEVSMOOTHED(0, 14)) << 2) + ((DEVSMOOTHED(4, -11) < DEVSMOOTHED(5, 5)) << 1) + ((DEVSMOOTHED(11, -7) < DEVSMOOTHED(7, 1)) << 0));
desc[5] = (uchar)(((DEVSMOOTHED(6, 12) < DEVSMOOTHED(21, 3)) << 7) + ((DEVSMOOTHED(-3, 2) < DEVSMOOTHED(14, 1)) << 6) + ((DEVSMOOTHED(5, 1) < DEVSMOOTHED(-5, 11)) << 5) + ((DEVSMOOTHED(3, -17) < DEVSMOOTHED(-6, 2)) << 4) + ((DEVSMOOTHED(6, 8) < DEVSMOOTHED(5, -10)) << 3) + ((DEVSMOOTHED(-14, -2) < DEVSMOOTHED(0, 4)) << 2) + ((DEVSMOOTHED(5, -7) < DEVSMOOTHED(-6, 5)) << 1) + ((DEVSMOOTHED(10, 4) < DEVSMOOTHED(4, -7)) << 0));
desc[6] = (uchar)(((DEVSMOOTHED(22, 0) < DEVSMOOTHED(7, -18)) << 7) + ((DEVSMOOTHED(-1, -3) < DEVSMOOTHED(0, 18)) << 6) + ((DEVSMOOTHED(-4, 22) < DEVSMOOTHED(-5, 3)) << 5) + ((DEVSMOOTHED(1, -7) < DEVSMOOTHED(2, -3)) << 4) + ((DEVSMOOTHED(19, -20) < DEVSMOOTHED(17, -2)) << 3) + ((DEVSMOOTHED(3, -10) < DEVSMOOTHED(-8, 24)) << 2) + ((DEVSMOOTHED(-5, -14) < DEVSMOOTHED(7, 5)) << 1) + ((DEVSMOOTHED(-2, 12) < DEVSMOOTHED(-4, -15)) << 0));
desc[7] = (uchar)(((DEVSMOOTHED(4, 12) < DEVSMOOTHED(0, -19)) << 7) + ((DEVSMOOTHED(20, 13) < DEVSMOOTHED(3, 5)) << 6) + ((DEVSMOOTHED(-8, -12) < DEVSMOOTHED(5, 0)) << 5) + ((DEVSMOOTHED(-5, 6) < DEVSMOOTHED(-7, -11)) << 4) + ((DEVSMOOTHED(6, -11) < DEVSMOOTHED(-3, -22)) << 3) + ((DEVSMOOTHED(15, 4) < DEVSMOOTHED(10, 1)) << 2) + ((DEVSMOOTHED(-7, -4) < DEVSMOOTHED(15, -6)) << 1) + ((DEVSMOOTHED(5, 10) < DEVSMOOTHED(0, 24)) << 0));
desc[8] = (uchar)(((DEVSMOOTHED(3, 6) < DEVSMOOTHED(22, -2)) << 7) + ((DEVSMOOTHED(-13, 14) < DEVSMOOTHED(4, -4)) << 6) + ((DEVSMOOTHED(-13, 8) < DEVSMOOTHED(-18, -22)) << 5) + ((DEVSMOOTHED(-1, -1) < DEVSMOOTHED(-7, 3)) << 4) + ((DEVSMOOTHED(-19, -12) < DEVSMOOTHED(4, 3)) << 3) + ((DEVSMOOTHED(8, 10) < DEVSMOOTHED(13, -2)) << 2) + ((DEVSMOOTHED(-6, -1) < DEVSMOOTHED(-6, -5)) << 1) + ((DEVSMOOTHED(2, -21) < DEVSMOOTHED(-3, 2)) << 0));
desc[9] = (uchar)(((DEVSMOOTHED(4, -7) < DEVSMOOTHED(0, 16)) << 7) + ((DEVSMOOTHED(-6, -5) < DEVSMOOTHED(-12, -1)) << 6) + ((DEVSMOOTHED(1, -1) < DEVSMOOTHED(9, 18)) << 5) + ((DEVSMOOTHED(-7, 10) < DEVSMOOTHED(-11, 6)) << 4) + ((DEVSMOOTHED(4, 3) < DEVSMOOTHED(19, -7)) << 3) + ((DEVSMOOTHED(-18, 5) < DEVSMOOTHED(-4, 5)) << 2) + ((DEVSMOOTHED(4, 0) < DEVSMOOTHED(-20, 4)) << 1) + ((DEVSMOOTHED(7, -11) < DEVSMOOTHED(18, 12)) << 0));
desc[10] = (uchar)(((DEVSMOOTHED(-20, 17) < DEVSMOOTHED(-18, 7)) << 7) + ((DEVSMOOTHED(2, 15) < DEVSMOOTHED(19, -11)) << 6) + ((DEVSMOOTHED(-18, 6) < DEVSMOOTHED(-7, 3)) << 5) + ((DEVSMOOTHED(-4, 1) < DEVSMOOTHED(-14, 13)) << 4) + ((DEVSMOOTHED(17, 3) < DEVSMOOTHED(2, -8)) << 3) + ((DEVSMOOTHED(-7, 2) < DEVSMOOTHED(1, 6)) << 2) + ((DEVSMOOTHED(17, -9) < DEVSMOOTHED(-2, 8)) << 1) + ((DEVSMOOTHED(-8, -6) < DEVSMOOTHED(-1, 12)) << 0));
desc[11] = (uchar)(((DEVSMOOTHED(-2, 4) < DEVSMOOTHED(-1, 6)) << 7) + ((DEVSMOOTHED(-2, 7) < DEVSMOOTHED(6, 8)) << 6) + ((DEVSMOOTHED(-8, -1) < DEVSMOOTHED(-7, -9)) << 5) + ((DEVSMOOTHED(8, -9) < DEVSMOOTHED(15, 0)) << 4) + ((DEVSMOOTHED(0, 22) < DEVSMOOTHED(-4, -15)) << 3) + ((DEVSMOOTHED(-14, -1) < DEVSMOOTHED(3, -2)) << 2) + ((DEVSMOOTHED(-7, -4) < DEVSMOOTHED(17, -7)) << 1) + ((DEVSMOOTHED(-8, -2) < DEVSMOOTHED(9, -4)) << 0));
desc[12] = (uchar)(((DEVSMOOTHED(5, -7) < DEVSMOOTHED(7, 7)) << 7) + ((DEVSMOOTHED(-5, 13) < DEVSMOOTHED(-8, 11)) << 6) + ((DEVSMOOTHED(11, -4) < DEVSMOOTHED(0, 8)) << 5) + ((DEVSMOOTHED(5, -11) < DEVSMOOTHED(-9, -6)) << 4) + ((DEVSMOOTHED(2, -6) < DEVSMOOTHED(3, -20)) << 3) + ((DEVSMOOTHED(-6, 2) < DEVSMOOTHED(6, 10)) << 2) + ((DEVSMOOTHED(-6, -6) < DEVSMOOTHED(-15, 7)) << 1) + ((DEVSMOOTHED(-6, -3) < DEVSMOOTHED(2, 1)) << 0));
desc[13] = (uchar)(((DEVSMOOTHED(11, 0) < DEVSMOOTHED(-3, 2)) << 7) + ((DEVSMOOTHED(7, -12) < DEVSMOOTHED(14, 5)) << 6) + ((DEVSMOOTHED(0, -7) < DEVSMOOTHED(-1, -1)) << 5) + ((DEVSMOOTHED(-16, 0) < DEVSMOOTHED(6, 8)) << 4) + ((DEVSMOOTHED(22, 11) < DEVSMOOTHED(0, -3)) << 3) + ((DEVSMOOTHED(19, 0) < DEVSMOOTHED(5, -17)) << 2) + ((DEVSMOOTHED(-23, -14) < DEVSMOOTHED(-13, -19)) << 1) + ((DEVSMOOTHED(-8, 10) < DEVSMOOTHED(-11, -2)) << 0));
desc[14] = (uchar)(((DEVSMOOTHED(-11, 6) < DEVSMOOTHED(-10, 13)) << 7) + ((DEVSMOOTHED(1, -7) < DEVSMOOTHED(14, 0)) << 6) + ((DEVSMOOTHED(-12, 1) < DEVSMOOTHED(-5, -5)) << 5) + ((DEVSMOOTHED(4, 7) < DEVSMOOTHED(8, -1)) << 4) + ((DEVSMOOTHED(-1, -5) < DEVSMOOTHED(15, 2)) << 3) + ((DEVSMOOTHED(-3, -1) < DEVSMOOTHED(7, -10)) << 2) + ((DEVSMOOTHED(3, -6) < DEVSMOOTHED(10, -18)) << 1) + ((DEVSMOOTHED(-7, -13) < DEVSMOOTHED(-13, 10)) << 0));
desc[15] = (uchar)(((DEVSMOOTHED(1, -1) < DEVSMOOTHED(13, -10)) << 7) + ((DEVSMOOTHED(-19, 14) < DEVSMOOTHED(8, -14)) << 6) + ((DEVSMOOTHED(-4, -13) < DEVSMOOTHED(7, 1)) << 5) + ((DEVSMOOTHED(1, -2) < DEVSMOOTHED(12, -7)) << 4) + ((DEVSMOOTHED(3, -5) < DEVSMOOTHED(1, -5)) << 3) + ((DEVSMOOTHED(-2, -2) < DEVSMOOTHED(8, -10)) << 2) + ((DEVSMOOTHED(2, 14) < DEVSMOOTHED(8, 7)) << 1) + ((DEVSMOOTHED(3, 9) < DEVSMOOTHED(8, 2)) << 0));
desc[16] = (uchar)(((DEVSMOOTHED(-9, 1) < DEVSMOOTHED(-18, 0)) << 7) + ((DEVSMOOTHED(4, 0) < DEVSMOOTHED(1, 12)) << 6) + ((DEVSMOOTHED(0, 9) < DEVSMOOTHED(-14, -10)) << 5) + ((DEVSMOOTHED(-13, -9) < DEVSMOOTHED(-2, 6)) << 4) + ((DEVSMOOTHED(1, 5) < DEVSMOOTHED(10, 10)) << 3) + ((DEVSMOOTHED(-3, -6) < DEVSMOOTHED(-16, -5)) << 2) + ((DEVSMOOTHED(11, 6) < DEVSMOOTHED(-5, 0)) << 1) + ((DEVSMOOTHED(-23, 10) < DEVSMOOTHED(1, 2)) << 0));
desc[17] = (uchar)(((DEVSMOOTHED(13, -5) < DEVSMOOTHED(-3, 9)) << 7) + ((DEVSMOOTHED(-4, -1) < DEVSMOOTHED(-13, -5)) << 6) + ((DEVSMOOTHED(10, 13) < DEVSMOOTHED(-11, 8)) << 5) + ((DEVSMOOTHED(19, 20) < DEVSMOOTHED(-9, 2)) << 4) + ((DEVSMOOTHED(4, -8) < DEVSMOOTHED(0, -9)) << 3) + ((DEVSMOOTHED(-14, 10) < DEVSMOOTHED(15, 19)) << 2) + ((DEVSMOOTHED(-14, -12) < DEVSMOOTHED(-10, -3)) << 1) + ((DEVSMOOTHED(-23, -3) < DEVSMOOTHED(17, -2)) << 0));
desc[18] = (uchar)(((DEVSMOOTHED(-3, -11) < DEVSMOOTHED(6, -14)) << 7) + ((DEVSMOOTHED(19, -2) < DEVSMOOTHED(-4, 2)) << 6) + ((DEVSMOOTHED(-5, 5) < DEVSMOOTHED(3, -13)) << 5) + ((DEVSMOOTHED(2, -2) < DEVSMOOTHED(-5, 4)) << 4) + ((DEVSMOOTHED(17, 4) < DEVSMOOTHED(17, -11)) << 3) + ((DEVSMOOTHED(-7, -2) < DEVSMOOTHED(1, 23)) << 2) + ((DEVSMOOTHED(8, 13) < DEVSMOOTHED(1, -16)) << 1) + ((DEVSMOOTHED(-13, -5) < DEVSMOOTHED(1, -17)) << 0));
desc[19] = (uchar)(((DEVSMOOTHED(4, 6) < DEVSMOOTHED(-8, -3)) << 7) + ((DEVSMOOTHED(-5, -9) < DEVSMOOTHED(-2, -10)) << 6) + ((DEVSMOOTHED(-9, 0) < DEVSMOOTHED(-7, -2)) << 5) + ((DEVSMOOTHED(5, 0) < DEVSMOOTHED(5, 2)) << 4) + ((DEVSMOOTHED(-4, -16) < DEVSMOOTHED(6, 3)) << 3) + ((DEVSMOOTHED(2, -15) < DEVSMOOTHED(-2, 12)) << 2) + ((DEVSMOOTHED(4, -1) < DEVSMOOTHED(6, 2)) << 1) + ((DEVSMOOTHED(1, 1) < DEVSMOOTHED(-2, -8)) << 0));
desc[20] = (uchar)(((DEVSMOOTHED(-2, 12) < DEVSMOOTHED(-5, -2)) << 7) + ((DEVSMOOTHED(-8, 8) < DEVSMOOTHED(-9, 9)) << 6) + ((DEVSMOOTHED(2, -10) < DEVSMOOTHED(3, 1)) << 5) + ((DEVSMOOTHED(-4, 10) < DEVSMOOTHED(-9, 4)) << 4) + ((DEVSMOOTHED(6, 12) < DEVSMOOTHED(2, 5)) << 3) + ((DEVSMOOTHED(-3, -8) < DEVSMOOTHED(0, 5)) << 2) + ((DEVSMOOTHED(-13, 1) < DEVSMOOTHED(-7, 2)) << 1) + ((DEVSMOOTHED(-1, -10) < DEVSMOOTHED(7, -18)) << 0));
desc[21] = (uchar)(((DEVSMOOTHED(-1, 8) < DEVSMOOTHED(-9, -10)) << 7) + ((DEVSMOOTHED(-23, -1) < DEVSMOOTHED(6, 2)) << 6) + ((DEVSMOOTHED(-5, -3) < DEVSMOOTHED(3, 2)) << 5) + ((DEVSMOOTHED(0, 11) < DEVSMOOTHED(-4, -7)) << 4) + ((DEVSMOOTHED(15, 2) < DEVSMOOTHED(-10, -3)) << 3) + ((DEVSMOOTHED(-20, -8) < DEVSMOOTHED(-13, 3)) << 2) + ((DEVSMOOTHED(-19, -12) < DEVSMOOTHED(5, -11)) << 1) + ((DEVSMOOTHED(-17, -13) < DEVSMOOTHED(-3, 2)) << 0));
desc[22] = (uchar)(((DEVSMOOTHED(7, 4) < DEVSMOOTHED(-12, 0)) << 7) + ((DEVSMOOTHED(5, -1) < DEVSMOOTHED(-14, -6)) << 6) + ((DEVSMOOTHED(-4, 11) < DEVSMOOTHED(0, -4)) << 5) + ((DEVSMOOTHED(3, 10) < DEVSMOOTHED(7, -3)) << 4) + ((DEVSMOOTHED(13, 21) < DEVSMOOTHED(-11, 6)) << 3) + ((DEVSMOOTHED(-12, 24) < DEVSMOOTHED(-7, -4)) << 2) + ((DEVSMOOTHED(4, 16) < DEVSMOOTHED(3, -14)) << 1) + ((DEVSMOOTHED(-3, 5) < DEVSMOOTHED(-7, -12)) << 0));
desc[23] = (uchar)(((DEVSMOOTHED(0, -4) < DEVSMOOTHED(7, -5)) << 7) + ((DEVSMOOTHED(-17, -9) < DEVSMOOTHED(13, -7)) << 6) + ((DEVSMOOTHED(22, -6) < DEVSMOOTHED(-11, 5)) << 5) + ((DEVSMOOTHED(2, -8) < DEVSMOOTHED(23, -11)) << 4) + ((DEVSMOOTHED(7, -10) < DEVSMOOTHED(-1, 14)) << 3) + ((DEVSMOOTHED(-3, -10) < DEVSMOOTHED(8, 3)) << 2) + ((DEVSMOOTHED(-13, 1) < DEVSMOOTHED(-6, 0)) << 1) + ((DEVSMOOTHED(-7, -21) < DEVSMOOTHED(6, -14)) << 0));
desc[24] = (uchar)(((DEVSMOOTHED(18, 19) < DEVSMOOTHED(-4, -6)) << 7) + ((DEVSMOOTHED(10, 7) < DEVSMOOTHED(-1, -4)) << 6) + ((DEVSMOOTHED(-1, 21) < DEVSMOOTHED(1, -5)) << 5) + ((DEVSMOOTHED(-10, 6) < DEVSMOOTHED(-11, -2)) << 4) + ((DEVSMOOTHED(18, -3) < DEVSMOOTHED(-1, 7)) << 3) + ((DEVSMOOTHED(-3, -9) < DEVSMOOTHED(-5, 10)) << 2) + ((DEVSMOOTHED(-13, 14) < DEVSMOOTHED(17, -3)) << 1) + ((DEVSMOOTHED(11, -19) < DEVSMOOTHED(-1, -18)) << 0));
desc[25] = (uchar)(((DEVSMOOTHED(8, -2) < DEVSMOOTHED(-18, -23)) << 7) + ((DEVSMOOTHED(0, -5) < DEVSMOOTHED(-2, -9)) << 6) + ((DEVSMOOTHED(-4, -11) < DEVSMOOTHED(2, -8)) << 5) + ((DEVSMOOTHED(14, 6) < DEVSMOOTHED(-3, -6)) << 4) + ((DEVSMOOTHED(-3, 0) < DEVSMOOTHED(-15, 0)) << 3) + ((DEVSMOOTHED(-9, 4) < DEVSMOOTHED(-15, -9)) << 2) + ((DEVSMOOTHED(-1, 11) < DEVSMOOTHED(3, 11)) << 1) + ((DEVSMOOTHED(-10, -16) < DEVSMOOTHED(-7, 7)) << 0));
desc[26] = (uchar)(((DEVSMOOTHED(-2, -10) < DEVSMOOTHED(-10, -2)) << 7) + ((DEVSMOOTHED(-5, -3) < DEVSMOOTHED(5, -23)) << 6) + ((DEVSMOOTHED(13, -8) < DEVSMOOTHED(-15, -11)) << 5) + ((DEVSMOOTHED(-15, 11) < DEVSMOOTHED(6, -6)) << 4) + ((DEVSMOOTHED(-16, -3) < DEVSMOOTHED(-2, 2)) << 3) + ((DEVSMOOTHED(6, 12) < DEVSMOOTHED(-16, 24)) << 2) + ((DEVSMOOTHED(-10, 0) < DEVSMOOTHED(8, 11)) << 1) + ((DEVSMOOTHED(-7, 7) < DEVSMOOTHED(-19, -7)) << 0));
desc[27] = (uchar)(((DEVSMOOTHED(5, 16) < DEVSMOOTHED(9, -3)) << 7) + ((DEVSMOOTHED(9, 7) < DEVSMOOTHED(-7, -16)) << 6) + ((DEVSMOOTHED(3, 2) < DEVSMOOTHED(-10, 9)) << 5) + ((DEVSMOOTHED(21, 1) < DEVSMOOTHED(8, 7)) << 4) + ((DEVSMOOTHED(7, 0) < DEVSMOOTHED(1, 17)) << 3) + ((DEVSMOOTHED(-8, 12) < DEVSMOOTHED(9, 6)) << 2) + ((DEVSMOOTHED(11, -7) < DEVSMOOTHED(-8, -6)) << 1) + ((DEVSMOOTHED(19, 0) < DEVSMOOTHED(9, 3)) << 0));
desc[28] = (uchar)(((DEVSMOOTHED(1, -7) < DEVSMOOTHED(-5, -11)) << 7) + ((DEVSMOOTHED(0, 8) < DEVSMOOTHED(-2, 14)) << 6) + ((DEVSMOOTHED(12, -2) < DEVSMOOTHED(-15, -6)) << 5) + ((DEVSMOOTHED(4, 12) < DEVSMOOTHED(0, -21)) << 4) + ((DEVSMOOTHED(17, -4) < DEVSMOOTHED(-6, -7)) << 3) + ((DEVSMOOTHED(-10, -9) < DEVSMOOTHED(-14, -7)) << 2) + ((DEVSMOOTHED(-15, -10) < DEVSMOOTHED(-15, -14)) << 1) + ((DEVSMOOTHED(-7, -5) < DEVSMOOTHED(5, -12)) << 0));
desc[29] = (uchar)(((DEVSMOOTHED(-4, 0) < DEVSMOOTHED(15, -4)) << 7) + ((DEVSMOOTHED(5, 2) < DEVSMOOTHED(-6, -23)) << 6) + ((DEVSMOOTHED(-4, -21) < DEVSMOOTHED(-6, 4)) << 5) + ((DEVSMOOTHED(-10, 5) < DEVSMOOTHED(-15, 6)) << 4) + ((DEVSMOOTHED(4, -3) < DEVSMOOTHED(-1, 5)) << 3) + ((DEVSMOOTHED(-4, 19) < DEVSMOOTHED(-23, -4)) << 2) + ((DEVSMOOTHED(-4, 17) < DEVSMOOTHED(13, -11)) << 1) + ((DEVSMOOTHED(1, 12) < DEVSMOOTHED(4, -14)) << 0));
desc[30] = (uchar)(((DEVSMOOTHED(-11, -6) < DEVSMOOTHED(-20, 10)) << 7) + ((DEVSMOOTHED(4, 5) < DEVSMOOTHED(3, 20)) << 6) + ((DEVSMOOTHED(-8, -20) < DEVSMOOTHED(3, 1)) << 5) + ((DEVSMOOTHED(-19, 9) < DEVSMOOTHED(9, -3)) << 4) + ((DEVSMOOTHED(18, 15) < DEVSMOOTHED(11, -4)) << 3) + ((DEVSMOOTHED(12, 16) < DEVSMOOTHED(8, 7)) << 2) + ((DEVSMOOTHED(-14, -8) < DEVSMOOTHED(-3, 9)) << 1) + ((DEVSMOOTHED(-6, 0) < DEVSMOOTHED(2, -4)) << 0));
desc[31] = (uchar)(((DEVSMOOTHED(1, -10) < DEVSMOOTHED(-1, 2)) << 7) + ((DEVSMOOTHED(8, -7) < DEVSMOOTHED(-6, 18)) << 6) + ((DEVSMOOTHED(9, 12) < DEVSMOOTHED(-7, -23)) << 5) + ((DEVSMOOTHED(8, -6) < DEVSMOOTHED(5, 2)) << 4) + ((DEVSMOOTHED(-9, 6) < DEVSMOOTHED(-12, -7)) << 3) + ((DEVSMOOTHED(-1, -2) < DEVSMOOTHED(-7, 2)) << 2) + ((DEVSMOOTHED(9, 9) < DEVSMOOTHED(7, 15)) << 1) + ((DEVSMOOTHED(6, 2) < DEVSMOOTHED(-6, 6)) << 0));
#undef DEVSMOOTHED
}
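// Editorial note: each desc[i] line above packs eight independent smoothed-intensity
// comparisons into one byte -- every comparison contributes a single bit, shifted into
// positions 7..0. A stripped-down sketch of the packing for one byte (the first two point
// pairs are copied from desc[0] above, the remaining six are elided; the function name is
// illustrative only):
#if 0
__device__ unsigned char packByteSketch(int* intImage, int pty, int ptx, int stride) {
#define SMOOTH(y, x) smoothedSumDevice(intImage, pty, ptx, y, x, stride)
    unsigned char b = 0;
    b |= (SMOOTH(-2, -1) < SMOOTH(7, -1)) << 7;    // bit 7: first point pair
    b |= (SMOOTH(-14, -1) < SMOOTH(-3, 3)) << 6;   // bit 6: second point pair
    // ... six more comparisons fill bits 5..0 ...
#undef SMOOTH
    return b;
}
#endif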
hipError_t MyCudaUtils::ComputeBriefDescriptors(cv::Mat& image, std::vector<cv::KeyPoint>& kps, cv::Mat& desc, int descSize)
{
if (descSize != 32) {
std::cout << "Descriptor sizes other than 32 bytes currently not implemented" << std::endl;
std::cout << "Press q to exit the program or any other key to continue: ";
char c;
std::cin >> c;
if ('q' == c)
exit(EXIT_FAILURE);
}
// Convert to greyscale if required
cv::Mat grayImage = image;
if (image.type() != CV_8U) cv::cvtColor(image, grayImage, CV_BGR2GRAY);
// Compute the integral image for smoothing
cv::Mat intImage;
cv::integral(grayImage, intImage, CV_32S);
//Remove keypoints very close to the border
static const int PATCH_SIZE = 48; // Size of patch used to compute descriptors - 48 x 48 pixel window (sample offsets reach roughly +/-24 from the keypoint)
static const int KERNEL_SIZE = 9; // Size of filtering Kernel used on each descriptor point to compare 9x9 pixel window
runByImageBorder(kps, image.size(), PATCH_SIZE / 2 + KERNEL_SIZE / 2); // We don't want our patch or kernel to overflow to the edge so offset by both of them
// Initialise list of descriptors to zero
desc = cv::Mat::zeros((int)kps.size(), descSize, CV_8U);
int knp = 2; // Number of params describing a keypoint
int imSize = intImage.rows * intImage.cols;
// Allocate memory
int* dev_intImage;
float* dev_kps;
unsigned char* dev_desc;
try {
CUDA_CHECK(hipSetDevice(0)); // I dunno about this because it means you can't see the error type involved.
CUDA_CHECK(hipMalloc((void**)&dev_intImage, imSize * sizeof(int)));
CUDA_CHECK(hipMalloc((void**)&dev_kps, kps.size() * knp * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&dev_desc, kps.size() * descSize * sizeof(unsigned char)));
// Copy the integral image and initialise descriptors to zero
CUDA_CHECK(hipMemcpy(dev_intImage, intImage.data, imSize * sizeof(int), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(dev_desc, desc.data, kps.size() * descSize * sizeof(unsigned char), hipMemcpyHostToDevice));
// Copy the keypoints into an array and then onto the device
int num_kps = kps.size();
float* kpsArray = new float[num_kps * knp]; // SQUARE BRACKETS FOR DYNAMIC MEMORY
float* kpsXArray = kpsArray;
float* kpsYArray = kpsArray + num_kps;
for (int i = 0; i < num_kps; i++) {
kpsXArray[i] = kps[i].pt.x;
kpsYArray[i] = kps[i].pt.y;
}
CUDA_CHECK(hipMemcpy(dev_kps, kpsArray, num_kps * knp * sizeof(float), hipMemcpyHostToDevice));
delete[] kpsArray;
// Launch the Kernel
int blocks = cvCeil(num_kps / 128.0); // divide in floating point and round up so keypoints in a partial final block still get a thread
pixelTest32Kernel<<<blocks, 128>>>(dev_intImage, intImage.cols, dev_kps, num_kps, dev_desc);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
// Download the output
CUDA_CHECK(hipMemcpy(desc.data, dev_desc, kps.size() * descSize * sizeof(unsigned char), hipMemcpyDeviceToHost));
throw (hipSuccess); // deliberately thrown so the success path also flows through the shared cleanup in the catch block below
}
catch (hipError_t cudaStatus) {
hipFree(dev_intImage);
hipFree(dev_kps);
hipFree(dev_desc);
return cudaStatus;
}
}
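// Editorial usage sketch (not part of the original file): how the host entry point above might
// be driven end-to-end. The detector choice (STAR, as the comments suggest) and the matching
// step are assumptions for illustration only.
#if 0
#include <opencv2/features2d/features2d.hpp>
void describeImageSketch(cv::Mat& img) {
    std::vector<cv::KeyPoint> kps;
    cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("STAR");
    detector->detect(img, kps);        // keypoints too close to the border are pruned inside
                                       // ComputeBriefDescriptors via runByImageBorder
    cv::Mat desc;
    hipError_t status = MyCudaUtils::ComputeBriefDescriptors(img, kps, desc, 32);
    if (status != hipSuccess) { /* handle the error */ }
    // desc now holds one 32-byte binary descriptor per surviving keypoint, ready for
    // Hamming-distance matching (e.g. cv::BFMatcher(cv::NORM_HAMMING)).
}
#endif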
/**************************************************************************************
* SERIAL BRIEF
**************************************************************************************/
//// This computes the box filter smoothing of the image (which is just an average
//// and so can be computed by addition and subtraction of appropriate corners in the
//// integral image)
//inline int smoothedSum(const cv::Mat& sum, const cv::KeyPoint& pt, int y, int x)
//{
// static const int KERNEL_SIZE = 9;
// static const int HALF_KERNEL = KERNEL_SIZE / 2;
//
// int img_y = (int)(pt.pt.y + 0.5) + y;
// int img_x = (int)(pt.pt.x + 0.5) + x; // Adding 0.5 before the cast to int rounds to the nearest integer, since pt.pt.x is a float (keypoints can have sub-pixel precision)
// return sum.at<int>(img_y + HALF_KERNEL + 1, img_x + HALF_KERNEL + 1) // The four corners of the integral image
// - sum.at<int>(img_y + HALF_KERNEL + 1, img_x - HALF_KERNEL)
// - sum.at<int>(img_y - HALF_KERNEL, img_x + HALF_KERNEL + 1)
// + sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
//}
//// Computes the response with a 32byte descriptor of all the keypoints in the set
//// It iterates through all the keypoints, and then defines the SMOOTHED function
//// to be called with the given integral image and the current keypoint coordinates.
//static void pixelTests32(const cv::Mat& sum, const std::vector<cv::KeyPoint>& keypoints, cv::Mat& descriptors)
//{
// // So this would be nicely parallelisable, although I'm not sure how CUDA likes #defines, or whether we could even include the function in there.
// // Okay so we only need to mark smoothedSum as GPU function using the __device__ specifier to make sure that it can be accessed inside the Kernel
// // Anyway the point is we can have each thread doing one of these just fine since they are all identical - and all the other stuff is so much fluffing
// // around.
// for (int i = 0; i < (int)keypoints.size(); ++i)
// {
// uchar* desc = descriptors.ptr(i);
// const cv::KeyPoint& pt = keypoints[i];
//
//#define SMOOTHED(y,x) smoothedSum(sum, pt, y, x)
// // Eight binary comparisons contribute a single bit in the descriptor each byte, bit shifted into place in 1 of the 32 total bytes
// desc[0] = (uchar)(((SMOOTHED(-2, -1) < SMOOTHED(7, -1)) << 7) + ((SMOOTHED(-14, -1) < SMOOTHED(-3, 3)) << 6) + ((SMOOTHED(1, -2) < SMOOTHED(11, 2)) << 5) + ((SMOOTHED(1, 6) < SMOOTHED(-10, -7)) << 4) + ((SMOOTHED(13, 2) < SMOOTHED(-1, 0)) << 3) + ((SMOOTHED(-14, 5) < SMOOTHED(5, -3)) << 2) + ((SMOOTHED(-2, 8) < SMOOTHED(2, 4)) << 1) + ((SMOOTHED(-11, 8) < SMOOTHED(-15, 5)) << 0));
// desc[1] = (uchar)(((SMOOTHED(-6, -23) < SMOOTHED(8, -9)) << 7) + ((SMOOTHED(-12, 6) < SMOOTHED(-10, 8)) << 6) + ((SMOOTHED(-3, -1) < SMOOTHED(8, 1)) << 5) + ((SMOOTHED(3, 6) < SMOOTHED(5, 6)) << 4) + ((SMOOTHED(-7, -6) < SMOOTHED(5, -5)) << 3) + ((SMOOTHED(22, -2) < SMOOTHED(-11, -8)) << 2) + ((SMOOTHED(14, 7) < SMOOTHED(8, 5)) << 1) + ((SMOOTHED(-1, 14) < SMOOTHED(-5, -14)) << 0));
// desc[2] = (uchar)(((SMOOTHED(-14, 9) < SMOOTHED(2, 0)) << 7) + ((SMOOTHED(7, -3) < SMOOTHED(22, 6)) << 6) + ((SMOOTHED(-6, 6) < SMOOTHED(-8, -5)) << 5) + ((SMOOTHED(-5, 9) < SMOOTHED(7, -1)) << 4) + ((SMOOTHED(-3, -7) < SMOOTHED(-10, -18)) << 3) + ((SMOOTHED(4, -5) < SMOOTHED(0, 11)) << 2) + ((SMOOTHED(2, 3) < SMOOTHED(9, 10)) << 1) + ((SMOOTHED(-10, 3) < SMOOTHED(4, 9)) << 0));
// desc[3] = (uchar)(((SMOOTHED(0, 12) < SMOOTHED(-3, 19)) << 7) + ((SMOOTHED(1, 15) < SMOOTHED(-11, -5)) << 6) + ((SMOOTHED(14, -1) < SMOOTHED(7, 8)) << 5) + ((SMOOTHED(7, -23) < SMOOTHED(-5, 5)) << 4) + ((SMOOTHED(0, -6) < SMOOTHED(-10, 17)) << 3) + ((SMOOTHED(13, -4) < SMOOTHED(-3, -4)) << 2) + ((SMOOTHED(-12, 1) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(0, 8) < SMOOTHED(3, 22)) << 0));
// desc[4] = (uchar)(((SMOOTHED(-13, 13) < SMOOTHED(3, -1)) << 7) + ((SMOOTHED(-16, 17) < SMOOTHED(6, 10)) << 6) + ((SMOOTHED(7, 15) < SMOOTHED(-5, 0)) << 5) + ((SMOOTHED(2, -12) < SMOOTHED(19, -2)) << 4) + ((SMOOTHED(3, -6) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(8, 3) < SMOOTHED(0, 14)) << 2) + ((SMOOTHED(4, -11) < SMOOTHED(5, 5)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(7, 1)) << 0));
// desc[5] = (uchar)(((SMOOTHED(6, 12) < SMOOTHED(21, 3)) << 7) + ((SMOOTHED(-3, 2) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(5, 1) < SMOOTHED(-5, 11)) << 5) + ((SMOOTHED(3, -17) < SMOOTHED(-6, 2)) << 4) + ((SMOOTHED(6, 8) < SMOOTHED(5, -10)) << 3) + ((SMOOTHED(-14, -2) < SMOOTHED(0, 4)) << 2) + ((SMOOTHED(5, -7) < SMOOTHED(-6, 5)) << 1) + ((SMOOTHED(10, 4) < SMOOTHED(4, -7)) << 0));
// desc[6] = (uchar)(((SMOOTHED(22, 0) < SMOOTHED(7, -18)) << 7) + ((SMOOTHED(-1, -3) < SMOOTHED(0, 18)) << 6) + ((SMOOTHED(-4, 22) < SMOOTHED(-5, 3)) << 5) + ((SMOOTHED(1, -7) < SMOOTHED(2, -3)) << 4) + ((SMOOTHED(19, -20) < SMOOTHED(17, -2)) << 3) + ((SMOOTHED(3, -10) < SMOOTHED(-8, 24)) << 2) + ((SMOOTHED(-5, -14) < SMOOTHED(7, 5)) << 1) + ((SMOOTHED(-2, 12) < SMOOTHED(-4, -15)) << 0));
// desc[7] = (uchar)(((SMOOTHED(4, 12) < SMOOTHED(0, -19)) << 7) + ((SMOOTHED(20, 13) < SMOOTHED(3, 5)) << 6) + ((SMOOTHED(-8, -12) < SMOOTHED(5, 0)) << 5) + ((SMOOTHED(-5, 6) < SMOOTHED(-7, -11)) << 4) + ((SMOOTHED(6, -11) < SMOOTHED(-3, -22)) << 3) + ((SMOOTHED(15, 4) < SMOOTHED(10, 1)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(15, -6)) << 1) + ((SMOOTHED(5, 10) < SMOOTHED(0, 24)) << 0));
// desc[8] = (uchar)(((SMOOTHED(3, 6) < SMOOTHED(22, -2)) << 7) + ((SMOOTHED(-13, 14) < SMOOTHED(4, -4)) << 6) + ((SMOOTHED(-13, 8) < SMOOTHED(-18, -22)) << 5) + ((SMOOTHED(-1, -1) < SMOOTHED(-7, 3)) << 4) + ((SMOOTHED(-19, -12) < SMOOTHED(4, 3)) << 3) + ((SMOOTHED(8, 10) < SMOOTHED(13, -2)) << 2) + ((SMOOTHED(-6, -1) < SMOOTHED(-6, -5)) << 1) + ((SMOOTHED(2, -21) < SMOOTHED(-3, 2)) << 0));
// desc[9] = (uchar)(((SMOOTHED(4, -7) < SMOOTHED(0, 16)) << 7) + ((SMOOTHED(-6, -5) < SMOOTHED(-12, -1)) << 6) + ((SMOOTHED(1, -1) < SMOOTHED(9, 18)) << 5) + ((SMOOTHED(-7, 10) < SMOOTHED(-11, 6)) << 4) + ((SMOOTHED(4, 3) < SMOOTHED(19, -7)) << 3) + ((SMOOTHED(-18, 5) < SMOOTHED(-4, 5)) << 2) + ((SMOOTHED(4, 0) < SMOOTHED(-20, 4)) << 1) + ((SMOOTHED(7, -11) < SMOOTHED(18, 12)) << 0));
// desc[10] = (uchar)(((SMOOTHED(-20, 17) < SMOOTHED(-18, 7)) << 7) + ((SMOOTHED(2, 15) < SMOOTHED(19, -11)) << 6) + ((SMOOTHED(-18, 6) < SMOOTHED(-7, 3)) << 5) + ((SMOOTHED(-4, 1) < SMOOTHED(-14, 13)) << 4) + ((SMOOTHED(17, 3) < SMOOTHED(2, -8)) << 3) + ((SMOOTHED(-7, 2) < SMOOTHED(1, 6)) << 2) + ((SMOOTHED(17, -9) < SMOOTHED(-2, 8)) << 1) + ((SMOOTHED(-8, -6) < SMOOTHED(-1, 12)) << 0));
// desc[11] = (uchar)(((SMOOTHED(-2, 4) < SMOOTHED(-1, 6)) << 7) + ((SMOOTHED(-2, 7) < SMOOTHED(6, 8)) << 6) + ((SMOOTHED(-8, -1) < SMOOTHED(-7, -9)) << 5) + ((SMOOTHED(8, -9) < SMOOTHED(15, 0)) << 4) + ((SMOOTHED(0, 22) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(-14, -1) < SMOOTHED(3, -2)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(17, -7)) << 1) + ((SMOOTHED(-8, -2) < SMOOTHED(9, -4)) << 0));
// desc[12] = (uchar)(((SMOOTHED(5, -7) < SMOOTHED(7, 7)) << 7) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 11)) << 6) + ((SMOOTHED(11, -4) < SMOOTHED(0, 8)) << 5) + ((SMOOTHED(5, -11) < SMOOTHED(-9, -6)) << 4) + ((SMOOTHED(2, -6) < SMOOTHED(3, -20)) << 3) + ((SMOOTHED(-6, 2) < SMOOTHED(6, 10)) << 2) + ((SMOOTHED(-6, -6) < SMOOTHED(-15, 7)) << 1) + ((SMOOTHED(-6, -3) < SMOOTHED(2, 1)) << 0));
// desc[13] = (uchar)(((SMOOTHED(11, 0) < SMOOTHED(-3, 2)) << 7) + ((SMOOTHED(7, -12) < SMOOTHED(14, 5)) << 6) + ((SMOOTHED(0, -7) < SMOOTHED(-1, -1)) << 5) + ((SMOOTHED(-16, 0) < SMOOTHED(6, 8)) << 4) + ((SMOOTHED(22, 11) < SMOOTHED(0, -3)) << 3) + ((SMOOTHED(19, 0) < SMOOTHED(5, -17)) << 2) + ((SMOOTHED(-23, -14) < SMOOTHED(-13, -19)) << 1) + ((SMOOTHED(-8, 10) < SMOOTHED(-11, -2)) << 0));
// desc[14] = (uchar)(((SMOOTHED(-11, 6) < SMOOTHED(-10, 13)) << 7) + ((SMOOTHED(1, -7) < SMOOTHED(14, 0)) << 6) + ((SMOOTHED(-12, 1) < SMOOTHED(-5, -5)) << 5) + ((SMOOTHED(4, 7) < SMOOTHED(8, -1)) << 4) + ((SMOOTHED(-1, -5) < SMOOTHED(15, 2)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(7, -10)) << 2) + ((SMOOTHED(3, -6) < SMOOTHED(10, -18)) << 1) + ((SMOOTHED(-7, -13) < SMOOTHED(-13, 10)) << 0));
// desc[15] = (uchar)(((SMOOTHED(1, -1) < SMOOTHED(13, -10)) << 7) + ((SMOOTHED(-19, 14) < SMOOTHED(8, -14)) << 6) + ((SMOOTHED(-4, -13) < SMOOTHED(7, 1)) << 5) + ((SMOOTHED(1, -2) < SMOOTHED(12, -7)) << 4) + ((SMOOTHED(3, -5) < SMOOTHED(1, -5)) << 3) + ((SMOOTHED(-2, -2) < SMOOTHED(8, -10)) << 2) + ((SMOOTHED(2, 14) < SMOOTHED(8, 7)) << 1) + ((SMOOTHED(3, 9) < SMOOTHED(8, 2)) << 0));
// desc[16] = (uchar)(((SMOOTHED(-9, 1) < SMOOTHED(-18, 0)) << 7) + ((SMOOTHED(4, 0) < SMOOTHED(1, 12)) << 6) + ((SMOOTHED(0, 9) < SMOOTHED(-14, -10)) << 5) + ((SMOOTHED(-13, -9) < SMOOTHED(-2, 6)) << 4) + ((SMOOTHED(1, 5) < SMOOTHED(10, 10)) << 3) + ((SMOOTHED(-3, -6) < SMOOTHED(-16, -5)) << 2) + ((SMOOTHED(11, 6) < SMOOTHED(-5, 0)) << 1) + ((SMOOTHED(-23, 10) < SMOOTHED(1, 2)) << 0));
// desc[17] = (uchar)(((SMOOTHED(13, -5) < SMOOTHED(-3, 9)) << 7) + ((SMOOTHED(-4, -1) < SMOOTHED(-13, -5)) << 6) + ((SMOOTHED(10, 13) < SMOOTHED(-11, 8)) << 5) + ((SMOOTHED(19, 20) < SMOOTHED(-9, 2)) << 4) + ((SMOOTHED(4, -8) < SMOOTHED(0, -9)) << 3) + ((SMOOTHED(-14, 10) < SMOOTHED(15, 19)) << 2) + ((SMOOTHED(-14, -12) < SMOOTHED(-10, -3)) << 1) + ((SMOOTHED(-23, -3) < SMOOTHED(17, -2)) << 0));
// desc[18] = (uchar)(((SMOOTHED(-3, -11) < SMOOTHED(6, -14)) << 7) + ((SMOOTHED(19, -2) < SMOOTHED(-4, 2)) << 6) + ((SMOOTHED(-5, 5) < SMOOTHED(3, -13)) << 5) + ((SMOOTHED(2, -2) < SMOOTHED(-5, 4)) << 4) + ((SMOOTHED(17, 4) < SMOOTHED(17, -11)) << 3) + ((SMOOTHED(-7, -2) < SMOOTHED(1, 23)) << 2) + ((SMOOTHED(8, 13) < SMOOTHED(1, -16)) << 1) + ((SMOOTHED(-13, -5) < SMOOTHED(1, -17)) << 0));
// desc[19] = (uchar)(((SMOOTHED(4, 6) < SMOOTHED(-8, -3)) << 7) + ((SMOOTHED(-5, -9) < SMOOTHED(-2, -10)) << 6) + ((SMOOTHED(-9, 0) < SMOOTHED(-7, -2)) << 5) + ((SMOOTHED(5, 0) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-4, -16) < SMOOTHED(6, 3)) << 3) + ((SMOOTHED(2, -15) < SMOOTHED(-2, 12)) << 2) + ((SMOOTHED(4, -1) < SMOOTHED(6, 2)) << 1) + ((SMOOTHED(1, 1) < SMOOTHED(-2, -8)) << 0));
// desc[20] = (uchar)(((SMOOTHED(-2, 12) < SMOOTHED(-5, -2)) << 7) + ((SMOOTHED(-8, 8) < SMOOTHED(-9, 9)) << 6) + ((SMOOTHED(2, -10) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-4, 10) < SMOOTHED(-9, 4)) << 4) + ((SMOOTHED(6, 12) < SMOOTHED(2, 5)) << 3) + ((SMOOTHED(-3, -8) < SMOOTHED(0, 5)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-7, 2)) << 1) + ((SMOOTHED(-1, -10) < SMOOTHED(7, -18)) << 0));
// desc[21] = (uchar)(((SMOOTHED(-1, 8) < SMOOTHED(-9, -10)) << 7) + ((SMOOTHED(-23, -1) < SMOOTHED(6, 2)) << 6) + ((SMOOTHED(-5, -3) < SMOOTHED(3, 2)) << 5) + ((SMOOTHED(0, 11) < SMOOTHED(-4, -7)) << 4) + ((SMOOTHED(15, 2) < SMOOTHED(-10, -3)) << 3) + ((SMOOTHED(-20, -8) < SMOOTHED(-13, 3)) << 2) + ((SMOOTHED(-19, -12) < SMOOTHED(5, -11)) << 1) + ((SMOOTHED(-17, -13) < SMOOTHED(-3, 2)) << 0));
// desc[22] = (uchar)(((SMOOTHED(7, 4) < SMOOTHED(-12, 0)) << 7) + ((SMOOTHED(5, -1) < SMOOTHED(-14, -6)) << 6) + ((SMOOTHED(-4, 11) < SMOOTHED(0, -4)) << 5) + ((SMOOTHED(3, 10) < SMOOTHED(7, -3)) << 4) + ((SMOOTHED(13, 21) < SMOOTHED(-11, 6)) << 3) + ((SMOOTHED(-12, 24) < SMOOTHED(-7, -4)) << 2) + ((SMOOTHED(4, 16) < SMOOTHED(3, -14)) << 1) + ((SMOOTHED(-3, 5) < SMOOTHED(-7, -12)) << 0));
// desc[23] = (uchar)(((SMOOTHED(0, -4) < SMOOTHED(7, -5)) << 7) + ((SMOOTHED(-17, -9) < SMOOTHED(13, -7)) << 6) + ((SMOOTHED(22, -6) < SMOOTHED(-11, 5)) << 5) + ((SMOOTHED(2, -8) < SMOOTHED(23, -11)) << 4) + ((SMOOTHED(7, -10) < SMOOTHED(-1, 14)) << 3) + ((SMOOTHED(-3, -10) < SMOOTHED(8, 3)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-6, 0)) << 1) + ((SMOOTHED(-7, -21) < SMOOTHED(6, -14)) << 0));
// desc[24] = (uchar)(((SMOOTHED(18, 19) < SMOOTHED(-4, -6)) << 7) + ((SMOOTHED(10, 7) < SMOOTHED(-1, -4)) << 6) + ((SMOOTHED(-1, 21) < SMOOTHED(1, -5)) << 5) + ((SMOOTHED(-10, 6) < SMOOTHED(-11, -2)) << 4) + ((SMOOTHED(18, -3) < SMOOTHED(-1, 7)) << 3) + ((SMOOTHED(-3, -9) < SMOOTHED(-5, 10)) << 2) + ((SMOOTHED(-13, 14) < SMOOTHED(17, -3)) << 1) + ((SMOOTHED(11, -19) < SMOOTHED(-1, -18)) << 0));
// desc[25] = (uchar)(((SMOOTHED(8, -2) < SMOOTHED(-18, -23)) << 7) + ((SMOOTHED(0, -5) < SMOOTHED(-2, -9)) << 6) + ((SMOOTHED(-4, -11) < SMOOTHED(2, -8)) << 5) + ((SMOOTHED(14, 6) < SMOOTHED(-3, -6)) << 4) + ((SMOOTHED(-3, 0) < SMOOTHED(-15, 0)) << 3) + ((SMOOTHED(-9, 4) < SMOOTHED(-15, -9)) << 2) + ((SMOOTHED(-1, 11) < SMOOTHED(3, 11)) << 1) + ((SMOOTHED(-10, -16) < SMOOTHED(-7, 7)) << 0));
// desc[26] = (uchar)(((SMOOTHED(-2, -10) < SMOOTHED(-10, -2)) << 7) + ((SMOOTHED(-5, -3) < SMOOTHED(5, -23)) << 6) + ((SMOOTHED(13, -8) < SMOOTHED(-15, -11)) << 5) + ((SMOOTHED(-15, 11) < SMOOTHED(6, -6)) << 4) + ((SMOOTHED(-16, -3) < SMOOTHED(-2, 2)) << 3) + ((SMOOTHED(6, 12) < SMOOTHED(-16, 24)) << 2) + ((SMOOTHED(-10, 0) < SMOOTHED(8, 11)) << 1) + ((SMOOTHED(-7, 7) < SMOOTHED(-19, -7)) << 0));
// desc[27] = (uchar)(((SMOOTHED(5, 16) < SMOOTHED(9, -3)) << 7) + ((SMOOTHED(9, 7) < SMOOTHED(-7, -16)) << 6) + ((SMOOTHED(3, 2) < SMOOTHED(-10, 9)) << 5) + ((SMOOTHED(21, 1) < SMOOTHED(8, 7)) << 4) + ((SMOOTHED(7, 0) < SMOOTHED(1, 17)) << 3) + ((SMOOTHED(-8, 12) < SMOOTHED(9, 6)) << 2) + ((SMOOTHED(11, -7) < SMOOTHED(-8, -6)) << 1) + ((SMOOTHED(19, 0) < SMOOTHED(9, 3)) << 0));
// desc[28] = (uchar)(((SMOOTHED(1, -7) < SMOOTHED(-5, -11)) << 7) + ((SMOOTHED(0, 8) < SMOOTHED(-2, 14)) << 6) + ((SMOOTHED(12, -2) < SMOOTHED(-15, -6)) << 5) + ((SMOOTHED(4, 12) < SMOOTHED(0, -21)) << 4) + ((SMOOTHED(17, -4) < SMOOTHED(-6, -7)) << 3) + ((SMOOTHED(-10, -9) < SMOOTHED(-14, -7)) << 2) + ((SMOOTHED(-15, -10) < SMOOTHED(-15, -14)) << 1) + ((SMOOTHED(-7, -5) < SMOOTHED(5, -12)) << 0));
// desc[29] = (uchar)(((SMOOTHED(-4, 0) < SMOOTHED(15, -4)) << 7) + ((SMOOTHED(5, 2) < SMOOTHED(-6, -23)) << 6) + ((SMOOTHED(-4, -21) < SMOOTHED(-6, 4)) << 5) + ((SMOOTHED(-10, 5) < SMOOTHED(-15, 6)) << 4) + ((SMOOTHED(4, -3) < SMOOTHED(-1, 5)) << 3) + ((SMOOTHED(-4, 19) < SMOOTHED(-23, -4)) << 2) + ((SMOOTHED(-4, 17) < SMOOTHED(13, -11)) << 1) + ((SMOOTHED(1, 12) < SMOOTHED(4, -14)) << 0));
// desc[30] = (uchar)(((SMOOTHED(-11, -6) < SMOOTHED(-20, 10)) << 7) + ((SMOOTHED(4, 5) < SMOOTHED(3, 20)) << 6) + ((SMOOTHED(-8, -20) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-19, 9) < SMOOTHED(9, -3)) << 4) + ((SMOOTHED(18, 15) < SMOOTHED(11, -4)) << 3) + ((SMOOTHED(12, 16) < SMOOTHED(8, 7)) << 2) + ((SMOOTHED(-14, -8) < SMOOTHED(-3, 9)) << 1) + ((SMOOTHED(-6, 0) < SMOOTHED(2, -4)) << 0));
// desc[31] = (uchar)(((SMOOTHED(1, -10) < SMOOTHED(-1, 2)) << 7) + ((SMOOTHED(8, -7) < SMOOTHED(-6, 18)) << 6) + ((SMOOTHED(9, 12) < SMOOTHED(-7, -23)) << 5) + ((SMOOTHED(8, -6) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-9, 6) < SMOOTHED(-12, -7)) << 3) + ((SMOOTHED(-1, -2) < SMOOTHED(-7, 2)) << 2) + ((SMOOTHED(9, 9) < SMOOTHED(7, 15)) << 1) + ((SMOOTHED(6, 2) < SMOOTHED(-6, 6)) << 0));
//#undef SMOOTHED
// }
//}
| 685ea60f402e66008421556e4860bdea22847e79.cu | #include "MyCudaUtils.h"
// A region of interest which contains a rectangle (and is constructed with a rectangle).
// Turns out this is really only used for the keypoint near border removal, as a quick check of
// std::remove_if in the runByImageBorder below says that a predicate is a required input.
// The predicate is a function (or in this case the operator() of the object) which returns
// boolean true/false when called with a single element from the vector as to whether it
// should be removed or not. remove_if internally calls the RoiPredicate operator on
// each element of the keypoint vector; the predicate returns true when the keypoint
// falls outside the allowed rectangle. remove_if shifts the keypoints to keep to the
// front and returns an iterator to the new logical end, which std::vector::erase then
// uses to trim off the unwanted tail. Nifty
struct RoiPredicate
{
RoiPredicate(const cv::Rect& _r) : r(_r)
{}
bool operator()(const cv::KeyPoint& keyPt) const
{
return !r.contains(keyPt.pt);
}
cv::Rect r;
};
// Function removes keypoints near the border of the image within a particular border size
// This shouldn't be too much of a problem, I believe that STAR doesn't find them too close either
void runByImageBorder(std::vector<cv::KeyPoint>& keypoints, cv::Size imageSize, int borderSize)
{
if (borderSize > 0)
{
if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
keypoints.clear();
else
keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
RoiPredicate(cv::Rect(cv::Point(borderSize, borderSize),
cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
keypoints.end());
}
}
// Device equivalent function for computing the smoothed function
// [in] Ptr to the integral image
// [in] The x and y coordinates of the keypoint the current CUDA thread is operating on
// [in] The relative position around this keypoint we are querying as a point for our Brief binary descriptor
// [in] The stride of the integral image so we can access elements without having nice row, column matrix
__device__ int smoothedSumDevice(int* intImage, int pty, int ptx, int rely, int relx, int stride) {
const int KERNEL_SIZE = 9;
const int HALF_KERNEL = KERNEL_SIZE / 2;
int img_y = (int)(pty + 0.5) + rely;
int img_x = (int)(ptx + 0.5) + relx;
return intImage[(img_y + HALF_KERNEL + 1)*stride + img_x + HALF_KERNEL + 1]
- intImage[(img_y + HALF_KERNEL + 1)*stride + img_x - HALF_KERNEL]
- intImage[(img_y - HALF_KERNEL)*stride + img_x + HALF_KERNEL + 1]
+ intImage[(img_y - HALF_KERNEL)*stride + img_x - HALF_KERNEL];
// Smooths by computing a box filter - average of all the surrounding pixels
// which of course requires their sum. We compute this efficiently by constructing the
// integral image and taking the four corner points around our kernel. Here we use a
// kernel size of 9 - i.e. we smooth over a window 9 pix across to get the intensity value
}
// Kernel for computing the descriptor for a single keypoint.
// We could probably be more efficient with global memory accesses because there's a lot per thread
// [in] A pointer to the row-major stored integral image data
// [in] The stride of the integral image in pixels (also bytes for a uchar image)
// [in] The set of keypoints laid out with all x-coords in first row, all y-coords in second row
// [in] The number of points N in the set of keypoints
// [out] The 256bit descriptor array, of size 32*N bytes
__global__ void pixelTest32Kernel(int* intImage, int imStride, float* kps, int num_pts, unsigned char* descriptors) {
int pt_idx = threadIdx.x + blockDim.x * blockIdx.x;
if (!(pt_idx < num_pts)) // Check thread index is valid
return;
int ptx = kps[pt_idx]; // X-coords on first row of matrix
int pty = kps[pt_idx + num_pts]; // Y-coords on second row
unsigned char* desc = descriptors + (32 * pt_idx); // Where to put the descriptor for this keypoint in output array
#define DEVSMOOTHED(y,x) smoothedSumDevice(intImage, pty, ptx, y, x, imStride)
desc[0] = (uchar)(((DEVSMOOTHED(-2, -1) < DEVSMOOTHED(7, -1)) << 7) + ((DEVSMOOTHED(-14, -1) < DEVSMOOTHED(-3, 3)) << 6) + ((DEVSMOOTHED(1, -2) < DEVSMOOTHED(11, 2)) << 5) + ((DEVSMOOTHED(1, 6) < DEVSMOOTHED(-10, -7)) << 4) + ((DEVSMOOTHED(13, 2) < DEVSMOOTHED(-1, 0)) << 3) + ((DEVSMOOTHED(-14, 5) < DEVSMOOTHED(5, -3)) << 2) + ((DEVSMOOTHED(-2, 8) < DEVSMOOTHED(2, 4)) << 1) + ((DEVSMOOTHED(-11, 8) < DEVSMOOTHED(-15, 5)) << 0));
desc[1] = (uchar)(((DEVSMOOTHED(-6, -23) < DEVSMOOTHED(8, -9)) << 7) + ((DEVSMOOTHED(-12, 6) < DEVSMOOTHED(-10, 8)) << 6) + ((DEVSMOOTHED(-3, -1) < DEVSMOOTHED(8, 1)) << 5) + ((DEVSMOOTHED(3, 6) < DEVSMOOTHED(5, 6)) << 4) + ((DEVSMOOTHED(-7, -6) < DEVSMOOTHED(5, -5)) << 3) + ((DEVSMOOTHED(22, -2) < DEVSMOOTHED(-11, -8)) << 2) + ((DEVSMOOTHED(14, 7) < DEVSMOOTHED(8, 5)) << 1) + ((DEVSMOOTHED(-1, 14) < DEVSMOOTHED(-5, -14)) << 0));
desc[2] = (uchar)(((DEVSMOOTHED(-14, 9) < DEVSMOOTHED(2, 0)) << 7) + ((DEVSMOOTHED(7, -3) < DEVSMOOTHED(22, 6)) << 6) + ((DEVSMOOTHED(-6, 6) < DEVSMOOTHED(-8, -5)) << 5) + ((DEVSMOOTHED(-5, 9) < DEVSMOOTHED(7, -1)) << 4) + ((DEVSMOOTHED(-3, -7) < DEVSMOOTHED(-10, -18)) << 3) + ((DEVSMOOTHED(4, -5) < DEVSMOOTHED(0, 11)) << 2) + ((DEVSMOOTHED(2, 3) < DEVSMOOTHED(9, 10)) << 1) + ((DEVSMOOTHED(-10, 3) < DEVSMOOTHED(4, 9)) << 0));
desc[3] = (uchar)(((DEVSMOOTHED(0, 12) < DEVSMOOTHED(-3, 19)) << 7) + ((DEVSMOOTHED(1, 15) < DEVSMOOTHED(-11, -5)) << 6) + ((DEVSMOOTHED(14, -1) < DEVSMOOTHED(7, 8)) << 5) + ((DEVSMOOTHED(7, -23) < DEVSMOOTHED(-5, 5)) << 4) + ((DEVSMOOTHED(0, -6) < DEVSMOOTHED(-10, 17)) << 3) + ((DEVSMOOTHED(13, -4) < DEVSMOOTHED(-3, -4)) << 2) + ((DEVSMOOTHED(-12, 1) < DEVSMOOTHED(-12, 2)) << 1) + ((DEVSMOOTHED(0, 8) < DEVSMOOTHED(3, 22)) << 0));
desc[4] = (uchar)(((DEVSMOOTHED(-13, 13) < DEVSMOOTHED(3, -1)) << 7) + ((DEVSMOOTHED(-16, 17) < DEVSMOOTHED(6, 10)) << 6) + ((DEVSMOOTHED(7, 15) < DEVSMOOTHED(-5, 0)) << 5) + ((DEVSMOOTHED(2, -12) < DEVSMOOTHED(19, -2)) << 4) + ((DEVSMOOTHED(3, -6) < DEVSMOOTHED(-4, -15)) << 3) + ((DEVSMOOTHED(8, 3) < DEVSMOOTHED(0, 14)) << 2) + ((DEVSMOOTHED(4, -11) < DEVSMOOTHED(5, 5)) << 1) + ((DEVSMOOTHED(11, -7) < DEVSMOOTHED(7, 1)) << 0));
desc[5] = (uchar)(((DEVSMOOTHED(6, 12) < DEVSMOOTHED(21, 3)) << 7) + ((DEVSMOOTHED(-3, 2) < DEVSMOOTHED(14, 1)) << 6) + ((DEVSMOOTHED(5, 1) < DEVSMOOTHED(-5, 11)) << 5) + ((DEVSMOOTHED(3, -17) < DEVSMOOTHED(-6, 2)) << 4) + ((DEVSMOOTHED(6, 8) < DEVSMOOTHED(5, -10)) << 3) + ((DEVSMOOTHED(-14, -2) < DEVSMOOTHED(0, 4)) << 2) + ((DEVSMOOTHED(5, -7) < DEVSMOOTHED(-6, 5)) << 1) + ((DEVSMOOTHED(10, 4) < DEVSMOOTHED(4, -7)) << 0));
desc[6] = (uchar)(((DEVSMOOTHED(22, 0) < DEVSMOOTHED(7, -18)) << 7) + ((DEVSMOOTHED(-1, -3) < DEVSMOOTHED(0, 18)) << 6) + ((DEVSMOOTHED(-4, 22) < DEVSMOOTHED(-5, 3)) << 5) + ((DEVSMOOTHED(1, -7) < DEVSMOOTHED(2, -3)) << 4) + ((DEVSMOOTHED(19, -20) < DEVSMOOTHED(17, -2)) << 3) + ((DEVSMOOTHED(3, -10) < DEVSMOOTHED(-8, 24)) << 2) + ((DEVSMOOTHED(-5, -14) < DEVSMOOTHED(7, 5)) << 1) + ((DEVSMOOTHED(-2, 12) < DEVSMOOTHED(-4, -15)) << 0));
desc[7] = (uchar)(((DEVSMOOTHED(4, 12) < DEVSMOOTHED(0, -19)) << 7) + ((DEVSMOOTHED(20, 13) < DEVSMOOTHED(3, 5)) << 6) + ((DEVSMOOTHED(-8, -12) < DEVSMOOTHED(5, 0)) << 5) + ((DEVSMOOTHED(-5, 6) < DEVSMOOTHED(-7, -11)) << 4) + ((DEVSMOOTHED(6, -11) < DEVSMOOTHED(-3, -22)) << 3) + ((DEVSMOOTHED(15, 4) < DEVSMOOTHED(10, 1)) << 2) + ((DEVSMOOTHED(-7, -4) < DEVSMOOTHED(15, -6)) << 1) + ((DEVSMOOTHED(5, 10) < DEVSMOOTHED(0, 24)) << 0));
desc[8] = (uchar)(((DEVSMOOTHED(3, 6) < DEVSMOOTHED(22, -2)) << 7) + ((DEVSMOOTHED(-13, 14) < DEVSMOOTHED(4, -4)) << 6) + ((DEVSMOOTHED(-13, 8) < DEVSMOOTHED(-18, -22)) << 5) + ((DEVSMOOTHED(-1, -1) < DEVSMOOTHED(-7, 3)) << 4) + ((DEVSMOOTHED(-19, -12) < DEVSMOOTHED(4, 3)) << 3) + ((DEVSMOOTHED(8, 10) < DEVSMOOTHED(13, -2)) << 2) + ((DEVSMOOTHED(-6, -1) < DEVSMOOTHED(-6, -5)) << 1) + ((DEVSMOOTHED(2, -21) < DEVSMOOTHED(-3, 2)) << 0));
desc[9] = (uchar)(((DEVSMOOTHED(4, -7) < DEVSMOOTHED(0, 16)) << 7) + ((DEVSMOOTHED(-6, -5) < DEVSMOOTHED(-12, -1)) << 6) + ((DEVSMOOTHED(1, -1) < DEVSMOOTHED(9, 18)) << 5) + ((DEVSMOOTHED(-7, 10) < DEVSMOOTHED(-11, 6)) << 4) + ((DEVSMOOTHED(4, 3) < DEVSMOOTHED(19, -7)) << 3) + ((DEVSMOOTHED(-18, 5) < DEVSMOOTHED(-4, 5)) << 2) + ((DEVSMOOTHED(4, 0) < DEVSMOOTHED(-20, 4)) << 1) + ((DEVSMOOTHED(7, -11) < DEVSMOOTHED(18, 12)) << 0));
desc[10] = (uchar)(((DEVSMOOTHED(-20, 17) < DEVSMOOTHED(-18, 7)) << 7) + ((DEVSMOOTHED(2, 15) < DEVSMOOTHED(19, -11)) << 6) + ((DEVSMOOTHED(-18, 6) < DEVSMOOTHED(-7, 3)) << 5) + ((DEVSMOOTHED(-4, 1) < DEVSMOOTHED(-14, 13)) << 4) + ((DEVSMOOTHED(17, 3) < DEVSMOOTHED(2, -8)) << 3) + ((DEVSMOOTHED(-7, 2) < DEVSMOOTHED(1, 6)) << 2) + ((DEVSMOOTHED(17, -9) < DEVSMOOTHED(-2, 8)) << 1) + ((DEVSMOOTHED(-8, -6) < DEVSMOOTHED(-1, 12)) << 0));
desc[11] = (uchar)(((DEVSMOOTHED(-2, 4) < DEVSMOOTHED(-1, 6)) << 7) + ((DEVSMOOTHED(-2, 7) < DEVSMOOTHED(6, 8)) << 6) + ((DEVSMOOTHED(-8, -1) < DEVSMOOTHED(-7, -9)) << 5) + ((DEVSMOOTHED(8, -9) < DEVSMOOTHED(15, 0)) << 4) + ((DEVSMOOTHED(0, 22) < DEVSMOOTHED(-4, -15)) << 3) + ((DEVSMOOTHED(-14, -1) < DEVSMOOTHED(3, -2)) << 2) + ((DEVSMOOTHED(-7, -4) < DEVSMOOTHED(17, -7)) << 1) + ((DEVSMOOTHED(-8, -2) < DEVSMOOTHED(9, -4)) << 0));
desc[12] = (uchar)(((DEVSMOOTHED(5, -7) < DEVSMOOTHED(7, 7)) << 7) + ((DEVSMOOTHED(-5, 13) < DEVSMOOTHED(-8, 11)) << 6) + ((DEVSMOOTHED(11, -4) < DEVSMOOTHED(0, 8)) << 5) + ((DEVSMOOTHED(5, -11) < DEVSMOOTHED(-9, -6)) << 4) + ((DEVSMOOTHED(2, -6) < DEVSMOOTHED(3, -20)) << 3) + ((DEVSMOOTHED(-6, 2) < DEVSMOOTHED(6, 10)) << 2) + ((DEVSMOOTHED(-6, -6) < DEVSMOOTHED(-15, 7)) << 1) + ((DEVSMOOTHED(-6, -3) < DEVSMOOTHED(2, 1)) << 0));
desc[13] = (uchar)(((DEVSMOOTHED(11, 0) < DEVSMOOTHED(-3, 2)) << 7) + ((DEVSMOOTHED(7, -12) < DEVSMOOTHED(14, 5)) << 6) + ((DEVSMOOTHED(0, -7) < DEVSMOOTHED(-1, -1)) << 5) + ((DEVSMOOTHED(-16, 0) < DEVSMOOTHED(6, 8)) << 4) + ((DEVSMOOTHED(22, 11) < DEVSMOOTHED(0, -3)) << 3) + ((DEVSMOOTHED(19, 0) < DEVSMOOTHED(5, -17)) << 2) + ((DEVSMOOTHED(-23, -14) < DEVSMOOTHED(-13, -19)) << 1) + ((DEVSMOOTHED(-8, 10) < DEVSMOOTHED(-11, -2)) << 0));
desc[14] = (uchar)(((DEVSMOOTHED(-11, 6) < DEVSMOOTHED(-10, 13)) << 7) + ((DEVSMOOTHED(1, -7) < DEVSMOOTHED(14, 0)) << 6) + ((DEVSMOOTHED(-12, 1) < DEVSMOOTHED(-5, -5)) << 5) + ((DEVSMOOTHED(4, 7) < DEVSMOOTHED(8, -1)) << 4) + ((DEVSMOOTHED(-1, -5) < DEVSMOOTHED(15, 2)) << 3) + ((DEVSMOOTHED(-3, -1) < DEVSMOOTHED(7, -10)) << 2) + ((DEVSMOOTHED(3, -6) < DEVSMOOTHED(10, -18)) << 1) + ((DEVSMOOTHED(-7, -13) < DEVSMOOTHED(-13, 10)) << 0));
desc[15] = (uchar)(((DEVSMOOTHED(1, -1) < DEVSMOOTHED(13, -10)) << 7) + ((DEVSMOOTHED(-19, 14) < DEVSMOOTHED(8, -14)) << 6) + ((DEVSMOOTHED(-4, -13) < DEVSMOOTHED(7, 1)) << 5) + ((DEVSMOOTHED(1, -2) < DEVSMOOTHED(12, -7)) << 4) + ((DEVSMOOTHED(3, -5) < DEVSMOOTHED(1, -5)) << 3) + ((DEVSMOOTHED(-2, -2) < DEVSMOOTHED(8, -10)) << 2) + ((DEVSMOOTHED(2, 14) < DEVSMOOTHED(8, 7)) << 1) + ((DEVSMOOTHED(3, 9) < DEVSMOOTHED(8, 2)) << 0));
desc[16] = (uchar)(((DEVSMOOTHED(-9, 1) < DEVSMOOTHED(-18, 0)) << 7) + ((DEVSMOOTHED(4, 0) < DEVSMOOTHED(1, 12)) << 6) + ((DEVSMOOTHED(0, 9) < DEVSMOOTHED(-14, -10)) << 5) + ((DEVSMOOTHED(-13, -9) < DEVSMOOTHED(-2, 6)) << 4) + ((DEVSMOOTHED(1, 5) < DEVSMOOTHED(10, 10)) << 3) + ((DEVSMOOTHED(-3, -6) < DEVSMOOTHED(-16, -5)) << 2) + ((DEVSMOOTHED(11, 6) < DEVSMOOTHED(-5, 0)) << 1) + ((DEVSMOOTHED(-23, 10) < DEVSMOOTHED(1, 2)) << 0));
desc[17] = (uchar)(((DEVSMOOTHED(13, -5) < DEVSMOOTHED(-3, 9)) << 7) + ((DEVSMOOTHED(-4, -1) < DEVSMOOTHED(-13, -5)) << 6) + ((DEVSMOOTHED(10, 13) < DEVSMOOTHED(-11, 8)) << 5) + ((DEVSMOOTHED(19, 20) < DEVSMOOTHED(-9, 2)) << 4) + ((DEVSMOOTHED(4, -8) < DEVSMOOTHED(0, -9)) << 3) + ((DEVSMOOTHED(-14, 10) < DEVSMOOTHED(15, 19)) << 2) + ((DEVSMOOTHED(-14, -12) < DEVSMOOTHED(-10, -3)) << 1) + ((DEVSMOOTHED(-23, -3) < DEVSMOOTHED(17, -2)) << 0));
desc[18] = (uchar)(((DEVSMOOTHED(-3, -11) < DEVSMOOTHED(6, -14)) << 7) + ((DEVSMOOTHED(19, -2) < DEVSMOOTHED(-4, 2)) << 6) + ((DEVSMOOTHED(-5, 5) < DEVSMOOTHED(3, -13)) << 5) + ((DEVSMOOTHED(2, -2) < DEVSMOOTHED(-5, 4)) << 4) + ((DEVSMOOTHED(17, 4) < DEVSMOOTHED(17, -11)) << 3) + ((DEVSMOOTHED(-7, -2) < DEVSMOOTHED(1, 23)) << 2) + ((DEVSMOOTHED(8, 13) < DEVSMOOTHED(1, -16)) << 1) + ((DEVSMOOTHED(-13, -5) < DEVSMOOTHED(1, -17)) << 0));
desc[19] = (uchar)(((DEVSMOOTHED(4, 6) < DEVSMOOTHED(-8, -3)) << 7) + ((DEVSMOOTHED(-5, -9) < DEVSMOOTHED(-2, -10)) << 6) + ((DEVSMOOTHED(-9, 0) < DEVSMOOTHED(-7, -2)) << 5) + ((DEVSMOOTHED(5, 0) < DEVSMOOTHED(5, 2)) << 4) + ((DEVSMOOTHED(-4, -16) < DEVSMOOTHED(6, 3)) << 3) + ((DEVSMOOTHED(2, -15) < DEVSMOOTHED(-2, 12)) << 2) + ((DEVSMOOTHED(4, -1) < DEVSMOOTHED(6, 2)) << 1) + ((DEVSMOOTHED(1, 1) < DEVSMOOTHED(-2, -8)) << 0));
desc[20] = (uchar)(((DEVSMOOTHED(-2, 12) < DEVSMOOTHED(-5, -2)) << 7) + ((DEVSMOOTHED(-8, 8) < DEVSMOOTHED(-9, 9)) << 6) + ((DEVSMOOTHED(2, -10) < DEVSMOOTHED(3, 1)) << 5) + ((DEVSMOOTHED(-4, 10) < DEVSMOOTHED(-9, 4)) << 4) + ((DEVSMOOTHED(6, 12) < DEVSMOOTHED(2, 5)) << 3) + ((DEVSMOOTHED(-3, -8) < DEVSMOOTHED(0, 5)) << 2) + ((DEVSMOOTHED(-13, 1) < DEVSMOOTHED(-7, 2)) << 1) + ((DEVSMOOTHED(-1, -10) < DEVSMOOTHED(7, -18)) << 0));
desc[21] = (uchar)(((DEVSMOOTHED(-1, 8) < DEVSMOOTHED(-9, -10)) << 7) + ((DEVSMOOTHED(-23, -1) < DEVSMOOTHED(6, 2)) << 6) + ((DEVSMOOTHED(-5, -3) < DEVSMOOTHED(3, 2)) << 5) + ((DEVSMOOTHED(0, 11) < DEVSMOOTHED(-4, -7)) << 4) + ((DEVSMOOTHED(15, 2) < DEVSMOOTHED(-10, -3)) << 3) + ((DEVSMOOTHED(-20, -8) < DEVSMOOTHED(-13, 3)) << 2) + ((DEVSMOOTHED(-19, -12) < DEVSMOOTHED(5, -11)) << 1) + ((DEVSMOOTHED(-17, -13) < DEVSMOOTHED(-3, 2)) << 0));
desc[22] = (uchar)(((DEVSMOOTHED(7, 4) < DEVSMOOTHED(-12, 0)) << 7) + ((DEVSMOOTHED(5, -1) < DEVSMOOTHED(-14, -6)) << 6) + ((DEVSMOOTHED(-4, 11) < DEVSMOOTHED(0, -4)) << 5) + ((DEVSMOOTHED(3, 10) < DEVSMOOTHED(7, -3)) << 4) + ((DEVSMOOTHED(13, 21) < DEVSMOOTHED(-11, 6)) << 3) + ((DEVSMOOTHED(-12, 24) < DEVSMOOTHED(-7, -4)) << 2) + ((DEVSMOOTHED(4, 16) < DEVSMOOTHED(3, -14)) << 1) + ((DEVSMOOTHED(-3, 5) < DEVSMOOTHED(-7, -12)) << 0));
desc[23] = (uchar)(((DEVSMOOTHED(0, -4) < DEVSMOOTHED(7, -5)) << 7) + ((DEVSMOOTHED(-17, -9) < DEVSMOOTHED(13, -7)) << 6) + ((DEVSMOOTHED(22, -6) < DEVSMOOTHED(-11, 5)) << 5) + ((DEVSMOOTHED(2, -8) < DEVSMOOTHED(23, -11)) << 4) + ((DEVSMOOTHED(7, -10) < DEVSMOOTHED(-1, 14)) << 3) + ((DEVSMOOTHED(-3, -10) < DEVSMOOTHED(8, 3)) << 2) + ((DEVSMOOTHED(-13, 1) < DEVSMOOTHED(-6, 0)) << 1) + ((DEVSMOOTHED(-7, -21) < DEVSMOOTHED(6, -14)) << 0));
desc[24] = (uchar)(((DEVSMOOTHED(18, 19) < DEVSMOOTHED(-4, -6)) << 7) + ((DEVSMOOTHED(10, 7) < DEVSMOOTHED(-1, -4)) << 6) + ((DEVSMOOTHED(-1, 21) < DEVSMOOTHED(1, -5)) << 5) + ((DEVSMOOTHED(-10, 6) < DEVSMOOTHED(-11, -2)) << 4) + ((DEVSMOOTHED(18, -3) < DEVSMOOTHED(-1, 7)) << 3) + ((DEVSMOOTHED(-3, -9) < DEVSMOOTHED(-5, 10)) << 2) + ((DEVSMOOTHED(-13, 14) < DEVSMOOTHED(17, -3)) << 1) + ((DEVSMOOTHED(11, -19) < DEVSMOOTHED(-1, -18)) << 0));
desc[25] = (uchar)(((DEVSMOOTHED(8, -2) < DEVSMOOTHED(-18, -23)) << 7) + ((DEVSMOOTHED(0, -5) < DEVSMOOTHED(-2, -9)) << 6) + ((DEVSMOOTHED(-4, -11) < DEVSMOOTHED(2, -8)) << 5) + ((DEVSMOOTHED(14, 6) < DEVSMOOTHED(-3, -6)) << 4) + ((DEVSMOOTHED(-3, 0) < DEVSMOOTHED(-15, 0)) << 3) + ((DEVSMOOTHED(-9, 4) < DEVSMOOTHED(-15, -9)) << 2) + ((DEVSMOOTHED(-1, 11) < DEVSMOOTHED(3, 11)) << 1) + ((DEVSMOOTHED(-10, -16) < DEVSMOOTHED(-7, 7)) << 0));
desc[26] = (uchar)(((DEVSMOOTHED(-2, -10) < DEVSMOOTHED(-10, -2)) << 7) + ((DEVSMOOTHED(-5, -3) < DEVSMOOTHED(5, -23)) << 6) + ((DEVSMOOTHED(13, -8) < DEVSMOOTHED(-15, -11)) << 5) + ((DEVSMOOTHED(-15, 11) < DEVSMOOTHED(6, -6)) << 4) + ((DEVSMOOTHED(-16, -3) < DEVSMOOTHED(-2, 2)) << 3) + ((DEVSMOOTHED(6, 12) < DEVSMOOTHED(-16, 24)) << 2) + ((DEVSMOOTHED(-10, 0) < DEVSMOOTHED(8, 11)) << 1) + ((DEVSMOOTHED(-7, 7) < DEVSMOOTHED(-19, -7)) << 0));
desc[27] = (uchar)(((DEVSMOOTHED(5, 16) < DEVSMOOTHED(9, -3)) << 7) + ((DEVSMOOTHED(9, 7) < DEVSMOOTHED(-7, -16)) << 6) + ((DEVSMOOTHED(3, 2) < DEVSMOOTHED(-10, 9)) << 5) + ((DEVSMOOTHED(21, 1) < DEVSMOOTHED(8, 7)) << 4) + ((DEVSMOOTHED(7, 0) < DEVSMOOTHED(1, 17)) << 3) + ((DEVSMOOTHED(-8, 12) < DEVSMOOTHED(9, 6)) << 2) + ((DEVSMOOTHED(11, -7) < DEVSMOOTHED(-8, -6)) << 1) + ((DEVSMOOTHED(19, 0) < DEVSMOOTHED(9, 3)) << 0));
desc[28] = (uchar)(((DEVSMOOTHED(1, -7) < DEVSMOOTHED(-5, -11)) << 7) + ((DEVSMOOTHED(0, 8) < DEVSMOOTHED(-2, 14)) << 6) + ((DEVSMOOTHED(12, -2) < DEVSMOOTHED(-15, -6)) << 5) + ((DEVSMOOTHED(4, 12) < DEVSMOOTHED(0, -21)) << 4) + ((DEVSMOOTHED(17, -4) < DEVSMOOTHED(-6, -7)) << 3) + ((DEVSMOOTHED(-10, -9) < DEVSMOOTHED(-14, -7)) << 2) + ((DEVSMOOTHED(-15, -10) < DEVSMOOTHED(-15, -14)) << 1) + ((DEVSMOOTHED(-7, -5) < DEVSMOOTHED(5, -12)) << 0));
desc[29] = (uchar)(((DEVSMOOTHED(-4, 0) < DEVSMOOTHED(15, -4)) << 7) + ((DEVSMOOTHED(5, 2) < DEVSMOOTHED(-6, -23)) << 6) + ((DEVSMOOTHED(-4, -21) < DEVSMOOTHED(-6, 4)) << 5) + ((DEVSMOOTHED(-10, 5) < DEVSMOOTHED(-15, 6)) << 4) + ((DEVSMOOTHED(4, -3) < DEVSMOOTHED(-1, 5)) << 3) + ((DEVSMOOTHED(-4, 19) < DEVSMOOTHED(-23, -4)) << 2) + ((DEVSMOOTHED(-4, 17) < DEVSMOOTHED(13, -11)) << 1) + ((DEVSMOOTHED(1, 12) < DEVSMOOTHED(4, -14)) << 0));
desc[30] = (uchar)(((DEVSMOOTHED(-11, -6) < DEVSMOOTHED(-20, 10)) << 7) + ((DEVSMOOTHED(4, 5) < DEVSMOOTHED(3, 20)) << 6) + ((DEVSMOOTHED(-8, -20) < DEVSMOOTHED(3, 1)) << 5) + ((DEVSMOOTHED(-19, 9) < DEVSMOOTHED(9, -3)) << 4) + ((DEVSMOOTHED(18, 15) < DEVSMOOTHED(11, -4)) << 3) + ((DEVSMOOTHED(12, 16) < DEVSMOOTHED(8, 7)) << 2) + ((DEVSMOOTHED(-14, -8) < DEVSMOOTHED(-3, 9)) << 1) + ((DEVSMOOTHED(-6, 0) < DEVSMOOTHED(2, -4)) << 0));
desc[31] = (uchar)(((DEVSMOOTHED(1, -10) < DEVSMOOTHED(-1, 2)) << 7) + ((DEVSMOOTHED(8, -7) < DEVSMOOTHED(-6, 18)) << 6) + ((DEVSMOOTHED(9, 12) < DEVSMOOTHED(-7, -23)) << 5) + ((DEVSMOOTHED(8, -6) < DEVSMOOTHED(5, 2)) << 4) + ((DEVSMOOTHED(-9, 6) < DEVSMOOTHED(-12, -7)) << 3) + ((DEVSMOOTHED(-1, -2) < DEVSMOOTHED(-7, 2)) << 2) + ((DEVSMOOTHED(9, 9) < DEVSMOOTHED(7, 15)) << 1) + ((DEVSMOOTHED(6, 2) < DEVSMOOTHED(-6, 6)) << 0));
#undef DEVSMOOTHED
}
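// Note: each desc[i] byte above packs eight binary tests; every test compares the
// box-filtered intensities at two fixed offsets around the keypoint and contributes
// one bit, so the 32 bytes encode 256 pairwise comparisons per keypoint.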
cudaError_t MyCudaUtils::ComputeBriefDescriptors(cv::Mat& image, std::vector<cv::KeyPoint>& kps, cv::Mat& desc, int descSize)
{
if (descSize != 32) {
std::cout << "Descriptor sizes other than 32 bytes currently not implemented" << std::endl;
std::cout << "Press q to exit the program or any other key to continue: ";
char c;
std::cin >> c;
if ('q' == c)
exit(EXIT_FAILURE);
}
// Convert to greyscale if required
cv::Mat grayImage = image;
if (image.type() != CV_8U) cv::cvtColor(image, grayImage, CV_BGR2GRAY);
// Compute the integral image for smoothing
cv::Mat intImage;
cv::integral(grayImage, intImage, CV_32S);
//Remove keypoints very close to the border
static const int PATCH_SIZE = 48; // Size of patch used to compute descriptors - a 48 x 48 pixel window, so test offsets reach +/-24 pixels from the keypoint
static const int KERNEL_SIZE = 9; // Size of the box filter applied at each test point - a 9 x 9 pixel window
runByImageBorder(kps, image.size(), PATCH_SIZE / 2 + KERNEL_SIZE / 2); // We don't want our patch or kernel to overflow to the edge so offset by both of them
// Initialise list of descriptors to zero
desc = cv::Mat::zeros((int)kps.size(), descSize, CV_8U);
int knp = 2; // Number of params describing a keypoint
int imSize = intImage.rows * intImage.cols;
// Allocate memory
int* dev_intImage;
float* dev_kps;
unsigned char* dev_desc;
try {
CUDA_CHECK(cudaSetDevice(0)); // CUDA_CHECK is assumed to throw the cudaError_t on failure, so errors are handled in the catch block below
CUDA_CHECK(cudaMalloc((void**)&dev_intImage, imSize * sizeof(int)));
CUDA_CHECK(cudaMalloc((void**)&dev_kps, kps.size() * knp * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&dev_desc, kps.size() * descSize * sizeof(unsigned char)));
// Copy the integral image and initialise descriptors to zero
CUDA_CHECK(cudaMemcpy(dev_intImage, intImage.data, imSize * sizeof(int), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(dev_desc, desc.data, kps.size() * descSize * sizeof(unsigned char), cudaMemcpyHostToDevice));
// Copy the keypoints into an array and then onto the device
int num_kps = kps.size();
float* kpsArray = new float[num_kps * knp]; // allocated with new[], so it must be released with delete[] below
float* kpsXArray = kpsArray;
float* kpsYArray = kpsArray + num_kps;
for (int i = 0; i < num_kps; i++) {
kpsXArray[i] = kps[i].pt.x;
kpsYArray[i] = kps[i].pt.y;
}
CUDA_CHECK(cudaMemcpy(dev_kps, kpsArray, num_kps * knp * sizeof(float), cudaMemcpyHostToDevice));
delete[] kpsArray;
// Launch the Kernel
int blocks = cvCeil(num_kps / 128.0f); // round up so that every keypoint is covered by a thread
pixelTest32Kernel << < blocks, 128 >> >(dev_intImage, intImage.cols, dev_kps, num_kps, dev_desc);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
// Download the output
CUDA_CHECK(cudaMemcpy(desc.data, dev_desc, kps.size() * descSize * sizeof(unsigned char), cudaMemcpyDeviceToHost));
throw (cudaSuccess); // deliberately jump to the catch block so the device buffers are freed on success as well as on failure
}
catch (cudaError_t cudaStatus) {
cudaFree(dev_intImage);
cudaFree(dev_kps);
cudaFree(dev_desc);
return cudaStatus;
}
}
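// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original interface; the detector
// producing the keypoints is assumed to have run already, and the parameter
// values are arbitrary examples).
// ---------------------------------------------------------------------------
static void exampleComputeBrief(cv::Mat& frame, std::vector<cv::KeyPoint>& keypoints)
{
cv::Mat descriptors; // one 32-byte row per keypoint that survives the border filter
cudaError_t status = MyCudaUtils::ComputeBriefDescriptors(frame, keypoints, descriptors, 32);
if (status != cudaSuccess) {
std::cout << "BRIEF descriptor computation failed: " << cudaGetErrorString(status) << std::endl;
return;
}
// Keypoints too close to the image border are removed in place by the call above,
// so descriptors.rows matches the size of the filtered keypoints vector.
}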
/**************************************************************************************
* SERIAL BRIEF
**************************************************************************************/
//// This computes the boxed filter smoothing of the image (which is a just an average
//// and so can be computed by addition and subtraction of appropriate corners in the
//// integral image)
//inline int smoothedSum(const cv::Mat& sum, const cv::KeyPoint& pt, int y, int x)
//{
// static const int KERNEL_SIZE = 9;
// static const int HALF_KERNEL = KERNEL_SIZE / 2;
//
// int img_y = (int)(pt.pt.y + 0.5) + y;
// int img_x = (int)(pt.pt.x + 0.5) + x; // Add 0.5 and cast to int automatically rounds up I believe assuming pt.pt.x is float (keypoints can have sub-pixel precision
// return sum.at<int>(img_y + HALF_KERNEL + 1, img_x + HALF_KERNEL + 1) // The four corners of the integral image
// - sum.at<int>(img_y + HALF_KERNEL + 1, img_x - HALF_KERNEL)
// - sum.at<int>(img_y - HALF_KERNEL, img_x + HALF_KERNEL + 1)
// + sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
//}
//// Computes the response with a 32byte descriptor of all the keypoints in the set
//// It iterates through all the keypoints, and then defines the SMOOTHED function
//// to be called with the given integral image and the current keypoint coordinates.
//static void pixelTests32(const cv::Mat& sum, const std::vector<cv::KeyPoint>& keypoints, cv::Mat& descriptors)
//{
// // So this would be nicely parallelisable, although I'm not sure how CUDA likes #defines, or whether we could even include the function in there.
// // Okay so we only need to mark smoothedSum as GPU function using the __device__ specifier to make sure that it can be accessed inside the Kernel
// // Anyway the point is we can have each thread doing one of these just fine since they are all identical - and all the other stuff is so much fluffing
// // around.
// for (int i = 0; i < (int)keypoints.size(); ++i)
// {
// uchar* desc = descriptors.ptr(i);
// const cv::KeyPoint& pt = keypoints[i];
//
//#define SMOOTHED(y,x) smoothedSum(sum, pt, y, x)
// // Eight binary comparisons contribute a single bit in the descriptor each byte, bit shifted into place in 1 of the 32 total bytes
// desc[0] = (uchar)(((SMOOTHED(-2, -1) < SMOOTHED(7, -1)) << 7) + ((SMOOTHED(-14, -1) < SMOOTHED(-3, 3)) << 6) + ((SMOOTHED(1, -2) < SMOOTHED(11, 2)) << 5) + ((SMOOTHED(1, 6) < SMOOTHED(-10, -7)) << 4) + ((SMOOTHED(13, 2) < SMOOTHED(-1, 0)) << 3) + ((SMOOTHED(-14, 5) < SMOOTHED(5, -3)) << 2) + ((SMOOTHED(-2, 8) < SMOOTHED(2, 4)) << 1) + ((SMOOTHED(-11, 8) < SMOOTHED(-15, 5)) << 0));
// desc[1] = (uchar)(((SMOOTHED(-6, -23) < SMOOTHED(8, -9)) << 7) + ((SMOOTHED(-12, 6) < SMOOTHED(-10, 8)) << 6) + ((SMOOTHED(-3, -1) < SMOOTHED(8, 1)) << 5) + ((SMOOTHED(3, 6) < SMOOTHED(5, 6)) << 4) + ((SMOOTHED(-7, -6) < SMOOTHED(5, -5)) << 3) + ((SMOOTHED(22, -2) < SMOOTHED(-11, -8)) << 2) + ((SMOOTHED(14, 7) < SMOOTHED(8, 5)) << 1) + ((SMOOTHED(-1, 14) < SMOOTHED(-5, -14)) << 0));
// desc[2] = (uchar)(((SMOOTHED(-14, 9) < SMOOTHED(2, 0)) << 7) + ((SMOOTHED(7, -3) < SMOOTHED(22, 6)) << 6) + ((SMOOTHED(-6, 6) < SMOOTHED(-8, -5)) << 5) + ((SMOOTHED(-5, 9) < SMOOTHED(7, -1)) << 4) + ((SMOOTHED(-3, -7) < SMOOTHED(-10, -18)) << 3) + ((SMOOTHED(4, -5) < SMOOTHED(0, 11)) << 2) + ((SMOOTHED(2, 3) < SMOOTHED(9, 10)) << 1) + ((SMOOTHED(-10, 3) < SMOOTHED(4, 9)) << 0));
// desc[3] = (uchar)(((SMOOTHED(0, 12) < SMOOTHED(-3, 19)) << 7) + ((SMOOTHED(1, 15) < SMOOTHED(-11, -5)) << 6) + ((SMOOTHED(14, -1) < SMOOTHED(7, 8)) << 5) + ((SMOOTHED(7, -23) < SMOOTHED(-5, 5)) << 4) + ((SMOOTHED(0, -6) < SMOOTHED(-10, 17)) << 3) + ((SMOOTHED(13, -4) < SMOOTHED(-3, -4)) << 2) + ((SMOOTHED(-12, 1) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(0, 8) < SMOOTHED(3, 22)) << 0));
// desc[4] = (uchar)(((SMOOTHED(-13, 13) < SMOOTHED(3, -1)) << 7) + ((SMOOTHED(-16, 17) < SMOOTHED(6, 10)) << 6) + ((SMOOTHED(7, 15) < SMOOTHED(-5, 0)) << 5) + ((SMOOTHED(2, -12) < SMOOTHED(19, -2)) << 4) + ((SMOOTHED(3, -6) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(8, 3) < SMOOTHED(0, 14)) << 2) + ((SMOOTHED(4, -11) < SMOOTHED(5, 5)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(7, 1)) << 0));
// desc[5] = (uchar)(((SMOOTHED(6, 12) < SMOOTHED(21, 3)) << 7) + ((SMOOTHED(-3, 2) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(5, 1) < SMOOTHED(-5, 11)) << 5) + ((SMOOTHED(3, -17) < SMOOTHED(-6, 2)) << 4) + ((SMOOTHED(6, 8) < SMOOTHED(5, -10)) << 3) + ((SMOOTHED(-14, -2) < SMOOTHED(0, 4)) << 2) + ((SMOOTHED(5, -7) < SMOOTHED(-6, 5)) << 1) + ((SMOOTHED(10, 4) < SMOOTHED(4, -7)) << 0));
// desc[6] = (uchar)(((SMOOTHED(22, 0) < SMOOTHED(7, -18)) << 7) + ((SMOOTHED(-1, -3) < SMOOTHED(0, 18)) << 6) + ((SMOOTHED(-4, 22) < SMOOTHED(-5, 3)) << 5) + ((SMOOTHED(1, -7) < SMOOTHED(2, -3)) << 4) + ((SMOOTHED(19, -20) < SMOOTHED(17, -2)) << 3) + ((SMOOTHED(3, -10) < SMOOTHED(-8, 24)) << 2) + ((SMOOTHED(-5, -14) < SMOOTHED(7, 5)) << 1) + ((SMOOTHED(-2, 12) < SMOOTHED(-4, -15)) << 0));
// desc[7] = (uchar)(((SMOOTHED(4, 12) < SMOOTHED(0, -19)) << 7) + ((SMOOTHED(20, 13) < SMOOTHED(3, 5)) << 6) + ((SMOOTHED(-8, -12) < SMOOTHED(5, 0)) << 5) + ((SMOOTHED(-5, 6) < SMOOTHED(-7, -11)) << 4) + ((SMOOTHED(6, -11) < SMOOTHED(-3, -22)) << 3) + ((SMOOTHED(15, 4) < SMOOTHED(10, 1)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(15, -6)) << 1) + ((SMOOTHED(5, 10) < SMOOTHED(0, 24)) << 0));
// desc[8] = (uchar)(((SMOOTHED(3, 6) < SMOOTHED(22, -2)) << 7) + ((SMOOTHED(-13, 14) < SMOOTHED(4, -4)) << 6) + ((SMOOTHED(-13, 8) < SMOOTHED(-18, -22)) << 5) + ((SMOOTHED(-1, -1) < SMOOTHED(-7, 3)) << 4) + ((SMOOTHED(-19, -12) < SMOOTHED(4, 3)) << 3) + ((SMOOTHED(8, 10) < SMOOTHED(13, -2)) << 2) + ((SMOOTHED(-6, -1) < SMOOTHED(-6, -5)) << 1) + ((SMOOTHED(2, -21) < SMOOTHED(-3, 2)) << 0));
// desc[9] = (uchar)(((SMOOTHED(4, -7) < SMOOTHED(0, 16)) << 7) + ((SMOOTHED(-6, -5) < SMOOTHED(-12, -1)) << 6) + ((SMOOTHED(1, -1) < SMOOTHED(9, 18)) << 5) + ((SMOOTHED(-7, 10) < SMOOTHED(-11, 6)) << 4) + ((SMOOTHED(4, 3) < SMOOTHED(19, -7)) << 3) + ((SMOOTHED(-18, 5) < SMOOTHED(-4, 5)) << 2) + ((SMOOTHED(4, 0) < SMOOTHED(-20, 4)) << 1) + ((SMOOTHED(7, -11) < SMOOTHED(18, 12)) << 0));
// desc[10] = (uchar)(((SMOOTHED(-20, 17) < SMOOTHED(-18, 7)) << 7) + ((SMOOTHED(2, 15) < SMOOTHED(19, -11)) << 6) + ((SMOOTHED(-18, 6) < SMOOTHED(-7, 3)) << 5) + ((SMOOTHED(-4, 1) < SMOOTHED(-14, 13)) << 4) + ((SMOOTHED(17, 3) < SMOOTHED(2, -8)) << 3) + ((SMOOTHED(-7, 2) < SMOOTHED(1, 6)) << 2) + ((SMOOTHED(17, -9) < SMOOTHED(-2, 8)) << 1) + ((SMOOTHED(-8, -6) < SMOOTHED(-1, 12)) << 0));
// desc[11] = (uchar)(((SMOOTHED(-2, 4) < SMOOTHED(-1, 6)) << 7) + ((SMOOTHED(-2, 7) < SMOOTHED(6, 8)) << 6) + ((SMOOTHED(-8, -1) < SMOOTHED(-7, -9)) << 5) + ((SMOOTHED(8, -9) < SMOOTHED(15, 0)) << 4) + ((SMOOTHED(0, 22) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(-14, -1) < SMOOTHED(3, -2)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(17, -7)) << 1) + ((SMOOTHED(-8, -2) < SMOOTHED(9, -4)) << 0));
// desc[12] = (uchar)(((SMOOTHED(5, -7) < SMOOTHED(7, 7)) << 7) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 11)) << 6) + ((SMOOTHED(11, -4) < SMOOTHED(0, 8)) << 5) + ((SMOOTHED(5, -11) < SMOOTHED(-9, -6)) << 4) + ((SMOOTHED(2, -6) < SMOOTHED(3, -20)) << 3) + ((SMOOTHED(-6, 2) < SMOOTHED(6, 10)) << 2) + ((SMOOTHED(-6, -6) < SMOOTHED(-15, 7)) << 1) + ((SMOOTHED(-6, -3) < SMOOTHED(2, 1)) << 0));
// desc[13] = (uchar)(((SMOOTHED(11, 0) < SMOOTHED(-3, 2)) << 7) + ((SMOOTHED(7, -12) < SMOOTHED(14, 5)) << 6) + ((SMOOTHED(0, -7) < SMOOTHED(-1, -1)) << 5) + ((SMOOTHED(-16, 0) < SMOOTHED(6, 8)) << 4) + ((SMOOTHED(22, 11) < SMOOTHED(0, -3)) << 3) + ((SMOOTHED(19, 0) < SMOOTHED(5, -17)) << 2) + ((SMOOTHED(-23, -14) < SMOOTHED(-13, -19)) << 1) + ((SMOOTHED(-8, 10) < SMOOTHED(-11, -2)) << 0));
// desc[14] = (uchar)(((SMOOTHED(-11, 6) < SMOOTHED(-10, 13)) << 7) + ((SMOOTHED(1, -7) < SMOOTHED(14, 0)) << 6) + ((SMOOTHED(-12, 1) < SMOOTHED(-5, -5)) << 5) + ((SMOOTHED(4, 7) < SMOOTHED(8, -1)) << 4) + ((SMOOTHED(-1, -5) < SMOOTHED(15, 2)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(7, -10)) << 2) + ((SMOOTHED(3, -6) < SMOOTHED(10, -18)) << 1) + ((SMOOTHED(-7, -13) < SMOOTHED(-13, 10)) << 0));
// desc[15] = (uchar)(((SMOOTHED(1, -1) < SMOOTHED(13, -10)) << 7) + ((SMOOTHED(-19, 14) < SMOOTHED(8, -14)) << 6) + ((SMOOTHED(-4, -13) < SMOOTHED(7, 1)) << 5) + ((SMOOTHED(1, -2) < SMOOTHED(12, -7)) << 4) + ((SMOOTHED(3, -5) < SMOOTHED(1, -5)) << 3) + ((SMOOTHED(-2, -2) < SMOOTHED(8, -10)) << 2) + ((SMOOTHED(2, 14) < SMOOTHED(8, 7)) << 1) + ((SMOOTHED(3, 9) < SMOOTHED(8, 2)) << 0));
// desc[16] = (uchar)(((SMOOTHED(-9, 1) < SMOOTHED(-18, 0)) << 7) + ((SMOOTHED(4, 0) < SMOOTHED(1, 12)) << 6) + ((SMOOTHED(0, 9) < SMOOTHED(-14, -10)) << 5) + ((SMOOTHED(-13, -9) < SMOOTHED(-2, 6)) << 4) + ((SMOOTHED(1, 5) < SMOOTHED(10, 10)) << 3) + ((SMOOTHED(-3, -6) < SMOOTHED(-16, -5)) << 2) + ((SMOOTHED(11, 6) < SMOOTHED(-5, 0)) << 1) + ((SMOOTHED(-23, 10) < SMOOTHED(1, 2)) << 0));
// desc[17] = (uchar)(((SMOOTHED(13, -5) < SMOOTHED(-3, 9)) << 7) + ((SMOOTHED(-4, -1) < SMOOTHED(-13, -5)) << 6) + ((SMOOTHED(10, 13) < SMOOTHED(-11, 8)) << 5) + ((SMOOTHED(19, 20) < SMOOTHED(-9, 2)) << 4) + ((SMOOTHED(4, -8) < SMOOTHED(0, -9)) << 3) + ((SMOOTHED(-14, 10) < SMOOTHED(15, 19)) << 2) + ((SMOOTHED(-14, -12) < SMOOTHED(-10, -3)) << 1) + ((SMOOTHED(-23, -3) < SMOOTHED(17, -2)) << 0));
// desc[18] = (uchar)(((SMOOTHED(-3, -11) < SMOOTHED(6, -14)) << 7) + ((SMOOTHED(19, -2) < SMOOTHED(-4, 2)) << 6) + ((SMOOTHED(-5, 5) < SMOOTHED(3, -13)) << 5) + ((SMOOTHED(2, -2) < SMOOTHED(-5, 4)) << 4) + ((SMOOTHED(17, 4) < SMOOTHED(17, -11)) << 3) + ((SMOOTHED(-7, -2) < SMOOTHED(1, 23)) << 2) + ((SMOOTHED(8, 13) < SMOOTHED(1, -16)) << 1) + ((SMOOTHED(-13, -5) < SMOOTHED(1, -17)) << 0));
// desc[19] = (uchar)(((SMOOTHED(4, 6) < SMOOTHED(-8, -3)) << 7) + ((SMOOTHED(-5, -9) < SMOOTHED(-2, -10)) << 6) + ((SMOOTHED(-9, 0) < SMOOTHED(-7, -2)) << 5) + ((SMOOTHED(5, 0) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-4, -16) < SMOOTHED(6, 3)) << 3) + ((SMOOTHED(2, -15) < SMOOTHED(-2, 12)) << 2) + ((SMOOTHED(4, -1) < SMOOTHED(6, 2)) << 1) + ((SMOOTHED(1, 1) < SMOOTHED(-2, -8)) << 0));
// desc[20] = (uchar)(((SMOOTHED(-2, 12) < SMOOTHED(-5, -2)) << 7) + ((SMOOTHED(-8, 8) < SMOOTHED(-9, 9)) << 6) + ((SMOOTHED(2, -10) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-4, 10) < SMOOTHED(-9, 4)) << 4) + ((SMOOTHED(6, 12) < SMOOTHED(2, 5)) << 3) + ((SMOOTHED(-3, -8) < SMOOTHED(0, 5)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-7, 2)) << 1) + ((SMOOTHED(-1, -10) < SMOOTHED(7, -18)) << 0));
// desc[21] = (uchar)(((SMOOTHED(-1, 8) < SMOOTHED(-9, -10)) << 7) + ((SMOOTHED(-23, -1) < SMOOTHED(6, 2)) << 6) + ((SMOOTHED(-5, -3) < SMOOTHED(3, 2)) << 5) + ((SMOOTHED(0, 11) < SMOOTHED(-4, -7)) << 4) + ((SMOOTHED(15, 2) < SMOOTHED(-10, -3)) << 3) + ((SMOOTHED(-20, -8) < SMOOTHED(-13, 3)) << 2) + ((SMOOTHED(-19, -12) < SMOOTHED(5, -11)) << 1) + ((SMOOTHED(-17, -13) < SMOOTHED(-3, 2)) << 0));
// desc[22] = (uchar)(((SMOOTHED(7, 4) < SMOOTHED(-12, 0)) << 7) + ((SMOOTHED(5, -1) < SMOOTHED(-14, -6)) << 6) + ((SMOOTHED(-4, 11) < SMOOTHED(0, -4)) << 5) + ((SMOOTHED(3, 10) < SMOOTHED(7, -3)) << 4) + ((SMOOTHED(13, 21) < SMOOTHED(-11, 6)) << 3) + ((SMOOTHED(-12, 24) < SMOOTHED(-7, -4)) << 2) + ((SMOOTHED(4, 16) < SMOOTHED(3, -14)) << 1) + ((SMOOTHED(-3, 5) < SMOOTHED(-7, -12)) << 0));
// desc[23] = (uchar)(((SMOOTHED(0, -4) < SMOOTHED(7, -5)) << 7) + ((SMOOTHED(-17, -9) < SMOOTHED(13, -7)) << 6) + ((SMOOTHED(22, -6) < SMOOTHED(-11, 5)) << 5) + ((SMOOTHED(2, -8) < SMOOTHED(23, -11)) << 4) + ((SMOOTHED(7, -10) < SMOOTHED(-1, 14)) << 3) + ((SMOOTHED(-3, -10) < SMOOTHED(8, 3)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-6, 0)) << 1) + ((SMOOTHED(-7, -21) < SMOOTHED(6, -14)) << 0));
// desc[24] = (uchar)(((SMOOTHED(18, 19) < SMOOTHED(-4, -6)) << 7) + ((SMOOTHED(10, 7) < SMOOTHED(-1, -4)) << 6) + ((SMOOTHED(-1, 21) < SMOOTHED(1, -5)) << 5) + ((SMOOTHED(-10, 6) < SMOOTHED(-11, -2)) << 4) + ((SMOOTHED(18, -3) < SMOOTHED(-1, 7)) << 3) + ((SMOOTHED(-3, -9) < SMOOTHED(-5, 10)) << 2) + ((SMOOTHED(-13, 14) < SMOOTHED(17, -3)) << 1) + ((SMOOTHED(11, -19) < SMOOTHED(-1, -18)) << 0));
// desc[25] = (uchar)(((SMOOTHED(8, -2) < SMOOTHED(-18, -23)) << 7) + ((SMOOTHED(0, -5) < SMOOTHED(-2, -9)) << 6) + ((SMOOTHED(-4, -11) < SMOOTHED(2, -8)) << 5) + ((SMOOTHED(14, 6) < SMOOTHED(-3, -6)) << 4) + ((SMOOTHED(-3, 0) < SMOOTHED(-15, 0)) << 3) + ((SMOOTHED(-9, 4) < SMOOTHED(-15, -9)) << 2) + ((SMOOTHED(-1, 11) < SMOOTHED(3, 11)) << 1) + ((SMOOTHED(-10, -16) < SMOOTHED(-7, 7)) << 0));
// desc[26] = (uchar)(((SMOOTHED(-2, -10) < SMOOTHED(-10, -2)) << 7) + ((SMOOTHED(-5, -3) < SMOOTHED(5, -23)) << 6) + ((SMOOTHED(13, -8) < SMOOTHED(-15, -11)) << 5) + ((SMOOTHED(-15, 11) < SMOOTHED(6, -6)) << 4) + ((SMOOTHED(-16, -3) < SMOOTHED(-2, 2)) << 3) + ((SMOOTHED(6, 12) < SMOOTHED(-16, 24)) << 2) + ((SMOOTHED(-10, 0) < SMOOTHED(8, 11)) << 1) + ((SMOOTHED(-7, 7) < SMOOTHED(-19, -7)) << 0));
// desc[27] = (uchar)(((SMOOTHED(5, 16) < SMOOTHED(9, -3)) << 7) + ((SMOOTHED(9, 7) < SMOOTHED(-7, -16)) << 6) + ((SMOOTHED(3, 2) < SMOOTHED(-10, 9)) << 5) + ((SMOOTHED(21, 1) < SMOOTHED(8, 7)) << 4) + ((SMOOTHED(7, 0) < SMOOTHED(1, 17)) << 3) + ((SMOOTHED(-8, 12) < SMOOTHED(9, 6)) << 2) + ((SMOOTHED(11, -7) < SMOOTHED(-8, -6)) << 1) + ((SMOOTHED(19, 0) < SMOOTHED(9, 3)) << 0));
// desc[28] = (uchar)(((SMOOTHED(1, -7) < SMOOTHED(-5, -11)) << 7) + ((SMOOTHED(0, 8) < SMOOTHED(-2, 14)) << 6) + ((SMOOTHED(12, -2) < SMOOTHED(-15, -6)) << 5) + ((SMOOTHED(4, 12) < SMOOTHED(0, -21)) << 4) + ((SMOOTHED(17, -4) < SMOOTHED(-6, -7)) << 3) + ((SMOOTHED(-10, -9) < SMOOTHED(-14, -7)) << 2) + ((SMOOTHED(-15, -10) < SMOOTHED(-15, -14)) << 1) + ((SMOOTHED(-7, -5) < SMOOTHED(5, -12)) << 0));
// desc[29] = (uchar)(((SMOOTHED(-4, 0) < SMOOTHED(15, -4)) << 7) + ((SMOOTHED(5, 2) < SMOOTHED(-6, -23)) << 6) + ((SMOOTHED(-4, -21) < SMOOTHED(-6, 4)) << 5) + ((SMOOTHED(-10, 5) < SMOOTHED(-15, 6)) << 4) + ((SMOOTHED(4, -3) < SMOOTHED(-1, 5)) << 3) + ((SMOOTHED(-4, 19) < SMOOTHED(-23, -4)) << 2) + ((SMOOTHED(-4, 17) < SMOOTHED(13, -11)) << 1) + ((SMOOTHED(1, 12) < SMOOTHED(4, -14)) << 0));
// desc[30] = (uchar)(((SMOOTHED(-11, -6) < SMOOTHED(-20, 10)) << 7) + ((SMOOTHED(4, 5) < SMOOTHED(3, 20)) << 6) + ((SMOOTHED(-8, -20) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-19, 9) < SMOOTHED(9, -3)) << 4) + ((SMOOTHED(18, 15) < SMOOTHED(11, -4)) << 3) + ((SMOOTHED(12, 16) < SMOOTHED(8, 7)) << 2) + ((SMOOTHED(-14, -8) < SMOOTHED(-3, 9)) << 1) + ((SMOOTHED(-6, 0) < SMOOTHED(2, -4)) << 0));
// desc[31] = (uchar)(((SMOOTHED(1, -10) < SMOOTHED(-1, 2)) << 7) + ((SMOOTHED(8, -7) < SMOOTHED(-6, 18)) << 6) + ((SMOOTHED(9, 12) < SMOOTHED(-7, -23)) << 5) + ((SMOOTHED(8, -6) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-9, 6) < SMOOTHED(-12, -7)) << 3) + ((SMOOTHED(-1, -2) < SMOOTHED(-7, 2)) << 2) + ((SMOOTHED(9, 9) < SMOOTHED(7, 15)) << 1) + ((SMOOTHED(6, 2) < SMOOTHED(-6, 6)) << 0));
//#undef SMOOTHED
// }
//}
|
UpperBM.hip | // !!! This is a file automatically generated by hipify!!!
// A program to generate a 1-dimensional NumPy array that stores the user's desired number of Brownian Paths, all generated in parallel on crispr's 3 GPUs.
// Also generates an array indicating the time at which each path crosses the upper threshold supplied by the user. See documentation.
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include "../book.h"
#include <vector>
int *crossTimes = nullptr;
int *failCross = nullptr;
//Function to generate brownian paths, which are stored in results. Executes on the GPU, hence the __global__ identifier
__global__ void randomWalk(double *results, int *crossTimes, double T, int N, int numSims, double upperThreshold, double deviceID) {
// a variable to keep track of this simulation's position in the crossTimes array
int crossTimeIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (crossTimeIndex < numSims) {
// create random number generator
hiprandState_t state;
hiprand_init (blockIdx.x * (1000 * deviceID) + threadIdx.x + clock64(), 0, 0, &state);
double random;
// starting position of this simulation in the results array
int start = (threadIdx.x + blockIdx.x * blockDim.x) * N;
// set default value of cross time for this simulation to 0, since the simulation hasn't crossed the threshold yet
crossTimes[crossTimeIndex] = 0;
// starting point of path is 0
results[start] = 0.0;
// boolean to keep track of whether this path has crossed
bool crossed = false;
for (int j = start + 1; j < start + N; j++) {
// generate random number
random = hiprand_normal_double(&state);
//calculate next step of path
results[j] = results[j-1] + random * sqrt((double) T / N);
// record the first step at which this path crosses the upper threshold (it stays 0 if the path never crosses)
if (!crossed && results[j] >= upperThreshold) {
crossTimes[crossTimeIndex] = j - start;
crossed = true;
}
}
}
}
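// Note: each iteration above draws Z ~ N(0,1) and advances the path by Z * sqrt(T/N),
// i.e. the standard discretisation of Brownian motion W_{k+1} = W_k + sqrt(dt) * Z
// with dt = T / N over N steps on the interval [0, T].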
// data structure to hold information for each GPU
struct DataStruct {
int deviceID; // id of gpu
int sims; // number of simulations to be executed on this gpu
double *resultArray; // array to store brownian paths calculated on this gpu
int *crossArray; // array to store cross times calculated on this gpu
int N; // number of time steps per path
double T; // parameter for brownian path equation
double upperThreshold;
};
// function to execute on each individual GPU
void* routine(void *voidData) {
DataStruct *data = (DataStruct*)voidData;
hipSetDevice(data->deviceID);
int sims = data->sims;
// allocate arrays on host to store results, as well as temporary arrays on gpu for our global function
double *dev_results;
double *partialResults = (double*)malloc(sims * data->N * sizeof(double));
int *dev_crossTimes;
int *partialCrossTimes = (int*)malloc(sims * sizeof(int));
hipMalloc(&dev_results, data->N * sims * sizeof(double));
hipMalloc(&dev_crossTimes, sims * sizeof(int));
// calculate number of blocks and threads for global function
int numBlocks = (sims + 511) / 512; // round up so that every simulation gets a thread
int numThreads = 512;
// call GPU function
hipLaunchKernelGGL(( randomWalk), dim3(numBlocks), dim3(numThreads), 0, 0, dev_results, dev_crossTimes, data->T, data->N, sims, data->upperThreshold, data->deviceID);
// transfer data on gpu to host
hipMemcpy(partialResults, dev_results , data->N * sims * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(partialCrossTimes, dev_crossTimes , sims * sizeof(int), hipMemcpyDeviceToHost);
data->resultArray = partialResults;
data->crossArray = partialCrossTimes;
// free gpu memory
hipFree(dev_results);
hipFree(dev_crossTimes);
return 0;
}
// host function to generate the results and crossTimes arrays, and then return the results array
// defined in 3gpubm.h in order to import into cython code (see GenerateNumPy3.pyx)
extern "C++" double *makePath(double T, int N, int numSims, double upperThreshold) {
// fill a data structure for each of crispr's 3 GPUs
DataStruct data[3];
data[0].deviceID = 0;
data[0].sims = numSims / 3;
data[0].N = N;
data[0].T = T;
data[0].upperThreshold = upperThreshold;
data[1].deviceID = 1;
data[1].sims = numSims / 3;
data[1].N = N;
data[1].T = T;
data[1].upperThreshold = upperThreshold;
data[2].deviceID = 2;
data[2].sims = numSims / 3 + numSims % 3;
data[2].N = N;
data[2].T = T;
data[2].upperThreshold = upperThreshold;
// start a separate thread for each gpu
CUTThread thread = start_thread(routine, &(data[0]));
CUTThread thread2 = start_thread(routine, &(data[1]));
routine(&(data[2]));
end_thread(thread);
end_thread(thread2);
double *results = new double[N * numSims]; // the main array to store the path for each simulations, with an index for each point along the path
crossTimes = new int[numSims]; // the array to store the cross time for each simulation
// get output of each gpu and concatenate the arrays
double *arr1 = data[0].resultArray;
int size1 = data[0].sims * N;
double *arr2 = data[1].resultArray;
int size2 = data[1].sims * N;
double *arr3 = data[2].resultArray;
int size3 = data[2].sims * N;
std::copy(arr1, arr1 + size1, results);
std::copy(arr2, arr2 + size2, results + size1);
std::copy(arr3, arr3 + size3, results + size1 + size2);
int *carr1 = data[0].crossArray;
size1 = data[0].sims;
int *carr2 = data[1].crossArray;
size2 = data[1].sims;
int *carr3 = data[2].crossArray;
size3 = data[2].sims;
std::copy(carr1, carr1 + size1, crossTimes);
std::copy(carr2, carr2 + size2, crossTimes + size1);
std::copy(carr3, carr3 + size3, crossTimes + size1 + size2);
return results;
}
// return the crossTimes array
// defined in 3gpubm.h in order to import into cython code (see GenerateNumPy3.pyx)
extern "C++" int *getCrossTimes() {
return crossTimes;
}
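// ---------------------------------------------------------------------------
// Illustrative usage sketch (the real caller is the Cython wrapper described in
// GenerateNumPy3.pyx, which is not shown here; the parameter values below are
// arbitrary examples). Both returned arrays are allocated with new[] inside
// makePath and are not freed by this module.
// ---------------------------------------------------------------------------
static void exampleMakePath()
{
const double T = 1.0; // time horizon
const int N = 1000; // steps per path
const int numSims = 300; // number of paths
const double upperThreshold = 1.5;
double *paths = makePath(T, N, numSims, upperThreshold);
int *crossings = getCrossTimes();
// path i occupies paths[i * N] .. paths[i * N + N - 1]; crossings[i] is the first
// step at which path i exceeded the threshold, or 0 if it never crossed.
(void)paths;
(void)crossings;
}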
| UpperBM.cu | // A program to generate a 1-dimensional NumPy array that stores the user's desired number of Brownian Paths, all generated in parallel on crispr's 3 GPUs.
// Also generates an array indicating the time at which each path crosses the upper threshold supplied by the user. See documentation.
#include <cuda.h>
#include <curand_kernel.h>
#include <stdio.h>
#include "../book.h"
#include <vector>
int *crossTimes = nullptr;
int *failCross = nullptr;
//Function to generate brownian paths, which are stored in results. Executes on the GPU, hence the __global__ identifier
__global__ void randomWalk(double *results, int *crossTimes, double T, int N, int numSims, double upperThreshold, double deviceID) {
// a variable to keep track of this simulation's position in the crossTimes array
int crossTimeIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (crossTimeIndex < numSims) {
// create random number generator
curandState_t state;
curand_init (blockIdx.x * (1000 * deviceID) + threadIdx.x + clock64(), 0, 0, &state);
double random;
// starting position of this simulation in the results array
int start = (threadIdx.x + blockIdx.x * blockDim.x) * N;
// set default value of cross time for this simulation to 0, since the simulation hasn't crossed the threshold yet
crossTimes[crossTimeIndex] = 0;
// starting point of path is 0
results[start] = 0.0;
// boolean to keep track of whether this path has crossed
bool crossed = false;
for (int j = start + 1; j < start + N; j++) {
// generate random number
random = curand_normal_double(&state);
//calculate next step of path
results[j] = results[j-1] + random * sqrt((double) T / N);
// record the first step at which this path crosses the upper threshold (it stays 0 if the path never crosses)
if (!crossed && results[j] >= upperThreshold) {
crossTimes[crossTimeIndex] = j - start;
crossed = true;
}
}
}
}
// data structure to hold information for each GPU
struct DataStruct {
int deviceID; // id of gpu
int sims; // number of simulations to be executed on this gpu
double *resultArray; // array to store brownian paths calculated on this gpu
int *crossArray; // array to store cross times calculated on this gpu
int N; // number of time steps per path
double T; // parameter for brownian path equation
double upperThreshold;
};
// function to execute on each individual GPU
void* routine(void *voidData) {
DataStruct *data = (DataStruct*)voidData;
cudaSetDevice(data->deviceID);
int sims = data->sims;
// allocate arrays on host to store results, as well as temporary arrays on gpu for our global function
double *dev_results;
double *partialResults = (double*)malloc(sims * data->N * sizeof(double));
int *dev_crossTimes;
int *partialCrossTimes = (int*)malloc(sims * sizeof(int));
cudaMalloc(&dev_results, data->N * sims * sizeof(double));
cudaMalloc(&dev_crossTimes, sims * sizeof(int));
// calculate number of blocks and threads for global function
int numBlocks = (sims + 511) / 512; // round up so that every simulation gets a thread
int numThreads = 512;
// call GPU function
randomWalk<<<numBlocks, numThreads>>>(dev_results, dev_crossTimes, data->T, data->N, sims, data->upperThreshold, data->deviceID);
// transfer data on gpu to host
cudaMemcpy(partialResults, dev_results , data->N * sims * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(partialCrossTimes, dev_crossTimes , sims * sizeof(int), cudaMemcpyDeviceToHost);
data->resultArray = partialResults;
data->crossArray = partialCrossTimes;
// free gpu memory
cudaFree(dev_results);
cudaFree(dev_crossTimes);
return 0;
}
// host function to generate the results and crossTimes arrays, and then return the results array
// defined in 3gpubm.h in order to import into cython code (see GenerateNumPy3.pyx)
extern "C++" double *makePath(double T, int N, int numSims, double upperThreshold) {
// fill a data structure for each of crispr's 3 GPUs
DataStruct data[3];
data[0].deviceID = 0;
data[0].sims = numSims / 3;
data[0].N = N;
data[0].T = T;
data[0].upperThreshold = upperThreshold;
data[1].deviceID = 1;
data[1].sims = numSims / 3;
data[1].N = N;
data[1].T = T;
data[1].upperThreshold = upperThreshold;
data[2].deviceID = 2;
data[2].sims = numSims / 3 + numSims % 3;
data[2].N = N;
data[2].T = T;
data[2].upperThreshold = upperThreshold;
// start a separate thread for each gpu
CUTThread thread = start_thread(routine, &(data[0]));
CUTThread thread2 = start_thread(routine, &(data[1]));
routine(&(data[2]));
end_thread(thread);
end_thread(thread2);
double *results = new double[N * numSims]; // the main array to store the path for each simulations, with an index for each point along the path
crossTimes = new int[numSims]; // the array to store the cross time for each simulation
// get output of each gpu and concatenate the arrays
double *arr1 = data[0].resultArray;
int size1 = data[0].sims * N;
double *arr2 = data[1].resultArray;
int size2 = data[1].sims * N;
double *arr3 = data[2].resultArray;
int size3 = data[2].sims * N;
std::copy(arr1, arr1 + size1, results);
std::copy(arr2, arr2 + size2, results + size1);
std::copy(arr3, arr3 + size3, results + size1 + size2);
int *carr1 = data[0].crossArray;
size1 = data[0].sims;
int *carr2 = data[1].crossArray;
size2 = data[1].sims;
int *carr3 = data[2].crossArray;
size3 = data[2].sims;
std::copy(carr1, carr1 + size1, crossTimes);
std::copy(carr2, carr2 + size2, crossTimes + size1);
std::copy(carr3, carr3 + size3, crossTimes + size1 + size2);
return results;
}
// return the crossTimes array
// defined in 3gpubm.h in order to import into cython code (see GenerateNumPy3.pyx)
extern "C++" int *getCrossTimes() {
return crossTimes;
}
|
fb8662b397a2bedd7b0e4cff2454267f9b639490.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* http://majuric.org/software/cudamd5/ */
// CUDA MD5 hash calculation implementation (A: [email protected]).
//
// A very useful link: http://people.eku.edu/styere/Encrypt/JS-MD5.html
//
#define RSA_KERNEL md5_v2
#include <stdio.h>
#include "cutil.h"
#include <cutil_inline.h>
// 0 1 2 3 4 5 6
// 01234567890123456789012345678901234567890123456789012345678901
//#define MD5POOL "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
//#define MD5POOL "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
#define MD5POOL "abcdefghijklmnopqrstuvwxyz0123456789"
#define ORIGINAL_WORD_LENGTH 6
#if ORIGINAL_WORD_LENGTH > 16*4
#error "Word length too long for CUDA implementation"
#endif
#define count_t ullong
typedef unsigned int uint;
typedef unsigned long long ullong;
//
// On-device variable declarations
//
extern __shared__ uint memory[]; // on-chip shared memory
__constant__ uint k[64], rconst[16]; // constants (in fast on-chip constant cache)
__constant__ uint steps[ORIGINAL_WORD_LENGTH]; // calculation helper to convert a number to a word using the MD5POOL
//
// MD5 routines (straight from Wikipedia's MD5 pseudocode description)
//
__device__ inline uint leftrotate (uint x, uint c)
{
return (x << c) | (x >> (32-c));
}
__device__ inline uint r(const uint i)
{
return rconst[(i / 16) * 4 + i % 4];
}
// Accessor for w[16] array. Naively, this would just be w[i]; however, this
// choice leads to worst-case-scenario access pattern wrt. shared memory
// bank conflicts, as the same indices in different threads fall into the
// same bank (as the words are 16 uints long). The packing below causes the
// same indices in different threads of a warp to map to different banks. In
// testing this gave a ~40% speedup.
//
// PS: An alternative solution would be to make the w array 17 uints long
// (thus wasting a little shared memory)
//
__device__ inline uint &getw(uint *w, const int i)
{
return w[(i+threadIdx.x) % 16];
}
__device__ inline uint getw(const uint *w, const int i) // const- version
{
return w[(i+threadIdx.x) % 16];
}
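// Minimal sketch of the alternative mentioned above (not used by this kernel): pad
// each per-thread word to 17 uints so that plain indexing no longer maps equal
// indices of different threads onto the same bank; the caller would then take
// word = &memory[0] + threadIdx.x * 17 instead of * 16.
__device__ inline uint &getw_padded(uint *w, const int i)
{
return w[i]; // the 17-uint stride already staggers the banks, so no index swizzling is needed
}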
__device__ inline uint getk(const int i)
{
return k[i]; // Note: this is as fast as possible (measured)
}
__device__ void step(const uint i, const uint f, const uint g, uint &a, uint &b, uint &c, uint &d, const uint *w)
{
uint temp = d;
d = c;
c = b;
b = b + leftrotate((a + f + getk(i) + getw(w, g)), r(i));
a = temp;
}
__device__ void inline md5(const uint *w, uint &a, uint &b, uint &c, uint &d)
{
const uint a0 = 0x67452301;
const uint b0 = 0xEFCDAB89;
const uint c0 = 0x98BADCFE;
const uint d0 = 0x10325476;
//Initialize hash value for this chunk:
a = a0;
b = b0;
c = c0;
d = d0;
uint f, g, i = 0;
for(; i != 16; i++)
{
f = (b & c) | ((~b) & d);
g = i;
step(i, f, g, a, b, c, d, w);
}
for(; i != 32; i++)
{
f = (d & b) | ((~d) & c);
g = (5*i + 1) % 16;
step(i, f, g, a, b, c, d, w);
}
for(; i != 48; i++)
{
f = b ^ c ^ d;
g = (3*i + 5) % 16;
step(i, f, g, a, b, c, d, w);
}
for(; i != 64; i++)
{
f = c ^ (b | (~d));
g = (7*i) % 16;
step(i, f, g, a, b, c, d, w);
}
a += a0;
b += b0;
c += c0;
d += d0;
}
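// Note: md5() above is the loop-based reference version; the unrolled md5_v2()
// further below is the variant actually selected through the RSA_KERNEL define at
// the top of this file and invoked from md5_search().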
/*
* prepare a 56-byte (maximum) wide md5 message by appending the 64-bit length
* it will be padded with 0 and will contain the message 'packed' into a uint array
*
* NOTE: This function will fail badly if it is called with a number >= steps[0] * (the number of characters in MD5POOL)
*
* word is assumed to be a w[16] array and is thus accessed via getw()
*/
__device__ void number2paddedword (count_t number, uint *word)
{
int srciter=0;
int dstiter=0;
char md5pool[sizeof(MD5POOL)] = MD5POOL;
char curChar;
int shiftoffset = 0; /* current offset to shift the next char into the uint */
uint nextArrayUint = 0;
/*
* Special case: Length of words is 0 or 1
* These cases can be determined at compile time and can therefore
* be optimized away by the compiler
*/
if (ORIGINAL_WORD_LENGTH < 1)
return;
/* loop through the source word */
for (srciter = 0; srciter < ORIGINAL_WORD_LENGTH; ++srciter) {
/* Decide if we have to encode a specific char or just md5pool[0] */
if (number >= steps[srciter] || srciter == ORIGINAL_WORD_LENGTH-1) {
uint temp = (uint)((count_t)number / (count_t)steps[srciter]);
curChar = md5pool[temp];
number -= (count_t)((count_t)temp * (count_t)steps[srciter]);
} else
curChar = md5pool[0];
/* Encode current char for the destination word */
nextArrayUint |= (curChar << shiftoffset);
shiftoffset += 8;
/* if we have packed 4 chars in the uint we have to write it to word */
if (shiftoffset > 24) {
getw(word, dstiter++) = nextArrayUint;
shiftoffset = 0;
nextArrayUint = 0;
}
}
/* Append a single 1 bit after the message as needed by md5 */
/* When arriving here shiftoffset is <= 24, so we can safely append one more char and encode it */
nextArrayUint |= (0x80 << shiftoffset);
getw(word, dstiter++) = nextArrayUint;
/* zero the word's padding */
for (; dstiter < 16; ++dstiter)
getw(word, dstiter) = (uint)0;
__syncthreads();
/* write the message length in bits */
getw(word, 14) = ORIGINAL_WORD_LENGTH * 8;
}
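// Worked example, assuming the host fills steps[] as steps[i] = P^(ORIGINAL_WORD_LENGTH - 1 - i)
// with P = sizeof(MD5POOL) - 1 = 36: for ORIGINAL_WORD_LENGTH = 6, number = 37 decomposes
// as 37 = 1*36 + 1, so the candidate word becomes "aaaabb" before the 0x80 terminator
// and the bit length are appended.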
//////////////////////////////////////////////////////////////////////////////
///////////// Ron Rivest's MD5 C Implementation //////////////////
//////////////////////////////////////////////////////////////////////////////
/*
**********************************************************************
** Copyright (C) 1990, RSA Data Security, Inc. All rights reserved. **
** **
** License to copy and use this software is granted provided that **
** it is identified as the "RSA Data Security, Inc. MD5 Message **
** Digest Algorithm" in all material mentioning or referencing this **
** software or this function. **
** **
** License is also granted to make and use derivative works **
** provided that such works are identified as "derived from the RSA **
** Data Security, Inc. MD5 Message Digest Algorithm" in all **
** material mentioning or referencing the derived work. **
** **
** RSA Data Security, Inc. makes no representations concerning **
** either the merchantability of this software or the suitability **
** of this software for any particular purpose. It is provided "as **
** is" without express or implied warranty of any kind. **
** **
** These notices must be retained in any copies of any part of this **
** documentation and/or software. **
**********************************************************************
*/
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
/* Basic MD5 step. Transform buf based on in.
*/
void inline __device__ md5_v2(const uint *in, uint &a, uint &b, uint &c, uint &d)
{
const uint a0 = 0x67452301;
const uint b0 = 0xEFCDAB89;
const uint c0 = 0x98BADCFE;
const uint d0 = 0x10325476;
//Initialize hash value for this chunk:
a = a0;
b = b0;
c = c0;
d = d0;
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF ( a, b, c, d, getw(in, 0), S11, 3614090360); /* 1 */
FF ( d, a, b, c, getw(in, 1), S12, 3905402710); /* 2 */
FF ( c, d, a, b, getw(in, 2), S13, 606105819); /* 3 */
FF ( b, c, d, a, getw(in, 3), S14, 3250441966); /* 4 */
FF ( a, b, c, d, getw(in, 4), S11, 4118548399); /* 5 */
FF ( d, a, b, c, getw(in, 5), S12, 1200080426); /* 6 */
FF ( c, d, a, b, getw(in, 6), S13, 2821735955); /* 7 */
FF ( b, c, d, a, getw(in, 7), S14, 4249261313); /* 8 */
FF ( a, b, c, d, getw(in, 8), S11, 1770035416); /* 9 */
FF ( d, a, b, c, getw(in, 9), S12, 2336552879); /* 10 */
FF ( c, d, a, b, getw(in, 10), S13, 4294925233); /* 11 */
FF ( b, c, d, a, getw(in, 11), S14, 2304563134); /* 12 */
FF ( a, b, c, d, getw(in, 12), S11, 1804603682); /* 13 */
FF ( d, a, b, c, getw(in, 13), S12, 4254626195); /* 14 */
FF ( c, d, a, b, getw(in, 14), S13, 2792965006); /* 15 */
FF ( b, c, d, a, getw(in, 15), S14, 1236535329); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG ( a, b, c, d, getw(in, 1), S21, 4129170786); /* 17 */
GG ( d, a, b, c, getw(in, 6), S22, 3225465664); /* 18 */
GG ( c, d, a, b, getw(in, 11), S23, 643717713); /* 19 */
GG ( b, c, d, a, getw(in, 0), S24, 3921069994); /* 20 */
GG ( a, b, c, d, getw(in, 5), S21, 3593408605); /* 21 */
GG ( d, a, b, c, getw(in, 10), S22, 38016083); /* 22 */
GG ( c, d, a, b, getw(in, 15), S23, 3634488961); /* 23 */
GG ( b, c, d, a, getw(in, 4), S24, 3889429448); /* 24 */
GG ( a, b, c, d, getw(in, 9), S21, 568446438); /* 25 */
GG ( d, a, b, c, getw(in, 14), S22, 3275163606); /* 26 */
GG ( c, d, a, b, getw(in, 3), S23, 4107603335); /* 27 */
GG ( b, c, d, a, getw(in, 8), S24, 1163531501); /* 28 */
GG ( a, b, c, d, getw(in, 13), S21, 2850285829); /* 29 */
GG ( d, a, b, c, getw(in, 2), S22, 4243563512); /* 30 */
GG ( c, d, a, b, getw(in, 7), S23, 1735328473); /* 31 */
GG ( b, c, d, a, getw(in, 12), S24, 2368359562); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH ( a, b, c, d, getw(in, 5), S31, 4294588738); /* 33 */
HH ( d, a, b, c, getw(in, 8), S32, 2272392833); /* 34 */
HH ( c, d, a, b, getw(in, 11), S33, 1839030562); /* 35 */
HH ( b, c, d, a, getw(in, 14), S34, 4259657740); /* 36 */
HH ( a, b, c, d, getw(in, 1), S31, 2763975236); /* 37 */
HH ( d, a, b, c, getw(in, 4), S32, 1272893353); /* 38 */
HH ( c, d, a, b, getw(in, 7), S33, 4139469664); /* 39 */
HH ( b, c, d, a, getw(in, 10), S34, 3200236656); /* 40 */
HH ( a, b, c, d, getw(in, 13), S31, 681279174); /* 41 */
HH ( d, a, b, c, getw(in, 0), S32, 3936430074); /* 42 */
HH ( c, d, a, b, getw(in, 3), S33, 3572445317); /* 43 */
HH ( b, c, d, a, getw(in, 6), S34, 76029189); /* 44 */
HH ( a, b, c, d, getw(in, 9), S31, 3654602809); /* 45 */
HH ( d, a, b, c, getw(in, 12), S32, 3873151461); /* 46 */
HH ( c, d, a, b, getw(in, 15), S33, 530742520); /* 47 */
HH ( b, c, d, a, getw(in, 2), S34, 3299628645); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II ( a, b, c, d, getw(in, 0), S41, 4096336452); /* 49 */
II ( d, a, b, c, getw(in, 7), S42, 1126891415); /* 50 */
II ( c, d, a, b, getw(in, 14), S43, 2878612391); /* 51 */
II ( b, c, d, a, getw(in, 5), S44, 4237533241); /* 52 */
II ( a, b, c, d, getw(in, 12), S41, 1700485571); /* 53 */
II ( d, a, b, c, getw(in, 3), S42, 2399980690); /* 54 */
II ( c, d, a, b, getw(in, 10), S43, 4293915773); /* 55 */
II ( b, c, d, a, getw(in, 1), S44, 2240044497); /* 56 */
II ( a, b, c, d, getw(in, 8), S41, 1873313359); /* 57 */
II ( d, a, b, c, getw(in, 15), S42, 4264355552); /* 58 */
II ( c, d, a, b, getw(in, 6), S43, 2734768916); /* 59 */
II ( b, c, d, a, getw(in, 13), S44, 1309151649); /* 60 */
II ( a, b, c, d, getw(in, 4), S41, 4149444226); /* 61 */
II ( d, a, b, c, getw(in, 11), S42, 3174756917); /* 62 */
II ( c, d, a, b, getw(in, 2), S43, 718787259); /* 63 */
II ( b, c, d, a, getw(in, 9), S44, 3951481745); /* 64 */
a += a0;
b += b0;
c += c0;
d += d0;
}
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
// The kernel (this is the entrypoint of GPU code)
// Calculates the 64-byte word from MD5POOL to be hashed in shared memory,
// calls the calculation routine, compares to target and flags if a match is found
extern "C"
__global__ void md5_search(ullong starting_number, uint words_per_call, uint iterations, ullong max_number, uint *succ, uint *target)
{
count_t linidx = (count_t)(gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x + threadIdx.x; // assuming blockDim.y = 1 and threadIdx.y = 0, always
if(linidx >= words_per_call) { return; }
linidx += (count_t)starting_number;
/* get the shared memory region for our calculations */
uint *word = &memory[0] + threadIdx.x*16;
for (int i=0 ; i < iterations && linidx < max_number; ++i) {
// calculate the dictionary word for this thread
number2paddedword(linidx, word);
// compute MD5 hash
uint a, b, c, d;
RSA_KERNEL(word, a, b, c, d);
if(a == target[0] && b == target[1] && c == target[2] && d == target[3])
{
count_t *temp = (count_t *) &succ[0];
*temp = linidx;
succ[3] = 1;
break;
}
__syncthreads();
if (succ[3] != 0)
break;
linidx += (count_t)words_per_call;
}
/*
succ[0] = target[0];
succ[1] = target[1];
succ[2] = target[2];
succ[3] = target[3];
*/
}
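// ---------------------------------------------------------------------------
// Illustrative host-side launch sketch (the launching host code is not part of this
// file; dev_succ and dev_target are placeholder device pointers). The kernel expects
// blockDim.x * 16 uints of dynamic shared memory, and k[], rconst[] and steps[] must
// already have been uploaded, e.g. with hipMemcpyToSymbol.
//
// dim3 block(128);
// dim3 grid((words_per_call + block.x - 1) / block.x);
// hipLaunchKernelGGL(md5_search, grid, block, block.x * 16 * sizeof(uint), 0,
// starting_number, words_per_call, iterations, max_number, dev_succ, dev_target);
// ---------------------------------------------------------------------------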
| fb8662b397a2bedd7b0e4cff2454267f9b639490.cu | /* http://majuric.org/software/cudamd5/ */
// CUDA MD5 hash calculation implementation (A: [email protected]).
//
// A very useful link: http://people.eku.edu/styere/Encrypt/JS-MD5.html
//
#define RSA_KERNEL md5_v2
#include <stdio.h>
#include "cutil.h"
#include <cutil_inline.h>
// 0 1 2 3 4 5 6
// 01234567890123456789012345678901234567890123456789012345678901
//#define MD5POOL "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
//#define MD5POOL "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
#define MD5POOL "abcdefghijklmnopqrstuvwxyz0123456789"
#define ORIGINAL_WORD_LENGTH 6
#if ORIGINAL_WORD_LENGTH > 16*4
#error "Word length too long for CUDA implementation"
#endif
#define count_t ullong
typedef unsigned int uint;
typedef unsigned long long ullong;
//
// On-device variable declarations
//
extern __shared__ uint memory[]; // on-chip shared memory
__constant__ uint k[64], rconst[16]; // constants (in fast on-chip constant cache)
__constant__ uint steps[ORIGINAL_WORD_LENGTH]; // calculation helper to convert a number to a word using the MD5POOL
//
// MD5 routines (straight from Wikipedia's MD5 pseudocode description)
//
__device__ inline uint leftrotate (uint x, uint c)
{
return (x << c) | (x >> (32-c));
}
__device__ inline uint r(const uint i)
{
return rconst[(i / 16) * 4 + i % 4];
}
// Accessor for w[16] array. Naively, this would just be w[i]; however, this
// choice leads to worst-case-scenario access pattern wrt. shared memory
// bank conflicts, as the same indices in different threads fall into the
// same bank (as the words are 16 uints long). The packing below causes the
// same indices in different threads of a warp to map to different banks. In
// testing this gave a ~40% speedup.
//
// PS: An alternative solution would be to make the w array 17 uints long
// (thus wasting a little shared memory)
//
__device__ inline uint &getw(uint *w, const int i)
{
return w[(i+threadIdx.x) % 16];
}
__device__ inline uint getw(const uint *w, const int i) // const- version
{
return w[(i+threadIdx.x) % 16];
}
__device__ inline uint getk(const int i)
{
return k[i]; // Note: this is as fast as possible (measured)
}
__device__ void step(const uint i, const uint f, const uint g, uint &a, uint &b, uint &c, uint &d, const uint *w)
{
uint temp = d;
d = c;
c = b;
b = b + leftrotate((a + f + getk(i) + getw(w, g)), r(i));
a = temp;
}
__device__ void inline md5(const uint *w, uint &a, uint &b, uint &c, uint &d)
{
const uint a0 = 0x67452301;
const uint b0 = 0xEFCDAB89;
const uint c0 = 0x98BADCFE;
const uint d0 = 0x10325476;
//Initialize hash value for this chunk:
a = a0;
b = b0;
c = c0;
d = d0;
uint f, g, i = 0;
for(; i != 16; i++)
{
f = (b & c) | ((~b) & d);
g = i;
step(i, f, g, a, b, c, d, w);
}
for(; i != 32; i++)
{
f = (d & b) | ((~d) & c);
g = (5*i + 1) % 16;
step(i, f, g, a, b, c, d, w);
}
for(; i != 48; i++)
{
f = b ^ c ^ d;
g = (3*i + 5) % 16;
step(i, f, g, a, b, c, d, w);
}
for(; i != 64; i++)
{
f = c ^ (b | (~d));
g = (7*i) % 16;
step(i, f, g, a, b, c, d, w);
}
a += a0;
b += b0;
c += c0;
d += d0;
}
/*
* prepare a 56-byte (maximum) wide md5 message by appending the 64-bit length
* it will be padded with 0 and will contain the message 'packed' into a uint array
*
* NOTE: This function will fail badly if it is called with a number >= steps[0] * (the number of characters in MD5POOL)
*
* word is assumed to be a w[16] array and is thus accessed via getw()
*/
__device__ void number2paddedword (count_t number, uint *word)
{
int srciter=0;
int dstiter=0;
char md5pool[sizeof(MD5POOL)] = MD5POOL;
char curChar;
int shiftoffset = 0; /* current offset to shift the next char into the uint */
uint nextArrayUint = 0;
/*
* Special case: Length of words is 0 or 1
* These cases can be determined at compile time and can therefore
* be optimized away by the compiler
*/
if (ORIGINAL_WORD_LENGTH < 1)
return;
/* loop through the source word */
for (srciter = 0; srciter < ORIGINAL_WORD_LENGTH; ++srciter) {
/* Decide if we have to encode a specific char or just md5pool[0] */
if (number >= steps[srciter] || srciter == ORIGINAL_WORD_LENGTH-1) {
uint temp = (uint)((count_t)number / (count_t)steps[srciter]);
curChar = md5pool[temp];
number -= (count_t)((count_t)temp * (count_t)steps[srciter]);
} else
curChar = md5pool[0];
/* Encode current char for the destination word */
nextArrayUint |= (curChar << shiftoffset);
shiftoffset += 8;
/* if we have packed 4 chars in the uint we have to write it to word */
if (shiftoffset > 24) {
getw(word, dstiter++) = nextArrayUint;
shiftoffset = 0;
nextArrayUint = 0;
}
}
/* Append a single 1 bit after the message as needed by md5 */
/* When arriving here shiftoffset is <= 24, so we can safely append one more char and encode it */
nextArrayUint |= (0x80 << shiftoffset);
getw(word, dstiter++) = nextArrayUint;
/* zero the word's padding */
for (; dstiter < 16; ++dstiter)
getw(word, dstiter) = (uint)0;
__syncthreads();
/* write the message length in bits */
getw(word, 14) = ORIGINAL_WORD_LENGTH * 8;
}
//////////////////////////////////////////////////////////////////////////////
///////////// Ron Rivest's MD5 C Implementation //////////////////
//////////////////////////////////////////////////////////////////////////////
/*
**********************************************************************
** Copyright (C) 1990, RSA Data Security, Inc. All rights reserved. **
** **
** License to copy and use this software is granted provided that **
** it is identified as the "RSA Data Security, Inc. MD5 Message **
** Digest Algorithm" in all material mentioning or referencing this **
** software or this function. **
** **
** License is also granted to make and use derivative works **
** provided that such works are identified as "derived from the RSA **
** Data Security, Inc. MD5 Message Digest Algorithm" in all **
** material mentioning or referencing the derived work. **
** **
** RSA Data Security, Inc. makes no representations concerning **
** either the merchantability of this software or the suitability **
** of this software for any particular purpose. It is provided "as **
** is" without express or implied warranty of any kind. **
** **
** These notices must be retained in any copies of any part of this **
** documentation and/or software. **
**********************************************************************
*/
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (uint)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
/* Basic MD5 step. Transform buf based on in.
*/
void inline __device__ md5_v2(const uint *in, uint &a, uint &b, uint &c, uint &d)
{
const uint a0 = 0x67452301;
const uint b0 = 0xEFCDAB89;
const uint c0 = 0x98BADCFE;
const uint d0 = 0x10325476;
//Initialize hash value for this chunk:
a = a0;
b = b0;
c = c0;
d = d0;
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF ( a, b, c, d, getw(in, 0), S11, 3614090360); /* 1 */
FF ( d, a, b, c, getw(in, 1), S12, 3905402710); /* 2 */
FF ( c, d, a, b, getw(in, 2), S13, 606105819); /* 3 */
FF ( b, c, d, a, getw(in, 3), S14, 3250441966); /* 4 */
FF ( a, b, c, d, getw(in, 4), S11, 4118548399); /* 5 */
FF ( d, a, b, c, getw(in, 5), S12, 1200080426); /* 6 */
FF ( c, d, a, b, getw(in, 6), S13, 2821735955); /* 7 */
FF ( b, c, d, a, getw(in, 7), S14, 4249261313); /* 8 */
FF ( a, b, c, d, getw(in, 8), S11, 1770035416); /* 9 */
FF ( d, a, b, c, getw(in, 9), S12, 2336552879); /* 10 */
FF ( c, d, a, b, getw(in, 10), S13, 4294925233); /* 11 */
FF ( b, c, d, a, getw(in, 11), S14, 2304563134); /* 12 */
FF ( a, b, c, d, getw(in, 12), S11, 1804603682); /* 13 */
FF ( d, a, b, c, getw(in, 13), S12, 4254626195); /* 14 */
FF ( c, d, a, b, getw(in, 14), S13, 2792965006); /* 15 */
FF ( b, c, d, a, getw(in, 15), S14, 1236535329); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG ( a, b, c, d, getw(in, 1), S21, 4129170786); /* 17 */
GG ( d, a, b, c, getw(in, 6), S22, 3225465664); /* 18 */
GG ( c, d, a, b, getw(in, 11), S23, 643717713); /* 19 */
GG ( b, c, d, a, getw(in, 0), S24, 3921069994); /* 20 */
GG ( a, b, c, d, getw(in, 5), S21, 3593408605); /* 21 */
GG ( d, a, b, c, getw(in, 10), S22, 38016083); /* 22 */
GG ( c, d, a, b, getw(in, 15), S23, 3634488961); /* 23 */
GG ( b, c, d, a, getw(in, 4), S24, 3889429448); /* 24 */
GG ( a, b, c, d, getw(in, 9), S21, 568446438); /* 25 */
GG ( d, a, b, c, getw(in, 14), S22, 3275163606); /* 26 */
GG ( c, d, a, b, getw(in, 3), S23, 4107603335); /* 27 */
GG ( b, c, d, a, getw(in, 8), S24, 1163531501); /* 28 */
GG ( a, b, c, d, getw(in, 13), S21, 2850285829); /* 29 */
GG ( d, a, b, c, getw(in, 2), S22, 4243563512); /* 30 */
GG ( c, d, a, b, getw(in, 7), S23, 1735328473); /* 31 */
GG ( b, c, d, a, getw(in, 12), S24, 2368359562); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH ( a, b, c, d, getw(in, 5), S31, 4294588738); /* 33 */
HH ( d, a, b, c, getw(in, 8), S32, 2272392833); /* 34 */
HH ( c, d, a, b, getw(in, 11), S33, 1839030562); /* 35 */
HH ( b, c, d, a, getw(in, 14), S34, 4259657740); /* 36 */
HH ( a, b, c, d, getw(in, 1), S31, 2763975236); /* 37 */
HH ( d, a, b, c, getw(in, 4), S32, 1272893353); /* 38 */
HH ( c, d, a, b, getw(in, 7), S33, 4139469664); /* 39 */
HH ( b, c, d, a, getw(in, 10), S34, 3200236656); /* 40 */
HH ( a, b, c, d, getw(in, 13), S31, 681279174); /* 41 */
HH ( d, a, b, c, getw(in, 0), S32, 3936430074); /* 42 */
HH ( c, d, a, b, getw(in, 3), S33, 3572445317); /* 43 */
HH ( b, c, d, a, getw(in, 6), S34, 76029189); /* 44 */
HH ( a, b, c, d, getw(in, 9), S31, 3654602809); /* 45 */
HH ( d, a, b, c, getw(in, 12), S32, 3873151461); /* 46 */
HH ( c, d, a, b, getw(in, 15), S33, 530742520); /* 47 */
HH ( b, c, d, a, getw(in, 2), S34, 3299628645); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II ( a, b, c, d, getw(in, 0), S41, 4096336452); /* 49 */
II ( d, a, b, c, getw(in, 7), S42, 1126891415); /* 50 */
II ( c, d, a, b, getw(in, 14), S43, 2878612391); /* 51 */
II ( b, c, d, a, getw(in, 5), S44, 4237533241); /* 52 */
II ( a, b, c, d, getw(in, 12), S41, 1700485571); /* 53 */
II ( d, a, b, c, getw(in, 3), S42, 2399980690); /* 54 */
II ( c, d, a, b, getw(in, 10), S43, 4293915773); /* 55 */
II ( b, c, d, a, getw(in, 1), S44, 2240044497); /* 56 */
II ( a, b, c, d, getw(in, 8), S41, 1873313359); /* 57 */
II ( d, a, b, c, getw(in, 15), S42, 4264355552); /* 58 */
II ( c, d, a, b, getw(in, 6), S43, 2734768916); /* 59 */
II ( b, c, d, a, getw(in, 13), S44, 1309151649); /* 60 */
II ( a, b, c, d, getw(in, 4), S41, 4149444226); /* 61 */
II ( d, a, b, c, getw(in, 11), S42, 3174756917); /* 62 */
II ( c, d, a, b, getw(in, 2), S43, 718787259); /* 63 */
II ( b, c, d, a, getw(in, 9), S44, 3951481745); /* 64 */
a += a0;
b += b0;
c += c0;
d += d0;
}
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
// The kernel (this is the entrypoint of GPU code)
// Calculates the 64-byte word from MD5POOL to be hashed in shared memory,
// calls the calculation routine, compares to target and flags if a match is found
extern "C"
__global__ void md5_search(ullong starting_number, uint words_per_call, uint iterations, ullong max_number, uint *succ, uint *target)
{
count_t linidx = (count_t)(gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x + threadIdx.x; // assuming blockDim.y = 1 and threadIdx.y = 0, always
if(linidx >= words_per_call) { return; }
linidx += (count_t)starting_number;
/* get the shared memory region for our calculations */
uint *word = &memory[0] + threadIdx.x*16;
for (int i=0 ; i < iterations && linidx < max_number; ++i) {
// calculate the dictionary word for this thread
number2paddedword(linidx, word);
// compute MD5 hash
uint a, b, c, d;
RSA_KERNEL(word, a, b, c, d);
if(a == target[0] && b == target[1] && c == target[2] && d == target[3])
{
count_t *temp = (count_t *) &succ[0];
*temp = linidx;
succ[3] = 1;
break;
}
__syncthreads();
if (succ[3] != 0)
break;
linidx += (count_t)words_per_call;
}
/*
succ[0] = target[0];
succ[1] = target[1];
succ[2] = target[2];
succ[3] = target[3];
*/
}
|
0fab0c066083ea0602ea479f541291f5b2d41143.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_deformable_conv.h"
#include "hip/hip_fp16.h"
#include "saber/core/tensor_op.h"
namespace anakin {
namespace saber {
__device__ float deformable_im2col_bilinear(const float* bottom_data, const int data_width,
const int height, const int width, float h, float w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (float) h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (float) w_low;
} else {
w_high = w_low + 1;
}
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh, hw = 1 - lw;
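// bilinear interpolation: the four surrounding samples are blended with weights equal to the opposite sub-rectangle areas (the weights sum to 1)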
float v1 = bottom_data[h_low * data_width + w_low];
float v2 = bottom_data[h_low * data_width + w_high];
float v3 = bottom_data[h_high * data_width + w_low];
float v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
__global__ void deformable_im2col_gpu_kernel(const int n, const float* data_im,
const float* data_offset, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int height_col,
const int width_col, float* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int c_im = (index / width_col) / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
// THIS IS THE TRUE CHANNEL
const int deformable_group_index = c_im / channel_per_deformable_group;
//input map coord(h_in, w_in)
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
//data_col (data & offset)
float* data_col_ptr = data_col
+ (c_col * height_col + h_col) * width_col + w_col;
const float* data_im_ptr = data_im + (c_im * height + h_in) * width
+ w_in;
const float* data_offset_ptr = data_offset
+ deformable_group_index * 2 * kernel_h * kernel_w * height_col
* width_col;
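// each deformable group stores 2*kernel_h*kernel_w offset maps of size height_col*width_col,
// interleaved as (offset_h, offset_w) per kernel tap; the indices below select the pair for tap (i, j)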
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
//offset_h and offset_w in the same channel
const int data_offset_h_ptr = ((2 * (i * kernel_w + j))
* height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1)
* height_col + h_col) * width_col + w_col;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
float val = 0.f;
const float h_im = h_in + i * dilation_h + offset_h;
const float w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const float map_h = i * dilation_h + offset_h;
const float map_w = j * dilation_w + offset_w;
// cur_height (from h_in to height)
const int cur_height = height - h_in;
const int cur_width = width - w_in;
val = deformable_im2col_bilinear(data_im_ptr, width,
cur_height, cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
__global__ void gpu_add_bias(float * out_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride,
int in_h_stride, int in_w_stride,
const float *bias) {
CUDA_KERNEL_LOOP(tid, count){
int read_w = tid % in_w;
int read_h = (tid / (in_w)) % in_h;
int read_c = (tid / (in_h * in_w)) % in_c;
int read_n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w * in_w_stride;
float in_var = out_data[in_idx];
float in_bias = bias[read_c];
out_data[in_idx] = in_var + in_bias;
}
}
template <>
SaberStatus SaberDeformableConv2D<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT,\
NCHW, NCHW, NCHW>::dispatch(const std::vector<DataTensor_in *>& inputs,
std::vector<DataTensor_out *>& outputs,
DeformableConvParam<OpTensor>& param) {
int in_channel = inputs[0]->channel();
int conv_out_channel = outputs[0]->channel();
const OpDataType* weight = (const float*)param.weight()->data();
const InDataType* data = inputs[0]->data();
const InDataType* offset = inputs[1]->data();
InDataType* top_data = outputs[0]->mutable_data();
InDataType* deformable_col_buffer_data = _deform_col_buffer.mutable_data();
const InDataType* deform_col_buffer_data_const = _deform_col_buffer.data();
hipStream_t cuda_stream = this->_ctx->get_compute_stream();
for (int n = 0; n < inputs[0]->num(); ++n) {
// transform image to col_buffer in order to use gemm
int channel_per_group = in_channel / param.group;
int num_kernels = in_channel * _deform_col_buffer.height() * _deform_col_buffer.width();
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel)
, dim3(CUDA_GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
num_kernels, data + n * _bottom_dim, offset + n * _offset_dim,
inputs[0]->height(), inputs[0]->width(),
param.weight()->height(), param.weight()->width(),
param.pad_h, param.pad_w, param.stride_h, param.stride_w,
param.dilation_h, param.dilation_w,
channel_per_group, _deform_col_buffer.height(),
_deform_col_buffer.width(),
deformable_col_buffer_data);
for (int g = 0; g < param.group; ++g) {
float alpha = 1.f;
float beta = 0.f;
CUBLAS_CHECK(hipblasSgemm(_handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
_conv_out_spatial_dim,
conv_out_channel / param.group,
_kernel_dim / param.group,
&alpha,
deform_col_buffer_data_const + _col_offset * g,
_conv_out_spatial_dim,
weight + _kernel_offset * g,
_kernel_dim / param.group,
&beta,
top_data + _output_offset * g,
_conv_out_spatial_dim));
}
if (param.bias()->size() > 0) {
Shape out_shape = outputs[0]->valid_shape();
Shape out_stride = outputs[0]->get_stride();
int out_count = outputs[0]->size();
const float* bias_data = (const float*)param.bias()->data();
hipLaunchKernelGGL(( gpu_add_bias), dim3(CUDA_GET_BLOCKS(out_count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, top_data, out_count,
out_shape[0], out_shape[1],
out_shape[2], out_shape[3],
out_stride[0], out_stride[1],
out_stride[2], out_stride[3],
bias_data);
}
CUDA_POST_KERNEL_CHECK;
}
return SaberSuccess;
}
}
}
| 0fab0c066083ea0602ea479f541291f5b2d41143.cu | #include "saber/funcs/impl/cuda/saber_deformable_conv.h"
#include "cuda_fp16.h"
#include "saber/core/tensor_op.h"
namespace anakin {
namespace saber {
__device__ float deformable_im2col_bilinear(const float* bottom_data, const int data_width,
const int height, const int width, float h, float w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (float) h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (float) w_low;
} else {
w_high = w_low + 1;
}
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh, hw = 1 - lw;
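// bilinear interpolation: the four surrounding samples are blended with weights equal to the opposite sub-rectangle areas (the weights sum to 1)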
float v1 = bottom_data[h_low * data_width + w_low];
float v2 = bottom_data[h_low * data_width + w_high];
float v3 = bottom_data[h_high * data_width + w_low];
float v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
__global__ void deformable_im2col_gpu_kernel(const int n, const float* data_im,
const float* data_offset, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int height_col,
const int width_col, float* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int c_im = (index / width_col) / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
// THIS IS THE TRUE CHANNEL
const int deformable_group_index = c_im / channel_per_deformable_group;
//input map coord(h_in, w_in)
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
//data_col (data & offset)
float* data_col_ptr = data_col
+ (c_col * height_col + h_col) * width_col + w_col;
const float* data_im_ptr = data_im + (c_im * height + h_in) * width
+ w_in;
const float* data_offset_ptr = data_offset
+ deformable_group_index * 2 * kernel_h * kernel_w * height_col
* width_col;
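// each deformable group stores 2*kernel_h*kernel_w offset maps of size height_col*width_col,
// interleaved as (offset_h, offset_w) per kernel tap; the indices below select the pair for tap (i, j)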
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
//offset_h and offset_w in the same channel
const int data_offset_h_ptr = ((2 * (i * kernel_w + j))
* height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1)
* height_col + h_col) * width_col + w_col;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
float val = 0.f;
const float h_im = h_in + i * dilation_h + offset_h;
const float w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const float map_h = i * dilation_h + offset_h;
const float map_w = j * dilation_w + offset_w;
// cur_height (from h_in to height)
const int cur_height = height - h_in;
const int cur_width = width - w_in;
val = deformable_im2col_bilinear(data_im_ptr, width,
cur_height, cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
__global__ void gpu_add_bias(float * out_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride,
int in_h_stride, int in_w_stride,
const float *bias) {
CUDA_KERNEL_LOOP(tid, count){
int read_w = tid % in_w;
int read_h = (tid / (in_w)) % in_h;
int read_c = (tid / (in_h * in_w)) % in_c;
int read_n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w * in_w_stride;
float in_var = out_data[in_idx];
float in_bias = bias[read_c];
out_data[in_idx] = in_var + in_bias;
}
}
template <>
SaberStatus SaberDeformableConv2D<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT,\
NCHW, NCHW, NCHW>::dispatch(const std::vector<DataTensor_in *>& inputs,
std::vector<DataTensor_out *>& outputs,
DeformableConvParam<OpTensor>& param) {
int in_channel = inputs[0]->channel();
int conv_out_channel = outputs[0]->channel();
const OpDataType* weight = (const float*)param.weight()->data();
const InDataType* data = inputs[0]->data();
const InDataType* offset = inputs[1]->data();
InDataType* top_data = outputs[0]->mutable_data();
InDataType* deformable_col_buffer_data = _deform_col_buffer.mutable_data();
const InDataType* deform_col_buffer_data_const = _deform_col_buffer.data();
cudaStream_t cuda_stream = this->_ctx->get_compute_stream();
for (int n = 0; n < inputs[0]->num(); ++n) {
// transform image to col_buffer in order to use gemm
int channel_per_group = in_channel / param.group;
int num_kernels = in_channel * _deform_col_buffer.height() * _deform_col_buffer.width();
deformable_im2col_gpu_kernel
<<<CUDA_GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, cuda_stream>>>(
num_kernels, data + n * _bottom_dim, offset + n * _offset_dim,
inputs[0]->height(), inputs[0]->width(),
param.weight()->height(), param.weight()->width(),
param.pad_h, param.pad_w, param.stride_h, param.stride_w,
param.dilation_h, param.dilation_w,
channel_per_group, _deform_col_buffer.height(),
_deform_col_buffer.width(),
deformable_col_buffer_data);
for (int g = 0; g < param.group; ++g) {
float alpha = 1.f;
float beta = 0.f;
CUBLAS_CHECK(cublasSgemm(_handle, CUBLAS_OP_N, CUBLAS_OP_N,
_conv_out_spatial_dim,
conv_out_channel / param.group,
_kernel_dim / param.group,
&alpha,
deform_col_buffer_data_const + _col_offset * g,
_conv_out_spatial_dim,
weight + _kernel_offset * g,
_kernel_dim / param.group,
&beta,
top_data + _output_offset * g,
_conv_out_spatial_dim));
}
if (param.bias()->size() > 0) {
Shape out_shape = outputs[0]->valid_shape();
Shape out_stride = outputs[0]->get_stride();
int out_count = outputs[0]->size();
const float* bias_data = (const float*)param.bias()->data();
gpu_add_bias<<<CUDA_GET_BLOCKS(out_count), CUDA_NUM_THREADS, 0, cuda_stream>>> (top_data, out_count,
out_shape[0], out_shape[1],
out_shape[2], out_shape[3],
out_stride[0], out_stride[1],
out_stride[2], out_stride[3],
bias_data);
}
CUDA_POST_KERNEL_CHECK;
}
return SaberSuccess;
}
}
}
|
23fe0a194a42ce4f678c0df0fffc10f11a15dbbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
<% unless type_name == 'robject' %>
__global__ void <%="cumo_#{c_iter}_stride_kernel"%>(char *p1, char *p2, char *p3, char *p4, ssize_t s1, ssize_t s2, ssize_t s3, ssize_t s4, uint64_t n)
{
for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
m_<%=name%>(*(dtype*)(p1+(i*s1)),*(dtype*)(p2+(i*s2)),*(dtype*)(p3+(i*s3)), *(dtype*)(p4+(i*s4)));
}
}
void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, char *p2, char *p3, char *p4, ssize_t s1, ssize_t s2, ssize_t s3, ssize_t s4, uint64_t n)
{
size_t gridDim = get_gridDim(n);
size_t blockDim = get_blockDim(n);
hipLaunchKernelGGL(( <%="cumo_#{c_iter}_stride_kernel"%>), dim3(gridDim), dim3(blockDim), 0, 0, p1,p2,p3,p4,s1,s2,s3,s4,n);
}
<% end %>
| 23fe0a194a42ce4f678c0df0fffc10f11a15dbbf.cu | <% unless type_name == 'robject' %>
__global__ void <%="cumo_#{c_iter}_stride_kernel"%>(char *p1, char *p2, char *p3, char *p4, ssize_t s1, ssize_t s2, ssize_t s3, ssize_t s4, uint64_t n)
{
for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
m_<%=name%>(*(dtype*)(p1+(i*s1)),*(dtype*)(p2+(i*s2)),*(dtype*)(p3+(i*s3)), *(dtype*)(p4+(i*s4)));
}
}
void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, char *p2, char *p3, char *p4, ssize_t s1, ssize_t s2, ssize_t s3, ssize_t s4, uint64_t n)
{
size_t gridDim = get_gridDim(n);
size_t blockDim = get_blockDim(n);
<%="cumo_#{c_iter}_stride_kernel"%><<<gridDim, blockDim>>>(p1,p2,p3,p4,s1,s2,s3,s4,n);
}
<% end %>
|
2d1ed93738d4bb7a9258a7fb0c05a547b9fb2b07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file BounceBackNVEGPU.cu
* \brief Template specialization of CUDA kernels for BounceBackNVEGPU geometries. Each instance of the
* nve_bounce_step_one must be templated explicitly for each geometry.
*/
#include "BounceBackNVEGPU.cuh"
#include "StreamingGeometry.h"
namespace mpcd
{
namespace gpu
{
//! Template instantiation of slit geometry streaming
template hipError_t nve_bounce_step_one<mpcd::detail::SlitGeometry>
(const bounce_args_t& args, const mpcd::detail::SlitGeometry& geom);
//! Template instantiation of slit pore geometry streaming
template hipError_t nve_bounce_step_one<mpcd::detail::SlitPoreGeometry>
(const bounce_args_t& args, const mpcd::detail::SlitPoreGeometry& geom);
namespace kernel
{
//! Kernel for applying second step of velocity Verlet algorithm with bounce back
/*!
* \param d_vel Particle velocities
* \param d_accel Particle accelerations
* \param d_net_force Net force on each particle
* \param d_group Indexes in particle group
* \param dt Timestep
* \param N Number of particles in group
*
* \b Implementation:
* Using one thread per particle, the particle velocities are updated according to the second step of the velocity Verlet
* algorithm. This is the standard update as in MD, and is only reimplemented here in case future modifications are necessary.
*/
__global__ void nve_bounce_step_two(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_net_force,
const unsigned int *d_group,
const Scalar dt,
const unsigned int N)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
const unsigned int pid = d_group[idx];
const Scalar4 net_force = d_net_force[pid];
Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z);
Scalar4 vel = d_vel[pid];
accel.x /= vel.w;
accel.y /= vel.w;
accel.z /= vel.w;
// then, update the velocity
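// v(t+dt) = v(t+dt/2) + (dt/2)*a(t+dt): the half-kick that completes the velocity Verlet step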
vel.x += Scalar(0.5) * accel.x * dt;
vel.y += Scalar(0.5) * accel.y * dt;
vel.z += Scalar(0.5) * accel.z * dt;
d_vel[pid] = vel;
d_accel[pid] = accel;
}
} // end namespace kernel
/*!
* \param d_vel Particle velocities
* \param d_accel Particle accelerations
* \param d_net_force Net force on each particle
* \param d_group Indexes in particle group
* \param dt Timestep
* \param N Number of particles in group
* \param block_size Number of threads per block
*
* \sa kernel::nve_bounce_step_two
*/
hipError_t nve_bounce_step_two(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_net_force,
const unsigned int *d_group,
const Scalar dt,
const unsigned int N,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)kernel::nve_bounce_step_two);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N / run_block_size + 1);
hipLaunchKernelGGL(( kernel::nve_bounce_step_two), dim3(grid), dim3(run_block_size), 0, 0, d_vel, d_accel, d_net_force, d_group, dt, N);
return hipSuccess;
}
} // end namespace gpu
} // end namespace mpcd
| 2d1ed93738d4bb7a9258a7fb0c05a547b9fb2b07.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file BounceBackNVEGPU.cu
* \brief Template specialization of CUDA kernels for BounceBackNVEGPU geometries. Each instance of the
* nve_bounce_step_one must be templated explicitly for each geometry.
*/
#include "BounceBackNVEGPU.cuh"
#include "StreamingGeometry.h"
namespace mpcd
{
namespace gpu
{
//! Template instantiation of slit geometry streaming
template cudaError_t nve_bounce_step_one<mpcd::detail::SlitGeometry>
(const bounce_args_t& args, const mpcd::detail::SlitGeometry& geom);
//! Template instantiation of slit pore geometry streaming
template cudaError_t nve_bounce_step_one<mpcd::detail::SlitPoreGeometry>
(const bounce_args_t& args, const mpcd::detail::SlitPoreGeometry& geom);
namespace kernel
{
//! Kernel for applying second step of velocity Verlet algorithm with bounce back
/*!
* \param d_vel Particle velocities
* \param d_accel Particle accelerations
* \param d_net_force Net force on each particle
* \param d_group Indexes in particle group
* \param dt Timestep
* \param N Number of particles in group
*
* \b Implementation:
* Using one thread per particle, the particle velocities are updated according to the second step of the velocity Verlet
* algorithm. This is the standard update as in MD, and is only reimplemented here in case future modifications are necessary.
*/
__global__ void nve_bounce_step_two(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_net_force,
const unsigned int *d_group,
const Scalar dt,
const unsigned int N)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
const unsigned int pid = d_group[idx];
const Scalar4 net_force = d_net_force[pid];
Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z);
Scalar4 vel = d_vel[pid];
accel.x /= vel.w;
accel.y /= vel.w;
accel.z /= vel.w;
// then, update the velocity
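// v(t+dt) = v(t+dt/2) + (dt/2)*a(t+dt): the half-kick that completes the velocity Verlet step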
vel.x += Scalar(0.5) * accel.x * dt;
vel.y += Scalar(0.5) * accel.y * dt;
vel.z += Scalar(0.5) * accel.z * dt;
d_vel[pid] = vel;
d_accel[pid] = accel;
}
} // end namespace kernel
/*!
* \param d_vel Particle velocities
* \param d_accel Particle accelerations
* \param d_net_force Net force on each particle
* \param d_group Indexes in particle group
* \param dt Timestep
* \param N Number of particles in group
* \param block_size Number of threads per block
*
* \sa kernel::nve_bounce_step_two
*/
cudaError_t nve_bounce_step_two(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_net_force,
const unsigned int *d_group,
const Scalar dt,
const unsigned int N,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)kernel::nve_bounce_step_two);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N / run_block_size + 1);
kernel::nve_bounce_step_two<<<grid, run_block_size>>>(d_vel, d_accel, d_net_force, d_group, dt, N);
return cudaSuccess;
}
} // end namespace gpu
} // end namespace mpcd
|
3130caef4bb9511250c2bce8f59600cf23c2e9ab.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cassert>
#include <hipcub/hipcub.hpp> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/skip_layernorm_op_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int SkipLayerNormPluginDynamic::initialize() {
hipMalloc(&bias_gpu_, sizeof(float) * bias_size_);
hipMemcpy(bias_gpu_, bias_, bias_size_ * sizeof(float),
hipMemcpyHostToDevice);
hipMalloc(&scale_gpu_, sizeof(float) * scale_size_);
hipMemcpy(scale_gpu_, scale_, scale_size_ * sizeof(float),
hipMemcpyHostToDevice);
return 0;
}
size_t SkipLayerNormPluginDynamic::getSerializationSize() const { return 0; }
void SkipLayerNormPluginDynamic::serialize(void *buffer) const {}
nvinfer1::DimsExprs SkipLayerNormPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) {
PADDLE_ENFORCE_EQ(
inputs[0].nbDims, 5,
platform::errors::InvalidArgument(
"The Input dim of the SkipLayernorm should be 5, but it's (%d) now.",
inputs[0].nbDims));
return inputs[0];
}
bool SkipLayerNormPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
#ifdef SUPPORTS_CUDA_FP16
if (ban_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
#else
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
#endif
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
if (pos == 1) {
return in.type == prev.type && in.format == prev.format;
}
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SkipLayerNormPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types, int nb_inputs) const {
PADDLE_ENFORCE_EQ(index, 0,
platform::errors::InvalidArgument(
"The SkipLayerNorm Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true, platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int SkipLayerNormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs,
void *const *outputs, void *workspace, hipStream_t stream) {
auto input_dims = input_desc[0].dims;
size_t num = ProductDim(input_dims);
int hidden = input_dims.d[2];
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
const float *input1 = static_cast<const float *>(inputs[0]);
const float *input2 = static_cast<const float *>(inputs[1]);
float *output = static_cast<float *>(outputs[0]);
operators::math::SkipLayerNormFunctor<float> skip_layer_norm_func;
skip_layer_norm_func(num, hidden, input1, input2, scale_gpu_, bias_gpu_,
output, eps_, stream);
} else if (input_type == nvinfer1::DataType::kHALF) {
#ifdef SUPPORTS_CUDA_FP16
const half *input1 = static_cast<const half *>(inputs[0]);
const half *input2 = static_cast<const half *>(inputs[1]);
half *output = static_cast<half *>(outputs[0]);
operators::math::SkipLayerNormFunctor<half> skip_layer_norm_func;
skip_layer_norm_func(num, hidden, input1, input2, scale_gpu_, bias_gpu_,
output, static_cast<half>(eps_), stream);
#else
PADDLE_THROW(platform::errors::Fatal(
"The cuda archs you specific should greater than 600."));
#endif
} else {
PADDLE_THROW(platform::errors::Fatal(
"The SkipLayerNorm TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 3130caef4bb9511250c2bce8f59600cf23c2e9ab.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime.h>
#include <stdio.h>
#include <cassert>
#include <cub/cub.cuh> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/skip_layernorm_op_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int SkipLayerNormPluginDynamic::initialize() {
cudaMalloc(&bias_gpu_, sizeof(float) * bias_size_);
cudaMemcpy(bias_gpu_, bias_, bias_size_ * sizeof(float),
cudaMemcpyHostToDevice);
cudaMalloc(&scale_gpu_, sizeof(float) * scale_size_);
cudaMemcpy(scale_gpu_, scale_, scale_size_ * sizeof(float),
cudaMemcpyHostToDevice);
return 0;
}
size_t SkipLayerNormPluginDynamic::getSerializationSize() const { return 0; }
void SkipLayerNormPluginDynamic::serialize(void *buffer) const {}
nvinfer1::DimsExprs SkipLayerNormPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) {
PADDLE_ENFORCE_EQ(
inputs[0].nbDims, 5,
platform::errors::InvalidArgument(
"The Input dim of the SkipLayernorm should be 5, but it's (%d) now.",
inputs[0].nbDims));
return inputs[0];
}
bool SkipLayerNormPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
#ifdef SUPPORTS_CUDA_FP16
if (ban_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
#else
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
#endif
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
if (pos == 1) {
return in.type == prev.type && in.format == prev.format;
}
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SkipLayerNormPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types, int nb_inputs) const {
PADDLE_ENFORCE_EQ(index, 0,
platform::errors::InvalidArgument(
"The SkipLayerNorm Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true, platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int SkipLayerNormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs,
void *const *outputs, void *workspace, cudaStream_t stream) {
auto input_dims = input_desc[0].dims;
size_t num = ProductDim(input_dims);
int hidden = input_dims.d[2];
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
const float *input1 = static_cast<const float *>(inputs[0]);
const float *input2 = static_cast<const float *>(inputs[1]);
float *output = static_cast<float *>(outputs[0]);
operators::math::SkipLayerNormFunctor<float> skip_layer_norm_func;
skip_layer_norm_func(num, hidden, input1, input2, scale_gpu_, bias_gpu_,
output, eps_, stream);
} else if (input_type == nvinfer1::DataType::kHALF) {
#ifdef SUPPORTS_CUDA_FP16
const half *input1 = static_cast<const half *>(inputs[0]);
const half *input2 = static_cast<const half *>(inputs[1]);
half *output = static_cast<half *>(outputs[0]);
operators::math::SkipLayerNormFunctor<half> skip_layer_norm_func;
skip_layer_norm_func(num, hidden, input1, input2, scale_gpu_, bias_gpu_,
output, static_cast<half>(eps_), stream);
#else
PADDLE_THROW(platform::errors::Fatal(
"The cuda archs you specific should greater than 600."));
#endif
} else {
PADDLE_THROW(platform::errors::Fatal(
"The SkipLayerNorm TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
f3e98e6d03a226bbe8530b982027d740e5f078ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDAFunc.cuh"
// Conv, Pool layer
float *dev_Conv_kernel[nConvLayer], *dev_Conv_grad[nConvLayer], *dev_Conv_m_prev[nConvLayer], *dev_Conv_v_prev[nConvLayer];
char *devicePoolMark[nPoolLayer];
// FC Layer
float *dev_FC_w[nFCLayer], *dev_FC_grad[nFCLayer], *dev_FC_m_prev[nFCLayer], *dev_FC_v_prev[nFCLayer];
// shared node
float *dev_Node[nCnPLayer + nFCLayer + 1], *dev_Node_delta[nCnPLayer + nFCLayer];
// test drop node
float *dev_drop[nFCLayer - 1];
__global__ void forward_layer(float *d_weights, int weightOffset, int weightsPerNeuron, float *d_ins, int neuronsPrev, float *d_outs, bool softmax)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int wid = threadIdx.x * weightsPerNeuron + (blockIdx.x * weightsPerNeuron);
float a = .0f;
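// accumulate the weighted sum for this output neuron; each neuron owns a contiguous block of weightsPerNeuron weights starting at wid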
for (int i = 0; i < weightsPerNeuron; ++i){
a += d_weights[wid + i] * d_ins[i];
//printf("\n [tid:%d], d_weights[%d](%.1f) * d_ins[%d](%.1f) = %.1f", tid, wid+i, d_weights[wid+i], i, d_ins[i]);
//printf("\n [tid:%d], d_weights[%d](%f) * d_ins[%d](%f) = %f", tid, wid + i, d_weights[wid + i], i, d_ins[i], a);
//printf("d_outs[%d] : %f\n", tid, a);
}
if (softmax) d_outs[tid] = a;
else d_outs[tid] = (a > 0.0f ? a : a*0.01f);
}
__global__ void CUDA_Conv2D(float *I, float* M, float *P, int inmap, int outmap, int width, int height, int kernel_size, int padding)
{
//CUDA_Conv2D << < Conv->Outmap, Conv->Output_data->y * Conv->Output_data->z >> > (dev_Node[layer_idx], dev_Conv_kernel[kernel_num], dev_Node[layer_idx + 1],
//Conv->Inmap, Conv->Outmap, Conv->Input_data->y, Conv->Input_data->z, Conv->kernel->y, Padding);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int idx = 0, tidx = 0, mask_idx = 0, oddCnt = 0;
int x, y, outputSize, outputDim, _padding = 0;
int mask_dim = kernel_size*kernel_size;
float val = 0.0;
if (padding == 0){
outputSize = (width - (kernel_size - 1)) * (width - (kernel_size - 1));
outputDim = width - (kernel_size - 1);
}
else{
outputSize = width * height;
outputDim = width;
_padding = kernel_size / 2;
}
for (int mapCnt = 0; mapCnt < inmap; mapCnt++, tidx += (width * (outputDim))){
//printf("\n mapCnt : %d", mapCnt);
idx = tidx + threadIdx.x;
x = idx / outputDim;
y = idx % outputDim;
oddCnt = mapCnt * mask_dim;
for (int i = 0; i < kernel_size; i++){
int xx = x + i - _padding;
for (int j = 0; j < kernel_size; j++){
int yy = y + j - _padding;
// remove this conditional later
if (padding == 0){
val += I[xx * width + yy] * M[((tid / (outputSize)) * mask_dim * inmap + oddCnt) + (i * kernel_size + j)];
//printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f , i : %d , j : %d, tidx : %d, width : %d, outputDim : %d",
//mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
//I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val, i, j, tidx,width, outputDim);
//printf("\n blockIdx : %d, threadIdx : %d, tidx : %d , blockDimx : %d, width : %d, outputDim : %d", tid, idx, blockIdx.x, threadIdx.x, tidx, blockDim.x, width, outputDim);
}
else{
if ((xx >= 0 && yy >= 0) &&
((xx < (width*(mapCnt + 1))) &&
(yy < height)) &&
((xx * width + yy) < (outputSize * (mapCnt + 1))) &&
((xx * width + yy) >= tidx)){
val += I[xx * width + yy] * M[((tid / (outputSize)) * mask_dim * inmap + oddCnt) + (i * kernel_size + j)];
/*
printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f , i : %d , j : %d, tidx : %d, width : %d, outputDim : %d",
mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
//I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val, i, j, tidx, width, outputDim);
*/
//printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f x : %d, y : %d, outputDim : %d",
//mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
//I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val, x, y, outputDim);
/*
if (tid == 0){
printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f x : %d, y : %d",
mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val, x, y);
}
*/
}
}
/*
printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f , i : %d , j : %d, tidx : %d, width : %d, outputDim : %d",
mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val, i, j, tidx, width, outputDim);
*/
if (tid == 0){
//printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f ",
//mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
//I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val);
}
}
}
P[tid] = (val > 0 ? val : val * 0.01); // Act. func.
//printf("\n P[%d] : %.3f", tid, P[tid]);
//if (tid == 0) printf("\n P[%d] : %.3f", tid, P[tid]);
}
}
__global__ void CUDA_MaxPooling(float *I, float *P, char *pool_mark, int inmap, int width, int height){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int idx = 0, tidx = 0, cnt = 0;
char pool_idx = 0;
const int outputDim = width / 2;
//float _max = 0;
int init_h = (threadIdx.x / outputDim) * 2;
int init_w = (threadIdx.x % outputDim) * 2;
float _max = I[(blockIdx.x * width * width) + (init_h * width + init_w)];
//printf("\n B : %d, T : %d, tid :%d, Max : %.4f", blockIdx.x, threadIdx.x, tid, _max);
//const int outputDim = width / 2;
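// scan the 2x2 window; pool_mark records which of the four cells won, presumably so the backward pass can route the gradient to it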
for (int h = 0; h < 2; h++){
int _h = h + (threadIdx.x / outputDim) * 2;
for (int w = 0; w < 2; w++){
int _w = w + (threadIdx.x % outputDim) * 2; // modified
if (I[(blockIdx.x * width * width) + (_h * width + _w)] > _max){
_max = I[(blockIdx.x * width * width) + (_h * width + _w)];
pool_idx = cnt;
}
cnt++;
}
}
P[tid] = _max;
pool_mark[tid] = pool_idx;
//printf("\n B:%d,T:%d, p[%d] : %.2f , pool_mark[%d] : %d",
// blockIdx.x, threadIdx.x, tid, P[tid], tid, pool_mark[tid]);
}
////////////////////////////////////////////////////////////
void CUDA_CnP_MemAlloc(ConvLayer *Conv, PoolLayer *Pool){
for (int i = 0; i < nConvLayer; i++){
hipMalloc((void **)&dev_Conv_kernel[i], (Conv[i].Inmap * Conv[i].Outmap) * (Conv[i].kernel->y * Conv[i].kernel->z) * sizeof(float));
hipMalloc((void **)&dev_Conv_grad[i], (Conv[i].Inmap * Conv[i].Outmap) * (Conv[i].kernel->y * Conv[i].kernel->z) * sizeof(float));
hipMalloc((void **)&dev_Conv_m_prev[i], (Conv[i].Inmap * Conv[i].Outmap) * (Conv[i].kernel->y * Conv[i].kernel->z) * sizeof(float));
hipMalloc((void **)&dev_Conv_v_prev[i], (Conv[i].Inmap * Conv[i].Outmap) * (Conv[i].kernel->y * Conv[i].kernel->z) * sizeof(float));
reshape3Dto1D(Conv[i].kernel);
hipMemcpy(dev_Conv_kernel[i], Conv[i].kernel->mem1D,
(Conv[i].Inmap * Conv[i].Outmap) * (Conv[i].kernel->y * Conv[i].kernel->z) * sizeof(float), hipMemcpyHostToDevice);
}
for (int i = 0; i < nPoolLayer; i++){
hipMalloc((void **)&devicePoolMark[i], Pool[i].Outmap * Pool[i].pool_mark->y * Pool[i].pool_mark->z * sizeof(char));
}
}
void CUDA_FC_MemAlloc(FCLayer *FC){
for (int i = 0; i < nFCLayer; i++){
if (i < nFCLayer - 1){
hipMalloc((void**)&dev_FC_w[i], FC[i].Input_size * FC[i + 1].Input_size * sizeof(float));
hipMalloc((void**)&dev_FC_grad[i], FC[i].Input_size * FC[i + 1].Input_size * sizeof(float));
hipMalloc((void**)&dev_FC_m_prev[i], FC[i].Input_size * FC[i + 1].Input_size * sizeof(float));
hipMalloc((void**)&dev_FC_v_prev[i], FC[i].Input_size * FC[i + 1].Input_size * sizeof(float));
hipMemcpy(dev_FC_w[i], FC[i].Weight->mem1D, FC[i].Input_size * FC[i + 1].Input_size * sizeof(float), hipMemcpyHostToDevice);
}
else{
hipMalloc((void**)&dev_FC_w[i], FC[i].Input_size * FC[i].Output_size * sizeof(float));
hipMalloc((void**)&dev_FC_grad[i], FC[i].Input_size * FC[i].Output_size* sizeof(float));
hipMalloc((void**)&dev_FC_m_prev[i], FC[i].Input_size * FC[i].Output_size * sizeof(float));
hipMalloc((void**)&dev_FC_v_prev[i], FC[i].Input_size * FC[i].Output_size * sizeof(float));
hipMemcpy(dev_FC_w[i], FC[i].Weight->mem1D, FC[i].Input_size * FC[i].Output_size * sizeof(float), hipMemcpyHostToDevice);
}
}
}
void CUDA_ioNode_MemAlloc(MemBlock32F *ioNode){
// input node
hipMalloc((void**)&dev_Node[0], ioNode[0].total * sizeof(float));
for (int i = 1; i < nCnPLayer + nFCLayer + 1; i++){
hipMalloc((void**)&dev_Node[i], ioNode[i].total * sizeof(float));
hipMalloc((void**)&dev_Node_delta[i], ioNode[i].total * sizeof(float));
}
#if Dropout
for (int i = 1; i < nFCLayer; i++)
hipMalloc((void**)&dev_drop[i - 1], (ioNode + nCnPLayer + i)->total * sizeof(int));
#endif
}
void CUDA_FC_Forward(FCLayer *FC, int &layer_idx){
int block = 0, thread = 0;
int offset = (FC - 1)->Input_size * (FC - 1)->Output_size;
bool softmax = false;
if (layer_idx == 0) offset = 0;
if (layer_idx == nFCLayer - 1) softmax = true;
if (FC->Output_size % MAX_THREAD == 0){
block = FC->Output_size / MAX_THREAD;
}
else{
block = (FC->Output_size / MAX_THREAD) + 1;
}
if (block > 1) thread = MAX_THREAD;
else thread = FC->Output_size;
//printf("forward layer\n");
//printf("offset : %d, FC->Input_size : %d\n", offset, FC->Input_size);
//printf("block : %d, thread : %d\n", block, thread);
// forward_layer << < 1, FC->Output_size >> > (dev_FC_w[layer_idx], offset, FC->Input_size, dev_Node[nCnPLayer + layer_idx], FC->Input_size, dev_Node[nCnPLayer + layer_idx + 1], softmax);
forward_layer << < block, thread >> > (dev_FC_w[layer_idx], offset, FC->Input_size, dev_Node[nCnPLayer + layer_idx], FC->Input_size, dev_Node[nCnPLayer + layer_idx + 1], softmax);
layer_idx++;
}
void CUDA_Conv_Forward(ConvLayer *Conv, int Padding, int &layer_idx, int &kernel_num){
//CUDA_Conv_Forward(&Conv[0], ConvPad_info[0], CnP_layer_idx, kernel_num);
CUDA_Conv2D << < Conv->Outmap, Conv->Output_data->y * Conv->Output_data->z >> > (dev_Node[layer_idx], dev_Conv_kernel[kernel_num], dev_Node[layer_idx + 1],
Conv->Inmap, Conv->Outmap, Conv->Input_data->y, Conv->Input_data->z, Conv->kernel->y, Padding);
// used for update.. remove once the update is ported to CUDA
hipMemcpy(Conv->Output_data->mem1D, dev_Node[layer_idx], Conv->Output_data->total * sizeof(float), hipMemcpyDeviceToHost);
reshape1Dto3D(Conv->Output_data);
layer_idx++; // increment the index
kernel_num++;
}
void CUDA_Pool_Forward(PoolLayer *Pool, int &layer_idx, int &poolMark_num){
CUDA_MaxPooling << < Pool->Inmap, Pool->Output_data->y * Pool->Output_data->z >> >(dev_Node[layer_idx], dev_Node[layer_idx + 1], devicePoolMark[poolMark_num]
, Pool->Inmap, Pool->Input_data->y, Pool->Input_data->z);
hipMemcpy(Pool->Output_data->mem1D, dev_Node[layer_idx + 1], Pool->Output_data->total * sizeof(float), hipMemcpyDeviceToHost);
reshape1Dto3D(Pool->Output_data); // used for update.. remove once the update is ported to CUDA
layer_idx++; // increment the index
poolMark_num++;
}
| f3e98e6d03a226bbe8530b982027d740e5f078ca.cu | #include "CUDAFunc.cuh"
// Conv, Pool layer
float *dev_Conv_kernel[nConvLayer], *dev_Conv_grad[nConvLayer], *dev_Conv_m_prev[nConvLayer], *dev_Conv_v_prev[nConvLayer];
char *devicePoolMark[nPoolLayer];
// FC Layer
float *dev_FC_w[nFCLayer], *dev_FC_grad[nFCLayer], *dev_FC_m_prev[nFCLayer], *dev_FC_v_prev[nFCLayer];
// shared node
float *dev_Node[nCnPLayer + nFCLayer + 1], *dev_Node_delta[nCnPLayer + nFCLayer];
// test drop node
float *dev_drop[nFCLayer - 1];
__global__ void forward_layer(float *d_weights, int weightOffset, int weightsPerNeuron, float *d_ins, int neuronsPrev, float *d_outs, bool softmax)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int wid = threadIdx.x * weightsPerNeuron + (blockIdx.x * weightsPerNeuron);
float a = .0f;
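// accumulate the weighted sum for this output neuron; each neuron owns a contiguous block of weightsPerNeuron weights starting at wid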
for (int i = 0; i < weightsPerNeuron; ++i){
a += d_weights[wid + i] * d_ins[i];
//printf("\n [tid:%d], d_weights[%d](%.1f) * d_ins[%d](%.1f) = %.1f", tid, wid+i, d_weights[wid+i], i, d_ins[i]);
//printf("\n [tid:%d], d_weights[%d](%f) * d_ins[%d](%f) = %f", tid, wid + i, d_weights[wid + i], i, d_ins[i], a);
//printf("d_outs[%d] : %f\n", tid, a);
}
if (softmax) d_outs[tid] = a;
else d_outs[tid] = (a > 0.0f ? a : a*0.01f);
}
__global__ void CUDA_Conv2D(float *I, float* M, float *P, int inmap, int outmap, int width, int height, int kernel_size, int padding)
{
//CUDA_Conv2D << < Conv->Outmap, Conv->Output_data->y * Conv->Output_data->z >> > (dev_Node[layer_idx], dev_Conv_kernel[kernel_num], dev_Node[layer_idx + 1],
//Conv->Inmap, Conv->Outmap, Conv->Input_data->y, Conv->Input_data->z, Conv->kernel->y, Padding);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int idx = 0, tidx = 0, mask_idx = 0, oddCnt = 0;
int x, y, outputSize, outputDim, _padding = 0;
int mask_dim = kernel_size*kernel_size;
float val = 0.0;
if (padding == 0){
outputSize = (width - (kernel_size - 1)) * (width - (kernel_size - 1));
outputDim = width - (kernel_size - 1);
}
else{
outputSize = width * height;
outputDim = width;
_padding = kernel_size / 2;
}
for (int mapCnt = 0; mapCnt < inmap; mapCnt++, tidx += (width * (outputDim))){
//printf("\n mapCnt : %d", mapCnt);
idx = tidx + threadIdx.x;
x = idx / outputDim;
y = idx % outputDim;
oddCnt = mapCnt * mask_dim;
for (int i = 0; i < kernel_size; i++){
int xx = x + i - _padding;
for (int j = 0; j < kernel_size; j++){
int yy = y + j - _padding;
// remove this conditional later
if (padding == 0){
val += I[xx * width + yy] * M[((tid / (outputSize)) * mask_dim * inmap + oddCnt) + (i * kernel_size + j)];
//printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f , i : %d , j : %d, tidx : %d, width : %d, outputDim : %d",
//mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
//I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val, i, j, tidx,width, outputDim);
//printf("\n blockIdx : %d, threadIdx : %d, tidx : %d , blockDimx : %d, width : %d, outputDim : %d", tid, idx, blockIdx.x, threadIdx.x, tidx, blockDim.x, width, outputDim);
}
else{
if ((xx >= 0 && yy >= 0) &&
((xx < (width*(mapCnt + 1))) &&
(yy < height)) &&
((xx * width + yy) < (outputSize * (mapCnt + 1))) &&
((xx * width + yy) >= tidx)){
val += I[xx * width + yy] * M[((tid / (outputSize)) * mask_dim * inmap + oddCnt) + (i * kernel_size + j)];
/*
printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f , i : %d , j : %d, tidx : %d, width : %d, outputDim : %d",
mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
//I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val, i, j, tidx, width, outputDim);
*/
//printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f x : %d, y : %d, outputDim : %d",
//mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
//I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val, x, y, outputDim);
/*
if (tid == 0){
printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f x : %d, y : %d",
mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val, x, y);
}
*/
}
}
/*
printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f , i : %d , j : %d, tidx : %d, width : %d, outputDim : %d",
mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val, i, j, tidx, width, outputDim);
*/
if (tid == 0){
//printf("\n mapCnt : %d, idx : %d, tid : %d, val += I[%d] * M[%d] : %.4f * %.4f = %.3f ",
//mapCnt, idx, tid, xx * width + yy, ((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j),
//I[xx * width + yy], M[((tid / (outputSize))*mask_dim*inmap + oddCnt) + (i * kernel_size + j)], val);
}
}
}
P[tid] = (val > 0 ? val : val * 0.01); // Act. func.
//printf("\n P[%d] : %.3f", tid, P[tid]);
//if (tid == 0) printf("\n P[%d] : %.3f", tid, P[tid]);
}
}
__global__ void CUDA_MaxPooling(float *I, float *P, char *pool_mark, int inmap, int width, int height){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int idx = 0, tidx = 0, cnt = 0;
char pool_idx = 0;
const int outputDim = width / 2;
//float _max = 0;
int init_h = (threadIdx.x / outputDim) * 2;
int init_w = (threadIdx.x % outputDim) * 2;
float _max = I[(blockIdx.x * width * width) + (init_h * width + init_w)];
//printf("\n B : %d, T : %d, tid :%d, Max : %.4f", blockIdx.x, threadIdx.x, tid, _max);
//const int outputDim = width / 2;
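// scan the 2x2 window; pool_mark records which of the four cells won, presumably so the backward pass can route the gradient to it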
for (int h = 0; h < 2; h++){
int _h = h + (threadIdx.x / outputDim) * 2;
for (int w = 0; w < 2; w++){
int _w = w + (threadIdx.x % outputDim) * 2; // modified
if (I[(blockIdx.x * width * width) + (_h * width + _w)] > _max){
_max = I[(blockIdx.x * width * width) + (_h * width + _w)];
pool_idx = cnt;
}
cnt++;
}
}
P[tid] = _max;
pool_mark[tid] = pool_idx;
//printf("\n B:%d,T:%d, p[%d] : %.2f , pool_mark[%d] : %d",
// blockIdx.x, threadIdx.x, tid, P[tid], tid, pool_mark[tid]);
}
////////////////////////////////////////////////////////////
void CUDA_CnP_MemAlloc(ConvLayer *Conv, PoolLayer *Pool){
for (int i = 0; i < nConvLayer; i++){
cudaMalloc((void **)&dev_Conv_kernel[i], (Conv[i].Inmap * Conv[i].Outmap) * (Conv[i].kernel->y * Conv[i].kernel->z) * sizeof(float));
cudaMalloc((void **)&dev_Conv_grad[i], (Conv[i].Inmap * Conv[i].Outmap) * (Conv[i].kernel->y * Conv[i].kernel->z) * sizeof(float));
cudaMalloc((void **)&dev_Conv_m_prev[i], (Conv[i].Inmap * Conv[i].Outmap) * (Conv[i].kernel->y * Conv[i].kernel->z) * sizeof(float));
cudaMalloc((void **)&dev_Conv_v_prev[i], (Conv[i].Inmap * Conv[i].Outmap) * (Conv[i].kernel->y * Conv[i].kernel->z) * sizeof(float));
reshape3Dto1D(Conv[i].kernel);
cudaMemcpy(dev_Conv_kernel[i], Conv[i].kernel->mem1D,
(Conv[i].Inmap * Conv[i].Outmap) * (Conv[i].kernel->y * Conv[i].kernel->z) * sizeof(float), cudaMemcpyHostToDevice);
}
for (int i = 0; i < nPoolLayer; i++){
cudaMalloc((void **)&devicePoolMark[i], Pool[i].Outmap * Pool[i].pool_mark->y * Pool[i].pool_mark->z * sizeof(char));
}
}
void CUDA_FC_MemAlloc(FCLayer *FC){
for (int i = 0; i < nFCLayer; i++){
if (i < nFCLayer - 1){
cudaMalloc((void**)&dev_FC_w[i], FC[i].Input_size * FC[i + 1].Input_size * sizeof(float));
cudaMalloc((void**)&dev_FC_grad[i], FC[i].Input_size * FC[i + 1].Input_size * sizeof(float));
cudaMalloc((void**)&dev_FC_m_prev[i], FC[i].Input_size * FC[i + 1].Input_size * sizeof(float));
cudaMalloc((void**)&dev_FC_v_prev[i], FC[i].Input_size * FC[i + 1].Input_size * sizeof(float));
cudaMemcpy(dev_FC_w[i], FC[i].Weight->mem1D, FC[i].Input_size * FC[i + 1].Input_size * sizeof(float), cudaMemcpyHostToDevice);
}
else{
cudaMalloc((void**)&dev_FC_w[i], FC[i].Input_size * FC[i].Output_size * sizeof(float));
cudaMalloc((void**)&dev_FC_grad[i], FC[i].Input_size * FC[i].Output_size* sizeof(float));
cudaMalloc((void**)&dev_FC_m_prev[i], FC[i].Input_size * FC[i].Output_size * sizeof(float));
cudaMalloc((void**)&dev_FC_v_prev[i], FC[i].Input_size * FC[i].Output_size * sizeof(float));
cudaMemcpy(dev_FC_w[i], FC[i].Weight->mem1D, FC[i].Input_size * FC[i].Output_size * sizeof(float), cudaMemcpyHostToDevice);
}
}
}
void CUDA_ioNode_MemAlloc(MemBlock32F *ioNode){
// input node
cudaMalloc((void**)&dev_Node[0], ioNode[0].total * sizeof(float));
for (int i = 1; i < nCnPLayer + nFCLayer + 1; i++){
cudaMalloc((void**)&dev_Node[i], ioNode[i].total * sizeof(float));
cudaMalloc((void**)&dev_Node_delta[i], ioNode[i].total * sizeof(float));
}
#if Dropout
for (int i = 1; i < nFCLayer; i++)
cudaMalloc((void**)&dev_drop[i - 1], (ioNode + nCnPLayer + i)->total * sizeof(int));
#endif
}
void CUDA_FC_Forward(FCLayer *FC, int &layer_idx){
int block = 0, thread = 0;
int offset = (FC - 1)->Input_size * (FC - 1)->Output_size;
bool softmax = false;
if (layer_idx == 0) offset = 0;
if (layer_idx == nFCLayer - 1) softmax = true;
if (FC->Output_size % MAX_THREAD == 0){
block = FC->Output_size / MAX_THREAD;
}
else{
block = (FC->Output_size / MAX_THREAD) + 1;
}
if (block > 1) thread = MAX_THREAD;
else thread = FC->Output_size;
//printf("forward layer\n");
//printf("offset : %d, FC->Input_size : %d\n", offset, FC->Input_size);
//printf("block : %d, thread : %d\n", block, thread);
// forward_layer << < 1, FC->Output_size >> > (dev_FC_w[layer_idx], offset, FC->Input_size, dev_Node[nCnPLayer + layer_idx], FC->Input_size, dev_Node[nCnPLayer + layer_idx + 1], softmax);
forward_layer << < block, thread >> > (dev_FC_w[layer_idx], offset, FC->Input_size, dev_Node[nCnPLayer + layer_idx], FC->Input_size, dev_Node[nCnPLayer + layer_idx + 1], softmax);
layer_idx++;
}
void CUDA_Conv_Forward(ConvLayer *Conv, int Padding, int &layer_idx, int &kernel_num){
//CUDA_Conv_Forward(&Conv[0], ConvPad_info[0], CnP_layer_idx, kernel_num);
CUDA_Conv2D << < Conv->Outmap, Conv->Output_data->y * Conv->Output_data->z >> > (dev_Node[layer_idx], dev_Conv_kernel[kernel_num], dev_Node[layer_idx + 1],
Conv->Inmap, Conv->Outmap, Conv->Input_data->y, Conv->Input_data->z, Conv->kernel->y, Padding);
// update에 사용.. update CUDA 적용 시 제거
cudaMemcpy(Conv->Output_data->mem1D, dev_Node[layer_idx], Conv->Output_data->total * sizeof(float), cudaMemcpyDeviceToHost);
reshape1Dto3D(Conv->Output_data);
layer_idx++; // 인덱스 증가
kernel_num++;
}
void CUDA_Pool_Forward(PoolLayer *Pool, int &layer_idx, int &poolMark_num){
CUDA_MaxPooling << < Pool->Inmap, Pool->Output_data->y * Pool->Output_data->z >> >(dev_Node[layer_idx], dev_Node[layer_idx + 1], devicePoolMark[poolMark_num]
, Pool->Inmap, Pool->Input_data->y, Pool->Input_data->z);
cudaMemcpy(Pool->Output_data->mem1D, dev_Node[layer_idx + 1], Pool->Output_data->total * sizeof(float), cudaMemcpyDeviceToHost);
reshape1Dto3D(Pool->Output_data); // update에 사용.. update CUDA 적용 시 제거
layer_idx++; // 인덱스 증가
poolMark_num++;
}
|
6d0c5cc7ae8c7d94bd5e058c87e9fa3f3eca2834.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include <cstring>
#include <vector>
#include "paddle/fluid/inference/tensorrt/plugin/stack_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
#if IS_TRT_VERSION_GE(6000)
StackPluginDynamic::StackPluginDynamic(int axis, int num_stack, bool with_fp16)
: axis_(axis), num_stack_(num_stack) {
with_fp16_ = with_fp16;
}
StackPluginDynamic::StackPluginDynamic(void const* serial_data,
size_t serial_length) {
DeserializeValue(&serial_data, &serial_length, &axis_);
DeserializeValue(&serial_data, &serial_length, &num_stack_);
DeserializeValue(&serial_data, &serial_length, &with_fp16_);
}
StackPluginDynamic::~StackPluginDynamic() {}
nvinfer1::IPluginV2DynamicExt* StackPluginDynamic::clone() const TRT_NOEXCEPT {
return new StackPluginDynamic(axis_, num_stack_, with_fp16_);
}
const char* StackPluginDynamic::getPluginType() const TRT_NOEXCEPT {
return "stack_plugin";
}
int StackPluginDynamic::getNbOutputs() const TRT_NOEXCEPT { return 1; }
int StackPluginDynamic::initialize() TRT_NOEXCEPT { return 0; }
size_t StackPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
size_t serialize_size = 0;
serialize_size += SerializedSize(axis_);
serialize_size += SerializedSize(num_stack_);
serialize_size += SerializedSize(with_fp16_);
return serialize_size;
}
void StackPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, axis_);
SerializeValue(&buffer, num_stack_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs StackPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs* inputs, int nb_inputs,
nvinfer1::IExprBuilder& expr_builder) TRT_NOEXCEPT {
nvinfer1::DimsExprs output(inputs[0]);
output.nbDims = inputs[0].nbDims + 1;
for (int i = inputs[0].nbDims; i > axis_; --i) {
output.d[i] = inputs[0].d[i - 1];
}
output.d[axis_] = expr_builder.constant(nb_inputs);
return output;
}
void StackPluginDynamic::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) TRT_NOEXCEPT {}
size_t StackPluginDynamic::getWorkspaceSize(
const nvinfer1::PluginTensorDesc* inputs, int nbInputs,
const nvinfer1::PluginTensorDesc* outputs,
int nbOutputs) const TRT_NOEXCEPT {
return num_stack_ * sizeof(uintptr_t);
}
void StackPluginDynamic::destroy() TRT_NOEXCEPT { delete this; }
void StackPluginDynamic::terminate() TRT_NOEXCEPT {}
bool StackPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* in_out, int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of stack plugin should not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc& in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc& prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType StackPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The index should be equal to 0"));
return input_types[0];
}
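// StackKernel gathers num_stack input tensors into one contiguous output along the
// stack axis: blockIdx.x selects the input tensor, blockIdx.y the flattened leading
// dimensions, and the threads stride over the base_unit trailing elements.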
template <typename T>
__global__ void StackKernel(const T* const* input, T* output, int num_stack,
int base_unit) {
int stack_id = blockIdx.x;
int lead_id = blockIdx.y;
for (int i = threadIdx.x; i < base_unit; i += blockDim.x) {
output[lead_id * num_stack * base_unit + stack_id * base_unit + i] =
input[stack_id][lead_id * base_unit + i];
}
}
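// enqueue() first copies the host-side table of input-tensor device pointers into the
// workspace (so the kernel can index inputs by stack id), then launches StackKernel
// with one block per (stacked tensor, leading index) pair.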
int StackPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
const nvinfer1::PluginTensorDesc* output_desc,
const void* const* inputs, void* const* outputs,
void* workspace,
hipStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims; // (batch, seq, seq)
auto out_dims = output_desc[0].dims; // (batch, num_head, seq, seq)
auto out_num_dims = out_dims.nbDims;
int base_unit = 1;
for (int i = axis_ + 1; i < out_num_dims; ++i) {
PADDLE_ENFORCE_GT(out_dims.d[i], 0,
platform::errors::InvalidArgument(
"Input dimensions should be greater than 0"));
base_unit *= out_dims.d[i];
}
int lead_unit = 1;
for (int i = 0; i < axis_; ++i) {
PADDLE_ENFORCE_GT(out_dims.d[i], 0,
platform::errors::InvalidArgument(
"Input dimensions should be greater than 0"));
lead_unit *= out_dims.d[i];
}
PADDLE_ENFORCE_EQ(
out_dims.d[axis_], num_stack_,
platform::errors::InvalidArgument("number of stack axis should be same"));
hipMemcpyAsync(workspace, reinterpret_cast<const void* const>(inputs),
sizeof(void*) * out_dims.d[axis_], hipMemcpyHostToDevice,
stream);
const int num_stacks = out_dims.d[axis_];
dim3 num_blocks(num_stacks, lead_unit);
const int num_threads = 256;
auto infer_type = input_desc[0].type;
if (infer_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Stack-->fp32";
float* output = static_cast<float*>(outputs[0]);
hipLaunchKernelGGL(( StackKernel<float>), dim3(num_blocks), dim3(num_threads), 0, stream,
reinterpret_cast<const float* const*>(workspace), output, num_stacks,
base_unit);
} else if (infer_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Stack-->fp16";
__half* output = static_cast<__half*>(outputs[0]);
hipLaunchKernelGGL(( StackKernel<__half>), dim3(num_blocks), dim3(num_threads), 0, stream,
reinterpret_cast<const __half* const*>(workspace), output, num_stacks,
base_unit);
} else {
PADDLE_THROW(
platform::errors::Fatal("The Stack TRT Plugin's input type only "
"support float or half currently."));
}
return hipGetLastError() != hipSuccess;
}
StackPluginDynamicCreator::StackPluginDynamicCreator() {}
const char* StackPluginDynamicCreator::getPluginName() const TRT_NOEXCEPT {
return "stack_plugin";
}
const char* StackPluginDynamicCreator::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
const nvinfer1::PluginFieldCollection*
StackPluginDynamicCreator::getFieldNames() TRT_NOEXCEPT {
return &field_collection_;
}
nvinfer1::IPluginV2* StackPluginDynamicCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT {
int axis = -1;
int num_stack = -1;
bool with_fp16 = false;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string name(fc->fields[i].name);
if (name == "axis") {
axis = static_cast<const int*>(fc->fields[i].data)[0];
} else if (name == "num_stack") {
num_stack = static_cast<const int*>(fc->fields[i].data)[0];
} else if (name == "with_fp16") {
with_fp16 = static_cast<const bool*>(fc->fields[i].data)[0];
} else {
PADDLE_THROW(platform::errors::Fatal("Meet an unknown plugin field '" +
name +
"' when creating stack op plugin."));
}
}
return new StackPluginDynamic(axis, num_stack, with_fp16);
}
nvinfer1::IPluginV2* StackPluginDynamicCreator::deserializePlugin(
const char* name, const void* serial_data,
size_t serial_length) TRT_NOEXCEPT {
auto plugin = new StackPluginDynamic(serial_data, serial_length);
return plugin;
}
void StackPluginDynamicCreator::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
plugin_namespace_ = lib_namespace;
}
const char* StackPluginDynamicCreator::getPluginNamespace() const TRT_NOEXCEPT {
return plugin_namespace_.c_str();
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 6d0c5cc7ae8c7d94bd5e058c87e9fa3f3eca2834.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include <cstring>
#include <vector>
#include "paddle/fluid/inference/tensorrt/plugin/stack_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
#if IS_TRT_VERSION_GE(6000)
StackPluginDynamic::StackPluginDynamic(int axis, int num_stack, bool with_fp16)
: axis_(axis), num_stack_(num_stack) {
with_fp16_ = with_fp16;
}
StackPluginDynamic::StackPluginDynamic(void const* serial_data,
size_t serial_length) {
DeserializeValue(&serial_data, &serial_length, &axis_);
DeserializeValue(&serial_data, &serial_length, &num_stack_);
DeserializeValue(&serial_data, &serial_length, &with_fp16_);
}
StackPluginDynamic::~StackPluginDynamic() {}
nvinfer1::IPluginV2DynamicExt* StackPluginDynamic::clone() const TRT_NOEXCEPT {
return new StackPluginDynamic(axis_, num_stack_, with_fp16_);
}
const char* StackPluginDynamic::getPluginType() const TRT_NOEXCEPT {
return "stack_plugin";
}
int StackPluginDynamic::getNbOutputs() const TRT_NOEXCEPT { return 1; }
int StackPluginDynamic::initialize() TRT_NOEXCEPT { return 0; }
size_t StackPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
size_t serialize_size = 0;
serialize_size += SerializedSize(axis_);
serialize_size += SerializedSize(num_stack_);
serialize_size += SerializedSize(with_fp16_);
return serialize_size;
}
void StackPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, axis_);
SerializeValue(&buffer, num_stack_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs StackPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs* inputs, int nb_inputs,
nvinfer1::IExprBuilder& expr_builder) TRT_NOEXCEPT {
nvinfer1::DimsExprs output(inputs[0]);
output.nbDims = inputs[0].nbDims + 1;
for (int i = inputs[0].nbDims; i > axis_; --i) {
output.d[i] = inputs[0].d[i - 1];
}
output.d[axis_] = expr_builder.constant(nb_inputs);
return output;
}
void StackPluginDynamic::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) TRT_NOEXCEPT {}
size_t StackPluginDynamic::getWorkspaceSize(
const nvinfer1::PluginTensorDesc* inputs, int nbInputs,
const nvinfer1::PluginTensorDesc* outputs,
int nbOutputs) const TRT_NOEXCEPT {
return num_stack_ * sizeof(uintptr_t);
}
void StackPluginDynamic::destroy() TRT_NOEXCEPT { delete this; }
void StackPluginDynamic::terminate() TRT_NOEXCEPT {}
bool StackPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* in_out, int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of stack plugin should not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc& in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc& prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType StackPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The index should be equal to 0"));
return input_types[0];
}
template <typename T>
__global__ void StackKernel(const T* const* input, T* output, int num_stack,
int base_unit) {
int stack_id = blockIdx.x;
int lead_id = blockIdx.y;
for (int i = threadIdx.x; i < base_unit; i += blockDim.x) {
output[lead_id * num_stack * base_unit + stack_id * base_unit + i] =
input[stack_id][lead_id * base_unit + i];
}
}
int StackPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
const nvinfer1::PluginTensorDesc* output_desc,
const void* const* inputs, void* const* outputs,
void* workspace,
cudaStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims; // (batch, seq, seq)
auto out_dims = output_desc[0].dims; // (batch, num_head, seq, seq)
auto out_num_dims = out_dims.nbDims;
int base_unit = 1;
for (int i = axis_ + 1; i < out_num_dims; ++i) {
PADDLE_ENFORCE_GT(out_dims.d[i], 0,
platform::errors::InvalidArgument(
"Input dimensions should be greater than 0"));
base_unit *= out_dims.d[i];
}
int lead_unit = 1;
for (int i = 0; i < axis_; ++i) {
PADDLE_ENFORCE_GT(out_dims.d[i], 0,
platform::errors::InvalidArgument(
"Input dimensions should be greater than 0"));
lead_unit *= out_dims.d[i];
}
PADDLE_ENFORCE_EQ(
out_dims.d[axis_], num_stack_,
platform::errors::InvalidArgument("number of stack axis should be same"));
cudaMemcpyAsync(workspace, reinterpret_cast<const void* const>(inputs),
sizeof(void*) * out_dims.d[axis_], cudaMemcpyHostToDevice,
stream);
const int num_stacks = out_dims.d[axis_];
dim3 num_blocks(num_stacks, lead_unit);
const int num_threads = 256;
auto infer_type = input_desc[0].type;
if (infer_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Stack-->fp32";
float* output = static_cast<float*>(outputs[0]);
StackKernel<float><<<num_blocks, num_threads, 0, stream>>>(
reinterpret_cast<const float* const*>(workspace), output, num_stacks,
base_unit);
} else if (infer_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Stack-->fp16";
__half* output = static_cast<__half*>(outputs[0]);
StackKernel<__half><<<num_blocks, num_threads, 0, stream>>>(
reinterpret_cast<const __half* const*>(workspace), output, num_stacks,
base_unit);
} else {
PADDLE_THROW(
platform::errors::Fatal("The Stack TRT Plugin's input type only "
"support float or half currently."));
}
return cudaGetLastError() != cudaSuccess;
}
StackPluginDynamicCreator::StackPluginDynamicCreator() {}
const char* StackPluginDynamicCreator::getPluginName() const TRT_NOEXCEPT {
return "stack_plugin";
}
const char* StackPluginDynamicCreator::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
const nvinfer1::PluginFieldCollection*
StackPluginDynamicCreator::getFieldNames() TRT_NOEXCEPT {
return &field_collection_;
}
nvinfer1::IPluginV2* StackPluginDynamicCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT {
int axis = -1;
int num_stack = -1;
bool with_fp16 = false;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string name(fc->fields[i].name);
if (name == "axis") {
axis = static_cast<const int*>(fc->fields[i].data)[0];
} else if (name == "num_stack") {
num_stack = static_cast<const int*>(fc->fields[i].data)[0];
} else if (name == "with_fp16") {
with_fp16 = static_cast<const bool*>(fc->fields[i].data)[0];
} else {
PADDLE_THROW(platform::errors::Fatal("Meet an unknown plugin field '" +
name +
"' when creating stack op plugin."));
}
}
return new StackPluginDynamic(axis, num_stack, with_fp16);
}
nvinfer1::IPluginV2* StackPluginDynamicCreator::deserializePlugin(
const char* name, const void* serial_data,
size_t serial_length) TRT_NOEXCEPT {
auto plugin = new StackPluginDynamic(serial_data, serial_length);
return plugin;
}
void StackPluginDynamicCreator::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
plugin_namespace_ = lib_namespace;
}
const char* StackPluginDynamicCreator::getPluginNamespace() const TRT_NOEXCEPT {
return plugin_namespace_.c_str();
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
acbe7998d8568fb5c7d833310d3e699ecb45524f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
GNU Public Licence Copyright (c) Colin Torney
Comments and questions to [email protected]
This code is provided freely, however when using this code you are asked to cite our related paper:
Berdahl, A., Torney, C.J., Ioannou, C.C., Faria, J. & Couzin, I.D. (2013) Emergent sensing of complex environments by mobile animal groups, Science
*/
#include <cutil_inline.h>
#include <hiprand/hiprand_kernel.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
//#include <particleSystem.cuh>
#include <noise.cuh>
#include <mt19937_ref.cu>
extern "C"
{
__device__ float genSpectrum(float k0, float ksq)
{
// float k=ksq/powf(k0,2);
// float k=ksq/powf(k0,2);
return (ksq/(powf(k0,4)))*exp(-1.0*ksq*powf((1.0/k0),2));
//return k*powf(1+k, NOISE_TAIL);
}
__global__ void gaussianRandsN(int gridSize, float* d_rands, int* d_seeds)
{
int indx = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int indy = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int indx1=(2*gridSize*indy)+(2*indx);
int indx2=(2*gridSize*indy)+(2*indx)+1;
hiprandState_t state;
hiprand_init(d_seeds[blockIdx.x], indx1, 0, &state);
d_rands[indx1] = hiprand_normal(&state);
hiprand_init(d_seeds[blockIdx.y], indx2, 0, &state);
d_rands[indx2] = hiprand_normal(&state);
}
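// createRandArrayD builds a complex random field with Hermitian symmetry: each Fourier
// coefficient at +k is mirrored as its complex conjugate at -k so that the inverse FFT
// is purely real. Self-conjugate modes are given a real value only, and the i==0 / j==0
// boundary rows are zeroed.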
__global__ void createRandArrayD(int gridSize, float* d_rands, float* d_gaussian)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int ind1=(2*gridSize*j)+(2*i);
int ind2=(2*gridSize*j)+(2*i)+1;
int gridSize2 = (int)(0.5*gridSize);
int ki,kj;
int c_i,c_j,c_ki,c_kj;
double real,imag;
double stddev=sqrt(0.5);
if (j<=gridSize2)
{
if (i<=gridSize2)
{
if ((i==0)||(j==0))
{
d_rands[ind1]=0.0;
d_rands[ind2]=0.0;
}
else
{
ki=(i-gridSize2);
kj=(j-gridSize2);
c_ki=-1*ki;
c_kj=-1*kj;
c_i=c_ki+gridSize2;
c_j=c_kj+gridSize2;
if ((ki==c_ki)&&(kj==c_kj))
{
d_rands[ind1]=d_gaussian[ind1];
d_rands[ind2]=0.0;
}
else
{
real = stddev*d_gaussian[ind1];
imag = stddev*d_gaussian[ind2];
d_rands[ind1]=real;
d_rands[ind2]=imag;
d_rands[(c_j*gridSize*2)+(c_i*2)]=real;
d_rands[(c_j*gridSize*2)+(c_i*2)+1]=-imag;
}
}
}
}
else
{
if (i<gridSize2)
{
if (i==0)
{
d_rands[ind1]=0.0;
d_rands[ind2]=0.0;
}
else
{
ki=(i-gridSize2);
kj=(j-gridSize2);
c_ki=-1*ki;
c_kj=-1*kj;
c_i=c_ki+gridSize2;
c_j=c_kj+gridSize2;
real = stddev*d_gaussian[ind1];
imag = stddev*d_gaussian[ind2];
d_rands[ind1]=real;
d_rands[ind2]=imag;
d_rands[(c_j*gridSize*2)+(c_i*2)]=real;
d_rands[(c_j*gridSize*2)+(c_i*2)+1]=-imag;
}
}
}
return;
}
__global__ void initializeArrayD(int gridSize, float* d_fftArray, float* d_rands, float k0, float scaleFact)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int ind1=(2*gridSize*j)+(2*i);
int ind2=(2*gridSize*j)+(2*i)+1;
float ki, kj, ksq, lambda_k, mag_ij;
if ((i==0)||(j==0))
{
d_fftArray[ind1]=0.0;
d_fftArray[ind2]=0.0;
}
else
{
ki=2*M_PI*(i-(gridSize*0.5));
kj=2*M_PI*(j-(gridSize*0.5));
ksq=((ki*ki)+(kj*kj));
lambda_k=scaleFact*genSpectrum(k0,ksq);
if (ksq==0.0)
mag_ij=0.0;
else
mag_ij=sqrt(lambda_k/(2.0*ksq));
d_fftArray[ind1]=mag_ij*d_rands[ind1];
d_fftArray[ind2]=mag_ij*d_rands[ind2];
}
}
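// advanceArrayD advances every Fourier mode by one time step of a mean-reverting
// (Ornstein-Uhlenbeck) process: the existing coefficient decays by exp(-alpha*dt) and
// fresh Gaussian noise is added with the variance needed to keep the spectrum given by
// genSpectrum(k0, k^2) stationary.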
__global__ void advanceArrayD(int gridSize, float* d_fftArray, float* d_rands, float k0, float scaleFact, float deltaT, float revertRate)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int ind1=(2*gridSize*j)+(2*i);
int ind2=(2*gridSize*j)+(2*i)+1;
double lambda_k, ki, kj, ksq;
double alpha_ij, real_ij, imag_ij, mag_ij;
if ((i==0)||(j==0))
{
d_fftArray[ind1]=0.0;
d_fftArray[ind2]=0.0;
}
else
{
ki=2*M_PI*(i-(gridSize*0.5));
kj=2*M_PI*(j-(gridSize*0.5));
ksq=((ki*ki)+(kj*kj));
lambda_k=scaleFact*revertRate*genSpectrum(k0,ksq);
alpha_ij=revertRate*ksq;
if (alpha_ij==0.0)
mag_ij=0.0;
else
mag_ij=sqrt((lambda_k/(2.0*alpha_ij))*(1-exp(-2.0*alpha_ij*deltaT)));
real_ij=mag_ij*d_rands[ind1];
imag_ij=mag_ij*d_rands[ind2];
d_fftArray[ind1]*=exp(-1.0*alpha_ij*deltaT);
d_fftArray[ind1]+=real_ij;
d_fftArray[ind2]*=exp(-1.0*alpha_ij*deltaT);
d_fftArray[ind2]+=imag_ij;
}
}
__global__ void convertComplex2RealD(int gridSize, float* input, float* output)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
output[__mul24(j,gridSize)+i]=((((i+j)%2)==0)?1.0:-1.0)*input[__mul24(__mul24(2,gridSize),j)+__mul24(2,i)];
}
__global__ void mergeVector(int gridSize, float* xInput, float* yInput, float* output)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int indx1 = __mul24(j,gridSize)+i;
int indx2 = __mul24(__mul24(2,gridSize),j)+__mul24(2,i);
output[indx2]=xInput[indx1];
output[indx2+1]=yInput[indx1];
}
__global__ void convertGradD(int gridSize, float* input, float* xOutput, float* yOutput)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int ind1=(2*gridSize*j)+(2*i);
int ind2=(2*gridSize*j)+(2*i)+1;
float ki=2.0*M_PI*((float)i-((float)gridSize*0.5));
float kj=2.0*M_PI*((float)j-((float)gridSize*0.5));
xOutput[ind1]=-ki*input[ind2];
xOutput[ind2]=ki*input[ind1];
yOutput[ind1]=-kj*input[ind2];
yOutput[ind2]=kj*input[ind1];
}
}
| acbe7998d8568fb5c7d833310d3e699ecb45524f.cu | /*
GNU Public Licence Copyright (c) Colin Torney
Comments and questions to [email protected]
This code is provided freely, however when using this code you are asked to cite our related paper:
Berdahl, A., Torney, C.J., Ioannou, C.C., Faria, J. & Couzin, I.D. (2013) Emergent sensing of complex environments by mobile animal groups, Science
*/
#include <cutil_inline.h>
#include <curand_kernel.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
//#include <particleSystem.cuh>
#include <noise.cuh>
#include <mt19937_ref.cu>
extern "C"
{
__device__ float genSpectrum(float k0, float ksq)
{
// float k=ksq/powf(k0,2);
// float k=ksq/powf(k0,2);
return (ksq/(powf(k0,4)))*exp(-1.0*ksq*powf((1.0/k0),2));
//return k*powf(1+k, NOISE_TAIL);
}
__global__ void gaussianRandsN(int gridSize, float* d_rands, int* d_seeds)
{
int indx = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int indy = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int indx1=(2*gridSize*indy)+(2*indx);
int indx2=(2*gridSize*indy)+(2*indx)+1;
curandState state;
curand_init(d_seeds[blockIdx.x], indx1, 0, &state);
d_rands[indx1] = curand_normal(&state);
curand_init(d_seeds[blockIdx.y], indx2, 0, &state);
d_rands[indx2] = curand_normal(&state);
}
__global__ void createRandArrayD(int gridSize, float* d_rands, float* d_gaussian)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int ind1=(2*gridSize*j)+(2*i);
int ind2=(2*gridSize*j)+(2*i)+1;
int gridSize2 = (int)(0.5*gridSize);
int ki,kj;
int c_i,c_j,c_ki,c_kj;
double real,imag;
double stddev=sqrt(0.5);
if (j<=gridSize2)
{
if (i<=gridSize2)
{
if ((i==0)||(j==0))
{
d_rands[ind1]=0.0;
d_rands[ind2]=0.0;
}
else
{
ki=(i-gridSize2);
kj=(j-gridSize2);
c_ki=-1*ki;
c_kj=-1*kj;
c_i=c_ki+gridSize2;
c_j=c_kj+gridSize2;
if ((ki==c_ki)&&(kj==c_kj))
{
d_rands[ind1]=d_gaussian[ind1];
d_rands[ind2]=0.0;
}
else
{
real = stddev*d_gaussian[ind1];
imag = stddev*d_gaussian[ind2];
d_rands[ind1]=real;
d_rands[ind2]=imag;
d_rands[(c_j*gridSize*2)+(c_i*2)]=real;
d_rands[(c_j*gridSize*2)+(c_i*2)+1]=-imag;
}
}
}
}
else
{
if (i<gridSize2)
{
if (i==0)
{
d_rands[ind1]=0.0;
d_rands[ind2]=0.0;
}
else
{
ki=(i-gridSize2);
kj=(j-gridSize2);
c_ki=-1*ki;
c_kj=-1*kj;
c_i=c_ki+gridSize2;
c_j=c_kj+gridSize2;
real = stddev*d_gaussian[ind1];
imag = stddev*d_gaussian[ind2];
d_rands[ind1]=real;
d_rands[ind2]=imag;
d_rands[(c_j*gridSize*2)+(c_i*2)]=real;
d_rands[(c_j*gridSize*2)+(c_i*2)+1]=-imag;
}
}
}
return;
}
__global__ void initializeArrayD(int gridSize, float* d_fftArray, float* d_rands, float k0, float scaleFact)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int ind1=(2*gridSize*j)+(2*i);
int ind2=(2*gridSize*j)+(2*i)+1;
float ki, kj, ksq, lambda_k, mag_ij;
if ((i==0)||(j==0))
{
d_fftArray[ind1]=0.0;
d_fftArray[ind2]=0.0;
}
else
{
ki=2*M_PI*(i-(gridSize*0.5));
kj=2*M_PI*(j-(gridSize*0.5));
ksq=((ki*ki)+(kj*kj));
lambda_k=scaleFact*genSpectrum(k0,ksq);
if (ksq==0.0)
mag_ij=0.0;
else
mag_ij=sqrt(lambda_k/(2.0*ksq));
d_fftArray[ind1]=mag_ij*d_rands[ind1];
d_fftArray[ind2]=mag_ij*d_rands[ind2];
}
}
__global__ void advanceArrayD(int gridSize, float* d_fftArray, float* d_rands, float k0, float scaleFact, float deltaT, float revertRate)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int ind1=(2*gridSize*j)+(2*i);
int ind2=(2*gridSize*j)+(2*i)+1;
double lambda_k, ki, kj, ksq;
double alpha_ij, real_ij, imag_ij, mag_ij;
if ((i==0)||(j==0))
{
d_fftArray[ind1]=0.0;
d_fftArray[ind2]=0.0;
}
else
{
ki=2*M_PI*(i-(gridSize*0.5));
kj=2*M_PI*(j-(gridSize*0.5));
ksq=((ki*ki)+(kj*kj));
lambda_k=scaleFact*revertRate*genSpectrum(k0,ksq);
alpha_ij=revertRate*ksq;
if (alpha_ij==0.0)
mag_ij=0.0;
else
mag_ij=sqrt((lambda_k/(2.0*alpha_ij))*(1-exp(-2.0*alpha_ij*deltaT)));
real_ij=mag_ij*d_rands[ind1];
imag_ij=mag_ij*d_rands[ind2];
d_fftArray[ind1]*=exp(-1.0*alpha_ij*deltaT);
d_fftArray[ind1]+=real_ij;
d_fftArray[ind2]*=exp(-1.0*alpha_ij*deltaT);
d_fftArray[ind2]+=imag_ij;
}
}
__global__ void convertComplex2RealD(int gridSize, float* input, float* output)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
output[__mul24(j,gridSize)+i]=((((i+j)%2)==0)?1.0:-1.0)*input[__mul24(__mul24(2,gridSize),j)+__mul24(2,i)];
}
__global__ void mergeVector(int gridSize, float* xInput, float* yInput, float* output)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int indx1 = __mul24(j,gridSize)+i;
int indx2 = __mul24(__mul24(2,gridSize),j)+__mul24(2,i);
output[indx2]=xInput[indx1];
output[indx2+1]=yInput[indx1];
}
__global__ void convertGradD(int gridSize, float* input, float* xOutput, float* yOutput)
{
int i = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
int j = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
int ind1=(2*gridSize*j)+(2*i);
int ind2=(2*gridSize*j)+(2*i)+1;
float ki=2.0*M_PI*((float)i-((float)gridSize*0.5));
float kj=2.0*M_PI*((float)j-((float)gridSize*0.5));
xOutput[ind1]=-ki*input[ind2];
xOutput[ind2]=ki*input[ind1];
yOutput[ind1]=-kj*input[ind2];
yOutput[ind2]=kj*input[ind1];
}
}
|
43b1fe56c265e381c7144d186aa134874bfde6d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include <hipfft.h>
#include <hipblas.h>
#include <hipsolver.h>
#ifdef _WIN32
#include <direct.h>
#define mkdir _mkdir
#define rmdir _rmdir
#elif defined __linux__
#include <unistd.h>
#endif
#define devij int i = blockIdx.x, j = threadIdx.x
#define devij2 int i0 = blockIdx.x, i = blockIdx.x+blockDim.x*blockIdx.y, j = threadIdx.x;
const float pi = 3.1415927;
__constant__ float d_pi = 3.1415927;
hipblasHandle_t cublas_handle;
hipsolverDnHandle_t solver_handle;
hipfftHandle cufft_handle;
namespace dat{
int nx;
int nx2;
int nz;
int nt;
float dt;
float Lx;
float Lz;
float **X;
float **Z;
dim3 nxb;
dim3 nxb2;
dim3 nzt;
int sfe;
int nsfe;
int order;
int wave_propagation_sh;
int wave_propagation_psv;
int simulation_mode;
int absorb_left;
int absorb_right;
int absorb_top;
int absorb_bottom;
float absorb_width;
int *isrc;
int nsrc;
int nrec;
int ntask;
int obs_type;
int obs_su;
int misfit_type;
int parametrisation;
const char *parfile;
char *obs_su_path;
char *model_init;
char *model_true;
char *output_path;
int *src_type; // host (ricker = 1)
float *src_angle; // host
float *src_f0; // host
float *src_t0; // host
float *src_factor; // host
float *src_x;
float *src_z;
float *rec_x;
float *rec_z;
int *src_x_id;
int *src_z_id;
int *rec_x_id;
int *rec_z_id;
float **stf_x;
float **stf_y;
float **stf_z;
float **adstf_x;
float **adstf_y;
float **adstf_z;
float **lambda;
float **mu;
float **rho;
float **absbound;
float **ux;
float **uy;
float **uz;
float **vx;
float **vy;
float **vz;
float **sxx;
float **sxy;
float **sxz;
float **szy;
float **szz;
float **dsx;
float **dsy;
float **dsz;
float **dvxdx;
float **dvxdz;
float **dvydx;
float **dvydz;
float **dvzdx;
float **dvzdz;
float **dvxdx_fw;
float **dvxdz_fw;
float **dvydx_fw;
float **dvydz_fw;
float **dvzdx_fw;
float **dvzdz_fw;
float **K_lambda;
float **K_mu;
float **K_rho;
float **out_x;
float **out_y;
float **out_z;
float ***u_obs_x;
float ***u_obs_y;
float ***u_obs_z;
float ***ux_forward; // host
float ***uy_forward; // host
float ***uz_forward; // host
float ***vx_forward; // host
float ***vy_forward; // host
float ***vz_forward; // host
int filter_kernel;
float misfit_ref;
float **gsum;
float **gtemp;
float *tw;
int optimize;
int inv_parameter;
int inv_iteration;
int ls_stepcountmax;
int ls_count;
float ls_thresh;
float ls_steplenmax;
float ls_stepleninit;
float *func_vals; // host
float *step_lens; // host
float *ls_gtp; // host
float *ls_gtg; // host
int inv_count;
int inv_maxiter;
int lbfgs_mem;
float ***lbfgs_S; // host
float ***lbfgs_Y; // host
int lbfgs_used;
FILE *log_ls;
FILE *log_misfit;
int neval;
}
namespace mat{
__global__ void _setValue(float *mat, const float init){
int i = blockIdx.x;
mat[i] = init;
}
__global__ void _setValue(double *mat, const double init){
int i = blockIdx.x;
mat[i] = init;
}
__global__ void _setValue(float **mat, const float init){
devij;
mat[i][j] = init;
}
__global__ void _setValue(float ***mat, const float init, const int p){
devij;
mat[p][i][j] = init;
}
__global__ void _setPointerValue(float **mat, float *data, const int n){
int i = blockIdx.x;
mat[i] = data + n * i;
}
__global__ void _setPointerValue(float ***mat, float **data, const int i){
mat[i] = data;
}
__global__ void _setIndexValue(float *a, float *b, int index){
a[0] = b[index];
}
__global__ void _copy(float *mat, float *init){
int i = blockIdx.x;
mat[i] = init[i];
}
__global__ void _copy(float **mat, float **init){
devij;
mat[i][j] = init[i][j];
}
__global__ void _copy(float **mat, float **init, float k){
devij;
mat[i][j] = init[i][j] * k;
}
__global__ void _calc(float **c, float ka, float **a, float kb, float **b){
devij;
c[i][j] = ka * a[i][j] + kb * b[i][j];
}
__global__ void _calc(float *c, float ka, float *a, float kb, float *b){
int i = blockIdx.x;
c[i] = ka * a[i] + kb * b[i];
}
__global__ void _calc(float *c, float *a, float *b){
int i = blockIdx.x;
c[i] = a[i] * b[i];
}
float *init(float *mat, const float init, const int m){
hipLaunchKernelGGL(( mat::_setValue), dim3(m), dim3(1), 0, 0, mat, init);
return mat;
}
double *init(double *mat, const double init, const int m){
hipLaunchKernelGGL(( mat::_setValue), dim3(m), dim3(1), 0, 0, mat, init);
return mat;
}
float **init(float **mat, const float init, const int m, const int n){
hipLaunchKernelGGL(( mat::_setValue), dim3(m), dim3(n), 0, 0, mat, init);
return mat;
}
float ***init(float ***mat, const float init, const int p, const int m, const int n){
for(int i = 0; i < p; i++){
hipLaunchKernelGGL(( mat::_setValue), dim3(m), dim3(n), 0, 0, mat, init, i);
}
return mat;
}
float *initHost(float *mat, const float init, const int m){
for(int i = 0; i < m; i++){
mat[i] = init;
}
return mat;
}
float **initHost(float **mat, const float init, const int m, const int n){
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
mat[i][j] = init;
}
}
return mat;
}
float ***initHost(float ***mat, const float init, const int p, const int m, const int n){
for(int k = 0; k < p; k++){
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
mat[k][i][j] = init;
}
}
}
return mat;
}
float *create(const int m) {
float *data;
hipMalloc((void **)&data, m * sizeof(float));
return data;
}
float **create(const int m, const int n){
float *data = mat::create(m * n);
float **mat;
hipMalloc((void **)&mat, m * sizeof(float *));
hipLaunchKernelGGL(( mat::_setPointerValue), dim3(m), dim3(1), 0, 0, mat, data, n);
return mat;
}
float ***create(const int p, const int m, const int n){
float ***mat;
hipMalloc((void **)&mat, p * sizeof(float **));
for(int i = 0; i < p; i++){
hipLaunchKernelGGL(( mat::_setPointerValue), dim3(1),dim3(1), 0, 0, mat, mat::create(m, n), i);
}
return mat;
}
float *createHost(const int m) {
return (float *)malloc(m * sizeof(float));
}
float **createHost(const int m, const int n){
float *data = mat::createHost(m * n);
float **mat = (float **)malloc(m * sizeof(float *));
for(int i =0; i < m; i++){
mat[i] = data + n * i;
}
return mat;
}
float ***createHost(const int p, const int m, const int n){
float ***mat = (float ***)malloc(p * sizeof(float **));
for(int i = 0; i < p; i++){
mat[i] = mat::createHost(m, n);
}
return mat;
}
int *createInt(const int m){
int *a;
hipMalloc((void**)&a, m * sizeof(int));
return a;
}
int *createIntHost(const int m) {
return (int *)malloc(m * sizeof(int));
}
short int *createShortInt(const int m){
short int *a;
hipMalloc((void**)&a, m * sizeof(short int));
return a;
}
short int *createShortIntHost(const int m){
return (short int *)malloc(m * sizeof(short int));
}
double *createDouble(const int m){
double *a;
hipMalloc((void**)&a, m * sizeof(double));
return a;
}
double *createDoubleHost(const int m) {
return (double *)malloc(m * sizeof(double));
}
float *getDataPointer(float **mat){
float **p=(float **)malloc(sizeof(float *));
hipMemcpy(p, mat , sizeof(float *), hipMemcpyDeviceToHost);
return *p;
}
void copy(float *mat, float *init, const int m){
hipLaunchKernelGGL(( mat::_copy), dim3(m), dim3(1), 0, 0, mat, init);
}
void copy(float **mat, float **init, const int m, const int n){
hipLaunchKernelGGL(( mat::_copy), dim3(m), dim3(n), 0, 0, mat, init);
}
void copy(float **mat, float **init, float k, const int m, const int n){
hipLaunchKernelGGL(( mat::_copy), dim3(m), dim3(n), 0, 0, mat, init, k);
}
void copyHostToDevice(float *d_a, const float *a, const int m){
hipMemcpy(d_a, a , m * sizeof(float), hipMemcpyHostToDevice);
}
void copyHostToDevice(float **pd_a, float *pa, const int m, const int n){
float **phd_a=(float **)malloc(sizeof(float *));
hipMemcpy(phd_a, pd_a , sizeof(float *), hipMemcpyDeviceToHost);
hipMemcpy(*phd_a, pa , m * n * sizeof(float), hipMemcpyHostToDevice);
}
void copyHostToDevice(float **pd_a, float **pa, const int m, const int n){
float **phd_a=(float **)malloc(sizeof(float *));
hipMemcpy(phd_a, pd_a , sizeof(float *), hipMemcpyDeviceToHost);
hipMemcpy(*phd_a, *pa , m * n * sizeof(float), hipMemcpyHostToDevice);
}
void copyHostToDevice(float ***pd_a, float ***pa, const int p, const int m, const int n){
float ***phd_a=(float ***)malloc(p * sizeof(float **));
hipMemcpy(phd_a, pd_a, p * sizeof(float **), hipMemcpyDeviceToHost);
for(int i = 0; i < p; i++){
mat::copyHostToDevice(phd_a[i], pa[i], m, n);
}
}
void copyDeviceToHost(float *a, const float *d_a, const int m){
hipMemcpy(a, d_a , m * sizeof(float), hipMemcpyDeviceToHost);
}
void copyDeviceToHost(float *pa, float **pd_a, const int m, const int n){
float **phd_a=(float **)malloc(sizeof(float *));
hipMemcpy(phd_a, pd_a , sizeof(float *), hipMemcpyDeviceToHost);
hipMemcpy(pa, *phd_a , m * n * sizeof(float), hipMemcpyDeviceToHost);
}
void copyDeviceToHost(float **pa, float **pd_a, const int m, const int n){
float **phd_a=(float **)malloc(sizeof(float *));
hipMemcpy(phd_a, pd_a , sizeof(float *), hipMemcpyDeviceToHost);
hipMemcpy(*pa, *phd_a , m * n * sizeof(float), hipMemcpyDeviceToHost);
}
void copyDeviceToHost(float ***pa, float ***pd_a, const int p, const int m, const int n){
float ***phd_a=(float ***)malloc(p * sizeof(float **));
hipMemcpy(phd_a, pd_a, p * sizeof(float **), hipMemcpyDeviceToHost);
for(int i = 0; i < p; i++){
mat::copyDeviceToHost(pa[i], phd_a[i], m, n);
}
}
void calc(float *c, float *a, float *b, int m){
hipLaunchKernelGGL(( mat::_calc), dim3(m), dim3(1), 0, 0, c, a, b);
}
void calc(float *c, float ka, float *a, float kb, float *b, int m){
hipLaunchKernelGGL(( mat::_calc), dim3(m), dim3(1), 0, 0, c, ka, a, kb, b);
}
void calc(float **c, float ka, float **a, float kb, float **b, int m, int n){
hipLaunchKernelGGL(( mat::_calc), dim3(m), dim3(n), 0, 0, c, ka, a, kb, b);
}
float norm(float *a, int n){
float norm_a = 0;
hipblasSnrm2(cublas_handle, n, a, 1, &norm_a);
return norm_a;
}
float norm(float **a, int m, int n){
return mat::norm(mat::getDataPointer(a), m * n);
}
float amax(float *a, int n){
int index = 0;
hipblasIsamax(cublas_handle, n, a, 1, &index);
float *b = mat::create(1);
hipLaunchKernelGGL(( mat::_setIndexValue), dim3(1), dim3(1), 0, 0, b, a, index - 1);
float *c = mat::createHost(1);
mat::copyDeviceToHost(c, b, 1);
return fabs(c[0]);
}
float amax(float **a, int m, int n){
return mat::amax(mat::getDataPointer(a), m * n);
}
float dot(float *a, float *b, int n){
float dot_ab = 0;
hipblasSdot(cublas_handle, n, a, 1, b, 1, &dot_ab);
return dot_ab;
}
float dot(float **a, float **b, int m, int n){
return mat::dot(mat::getDataPointer(a), mat::getDataPointer(b), m * n);
}
void freeHost(float **mat){
free(*mat);
free(mat);
}
void freeHost(float ***mat){
free(**mat);
free(*mat);
free(mat);
}
void freeDevice(float **mat){
hipFree(getDataPointer(mat));
hipFree(mat);
}
void read(float *data, int n, const char *fname){
FILE *file = fopen(fname, "rb");
fread(data, sizeof(float), n, file);
fclose(file);
}
void write(float *data, int n, const char *fname){
FILE *file = fopen(fname, "wb");
fwrite(data, sizeof(float), n, file);
fclose(file);
}
void write(float **data, int m, int n, const char *fname){
FILE *file = fopen(fname, "wb");
for(int i = 0; i < m; i++){
fwrite(data[i], sizeof(float), n, file);
}
fclose(file);
}
void write(float ***data, int p, int m, int n, const char *fname){
FILE *file = fopen(fname, "wb");
for(int k = 0; k < p; k++){
for(int i = 0; i < m; i++){
fwrite(data[k][i], sizeof(float), n, file);
}
}
fclose(file);
}
void writeDevice(float *data, int n, const char *fname){
float *h_data = mat::createHost(n);
mat::copyDeviceToHost(h_data, data, n);
mat::write(h_data, n, fname);
free(h_data);
}
void writeDevice(float **data, const int m, int n, const char *fname){
float **h_data = mat::createHost(m, n);
mat::copyDeviceToHost(h_data, data, m, n);
mat::write(h_data, m, n, fname);
mat::freeHost(h_data);
}
void writeDevice(float ***data, const int p, const int m, int n, const char *fname){
float ***h_data = mat::createHost(p, m, n);
mat::copyDeviceToHost(h_data, data, p, m, n);
mat::write(h_data, p, m, n, fname);
mat::freeHost(h_data);
}
}
dim3 &nxb = dat::nxb;
dim3 &nxb2 = dat::nxb2;
dim3 &nzt = dat::nzt;
int &sh = dat::wave_propagation_sh;
int &psv = dat::wave_propagation_psv;
int &mode = dat::simulation_mode;
int &nx = dat::nx;
int &nx2 = dat::nx2;
int &nz = dat::nz;
int &nt = dat::nt;
int &nsrc = dat::nsrc;
int &nrec = dat::nrec;
int &ntask = dat::ntask;
float &dt = dat::dt;
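// divSY / divSXZ compute the divergence of the stress tensor (SH and P-SV cases) with a
// 4th-order staggered-grid finite-difference stencil: 9/8 times the nearest-neighbour
// difference minus 1/8 times the three-cell difference.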
__global__ void divSY(float **dsy, float **sxy, float **szy, float **X, float **Z, int nx, int nz){
devij2;
if(i0 >= 2 && i0 < nx - 2){
float dx = X[i0][j] - X[i0-1][j];
float dx3 = X[i0+1][j] - X[i0-2][j];
dsy[i][j] = 9*(sxy[i][j] - sxy[i-1][j])/(8*dx) - (sxy[i+1][j] - sxy[i-2][j])/(8*dx3);
}
else{
dsy[i][j] = 0;
}
if(j >= 2 && j < nz - 2){
float dz = Z[i0][j] - Z[i0][j-1];
float dz3 = Z[i0][j+1] - Z[i0][j-2];
dsy[i][j] += 9*(szy[i][j] - szy[i][j-1])/(8*dz) - (szy[i][j+1] - szy[i][j-2])/(8*dz3);
}
}
__global__ void divSXZ(float **dsx, float **dsz, float **sxx, float **szz, float **sxz, float **X, float **Z, int nx, int nz){
devij2;
if(i0 >= 2 && i0 < nx - 2){
float dx = X[i0][j] - X[i0-1][j];
float dx3 = X[i0+1][j] - X[i0-2][j];
dsx[i][j] = 9*(sxx[i][j] - sxx[i-1][j])/(8*dx) - (sxx[i+1][j] - sxx[i-2][j])/(8*dx3);
dsz[i][j] = 9*(sxz[i][j] - sxz[i-1][j])/(8*dx) - (sxz[i+1][j] - sxz[i-2][j])/(8*dx3);
}
else{
dsx[i][j] = 0;
dsz[i][j] = 0;
}
if(j >= 2 && j < nz - 2){
float dz = Z[i0][j] - Z[i0][j-1];
float dz3 = Z[i0][j+1] - Z[i0][j-2];
dsx[i][j] += 9*(sxz[i][j] - sxz[i][j-1])/(8*dz) - (sxz[i][j+1] - sxz[i][j-2])/(8*dz3);
dsz[i][j] += 9*(szz[i][j] - szz[i][j-1])/(8*dz) - (szz[i][j+1] - szz[i][j-2])/(8*dz3);
}
}
__global__ void divVY(float **dvydx, float **dvydz, float **vy, float **X, float **Z, int nx, int nz){
devij2;
if(i0 >= 1 && i0 < nx - 2){
float dx = X[i0+1][j] - X[i0][j];
float dx3 = X[i0+2][j] - X[i0-1][j];
dvydx[i][j] = 9*(vy[i+1][j] - vy[i][j])/(8*dx) - (vy[i+2][j] - vy[i-1][j])/(8*dx3);
}
else{
dvydx[i][j] = 0;
}
if(j >= 1 && j < nz - 2){
float dz = Z[i0][j+1] - Z[i0][j];
float dz3 = Z[i0][j+2] - Z[i0][j-1];
dvydz[i][j] = 9*(vy[i][j+1] - vy[i][j])/(8*dz) - (vy[i][j+2] - vy[i][j-1])/(8*dz3);
}
else{
dvydz[i][j] = 0;
}
}
__global__ void divVXZ(float **dvxdx, float **dvxdz, float **dvzdx, float **dvzdz, float **vx, float **vz, float **X, float **Z, int nx, int nz){
devij2;
if(i0 >= 1 && i0 < nx - 2){
float dx = X[i0+1][j] - X[i0][j];
float dx3 = X[i0+2][j] - X[i0-1][j];
dvxdx[i][j] = 9*(vx[i+1][j]-vx[i][j])/(8*dx)-(vx[i+2][j]-vx[i-1][j])/(8*dx3);
dvzdx[i][j] = 9*(vz[i+1][j]-vz[i][j])/(8*dx)-(vz[i+2][j]-vz[i-1][j])/(8*dx3);
}
else{
dvxdx[i][j] = 0;
dvzdx[i][j] = 0;
}
if(j >= 1 && j < nz - 2){
float dz = Z[i0][j+1] - Z[i0][j];
float dz3 = Z[i0][j+2] - Z[i0][j-1];
dvxdz[i][j] = 9*(vx[i][j+1]-vx[i][j])/(8*dz)-(vx[i][j+2]-vx[i][j-1])/(8*dz3);
dvzdz[i][j] = 9*(vz[i][j+1]-vz[i][j])/(8*dz)-(vz[i][j+2]-vz[i][j-1])/(8*dz3);
}
else{
dvxdz[i][j] = 0;
dvzdz[i][j] = 0;
}
}
__global__ void addSTF(float **dsx, float **dsy, float **dsz, float **stf_x, float **stf_y, float **stf_z,
int *src_x_id, int *src_z_id, int isrc, int sh, int psv, int it,int nx){
int is = blockIdx.x;
int xs = src_x_id[is];
int zs = src_z_id[is];
int is2 = threadIdx.x;
if(isrc < 0 || isrc + is2 == is){
if(sh){
dsy[xs+is2*nx][zs] += stf_y[is][it];
}
if(psv){
dsx[xs+is2*nx][zs] += stf_x[is][it];
dsz[xs+is2*nx][zs] += stf_z[is][it];
}
}
}
__global__ void saveRec(float **out_x, float **out_y, float **out_z, float **vx, float **vy, float **vz,
int *rec_x_id, int *rec_z_id, int nx, int nt, int sh, int psv, int it){
int ir = blockIdx.x;
int xr = rec_x_id[ir] + threadIdx.x * nx;
int zr = rec_z_id[ir];
it += threadIdx.x * nt;
if(sh){
out_y[ir][it] = vy[xr][zr];
}
if(psv){
out_x[ir][it] = vx[xr][zr];
out_z[ir][it] = vz[xr][zr];
}
}
__global__ void saveRec(float ***out_x, float ***out_y, float ***out_z, float **vx, float **vy, float **vz,
int *rec_x_id, int *rec_z_id, int isrc, int nx, int sh, int psv, int it){
int ir = blockIdx.x;
int xr = rec_x_id[ir];
int zr = rec_z_id[ir];
int is2 = threadIdx.x;
if(sh){
out_y[isrc+is2][ir][it] = vy[xr+is2*nx][zr];
}
if(psv){
out_x[isrc+is2][ir][it] = vx[xr+is2*nx][zr];
out_z[isrc+is2][ir][it] = vz[xr+is2*nx][zr];
}
}
__global__ void updateV(float **v, float **ds, float **rho, float **absbound, float dt){
devij2;
v[i][j] = absbound[i0][j] * (v[i][j] + dt * ds[i][j] / rho[i0][j]);
}
__global__ void updateSY(float **sxy, float **szy, float **dvydx, float **dvydz, float **mu, float dt){
devij2;
sxy[i][j] += dt * mu[i0][j] * dvydx[i][j];
szy[i][j] += dt * mu[i0][j] * dvydz[i][j];
}
__global__ void updateSXZ(float **sxx, float **szz, float **sxz, float **dvxdx, float **dvxdz, float **dvzdx, float **dvzdz,
float **lambda, float **mu, float dt){
devij2;
sxx[i][j] += dt * ((lambda[i0][j] + 2 * mu[i0][j]) * dvxdx[i][j] + lambda[i0][j] * dvzdz[i][j]);
szz[i][j] += dt * ((lambda[i0][j] + 2 * mu[i0][j]) * dvzdz[i][j] + lambda[i0][j] * dvxdx[i][j]);
sxz[i][j] += dt * (mu[i0][j] * (dvxdz[i][j] + dvzdx[i][j]));
}
__global__ void updateU(float **u, float **v, float dt){
devij2;
u[i][j] += v[i][j] * dt;
}
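// The interaction* kernels accumulate the sensitivity (misfit) kernels by correlating
// the adjoint wavefield with the stored forward wavefield at each saved time step,
// weighted by tsfe (presumably the time increment between stored forward frames).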
__global__ void interactionRhoY(float **K_rho, float **vy, float **vy_fw, float tsfe){
devij;
K_rho[i][j] -= vy_fw[i][j] * vy[i][j] * tsfe;
}
__global__ void interactionRhoXZ(float **K_rho, float **vx, float **vx_fw, float **vz, float **vz_fw, float tsfe){
devij;
K_rho[i][j] -= (vx_fw[i][j] * vx[i][j] + vz_fw[i][j] * vz[i][j]) * tsfe;
}
__global__ void interactionMuY(float **K_mu, float **dvydx, float **dvydx_fw, float **dvydz, float **dvydz_fw, float tsfe){
devij;
K_mu[i][j] -= (dvydx[i][j] * dvydx_fw[i][j] + dvydz[i][j] * dvydz_fw[i][j]) * tsfe;
}
__global__ void interactionMuXZ(float **K_mu, float **dvxdx, float **dvxdx_fw, float **dvxdz, float **dvxdz_fw,
float **dvzdx, float **dvzdx_fw, float **dvzdz, float **dvzdz_fw, float tsfe){
devij;
K_mu[i][j] -= (2 * dvxdx[i][j] * dvxdx_fw[i][j] + 2 * dvzdz[i][j] * dvzdz_fw[i][j] +
(dvxdz[i][j] + dvzdx[i][j]) * (dvzdx_fw[i][j] + dvxdz_fw[i][j])) * tsfe;
}
__global__ void interactionLambdaXZ(float **K_lambda, float **dvxdx, float **dvxdx_fw, float **dvzdz, float **dvzdz_fw, float tsfe){
devij;
K_lambda[i][j] -= ((dvxdx[i][j] + dvzdz[i][j]) * (dvxdx_fw[i][j] + dvzdz_fw[i][j])) * tsfe;
}
__device__ float gaussian(int x, int sigma){
float xf = (float)x;
float sigmaf = (float)sigma;
return (1 / (sqrtf(2 * d_pi) * sigmaf)) * expf(-xf * xf / (2 * sigmaf * sigmaf));
}
__global__ void initialiseGaussian(float **model, int nx, int nz, int sigma){
devij;
float sumx = 0;
for(int n = 0; n < nx; n++){
sumx += gaussian(i - n, sigma);
}
float sumz = 0;
for(int n = 0; n < nz; n++){
sumz += gaussian(j - n, sigma);
}
model[i][j] = sumx * sumz;
}
__global__ void computeIndices(int *coord_n_id, float *coord_n, float Ln, float n){
int i = blockIdx.x;
coord_n_id[i] = (int)(coord_n[i] / Ln * (n - 1) + 0.5);
}
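// initialiseAbsorbingBoundaries precomputes a multiplicative damping profile: inside a
// strip of the given width next to each enabled edge, the velocity field is attenuated
// by a Gaussian taper exp(-((d - width)/(2*width))^2); elsewhere the factor is 1.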
__global__ void initialiseAbsorbingBoundaries(float **absbound, float width,
int absorb_left, int absorb_right, int absorb_bottom, int absorb_top,
float Lx, float Lz, float **X, float **Z){
devij;
absbound[i][j] = 1;
if(absorb_left){
if(X[i][j] < width){
absbound[i][j] *= exp(-pow((X[i][j] - width) / (2 * width), 2));
}
}
if(absorb_right){
if(X[i][j] > Lx - width){
absbound[i][j] *= exp(-pow((X[i][j] - (Lx - width)) / (2 * width), 2));
}
}
if(absorb_bottom){
if(Z[i][j] < width){
absbound[i][j] *= exp(-pow((Z[i][j] - width) / (2 * width), 2));
}
}
if(absorb_top){
if(Z[i][j] > Lz - width){
absbound[i][j] *= exp(-pow((Z[i][j] - (Lz - width)) / (2 * width), 2));
}
}
}
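// prepareAdjointSTF builds the adjoint source for the waveform-difference misfit: the
// residual (synthetics minus observations), multiplied by the taper weights and by 2,
// written in reverse time so it can drive the adjoint simulation.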
__global__ void prepareAdjointSTF(float **adstf, float **u_syn, float ***u_obs, float *tw, int nt, int isrc){
int it = blockIdx.x;
int irec = threadIdx.x;
adstf[irec][nt - it - 1] = (u_syn[irec][it] - u_obs[isrc][irec][it]) * tw[it] * 2;
}
__global__ void prepareEnvelopeSTF(float **adstf, float *etmp, float *syn, float *ersd, int nt, int irec){
int it = blockIdx.x;
adstf[irec][nt - it - 1] = etmp[it] * syn[it] - ersd[it];
}
__global__ void filterKernelX(float **model, float **gtemp, int nx, int sigma){
devij;
float sumx = 0;
for(int n = 0; n < nx; n++){
sumx += gaussian(i - n, sigma) * model[n][j];
}
gtemp[i][j] = sumx;
}
__global__ void filterKernelZ(float **model, float **gtemp, float **gsum, int nz, int sigma){
devij;
float sumz = 0;
for(int n = 0; n < nz; n++){
sumz += gaussian(j - n, sigma) * gtemp[i][n];
}
model[i][j] = sumz / gsum[i][j];
}
__global__ void getTaperWeights(float *tw, float dt, int nt){
int it = blockIdx.x;
float t_end = (nt - 1) * dt;
float taper_width = t_end / 10;
float t_min = taper_width;
float t_max = t_end - taper_width;
float t = it * dt;
if(t <= t_min){
tw[it] = 0.5 + 0.5 * cosf(d_pi * (t_min - t) / (taper_width));
}
else if(t >= t_max){
tw[it] = 0.5 + 0.5 * cosf(d_pi * (t_max - t) / (taper_width));
}
else{
tw[it] = 1;
}
}
__global__ void calculateMisfit(float *misfit, float **u_syn, float ***u_obs, float *tw, float dt, int isrc, int irec, int j, int nt){
int it = blockIdx.x;
float wavedif = (u_syn[irec][it+j*nt] - u_obs[isrc+j][irec][it]) * tw[it];
misfit[it] = wavedif * dt;
}
__global__ void envelopetmp(float *etmp, float *esyn, float *eobs, float max){
int it = blockIdx.x;
etmp[it] = (esyn[it] - eobs[it])/(esyn[it] + max);
}
__global__ void copyWaveform(float *misfit, float ***u_obs, int isrc, int irec){
int it = blockIdx.x;
misfit[it] = u_obs[isrc][irec][it];
}
__global__ void copyWaveform(float *misfit, float **out, int irec, int jnt){
int it = blockIdx.x;
misfit[it] = out[irec][it+jnt];
}
__global__ void initialiseGrids(float **X, float **Z, float Lx, int nx, float Lz, int nz){
devij;
X[i][j] = Lx / (nx - 1) * i;
Z[i][j] = Lz / (nz - 1) * j;
}
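// mesh2grid performs nearest-neighbour interpolation from the unstructured (x, z) mesh
// points read from the external model files onto the regular simulation grid: each grid
// node takes rho, vs (stored in mu) and vp (stored in lambda) from the closest point.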
__global__ void mesh2grid(float *xbuffer, float *zbuffer, float *rbuffer, float *pbuffer, float *sbuffer,
float **lambda, float **mu, float **rho, float dx, float dz, float dmax, int npt){
devij;
float ix = i * dx;
float iz = j * dz;
float dmin = dmax;
for(int k = 0; k < npt; k++){
float dx = ix - xbuffer[k];
float dz = iz - zbuffer[k];
float d = dx * dx + dz * dz;
if(d < dmin){
dmin = d;
rho[i][j] = rbuffer[k];
mu[i][j] = sbuffer[k];
lambda[i][j] = pbuffer[k];
}
}
}
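// changeParametrisation converts the model from velocities to elastic moduli:
// mu = rho*vs^2 and, for P-SV, lambda = rho*(vp^2 - 2*vs^2); the overload below applies
// the inverse mapping back to vp/vs.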
__global__ void changeParametrisation(float **lambda, float **mu, float **rho, int psv){
devij;
if(psv){
lambda[i][j] = rho[i][j] * (lambda[i][j] * lambda[i][j] - 2 * mu[i][j] * mu[i][j]);
}
else{
lambda[i][j] = 0;
}
mu[i][j] = rho[i][j] * mu[i][j] * mu[i][j];
}
__global__ void changeParametrisation(float *vp, float *vs, float *rho, int nz, int psv){
devij;
int ij = i * nz + j;
if(psv){
vp[ij] = sqrt((vp[ij] + 2*vs[ij]) / rho[ij]);
}
else{
vp[ij] = 0;
}
vs[ij] = sqrt(vs[ij] / rho[ij]);
}
__global__ void updateModel(float **m, float **p, float alpha){
devij;
m[i][j] += alpha * p[i][j];
}
__global__ void reduceSystem(const double * __restrict d_in1, double * __restrict d_out1, const double * __restrict d_in2, double * __restrict d_out2, const int M, const int N) {
const int i = blockIdx.x;
const int j = threadIdx.x;
if ((i < N) && (j < N)){
d_out1[j * N + i] = d_in1[j * M + i];
d_out2[j * N + i] = d_in2[j * M + i];
}
}
__global__ void generateChecker(float **p, float dp, float margin, float lx, float lz, float **X, float **Z){
devij;
float x = X[i][j];
float z = Z[i][j];
float marginx = lx * margin;
float marginz = lz * margin;
int idx = (int)((x - marginx*2) / (lx + marginx));
int idz = (int)((z - marginz*2) / (lz + marginz));
float rx = x - marginx*2 - idx * (lx + marginx);
float rz = z - marginz*2 - idz * (lz + marginz);
if(rx > 0 && rx < lx && rz > 0 && rz < lz){
if(idx % 2 == idz % 2){
p[i][j] *= (1 + dp) * (1 + dp);
}
else{
p[i][j] *= (1 - dp) * (1 - dp);
}
}
}
__global__ void generateLayer(float **p, float from, float to, float value, float **Z){
devij;
float z = Z[i][j];
if(z >=from && z <= to){
p[i][j] *= (1+value) * (1+value);
}
}
__global__ void generateRandomLayer(float **p, float from, float to, float value, float *layer1, float *layer2, float **Z){
devij;
float z = Z[i][j];
if(z >=from+layer1[i] && z <= to+layer2[i]){
p[i][j] *= (1+value) * (1+value);
}
}
__global__ void hilbert(hipfftComplex *h, int n){
int i = blockIdx.x;
if(i > 0){
if(n % 2 == 0){
if(i < n / 2 + 1){
h[i].x *= 2;
h[i].y *= 2;
}
else if(i > n / 2 + 1){
h[i].x = 0;
h[i].y = 0;
}
}
else{
if(i < (n+1) / 2){
h[i].x *= 2;
h[i].y *= 2;
}
else{
h[i].x = 0;
h[i].y = 0;
}
}
}
}
__global__ void copyR2C(hipfftComplex *a,float *b){
int i=blockIdx.x;
a[i].x=b[i];
a[i].y=0;
}
__global__ void copyC2Real(float *a, hipfftComplex *b, int n){
int i = blockIdx.x;
a[i] = b[i].x / n;
}
__global__ void copyC2Imag(float *a, hipfftComplex *b, int n){
int i = blockIdx.x;
a[i] = b[i].y / n;
}
__global__ void copyC2Abs(float *a, hipfftComplex *b, int n){
int i = blockIdx.x;
a[i] = sqrt(b[i].x*b[i].x + b[i].y*b[i].y) / n;
}
static int getTaskIndex(int isrc){
int index = isrc + ntask - 1;
if(index >= nsrc){
return nsrc - 1;
}
else{
return index;
}
}
static float calculateAngle(float **p, float **g, float k, int nx, int nz){
float xx = mat::dot(p, p, nx, nz);
float yy = mat::dot(g, g, nx, nz);
float xy = k * mat::dot(p, g, nx, nz);
return acos(xy / sqrt(xx * yy));
}
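// hilbert() computes the discrete analytic signal of x via the FFT: forward transform,
// zero the negative frequencies and double the positive ones (hilbert kernel above),
// then inverse transform. The imaginary part is the Hilbert transform and the magnitude
// gives the envelope, presumably consumed by the envelope-misfit kernels above.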
static void hilbert(float *x, hipfftComplex *data){
hipLaunchKernelGGL(( copyR2C), dim3(nt), dim3(1), 0, 0, data, x);
hipfftExecC2C(cufft_handle, data, data, HIPFFT_FORWARD);
hipLaunchKernelGGL(( hilbert), dim3(nt),dim3(1), 0, 0, data, nt);
hipfftExecC2C(cufft_handle, data, data, HIPFFT_BACKWARD);
}
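// solveQR solves the overdetermined least-squares system A*X = B (Nrows >= Ncols) on the
// GPU: QR factorisation with hipsolverDnDgeqrf, application of Q^T to the right-hand
// side with hipsolverDnDormqr, then a triangular solve with hipblasDtrsm on the reduced
// Ncols x Ncols system.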
static void solveQR(double *h_A, double *h_B, double *XC, const int Nrows, const int Ncols){
int work_size = 0;
int *devInfo = mat::createInt(1);
double *d_A = mat::createDouble(Nrows * Ncols);
hipMemcpy(d_A, h_A, Nrows * Ncols * sizeof(double), hipMemcpyHostToDevice);
double *d_TAU = mat::createDouble(min(Nrows, Ncols));
hipsolverDnDgeqrf_bufferSize(solver_handle, Nrows, Ncols, d_A, Nrows, &work_size);
double *work = mat::createDouble(work_size);
hipsolverDnDgeqrf(solver_handle, Nrows, Ncols, d_A, Nrows, d_TAU, work, work_size, devInfo);
double *d_Q = mat::createDouble(Nrows * Nrows);
hipsolverDnDormqr(solver_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_OP_N, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_Q, Nrows, work, work_size, devInfo);
double *d_C = mat::createDouble(Nrows * Nrows);
mat::init(d_C, 0, Nrows * Nrows);
hipMemcpy(d_C, h_B, Nrows * sizeof(double), hipMemcpyHostToDevice);
hipsolverDnDormqr(solver_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_OP_T, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_C, Nrows, work, work_size, devInfo);
double *d_R = mat::createDouble(Ncols * Ncols);
double *d_B = mat::createDouble(Ncols * Ncols);
hipLaunchKernelGGL(( reduceSystem), dim3(Ncols), dim3(Ncols), 0, 0, d_A, d_R, d_C, d_B, Nrows, Ncols);
const double alpha = 1.;
hipblasDtrsm(cublas_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, Ncols, Ncols,
&alpha, d_R, Ncols, d_B, Ncols);
hipMemcpy(XC, d_B, Ncols * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipFree(d_Q);
hipFree(d_R);
hipFree(d_TAU);
hipFree(devInfo);
hipFree(work);
}
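// polyfit fits a quadratic p[0]*x^2 + p[1]*x + p[2] to the sample points by least
// squares (via solveQR) and returns the residual sum of squares; presumably used by the
// line search to pick a trial step length from the sampled misfit values.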
static double polyfit(double *x, double *y, double *p, int n){
double *A = mat::createDoubleHost(3 * n);
for(int i = 0; i < n; i++){
A[i] = x[i] * x[i];
A[i + n] = x[i];
A[i + n * 2] = 1;
}
solveQR(A, y, p, n, 3);
double rss = 0;
for(int i = 0; i < n; i++){
double ei = p[0] * x[i] * x[i] + p[1] * x[i] + p[2];
rss += pow(y[i] - ei, 2);
}
return rss;
}
static float polyfit(float *fx, float *fy, float *fp, int n){
double *x = mat::createDoubleHost(n);
double *y = mat::createDoubleHost(n);
double *p = mat::createDoubleHost(3);
for(int i = 0; i < n; i++){
x[i] = fx[i];
y[i] = fy[i];
}
float rss = polyfit(x, y, p, n);
for(int i = 0; i < 3; i++){
fp[i] = p[i];
}
free(x);
free(y);
free(p);
return rss;
}
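// str2float is a minimal parser for Fortran-style numeric literals (e.g. "9.8d-1" or
// "1.5e3"): it splits the string into integer part, fraction and exponent, accepting
// both 'd' and 'e' as the exponent marker.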
static float str2float(const char *str){
char str1[20] = {'\0'};
char str2[20] = {'\0'};
char str3[10] = {'\0'};
float num = 0;
int len = strlen(str);
int offset = 0;
char *current = str1;
for(int i = 0; i < len; i++){
if((str[i] >= 48 && str[i] <= 57) || str[i] == '+' || str[i] == '-'){
current[i - offset] = str[i];
}
else if(str[i] == 'd' || str[i] == 'e'){
offset = i + 1;
current = str3;
}
else if(str[i] == '.'){
offset = i;
str2[0] = '.';
current = str2;
}
else{
break;
}
}
float e = 1;
float nege = 1;
if(strlen(str3) > 0){
int numi = atoi(str3);
if(numi < 0){
for(int i = 0; i < -numi; i++){
nege *= 10;
}
}
else{
for(int i = 0; i < numi; i++){
e *= 10;
}
}
}
if(strlen(str1) > 0){
num = e * atoi(str1);
}
if(strlen(str2) > 0){
float numf = e * atof(str2);
if(num >= 0){
num += numf;
}
else{
num -= numf;
}
}
return num / nege;
}
static int str2int(const char *str){
return lroundf(str2float(str));
}
static void printStat(int a, int b){
a++;
if(b >= 100){
if(a < 10){
printf(" task 00%d of %d\n", a, b);
return;
}
if(a < 100){
printf(" task 0%d of %d\n", a, b);
return;
}
}
else if(b >= 10){
if(a < 10){
printf(" task 0%d of %d\n", a, b);
return;
}
}
printf(" task %d of %d\n", a, b);
}
static int getFileLength(FILE *file){
fseek (file, 0, SEEK_END);
int length = ftell (file);
fseek (file, 0, SEEK_SET);
return length;
}
static void initialiseModel(const char *model_dir){
int npt;
char path[80];
sprintf(path, "%s/proc000000_x.bin", model_dir);
FILE *xfile = fopen(path,"rb");
sprintf(path, "%s/proc000000_z.bin", model_dir);
FILE *zfile = fopen(path,"rb");
fread(&npt, sizeof(int), 1, xfile);
fread(&npt, sizeof(int), 1, zfile);
npt /= 4;
float *xbuffer = mat::createHost(npt);
float *zbuffer = mat::createHost(npt);
fread(xbuffer, sizeof(float), npt, xfile);
fread(zbuffer, sizeof(float), npt, zfile);
dat::Lx = 0;
dat::Lz = 0;
for(int i = 0; i < npt; i++){
if(xbuffer[i] > dat::Lx) dat::Lx = xbuffer[i];
if(zbuffer[i] > dat::Lz) dat::Lz = zbuffer[i];
}
dat::nx = lroundf(sqrt(npt * dat::Lx / dat::Lz));
dat::nz = lroundf(npt / dat::nx);
dat::nx2 = dat::nx * ntask;
// dat::nx *= 2;
// dat::nz *= 2;
free(xbuffer);
free(zbuffer);
fclose(xfile);
fclose(zfile);
}
static void readFortran(const char *fname, int isrc){
FILE *parfile = fopen(fname,"r");
char key[80];
char value[80];
int stat = 0;
int offset = 0;
char c = 0;
int i = 0;
while(c != EOF){
c = fgetc(parfile);
switch(c){
case '\0': case '\r': case '\t': case '\n': case EOF:{
if(stat == 4){
value[i - offset] = '\0';
stat = 5;
}
if(stat == 5){
if(isrc < 0){
if(strcmp(key, "simulation_mode") == 0){
dat::simulation_mode = str2int(value);
}
else if(strcmp(key, "wave_propagation_type") == 0){
switch(str2int(value)){
case 0: dat::wave_propagation_sh = 1; dat::wave_propagation_psv = 0; break;
case 1: dat::wave_propagation_sh = 0; dat::wave_propagation_psv = 1; break;
case 2: dat::wave_propagation_sh = 1; dat::wave_propagation_psv = 1; break;
}
}
else if(strcmp(key, "nt") == 0){
dat::nt = str2int(value);
}
else if(strcmp(key, "dt") == 0){
dat::dt = str2float(value);
}
else if(strcmp(key, "obs_type") == 0){
dat::obs_type = str2int(value);
}
else if(strcmp(key, "ntask") == 0){
dat::ntask = str2int(value);
}
else if(strcmp(key, "misfit_type") == 0){
dat::misfit_type = str2int(value);
}
else if(strcmp(key, "obs_su") == 0){
dat::obs_su = str2int(value);
}
else if(strcmp(key, "absorb_left") == 0){
dat::absorb_left = str2int(value);
}
else if(strcmp(key, "absorb_right") == 0){
dat::absorb_right = str2int(value);
}
else if(strcmp(key, "absorb_top") == 0){
dat::absorb_top = str2int(value);
}
else if(strcmp(key, "absorb_bottom") == 0){
dat::absorb_bottom = str2int(value);
}
else if(strcmp(key, "absorb_width") == 0){
dat::absorb_width = str2float(value);
}
else if(strcmp(key, "nsrc") == 0){
dat::nsrc = str2int(value);
}
else if(strcmp(key, "sfe") == 0){
dat::sfe = str2int(value);
}
else if(strcmp(key, "filter_kernel") == 0){
dat::filter_kernel = str2int(value);
}
else if(strcmp(key, "inv_iteration") == 0){
dat::inv_iteration = str2int(value);
}
else if(strcmp(key, "inv_maxiter") == 0){
dat::inv_maxiter = str2int(value);
}
else if(strcmp(key, "lbfgs_mem") == 0){
dat::lbfgs_mem = str2int(value);
}
else if(strcmp(key, "optimize") == 0){
dat::optimize = str2int(value);
}
else if(strcmp(key, "ls_steplenmax") == 0){
dat::ls_steplenmax = str2float(value);
}
else if(strcmp(key, "ls_stepleninit") == 0){
dat::ls_stepleninit = str2float(value);
}
else if(strcmp(key, "ls_thresh") == 0){
dat::ls_thresh = str2float(value);
}
else if(strcmp(key, "ls_stepcountmax") == 0){
dat::ls_stepcountmax = str2int(value);
}
else if(strcmp(key, "parametrisation") == 0){
dat::parametrisation = str2int(value);
}
else if(strcmp(key, "model_init") == 0){
int len = strlen(value);
dat::model_init = (char *)malloc((len + 1)*sizeof(char));
strcpy(dat::model_init, value);
initialiseModel(value);
}
else if(strcmp(key, "model_true") == 0){
int len = strlen(value);
dat::model_true = (char *)malloc((len + 1)*sizeof(char));
strcpy(dat::model_true, value);
}
else if(strcmp(key, "output_path") == 0){
int len = strlen(value);
dat::output_path = (char *)malloc((len + 1)*sizeof(char));
strcpy(dat::output_path, value);
}
else if(strcmp(key, "obs_su_path") == 0){
int len = strlen(value);
dat::obs_su_path = (char *)malloc((len + 1)*sizeof(char));
strcpy(dat::obs_su_path, value);
}
else if(strcmp(key, "inv_parameter") == 0){
dat::inv_parameter = str2int(value);
}
}
else{
if(strcmp(key, "xs") == 0){
dat::src_x[isrc] = str2float(value);
}
else if(strcmp(key, "zs") == 0){
dat::src_z[isrc] = str2float(value);
}
else if(strcmp(key, "f0") == 0){
dat::src_f0[isrc] = str2float(value);
}
else if(strcmp(key, "t0") == 0 || strcmp(key, "tshift") == 0){
dat::src_t0[isrc] = str2float(value);
}
else if(strcmp(key, "angle") == 0 || strcmp(key, "anglesource") == 0){
dat::src_angle[isrc] = str2float(value);
}
else if(strcmp(key, "factor") == 0){
dat::src_factor[isrc] = str2float(value);
}
else if(strcmp(key, "type") == 0 || strcmp(key, "source_type") == 0){
dat::src_type[isrc] = str2float(value);
}
}
}
stat = 0;
offset = 0;
i = -1;
break;
}
case '#':{
switch(stat){
case 4: value[i - offset] = '\0'; stat = 5; break;
case 5: break;
default: stat = -1;
}
break;
}
case ' ':{
switch(stat){
case 1: key[i - offset] = '\0'; stat = 2; break;
case 4: value[i - offset] = '\0'; stat = 5; break;
}
break;
}
case '=':{
switch(stat){
case 1: key[i - offset] = '\0'; stat = 3; break;
case 2: stat = 3; break;
case 5: break;
default: stat = -1;
}
break;
}
default:{
if(c >= 65 && c <= 90){
c += 32;
}
switch(stat){
case 0: stat = 1; offset = i; key[0] = c; break;
case 1: key[i - offset] = c; break;
case 2: stat = -1; break;
case 3: stat = 4; offset = i; value[0] = c; break;
case 4: value[i - offset] = c; break;
}
}
}
i++;
}
fclose(parfile);
}
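// Read a binary model (proc000000_{x,z,rho,vp,vs}.bin): each file starts with the payload
// size in bytes, followed by npt floats. The scattered samples are copied to the device and
// mapped onto the regular nx-by-nz grid by the mesh2grid kernel; if dat::parametrisation is
// set, the (vp, vs, rho) values are converted to (lambda, mu, rho).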
static int loadModel(const char *model_dir){
char path[80];
sprintf(path, "%s/proc000000_x.bin", model_dir);
FILE *xfile = fopen(path,"rb");
sprintf(path, "%s/proc000000_z.bin", model_dir);
FILE *zfile = fopen(path,"rb");
sprintf(path, "%s/proc000000_rho.bin", model_dir);
FILE *rfile = fopen(path,"rb");
sprintf(path, "%s/proc000000_vp.bin", model_dir);
FILE *pfile = fopen(path,"rb");
sprintf(path, "%s/proc000000_vs.bin", model_dir);
FILE *sfile = fopen(path,"rb");
int npt;
fread(&npt, sizeof(int), 1, xfile);
fread(&npt, sizeof(int), 1, zfile);
fread(&npt, sizeof(int), 1, rfile);
fread(&npt, sizeof(int), 1, pfile);
fread(&npt, sizeof(int), 1, sfile);
npt /= 4;
float *xbuffer = mat::createHost(npt);
float *zbuffer = mat::createHost(npt);
float *rbuffer = mat::createHost(npt);
float *pbuffer = mat::createHost(npt);
float *sbuffer = mat::createHost(npt);
fread(xbuffer, sizeof(float), npt, xfile);
fread(zbuffer, sizeof(float), npt, zfile);
fread(rbuffer, sizeof(float), npt, rfile);
fread(pbuffer, sizeof(float), npt, pfile);
fread(sbuffer, sizeof(float), npt, sfile);
float *dxbuffer = mat::create(npt);
float *dzbuffer = mat::create(npt);
float *drbuffer = mat::create(npt);
float *dpbuffer = mat::create(npt);
float *dsbuffer = mat::create(npt);
mat::copyHostToDevice(dxbuffer, xbuffer, npt);
mat::copyHostToDevice(dzbuffer, zbuffer, npt);
mat::copyHostToDevice(drbuffer, rbuffer, npt);
mat::copyHostToDevice(dpbuffer, pbuffer, npt);
mat::copyHostToDevice(dsbuffer, sbuffer, npt);
float dmax = dat::Lx * dat::Lx + dat::Lz * dat::Lz;
hipLaunchKernelGGL(( mesh2grid), dim3(nxb), dim3(nzt), 0, 0, dxbuffer, dzbuffer, drbuffer, dpbuffer, dsbuffer,
dat::lambda, dat::mu, dat::rho, dat::Lx/(nx-1), dat::Lz/(nz-1), dmax, npt);
if(dat::parametrisation){
hipLaunchKernelGGL(( changeParametrisation), dim3(nxb), dim3(nzt), 0, 0, dat::lambda, dat::mu, dat::rho, psv);
}
free(xbuffer);
free(zbuffer);
free(rbuffer);
free(pbuffer);
free(sbuffer);
hipFree(dxbuffer);
hipFree(dzbuffer);
hipFree(drbuffer);
hipFree(dpbuffer);
hipFree(dsbuffer);
fclose(xfile);
fclose(zfile);
fclose(rfile);
fclose(pfile);
fclose(sfile);
return 1;
}
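// Top-level setup: assign default parameter values, read Par_file, the SOURCE_* files and
// the STATIONS list, then allocate every device field (wavefields, kernels, source and
// adjoint source time functions), compute the grid indices of sources and receivers and
// initialise the absorbing-boundary taper.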
static int importData(const char *datapath){
dat::simulation_mode = 0;
dat::wave_propagation_sh = 1;
dat::wave_propagation_psv = 0;
dat::obs_type = 0;
dat::ntask = 1;
dat::misfit_type = 0;
dat::parametrisation = 1;
dat::obs_su = 0;
dat::nt = 5000;
dat::dt = 0.06;
dat::sfe = 10;
dat::nsrc = 1;
dat::misfit_ref = 1;
dat::absorb_bottom = 1;
dat::absorb_right = 1;
dat::absorb_top = 1;
dat::absorb_left = 1;
dat::absorb_width = 48000;
dat::obs_su_path = "trace";
dat::output_path = "output";
dat::model_init = "model_init";
dat::model_true = "model_true";
dat::optimize = 1;
dat::filter_kernel = 4;
dat::inv_iteration = 5;
dat::inv_maxiter = 0;
dat::lbfgs_mem = 5;
dat::ls_stepleninit = 0.05;
dat::ls_steplenmax = 0.5;
dat::ls_stepcountmax = 10;
dat::ls_thresh = 1.2;
dat::inv_parameter = 1;
char path[80];
sprintf(path, "%s/Par_file", datapath);
readFortran(path, -1);
{
dat::src_x = mat::createHost(nsrc);
dat::src_z = mat::createHost(nsrc);
dat::src_type = mat::createIntHost(nsrc);
dat::src_f0 = mat::createHost(nsrc);
dat::src_t0 = mat::createHost(nsrc);
dat::src_angle = mat::createHost(nsrc);
dat::src_factor = mat::createHost(nsrc);
for(int isrc = 0; isrc < nsrc; isrc++){
if(isrc < 10){
sprintf(path, "%s/SOURCE_00000%d", datapath, isrc);
}
else if(isrc < 100){
sprintf(path, "%s/SOURCE_0000%d", datapath, isrc);
}
else{
sprintf(path, "%s/SOURCE_000%d", datapath, isrc);
}
readFortran(path, isrc);
}
float *src_x = dat::src_x;
float *src_z = dat::src_z;
dat::src_x = mat::create(nsrc);
dat::src_z = mat::create(nsrc);
mat::copyHostToDevice(dat::src_x, src_x, nsrc);
mat::copyHostToDevice(dat::src_z, src_z, nsrc);
free(src_x);
free(src_z);
}
{
sprintf(path, "%s/STATIONS", datapath);
FILE *stfile = fopen(path,"r");
char buffer[80];
char numbuffer[40];
dat::nrec = 0;
while(fgets(buffer, 80, stfile) != NULL){
if(buffer[0] == 'S'){
dat::nrec ++;
}
}
fseek (stfile, 0, SEEK_SET);
float *rec_x = mat::createHost(nrec);
float *rec_z = mat::createHost(nrec);
int irec = 0;
while(fgets(buffer, 80, stfile) != NULL){
if(buffer[0] == 'S'){
int stat = 0;
int offset = 0;
for(int i = 0; i < 80 && buffer[i] != '\0'; i++){
if(buffer[i] == ' '){
switch(stat){
case 0: stat++; break;
case 2: stat++; break;
case 4:{
stat++;
numbuffer[i - offset] = '\0';
rec_x[irec] = str2float(numbuffer);
break;
}
case 6:{
stat++;
numbuffer[i - offset] = '\0';
rec_z[irec] = str2float(numbuffer);
i = 80;
break;
}
}
}
else{
if(stat == 1 || stat == 3 || stat == 5){
stat++;
offset = i;
}
if(stat == 4 || stat == 6){
numbuffer[i - offset] = buffer[i];
}
}
}
irec++;
}
}
dat::rec_x = mat::create(nrec);
dat::rec_z = mat::create(nrec);
mat::copyHostToDevice(dat::rec_x, rec_x, nrec);
mat::copyHostToDevice(dat::rec_z, rec_z, nrec);
// mat::init(dat::rec_z, 12000, nrec); // later
free(rec_x);
free(rec_z);
fclose(stfile);
}
{
int adjoint = (dat::simulation_mode != 1);
dat::nxb = dim3(nx, 1);
dat::nxb2 = dim3(nx, ntask);
dat::nzt = dim3(nz);
dat::isrc = mat::createIntHost(2);
dat::X = mat::create(nx, nz);
dat::Z = mat::create(nx, nz);
hipLaunchKernelGGL(( initialiseGrids), dim3(nxb), dim3(nzt), 0, 0, dat::X, dat::Z, dat::Lx, nx, dat::Lz, nz);
if(nt % dat::sfe != 0){
nt = dat::sfe * lroundf((float)nt / dat::sfe);
}
dat::nsfe = nt / dat::sfe;
if(sh){
dat::vy = mat::create(nx2, nz);
dat::uy = mat::create(nx2, nz);
dat::sxy = mat::create(nx2, nz);
dat::szy = mat::create(nx2, nz);
dat::dsy = mat::create(nx2, nz);
dat::dvydx = mat::create(nx2, nz);
dat::dvydz = mat::create(nx2, nz);
dat::out_y = mat::create(nrec, nt * ntask);
dat::uy_forward = mat::createHost(dat::nsfe, nx2, nz);
dat::vy_forward = mat::createHost(dat::nsfe, nx2, nz);
}
if(psv){
dat::vx = mat::create(nx2, nz);
dat::vz = mat::create(nx2, nz);
dat::ux = mat::create(nx2, nz);
dat::uz = mat::create(nx2, nz);
dat::sxx = mat::create(nx2, nz);
dat::szz = mat::create(nx2, nz);
dat::sxz = mat::create(nx2, nz);
dat::dsx = mat::create(nx2, nz);
dat::dsz = mat::create(nx2, nz);
dat::dvxdx = mat::create(nx2, nz);
dat::dvxdz = mat::create(nx2, nz);
dat::dvzdx = mat::create(nx2, nz);
dat::dvzdz = mat::create(nx2, nz);
dat::out_x = mat::create(nrec, nt * ntask);
dat::out_z = mat::create(nrec, nt * ntask);
dat::ux_forward = mat::createHost(dat::nsfe, nx2, nz);
dat::uz_forward = mat::createHost(dat::nsfe, nx2, nz);
dat::vx_forward = mat::createHost(dat::nsfe, nx2, nz);
dat::vz_forward = mat::createHost(dat::nsfe, nx2, nz);
}
dat::lambda = mat::create(nx, nz);
dat::rho = mat::create(nx, nz);
dat::mu = mat::create(nx, nz);
dat::absbound = mat::create(nx, nz);
dat::stf_x = mat::create(nsrc, nt);
dat::stf_y = mat::create(nsrc, nt);
dat::stf_z = mat::create(nsrc, nt);
if(adjoint){
if(sh){
dat::dvydx_fw = mat::create(nx, nz);
dat::dvydz_fw = mat::create(nx, nz);
dat::u_obs_y = mat::create(nsrc, nrec, nt);
}
if(psv){
dat::dvxdx_fw = mat::create(nx, nz);
dat::dvxdz_fw = mat::create(nx, nz);
dat::dvzdx_fw = mat::create(nx, nz);
dat::dvzdz_fw = mat::create(nx, nz);
dat::u_obs_x = mat::create(nsrc, nrec, nt);
dat::u_obs_z = mat::create(nsrc, nrec, nt);
}
dat::K_lambda = mat::create(nx, nz);
dat::K_mu = mat::create(nx, nz);
dat::K_rho = mat::create(nx, nz);
dat::adstf_x = mat::create(nrec, nt);
dat::adstf_y = mat::create(nrec, nt);
dat::adstf_z = mat::create(nrec, nt);
}
dat::src_x_id = mat::createInt(nsrc);
dat::src_z_id = mat::createInt(nsrc);
dat::rec_x_id = mat::createInt(nrec);
dat::rec_z_id = mat::createInt(nrec);
hipLaunchKernelGGL(( computeIndices), dim3(nsrc), dim3(1), 0, 0, dat::src_x_id, dat::src_x, dat::Lx, nx);
hipLaunchKernelGGL(( computeIndices), dim3(nsrc), dim3(1), 0, 0, dat::src_z_id, dat::src_z, dat::Lz, nz);
hipLaunchKernelGGL(( computeIndices), dim3(nrec), dim3(1), 0, 0, dat::rec_x_id, dat::rec_x, dat::Lx, nx);
hipLaunchKernelGGL(( computeIndices), dim3(nrec), dim3(1), 0, 0, dat::rec_z_id, dat::rec_z, dat::Lz, nz);
hipLaunchKernelGGL(( initialiseAbsorbingBoundaries), dim3(nxb), dim3(nzt), 0, 0,
dat::absbound, dat::absorb_width,
dat::absorb_left, dat::absorb_right, dat::absorb_bottom, dat::absorb_top,
dat::Lx, dat::Lz, dat::X, dat::Z
);
}
return 1;
}
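// Write the current model (x, z, rho, vp, vs) and, from iteration 1 on, the rho/mu/lambda
// kernels into a zero-padded per-iteration subdirectory of output_path. The leading int of
// each binary file stores 4*npt, matching the convention expected by loadModel.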
static void exportData(int iter){
iter++;
char name[80];
if(iter < 10){
sprintf(name, "%s/000%d", dat::output_path, iter);
}
else if(iter < 100){
sprintf(name, "%s/00%d", dat::output_path, iter);
}
else if(iter < 1000){
sprintf(name, "%s/0%d", dat::output_path, iter);
}
else{
sprintf(name, "%s/%d", dat::output_path, iter);
}
mkdir(name);
char path[80];
sprintf(path, "%s/proc000000_x.bin", name);
FILE *xfile = fopen(path,"wb");
sprintf(path, "%s/proc000000_z.bin", name);
FILE *zfile = fopen(path,"wb");
sprintf(path, "%s/proc000000_rho.bin", name);
FILE *rfile = fopen(path,"wb");
sprintf(path, "%s/proc000000_vp.bin", name);
FILE *pfile = fopen(path,"wb");
sprintf(path, "%s/proc000000_vs.bin", name);
FILE *sfile = fopen(path,"wb");
FILE *krfile = NULL;
FILE *klfile = NULL;
FILE *kmfile = NULL;
if(iter > 0){
sprintf(path, "%s/kernel_rho.bin", name);
krfile = fopen(path,"wb");
sprintf(path, "%s/kernel_lambda.bin", name);
klfile = fopen(path,"wb");
sprintf(path, "%s/kernel_mu.bin", name);
kmfile = fopen(path,"wb");
}
int npt = nx * nz * 4;
fwrite(&npt, sizeof(int), 1, xfile);
fwrite(&npt, sizeof(int), 1, zfile);
fwrite(&npt, sizeof(int), 1, rfile);
fwrite(&npt, sizeof(int), 1, pfile);
fwrite(&npt, sizeof(int), 1, sfile);
if(iter > 0){
fwrite(&npt, sizeof(int), 1, krfile);
fwrite(&npt, sizeof(int), 1, kmfile);
fwrite(&npt, sizeof(int), 1, klfile);
}
npt /= 4;
float *buffer = mat::createHost(npt);
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::X), npt);
fwrite(buffer, sizeof(float), npt, xfile);
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::Z), npt);
fwrite(buffer, sizeof(float), npt, zfile);
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::rho), npt);
fwrite(buffer, sizeof(float), npt, rfile);
float *vp = mat::create(npt);
mat::copy(vp, mat::getDataPointer(dat::lambda), npt);
float *vs = mat::create(npt);
mat::copy(vs, mat::getDataPointer(dat::mu), npt);
float *rho = mat::create(npt);
mat::copy(rho, mat::getDataPointer(dat::rho), npt);
if(dat::parametrisation){
hipLaunchKernelGGL(( changeParametrisation), dim3(nxb), dim3(nzt), 0, 0, vp, vs, rho, nz, psv);
}
mat::copyDeviceToHost(buffer, vp, npt);
fwrite(buffer, sizeof(float), npt, pfile);
mat::copyDeviceToHost(buffer, vs, npt);
fwrite(buffer, sizeof(float), npt, sfile);
if(iter > 0){
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::K_rho), npt);
fwrite(buffer, sizeof(float), npt, krfile);
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::K_mu), npt);
fwrite(buffer, sizeof(float), npt, kmfile);
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::K_lambda), npt);
fwrite(buffer, sizeof(float), npt, klfile);
}
hipFree(vp);
hipFree(vs);
hipFree(rho);
free(buffer);
fclose(xfile);
fclose(zfile);
fclose(rfile);
fclose(pfile);
fclose(sfile);
if(iter > 0){
fclose(krfile);
fclose(kmfile);
fclose(klfile);
}
}
static void checkMemoryUsage(){
size_t free_byte ;
size_t total_byte ;
hipMemGetInfo( &free_byte, &total_byte ) ;
float free_db = (float)free_byte ;
float total_db = (float)total_byte ;
float used_db = total_db - free_db ;
printf("memory usage: %.1fMB / %.1fMB\n", used_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0);
}
static void applyGaussian(float **p, int sigma){
float **gsum = mat::create(nx, nz);
float **gtemp = mat::create(nx, nz);
hipLaunchKernelGGL(( initialiseGaussian), dim3(nxb), dim3(nzt), 0, 0, gsum, nx, nz, sigma);
hipLaunchKernelGGL(( filterKernelX), dim3(nxb), dim3(nzt), 0, 0, p, gtemp, nx, sigma);
hipLaunchKernelGGL(( filterKernelZ), dim3(nxb), dim3(nzt), 0, 0, p, gtemp, gsum, nz, sigma);
mat::freeDevice(gsum);
mat::freeDevice(gtemp);
}
static void generateChecker(float **p, float dp, float margin, int cx, int cz){
float lx = dat::Lx / (cx + (cx + 3) * margin);
float lz = dat::Lz / (cz + (cz + 3) * margin);
hipLaunchKernelGGL(( generateChecker), dim3(nxb), dim3(nzt), 0, 0, p, dp, margin, lx, lz, dat::X, dat::Z);
int sigma = (int) 15 / cx;
applyGaussian(p, sigma);
}
static void generateLayer(float **p, float dp, int n){
float dz = dat::Lz / n;
float dpi = 2*dp / (n-1);
for(int i = 0; i < n; i++){
hipLaunchKernelGGL(( generateLayer), dim3(nxb), dim3(nzt), 0, 0, p, i*dz, (i+1)*dz, -dp + dpi*i, dat::Z);
}
int sigma = (int) 15 / n;
applyGaussian(p, sigma);
}
static void generateRandomLayer(float **p, float dp, float dl, int n){
float dz = dat::Lz / n;
float dpi = 2*dp / (n-1);
float *layer1 = mat::create(nx);
float *layer2 = mat::create(nx);
float *hlayer = mat::createHost(nx);
float base = dl * dz / n;
float dx = dat::Lx / (nx - 1);
srand(time(0));
for(int i = 0; i < n; i++){
mat::initHost(hlayer, 0, nx);
for(int k = 0; k < n; k++){
float rng = (float)(rand() % 101) / 100;
for(int j = 0; j < nx; j++){
hlayer[j] += base * sin((k+rng)*2*pi*j*dx/dat::Lx+rng*pi);
}
}
mat::copyHostToDevice(layer2, hlayer, nx);
if(i==0){
mat::init(layer1,0,nx);
}
else if(i==n-1){
mat::init(layer2,0,nx);
}
hipLaunchKernelGGL(( generateRandomLayer), dim3(nxb), dim3(nzt), 0, 0, p, i*dz, (i+1)*dz, -dp + dpi*i, layer1, layer2, dat::Z);
mat::copy(layer1, layer2, nx);
}
int sigma = (int) 12 / n;
applyGaussian(p, sigma);
hipFree(layer1);
hipFree(layer2);
free(hlayer);
}
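// Write one Seismic Unix trace per receiver: a 240-byte header (28 ints, 2+2 shorts,
// 30 floats) carrying rounded source/receiver coordinates and dt in microseconds
// (zero when it does not fit in a short int), followed by nt float samples.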
static void writeSU(const char *fname, const int isrc, float **data){
FILE *su = fopen(fname, "wb");
int header1[28];
short int header2[2];
short int header3[2];
float header4[30];
for(int i = 0; i < 28; i++) header1[i] = 0;
for(int i = 0; i < 2; i++) header2[i] = 0;
for(int i = 0; i < 2; i++) header3[i] = 0;
for(int i = 0; i < 30; i++) header4[i] = 0;
float *src_x = mat::createHost(nsrc);
float *src_z = mat::createHost(nsrc);
mat::copyDeviceToHost(src_x, dat::src_x, nsrc);
mat::copyDeviceToHost(src_z, dat::src_z, nsrc);
float xs = src_x[isrc];
float zs = src_z[isrc];
free(src_x);
free(src_z);
float *rec_x = mat::createHost(nrec);
float *rec_z = mat::createHost(nrec);
mat::copyDeviceToHost(rec_x, dat::rec_x, nrec);
mat::copyDeviceToHost(rec_z, dat::rec_z, nrec);
short int dt_int2;
if(dt * 1e6 > pow(2, 15)){
dt_int2 = 0;
}
else{
dt_int2 = (short int)(dt * 1e6);
}
header1[18] = lroundf(xs);
header1[19] = lroundf(zs);
header2[0] = 0;
header2[1] = nt;
header3[0] = dt_int2;
header3[1] = 0;
for(int irec = 0; irec < nrec; irec++){
header1[0] = irec + 1;
header1[9] = lroundf(rec_x[irec] - xs);
header1[20] = lroundf(rec_x[irec]);
header1[21] = lroundf(rec_z[irec]);
if(nrec > 1){
header4[1] = rec_x[1] - rec_x[0];
}
fwrite(header1, sizeof(int), 28, su);
fwrite(header2, sizeof(short int), 2, su);
fwrite(header3, sizeof(short int), 2, su);
fwrite(header4, sizeof(float), 30, su);
fwrite(data[irec], sizeof(float), nt, su);
}
free(rec_x);
free(rec_z);
fclose(su);
}
static void writeSU(float ***u_obs, char c){
char path[80];
for(int i = 0; i < nsrc; i++){
if(i < 10){
sprintf(path, "%s/U%c_00000%d", dat::obs_su_path, c, i);
}
else if(i < 100){
sprintf(path, "%s/U%c_0000%d", dat::obs_su_path, c, i);
}
else{
sprintf(path, "%s/U%c_000%d", dat::obs_su_path, c, i);
}
writeSU(path, i, u_obs[i]);
}
}
static void writeSU(){
float ***u_obs = mat::createHost(nsrc, nrec, nt);
mkdir(dat::obs_su_path);
if(sh){
mat::copyDeviceToHost(u_obs, dat::u_obs_y, nsrc, nrec, nt);
writeSU(u_obs, 'y');
}
if(psv){
mat::copyDeviceToHost(u_obs, dat::u_obs_x, nsrc, nrec, nt);
writeSU(u_obs, 'x');
mat::copyDeviceToHost(u_obs, dat::u_obs_z, nsrc, nrec, nt);
writeSU(u_obs, 'z');
}
mat::freeHost(u_obs);
}
static void readSU(const char *fname, float **data){
FILE *su = fopen(fname, "rb");
int header1[28];
short int header2[2];
short int header3[2];
float header4[30];
fread(header1, sizeof(int), 28, su);
fread(header2, sizeof(short int), 2, su);
int nt_su = header2[1];
int nrec_su = getFileLength(su) / (240 + 4 * nt);
if(nt_su != nt || nrec_su != nrec){
printf("Error loading Seismic Unix file\n");
}
else{
for(int irec = 0; irec < nrec; irec++){
fread(header1, sizeof(int), 28, su);
fread(header2, sizeof(short int), 2, su);
fread(header3, sizeof(short int), 2, su);
fread(header4, sizeof(float), 30, su);
fread(data[irec], sizeof(float), nt, su);
}
}
fclose(su);
}
static void readSU(float ***u_obs, char c){
char path[80];
for(int i = 0; i < nsrc; i++){
if(i < 10){
sprintf(path, "%s/U%c_00000%d", dat::obs_su_path, c, i);
}
else if(i < 100){
sprintf(path, "%s/U%c_0000%d", dat::obs_su_path, c, i);
}
else{
sprintf(path, "%s/U%c_000%d", dat::obs_su_path, c, i);
}
readSU(path, u_obs[i]);
}
}
static void readSU(){
float ***u_obs = mat::createHost(nsrc, nrec, nt);
if(sh){
readSU(u_obs, 'y');
mat::copyHostToDevice(dat::u_obs_y, u_obs, nsrc, nrec, nt);
}
if(psv){
readSU(u_obs, 'x');
mat::copyHostToDevice(dat::u_obs_x, u_obs, nsrc, nrec, nt);
readSU(u_obs, 'z');
mat::copyHostToDevice(dat::u_obs_z, u_obs, nsrc, nrec, nt);
}
mat::freeHost(u_obs);
}
static void makeSourceTimeFunction(float *stf, int index){
float max = 0;
float f0 = dat::src_f0[index];
float t0 = dat::src_t0[index];
for(int it = 0; it < nt; it++){
float t = it * dt;
switch(dat::src_type[index]){
case 1:{
float a = pi * pi * f0 * f0;
stf[it] = -(t - t0) * exp(-pow(a, 2) * pow(t - t0, 2));
break;
}
// other stf: later
}
if(fabs(stf[it]) > max){
max = fabs(stf[it]);
}
}
if(max > 0){
for(int it = 0; it < nt; it++){
stf[it] /= max;
}
}
}
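// Build the normalised source time function of every source on the host, scale the x/z
// components by the source factor times cos/sin of the source angle and the y component
// by the factor alone, then copy all three to the device.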
static void prepareSTF(){
float **stf_x = mat::createHost(nsrc, nt);
float **stf_y = mat::createHost(nsrc, nt);
float **stf_z = mat::createHost(nsrc, nt);
float *stfn = mat::createHost(nt);
for(int isrc = 0; isrc < nsrc; isrc++){
makeSourceTimeFunction(stfn, isrc);
float angle = dat::src_angle[isrc];
float amp = dat::src_factor[isrc];
for(int it = 0; it < nt; it++){
stf_x[isrc][it] = amp * stfn[it] * cos(angle);
stf_y[isrc][it] = amp * stfn[it];
stf_z[isrc][it] = amp * stfn[it] * sin(angle);
}
}
mat::copyHostToDevice(dat::stf_x, stf_x, nsrc, nt);
mat::copyHostToDevice(dat::stf_y, stf_y, nsrc, nt);
mat::copyHostToDevice(dat::stf_z, stf_z, nsrc, nt);
mat::freeHost(stf_x);
mat::freeHost(stf_y);
mat::freeHost(stf_z);
free(stfn);
}
static void initialiseDynamicFields(){
if(sh){
mat::init(dat::vy, 0, nx2, nz);
mat::init(dat::uy, 0, nx2, nz);
mat::init(dat::sxy, 0, nx2, nz);
mat::init(dat::szy, 0, nx2, nz);
}
if(psv){
mat::init(dat::vx, 0, nx2, nz);
mat::init(dat::vz, 0, nx2, nz);
mat::init(dat::ux, 0, nx2, nz);
mat::init(dat::uz, 0, nx2, nz);
mat::init(dat::sxx, 0, nx2, nz);
mat::init(dat::szz, 0, nx2, nz);
mat::init(dat::sxz, 0, nx2, nz);
}
}
static void initialiseKernels(){
mat::init(dat::K_lambda, 0, nx, nz);
mat::init(dat::K_mu, 0, nx, nz);
mat::init(dat::K_rho, 0, nx, nz);
}
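// Core time-marching loop shared by both simulation modes. mode 0 (forward): store
// displacement/velocity snapshots every sfe steps (in reverse order), inject the source
// time functions and record seismograms or observed data at the receivers. mode 1
// (adjoint): inject the adjoint sources at the receivers and, every sfe steps, reload the
// stored forward wavefield to accumulate the rho/mu/lambda kernels.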
static void runWaveFieldPropagation(){
initialiseDynamicFields();
int ntask2 = dat::isrc[1]-dat::isrc[0]+1;
for(int it = 0; it < nt; it++){
if(mode == 0){
if((it + 1) % dat::sfe == 0){
int isfe = dat::nsfe - (it + 1) / dat::sfe;
if(sh){
mat::copyDeviceToHost(dat::uy_forward[isfe], dat::uy, nx2, nz);
}
if(psv){
mat::copyDeviceToHost(dat::ux_forward[isfe], dat::ux, nx2, nz);
mat::copyDeviceToHost(dat::uz_forward[isfe], dat::uz, nx2, nz);
}
}
}
if(sh){
hipLaunchKernelGGL(( divSY), dim3(nxb2), dim3(nzt), 0, 0, dat::dsy, dat::sxy, dat::szy, dat::X, dat::Z, nx, nz);
}
if(psv){
hipLaunchKernelGGL(( divSXZ), dim3(nxb2), dim3(nzt), 0, 0, dat::dsx, dat::dsz, dat::sxx, dat::szz, dat::sxz, dat::X, dat::Z, nx, nz);
}
if(mode == 0){
hipLaunchKernelGGL(( addSTF), dim3(nsrc), dim3(ntask2), 0, 0,
dat::dsx, dat::dsy, dat::dsz, dat::stf_x, dat::stf_y, dat::stf_z,
dat::src_x_id, dat::src_z_id, dat::isrc[0], sh, psv, it,nx
);
}
else if(mode == 1){
// next: adstf ntask
hipLaunchKernelGGL(( addSTF), dim3(nrec), dim3(1), 0, 0,
dat::dsx, dat::dsy, dat::dsz, dat::adstf_x, dat::adstf_y, dat::adstf_z,
dat::rec_x_id, dat::rec_z_id, -1, sh, psv, it,nx
);
}
if(sh){
hipLaunchKernelGGL(( updateV), dim3(nxb2), dim3(nzt), 0, 0, dat::vy, dat::dsy, dat::rho, dat::absbound, dt);
hipLaunchKernelGGL(( divVY), dim3(nxb2), dim3(nzt), 0, 0, dat::dvydx, dat::dvydz, dat::vy, dat::X, dat::Z, nx, nz);
hipLaunchKernelGGL(( updateSY), dim3(nxb2), dim3(nzt), 0, 0, dat::sxy, dat::szy, dat::dvydx, dat::dvydz, dat::mu, dt);
hipLaunchKernelGGL(( updateU), dim3(nxb2), dim3(nzt), 0, 0, dat::uy, dat::vy, dt);
}
if(psv){
hipLaunchKernelGGL(( updateV), dim3(nxb2), dim3(nzt), 0, 0, dat::vx, dat::dsx, dat::rho, dat::absbound, dt);
hipLaunchKernelGGL(( updateV), dim3(nxb2), dim3(nzt), 0, 0, dat::vz, dat::dsz, dat::rho, dat::absbound, dt);
hipLaunchKernelGGL(( divVXZ), dim3(nxb2), dim3(nzt), 0, 0, dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz, dat::vx, dat::vz, dat::X, dat::Z, nx, nz);
hipLaunchKernelGGL(( updateSXZ), dim3(nxb2), dim3(nzt), 0, 0, dat::sxx, dat::szz, dat::sxz, dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz, dat::lambda, dat::mu, dt);
hipLaunchKernelGGL(( updateU), dim3(nxb2), dim3(nzt), 0, 0, dat::ux, dat::vx, dt);
hipLaunchKernelGGL(( updateU), dim3(nxb2), dim3(nzt), 0, 0, dat::uz, dat::vz, dt);
}
if(mode == 0){
// next: saveRec type=1
if(dat::obs_type == 0){
hipLaunchKernelGGL(( saveRec), dim3(nrec), dim3(ntask2), 0, 0,
dat::out_x, dat::out_y, dat::out_z, dat::vx, dat::vy, dat::vz,
dat::rec_x_id, dat::rec_z_id, nx, nt, sh, psv, it
);
}
else if(dat::obs_type == 1){
hipLaunchKernelGGL(( saveRec), dim3(nrec), dim3(ntask2), 0, 0,
dat::out_x, dat::out_y, dat::out_z, dat::ux, dat::uy, dat::uz,
dat::rec_x_id, dat::rec_z_id, nx, nt, sh, psv, it
);
}
else if(dat::obs_type == 2 && dat::isrc[0] >= 0){
hipLaunchKernelGGL(( saveRec), dim3(nrec), dim3(ntask2), 0, 0,
dat::u_obs_x, dat::u_obs_y, dat::u_obs_z, dat::ux, dat::uy, dat::uz,
dat::rec_x_id, dat::rec_z_id, dat::isrc[0], nx, sh, psv, it
);
}
if((it + 1) % dat::sfe == 0){
int isfe = dat::nsfe - (it + 1) / dat::sfe;
if(sh){
mat::copyDeviceToHost(dat::vy_forward[isfe], dat::vy, nx2, nz);
}
if(psv){
mat::copyDeviceToHost(dat::vx_forward[isfe], dat::vx, nx2, nz);
mat::copyDeviceToHost(dat::vz_forward[isfe], dat::vz, nx2, nz);
}
}
}
else if(mode == 1){
if((it + dat::sfe) % dat::sfe == 0){
// dsi -> ui_fw -> vi_fw
int isfe = (it + dat::sfe) / dat::sfe - 1;
float tsfe = dat::sfe * dt;
if(sh){
mat::copyHostToDevice(dat::dsy, dat::uy_forward[isfe], nx2, nz);
hipLaunchKernelGGL(( divVY), dim3(nxb2), dim3(nzt), 0, 0, dat::dvydx, dat::dvydz, dat::uy, dat::X, dat::Z, nx, nz);
hipLaunchKernelGGL(( divVY), dim3(nxb2), dim3(nzt), 0, 0, dat::dvydx_fw, dat::dvydz_fw, dat::dsy, dat::X, dat::Z, nx, nz);
mat::copyHostToDevice(dat::dsy, dat::vy_forward[isfe], nx2, nz);
hipLaunchKernelGGL(( interactionRhoY), dim3(nxb), dim3(nzt), 0, 0, dat::K_rho, dat::vy, dat::dsy, tsfe);
hipLaunchKernelGGL(( interactionMuY), dim3(nxb), dim3(nzt), 0, 0, dat::K_mu, dat::dvydx, dat::dvydx_fw, dat::dvydz, dat::dvydz_fw, tsfe);
// next: K_rho *= ntask
}
if(psv){
mat::copyHostToDevice(dat::dsx, dat::ux_forward[isfe], nx2, nz);
mat::copyHostToDevice(dat::dsz, dat::uz_forward[isfe], nx2, nz);
hipLaunchKernelGGL(( divVXZ), dim3(nxb2), dim3(nzt), 0, 0,
dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz,
dat::ux, dat::uz, dat::X, dat::Z, nx, nz
);
hipLaunchKernelGGL(( divVXZ), dim3(nxb2), dim3(nzt), 0, 0,
dat::dvxdx_fw, dat::dvxdz_fw, dat::dvzdx_fw, dat::dvzdz_fw,
dat::dsx, dat::dsz, dat::X, dat::Z, nx, nz
);
mat::copyHostToDevice(dat::dsx, dat::vx_forward[isfe], nx2, nz);
mat::copyHostToDevice(dat::dsz, dat::vz_forward[isfe], nx2, nz);
hipLaunchKernelGGL(( interactionRhoXZ), dim3(nxb), dim3(nzt), 0, 0, dat::K_rho, dat::vx, dat::dsx, dat::vz, dat::dsz, tsfe);
hipLaunchKernelGGL(( interactionMuXZ), dim3(nxb), dim3(nzt), 0, 0,
dat::K_mu, dat::dvxdx, dat::dvxdx_fw, dat::dvxdz, dat::dvxdz_fw,
dat::dvzdx, dat::dvzdx_fw, dat::dvzdz, dat::dvzdz_fw, tsfe
);
hipLaunchKernelGGL(( interactionLambdaXZ), dim3(nxb), dim3(nzt), 0, 0, dat::K_lambda, dat::dvxdx, dat::dvxdx_fw, dat::dvzdz, dat::dvzdz_fw, tsfe);
}
}
}
}
}
static void runForward(int isrc0, int isrc1){
dat::simulation_mode = 0;
dat::isrc[0] = isrc0;
dat::isrc[1] = isrc1;
runWaveFieldPropagation();
}
static void runAdjoint(int init_kernel){
dat::simulation_mode = 1;
if(init_kernel){
initialiseKernels();
}
runWaveFieldPropagation();
}
static void initialiseFilters(){
// taper weights
dat::tw = mat::create(nt);
hipLaunchKernelGGL(( getTaperWeights), dim3(nt), dim3(1), 0, 0, dat::tw, dt, nt);
// gaussian filter
if(dat::filter_kernel){
dat::gsum = mat::create(nx, nz);
dat::gtemp = mat::create(nx, nz);
hipLaunchKernelGGL(( initialiseGaussian), dim3(nxb), dim3(nzt), 0, 0, dat::gsum, nx, nz, dat::filter_kernel);
}
}
static void prepareObs(){
dat::obs_type = 2;
prepareSTF();
if(dat::obs_su){
printf("Reading observed data");
readSU();
}
else{
printf("Generating observed data\n");
loadModel(dat::model_true);
for(int isrc = 0; isrc < nsrc; isrc += ntask){
runForward(isrc, getTaskIndex(isrc));
for(int i=isrc; i<=getTaskIndex(isrc); i++){
printStat(i, nsrc);
}
}
}
initialiseFilters();
dat::obs_type = 1;
}
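// Envelope misfit for a single trace: build the analytic signals of synthetic and observed
// data with hilbert(), take the envelope difference (stabilised by 5% of the synthetic
// envelope maximum), assemble the corresponding adjoint source into adstf, and return the
// norm of the envelope residual.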
static float calculateEnvelopeMisfit(float **adstf, float *d_misfit, float **out, float ***u_obs,
hipfftComplex *syn, hipfftComplex *obs, float *esyn, float *eobs, float *ersd, float *etmp,
float dt, int isrc, int irec, int j, int nt){
hipLaunchKernelGGL(( copyWaveform), dim3(nt), dim3(1), 0, 0, d_misfit, u_obs, isrc+j, irec);
hilbert(d_misfit, obs);
hipLaunchKernelGGL(( copyWaveform), dim3(nt), dim3(1), 0, 0, d_misfit, out, irec, j*nt);
hilbert(d_misfit, syn);
hipLaunchKernelGGL(( copyC2Abs), dim3(nt), dim3(1), 0, 0, esyn, syn, nt);
hipLaunchKernelGGL(( copyC2Abs), dim3(nt), dim3(1), 0, 0, eobs, obs, nt);
float max = mat::amax(esyn, nt) * 0.05;
hipLaunchKernelGGL(( envelopetmp), dim3(nt), dim3(1), 0, 0, etmp, esyn, eobs, max);
hipLaunchKernelGGL(( copyC2Imag), dim3(nt), dim3(1), 0, 0, ersd, syn, nt);
mat::calc(ersd, ersd, etmp, nt);
hilbert(ersd, obs);
hipLaunchKernelGGL(( copyC2Imag), dim3(nt), dim3(1), 0, 0, ersd, obs, nt);
hipLaunchKernelGGL(( prepareEnvelopeSTF), dim3(nt), dim3(1), 0, 0, adstf, etmp, d_misfit, ersd, nt, irec);
mat::calc(ersd, 1, esyn, -1, eobs, nt);
return mat::norm(ersd, nt);
}
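// Loop over the sources ntask shots at a time: run the forward solve, accumulate the data
// misfit per receiver and component (tapered L2 waveform difference, or the envelope misfit
// when misfit_type == 1), optionally build the adjoint sources and run the adjoint solve to
// stack the event kernels, and finally smooth the kernels with the Gaussian filter.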
static float computeKernelsAndMisfit(int kernel){
float misfit = 0;
float *d_misfit = mat::create(nt);
hipfftComplex *syn;
hipfftComplex *obs;
float *esyn;
float *eobs;
float *ersd;
float *etmp;
if(dat::misfit_type == 1){
hipMalloc((void**)&syn, nt * sizeof(hipfftComplex));
hipMalloc((void**)&obs, nt * sizeof(hipfftComplex));
esyn = mat::create(nt);
eobs = mat::create(nt);
ersd = mat::create(nt);
etmp = mat::create(nt);
}
if(kernel){
printf("Computing gradient\n");
initialiseKernels();
}
for(int isrc = 0; isrc < nsrc; isrc+=ntask){
int jsrc = getTaskIndex(isrc);
runForward(isrc, jsrc);
for(int j = 0; j <= jsrc - isrc; j++){
for(int irec = 0; irec < nrec; irec++){
if(dat::misfit_type == 1){
if(sh){
misfit += calculateEnvelopeMisfit(dat::adstf_y, d_misfit, dat::out_y, dat::u_obs_y,
syn, obs, esyn, eobs, ersd, etmp, dt, isrc, irec, j, nt);
}
if(psv){
misfit += calculateEnvelopeMisfit(dat::adstf_x, d_misfit, dat::out_x, dat::u_obs_x,
syn, obs, esyn, eobs, ersd, etmp, dt, isrc, irec, j, nt);
misfit += calculateEnvelopeMisfit(dat::adstf_z, d_misfit, dat::out_z, dat::u_obs_z,
syn, obs, esyn, eobs, ersd, etmp, dt, isrc, irec, j, nt);
}
}
else{
if(sh){
hipLaunchKernelGGL(( calculateMisfit), dim3(nt), dim3(1), 0, 0, d_misfit, dat::out_y, dat::u_obs_y, dat::tw, sqrt(dt), isrc, irec, j, nt);
misfit += mat::norm(d_misfit, nt);
}
if(psv){
hipLaunchKernelGGL(( calculateMisfit), dim3(nt), dim3(1), 0, 0, d_misfit, dat::out_x, dat::u_obs_x, dat::tw, sqrt(dt), isrc, irec, j, nt);
misfit += mat::norm(d_misfit, nt);
hipLaunchKernelGGL(( calculateMisfit), dim3(nt), dim3(1), 0, 0, d_misfit, dat::out_z, dat::u_obs_z, dat::tw, sqrt(dt), isrc, irec, j, nt);
misfit += mat::norm(d_misfit, nt);
}
}
}
}
if(kernel){
if(dat::misfit_type != 1){
if(sh){
hipLaunchKernelGGL(( prepareAdjointSTF), dim3(nt), dim3(nrec), 0, 0, dat::adstf_y, dat::out_y, dat::u_obs_y, dat::tw, nt, isrc);
if(!psv){
mat::init(dat::adstf_x, 0, nrec, nt);
mat::init(dat::adstf_z, 0, nrec, nt);
}
}
if(psv){
hipLaunchKernelGGL(( prepareAdjointSTF), dim3(nt), dim3(nrec), 0, 0, dat::adstf_x, dat::out_x, dat::u_obs_x, dat::tw, nt, isrc);
hipLaunchKernelGGL(( prepareAdjointSTF), dim3(nt), dim3(nrec), 0, 0, dat::adstf_z, dat::out_z, dat::u_obs_z, dat::tw, nt, isrc);
if(!sh){
mat::init(dat::adstf_y, 0, nrec, nt);
}
}
}
runAdjoint(0);
printStat(isrc, nsrc);
}
}
hipFree(d_misfit);
if(dat::misfit_type == 1){
hipFree(syn);
hipFree(obs);
hipFree(esyn);
hipFree(eobs);
hipFree(ersd);
hipFree(etmp);
}
if(kernel){
if(dat::filter_kernel){
hipLaunchKernelGGL(( filterKernelX), dim3(nxb), dim3(nzt), 0, 0, dat::K_rho, dat::gtemp, nx, dat::filter_kernel);
hipLaunchKernelGGL(( filterKernelZ), dim3(nxb), dim3(nzt), 0, 0, dat::K_rho, dat::gtemp, dat::gsum, nz, dat::filter_kernel);
hipLaunchKernelGGL(( filterKernelX), dim3(nxb), dim3(nzt), 0, 0, dat::K_mu, dat::gtemp, nx, dat::filter_kernel);
hipLaunchKernelGGL(( filterKernelZ), dim3(nxb), dim3(nzt), 0, 0, dat::K_mu, dat::gtemp, dat::gsum, nz, dat::filter_kernel);
hipLaunchKernelGGL(( filterKernelX), dim3(nxb), dim3(nzt), 0, 0, dat::K_lambda, dat::gtemp, nx, dat::filter_kernel);
hipLaunchKernelGGL(( filterKernelZ), dim3(nxb), dim3(nzt), 0, 0, dat::K_lambda, dat::gtemp, dat::gsum, nz, dat::filter_kernel);
}
}
return misfit;
}
static float calculateMisfit(){
return computeKernelsAndMisfit(0);
}
static float computeKernels(){
return computeKernelsAndMisfit(1);
}
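// Nonlinear conjugate-gradient search direction with a Polak-Ribiere-type beta
// (beta = dot(g_new, g_new - g_old) / dot(g_old, g_old)). Returns 0 on the first call
// (steepest descent), -1 to request a restart (periodic restart or not a descent
// direction), 1 otherwise.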
static int computeDirectionCG(float **p_new, float **p_old, float **g_new, float **g_old){
dat::inv_count++;
if(dat::inv_count == 1){
mat::copy(p_new, g_new, -1, nx, nz);
return 0;
}
else if(dat::inv_maxiter && dat::inv_count > dat::inv_maxiter){
fprintf(dat::log_ls, " restarting NLCG... [periodic restart]\n");
printf(" restarting NLCG... [periodic restart]\n");
return -1;
}
// self.precond: later
float den = mat::dot(g_old, g_old, nx, nz);
mat::calc(p_new, 1, g_new, -1, g_old, nx, nz);
float num = mat::dot(g_new, p_new, nx, nz);
float beta = num / den;
mat::calc(p_new, -1, g_new, beta, p_old, nx, nz);
// loss of conjugacy? later
if(mat::dot(p_new, g_new, nx, nz) > 0){
fprintf(dat::log_ls, " restarting NLCG... [not a descent direction]\n");
printf(" restarting NLCG... [not a descent direction]\n");
return -1;
}
return 1;
}
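// L-BFGS search direction: keep the last lbfgs_mem model steps (S) and gradient differences
// (Y) in host memory, apply the standard two-loop recursion scaled by dot(s,y)/dot(y,y),
// and request a restart when the result is not a descent direction.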
static int computeDirectionLBFGS(float **p_new, float **p_old, float **g_new, float **g_old, float **m_new, float **m_old){
dat::inv_count++;
if(dat::inv_count == 1){
mat::copy(p_new, g_new, -1, nx, nz);
return 0;
}
else if(dat::inv_maxiter && dat::inv_count > dat::inv_maxiter){
fprintf(dat::log_ls, " restarting LBFGS... [periodic restart]\n");
printf(" restarting LBFGS... [periodic restart]\n");
return -1;
}
float **tmpS = dat::lbfgs_S[dat::lbfgs_mem-1];
float **tmpY = dat::lbfgs_Y[dat::lbfgs_mem-1];
for(int i = dat::lbfgs_mem-1; i > 0; i--){
dat::lbfgs_S[i] = dat::lbfgs_S[i-1];
dat::lbfgs_Y[i] = dat::lbfgs_Y[i-1];
}
dat::lbfgs_S[0] = tmpS;
dat::lbfgs_Y[0] = tmpY;
mat::calc(p_old, 1, m_new, -1, m_old, nx, nz);
mat::copyDeviceToHost(dat::lbfgs_S[0], p_old, nx, nz);
mat::calc(p_old, 1, g_new, -1, g_old, nx, nz);
mat::copyDeviceToHost(dat::lbfgs_Y[0], p_old, nx, nz);
if(dat::lbfgs_used < dat::lbfgs_mem){
dat::lbfgs_used++;
}
int &kk = dat::lbfgs_used;
float *rh = mat::createHost(kk);
float *al = mat::createHost(kk);
// S->m_old Y->p_old
mat::copy(p_new, g_new, nx, nz);
float sty, yty;
for(int i = 0; i < kk; i++){
mat::copyHostToDevice(m_old, dat::lbfgs_S[i], nx, nz);
mat::copyHostToDevice(p_old, dat::lbfgs_Y[i], nx, nz);
rh[i] = 1 / mat::dot(p_old, m_old, nx, nz);
al[i] = rh[i] * mat::dot(m_old, p_new, nx, nz);
mat::calc(p_new, 1, p_new, -al[i], p_old, nx, nz);
if(i == 0){
sty = 1 / rh[i];
yty = mat::dot(p_old, p_old, nx, nz);
}
}
mat::copy(p_new, p_new, sty/yty, nx, nz);
for(int i = kk-1; i >= 0; i--){
mat::copyHostToDevice(m_old, dat::lbfgs_S[i], nx, nz);
mat::copyHostToDevice(p_old, dat::lbfgs_Y[i], nx, nz);
float be = rh[i] * mat::dot(p_old, p_new, nx, nz);
mat::calc(p_new, 1, p_new, al[i] - be, m_old, nx, nz);
}
free(rh);
free(al);
float angle = calculateAngle(p_new, g_new, 1, nx, nz);
if(angle>=pi/2 || angle<=0){
fprintf(dat::log_ls, " restarting LBFGS... [not a descent direction]\n");
printf(" restarting LBFGS... [not a descent direction]\n");
return -1;
}
mat::copy(p_new, p_new, -1, nx, nz);
return 1;
}
static int argmin(float *f, int n){
float min = f[0];
int idx = 0;
for(int i = 1; i < n; i++){
if(f[i] < min){
min = f[i];
idx = i;
}
}
return idx;
}
static int checkBracket(float *x, float *f, int n){
int imin = argmin(f, n);
float fmin = f[imin];
if(fmin < f[0]){
for(int i = imin; i < n; i++){
if(f[i] > fmin){
return 1;
}
}
}
return 0;
}
static int goodEnough(float *x, float *f, int n, float *alpha){
float thresh = log10(dat::ls_thresh);
if(!checkBracket(x, f, n)){
return 0;
}
float p[3];
int idx = argmin(f, n) - 1;
int fitlen;
if(idx + 3 >= n){
fitlen = 3;
}
else{
fitlen = 4;
}
polyfit(x + idx, f + idx, p, fitlen);
if(p[0] <= 0){
printf("line search error\n");
}
else{
float x0 = -p[1]/(2*p[0]);
*alpha = x0;
for(int i = 1; i < n; i++){
if(fabs(log10(x[i]/x0)) < thresh){
return 1;
}
}
}
return 0;
}
static float backtrack2(float f0, float g0, float x1, float f1, float b1, float b2){
float x2 = -g0 * x1 * x1 / (2 *(f1 - f0 - g0 * x1));
if(x2 > b2*x1){
x2 = b2*x1;
}
else if(x2 < b1*x1){
x2 = b1*x1;
}
return x2;
}
static float updateModel(float **m, float **p, float alpha, float alpha_old){
hipLaunchKernelGGL(( updateModel), dim3(nxb), dim3(nzt), 0, 0, m, p, alpha - alpha_old);
return alpha;
}
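// Choose the next trial step length for the bracketing line search: take the first guess
// from the stored gtg/gtp history, fit a parabola once a bracket exists (goodEnough/polyfit),
// extrapolate by the golden ratio while the misfit keeps decreasing, backtrack quadratically
// otherwise, and clip the result at step_len_max.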
static float calculateStep(const int step_count, float step_len_max, int *status){
int update_count = -1;
float alpha;
float *x = mat::createHost(step_count+1);
float *f = mat::createHost(step_count+1);
for(int i = 0; i < step_count+1; i++){
int j = dat::ls_count - 1 - step_count + i;
x[i] = dat::step_lens[j];
f[i] = dat::func_vals[j];
}
for(int i = 0; i < step_count+1; i++){
for(int j = i+1; j < step_count+1; j++){
if(x[j] < x[i]){
float tmp;
tmp = x[i]; x[i] = x[j]; x[j] = tmp;
tmp = f[i]; f[i] = f[j]; f[j] = tmp;
}
}
}
for(int i = 0; i < dat::ls_count; i++){
if(fabs(dat::step_lens[i]) < 1e-6){
update_count++;
}
}
if(step_count == 0){
if(update_count == 0){
alpha = 1 / dat::ls_gtg[dat::inv_count - 1];
*status = 0;
}
else{
int idx = argmin(dat::func_vals, dat::ls_count - 1);
alpha = dat::step_lens[idx] * dat::ls_gtp[dat::inv_count - 2] / dat::ls_gtp[dat::inv_count - 1];
*status = 0;
}
}
else if(checkBracket(x, f, step_count+1)){
if(goodEnough(x, f, step_count+1, &alpha)){
alpha = x[argmin(f, step_count+1)];
*status = 1;
}
else{
*status = 0;
}
}
else if(step_count <= dat::ls_stepcountmax){
int i;
for(i = 1; i < step_count+1; i++){
if(f[i] > f[0]) break;
}
if(i == step_count+1){
alpha = 1.618034 * x[step_count];
*status = 0;
}
else{
float slope = dat::ls_gtp[dat::inv_count-1]/dat::ls_gtg[dat::inv_count-1];
alpha = backtrack2(f[0], slope, x[1], f[1], 0.1, 0.5);
*status = 0;
}
}
else{
alpha = 0;
*status = -1;
}
if(alpha > step_len_max){
if(step_count == 0){
alpha = 0.618034 * step_len_max;
*status = 0;
}
else{
alpha = step_len_max;
*status = 1;
}
}
free(x);
free(f);
return alpha;
}
static float calculateStepBT(const int step_count, float step_len_max, int *status){
int update_count = -1;
for(int i = 0; i < dat::ls_count; i++){
if(fabs(dat::step_lens[i]) < 1e-6){
update_count++;
}
}
if(update_count == 0){
return calculateStep(step_count, step_len_max, status);
}
float alpha;
float *x = mat::createHost(step_count+1);
float *f = mat::createHost(step_count+1);
for(int i = 0; i < step_count+1; i++){
int j = dat::ls_count - 1 - step_count + i;
x[i] = dat::step_lens[j];
f[i] = dat::func_vals[j];
}
for(int i = 0; i < step_count+1; i++){
for(int j = i+1; j < step_count+1; j++){
if(x[j] < x[i]){
float tmp;
tmp = x[i]; x[i] = x[j]; x[j] = tmp;
tmp = f[i]; f[i] = f[j]; f[j] = tmp;
}
}
}
int idx = argmin(f, step_count+1);
if(step_count == 0){
alpha = step_len_max;
if(alpha > 1){
alpha = 1;
}
*status = 0;
}
else if(f[idx] < f[0]){
alpha = x[idx];
*status = 1;
}
else if(step_count <= dat::ls_stepcountmax){
float slope = dat::ls_gtp[dat::inv_count-1]/dat::ls_gtg[dat::inv_count-1];
alpha = backtrack2(f[0], slope, x[1], f[1], 0.1, 0.5);
*status = 0;
}
else{
alpha = 0;
*status = -1;
}
free(x);
free(f);
return alpha;
}
static void restartSearch(float **p, float **g){
mat::copy(p, g, -1, nx, nz);
dat::ls_count = 0;
dat::inv_count = 1;
if(dat::optimize == 1){
dat::lbfgs_used = 0;
}
}
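// Bracketing/backtracking line search: record (step length, misfit) pairs, move the model
// along p by increments of alpha, and either accept a step (status > 0), restart from the
// steepest-descent direction, or give up when p is effectively orthogonal to the gradient.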
static void lineSearch(float **m, float **g, float **p, float f){
printf("\nPerforming line search\n");
int status = 0;
float alpha = 0;
float norm_m = mat::amax(m, nx, nz);
float norm_p = mat::amax(p, nx, nz);
float gtg = mat::dot(g, g, nx, nz);
float gtp = mat::dot(g, p, nx, nz);
float step_len_max = dat::ls_steplenmax * norm_m / norm_p;
int step_count = 0;
dat::step_lens[dat::ls_count] = 0;
dat::func_vals[dat::ls_count] = f;
dat::ls_gtg[dat::inv_count-1] = gtg;
dat::ls_gtp[dat::inv_count-1] = gtp;
dat::ls_count++;
float alpha_old = 0;
if(dat::ls_stepleninit && dat::ls_count <= 1){
alpha = dat::ls_stepleninit * norm_m / norm_p;
}
else{
alpha = calculateStep(step_count, step_len_max, &status);
}
while(1){
alpha_old = updateModel(m, p, alpha, alpha_old);
dat::step_lens[dat::ls_count] = alpha;
dat::func_vals[dat::ls_count] = calculateMisfit();
dat::ls_count++;
step_count++;
dat::neval++;
if(dat::optimize == 1){
alpha = calculateStepBT(step_count, step_len_max, &status);
}
else{
alpha = calculateStep(step_count, step_len_max, &status);
}
if(step_count < 10){
fprintf(dat::log_ls, " step 0%d misfit = %f\n", step_count, dat::func_vals[dat::ls_count-1]/dat::misfit_ref);
printf(" step 0%d misfit = %f\n", step_count, dat::func_vals[dat::ls_count-1]/dat::misfit_ref);
}
else{
fprintf(dat::log_ls, " step %d misfit = %f\n", step_count, dat::func_vals[dat::ls_count-1]/dat::misfit_ref);
printf(" step %d misfit = %f\n", step_count, dat::func_vals[dat::ls_count-1]/dat::misfit_ref);
}
if(status > 0){
fprintf(dat::log_ls, " alpha = %.2e\n", alpha);
printf(" alpha = %.2e\n", alpha);
float angle = calculateAngle(p, g, -1, nx, nz)*180/pi;
fprintf(dat::log_ls, " angle = %f\n\n", angle);
printf(" angle = %f\n", angle);
updateModel(m, p, alpha, alpha_old);
fprintf(dat::log_misfit, "%d %f\n", dat::neval, dat::func_vals[argmin(dat::func_vals, dat::ls_count)]/dat::misfit_ref);
return;
}
else if(status < 0){
updateModel(m, p, 0, alpha_old);
if(calculateAngle(p, g, -1, nx, nz) < 1e-3){
printf(" line search failed\n");
dat::inv_iteration = 0;
return;
}
else{
printf(" restarting line search...\n");
restartSearch(p, g);
lineSearch(m, g, p, f);
}
}
}
}
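// Inversion driver: create the BLAS/solver (and FFT, for the envelope misfit) handles, copy
// Par_file into the output directory, generate or read the observed data, then iterate
// gradient computation, search-direction update (NLCG or L-BFGS) and line search, exporting
// the model and kernels after every iteration.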
static void inversionRoutine(){
hipblasCreate(&cublas_handle);
hipsolverDnCreate(&solver_handle);
if(dat::misfit_type == 1){
hipfftPlan1d(&cufft_handle, nt, HIPFFT_C2C, 1);
}
if(dat::optimize == 1){
dat::lbfgs_used = 0;
}
{
mkdir(dat::output_path);
char parbuffer[80];
sprintf(parbuffer, "%s/Par_file", dat::parfile);
FILE *parfile = fopen(parbuffer, "r");
sprintf(parbuffer, "%s/par", dat::output_path);
FILE *outfile = fopen(parbuffer, "w");
sprintf(parbuffer, "%s/log", dat::output_path);
dat::log_ls = fopen(parbuffer,"w");
sprintf(parbuffer, "%s/misfit", dat::output_path);
dat::log_misfit = fopen(parbuffer,"w");
dat::neval = 0;
while(fgets(parbuffer, 80, parfile) != NULL){
for(int i = 0; i < 79 && parbuffer[i] != '\0'; i++){
if(parbuffer[i] == '#'){
parbuffer[i] = '\n';
parbuffer[i+1] = '\0';
break;
}
}
fprintf(outfile, "%s", parbuffer);
}
fclose(parfile);
fclose(outfile);
}
prepareObs();
exportData(-1);
loadModel(dat::model_init);
float **m_new;
float **m_old;
float **g_new;
switch(dat::inv_parameter){
case 0: m_new = dat::lambda; g_new = dat::K_lambda; break;
case 1: m_new = dat::mu; g_new = dat::K_mu; break;
case 2: m_new = dat::rho; g_new = dat::K_rho; break;
}
if(dat::optimize == 1){
dat::lbfgs_S = mat::createHost(dat::lbfgs_mem, nx, nz);
dat::lbfgs_Y = mat::createHost(dat::lbfgs_mem, nx, nz);
m_old = mat::create(nx, nz);
}
float **g_old = mat::create(nx, nz);
float **p_old = mat::create(nx, nz);
float **p_new = mat::create(nx, nz);
dat::func_vals = mat::createHost(dat::inv_iteration * dat::ls_stepcountmax);
dat::step_lens = mat::createHost(dat::inv_iteration * dat::ls_stepcountmax);
dat::ls_gtg = mat::createHost(dat::inv_iteration);
dat::ls_gtp = mat::createHost(dat::inv_iteration);
dat::ls_count = 0;
dat::inv_count = 0;
for(int iter = 0; iter < dat::inv_iteration; iter++){
fprintf(dat::log_ls, "iteration %d / %d\n", iter + 1, dat::inv_iteration);
printf("\n\nStarting iteration %d / %d\n", iter + 1, dat::inv_iteration);
float f = computeKernels();
if(iter == 0){
dat::misfit_ref = f;
}
dat::neval += 2;
int dir;
if(dat::optimize == 0){
dir = computeDirectionCG(p_new, p_old, g_new, g_old);
}
else{
dir = computeDirectionLBFGS(p_new, p_old, g_new, g_old, m_new, m_old);
mat::copy(m_old, m_new, nx, nz);
}
if(dir < 0){
restartSearch(p_new, g_new);
}
lineSearch(m_new, g_new, p_new, f);
mat::copy(p_old, p_new, nx, nz);
mat::copy(g_old, g_new, nx, nz);
exportData(iter);
}
fclose(dat::log_ls);
fclose(dat::log_misfit);
hipblasDestroy(cublas_handle);
hipsolverDnDestroy(solver_handle);
if(dat::misfit_type == 1){
hipfftDestroy(cufft_handle);
}
}
int main(int argc, const char *argv[]){
const char *datapath;
if(argc == 1){
datapath = "data";
}
else{
datapath = argv[1];
}
dat::parfile = datapath;
if(importData(datapath)){
switch(mode){
case 0:{
inversionRoutine();
break;
}
case 1:{
loadModel(dat::model_init);
prepareSTF();
// dat::ntask = 1;
runForward(0, 3);
mkdir("output");
mkdir("output/0000");
if(sh){
mat::write(dat::uy_forward, dat::nsfe, nx2, nz, "vy");
}
if(psv){
mat::write(dat::ux_forward, dat::nsfe, nx2, nz, "output/0000/ux_forward.bin");
mat::write(dat::uz_forward, dat::nsfe, nx2, nz, "output/0000/uz_forward.bin");
}
writeSU();
float **tmp=mat::createHost(nrec,nt*ntask);
mat::copyDeviceToHost(tmp,dat::out_y,nrec,nt*ntask);
mat::write(tmp[0], nt*ntask, "ux");
mat::write(tmp[3], nt*ntask, "uy");
break;
}
case 2:{
hipblasCreate(&cublas_handle);
hipsolverDnCreate(&solver_handle);
mkdir("output");
dat::output_path = "output";
clock_t timestart = clock();
if(dat::misfit_type == 1){
hipfftPlan1d(&cufft_handle, nt, HIPFFT_C2C, 1);
}
prepareObs();
if(dat::obs_su){
printf("\n");
}
printf("\n");
loadModel(dat::model_init);
float f = computeKernels();
dat::misfit_ref = f;
printf("misfit = %f\ntotal time: %.2fs\n", f,(float)(clock() - timestart) / CLOCKS_PER_SEC);
exportData(0);
if(dat::misfit_type == 1){
hipfftDestroy(cufft_handle);
}
break;
}
case 10:{
dat::obs_su = 0;
prepareObs();
writeSU();
break;
}
case 11:{
mkdir("output");
dat::output_path = "output";
loadModel(dat::model_init);
generateChecker(dat::mu, 0.1, 0.5, 2, 2);
exportData(-1);
break;
}
case 12:{
mkdir("output");
dat::output_path = "output";
loadModel(dat::model_init);
generateLayer(dat::mu, 0.1, 5);
exportData(-1);
break;
}
case 13:{
mkdir("output");
dat::output_path = "output";
loadModel(dat::model_init);
generateRandomLayer(dat::mu, 0.1, 0.4, 5);
exportData(-1);
break;
}
case 15:{
mkdir("output");
dat::output_path = "output";
loadModel(dat::model_init);
mat::copy(dat::mu, dat::mu, 0.64, nx, nz);
exportData(-1);
}
}
}
else{
printf("error loading data\n");
}
checkMemoryUsage();
return 0;
}
| 43b1fe56c265e381c7144d186aa134874bfde6d7.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include <cufft.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#ifdef _WIN32
#include <direct.h>
#define mkdir _mkdir
#define rmdir _rmdir
#elif defined __linux__
#include <unistd.h>
#endif
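// devij maps one block/thread pair to a grid point: i = blockIdx.x (x index),
// j = threadIdx.x (z index). devij2 additionally keeps i0 = blockIdx.x as the index into
// single-shot arrays and offsets i by blockDim.x * blockIdx.y for the ntask-stacked wavefields.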
#define devij int i = blockIdx.x, j = threadIdx.x
#define devij2 int i0 = blockIdx.x, i = blockIdx.x+blockDim.x*blockIdx.y, j = threadIdx.x;
const float pi = 3.1415927;
__constant__ float d_pi = 3.1415927;
cublasHandle_t cublas_handle;
cusolverDnHandle_t solver_handle;
cufftHandle cufft_handle;
namespace dat{
int nx;
int nx2;
int nz;
int nt;
float dt;
float Lx;
float Lz;
float **X;
float **Z;
dim3 nxb;
dim3 nxb2;
dim3 nzt;
int sfe;
int nsfe;
int order;
int wave_propagation_sh;
int wave_propagation_psv;
int simulation_mode;
int absorb_left;
int absorb_right;
int absorb_top;
int absorb_bottom;
float absorb_width;
int *isrc;
int nsrc;
int nrec;
int ntask;
int obs_type;
int obs_su;
int misfit_type;
int parametrisation;
const char *parfile;
char *obs_su_path;
char *model_init;
char *model_true;
char *output_path;
int *src_type; // host (ricker = 1)
float *src_angle; // host
float *src_f0; // host
float *src_t0; // host
float *src_factor; // host
float *src_x;
float *src_z;
float *rec_x;
float *rec_z;
int *src_x_id;
int *src_z_id;
int *rec_x_id;
int *rec_z_id;
float **stf_x;
float **stf_y;
float **stf_z;
float **adstf_x;
float **adstf_y;
float **adstf_z;
float **lambda;
float **mu;
float **rho;
float **absbound;
float **ux;
float **uy;
float **uz;
float **vx;
float **vy;
float **vz;
float **sxx;
float **sxy;
float **sxz;
float **szy;
float **szz;
float **dsx;
float **dsy;
float **dsz;
float **dvxdx;
float **dvxdz;
float **dvydx;
float **dvydz;
float **dvzdx;
float **dvzdz;
float **dvxdx_fw;
float **dvxdz_fw;
float **dvydx_fw;
float **dvydz_fw;
float **dvzdx_fw;
float **dvzdz_fw;
float **K_lambda;
float **K_mu;
float **K_rho;
float **out_x;
float **out_y;
float **out_z;
float ***u_obs_x;
float ***u_obs_y;
float ***u_obs_z;
float ***ux_forward; // host
float ***uy_forward; // host
float ***uz_forward; // host
float ***vx_forward; // host
float ***vy_forward; // host
float ***vz_forward; // host
int filter_kernel;
float misfit_ref;
float **gsum;
float **gtemp;
float *tw;
int optimize;
int inv_parameter;
int inv_iteration;
int ls_stepcountmax;
int ls_count;
float ls_thresh;
float ls_steplenmax;
float ls_stepleninit;
float *func_vals; // host
float *step_lens; // host
float *ls_gtp; // host
float *ls_gtg; // host
int inv_count;
int inv_maxiter;
int lbfgs_mem;
float ***lbfgs_S; // host
float ***lbfgs_Y; // host
int lbfgs_used;
FILE *log_ls;
FILE *log_misfit;
int neval;
}
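// Small device/host array library used throughout: allocation (create/createHost),
// initialisation and copies in both directions, cuBLAS-backed norm/dot/amax reductions,
// and raw binary file I/O.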
namespace mat{
__global__ void _setValue(float *mat, const float init){
int i = blockIdx.x;
mat[i] = init;
}
__global__ void _setValue(double *mat, const double init){
int i = blockIdx.x;
mat[i] = init;
}
__global__ void _setValue(float **mat, const float init){
devij;
mat[i][j] = init;
}
__global__ void _setValue(float ***mat, const float init, const int p){
devij;
mat[p][i][j] = init;
}
__global__ void _setPointerValue(float **mat, float *data, const int n){
int i = blockIdx.x;
mat[i] = data + n * i;
}
__global__ void _setPointerValue(float ***mat, float **data, const int i){
mat[i] = data;
}
__global__ void _setIndexValue(float *a, float *b, int index){
a[0] = b[index];
}
__global__ void _copy(float *mat, float *init){
int i = blockIdx.x;
mat[i] = init[i];
}
__global__ void _copy(float **mat, float **init){
devij;
mat[i][j] = init[i][j];
}
__global__ void _copy(float **mat, float **init, float k){
devij;
mat[i][j] = init[i][j] * k;
}
__global__ void _calc(float **c, float ka, float **a, float kb, float **b){
devij;
c[i][j] = ka * a[i][j] + kb * b[i][j];
}
__global__ void _calc(float *c, float ka, float *a, float kb, float *b){
int i = blockIdx.x;
c[i] = ka * a[i] + kb * b[i];
}
__global__ void _calc(float *c, float *a, float *b){
int i = blockIdx.x;
c[i] = a[i] * b[i];
}
float *init(float *mat, const float init, const int m){
mat::_setValue<<<m, 1>>>(mat, init);
return mat;
}
double *init(double *mat, const double init, const int m){
mat::_setValue<<<m, 1>>>(mat, init);
return mat;
}
float **init(float **mat, const float init, const int m, const int n){
mat::_setValue<<<m, n>>>(mat, init);
return mat;
}
float ***init(float ***mat, const float init, const int p, const int m, const int n){
for(int i = 0; i < p; i++){
mat::_setValue<<<m, n>>>(mat, init, i);
}
return mat;
}
float *initHost(float *mat, const float init, const int m){
for(int i = 0; i < m; i++){
mat[i] = init;
}
return mat;
}
float **initHost(float **mat, const float init, const int m, const int n){
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
mat[i][j] = init;
}
}
return mat;
}
float ***initHost(float ***mat, const float init, const int p, const int m, const int n){
for(int k = 0; k < p; k++){
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
mat[k][i][j] = init;
}
}
}
return mat;
}
float *create(const int m) {
float *data;
cudaMalloc((void **)&data, m * sizeof(float));
return data;
}
float **create(const int m, const int n){
float *data = mat::create(m * n);
float **mat;
cudaMalloc((void **)&mat, m * sizeof(float *));
mat::_setPointerValue<<<m, 1>>>(mat, data, n);
return mat;
}
float ***create(const int p, const int m, const int n){
float ***mat;
cudaMalloc((void **)&mat, p * sizeof(float **));
for(int i = 0; i < p; i++){
mat::_setPointerValue<<<1,1>>>(mat, mat::create(m, n), i);
}
return mat;
}
float *createHost(const int m) {
return (float *)malloc(m * sizeof(float));
}
float **createHost(const int m, const int n){
float *data = mat::createHost(m * n);
float **mat = (float **)malloc(m * sizeof(float *));
for(int i =0; i < m; i++){
mat[i] = data + n * i;
}
return mat;
}
float ***createHost(const int p, const int m, const int n){
float ***mat = (float ***)malloc(p * sizeof(float **));
for(int i = 0; i < p; i++){
mat[i] = mat::createHost(m, n);
}
return mat;
}
int *createInt(const int m){
int *a;
cudaMalloc((void**)&a, m * sizeof(int));
return a;
}
int *createIntHost(const int m) {
return (int *)malloc(m * sizeof(int));
}
short int *createShortInt(const int m){
short int *a;
cudaMalloc((void**)&a, m * sizeof(short int));
return a;
}
short int *createShortIntHost(const int m){
return (short int *)malloc(m * sizeof(short int));
}
double *createDouble(const int m){
double *a;
cudaMalloc((void**)&a, m * sizeof(double));
return a;
}
double *createDoubleHost(const int m) {
return (double *)malloc(m * sizeof(double));
}
float *getDataPointer(float **mat){
float **p=(float **)malloc(sizeof(float *));
cudaMemcpy(p, mat , sizeof(float *), cudaMemcpyDeviceToHost);
return *p;
}
void copy(float *mat, float *init, const int m){
mat::_copy<<<m, 1>>>(mat, init);
}
void copy(float **mat, float **init, const int m, const int n){
mat::_copy<<<m, n>>>(mat, init);
}
void copy(float **mat, float **init, float k, const int m, const int n){
mat::_copy<<<m, n>>>(mat, init, k);
}
void copyHostToDevice(float *d_a, const float *a, const int m){
cudaMemcpy(d_a, a , m * sizeof(float), cudaMemcpyHostToDevice);
}
void copyHostToDevice(float **pd_a, float *pa, const int m, const int n){
float **phd_a=(float **)malloc(sizeof(float *));
cudaMemcpy(phd_a, pd_a , sizeof(float *), cudaMemcpyDeviceToHost);
cudaMemcpy(*phd_a, pa , m * n * sizeof(float), cudaMemcpyHostToDevice);
}
void copyHostToDevice(float **pd_a, float **pa, const int m, const int n){
float **phd_a=(float **)malloc(sizeof(float *));
cudaMemcpy(phd_a, pd_a , sizeof(float *), cudaMemcpyDeviceToHost);
cudaMemcpy(*phd_a, *pa , m * n * sizeof(float), cudaMemcpyHostToDevice);
}
void copyHostToDevice(float ***pd_a, float ***pa, const int p, const int m, const int n){
float ***phd_a=(float ***)malloc(p * sizeof(float **));
cudaMemcpy(phd_a, pd_a, p * sizeof(float **), cudaMemcpyDeviceToHost);
for(int i = 0; i < p; i++){
mat::copyHostToDevice(phd_a[i], pa[i], m, n);
}
}
void copyDeviceToHost(float *a, const float *d_a, const int m){
cudaMemcpy(a, d_a , m * sizeof(float), cudaMemcpyDeviceToHost);
}
void copyDeviceToHost(float *pa, float **pd_a, const int m, const int n){
float **phd_a=(float **)malloc(sizeof(float *));
cudaMemcpy(phd_a, pd_a , sizeof(float *), cudaMemcpyDeviceToHost);
cudaMemcpy(pa, *phd_a , m * n * sizeof(float), cudaMemcpyDeviceToHost);
}
void copyDeviceToHost(float **pa, float **pd_a, const int m, const int n){
float **phd_a=(float **)malloc(sizeof(float *));
cudaMemcpy(phd_a, pd_a , sizeof(float *), cudaMemcpyDeviceToHost);
cudaMemcpy(*pa, *phd_a , m * n * sizeof(float), cudaMemcpyDeviceToHost);
}
void copyDeviceToHost(float ***pa, float ***pd_a, const int p, const int m, const int n){
float ***phd_a=(float ***)malloc(p * sizeof(float **));
cudaMemcpy(phd_a, pd_a, p * sizeof(float **), cudaMemcpyDeviceToHost);
for(int i = 0; i < p; i++){
mat::copyDeviceToHost(pa[i], phd_a[i], m, n);
}
}
void calc(float *c, float *a, float *b, int m){
mat::_calc<<<m, 1>>>(c, a, b);
}
void calc(float *c, float ka, float *a, float kb, float *b, int m){
mat::_calc<<<m, 1>>>(c, ka, a, kb, b);
}
void calc(float **c, float ka, float **a, float kb, float **b, int m, int n){
mat::_calc<<<m, n>>>(c, ka, a, kb, b);
}
float norm(float *a, int n){
float norm_a = 0;
cublasSnrm2_v2(cublas_handle, n, a, 1, &norm_a);
return norm_a;
}
float norm(float **a, int m, int n){
return mat::norm(mat::getDataPointer(a), m * n);
}
float amax(float *a, int n){
int index = 0;
cublasIsamax_v2(cublas_handle, n, a, 1, &index);
float *b = mat::create(1);
mat::_setIndexValue<<<1, 1>>>(b, a, index - 1);
float *c = mat::createHost(1);
mat::copyDeviceToHost(c, b, 1);
return fabs(c[0]);
}
float amax(float **a, int m, int n){
return mat::amax(mat::getDataPointer(a), m * n);
}
float dot(float *a, float *b, int n){
float dot_ab = 0;
cublasSdot_v2(cublas_handle, n, a, 1, b, 1, &dot_ab);
return dot_ab;
}
float dot(float **a, float **b, int m, int n){
return mat::dot(mat::getDataPointer(a), mat::getDataPointer(b), m * n);
}
void freeHost(float **mat){
free(*mat);
free(mat);
}
void freeHost(float ***mat){
free(**mat);
free(*mat);
free(mat);
}
void freeDevice(float **mat){
cudaFree(getDataPointer(mat));
cudaFree(mat);
}
void read(float *data, int n, const char *fname){
FILE *file = fopen(fname, "rb");
fread(data, sizeof(float), n, file);
fclose(file);
}
void write(float *data, int n, const char *fname){
FILE *file = fopen(fname, "wb");
fwrite(data, sizeof(float), n, file);
fclose(file);
}
void write(float **data, int m, int n, const char *fname){
FILE *file = fopen(fname, "wb");
for(int i = 0; i < m; i++){
fwrite(data[i], sizeof(float), n, file);
}
fclose(file);
}
void write(float ***data, int p, int m, int n, const char *fname){
FILE *file = fopen(fname, "wb");
for(int k = 0; k < p; k++){
for(int i = 0; i < m; i++){
fwrite(data[k][i], sizeof(float), n, file);
}
}
fclose(file);
}
void writeDevice(float *data, int n, const char *fname){
float *h_data = mat::createHost(n);
mat::copyDeviceToHost(h_data, data, n);
mat::write(h_data, n, fname);
free(h_data);
}
void writeDevice(float **data, const int m, int n, const char *fname){
float **h_data = mat::createHost(m, n);
mat::copyDeviceToHost(h_data, data, m, n);
mat::write(h_data, m, n, fname);
mat::freeHost(h_data);
}
void writeDevice(float ***data, const int p, const int m, int n, const char *fname){
float ***h_data = mat::createHost(p, m, n);
mat::copyDeviceToHost(h_data, data, p, m, n);
mat::write(h_data, p, m, n, fname);
mat::freeHost(h_data);
}
}
dim3 &nxb = dat::nxb;
dim3 &nxb2 = dat::nxb2;
dim3 &nzt = dat::nzt;
int &sh = dat::wave_propagation_sh;
int &psv = dat::wave_propagation_psv;
int &mode = dat::simulation_mode;
int &nx = dat::nx;
int &nx2 = dat::nx2;
int &nz = dat::nz;
int &nt = dat::nt;
int &nsrc = dat::nsrc;
int &nrec = dat::nrec;
int &ntask = dat::ntask;
float &dt = dat::dt;
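// Fourth-order staggered finite-difference divergence of the stress field on the (possibly
// non-uniform) X/Z grid: divSY for the SH stresses (sxy, szy), divSXZ for the P-SV stresses
// (sxx, szz, sxz). Points closer than two cells to the x boundaries get a zero x-derivative
// contribution.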
__global__ void divSY(float **dsy, float **sxy, float **szy, float **X, float **Z, int nx, int nz){
devij2;
if(i0 >= 2 && i0 < nx - 2){
float dx = X[i0][j] - X[i0-1][j];
float dx3 = X[i0+1][j] - X[i0-2][j];
dsy[i][j] = 9*(sxy[i][j] - sxy[i-1][j])/(8*dx) - (sxy[i+1][j] - sxy[i-2][j])/(8*dx3);
}
else{
dsy[i][j] = 0;
}
if(j >= 2 && j < nz - 2){
float dz = Z[i0][j] - Z[i0][j-1];
float dz3 = Z[i0][j+1] - Z[i0][j-2];
dsy[i][j] += 9*(szy[i][j] - szy[i][j-1])/(8*dz) - (szy[i][j+1] - szy[i][j-2])/(8*dz3);
}
}
__global__ void divSXZ(float **dsx, float **dsz, float **sxx, float **szz, float **sxz, float **X, float **Z, int nx, int nz){
devij2;
if(i0 >= 2 && i0 < nx - 2){
float dx = X[i0][j] - X[i0-1][j];
float dx3 = X[i0+1][j] - X[i0-2][j];
dsx[i][j] = 9*(sxx[i][j] - sxx[i-1][j])/(8*dx) - (sxx[i+1][j] - sxx[i-2][j])/(8*dx3);
dsz[i][j] = 9*(sxz[i][j] - sxz[i-1][j])/(8*dx) - (sxz[i+1][j] - sxz[i-2][j])/(8*dx3);
}
else{
dsx[i][j] = 0;
dsz[i][j] = 0;
}
if(j >= 2 && j < nz - 2){
float dz = Z[i0][j] - Z[i0][j-1];
float dz3 = Z[i0][j+1] - Z[i0][j-2];
dsx[i][j] += 9*(sxz[i][j] - sxz[i][j-1])/(8*dz) - (sxz[i][j+1] - sxz[i][j-2])/(8*dz3);
dsz[i][j] += 9*(szz[i][j] - szz[i][j-1])/(8*dz) - (szz[i][j+1] - szz[i][j-2])/(8*dz3);
}
}
__global__ void divVY(float **dvydx, float **dvydz, float **vy, float **X, float **Z, int nx, int nz){
devij2;
if(i0 >= 1 && i0 < nx - 2){
float dx = X[i0+1][j] - X[i0][j];
float dx3 = X[i0+2][j] - X[i0-1][j];
dvydx[i][j] = 9*(vy[i+1][j] - vy[i][j])/(8*dx) - (vy[i+2][j] - vy[i-1][j])/(8*dx3);
}
else{
dvydx[i][j] = 0;
}
if(j >= 1 && j < nz - 2){
float dz = Z[i0][j+1] - Z[i0][j];
float dz3 = Z[i0][j+2] - Z[i0][j-1];
dvydz[i][j] = 9*(vy[i][j+1] - vy[i][j])/(8*dz) - (vy[i][j+2] - vy[i][j-1])/(8*dz3);
}
else{
dvydz[i][j] = 0;
}
}
__global__ void divVXZ(float **dvxdx, float **dvxdz, float **dvzdx, float **dvzdz, float **vx, float **vz, float **X, float **Z, int nx, int nz){
devij2;
if(i0 >= 1 && i0 < nx - 2){
float dx = X[i0+1][j] - X[i0][j];
float dx3 = X[i0+2][j] - X[i0-1][j];
dvxdx[i][j] = 9*(vx[i+1][j]-vx[i][j])/(8*dx)-(vx[i+2][j]-vx[i-1][j])/(8*dx3);
dvzdx[i][j] = 9*(vz[i+1][j]-vz[i][j])/(8*dx)-(vz[i+2][j]-vz[i-1][j])/(8*dx3);
}
else{
dvxdx[i][j] = 0;
dvzdx[i][j] = 0;
}
if(j >= 1 && j < nz - 2){
float dz = Z[i0][j+1] - Z[i0][j];
float dz3 = Z[i0][j+2] - Z[i0][j-1];
dvxdz[i][j] = 9*(vx[i][j+1]-vx[i][j])/(8*dz)-(vx[i][j+2]-vx[i][j-1])/(8*dz3);
dvzdz[i][j] = 9*(vz[i][j+1]-vz[i][j])/(8*dz)-(vz[i][j+2]-vz[i][j-1])/(8*dz3);
}
else{
dvxdz[i][j] = 0;
dvzdz[i][j] = 0;
}
}
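// Inject source time functions: one block per source, one thread per shot of the current task batch;
// isrc < 0 injects at every entry (used when adjoint sources are placed at the receivers).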
__global__ void addSTF(float **dsx, float **dsy, float **dsz, float **stf_x, float **stf_y, float **stf_z,
int *src_x_id, int *src_z_id, int isrc, int sh, int psv, int it,int nx){
int is = blockIdx.x;
int xs = src_x_id[is];
int zs = src_z_id[is];
int is2 = threadIdx.x;
if(isrc < 0 || isrc + is2 == is){
if(sh){
dsy[xs+is2*nx][zs] += stf_y[is][it];
}
if(psv){
dsx[xs+is2*nx][zs] += stf_x[is][it];
dsz[xs+is2*nx][zs] += stf_z[is][it];
}
}
}
__global__ void saveRec(float **out_x, float **out_y, float **out_z, float **vx, float **vy, float **vz,
int *rec_x_id, int *rec_z_id, int nx, int nt, int sh, int psv, int it){
int ir = blockIdx.x;
int xr = rec_x_id[ir] + threadIdx.x * nx;
int zr = rec_z_id[ir];
it += threadIdx.x * nt;
if(sh){
out_y[ir][it] = vy[xr][zr];
}
if(psv){
out_x[ir][it] = vx[xr][zr];
out_z[ir][it] = vz[xr][zr];
}
}
__global__ void saveRec(float ***out_x, float ***out_y, float ***out_z, float **vx, float **vy, float **vz,
int *rec_x_id, int *rec_z_id, int isrc, int nx, int sh, int psv, int it){
int ir = blockIdx.x;
int xr = rec_x_id[ir];
int zr = rec_z_id[ir];
int is2 = threadIdx.x;
if(sh){
out_y[isrc+is2][ir][it] = vy[xr+is2*nx][zr];
}
if(psv){
out_x[isrc+is2][ir][it] = vx[xr+is2*nx][zr];
out_z[isrc+is2][ir][it] = vz[xr+is2*nx][zr];
}
}
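// Explicit time-stepping updates: velocity from the stress divergence (with the absorbing taper applied
// multiplicatively), stress from the velocity gradients, displacement from the velocity.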
__global__ void updateV(float **v, float **ds, float **rho, float **absbound, float dt){
devij2;
v[i][j] = absbound[i0][j] * (v[i][j] + dt * ds[i][j] / rho[i0][j]);
}
__global__ void updateSY(float **sxy, float **szy, float **dvydx, float **dvydz, float **mu, float dt){
devij2;
sxy[i][j] += dt * mu[i0][j] * dvydx[i][j];
szy[i][j] += dt * mu[i0][j] * dvydz[i][j];
}
__global__ void updateSXZ(float **sxx, float **szz, float **sxz, float **dvxdx, float **dvxdz, float **dvzdx, float **dvzdz,
float **lambda, float **mu, float dt){
devij2;
sxx[i][j] += dt * ((lambda[i0][j] + 2 * mu[i0][j]) * dvxdx[i][j] + lambda[i0][j] * dvzdz[i][j]);
szz[i][j] += dt * ((lambda[i0][j] + 2 * mu[i0][j]) * dvzdz[i][j] + lambda[i0][j] * dvxdx[i][j]);
sxz[i][j] += dt * (mu[i0][j] * (dvxdz[i][j] + dvzdx[i][j]));
}
__global__ void updateU(float **u, float **v, float dt){
devij2;
u[i][j] += v[i][j] * dt;
}
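// Kernel (gradient) accumulation: correlate the adjoint wavefield with the stored forward wavefield;
// tsfe = sfe * dt is the snapshot interval used as the time weight.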
__global__ void interactionRhoY(float **K_rho, float **vy, float **vy_fw, float tsfe){
devij;
K_rho[i][j] -= vy_fw[i][j] * vy[i][j] * tsfe;
}
__global__ void interactionRhoXZ(float **K_rho, float **vx, float **vx_fw, float **vz, float **vz_fw, float tsfe){
devij;
K_rho[i][j] -= (vx_fw[i][j] * vx[i][j] + vz_fw[i][j] * vz[i][j]) * tsfe;
}
__global__ void interactionMuY(float **K_mu, float **dvydx, float **dvydx_fw, float **dvydz, float **dvydz_fw, float tsfe){
devij;
K_mu[i][j] -= (dvydx[i][j] * dvydx_fw[i][j] + dvydz[i][j] * dvydz_fw[i][j]) * tsfe;
}
__global__ void interactionMuXZ(float **K_mu, float **dvxdx, float **dvxdx_fw, float **dvxdz, float **dvxdz_fw,
float **dvzdx, float **dvzdx_fw, float **dvzdz, float **dvzdz_fw, float tsfe){
devij;
K_mu[i][j] -= (2 * dvxdx[i][j] * dvxdx_fw[i][j] + 2 * dvzdz[i][j] * dvzdz_fw[i][j] +
(dvxdz[i][j] + dvzdx[i][j]) * (dvzdx_fw[i][j] + dvxdz_fw[i][j])) * tsfe;
}
__global__ void interactionLambdaXZ(float **K_lambda, float **dvxdx, float **dvxdx_fw, float **dvzdz, float **dvzdz_fw, float tsfe){
devij;
K_lambda[i][j] -= ((dvxdx[i][j] + dvzdz[i][j]) * (dvxdx_fw[i][j] + dvzdz_fw[i][j])) * tsfe;
}
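// 1D Gaussian weights; initialiseGaussian precomputes the row/column weight sums used to normalise
// the separable kernel smoothing in filterKernelX / filterKernelZ.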
__device__ float gaussian(int x, int sigma){
float xf = (float)x;
float sigmaf = (float)sigma;
return (1 / (sqrtf(2 * d_pi) * sigmaf)) * expf(-xf * xf / (2 * sigmaf * sigmaf));
}
__global__ void initialiseGaussian(float **model, int nx, int nz, int sigma){
devij;
float sumx = 0;
for(int n = 0; n < nx; n++){
sumx += gaussian(i - n, sigma);
}
float sumz = 0;
for(int n = 0; n < nz; n++){
sumz += gaussian(j - n, sigma);
}
model[i][j] = sumx * sumz;
}
__global__ void computeIndices(int *coord_n_id, float *coord_n, float Ln, float n){
int i = blockIdx.x;
coord_n_id[i] = (int)(coord_n[i] / Ln * (n - 1) + 0.5);
}
__global__ void initialiseAbsorbingBoundaries(float **absbound, float width,
int absorb_left, int absorb_right, int absorb_bottom, int absorb_top,
float Lx, float Lz, float **X, float **Z){
devij;
absbound[i][j] = 1;
if(absorb_left){
if(X[i][j] < width){
absbound[i][j] *= exp(-pow((X[i][j] - width) / (2 * width), 2));
}
}
if(absorb_right){
if(X[i][j] > Lx - width){
absbound[i][j] *= exp(-pow((X[i][j] - (Lx - width)) / (2 * width), 2));
}
}
if(absorb_bottom){
if(Z[i][j] < width){
absbound[i][j] *= exp(-pow((Z[i][j] - width) / (2 * width), 2));
}
}
if(absorb_top){
if(Z[i][j] > Lz - width){
absbound[i][j] *= exp(-pow((Z[i][j] - (Lz - width)) / (2 * width), 2));
}
}
}
__global__ void prepareAdjointSTF(float **adstf, float **u_syn, float ***u_obs, float *tw, int nt, int isrc){
int it = blockIdx.x;
int irec = threadIdx.x;
adstf[irec][nt - it - 1] = (u_syn[irec][it] - u_obs[isrc][irec][it]) * tw[it] * 2;
}
__global__ void prepareEnvelopeSTF(float **adstf, float *etmp, float *syn, float *ersd, int nt, int irec){
int it = blockIdx.x;
adstf[irec][nt - it - 1] = etmp[it] * syn[it] - ersd[it];
}
__global__ void filterKernelX(float **model, float **gtemp, int nx, int sigma){
devij;
float sumx = 0;
for(int n = 0; n < nx; n++){
sumx += gaussian(i - n, sigma) * model[n][j];
}
gtemp[i][j] = sumx;
}
__global__ void filterKernelZ(float **model, float **gtemp, float **gsum, int nz, int sigma){
devij;
float sumz = 0;
for(int n = 0; n < nz; n++){
sumz += gaussian(j - n, sigma) * gtemp[i][n];
}
model[i][j] = sumz / gsum[i][j];
}
__global__ void getTaperWeights(float *tw, float dt, int nt){
int it = blockIdx.x;
float t_end = (nt - 1) * dt;
float taper_width = t_end / 10;
float t_min = taper_width;
float t_max = t_end - taper_width;
float t = it * dt;
if(t <= t_min){
tw[it] = 0.5 + 0.5 * cosf(d_pi * (t_min - t) / (taper_width));
}
else if(t >= t_max){
tw[it] = 0.5 + 0.5 * cosf(d_pi * (t_max - t) / (taper_width));
}
else{
tw[it] = 1;
}
}
__global__ void calculateMisfit(float *misfit, float **u_syn, float ***u_obs, float *tw, float dt, int isrc, int irec, int j, int nt){
int it = blockIdx.x;
float wavedif = (u_syn[irec][it+j*nt] - u_obs[isrc+j][irec][it]) * tw[it];
misfit[it] = wavedif * dt;
}
__global__ void envelopetmp(float *etmp, float *esyn, float *eobs, float max){
int it = blockIdx.x;
etmp[it] = (esyn[it] - eobs[it])/(esyn[it] + max);
}
__global__ void copyWaveform(float *misfit, float ***u_obs, int isrc, int irec){
int it = blockIdx.x;
misfit[it] = u_obs[isrc][irec][it];
}
__global__ void copyWaveform(float *misfit, float **out, int irec, int jnt){
int it = blockIdx.x;
misfit[it] = out[irec][it+jnt];
}
__global__ void initialiseGrids(float **X, float **Z, float Lx, int nx, float Lz, int nz){
devij;
X[i][j] = Lx / (nx - 1) * i;
Z[i][j] = Lz / (nz - 1) * j;
}
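// Nearest-neighbour mapping of the scattered mesh points (x, z, rho, vp, vs) onto the regular
// finite-difference grid, by brute-force search over all mesh points.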
__global__ void mesh2grid(float *xbuffer, float *zbuffer, float *rbuffer, float *pbuffer, float *sbuffer,
float **lambda, float **mu, float **rho, float dx, float dz, float dmax, int npt){
devij;
float ix = i * dx;
float iz = j * dz;
float dmin = dmax;
for(int k = 0; k < npt; k++){
float dx = ix - xbuffer[k];
float dz = iz - zbuffer[k];
float d = dx * dx + dz * dz;
if(d < dmin){
dmin = d;
rho[i][j] = rbuffer[k];
mu[i][j] = sbuffer[k];
lambda[i][j] = pbuffer[k];
}
}
}
__global__ void changeParametrisation(float **lambda, float **mu, float **rho, int psv){
devij;
if(psv){
lambda[i][j] = rho[i][j] * (lambda[i][j] * lambda[i][j] - 2 * mu[i][j] * mu[i][j]);
}
else{
lambda[i][j] = 0;
}
mu[i][j] = rho[i][j] * mu[i][j] * mu[i][j];
}
__global__ void changeParametrisation(float *vp, float *vs, float *rho, int nz, int psv){
devij;
int ij = i * nz + j;
if(psv){
vp[ij] = sqrt((vp[ij] + 2*vs[ij]) / rho[ij]);
}
else{
vp[ij] = 0;
}
vs[ij] = sqrt(vs[ij] / rho[ij]);
}
__global__ void updateModel(float **m, float **p, float alpha){
devij;
m[i][j] += alpha * p[i][j];
}
__global__ void reduceSystem(const double * __restrict d_in1, double * __restrict d_out1, const double * __restrict d_in2, double * __restrict d_out2, const int M, const int N) {
const int i = blockIdx.x;
const int j = threadIdx.x;
if ((i < N) && (j < N)){
d_out1[j * N + i] = d_in1[j * M + i];
d_out2[j * N + i] = d_in2[j * M + i];
}
}
__global__ void generateChecker(float **p, float dp, float margin, float lx, float lz, float **X, float **Z){
devij;
float x = X[i][j];
float z = Z[i][j];
float marginx = lx * margin;
float marginz = lz * margin;
int idx = (int)((x - marginx*2) / (lx + marginx));
int idz = (int)((z - marginz*2) / (lz + marginz));
float rx = x - marginx*2 - idx * (lx + marginx);
float rz = z - marginz*2 - idz * (lz + marginz);
if(rx > 0 && rx < lx && rz > 0 && rz < lz){
if(idx % 2 == idz % 2){
p[i][j] *= (1 + dp) * (1 + dp);
}
else{
p[i][j] *= (1 - dp) * (1 - dp);
}
}
}
__global__ void generateLayer(float **p, float from, float to, float value, float **Z){
devij;
float z = Z[i][j];
if(z >=from && z <= to){
p[i][j] *= (1+value) * (1+value);
}
}
__global__ void generateRandomLayer(float **p, float from, float to, float value, float *layer1, float *layer2, float **Z){
devij;
float z = Z[i][j];
if(z >=from+layer1[i] && z <= to+layer2[i]){
p[i][j] *= (1+value) * (1+value);
}
}
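// Spectrum step of the analytic-signal (Hilbert transform) computation: keep DC, double the
// positive frequencies, zero the negative ones; applied between a forward and an inverse FFT.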
__global__ void hilbert(cufftComplex *h, int n){
int i = blockIdx.x;
if(i > 0){
if(n % 2 == 0){
if(i < n / 2 + 1){
h[i].x *= 2;
h[i].y *= 2;
}
else if(i > n / 2 + 1){
h[i].x = 0;
h[i].y = 0;
}
}
else{
if(i < (n+1) / 2){
h[i].x *= 2;
h[i].y *= 2;
}
else{
h[i].x = 0;
h[i].y = 0;
}
}
}
}
__global__ void copyR2C(cufftComplex *a,float *b){
int i=blockIdx.x;
a[i].x=b[i];
a[i].y=0;
}
__global__ void copyC2Real(float *a, cufftComplex *b, int n){
int i = blockIdx.x;
a[i] = b[i].x / n;
}
__global__ void copyC2Imag(float *a, cufftComplex *b, int n){
int i = blockIdx.x;
a[i] = b[i].y / n;
}
__global__ void copyC2Abs(float *a, cufftComplex *b, int n){
int i = blockIdx.x;
a[i] = sqrt(b[i].x*b[i].x + b[i].y*b[i].y) / n;
}
static int getTaskIndex(int isrc){
int index = isrc + ntask - 1;
if(index >= nsrc){
return nsrc - 1;
}
else{
return index;
}
}
static float calculateAngle(float **p, float **g, float k, int nx, int nz){
float xx = mat::dot(p, p, nx, nz);
float yy = mat::dot(g, g, nx, nz);
float xy = k * mat::dot(p, g, nx, nz);
return acos(xy / sqrt(xx * yy));
}
static void hilbert(float *x, cufftComplex *data){
copyR2C<<<nt, 1>>>(data, x);
cufftExecC2C(cufft_handle, data, data, CUFFT_FORWARD);
hilbert<<<nt,1>>>(data, nt);
cufftExecC2C(cufft_handle, data, data, CUFFT_INVERSE);
}
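// Least-squares solve of an overdetermined system via QR factorisation:
// cuSOLVER geqrf/ormqr followed by a cuBLAS triangular solve on the reduced system.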
static void solveQR(double *h_A, double *h_B, double *XC, const int Nrows, const int Ncols){
int work_size = 0;
int *devInfo = mat::createInt(1);
double *d_A = mat::createDouble(Nrows * Ncols);
cudaMemcpy(d_A, h_A, Nrows * Ncols * sizeof(double), cudaMemcpyHostToDevice);
double *d_TAU = mat::createDouble(min(Nrows, Ncols));
cusolverDnDgeqrf_bufferSize(solver_handle, Nrows, Ncols, d_A, Nrows, &work_size);
double *work = mat::createDouble(work_size);
cusolverDnDgeqrf(solver_handle, Nrows, Ncols, d_A, Nrows, d_TAU, work, work_size, devInfo);
double *d_Q = mat::createDouble(Nrows * Nrows);
cusolverDnDormqr(solver_handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_N, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_Q, Nrows, work, work_size, devInfo);
double *d_C = mat::createDouble(Nrows * Nrows);
mat::init(d_C, 0, Nrows * Nrows);
cudaMemcpy(d_C, h_B, Nrows * sizeof(double), cudaMemcpyHostToDevice);
cusolverDnDormqr(solver_handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_T, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_C, Nrows, work, work_size, devInfo);
double *d_R = mat::createDouble(Ncols * Ncols);
double *d_B = mat::createDouble(Ncols * Ncols);
reduceSystem<<<Ncols, Ncols>>>(d_A, d_R, d_C, d_B, Nrows, Ncols);
const double alpha = 1.;
cublasDtrsm(cublas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, Ncols, Ncols,
&alpha, d_R, Ncols, d_B, Ncols);
cudaMemcpy(XC, d_B, Ncols * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_Q);
cudaFree(d_R);
cudaFree(d_TAU);
cudaFree(devInfo);
cudaFree(work);
}
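// Fit a quadratic p[0]*x^2 + p[1]*x + p[2] by least squares (design matrix columns x^2, x, 1)
// and return the residual sum of squares.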
static double polyfit(double *x, double *y, double *p, int n){
double *A = mat::createDoubleHost(3 * n);
for(int i = 0; i < n; i++){
A[i] = x[i] * x[i];
A[i + n] = x[i];
A[i + n * 2] = 1;
}
solveQR(A, y, p, n, 3);
double rss = 0;
for(int i = 0; i < n; i++){
double ei = p[0] * x[i] * x[i] + p[1] * x[i] + p[2];
rss += pow(y[i] - ei, 2);
}
    free(A);
    return rss;
}
static float polyfit(float *fx, float *fy, float *fp, int n){
double *x = mat::createDoubleHost(n);
double *y = mat::createDoubleHost(n);
double *p = mat::createDoubleHost(3);
for(int i = 0; i < n; i++){
x[i] = fx[i];
y[i] = fy[i];
}
float rss = polyfit(x, y, p, n);
for(int i = 0; i < 3; i++){
fp[i] = p[i];
}
free(x);
free(y);
free(p);
return rss;
}
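// Parse a Fortran-style real literal (signs, decimal part, and 'd'/'e' exponents) without relying on strtod.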
static float str2float(const char *str){
char str1[20] = {'\0'};
char str2[20] = {'\0'};
char str3[10] = {'\0'};
float num = 0;
int len = strlen(str);
int offset = 0;
char *current = str1;
for(int i = 0; i < len; i++){
if((str[i] >= 48 && str[i] <= 57) || str[i] == '+' || str[i] == '-'){
current[i - offset] = str[i];
}
else if(str[i] == 'd' || str[i] == 'e'){
offset = i + 1;
current = str3;
}
else if(str[i] == '.'){
offset = i;
str2[0] = '.';
current = str2;
}
else{
break;
}
}
float e = 1;
float nege = 1;
if(strlen(str3) > 0){
int numi = atoi(str3);
if(numi < 0){
for(int i = 0; i < -numi; i++){
nege *= 10;
}
}
else{
for(int i = 0; i < numi; i++){
e *= 10;
}
}
}
if(strlen(str1) > 0){
num = e * atoi(str1);
}
if(strlen(str2) > 0){
float numf = e * atof(str2);
if(num >= 0){
num += numf;
}
else{
num -= numf;
}
}
return num / nege;
}
static int str2int(const char *str){
return lroundf(str2float(str));
}
static void printStat(int a, int b){
a++;
if(b >= 100){
if(a < 10){
printf(" task 00%d of %d\n", a, b);
return;
}
if(a < 100){
printf(" task 0%d of %d\n", a, b);
return;
}
}
else if(b >= 10){
if(a < 10){
printf(" task 0%d of %d\n", a, b);
return;
}
}
printf(" task %d of %d\n", a, b);
}
static int getFileLength(FILE *file){
fseek (file, 0, SEEK_END);
int length = ftell (file);
fseek (file, 0, SEEK_SET);
return length;
}
static void initialiseModel(const char *model_dir){
int npt;
char path[80];
sprintf(path, "%s/proc000000_x.bin", model_dir);
FILE *xfile = fopen(path,"rb");
sprintf(path, "%s/proc000000_z.bin", model_dir);
FILE *zfile = fopen(path,"rb");
fread(&npt, sizeof(int), 1, xfile);
fread(&npt, sizeof(int), 1, zfile);
npt /= 4;
float *xbuffer = mat::createHost(npt);
float *zbuffer = mat::createHost(npt);
fread(xbuffer, sizeof(float), npt, xfile);
fread(zbuffer, sizeof(float), npt, zfile);
dat::Lx = 0;
dat::Lz = 0;
for(int i = 0; i < npt; i++){
if(xbuffer[i] > dat::Lx) dat::Lx = xbuffer[i];
if(zbuffer[i] > dat::Lz) dat::Lz = zbuffer[i];
}
dat::nx = lroundf(sqrt(npt * dat::Lx / dat::Lz));
dat::nz = lroundf(npt / dat::nx);
dat::nx2 = dat::nx * ntask;
// dat::nx *= 2;
// dat::nz *= 2;
free(xbuffer);
free(zbuffer);
fclose(xfile);
fclose(zfile);
}
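// Character-level state machine for the Fortran-style parameter files: "key = value" pairs, '#' starts a
// comment, keys are lower-cased; isrc < 0 fills the global Par_file settings, otherwise one SOURCE file entry.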
static void readFortran(const char *fname, int isrc){
FILE *parfile = fopen(fname,"r");
char key[80];
char value[80];
int stat = 0;
int offset = 0;
char c = 0;
int i = 0;
while(c != EOF){
c = fgetc(parfile);
switch(c){
case '\0': case '\r': case '\t': case '\n': case EOF:{
if(stat == 4){
value[i - offset] = '\0';
stat = 5;
}
if(stat == 5){
if(isrc < 0){
if(strcmp(key, "simulation_mode") == 0){
dat::simulation_mode = str2int(value);
}
else if(strcmp(key, "wave_propagation_type") == 0){
switch(str2int(value)){
case 0: dat::wave_propagation_sh = 1; dat::wave_propagation_psv = 0; break;
case 1: dat::wave_propagation_sh = 0; dat::wave_propagation_psv = 1; break;
case 2: dat::wave_propagation_sh = 1; dat::wave_propagation_psv = 1; break;
}
}
else if(strcmp(key, "nt") == 0){
dat::nt = str2int(value);
}
else if(strcmp(key, "dt") == 0){
dat::dt = str2float(value);
}
else if(strcmp(key, "obs_type") == 0){
dat::obs_type = str2int(value);
}
else if(strcmp(key, "ntask") == 0){
dat::ntask = str2int(value);
}
else if(strcmp(key, "misfit_type") == 0){
dat::misfit_type = str2int(value);
}
else if(strcmp(key, "obs_su") == 0){
dat::obs_su = str2int(value);
}
else if(strcmp(key, "absorb_left") == 0){
dat::absorb_left = str2int(value);
}
else if(strcmp(key, "absorb_right") == 0){
dat::absorb_right = str2int(value);
}
else if(strcmp(key, "absorb_top") == 0){
dat::absorb_top = str2int(value);
}
else if(strcmp(key, "absorb_bottom") == 0){
dat::absorb_bottom = str2int(value);
}
else if(strcmp(key, "absorb_width") == 0){
dat::absorb_width = str2float(value);
}
else if(strcmp(key, "nsrc") == 0){
dat::nsrc = str2int(value);
}
else if(strcmp(key, "sfe") == 0){
dat::sfe = str2int(value);
}
else if(strcmp(key, "filter_kernel") == 0){
dat::filter_kernel = str2int(value);
}
else if(strcmp(key, "inv_iteration") == 0){
dat::inv_iteration = str2int(value);
}
else if(strcmp(key, "inv_maxiter") == 0){
dat::inv_maxiter = str2int(value);
}
else if(strcmp(key, "lbfgs_mem") == 0){
dat::lbfgs_mem = str2int(value);
}
else if(strcmp(key, "optimize") == 0){
dat::optimize = str2int(value);
}
else if(strcmp(key, "ls_steplenmax") == 0){
dat::ls_steplenmax = str2float(value);
}
else if(strcmp(key, "ls_stepleninit") == 0){
dat::ls_stepleninit = str2float(value);
}
else if(strcmp(key, "ls_thresh") == 0){
dat::ls_thresh = str2float(value);
}
else if(strcmp(key, "ls_stepcountmax") == 0){
dat::ls_stepcountmax = str2int(value);
}
else if(strcmp(key, "parametrisation") == 0){
dat::parametrisation = str2int(value);
}
else if(strcmp(key, "model_init") == 0){
int len = strlen(value);
dat::model_init = (char *)malloc((len + 1)*sizeof(char));
strcpy(dat::model_init, value);
initialiseModel(value);
}
else if(strcmp(key, "model_true") == 0){
int len = strlen(value);
dat::model_true = (char *)malloc((len + 1)*sizeof(char));
strcpy(dat::model_true, value);
}
else if(strcmp(key, "output_path") == 0){
int len = strlen(value);
dat::output_path = (char *)malloc((len + 1)*sizeof(char));
strcpy(dat::output_path, value);
}
else if(strcmp(key, "obs_su_path") == 0){
int len = strlen(value);
dat::obs_su_path = (char *)malloc((len + 1)*sizeof(char));
strcpy(dat::obs_su_path, value);
}
else if(strcmp(key, "inv_parameter") == 0){
dat::inv_parameter = str2int(value);
}
}
else{
if(strcmp(key, "xs") == 0){
dat::src_x[isrc] = str2float(value);
}
else if(strcmp(key, "zs") == 0){
dat::src_z[isrc] = str2float(value);
}
else if(strcmp(key, "f0") == 0){
dat::src_f0[isrc] = str2float(value);
}
else if(strcmp(key, "t0") == 0 || strcmp(key, "tshift") == 0){
dat::src_t0[isrc] = str2float(value);
}
else if(strcmp(key, "angle") == 0 || strcmp(key, "anglesource") == 0){
dat::src_angle[isrc] = str2float(value);
}
else if(strcmp(key, "factor") == 0){
dat::src_factor[isrc] = str2float(value);
}
else if(strcmp(key, "type") == 0 || strcmp(key, "source_type") == 0){
dat::src_type[isrc] = str2float(value);
}
}
}
stat = 0;
offset = 0;
i = -1;
break;
}
case '#':{
switch(stat){
case 4: value[i - offset] = '\0'; stat = 5; break;
case 5: break;
default: stat = -1;
}
break;
}
case ' ':{
switch(stat){
case 1: key[i - offset] = '\0'; stat = 2; break;
case 4: value[i - offset] = '\0'; stat = 5; break;
}
break;
}
case '=':{
switch(stat){
case 1: key[i - offset] = '\0'; stat = 3; break;
case 2: stat = 3; break;
case 5: break;
default: stat = -1;
}
break;
}
default:{
if(c >= 65 && c <= 90){
c += 32;
}
switch(stat){
case 0: stat = 1; offset = i; key[0] = c; break;
case 1: key[i - offset] = c; break;
case 2: stat = -1; break;
case 3: stat = 4; offset = i; value[0] = c; break;
case 4: value[i - offset] = c; break;
}
}
}
i++;
}
fclose(parfile);
}
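// Read the proc000000_{x,z,rho,vp,vs}.bin binaries (leading int is the record length in bytes),
// interpolate onto the FD grid, and optionally convert (vp, vs, rho) to (lambda, mu, rho).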
static int loadModel(const char *model_dir){
char path[80];
sprintf(path, "%s/proc000000_x.bin", model_dir);
FILE *xfile = fopen(path,"rb");
sprintf(path, "%s/proc000000_z.bin", model_dir);
FILE *zfile = fopen(path,"rb");
sprintf(path, "%s/proc000000_rho.bin", model_dir);
FILE *rfile = fopen(path,"rb");
sprintf(path, "%s/proc000000_vp.bin", model_dir);
FILE *pfile = fopen(path,"rb");
sprintf(path, "%s/proc000000_vs.bin", model_dir);
FILE *sfile = fopen(path,"rb");
int npt;
fread(&npt, sizeof(int), 1, xfile);
fread(&npt, sizeof(int), 1, zfile);
fread(&npt, sizeof(int), 1, rfile);
fread(&npt, sizeof(int), 1, pfile);
fread(&npt, sizeof(int), 1, sfile);
npt /= 4;
float *xbuffer = mat::createHost(npt);
float *zbuffer = mat::createHost(npt);
float *rbuffer = mat::createHost(npt);
float *pbuffer = mat::createHost(npt);
float *sbuffer = mat::createHost(npt);
fread(xbuffer, sizeof(float), npt, xfile);
fread(zbuffer, sizeof(float), npt, zfile);
fread(rbuffer, sizeof(float), npt, rfile);
fread(pbuffer, sizeof(float), npt, pfile);
fread(sbuffer, sizeof(float), npt, sfile);
float *dxbuffer = mat::create(npt);
float *dzbuffer = mat::create(npt);
float *drbuffer = mat::create(npt);
float *dpbuffer = mat::create(npt);
float *dsbuffer = mat::create(npt);
mat::copyHostToDevice(dxbuffer, xbuffer, npt);
mat::copyHostToDevice(dzbuffer, zbuffer, npt);
mat::copyHostToDevice(drbuffer, rbuffer, npt);
mat::copyHostToDevice(dpbuffer, pbuffer, npt);
mat::copyHostToDevice(dsbuffer, sbuffer, npt);
float dmax = dat::Lx * dat::Lx + dat::Lz * dat::Lz;
mesh2grid<<<nxb, nzt>>>(dxbuffer, dzbuffer, drbuffer, dpbuffer, dsbuffer,
dat::lambda, dat::mu, dat::rho, dat::Lx/(nx-1), dat::Lz/(nz-1), dmax, npt);
if(dat::parametrisation){
changeParametrisation<<<nxb, nzt>>>(dat::lambda, dat::mu, dat::rho, psv);
}
free(xbuffer);
free(zbuffer);
free(rbuffer);
free(pbuffer);
free(sbuffer);
cudaFree(dxbuffer);
cudaFree(dzbuffer);
cudaFree(drbuffer);
cudaFree(dpbuffer);
cudaFree(dsbuffer);
fclose(xfile);
fclose(zfile);
fclose(rfile);
fclose(pfile);
fclose(sfile);
return 1;
}
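// Set defaults, read Par_file / SOURCE_* / STATIONS, and allocate all host and device fields;
// source and receiver grid indices are computed on the device.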
static int importData(const char *datapath){
dat::simulation_mode = 0;
dat::wave_propagation_sh = 1;
dat::wave_propagation_psv = 0;
dat::obs_type = 0;
dat::ntask = 1;
dat::misfit_type = 0;
dat::parametrisation = 1;
dat::obs_su = 0;
dat::nt = 5000;
dat::dt = 0.06;
dat::sfe = 10;
dat::nsrc = 1;
dat::misfit_ref = 1;
dat::absorb_bottom = 1;
dat::absorb_right = 1;
dat::absorb_top = 1;
dat::absorb_left = 1;
dat::absorb_width = 48000;
dat::obs_su_path = "trace";
dat::output_path = "output";
dat::model_init = "model_init";
dat::model_true = "model_true";
dat::optimize = 1;
dat::filter_kernel = 4;
dat::inv_iteration = 5;
dat::inv_maxiter = 0;
dat::lbfgs_mem = 5;
dat::ls_stepleninit = 0.05;
dat::ls_steplenmax = 0.5;
dat::ls_stepcountmax = 10;
dat::ls_thresh = 1.2;
dat::inv_parameter = 1;
char path[80];
sprintf(path, "%s/Par_file", datapath);
readFortran(path, -1);
{
dat::src_x = mat::createHost(nsrc);
dat::src_z = mat::createHost(nsrc);
dat::src_type = mat::createIntHost(nsrc);
dat::src_f0 = mat::createHost(nsrc);
dat::src_t0 = mat::createHost(nsrc);
dat::src_angle = mat::createHost(nsrc);
dat::src_factor = mat::createHost(nsrc);
for(int isrc = 0; isrc < nsrc; isrc++){
if(isrc < 10){
sprintf(path, "%s/SOURCE_00000%d", datapath, isrc);
}
else if(isrc < 100){
sprintf(path, "%s/SOURCE_0000%d", datapath, isrc);
}
else{
sprintf(path, "%s/SOURCE_000%d", datapath, isrc);
}
readFortran(path, isrc);
}
float *src_x = dat::src_x;
float *src_z = dat::src_z;
dat::src_x = mat::create(nsrc);
dat::src_z = mat::create(nsrc);
mat::copyHostToDevice(dat::src_x, src_x, nsrc);
mat::copyHostToDevice(dat::src_z, src_z, nsrc);
free(src_x);
free(src_z);
}
{
sprintf(path, "%s/STATIONS", datapath);
FILE *stfile = fopen(path,"r");
char buffer[80];
char numbuffer[40];
dat::nrec = 0;
while(fgets(buffer, 80, stfile) != NULL){
if(buffer[0] == 'S'){
dat::nrec ++;
}
}
fseek (stfile, 0, SEEK_SET);
float *rec_x = mat::createHost(nrec);
float *rec_z = mat::createHost(nrec);
int irec = 0;
while(fgets(buffer, 80, stfile) != NULL){
if(buffer[0] == 'S'){
int stat = 0;
int offset = 0;
for(int i = 0; i < 80 && buffer[i] != '\0'; i++){
if(buffer[i] == ' '){
switch(stat){
case 0: stat++; break;
case 2: stat++; break;
case 4:{
stat++;
numbuffer[i - offset] = '\0';
rec_x[irec] = str2float(numbuffer);
break;
}
case 6:{
stat++;
numbuffer[i - offset] = '\0';
rec_z[irec] = str2float(numbuffer);
i = 80;
break;
}
}
}
else{
if(stat == 1 || stat == 3 || stat == 5){
stat++;
offset = i;
}
if(stat == 4 || stat == 6){
numbuffer[i - offset] = buffer[i];
}
}
}
irec++;
}
}
dat::rec_x = mat::create(nrec);
dat::rec_z = mat::create(nrec);
mat::copyHostToDevice(dat::rec_x, rec_x, nrec);
mat::copyHostToDevice(dat::rec_z, rec_z, nrec);
// mat::init(dat::rec_z, 12000, nrec); // later
free(rec_x);
free(rec_z);
fclose(stfile);
}
{
int adjoint = (dat::simulation_mode != 1);
dat::nxb = dim3(nx, 1);
dat::nxb2 = dim3(nx, ntask);
dat::nzt = dim3(nz);
dat::isrc = mat::createIntHost(2);
dat::X = mat::create(nx, nz);
dat::Z = mat::create(nx, nz);
initialiseGrids<<<nxb, nzt>>>(dat::X, dat::Z, dat::Lx, nx, dat::Lz, nz);
if(nt % dat::sfe != 0){
nt = dat::sfe * lroundf((float)nt / dat::sfe);
}
dat::nsfe = nt / dat::sfe;
if(sh){
dat::vy = mat::create(nx2, nz);
dat::uy = mat::create(nx2, nz);
dat::sxy = mat::create(nx2, nz);
dat::szy = mat::create(nx2, nz);
dat::dsy = mat::create(nx2, nz);
dat::dvydx = mat::create(nx2, nz);
dat::dvydz = mat::create(nx2, nz);
dat::out_y = mat::create(nrec, nt * ntask);
dat::uy_forward = mat::createHost(dat::nsfe, nx2, nz);
dat::vy_forward = mat::createHost(dat::nsfe, nx2, nz);
}
if(psv){
dat::vx = mat::create(nx2, nz);
dat::vz = mat::create(nx2, nz);
dat::ux = mat::create(nx2, nz);
dat::uz = mat::create(nx2, nz);
dat::sxx = mat::create(nx2, nz);
dat::szz = mat::create(nx2, nz);
dat::sxz = mat::create(nx2, nz);
dat::dsx = mat::create(nx2, nz);
dat::dsz = mat::create(nx2, nz);
dat::dvxdx = mat::create(nx2, nz);
dat::dvxdz = mat::create(nx2, nz);
dat::dvzdx = mat::create(nx2, nz);
dat::dvzdz = mat::create(nx2, nz);
dat::out_x = mat::create(nrec, nt * ntask);
dat::out_z = mat::create(nrec, nt * ntask);
dat::ux_forward = mat::createHost(dat::nsfe, nx2, nz);
dat::uz_forward = mat::createHost(dat::nsfe, nx2, nz);
dat::vx_forward = mat::createHost(dat::nsfe, nx2, nz);
dat::vz_forward = mat::createHost(dat::nsfe, nx2, nz);
}
dat::lambda = mat::create(nx, nz);
dat::rho = mat::create(nx, nz);
dat::mu = mat::create(nx, nz);
dat::absbound = mat::create(nx, nz);
dat::stf_x = mat::create(nsrc, nt);
dat::stf_y = mat::create(nsrc, nt);
dat::stf_z = mat::create(nsrc, nt);
if(adjoint){
if(sh){
dat::dvydx_fw = mat::create(nx, nz);
dat::dvydz_fw = mat::create(nx, nz);
dat::u_obs_y = mat::create(nsrc, nrec, nt);
}
if(psv){
dat::dvxdx_fw = mat::create(nx, nz);
dat::dvxdz_fw = mat::create(nx, nz);
dat::dvzdx_fw = mat::create(nx, nz);
dat::dvzdz_fw = mat::create(nx, nz);
dat::u_obs_x = mat::create(nsrc, nrec, nt);
dat::u_obs_z = mat::create(nsrc, nrec, nt);
}
dat::K_lambda = mat::create(nx, nz);
dat::K_mu = mat::create(nx, nz);
dat::K_rho = mat::create(nx, nz);
dat::adstf_x = mat::create(nrec, nt);
dat::adstf_y = mat::create(nrec, nt);
dat::adstf_z = mat::create(nrec, nt);
}
dat::src_x_id = mat::createInt(nsrc);
dat::src_z_id = mat::createInt(nsrc);
dat::rec_x_id = mat::createInt(nrec);
dat::rec_z_id = mat::createInt(nrec);
computeIndices<<<nsrc, 1>>>(dat::src_x_id, dat::src_x, dat::Lx, nx);
computeIndices<<<nsrc, 1>>>(dat::src_z_id, dat::src_z, dat::Lz, nz);
computeIndices<<<nrec, 1>>>(dat::rec_x_id, dat::rec_x, dat::Lx, nx);
computeIndices<<<nrec, 1>>>(dat::rec_z_id, dat::rec_z, dat::Lz, nz);
initialiseAbsorbingBoundaries<<<nxb, nzt>>>(
dat::absbound, dat::absorb_width,
dat::absorb_left, dat::absorb_right, dat::absorb_bottom, dat::absorb_top,
dat::Lx, dat::Lz, dat::X, dat::Z
);
}
return 1;
}
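// Write the current model (and, after iteration 0, the kernels) in the same binary format that
// loadModel reads, one numbered output directory per iteration.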
static void exportData(int iter){
iter++;
char name[80];
if(iter < 10){
sprintf(name, "%s/000%d", dat::output_path, iter);
}
else if(iter < 100){
sprintf(name, "%s/00%d", dat::output_path, iter);
}
else if(iter < 1000){
sprintf(name, "%s/0%d", dat::output_path, iter);
}
else{
sprintf(name, "%s/%d", dat::output_path, iter);
}
mkdir(name);
char path[80];
sprintf(path, "%s/proc000000_x.bin", name);
FILE *xfile = fopen(path,"wb");
sprintf(path, "%s/proc000000_z.bin", name);
FILE *zfile = fopen(path,"wb");
sprintf(path, "%s/proc000000_rho.bin", name);
FILE *rfile = fopen(path,"wb");
sprintf(path, "%s/proc000000_vp.bin", name);
FILE *pfile = fopen(path,"wb");
sprintf(path, "%s/proc000000_vs.bin", name);
FILE *sfile = fopen(path,"wb");
FILE *krfile = NULL;
FILE *klfile = NULL;
FILE *kmfile = NULL;
if(iter > 0){
sprintf(path, "%s/kernel_rho.bin", name);
krfile = fopen(path,"wb");
sprintf(path, "%s/kernel_lambda.bin", name);
klfile = fopen(path,"wb");
sprintf(path, "%s/kernel_mu.bin", name);
kmfile = fopen(path,"wb");
}
int npt = nx * nz * 4;
fwrite(&npt, sizeof(int), 1, xfile);
fwrite(&npt, sizeof(int), 1, zfile);
fwrite(&npt, sizeof(int), 1, rfile);
fwrite(&npt, sizeof(int), 1, pfile);
fwrite(&npt, sizeof(int), 1, sfile);
if(iter > 0){
fwrite(&npt, sizeof(int), 1, krfile);
fwrite(&npt, sizeof(int), 1, kmfile);
fwrite(&npt, sizeof(int), 1, klfile);
}
npt /= 4;
float *buffer = mat::createHost(npt);
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::X), npt);
fwrite(buffer, sizeof(float), npt, xfile);
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::Z), npt);
fwrite(buffer, sizeof(float), npt, zfile);
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::rho), npt);
fwrite(buffer, sizeof(float), npt, rfile);
float *vp = mat::create(npt);
mat::copy(vp, mat::getDataPointer(dat::lambda), npt);
float *vs = mat::create(npt);
mat::copy(vs, mat::getDataPointer(dat::mu), npt);
float *rho = mat::create(npt);
mat::copy(rho, mat::getDataPointer(dat::rho), npt);
if(dat::parametrisation){
changeParametrisation<<<nxb, nzt>>>(vp, vs, rho, nz, psv);
}
mat::copyDeviceToHost(buffer, vp, npt);
fwrite(buffer, sizeof(float), npt, pfile);
mat::copyDeviceToHost(buffer, vs, npt);
fwrite(buffer, sizeof(float), npt, sfile);
if(iter > 0){
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::K_rho), npt);
fwrite(buffer, sizeof(float), npt, krfile);
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::K_mu), npt);
fwrite(buffer, sizeof(float), npt, kmfile);
mat::copyDeviceToHost(buffer, mat::getDataPointer(dat::K_lambda), npt);
fwrite(buffer, sizeof(float), npt, klfile);
}
cudaFree(vp);
cudaFree(vs);
cudaFree(rho);
free(buffer);
fclose(xfile);
fclose(zfile);
fclose(rfile);
fclose(pfile);
fclose(sfile);
if(iter > 0){
fclose(krfile);
fclose(kmfile);
fclose(klfile);
}
}
static void checkMemoryUsage(){
size_t free_byte ;
size_t total_byte ;
cudaMemGetInfo( &free_byte, &total_byte ) ;
float free_db = (float)free_byte ;
float total_db = (float)total_byte ;
float used_db = total_db - free_db ;
printf("memory usage: %.1fMB / %.1fMB\n", used_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0);
}
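// Separable Gaussian smoothing of a field, normalised by the precomputed weight sums so the
// amplitude is preserved near the edges.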
static void applyGaussian(float **p, int sigma){
float **gsum = mat::create(nx, nz);
float **gtemp = mat::create(nx, nz);
initialiseGaussian<<<nxb, nzt>>>(gsum, nx, nz, sigma);
filterKernelX<<<nxb, nzt>>>(p, gtemp, nx, sigma);
filterKernelZ<<<nxb, nzt>>>(p, gtemp, gsum, nz, sigma);
mat::freeDevice(gsum);
mat::freeDevice(gtemp);
}
static void generateChecker(float **p, float dp, float margin, int cx, int cz){
float lx = dat::Lx / (cx + (cx + 3) * margin);
float lz = dat::Lz / (cz + (cz + 3) * margin);
generateChecker<<<nxb, nzt>>>(p, dp, margin, lx, lz, dat::X, dat::Z);
int sigma = (int) 15 / cx;
applyGaussian(p, sigma);
}
static void generateLayer(float **p, float dp, int n){
float dz = dat::Lz / n;
float dpi = 2*dp / (n-1);
for(int i = 0; i < n; i++){
generateLayer<<<nxb, nzt>>>(p, i*dz, (i+1)*dz, -dp + dpi*i, dat::Z);
}
int sigma = (int) 15 / n;
applyGaussian(p, sigma);
}
static void generateRandomLayer(float **p, float dp, float dl, int n){
float dz = dat::Lz / n;
float dpi = 2*dp / (n-1);
float *layer1 = mat::create(nx);
float *layer2 = mat::create(nx);
float *hlayer = mat::createHost(nx);
float base = dl * dz / n;
float dx = dat::Lx / (nx - 1);
srand(time(0));
for(int i = 0; i < n; i++){
mat::initHost(hlayer, 0, nx);
for(int k = 0; k < n; k++){
float rng = (float)(rand() % 101) / 100;
for(int j = 0; j < nx; j++){
hlayer[j] += base * sin((k+rng)*2*pi*j*dx/dat::Lx+rng*pi);
}
}
mat::copyHostToDevice(layer2, hlayer, nx);
if(i==0){
mat::init(layer1,0,nx);
}
else if(i==n-1){
mat::init(layer2,0,nx);
}
generateRandomLayer<<<nxb, nzt>>>(p, i*dz, (i+1)*dz, -dp + dpi*i, layer1, layer2, dat::Z);
mat::copy(layer1, layer2, nx);
}
int sigma = (int) 12 / n;
    applyGaussian(p, sigma);
cudaFree(layer1);
cudaFree(layer2);
free(hlayer);
}
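// Write one Seismic Unix file per source: 240-byte trace headers (source/receiver coordinates,
// sample count, dt in microseconds) followed by nt float samples per receiver.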
static void writeSU(const char *fname, const int isrc, float **data){
FILE *su = fopen(fname, "wb");
int header1[28];
short int header2[2];
short int header3[2];
float header4[30];
for(int i = 0; i < 28; i++) header1[i] = 0;
for(int i = 0; i < 2; i++) header2[i] = 0;
for(int i = 0; i < 2; i++) header3[i] = 0;
for(int i = 0; i < 30; i++) header4[i] = 0;
float *src_x = mat::createHost(nsrc);
float *src_z = mat::createHost(nsrc);
mat::copyDeviceToHost(src_x, dat::src_x, nsrc);
mat::copyDeviceToHost(src_z, dat::src_z, nsrc);
float xs = src_x[isrc];
float zs = src_z[isrc];
free(src_x);
free(src_z);
float *rec_x = mat::createHost(nrec);
float *rec_z = mat::createHost(nrec);
    mat::copyDeviceToHost(rec_x, dat::rec_x, nrec);
    mat::copyDeviceToHost(rec_z, dat::rec_z, nrec);
short int dt_int2;
if(dt * 1e6 > pow(2, 15)){
dt_int2 = 0;
}
else{
dt_int2 = (short int)(dt * 1e6);
}
header1[18] = lroundf(xs);
header1[19] = lroundf(zs);
header2[0] = 0;
header2[1] = nt;
header3[0] = dt_int2;
header3[1] = 0;
for(int irec = 0; irec < nrec; irec++){
header1[0] = irec + 1;
header1[9] = lroundf(rec_x[irec] - xs);
header1[20] = lroundf(rec_x[irec]);
header1[21] = lroundf(rec_z[irec]);
if(nrec > 1){
header4[1] = rec_x[1] - rec_x[0];
}
fwrite(header1, sizeof(int), 28, su);
fwrite(header2, sizeof(short int), 2, su);
fwrite(header3, sizeof(short int), 2, su);
fwrite(header4, sizeof(float), 30, su);
fwrite(data[irec], sizeof(float), nt, su);
}
free(rec_x);
free(rec_z);
fclose(su);
}
static void writeSU(float ***u_obs, char c){
char path[80];
for(int i = 0; i < nsrc; i++){
if(i < 10){
sprintf(path, "%s/U%c_00000%d", dat::obs_su_path, c, i);
}
else if(i < 100){
sprintf(path, "%s/U%c_0000%d", dat::obs_su_path, c, i);
}
else{
sprintf(path, "%s/U%c_000%d", dat::obs_su_path, c, i);
}
writeSU(path, i, u_obs[i]);
}
}
static void writeSU(){
float ***u_obs = mat::createHost(nsrc, nrec, nt);
mkdir(dat::obs_su_path);
if(sh){
mat::copyDeviceToHost(u_obs, dat::u_obs_y, nsrc, nrec, nt);
writeSU(u_obs, 'y');
}
if(psv){
mat::copyDeviceToHost(u_obs, dat::u_obs_x, nsrc, nrec, nt);
writeSU(u_obs, 'x');
mat::copyDeviceToHost(u_obs, dat::u_obs_z, nsrc, nrec, nt);
writeSU(u_obs, 'z');
}
mat::freeHost(u_obs);
}
static void readSU(const char *fname, float **data){
FILE *su = fopen(fname, "rb");
int header1[28];
short int header2[2];
short int header3[2];
float header4[30];
fread(header1, sizeof(int), 28, su);
fread(header2, sizeof(short int), 2, su);
int nt_su = header2[1];
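    // note: getFileLength() seeks back to the start of the file, so the loop below re-reads every full 240-byte header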
    int nrec_su = getFileLength(su) / (240 + 4 * nt_su);
if(nt_su != nt || nrec_su != nrec){
printf("Error loading Seismic Unix file\n");
}
else{
for(int irec = 0; irec < nrec; irec++){
fread(header1, sizeof(int), 28, su);
fread(header2, sizeof(short int), 2, su);
fread(header3, sizeof(short int), 2, su);
fread(header4, sizeof(float), 30, su);
fread(data[irec], sizeof(float), nt, su);
}
}
fclose(su);
}
static void readSU(float ***u_obs, char c){
char path[80];
for(int i = 0; i < nsrc; i++){
if(i < 10){
sprintf(path, "%s/U%c_00000%d", dat::obs_su_path, c, i);
}
else if(i < 100){
sprintf(path, "%s/U%c_0000%d", dat::obs_su_path, c, i);
}
else{
sprintf(path, "%s/U%c_000%d", dat::obs_su_path, c, i);
}
readSU(path, u_obs[i]);
}
}
static void readSU(){
float ***u_obs = mat::createHost(nsrc, nrec, nt);
if(sh){
readSU(u_obs, 'y');
mat::copyHostToDevice(dat::u_obs_y, u_obs, nsrc, nrec, nt);
}
if(psv){
readSU(u_obs, 'x');
mat::copyHostToDevice(dat::u_obs_x, u_obs, nsrc, nrec, nt);
readSU(u_obs, 'z');
mat::copyHostToDevice(dat::u_obs_z, u_obs, nsrc, nrec, nt);
}
mat::freeHost(u_obs);
}
static void makeSourceTimeFunction(float *stf, int index){
float max = 0;
float f0 = dat::src_f0[index];
float t0 = dat::src_t0[index];
for(int it = 0; it < nt; it++){
float t = it * dt;
switch(dat::src_type[index]){
case 1:{
float a = pi * pi * f0 * f0;
stf[it] = -(t - t0) * exp(-pow(a, 2) * pow(t - t0, 2));
break;
}
// other stf: later
}
if(fabs(stf[it]) > max){
max = fabs(stf[it]);
}
}
if(max > 0){
for(int it = 0; it < nt; it++){
stf[it] /= max;
}
}
}
static void prepareSTF(){
float **stf_x = mat::createHost(nsrc, nt);
float **stf_y = mat::createHost(nsrc, nt);
float **stf_z = mat::createHost(nsrc, nt);
float *stfn = mat::createHost(nt);
for(int isrc = 0; isrc < nsrc; isrc++){
makeSourceTimeFunction(stfn, isrc);
float angle = dat::src_angle[isrc];
float amp = dat::src_factor[isrc];
for(int it = 0; it < nt; it++){
stf_x[isrc][it] = amp * stfn[it] * cos(angle);
stf_y[isrc][it] = amp * stfn[it];
stf_z[isrc][it] = amp * stfn[it] * sin(angle);
}
}
mat::copyHostToDevice(dat::stf_x, stf_x, nsrc, nt);
mat::copyHostToDevice(dat::stf_y, stf_y, nsrc, nt);
mat::copyHostToDevice(dat::stf_z, stf_z, nsrc, nt);
mat::freeHost(stf_x);
mat::freeHost(stf_y);
mat::freeHost(stf_z);
free(stfn);
}
static void initialiseDynamicFields(){
if(sh){
mat::init(dat::vy, 0, nx2, nz);
mat::init(dat::uy, 0, nx2, nz);
mat::init(dat::sxy, 0, nx2, nz);
mat::init(dat::szy, 0, nx2, nz);
}
if(psv){
mat::init(dat::vx, 0, nx2, nz);
mat::init(dat::vz, 0, nx2, nz);
mat::init(dat::ux, 0, nx2, nz);
mat::init(dat::uz, 0, nx2, nz);
mat::init(dat::sxx, 0, nx2, nz);
mat::init(dat::szz, 0, nx2, nz);
mat::init(dat::sxz, 0, nx2, nz);
}
}
static void initialiseKernels(){
mat::init(dat::K_lambda, 0, nx, nz);
mat::init(dat::K_mu, 0, nx, nz);
mat::init(dat::K_rho, 0, nx, nz);
}
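// Main time loop. mode 0 (forward): store displacement/velocity snapshots every sfe steps (in reverse
// order) and record seismograms; mode 1 (adjoint): inject the time-reversed residuals at the receivers
// and correlate with the stored forward snapshots to accumulate the kernels.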
static void runWaveFieldPropagation(){
initialiseDynamicFields();
int ntask2 = dat::isrc[1]-dat::isrc[0]+1;
for(int it = 0; it < nt; it++){
if(mode == 0){
if((it + 1) % dat::sfe == 0){
int isfe = dat::nsfe - (it + 1) / dat::sfe;
if(sh){
mat::copyDeviceToHost(dat::uy_forward[isfe], dat::uy, nx2, nz);
}
if(psv){
mat::copyDeviceToHost(dat::ux_forward[isfe], dat::ux, nx2, nz);
mat::copyDeviceToHost(dat::uz_forward[isfe], dat::uz, nx2, nz);
}
}
}
if(sh){
divSY<<<nxb2, nzt>>>(dat::dsy, dat::sxy, dat::szy, dat::X, dat::Z, nx, nz);
}
if(psv){
divSXZ<<<nxb2, nzt>>>(dat::dsx, dat::dsz, dat::sxx, dat::szz, dat::sxz, dat::X, dat::Z, nx, nz);
}
if(mode == 0){
addSTF<<<nsrc, ntask2>>>(
dat::dsx, dat::dsy, dat::dsz, dat::stf_x, dat::stf_y, dat::stf_z,
dat::src_x_id, dat::src_z_id, dat::isrc[0], sh, psv, it,nx
);
}
else if(mode == 1){
// next: adstf ntask
addSTF<<<nrec, 1>>>(
dat::dsx, dat::dsy, dat::dsz, dat::adstf_x, dat::adstf_y, dat::adstf_z,
dat::rec_x_id, dat::rec_z_id, -1, sh, psv, it,nx
);
}
if(sh){
updateV<<<nxb2, nzt>>>(dat::vy, dat::dsy, dat::rho, dat::absbound, dt);
divVY<<<nxb2, nzt>>>(dat::dvydx, dat::dvydz, dat::vy, dat::X, dat::Z, nx, nz);
updateSY<<<nxb2, nzt>>>(dat::sxy, dat::szy, dat::dvydx, dat::dvydz, dat::mu, dt);
updateU<<<nxb2, nzt>>>(dat::uy, dat::vy, dt);
}
if(psv){
updateV<<<nxb2, nzt>>>(dat::vx, dat::dsx, dat::rho, dat::absbound, dt);
updateV<<<nxb2, nzt>>>(dat::vz, dat::dsz, dat::rho, dat::absbound, dt);
divVXZ<<<nxb2, nzt>>>(dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz, dat::vx, dat::vz, dat::X, dat::Z, nx, nz);
updateSXZ<<<nxb2, nzt>>>(dat::sxx, dat::szz, dat::sxz, dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz, dat::lambda, dat::mu, dt);
updateU<<<nxb2, nzt>>>(dat::ux, dat::vx, dt);
updateU<<<nxb2, nzt>>>(dat::uz, dat::vz, dt);
}
if(mode == 0){
// next: saveRec type=1
if(dat::obs_type == 0){
saveRec<<<nrec, ntask2>>>(
dat::out_x, dat::out_y, dat::out_z, dat::vx, dat::vy, dat::vz,
dat::rec_x_id, dat::rec_z_id, nx, nt, sh, psv, it
);
}
else if(dat::obs_type == 1){
saveRec<<<nrec, ntask2>>>(
dat::out_x, dat::out_y, dat::out_z, dat::ux, dat::uy, dat::uz,
dat::rec_x_id, dat::rec_z_id, nx, nt, sh, psv, it
);
}
else if(dat::obs_type == 2 && dat::isrc[0] >= 0){
saveRec<<<nrec, ntask2>>>(
dat::u_obs_x, dat::u_obs_y, dat::u_obs_z, dat::ux, dat::uy, dat::uz,
dat::rec_x_id, dat::rec_z_id, dat::isrc[0], nx, sh, psv, it
);
}
if((it + 1) % dat::sfe == 0){
int isfe = dat::nsfe - (it + 1) / dat::sfe;
if(sh){
mat::copyDeviceToHost(dat::vy_forward[isfe], dat::vy, nx2, nz);
}
if(psv){
mat::copyDeviceToHost(dat::vx_forward[isfe], dat::vx, nx2, nz);
mat::copyDeviceToHost(dat::vz_forward[isfe], dat::vz, nx2, nz);
}
}
}
else if(mode == 1){
if((it + dat::sfe) % dat::sfe == 0){
// dsi -> ui_fw -> vi_fw
int isfe = (it + dat::sfe) / dat::sfe - 1;
float tsfe = dat::sfe * dt;
if(sh){
mat::copyHostToDevice(dat::dsy, dat::uy_forward[isfe], nx2, nz);
divVY<<<nxb2, nzt>>>(dat::dvydx, dat::dvydz, dat::uy, dat::X, dat::Z, nx, nz);
divVY<<<nxb2, nzt>>>(dat::dvydx_fw, dat::dvydz_fw, dat::dsy, dat::X, dat::Z, nx, nz);
mat::copyHostToDevice(dat::dsy, dat::vy_forward[isfe], nx2, nz);
interactionRhoY<<<nxb, nzt>>>(dat::K_rho, dat::vy, dat::dsy, tsfe);
interactionMuY<<<nxb, nzt>>>(dat::K_mu, dat::dvydx, dat::dvydx_fw, dat::dvydz, dat::dvydz_fw, tsfe);
// next: K_rho *= ntask
}
if(psv){
mat::copyHostToDevice(dat::dsx, dat::ux_forward[isfe], nx2, nz);
mat::copyHostToDevice(dat::dsz, dat::uz_forward[isfe], nx2, nz);
divVXZ<<<nxb2, nzt>>>(
dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz,
dat::ux, dat::uz, dat::X, dat::Z, nx, nz
);
divVXZ<<<nxb2, nzt>>>(
dat::dvxdx_fw, dat::dvxdz_fw, dat::dvzdx_fw, dat::dvzdz_fw,
dat::dsx, dat::dsz, dat::X, dat::Z, nx, nz
);
mat::copyHostToDevice(dat::dsx, dat::vx_forward[isfe], nx2, nz);
mat::copyHostToDevice(dat::dsz, dat::vz_forward[isfe], nx2, nz);
interactionRhoXZ<<<nxb, nzt>>>(dat::K_rho, dat::vx, dat::dsx, dat::vz, dat::dsz, tsfe);
interactionMuXZ<<<nxb, nzt>>>(
dat::K_mu, dat::dvxdx, dat::dvxdx_fw, dat::dvxdz, dat::dvxdz_fw,
dat::dvzdx, dat::dvzdx_fw, dat::dvzdz, dat::dvzdz_fw, tsfe
);
interactionLambdaXZ<<<nxb, nzt>>>(dat::K_lambda, dat::dvxdx, dat::dvxdx_fw, dat::dvzdz, dat::dvzdz_fw, tsfe);
}
}
}
}
}
static void runForward(int isrc0, int isrc1){
dat::simulation_mode = 0;
dat::isrc[0] = isrc0;
dat::isrc[1] = isrc1;
runWaveFieldPropagation();
}
static void runAdjoint(int init_kernel){
dat::simulation_mode = 1;
if(init_kernel){
initialiseKernels();
}
runWaveFieldPropagation();
}
static void initialiseFilters(){
// taper weights
dat::tw = mat::create(nt);
getTaperWeights<<<nt, 1>>>(dat::tw, dt, nt);
// gaussian filter
if(dat::filter_kernel){
dat::gsum = mat::create(nx, nz);
dat::gtemp = mat::create(nx, nz);
initialiseGaussian<<<nxb, nzt>>>(dat::gsum, nx, nz, dat::filter_kernel);
}
}
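// Prepare observed data: either read existing SU files or run forward simulations through the true
// model with obs_type = 2, then switch to obs_type = 1 (displacement recording) for the inversion.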
static void prepareObs(){
dat::obs_type = 2;
prepareSTF();
if(dat::obs_su){
printf("Reading observed data");
readSU();
}
else{
printf("Generating observed data\n");
loadModel(dat::model_true);
for(int isrc = 0; isrc < nsrc; isrc += ntask){
runForward(isrc, getTaskIndex(isrc));
for(int i=isrc; i<=getTaskIndex(isrc); i++){
printStat(i, nsrc);
}
}
}
initialiseFilters();
dat::obs_type = 1;
}
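// Envelope misfit: build the analytic signals of the synthetic and observed traces, take the envelope
// difference as the misfit, and assemble the corresponding adjoint source; 0.05 * max(esyn) acts as a
// water level in the division.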
static float calculateEnvelopeMisfit(float **adstf, float *d_misfit, float **out, float ***u_obs,
cufftComplex *syn, cufftComplex *obs, float *esyn, float *eobs, float *ersd, float *etmp,
float dt, int isrc, int irec, int j, int nt){
copyWaveform<<<nt, 1>>>(d_misfit, u_obs, isrc+j, irec);
hilbert(d_misfit, obs);
copyWaveform<<<nt, 1>>>(d_misfit, out, irec, j*nt);
hilbert(d_misfit, syn);
copyC2Abs<<<nt, 1>>>(esyn, syn, nt);
copyC2Abs<<<nt, 1>>>(eobs, obs, nt);
float max = mat::amax(esyn, nt) * 0.05;
envelopetmp<<<nt, 1>>>(etmp, esyn, eobs, max);
copyC2Imag<<<nt, 1>>>(ersd, syn, nt);
mat::calc(ersd, ersd, etmp, nt);
hilbert(ersd, obs);
copyC2Imag<<<nt, 1>>>(ersd, obs, nt);
prepareEnvelopeSTF<<<nt, 1>>>(adstf, etmp, d_misfit, ersd, nt, irec);
mat::calc(ersd, 1, esyn, -1, eobs, nt);
return mat::norm(ersd, nt);
}
static float computeKernelsAndMisfit(int kernel){
float misfit = 0;
float *d_misfit = mat::create(nt);
cufftComplex *syn;
cufftComplex *obs;
float *esyn;
float *eobs;
float *ersd;
float *etmp;
if(dat::misfit_type == 1){
cudaMalloc((void**)&syn, nt * sizeof(cufftComplex));
cudaMalloc((void**)&obs, nt * sizeof(cufftComplex));
esyn = mat::create(nt);
eobs = mat::create(nt);
ersd = mat::create(nt);
etmp = mat::create(nt);
}
if(kernel){
printf("Computing gradient\n");
initialiseKernels();
}
for(int isrc = 0; isrc < nsrc; isrc+=ntask){
int jsrc = getTaskIndex(isrc);
runForward(isrc, jsrc);
for(int j = 0; j <= jsrc - isrc; j++){
for(int irec = 0; irec < nrec; irec++){
if(dat::misfit_type == 1){
if(sh){
misfit += calculateEnvelopeMisfit(dat::adstf_y, d_misfit, dat::out_y, dat::u_obs_y,
syn, obs, esyn, eobs, ersd, etmp, dt, isrc, irec, j, nt);
}
if(psv){
misfit += calculateEnvelopeMisfit(dat::adstf_x, d_misfit, dat::out_x, dat::u_obs_x,
syn, obs, esyn, eobs, ersd, etmp, dt, isrc, irec, j, nt);
misfit += calculateEnvelopeMisfit(dat::adstf_z, d_misfit, dat::out_z, dat::u_obs_z,
syn, obs, esyn, eobs, ersd, etmp, dt, isrc, irec, j, nt);
}
}
else{
if(sh){
calculateMisfit<<<nt, 1>>>(d_misfit, dat::out_y, dat::u_obs_y, dat::tw, sqrt(dt), isrc, irec, j, nt);
misfit += mat::norm(d_misfit, nt);
}
if(psv){
calculateMisfit<<<nt, 1>>>(d_misfit, dat::out_x, dat::u_obs_x, dat::tw, sqrt(dt), isrc, irec, j, nt);
misfit += mat::norm(d_misfit, nt);
calculateMisfit<<<nt, 1>>>(d_misfit, dat::out_z, dat::u_obs_z, dat::tw, sqrt(dt), isrc, irec, j, nt);
misfit += mat::norm(d_misfit, nt);
}
}
}
}
if(kernel){
if(dat::misfit_type != 1){
if(sh){
prepareAdjointSTF<<<nt, nrec>>>(dat::adstf_y, dat::out_y, dat::u_obs_y, dat::tw, nt, isrc);
                    if(!psv){
mat::init(dat::adstf_x, 0, nrec, nt);
mat::init(dat::adstf_z, 0, nrec, nt);
}
}
if(psv){
prepareAdjointSTF<<<nt, nrec>>>(dat::adstf_x, dat::out_x, dat::u_obs_x, dat::tw, nt, isrc);
prepareAdjointSTF<<<nt, nrec>>>(dat::adstf_z, dat::out_z, dat::u_obs_z, dat::tw, nt, isrc);
if(!sh){
mat::init(dat::adstf_y, 0, nrec, nt);
}
}
}
runAdjoint(0);
printStat(isrc, nsrc);
}
}
cudaFree(d_misfit);
if(dat::misfit_type == 1){
cudaFree(syn);
cudaFree(obs);
cudaFree(esyn);
cudaFree(eobs);
cudaFree(ersd);
cudaFree(etmp);
}
if(kernel){
if(dat::filter_kernel){
filterKernelX<<<nxb, nzt>>>(dat::K_rho, dat::gtemp, nx, dat::filter_kernel);
filterKernelZ<<<nxb, nzt>>>(dat::K_rho, dat::gtemp, dat::gsum, nz, dat::filter_kernel);
filterKernelX<<<nxb, nzt>>>(dat::K_mu, dat::gtemp, nx, dat::filter_kernel);
filterKernelZ<<<nxb, nzt>>>(dat::K_mu, dat::gtemp, dat::gsum, nz, dat::filter_kernel);
filterKernelX<<<nxb, nzt>>>(dat::K_lambda, dat::gtemp, nx, dat::filter_kernel);
filterKernelZ<<<nxb, nzt>>>(dat::K_lambda, dat::gtemp, dat::gsum, nz, dat::filter_kernel);
}
}
return misfit;
}
static float calculateMisfit(){
return computeKernelsAndMisfit(0);
}
static float computeKernels(){
return computeKernelsAndMisfit(1);
}
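// Nonlinear conjugate-gradient search direction (beta follows the Polak-Ribiere formula), with a
// periodic restart and a restart whenever the result is not a descent direction.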
static int computeDirectionCG(float **p_new, float **p_old, float **g_new, float **g_old){
dat::inv_count++;
if(dat::inv_count == 1){
mat::copy(p_new, g_new, -1, nx, nz);
return 0;
}
else if(dat::inv_maxiter && dat::inv_count > dat::inv_maxiter){
fprintf(dat::log_ls, " restarting NLCG... [periodic restart]\n");
printf(" restarting NLCG... [periodic restart]\n");
return -1;
}
// self.precond: later
float den = mat::dot(g_old, g_old, nx, nz);
mat::calc(p_new, 1, g_new, -1, g_old, nx, nz);
float num = mat::dot(g_new, p_new, nx, nz);
float beta = num / den;
mat::calc(p_new, -1, g_new, beta, p_old, nx, nz);
// lose of conjugacy? later
if(mat::dot(p_new, g_new, nx, nz) > 0){
fprintf(dat::log_ls, " restarting NLCG... [not a descent direction]\n");
printf(" restarting NLCG... [not a descent direction]\n");
return -1;
}
return 1;
}
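// L-BFGS two-loop recursion: lbfgs_S stores model updates and lbfgs_Y gradient differences (most
// recent pair first); the initial Hessian scaling is (s.y)/(y.y) from the newest pair.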
static int computeDirectionLBFGS(float **p_new, float **p_old, float **g_new, float **g_old, float **m_new, float **m_old){
dat::inv_count++;
if(dat::inv_count == 1){
mat::copy(p_new, g_new, -1, nx, nz);
return 0;
}
else if(dat::inv_maxiter && dat::inv_count > dat::inv_maxiter){
fprintf(dat::log_ls, " restarting LBFGS... [periodic restart]\n");
printf(" restarting LBFGS... [periodic restart]\n");
return -1;
}
float **tmpS = dat::lbfgs_S[dat::lbfgs_mem-1];
float **tmpY = dat::lbfgs_Y[dat::lbfgs_mem-1];
for(int i = dat::lbfgs_mem-1; i > 0; i--){
dat::lbfgs_S[i] = dat::lbfgs_S[i-1];
dat::lbfgs_Y[i] = dat::lbfgs_Y[i-1];
}
dat::lbfgs_S[0] = tmpS;
dat::lbfgs_Y[0] = tmpY;
mat::calc(p_old, 1, m_new, -1, m_old, nx, nz);
mat::copyDeviceToHost(dat::lbfgs_S[0], p_old, nx, nz);
mat::calc(p_old, 1, g_new, -1, g_old, nx, nz);
mat::copyDeviceToHost(dat::lbfgs_Y[0], p_old, nx, nz);
if(dat::lbfgs_used < dat::lbfgs_mem){
dat::lbfgs_used++;
}
int &kk = dat::lbfgs_used;
float *rh = mat::createHost(kk);
float *al = mat::createHost(kk);
// S->m_old Y->p_old
mat::copy(p_new, g_new, nx, nz);
float sty, yty;
for(int i = 0; i < kk; i++){
mat::copyHostToDevice(m_old, dat::lbfgs_S[i], nx, nz);
mat::copyHostToDevice(p_old, dat::lbfgs_Y[i], nx, nz);
rh[i] = 1 / mat::dot(p_old, m_old, nx, nz);
al[i] = rh[i] * mat::dot(m_old, p_new, nx, nz);
mat::calc(p_new, 1, p_new, -al[i], p_old, nx, nz);
if(i == 0){
sty = 1 / rh[i];
yty = mat::dot(p_old, p_old, nx, nz);
}
}
mat::copy(p_new, p_new, sty/yty, nx, nz);
for(int i = kk-1; i >= 0; i--){
mat::copyHostToDevice(m_old, dat::lbfgs_S[i], nx, nz);
mat::copyHostToDevice(p_old, dat::lbfgs_Y[i], nx, nz);
float be = rh[i] * mat::dot(p_old, p_new, nx, nz);
mat::calc(p_new, 1, p_new, al[i] - be, m_old, nx, nz);
}
free(rh);
free(al);
float angle = calculateAngle(p_new, g_new, 1, nx, nz);
if(angle>=pi/2 || angle<=0){
fprintf(dat::log_ls, " restarting LBFGS... [not a descent direction]\n");
printf(" restarting LBFGS... [not a descent direction]\n");
return -1;
}
mat::copy(p_new, p_new, -1, nx, nz);
return 1;
}
static int argmin(float *f, int n){
float min = f[0];
int idx = 0;
for(int i = 1; i < n; i++){
if(f[i] < min){
min = f[i];
idx = i;
}
}
return idx;
}
static int checkBracket(float *x, float *f, int n){
int imin = argmin(f, n);
float fmin = f[imin];
if(fmin < f[0]){
for(int i = imin; i < n; i++){
if(f[i] > fmin){
return 1;
}
}
}
return 0;
}
static int goodEnough(float *x, float *f, int n, float *alpha){
float thresh = log10(dat::ls_thresh);
if(!checkBracket(x, f, n)){
return 0;
}
float p[3];
int idx = argmin(f, n) - 1;
int fitlen;
if(idx + 3 >= n){
fitlen = 3;
}
else{
fitlen = 4;
}
polyfit(x + idx, f + idx, p, fitlen);
if(p[0] <= 0){
printf("line search error\n");
}
else{
float x0 = -p[1]/(2*p[0]);
*alpha = x0;
for(int i = 1; i < n; i++){
if(fabs(log10(x[i]/x0)) < thresh){
return 1;
}
}
}
return 0;
}
static float backtrack2(float f0, float g0, float x1, float f1, float b1, float b2){
float x2 = -g0 * x1 * x1 / (2 *(f1 - f0 - g0 * x1));
if(x2 > b2*x1){
x2 = b2*x1;
}
else if(x2 < b1*x1){
x2 = b1*x1;
}
return x2;
}
static float updateModel(float **m, float **p, float alpha, float alpha_old){
updateModel<<<nxb, nzt>>>(m, p, alpha - alpha_old);
return alpha;
}
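// Step-length selection: once the minimum is bracketed, fit a parabola through the samples;
// otherwise extend the step by the golden ratio or backtrack with a quadratic model.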
static float calculateStep(const int step_count, float step_len_max, int *status){
float update_count = -1;
float alpha;
float *x = mat::createHost(step_count+1);
float *f = mat::createHost(step_count+1);
for(int i = 0; i < step_count+1; i++){
int j = dat::ls_count - 1 - step_count + i;
x[i] = dat::step_lens[j];
f[i] = dat::func_vals[j];
}
for(int i = 0; i < step_count+1; i++){
for(int j = i+1; j < step_count+1; j++){
if(x[j] < x[i]){
float tmp;
tmp = x[i]; x[i] = x[j]; x[j] = tmp;
tmp = f[i]; f[i] = f[j]; f[j] = tmp;
}
}
}
for(int i = 0; i < dat::ls_count; i++){
if(fabs(dat::step_lens[i]) < 1e-6){
update_count++;
}
}
if(step_count == 0){
if(update_count == 0){
alpha = 1 / dat::ls_gtg[dat::inv_count - 1];
*status = 0;
}
else{
int idx = argmin(dat::func_vals, dat::ls_count - 1);
alpha = dat::step_lens[idx] * dat::ls_gtp[dat::inv_count - 2] / dat::ls_gtp[dat::inv_count - 1];
*status = 0;
}
}
else if(checkBracket(x, f, step_count+1)){
if(goodEnough(x, f, step_count+1, &alpha)){
alpha = x[argmin(f, step_count+1)];
*status = 1;
}
else{
*status = 0;
}
}
else if(step_count <= dat::ls_stepcountmax){
int i;
for(i = 1; i < step_count+1; i++){
if(f[i] > f[0]) break;
}
if(i == step_count+1){
alpha = 1.618034 * x[step_count];
*status = 0;
}
else{
float slope = dat::ls_gtp[dat::inv_count-1]/dat::ls_gtg[dat::inv_count-1];
alpha = backtrack2(f[0], slope, x[1], f[1], 0.1, 0.5);
*status = 0;
}
}
else{
alpha = 0;
*status = -1;
}
if(alpha > step_len_max){
if(step_count == 0){
alpha = 0.618034 * step_len_max;
*status = 0;
}
else{
alpha = step_len_max;
*status = 1;
}
}
free(x);
free(f);
return alpha;
}
static float calculateStepBT(const int step_count, float step_len_max, int *status){
float update_count = -1;
for(int i = 0; i < dat::ls_count; i++){
if(fabs(dat::step_lens[i]) < 1e-6){
update_count++;
}
}
if(update_count == 0){
return calculateStep(step_count, step_len_max, status);
}
float alpha;
float *x = mat::createHost(step_count+1);
float *f = mat::createHost(step_count+1);
for(int i = 0; i < step_count+1; i++){
int j = dat::ls_count - 1 - step_count + i;
x[i] = dat::step_lens[j];
f[i] = dat::func_vals[j];
}
for(int i = 0; i < step_count+1; i++){
for(int j = i+1; j < step_count+1; j++){
if(x[j] < x[i]){
float tmp;
tmp = x[i]; x[i] = x[j]; x[j] = tmp;
tmp = f[i]; f[i] = f[j]; f[j] = tmp;
}
}
}
int idx = argmin(f, step_count+1);
if(step_count == 0){
alpha = step_len_max;
if(alpha > 1){
alpha = 1;
}
*status = 0;
}
else if(f[idx] < f[0]){
alpha = x[idx];
*status = 1;
}
else if(step_count <= dat::ls_stepcountmax){
float slope = dat::ls_gtp[dat::inv_count-1]/dat::ls_gtg[dat::inv_count-1];
alpha = backtrack2(f[0], slope, x[1], f[1], 0.1, 0.5);
*status = 0;
}
else{
alpha = 0;
*status = -1;
}
free(x);
free(f);
return alpha;
}
static void restartSearch(float **p, float **g){
mat::copy(p, g, -1, nx, nz);
dat::ls_count = 0;
dat::inv_count = 1;
if(dat::optimize == 1){
dat::lbfgs_used = 0;
}
}
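// Bracketing line search on the misfit along direction p; trial steps are scaled by |m|_inf / |p|_inf
// and the model is updated incrementally by (alpha - alpha_old).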
static void lineSearch(float **m, float **g, float **p, float f){
printf("\nPerforming line search\n");
int status = 0;
float alpha = 0;
float norm_m = mat::amax(m, nx, nz);
float norm_p = mat::amax(p, nx, nz);
float gtg = mat::dot(g, g, nx, nz);
float gtp = mat::dot(g, p, nx, nz);
float step_len_max = dat::ls_steplenmax * norm_m / norm_p;
int step_count = 0;
dat::step_lens[dat::ls_count] = 0;
dat::func_vals[dat::ls_count] = f;
dat::ls_gtg[dat::inv_count-1] = gtg;
dat::ls_gtp[dat::inv_count-1] = gtp;
dat::ls_count++;
float alpha_old = 0;
if(dat::ls_stepleninit && dat::ls_count <= 1){
alpha = dat::ls_stepleninit * norm_m / norm_p;
}
else{
alpha = calculateStep(step_count, step_len_max, &status);
}
while(1){
alpha_old = updateModel(m, p, alpha, alpha_old);
dat::step_lens[dat::ls_count] = alpha;
dat::func_vals[dat::ls_count] = calculateMisfit();
dat::ls_count++;
step_count++;
dat::neval++;
if(dat::optimize == 1){
alpha = calculateStepBT(step_count, step_len_max, &status);
}
else{
alpha = calculateStep(step_count, step_len_max, &status);
}
if(step_count < 10){
fprintf(dat::log_ls, " step 0%d misfit = %f\n", step_count, dat::func_vals[dat::ls_count-1]/dat::misfit_ref);
printf(" step 0%d misfit = %f\n", step_count, dat::func_vals[dat::ls_count-1]/dat::misfit_ref);
}
else{
fprintf(dat::log_ls, " step %d misfit = %f\n", step_count, dat::func_vals[dat::ls_count-1]/dat::misfit_ref);
printf(" step %d misfit = %f\n", step_count, dat::func_vals[dat::ls_count-1]/dat::misfit_ref);
}
if(status > 0){
fprintf(dat::log_ls, " alpha = %.2e\n", alpha);
printf(" alpha = %.2e\n", alpha);
float angle = calculateAngle(p, g, -1, nx, nz)*180/pi;
fprintf(dat::log_ls, " angle = %f\n\n", angle);
printf(" angle = %f\n", angle);
updateModel(m, p, alpha, alpha_old);
fprintf(dat::log_misfit, "%d %f\n", dat::neval, dat::func_vals[argmin(dat::func_vals, dat::ls_count)]/dat::misfit_ref);
return;
}
else if(status < 0){
updateModel(m, p, 0, alpha_old);
if(calculateAngle(p, g, -1, nx, nz) < 1e-3){
printf(" line search failed\n");
dat::inv_iteration = 0;
return;
}
else{
printf(" restarting line search...\n");
restartSearch(p, g);
lineSearch(m, g, p, f);
}
}
}
}
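// Top-level inversion driver: prepares observed data and the starting model,
// then iterates kernel (gradient) computation, search-direction update
// (CG or L-BFGS) and line search, exporting the model after each iteration.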
static void inversionRoutine(){
cublasCreate(&cublas_handle);
cusolverDnCreate(&solver_handle);
if(dat::misfit_type == 1){
cufftPlan1d(&cufft_handle, nt, CUFFT_C2C, 1);
}
if(dat::optimize == 1){
dat::lbfgs_used = 0;
}
{
mkdir(dat::output_path);
char parbuffer[80];
sprintf(parbuffer, "%s/Par_file", dat::parfile);
FILE *parfile = fopen(parbuffer, "r");
sprintf(parbuffer, "%s/par", dat::output_path);
FILE *outfile = fopen(parbuffer, "w");
sprintf(parbuffer, "%s/log", dat::output_path);
dat::log_ls = fopen(parbuffer,"w");
sprintf(parbuffer, "%s/misfit", dat::output_path);
dat::log_misfit = fopen(parbuffer,"w");
dat::neval = 0;
while(fgets(parbuffer, 80, parfile) != NULL){
for(int i = 0; i < 79 && parbuffer[i] != '\0'; i++){
if(parbuffer[i] == '#'){
parbuffer[i] = '\n';
parbuffer[i+1] = '\0';
break;
}
}
fprintf(outfile, "%s", parbuffer);
}
fclose(parfile);
fclose(outfile);
}
prepareObs();
exportData(-1);
loadModel(dat::model_init);
float **m_new;
float **m_old;
float **g_new;
switch(dat::inv_parameter){
case 0: m_new = dat::lambda; g_new = dat::K_lambda; break;
case 1: m_new = dat::mu; g_new = dat::K_mu; break;
case 2: m_new = dat::rho; g_new = dat::K_rho; break;
}
if(dat::optimize == 1){
dat::lbfgs_S = mat::createHost(dat::lbfgs_mem, nx, nz);
dat::lbfgs_Y = mat::createHost(dat::lbfgs_mem, nx, nz);
m_old = mat::create(nx, nz);
}
float **g_old = mat::create(nx, nz);
float **p_old = mat::create(nx, nz);
float **p_new = mat::create(nx, nz);
dat::func_vals = mat::createHost(dat::inv_iteration * dat::ls_stepcountmax);
dat::step_lens = mat::createHost(dat::inv_iteration * dat::ls_stepcountmax);
dat::ls_gtg = mat::createHost(dat::inv_iteration);
dat::ls_gtp = mat::createHost(dat::inv_iteration);
dat::ls_count = 0;
dat::inv_count = 0;
for(int iter = 0; iter < dat::inv_iteration; iter++){
fprintf(dat::log_ls, "iteration %d / %d\n", iter + 1, dat::inv_iteration);
printf("\n\nStarting iteration %d / %d\n", iter + 1, dat::inv_iteration);
float f = computeKernels();
if(iter == 0){
dat::misfit_ref = f;
}
dat::neval += 2;
int dir;
if(dat::optimize == 0){
dir = computeDirectionCG(p_new, p_old, g_new, g_old);
}
else{
dir = computeDirectionLBFGS(p_new, p_old, g_new, g_old, m_new, m_old);
mat::copy(m_old, m_new, nx, nz);
}
if(dir < 0){
restartSearch(p_new, g_new);
}
lineSearch(m_new, g_new, p_new, f);
mat::copy(p_old, p_new, nx, nz);
mat::copy(g_old, g_new, nx, nz);
exportData(iter);
}
fclose(dat::log_ls);
fclose(dat::log_misfit);
cublasDestroy(cublas_handle);
cusolverDnDestroy(solver_handle);
if(dat::misfit_type == 1){
cufftDestroy(cufft_handle);
}
}
int main(int argc, const char *argv[]){
const char *datapath;
if(argc == 1){
datapath = "data";
}
else{
datapath = argv[1];
}
dat::parfile = datapath;
if(importData(datapath)){
switch(mode){
case 0:{
inversionRoutine();
break;
}
case 1:{
loadModel(dat::model_init);
prepareSTF();
// dat::ntask = 1;
runForward(0, 3);
mkdir("output");
mkdir("output/0000");
if(sh){
mat::write(dat::uy_forward, dat::nsfe, nx2, nz, "vy");
}
if(psv){
mat::write(dat::ux_forward, dat::nsfe, nx, nz, "output/0000/ux_forward.bin");
mat::write(dat::uz_forward, dat::nsfe, nx, nz, "output/0000/uz_forward.bin");
}
writeSU();
float **tmp=mat::createHost(nrec,nt*ntask);
mat::copyDeviceToHost(tmp,dat::out_y,nrec,nt*ntask);
mat::write(tmp[0], nt*ntask, "ux");
mat::write(tmp[3], nt*ntask, "uy");
break;
}
case 2:{
cublasCreate(&cublas_handle);
cusolverDnCreate(&solver_handle);
mkdir("output");
dat::output_path = "output";
clock_t timestart = clock();
if(dat::misfit_type == 1){
cufftPlan1d(&cufft_handle, nt, CUFFT_C2C, 1);
}
prepareObs();
if(dat::obs_su){
printf("\n");
}
printf("\n");
loadModel(dat::model_init);
float f = computeKernels();
dat::misfit_ref = f;
printf("misfit = %f\ntotal time: %.2fs\n", f,(float)(clock() - timestart) / CLOCKS_PER_SEC);
exportData(0);
if(dat::misfit_type == 1){
cufftDestroy(cufft_handle);
}
break;
}
case 10:{
dat::obs_su = 0;
prepareObs();
writeSU();
break;
}
case 11:{
mkdir("output");
dat::output_path = "output";
loadModel(dat::model_init);
generateChecker(dat::mu, 0.1, 0.5, 2, 2);
exportData(-1);
break;
}
case 12:{
mkdir("output");
dat::output_path = "output";
loadModel(dat::model_init);
generateLayer(dat::mu, 0.1, 5);
exportData(-1);
break;
}
case 13:{
mkdir("output");
dat::output_path = "output";
loadModel(dat::model_init);
generateRandomLayer(dat::mu, 0.1, 0.4, 5);
exportData(-1);
break;
}
case 15:{
mkdir("output");
dat::output_path = "output";
loadModel(dat::model_init);
mat::copy(dat::mu, dat::mu, 0.64, nx, nz);
exportData(-1);
}
}
}
else{
printf("error loading data\n");
}
checkMemoryUsage();
return 0;
}
|
b00bd0dd668ed259873f14f076c032de4334b951.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "histogram_equalization_gpu_son.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *d_in = NULL;
hipMalloc(&d_in, XSIZE*YSIZE);
unsigned char *d_out = NULL;
hipMalloc(&d_out, XSIZE*YSIZE);
int *d_lut = NULL;
hipMalloc(&d_lut, XSIZE*YSIZE);
int img_size = XSIZE*YSIZE;
int serialNum = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
histogram_equalization_gpu_son), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_out,d_lut,img_size,serialNum);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
histogram_equalization_gpu_son), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_out,d_lut,img_size,serialNum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
histogram_equalization_gpu_son), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_out,d_lut,img_size,serialNum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b00bd0dd668ed259873f14f076c032de4334b951.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "histogram_equalization_gpu_son.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *d_in = NULL;
cudaMalloc(&d_in, XSIZE*YSIZE);
unsigned char *d_out = NULL;
cudaMalloc(&d_out, XSIZE*YSIZE);
int *d_lut = NULL;
cudaMalloc(&d_lut, XSIZE*YSIZE);
int img_size = XSIZE*YSIZE;
int serialNum = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
histogram_equalization_gpu_son<<<gridBlock,threadBlock>>>(d_in,d_out,d_lut,img_size,serialNum);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
histogram_equalization_gpu_son<<<gridBlock,threadBlock>>>(d_in,d_out,d_lut,img_size,serialNum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
histogram_equalization_gpu_son<<<gridBlock,threadBlock>>>(d_in,d_out,d_lut,img_size,serialNum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9d292a833d395d01313a747bff723eb5bf87420a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Dan Rolfe
#define BLOCKSIZE 32
/**
* cuda vector add function
**/
// there is a problem here, running this ruins the add
__global__ void d_add( float* __restrict__ x, float* __restrict__ y, float* __restrict__ z, int size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size)
z[index] = x[index] + y[index];
}
/**
* mul:
* cuda vector multiply function
**/
__global__ void d_mul( float* __restrict__ x, float* __restrict__ y, float* __restrict__ z, int size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size)
z[index] = x[index] * y[index];
}
| 9d292a833d395d01313a747bff723eb5bf87420a.cu | // Dan Rolfe
#define BLOCKSIZE 32
/**
* cuda vector add function
**/
// there is a problem here, running this ruins the add
__global__ void d_add( float* __restrict__ x, float* __restrict__ y, float* __restrict__ z, int size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size)
z[index] = x[index] + y[index];
}
/**
* mul:
* cuda vector multiply function
**/
__global__ void d_mul( float* __restrict__ x, float* __restrict__ y, float* __restrict__ z, int size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size)
z[index] = x[index] * y[index];
}
|
7c0b8d82b1edce406eaaa2675b3d2f0de15090ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
#include <time.h>
#include <stdlib.h>
void reduction(double* sum_ptr, const double* a, const double* b, long M, long N){
double sum = 0;
for (long j=0; j < M; j++) {
sum=0;
#pragma omp parallel for schedule(static) reduction(+:sum)
for (long i = 0; i < N; i++){
sum += a[j*M+i]*b[i];
}
sum_ptr[j]=sum;
}
}
void Check_CUDA_Error(const char *message){
hipError_t error = hipGetLastError();
if(error!=hipSuccess) {
fprintf(stderr,"ERROR: %s: %s\n", message, hipGetErrorString(error) );
exit(-1);
}
}
#define BLOCK_SIZE 1024
// Warp divergence
__global__ void reduction_kernel0(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x % 2 == 0) smem[threadIdx.x] += smem[threadIdx.x + 1];
__syncthreads();
if (threadIdx.x % 4 == 0) smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncthreads();
if (threadIdx.x % 8 == 0) smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncthreads();
if (threadIdx.x % 16 == 0) smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncthreads();
if (threadIdx.x % 32 == 0) smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncthreads();
if (threadIdx.x % 64 == 0) smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncthreads();
if (threadIdx.x % 128 == 0) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x % 256 == 0) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x % 512 == 0) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x] + smem[threadIdx.x + 512];
}
// Shared memory bank conflicts
__global__ void reduction_kernel1(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x * 2] += smem[threadIdx.x * 2 + 1];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x * 4] += smem[threadIdx.x * 4 + 2];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x * 8] += smem[threadIdx.x * 8 + 4];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x * 16] += smem[threadIdx.x * 16 + 8];
__syncthreads();
if (threadIdx.x < 32) smem[threadIdx.x * 32] += smem[threadIdx.x * 32 + 16];
__syncwarp();
if (threadIdx.x < 16) smem[threadIdx.x * 64] += smem[threadIdx.x * 64 + 32];
__syncwarp();
if (threadIdx.x < 8) smem[threadIdx.x * 128] += smem[threadIdx.x * 128 + 64];
__syncwarp();
if (threadIdx.x < 4) smem[threadIdx.x * 256] += smem[threadIdx.x * 256 + 128];
__syncwarp();
if (threadIdx.x < 2) smem[threadIdx.x * 512] += smem[threadIdx.x * 512 + 256];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[512];
}
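// First stage of a dot-product reduction: each block accumulates the
// elementwise products a[i]*b[i] of its chunk into sum[blockIdx.x];
// the remaining stages are handled by reduction_kernel2.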
__global__ void reduction_product(double* sum, const double* a, const double* b, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx]*b[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
__global__ void reduction_kernel2(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
int main() {
long N = (1UL<<24);
// long M = (1UL<<10);
srand((int)time(0));
double *a, *b;
hipHostMalloc((void**)&a, N * sizeof(double));
hipHostMalloc((void**)&b, N * sizeof(double));
#pragma omp parallel for schedule(static)
for (long i = 0; i < N; i++){
a[i] = ((double)rand())/RAND_MAX;
b[i] = ((double)rand())/RAND_MAX;
}
double sum_ref, sum;
double tt = omp_get_wtime();
reduction(&sum_ref, a, b, 1, N);
printf("CPU Bandwidth = %f GB/s\n", 2*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
double *a_d, *b_d, *y_d;
hipMalloc(&a_d, N*sizeof(double));
hipMalloc(&b_d, N*sizeof(double));
long N_work = 1;
for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
hipMalloc(&y_d, N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks
hipMemcpyAsync(a_d, a, N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpyAsync(b_d, b, N*sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
tt = omp_get_wtime();
double* sum_d = y_d;
long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
hipLaunchKernelGGL(( reduction_product), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d, a_d, b_d, N);
while (Nb > 1) {
long N1 = Nb;
Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
hipLaunchKernelGGL(( reduction_kernel2), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d + N1, sum_d, N1);
sum_d += N1;
}
hipMemcpyAsync(&sum, sum_d, 1*sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf("GPU Bandwidth = %f GB/s\n", 2*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
printf("Error = %f\n", fabs(sum-sum_ref));
hipFree(a_d);
hipFree(b_d);
hipFree(y_d);
return 0;
}
| 7c0b8d82b1edce406eaaa2675b3d2f0de15090ce.cu | #include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
#include <time.h>
#include <stdlib.h>
void reduction(double* sum_ptr, const double* a, const double* b, long M, long N){
double sum = 0;
for (long j=0; j < M; j++) {
sum=0;
#pragma omp parallel for schedule(static) reduction(+:sum)
for (long i = 0; i < N; i++){
sum += a[j*M+i]*b[i];
}
sum_ptr[j]=sum;
}
}
void Check_CUDA_Error(const char *message){
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess) {
fprintf(stderr,"ERROR: %s: %s\n", message, cudaGetErrorString(error) );
exit(-1);
}
}
#define BLOCK_SIZE 1024
// Warp divergence
__global__ void reduction_kernel0(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x % 2 == 0) smem[threadIdx.x] += smem[threadIdx.x + 1];
__syncthreads();
if (threadIdx.x % 4 == 0) smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncthreads();
if (threadIdx.x % 8 == 0) smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncthreads();
if (threadIdx.x % 16 == 0) smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncthreads();
if (threadIdx.x % 32 == 0) smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncthreads();
if (threadIdx.x % 64 == 0) smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncthreads();
if (threadIdx.x % 128 == 0) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x % 256 == 0) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x % 512 == 0) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x] + smem[threadIdx.x + 512];
}
// Shared memory bank conflicts
__global__ void reduction_kernel1(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x * 2] += smem[threadIdx.x * 2 + 1];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x * 4] += smem[threadIdx.x * 4 + 2];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x * 8] += smem[threadIdx.x * 8 + 4];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x * 16] += smem[threadIdx.x * 16 + 8];
__syncthreads();
if (threadIdx.x < 32) smem[threadIdx.x * 32] += smem[threadIdx.x * 32 + 16];
__syncwarp();
if (threadIdx.x < 16) smem[threadIdx.x * 64] += smem[threadIdx.x * 64 + 32];
__syncwarp();
if (threadIdx.x < 8) smem[threadIdx.x * 128] += smem[threadIdx.x * 128 + 64];
__syncwarp();
if (threadIdx.x < 4) smem[threadIdx.x * 256] += smem[threadIdx.x * 256 + 128];
__syncwarp();
if (threadIdx.x < 2) smem[threadIdx.x * 512] += smem[threadIdx.x * 512 + 256];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[512];
}
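// First stage of a dot-product reduction: each block accumulates the
// elementwise products a[i]*b[i] of its chunk into sum[blockIdx.x];
// the remaining stages are handled by reduction_kernel2.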
__global__ void reduction_product(double* sum, const double* a, const double* b, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx]*b[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
__global__ void reduction_kernel2(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
int main() {
long N = (1UL<<24);
// long M = (1UL<<10);
srand((int)time(0));
double *a, *b;
cudaMallocHost((void**)&a, N * sizeof(double));
cudaMallocHost((void**)&b, N * sizeof(double));
#pragma omp parallel for schedule(static)
for (long i = 0; i < N; i++){
a[i] = ((double)rand())/RAND_MAX;
b[i] = ((double)rand())/RAND_MAX;
}
double sum_ref, sum;
double tt = omp_get_wtime();
reduction(&sum_ref, a, b, 1, N);
printf("CPU Bandwidth = %f GB/s\n", 2*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
double *a_d, *b_d, *y_d;
cudaMalloc(&a_d, N*sizeof(double));
cudaMalloc(&b_d, N*sizeof(double));
long N_work = 1;
for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
cudaMalloc(&y_d, N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks
cudaMemcpyAsync(a_d, a, N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyAsync(b_d, b, N*sizeof(double), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
tt = omp_get_wtime();
double* sum_d = y_d;
long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
reduction_product<<<Nb,BLOCK_SIZE>>>(sum_d, a_d, b_d, N);
while (Nb > 1) {
long N1 = Nb;
Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
reduction_kernel2<<<Nb,BLOCK_SIZE>>>(sum_d + N1, sum_d, N1);
sum_d += N1;
}
cudaMemcpyAsync(&sum, sum_d, 1*sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf("GPU Bandwidth = %f GB/s\n", 2*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
printf("Error = %f\n", fabs(sum-sum_ref));
cudaFree(a_d);
cudaFree(b_d);
cudaFree(y_d);
return 0;
}
|
5de47fdd220f9d7230c149e85e2f98f63d2e10f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include "repeat.h"
typedef unsigned long long Dtype;
//typedef double Dtype;
//typedef int Dtype;
#define REPEAT 10
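// Despite its name, this kernel is a pointer-chasing latency probe: each
// repeat256 step issues a dependent load j = *(Dtype **)j, and the elapsed
// clock() cycles are accumulated into *d_time.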
__global__ void VecAdd(Dtype** A, int* N, unsigned long long* d_time, Dtype* xj, Dtype* xi)
{
Dtype *j = *A;
Dtype **k;
unsigned int start_t, end_t;
k = (Dtype **) malloc(sizeof(Dtype) * (1024 * (*N)));
for (int it=0; it < *N; it++)
j=*(Dtype **)j;
*xi=*j;
*d_time = 0;
for (int it=0; it < (*N) * 10; it++)
{
start_t = clock();
repeat256(j=*(Dtype **)(unsigned long long)j;)
// k[it] = j;
end_t = clock();
((Dtype *)A)[*N]=(Dtype) j;
*d_time += (unsigned long long)(end_t - start_t);
}
// ((Dtype *)A)[*N]=(Dtype) j;
//*d_time = (unsigned long long)(end_t - start_t);
// printf( "%d %llu\n", *N, *d_time/256/4);
}
| 5de47fdd220f9d7230c149e85e2f98f63d2e10f9.cu | #include<stdio.h>
#include "repeat.h"
typedef unsigned long long Dtype;
//typedef double Dtype;
//typedef int Dtype;
#define REPEAT 10
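// Despite its name, this kernel is a pointer-chasing latency probe: each
// repeat256 step issues a dependent load j = *(Dtype **)j, and the elapsed
// clock() cycles are accumulated into *d_time.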
__global__ void VecAdd(Dtype** A, int* N, unsigned long long* d_time, Dtype* xj, Dtype* xi)
{
Dtype *j = *A;
Dtype **k;
unsigned int start_t, end_t;
k = (Dtype **) malloc(sizeof(Dtype) * (1024 * (*N)));
for (int it=0; it < *N; it++)
j=*(Dtype **)j;
*xi=*j;
*d_time = 0;
for (int it=0; it < (*N) * 10; it++)
{
start_t = clock();
repeat256(j=*(Dtype **)(unsigned long long)j;)
// k[it] = j;
end_t = clock();
((Dtype *)A)[*N]=(Dtype) j;
*d_time += (unsigned long long)(end_t - start_t);
}
// ((Dtype *)A)[*N]=(Dtype) j;
//*d_time = (unsigned long long)(end_t - start_t);
// printf( "%d %llu\n", *N, *d_time/256/4);
}
|
059624ae2461dce7ec95eddc7edb758f5f0229ef.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "initWeights.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
unsigned int seed = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
initWeights), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,seed);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
initWeights), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,seed);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
initWeights), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,seed);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 059624ae2461dce7ec95eddc7edb758f5f0229ef.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "initWeights.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
unsigned int seed = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
initWeights<<<gridBlock,threadBlock>>>(dst,seed);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
initWeights<<<gridBlock,threadBlock>>>(dst,seed);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
initWeights<<<gridBlock,threadBlock>>>(dst,seed);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
db2c5d1f057e7d41a2fed1a44f331a7c45d52266.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void square(float * d_out, float * d_in) {
const unsigned int lid = threadIdx.x;
const unsigned int gid = blockIdx.x*blockDim.x + lid;
float f = d_in[gid];
d_out[gid] = f * f;
} | db2c5d1f057e7d41a2fed1a44f331a7c45d52266.cu | #include "includes.h"
__global__ void square(float * d_out, float * d_in) {
const unsigned int lid = threadIdx.x;
const unsigned int gid = blockIdx.x*blockDim.x + lid;
float f = d_in[gid];
d_out[gid] = f * f;
} |
39c0d33a54e8950d53a45e7f4e24341f90b36faf.hip | // !!! This is a file automatically generated by hipify!!!
#include "light_transport_common.cuh"
namespace VLR {
// Context-scope Variables
rtDeclareVariable(optix::uint2, pv_imageSize, , );
rtDeclareVariable(uint32_t, pv_numAccumFrames, , );
rtDeclareVariable(ProgSigSampleLensPosition, pv_progSampleLensPosition, , );
rtDeclareVariable(ProgSigSampleIDF, pv_progSampleIDF, , );
rtBuffer<KernelRNG, 2> pv_rngBuffer;
rtBuffer<SpectrumStorage, 2> pv_outputBuffer;
// Common Closest Hit Program for All Primitive Types and Materials
RT_PROGRAM void pathTracingIteration() {
KernelRNG &rng = sm_payload.rng;
WavelengthSamples &wls = sm_payload.wls;
SurfacePoint surfPt;
float hypAreaPDF;
calcSurfacePoint(&surfPt, &hypAreaPDF);
const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_materialIndex];
BSDF bsdf(matDesc, surfPt, wls);
EDF edf(matDesc, surfPt, wls);
Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction));
// implicit light sampling
SampledSpectrum spEmittance = edf.evaluateEmittance();
if (spEmittance.hasNonZero()) {
SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal);
float MISWeight = 1.0f;
if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) {
float bsdfPDF = sm_payload.prevDirPDF;
float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin));
float lightPDF = pv_importance / getSumLightImportances() * hypAreaPDF * dist2 / ::fabs(dirOutLocal.z);
MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
}
sm_payload.contribution += sm_payload.alpha * Le * MISWeight;
}
if (surfPt.atInfinity || sm_payload.maxLengthTerminate)
return;
// Russian roulette
float continueProb = std::fmin(sm_payload.alpha.importance(wls.selectedLambdaIndex()) / sm_payload.initImportance, 1.0f);
if (rng.getFloat0cTo1o() >= continueProb)
return;
sm_payload.alpha /= continueProb;
Normal3D geomNormalLocal = surfPt.shadingFrame.toLocal(surfPt.geometricNormal);
BSDFQuery fsQuery(dirOutLocal, geomNormalLocal, DirectionType::All(), wls);
// Next Event Estimation (explicit light sampling)
if (bsdf.hasNonDelta()) {
SurfaceLight light;
float lightProb;
float uPrim;
selectSurfaceLight(rng.getFloat0cTo1o(), &light, &lightProb, &uPrim);
SurfaceLightPosSample lpSample(uPrim, rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
SurfaceLightPosQueryResult lpResult;
light.sample(lpSample, &lpResult);
const SurfaceMaterialDescriptor lightMatDesc = pv_materialDescriptorBuffer[lpResult.materialIndex];
EDF ledf(lightMatDesc, lpResult.surfPt, wls);
SampledSpectrum M = ledf.evaluateEmittance();
Vector3D shadowRayDir;
float squaredDistance;
float fractionalVisibility;
if (M.hasNonZero() && testVisibility(surfPt, lpResult.surfPt, &shadowRayDir, &squaredDistance, &fractionalVisibility)) {
Vector3D shadowRayDir_l = lpResult.surfPt.toLocal(-shadowRayDir);
Vector3D shadowRayDir_sn = surfPt.toLocal(shadowRayDir);
SampledSpectrum Le = M * ledf.evaluate(EDFQuery(), shadowRayDir_l);
float lightPDF = lightProb * lpResult.areaPDF;
SampledSpectrum fs = bsdf.evaluate(fsQuery, shadowRayDir_sn);
float cosLight = lpResult.surfPt.calcCosTerm(-shadowRayDir);
float bsdfPDF = bsdf.evaluatePDF(fsQuery, shadowRayDir_sn) * cosLight / squaredDistance;
float MISWeight = 1.0f;
if (!lpResult.posType.isDelta() && !std::isinf(lightPDF))
MISWeight = (lightPDF * lightPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
float G = fractionalVisibility * absDot(shadowRayDir_sn, geomNormalLocal) * cosLight / squaredDistance;
                float scalarCoeff = G * MISWeight / lightPDF; // Folding this directly into the contribution expression gives odd results (possibly a CUDA bug), so keep it as a separate scalar.
sm_payload.contribution += sm_payload.alpha * Le * fs * scalarCoeff;
}
}
BSDFSample sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
BSDFQueryResult fsResult;
SampledSpectrum fs = bsdf.sample(fsQuery, sample, &fsResult);
if (fs == SampledSpectrum::Zero() || fsResult.dirPDF == 0.0f)
return;
if (fsResult.sampledType.isDispersive() && !wls.singleIsSelected()) {
fsResult.dirPDF /= SampledSpectrum::NumComponents();
wls.setSingleIsSelected();
}
float cosFactor = dot(fsResult.dirLocal, geomNormalLocal);
sm_payload.alpha *= fs * (::fabs(cosFactor) / fsResult.dirPDF);
Vector3D dirIn = surfPt.fromLocal(fsResult.dirLocal);
sm_payload.origin = offsetRayOrigin(surfPt.position, cosFactor > 0.0f ? surfPt.geometricNormal : -surfPt.geometricNormal);
sm_payload.direction = dirIn;
sm_payload.prevDirPDF = fsResult.dirPDF;
sm_payload.prevSampledType = fsResult.sampledType;
sm_payload.terminate = false;
}
    // JP: Ideally we would use an Intersection/Bounding Box Program for an infinitely large sphere
    //     so that the environment light is also handled uniformly by the Closest Hit Program.
    //     However, OptiX's BVH builder is LBVH-based, so generating an infinite AABB is risky;
    //     we therefore handle the environment light in the Miss Program instead.
RT_PROGRAM void pathTracingMiss() {
if (pv_envLightDescriptor.importance == 0)
return;
Vector3D direction = asVector3D(sm_ray.direction);
float phi, theta;
direction.toPolarYUp(&theta, &phi);
float sinTheta, cosTheta;
VLR::sincos(theta, &sinTheta, &cosTheta);
Vector3D texCoord0Dir = Vector3D(-cosTheta, 0.0f, -sinTheta);
ReferenceFrame shadingFrame;
shadingFrame.x = texCoord0Dir;
shadingFrame.z = -direction;
shadingFrame.y = cross(shadingFrame.z, shadingFrame.x);
SurfacePoint surfPt;
surfPt.position = Point3D(direction.x, direction.y, direction.z);
surfPt.shadingFrame = shadingFrame;
surfPt.isPoint = false;
surfPt.atInfinity = true;
surfPt.geometricNormal = -direction;
surfPt.u = phi;
surfPt.v = theta;
surfPt.texCoord = TexCoord2D(phi / (2 * M_PIf), theta / M_PIf);
float hypAreaPDF = evaluateEnvironmentAreaPDF(phi, theta);
const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_envLightDescriptor.body.asEnvironmentLight.materialIndex];
EDF edf(matDesc, surfPt, sm_payload.wls);
Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction));
// implicit light sampling
SampledSpectrum spEmittance = edf.evaluateEmittance();
if (spEmittance.hasNonZero()) {
SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal);
float MISWeight = 1.0f;
if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) {
float bsdfPDF = sm_payload.prevDirPDF;
float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin));
float lightPDF = pv_envLightDescriptor.importance / getSumLightImportances() * hypAreaPDF * dist2 / ::fabs(dirOutLocal.z);
MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
}
sm_payload.contribution += sm_payload.alpha * Le * MISWeight;
}
}
// Common Ray Generation Program for All Camera Types
RT_PROGRAM void pathTracing() {
KernelRNG rng = pv_rngBuffer[sm_launchIndex];
optix::float2 p = make_float2(sm_launchIndex.x + rng.getFloat0cTo1o(), sm_launchIndex.y + rng.getFloat0cTo1o());
float selectWLPDF;
WavelengthSamples wls = WavelengthSamples::createWithEqualOffsets(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), &selectWLPDF);
LensPosSample We0Sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
LensPosQueryResult We0Result;
SampledSpectrum We0 = pv_progSampleLensPosition(wls, We0Sample, &We0Result);
IDFSample We1Sample(p.x / pv_imageSize.x, p.y / pv_imageSize.y);
IDFQueryResult We1Result;
SampledSpectrum We1 = pv_progSampleIDF(We0Result.surfPt, wls, We1Sample, &We1Result);
Vector3D rayDir = We0Result.surfPt.fromLocal(We1Result.dirLocal);
SampledSpectrum alpha = (We0 * We1) * (We0Result.surfPt.calcCosTerm(rayDir) / (We0Result.areaPDF * We1Result.dirPDF * selectWLPDF));
optix::Ray ray = optix::make_Ray(asOptiXType(We0Result.surfPt.position), asOptiXType(rayDir), RayType::Primary, 0.0f, FLT_MAX);
Payload payload;
payload.maxLengthTerminate = false;
payload.rng = rng;
payload.initImportance = alpha.importance(wls.selectedLambdaIndex());
payload.wls = wls;
payload.alpha = alpha;
payload.contribution = SampledSpectrum::Zero();
const uint32_t MaxPathLength = 25;
uint32_t pathLength = 0;
while (true) {
payload.terminate = true;
++pathLength;
if (pathLength >= MaxPathLength)
payload.maxLengthTerminate = true;
rtTrace(pv_topGroup, ray, payload);
if (payload.terminate)
break;
VLRAssert(pathLength < MaxPathLength, "Path should be terminated... Something went wrong...");
ray = optix::make_Ray(asOptiXType(payload.origin), asOptiXType(payload.direction), RayType::Scattered, 0.0f, FLT_MAX);
}
pv_rngBuffer[sm_launchIndex] = payload.rng;
if (!payload.contribution.allFinite()) {
vlrprintf("Pass %u, (%u, %u): Not a finite value.\n", pv_numAccumFrames, sm_launchIndex.x, sm_launchIndex.y);
return;
}
if (pv_numAccumFrames == 1)
pv_outputBuffer[sm_launchIndex].reset();
pv_outputBuffer[sm_launchIndex].add(wls, payload.contribution);
}
// Exception Program
RT_PROGRAM void exception() {
//uint32_t code = rtGetExceptionCode();
rtPrintExceptionDetails();
}
}
| 39c0d33a54e8950d53a45e7f4e24341f90b36faf.cu | #include "light_transport_common.cuh"
namespace VLR {
// Context-scope Variables
rtDeclareVariable(optix::uint2, pv_imageSize, , );
rtDeclareVariable(uint32_t, pv_numAccumFrames, , );
rtDeclareVariable(ProgSigSampleLensPosition, pv_progSampleLensPosition, , );
rtDeclareVariable(ProgSigSampleIDF, pv_progSampleIDF, , );
rtBuffer<KernelRNG, 2> pv_rngBuffer;
rtBuffer<SpectrumStorage, 2> pv_outputBuffer;
// Common Closest Hit Program for All Primitive Types and Materials
RT_PROGRAM void pathTracingIteration() {
KernelRNG &rng = sm_payload.rng;
WavelengthSamples &wls = sm_payload.wls;
SurfacePoint surfPt;
float hypAreaPDF;
calcSurfacePoint(&surfPt, &hypAreaPDF);
const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_materialIndex];
BSDF bsdf(matDesc, surfPt, wls);
EDF edf(matDesc, surfPt, wls);
Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction));
// implicit light sampling
SampledSpectrum spEmittance = edf.evaluateEmittance();
if (spEmittance.hasNonZero()) {
SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal);
float MISWeight = 1.0f;
if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) {
float bsdfPDF = sm_payload.prevDirPDF;
float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin));
float lightPDF = pv_importance / getSumLightImportances() * hypAreaPDF * dist2 / std::fabs(dirOutLocal.z);
MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
}
sm_payload.contribution += sm_payload.alpha * Le * MISWeight;
}
if (surfPt.atInfinity || sm_payload.maxLengthTerminate)
return;
// Russian roulette
float continueProb = std::fmin(sm_payload.alpha.importance(wls.selectedLambdaIndex()) / sm_payload.initImportance, 1.0f);
if (rng.getFloat0cTo1o() >= continueProb)
return;
sm_payload.alpha /= continueProb;
Normal3D geomNormalLocal = surfPt.shadingFrame.toLocal(surfPt.geometricNormal);
BSDFQuery fsQuery(dirOutLocal, geomNormalLocal, DirectionType::All(), wls);
// Next Event Estimation (explicit light sampling)
if (bsdf.hasNonDelta()) {
SurfaceLight light;
float lightProb;
float uPrim;
selectSurfaceLight(rng.getFloat0cTo1o(), &light, &lightProb, &uPrim);
SurfaceLightPosSample lpSample(uPrim, rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
SurfaceLightPosQueryResult lpResult;
light.sample(lpSample, &lpResult);
const SurfaceMaterialDescriptor lightMatDesc = pv_materialDescriptorBuffer[lpResult.materialIndex];
EDF ledf(lightMatDesc, lpResult.surfPt, wls);
SampledSpectrum M = ledf.evaluateEmittance();
Vector3D shadowRayDir;
float squaredDistance;
float fractionalVisibility;
if (M.hasNonZero() && testVisibility(surfPt, lpResult.surfPt, &shadowRayDir, &squaredDistance, &fractionalVisibility)) {
Vector3D shadowRayDir_l = lpResult.surfPt.toLocal(-shadowRayDir);
Vector3D shadowRayDir_sn = surfPt.toLocal(shadowRayDir);
SampledSpectrum Le = M * ledf.evaluate(EDFQuery(), shadowRayDir_l);
float lightPDF = lightProb * lpResult.areaPDF;
SampledSpectrum fs = bsdf.evaluate(fsQuery, shadowRayDir_sn);
float cosLight = lpResult.surfPt.calcCosTerm(-shadowRayDir);
float bsdfPDF = bsdf.evaluatePDF(fsQuery, shadowRayDir_sn) * cosLight / squaredDistance;
float MISWeight = 1.0f;
if (!lpResult.posType.isDelta() && !std::isinf(lightPDF))
MISWeight = (lightPDF * lightPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
float G = fractionalVisibility * absDot(shadowRayDir_sn, geomNormalLocal) * cosLight / squaredDistance;
                float scalarCoeff = G * MISWeight / lightPDF; // Folding this directly into the contribution expression gives odd results (possibly a CUDA bug), so keep it as a separate scalar.
sm_payload.contribution += sm_payload.alpha * Le * fs * scalarCoeff;
}
}
BSDFSample sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
BSDFQueryResult fsResult;
SampledSpectrum fs = bsdf.sample(fsQuery, sample, &fsResult);
if (fs == SampledSpectrum::Zero() || fsResult.dirPDF == 0.0f)
return;
if (fsResult.sampledType.isDispersive() && !wls.singleIsSelected()) {
fsResult.dirPDF /= SampledSpectrum::NumComponents();
wls.setSingleIsSelected();
}
float cosFactor = dot(fsResult.dirLocal, geomNormalLocal);
sm_payload.alpha *= fs * (std::fabs(cosFactor) / fsResult.dirPDF);
Vector3D dirIn = surfPt.fromLocal(fsResult.dirLocal);
sm_payload.origin = offsetRayOrigin(surfPt.position, cosFactor > 0.0f ? surfPt.geometricNormal : -surfPt.geometricNormal);
sm_payload.direction = dirIn;
sm_payload.prevDirPDF = fsResult.dirPDF;
sm_payload.prevSampledType = fsResult.sampledType;
sm_payload.terminate = false;
}
    // JP: Ideally we would use an Intersection/Bounding Box Program for an infinitely large sphere
    //     so that the environment light is also handled uniformly by the Closest Hit Program.
    //     However, OptiX's BVH builder is LBVH-based, so generating an infinite AABB is risky;
    //     we therefore handle the environment light in the Miss Program instead.
RT_PROGRAM void pathTracingMiss() {
if (pv_envLightDescriptor.importance == 0)
return;
Vector3D direction = asVector3D(sm_ray.direction);
float phi, theta;
direction.toPolarYUp(&theta, &phi);
float sinTheta, cosTheta;
VLR::sincos(theta, &sinTheta, &cosTheta);
Vector3D texCoord0Dir = Vector3D(-cosTheta, 0.0f, -sinTheta);
ReferenceFrame shadingFrame;
shadingFrame.x = texCoord0Dir;
shadingFrame.z = -direction;
shadingFrame.y = cross(shadingFrame.z, shadingFrame.x);
SurfacePoint surfPt;
surfPt.position = Point3D(direction.x, direction.y, direction.z);
surfPt.shadingFrame = shadingFrame;
surfPt.isPoint = false;
surfPt.atInfinity = true;
surfPt.geometricNormal = -direction;
surfPt.u = phi;
surfPt.v = theta;
surfPt.texCoord = TexCoord2D(phi / (2 * M_PIf), theta / M_PIf);
float hypAreaPDF = evaluateEnvironmentAreaPDF(phi, theta);
const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_envLightDescriptor.body.asEnvironmentLight.materialIndex];
EDF edf(matDesc, surfPt, sm_payload.wls);
Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction));
// implicit light sampling
SampledSpectrum spEmittance = edf.evaluateEmittance();
if (spEmittance.hasNonZero()) {
SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal);
float MISWeight = 1.0f;
if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) {
float bsdfPDF = sm_payload.prevDirPDF;
float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin));
float lightPDF = pv_envLightDescriptor.importance / getSumLightImportances() * hypAreaPDF * dist2 / std::fabs(dirOutLocal.z);
MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
}
sm_payload.contribution += sm_payload.alpha * Le * MISWeight;
}
}
// Common Ray Generation Program for All Camera Types
RT_PROGRAM void pathTracing() {
KernelRNG rng = pv_rngBuffer[sm_launchIndex];
optix::float2 p = make_float2(sm_launchIndex.x + rng.getFloat0cTo1o(), sm_launchIndex.y + rng.getFloat0cTo1o());
float selectWLPDF;
WavelengthSamples wls = WavelengthSamples::createWithEqualOffsets(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), &selectWLPDF);
LensPosSample We0Sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
LensPosQueryResult We0Result;
SampledSpectrum We0 = pv_progSampleLensPosition(wls, We0Sample, &We0Result);
IDFSample We1Sample(p.x / pv_imageSize.x, p.y / pv_imageSize.y);
IDFQueryResult We1Result;
SampledSpectrum We1 = pv_progSampleIDF(We0Result.surfPt, wls, We1Sample, &We1Result);
Vector3D rayDir = We0Result.surfPt.fromLocal(We1Result.dirLocal);
SampledSpectrum alpha = (We0 * We1) * (We0Result.surfPt.calcCosTerm(rayDir) / (We0Result.areaPDF * We1Result.dirPDF * selectWLPDF));
optix::Ray ray = optix::make_Ray(asOptiXType(We0Result.surfPt.position), asOptiXType(rayDir), RayType::Primary, 0.0f, FLT_MAX);
Payload payload;
payload.maxLengthTerminate = false;
payload.rng = rng;
payload.initImportance = alpha.importance(wls.selectedLambdaIndex());
payload.wls = wls;
payload.alpha = alpha;
payload.contribution = SampledSpectrum::Zero();
const uint32_t MaxPathLength = 25;
uint32_t pathLength = 0;
while (true) {
payload.terminate = true;
++pathLength;
if (pathLength >= MaxPathLength)
payload.maxLengthTerminate = true;
rtTrace(pv_topGroup, ray, payload);
if (payload.terminate)
break;
VLRAssert(pathLength < MaxPathLength, "Path should be terminated... Something went wrong...");
ray = optix::make_Ray(asOptiXType(payload.origin), asOptiXType(payload.direction), RayType::Scattered, 0.0f, FLT_MAX);
}
pv_rngBuffer[sm_launchIndex] = payload.rng;
if (!payload.contribution.allFinite()) {
vlrprintf("Pass %u, (%u, %u): Not a finite value.\n", pv_numAccumFrames, sm_launchIndex.x, sm_launchIndex.y);
return;
}
if (pv_numAccumFrames == 1)
pv_outputBuffer[sm_launchIndex].reset();
pv_outputBuffer[sm_launchIndex].add(wls, payload.contribution);
}
// Exception Program
RT_PROGRAM void exception() {
//uint32_t code = rtGetExceptionCode();
rtPrintExceptionDetails();
}
}
|
d211f1cd43ec6f93cbc42e15b5d1409c32857cbe.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
typedef enum{
MOMENT_MODE_0 =0, // L2 regularization mode
MOMENT_MODE_1 =1 // Decoupled weight decay mode
} adamMode_t;
std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::optional<bool> per_tensor_python);
using MATH_T = float;
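// Stage 1 updates the Adam moments m and v for each chunk and writes the two
// LANS update directions in place: the moment-based direction into g and the
// gradient-based direction into q.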
template<typename T>
struct LANSStage1Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<5>& tl,
const float beta1,
const float beta2,
const float beta3,
const float beta1_correction,
const float beta2_correction,
const float epsilon,
adamMode_t mode,
const float decay,
float* per_tensor_grad_norm,
bool normalize_grad)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
float grad_norm = per_tensor_grad_norm[tensor_num];
T* g = (T*)tl.addresses[0][tensor_loc];
g += chunk_idx*chunk_size;
T* q = (T*)tl.addresses[1][tensor_loc];
q += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[2][tensor_loc];
p += chunk_idx*chunk_size;
T* m = (T*)tl.addresses[3][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T*)tl.addresses[4][tensor_loc];
v += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// see note in multi_tensor_scale_kernel.cu
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_g[ILP];
MATH_T r_q[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_g[ii] = g[i];
r_q[ii] = q[i];
// special ?optimization? for lans stage 1
if (decay == 0) {
r_p[ii] = MATH_T(0);
}
else {
r_p[ii] = p[i];
}
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_q[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
MATH_T scaled_grad = r_g[ii];
if (normalize_grad && grad_norm != 0.0f) {
scaled_grad /= (grad_norm + epsilon);
}
if (mode == MOMENT_MODE_0) {
// L2 on scaled grad
scaled_grad = scaled_grad + decay*r_p[ii];
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = next_m_unbiased / denom;
r_q[ii] = scaled_grad / denom;
}
else {
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T scaled_p = decay * r_p[ii];
r_p[ii] = (next_m_unbiased/denom) + scaled_p;
r_q[ii] = (scaled_grad/denom) + scaled_p;
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
g[i] = r_p[ii];
q[i] = r_q[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
};
// Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
// It computes new parameter value.
template<typename T>
struct LANSStage2Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<3>& tl,
const float beta1,
const float beta3,
const float* per_tensor_param_norm,
const float* per_tensor_update_m_norm,
const float* per_tensor_update_g_norm,
const float learning_rate)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
float param_norm = per_tensor_param_norm[tensor_num];
float update_m_norm = per_tensor_update_m_norm[tensor_num];
float update_g_norm = per_tensor_update_g_norm[tensor_num];
MATH_T ratio_m = (update_m_norm != 0.0f && param_norm != 0.0f) ? learning_rate * (param_norm / update_m_norm) : learning_rate;
MATH_T ratio_g = (update_g_norm != 0.0f && param_norm != 0.0f) ? learning_rate * (param_norm / update_g_norm) : learning_rate;
ratio_m *= beta1;
ratio_g *= beta3;
T* update_m = (T*)tl.addresses[0][tensor_loc];
update_m += chunk_idx*chunk_size;
T* update_g = (T*)tl.addresses[1][tensor_loc];
update_g += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[2][tensor_loc];
p += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_p[ILP];
MATH_T r_update_m[ILP];
MATH_T r_update_g[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_p[ii] = p[i];
r_update_m[ii] = update_m[i];
r_update_g[ii] = update_g[i];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = r_p[ii] - (ratio_m * r_update_m[ii]) - (ratio_g * r_update_g[ii]);
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
}
}
}
}
};
void multi_tensor_lans_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
const float lr,
const float beta1,
const float beta2,
const float epsilon,
const int step,
const int bias_correction,
const float weight_decay,
const int grad_averaging,
const int mode,
const bool normalize_grad)
{
using namespace at;
  // Master weight and 32-bit momentum (potentially changing) are not handled by this,
  // so we assume all tensors are of the same type.
// Handle bias correction mode
float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
if (bias_correction == 1) {
bias_correction1 = 1 - ::pow(beta1, step);
bias_correction2 = 1 - ::pow(beta2, step);
}
// Handle grad averaging mode
float beta3 = 1.0f;
if (grad_averaging == 1) beta3 = 1 - beta1;
std::vector<std::vector<at::Tensor>> grad_list(tensor_lists.begin(), tensor_lists.begin()+1);
std::vector<std::vector<at::Tensor>> param_list(tensor_lists.begin()+2, tensor_lists.begin()+3);
// Compute per-layer grad norm
auto grad_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, grad_list, true);
// Compute per tensor param norm
auto param_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, param_list, true);
  // We now modify grad in place to store the update before computing its norm.
  // Generally this is not an issue since people modify grad in the step() method all the time.
  // We could also grab a list of empty tensors to avoid this, but I'd like to save space/CPU code.
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lans_stage_1",
multi_tensor_apply<5>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
LANSStage1Functor<scalar_t_0>(),
beta1,
beta2,
beta3, // 1-beta1 or 1 depends on averaging mode
bias_correction1,
bias_correction2,
epsilon,
(adamMode_t) mode,
weight_decay,
std::get<1>(grad_norm_tuple).DATA_PTR<float>(),
normalize_grad); )
// Compute update norms
auto update_m_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, grad_list, true);
std::vector<std::vector<at::Tensor>> q_list(tensor_lists.begin()+1, tensor_lists.begin()+2);
auto update_g_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, q_list, true);
std::vector<std::vector<at::Tensor>> grad_q_param_list(tensor_lists.begin(), tensor_lists.begin()+3);
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lans_stage_2",
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
grad_q_param_list,
LANSStage2Functor<scalar_t_0>(),
beta1,
beta3,
std::get<1>(param_norm_tuple).DATA_PTR<float>(),
std::get<1>(update_m_norm_tuple).DATA_PTR<float>(),
std::get<1>(update_g_norm_tuple).DATA_PTR<float>(),
lr); )
AT_CUDA_CHECK(hipGetLastError());
}
| d211f1cd43ec6f93cbc42e15b5d1409c32857cbe.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
typedef enum{
MOMENT_MODE_0 =0, // L2 regularization mode
MOMENT_MODE_1 =1 // Decoupled weight decay mode
} adamMode_t;
std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::optional<bool> per_tensor_python);
using MATH_T = float;
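// Stage 1 updates the Adam moments m and v for each chunk and writes the two
// LANS update directions in place: the moment-based direction into g and the
// gradient-based direction into q.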
template<typename T>
struct LANSStage1Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<5>& tl,
const float beta1,
const float beta2,
const float beta3,
const float beta1_correction,
const float beta2_correction,
const float epsilon,
adamMode_t mode,
const float decay,
float* per_tensor_grad_norm,
bool normalize_grad)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
float grad_norm = per_tensor_grad_norm[tensor_num];
T* g = (T*)tl.addresses[0][tensor_loc];
g += chunk_idx*chunk_size;
T* q = (T*)tl.addresses[1][tensor_loc];
q += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[2][tensor_loc];
p += chunk_idx*chunk_size;
T* m = (T*)tl.addresses[3][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T*)tl.addresses[4][tensor_loc];
v += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// see note in multi_tensor_scale_kernel.cu
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_g[ILP];
MATH_T r_q[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_g[ii] = g[i];
r_q[ii] = q[i];
// special ?optimization? for lans stage 1
if (decay == 0) {
r_p[ii] = MATH_T(0);
}
else {
r_p[ii] = p[i];
}
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_q[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
MATH_T scaled_grad = r_g[ii];
if (normalize_grad && grad_norm != 0.0f) {
scaled_grad /= (grad_norm + epsilon);
}
if (mode == MOMENT_MODE_0) {
// L2 on scaled grad
scaled_grad = scaled_grad + decay*r_p[ii];
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = next_m_unbiased / denom;
r_q[ii] = scaled_grad / denom;
}
else {
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T scaled_p = decay * r_p[ii];
r_p[ii] = (next_m_unbiased/denom) + scaled_p;
r_q[ii] = (scaled_grad/denom) + scaled_p;
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
g[i] = r_p[ii];
q[i] = r_q[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
};
// Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
// It computes new parameter value.
template<typename T>
struct LANSStage2Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<3>& tl,
const float beta1,
const float beta3,
const float* per_tensor_param_norm,
const float* per_tensor_update_m_norm,
const float* per_tensor_update_g_norm,
const float learning_rate)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
float param_norm = per_tensor_param_norm[tensor_num];
float update_m_norm = per_tensor_update_m_norm[tensor_num];
float update_g_norm = per_tensor_update_g_norm[tensor_num];
MATH_T ratio_m = (update_m_norm != 0.0f && param_norm != 0.0f) ? learning_rate * (param_norm / update_m_norm) : learning_rate;
MATH_T ratio_g = (update_g_norm != 0.0f && param_norm != 0.0f) ? learning_rate * (param_norm / update_g_norm) : learning_rate;
ratio_m *= beta1;
ratio_g *= beta3;
T* update_m = (T*)tl.addresses[0][tensor_loc];
update_m += chunk_idx*chunk_size;
T* update_g = (T*)tl.addresses[1][tensor_loc];
update_g += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[2][tensor_loc];
p += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_p[ILP];
MATH_T r_update_m[ILP];
MATH_T r_update_g[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_p[ii] = p[i];
r_update_m[ii] = update_m[i];
r_update_g[ii] = update_g[i];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = r_p[ii] - (ratio_m * r_update_m[ii]) - (ratio_g * r_update_g[ii]);
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
}
}
}
}
};
void multi_tensor_lans_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
const float lr,
const float beta1,
const float beta2,
const float epsilon,
const int step,
const int bias_correction,
const float weight_decay,
const int grad_averaging,
const int mode,
const bool normalize_grad)
{
using namespace at;
// Master weight and 32-bit momentum (potentially changing) are not handled by this,
// so we assume every tensor is of the same type
// Handle bias correction mode
float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
if (bias_correction == 1) {
bias_correction1 = 1 - std::pow(beta1, step);
bias_correction2 = 1 - std::pow(beta2, step);
}
// Handle grad averaging mode
float beta3 = 1.0f;
if (grad_averaging == 1) beta3 = 1 - beta1;
std::vector<std::vector<at::Tensor>> grad_list(tensor_lists.begin(), tensor_lists.begin()+1);
std::vector<std::vector<at::Tensor>> param_list(tensor_lists.begin()+2, tensor_lists.begin()+3);
// Compute per-layer grad norm
auto grad_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, grad_list, true);
// Compute per tensor param norm
auto param_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, param_list, true);
// We now modify grad in place to store the update before computing its norm.
// Generally this is not an issue since people modify grad in the step() method all the time.
// We could also grab a list of empty tensors to avoid this, but I'd like to save space/CPU code.
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lans_stage_1",
multi_tensor_apply<5>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
LANSStage1Functor<scalar_t_0>(),
beta1,
beta2,
beta3, // 1-beta1 or 1 depends on averaging mode
bias_correction1,
bias_correction2,
epsilon,
(adamMode_t) mode,
weight_decay,
std::get<1>(grad_norm_tuple).DATA_PTR<float>(),
normalize_grad); )
// Compute update norms
auto update_m_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, grad_list, true);
std::vector<std::vector<at::Tensor>> q_list(tensor_lists.begin()+1, tensor_lists.begin()+2);
auto update_g_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, q_list, true);
std::vector<std::vector<at::Tensor>> grad_q_param_list(tensor_lists.begin(), tensor_lists.begin()+3);
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lans_stage_2",
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
grad_q_param_list,
LANSStage2Functor<scalar_t_0>(),
beta1,
beta3,
std::get<1>(param_norm_tuple).DATA_PTR<float>(),
std::get<1>(update_m_norm_tuple).DATA_PTR<float>(),
std::get<1>(update_g_norm_tuple).DATA_PTR<float>(),
lr); )
AT_CUDA_CHECK(cudaGetLastError());
}
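// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file; kept inside
// `#if 0` so it does not affect compilation). The tensor-list layout is
// inferred from how LANSStage1Functor indexes tl.addresses above:
//   tensor_lists[0] = gradients (g)
//   tensor_lists[1] = scratch buffers (q), same shapes as the gradients
//   tensor_lists[2] = parameters (p)
//   tensor_lists[3] = first moments (m)
//   tensor_lists[4] = second moments (v)
// The helper name, the chunk size and the hyper-parameter values below are
// hypothetical placeholders, not values prescribed by this file.
#if 0
void example_lans_step(std::vector<at::Tensor> grads,
                       std::vector<at::Tensor> scratch,
                       std::vector<at::Tensor> params,
                       std::vector<at::Tensor> exp_avg,
                       std::vector<at::Tensor> exp_avg_sq,
                       int step)
{
  auto noop_flag = at::zeros({1}, grads[0].options().dtype(at::kInt));
  std::vector<std::vector<at::Tensor>> tensor_lists{
      grads, scratch, params, exp_avg, exp_avg_sq};
  multi_tensor_lans_cuda(/*chunk_size=*/2048 * 32,
                         noop_flag,
                         tensor_lists,
                         /*lr=*/1e-3f,
                         /*beta1=*/0.9f,
                         /*beta2=*/0.999f,
                         /*epsilon=*/1e-8f,
                         step,
                         /*bias_correction=*/1,
                         /*weight_decay=*/0.01f,
                         /*grad_averaging=*/1,
                         /*mode=*/1,
                         /*normalize_grad=*/false);
}
#endif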
|
f81cb55a77a1160800c306ceb7385f289fbe3d37.hip | // !!! This is a file automatically generated by hipify!!!
/* Modified by ycmd contributors */
/*
University of Illinois/NCSA
Open Source License
Copyright (c) 2007-2016 University of Illinois at Urbana-Champaign.
All rights reserved.
Developed by:
LLVM Team
University of Illinois at Urbana-Champaign
http://llvm.org
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal with
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimers.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimers in the
documentation and/or other materials provided with the distribution.
* Neither the names of the LLVM Team, University of Illinois at
Urbana-Champaign, nor the names of its contributors may be used to
endorse or promote products derived from this Software without specific
prior written permission.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
SOFTWARE.
*/
#include "hip/hip_runtime.h"
__global__ void g1(int x) {}
template <typename T> void t1(T arg) {
hipLaunchKernelGGL(( g1), dim3(arg), dim3(arg), 0, 0, 1);
}
void h1(int x) {}
int h2(int x) { return 1; }
int main(void) {
hipLaunchKernelGGL(( g1), dim3(1), dim3(1), 0, 0, 42);
g1(42); // expected-error {{call to global function 'g1' not configured}}
g1<<<1>>>(42); // expected-error {{too few execution configuration arguments to kernel function call}}
hipLaunchKernelGGL(( g1), dim3(1), dim3(1), 0, 0, 0, 42); // expected-error {{too many execution configuration arguments to kernel function call}}
t1(1);
hipLaunchKernelGGL(( h1), dim3(1), dim3(1), 0, 0, 42); // expected-error {{kernel call to non-global function 'h1'}}
int (*fp)(int) = h2;
hipLaunchKernelGGL(( fp), dim3(1), dim3(1), 0, 0, 42); // expected-error {{must have void return type}}
hipLaunchKernelGGL(( g1), dim3(undeclared), dim3(1), 0, 0, 42); // expected-error {{use of undeclared identifier 'undeclared'}}
}
| f81cb55a77a1160800c306ceb7385f289fbe3d37.cu | /* Modified by ycmd contributors */
/*
University of Illinois/NCSA
Open Source License
Copyright (c) 2007-2016 University of Illinois at Urbana-Champaign.
All rights reserved.
Developed by:
LLVM Team
University of Illinois at Urbana-Champaign
http://llvm.org
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal with
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimers.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimers in the
documentation and/or other materials provided with the distribution.
* Neither the names of the LLVM Team, University of Illinois at
Urbana-Champaign, nor the names of its contributors may be used to
endorse or promote products derived from this Software without specific
prior written permission.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
SOFTWARE.
*/
#include "cuda.h"
__global__ void g1(int x) {}
template <typename T> void t1(T arg) {
g1<<<arg, arg>>>(1);
}
void h1(int x) {}
int h2(int x) { return 1; }
int main(void) {
g1<<<1, 1>>>(42);
g1(42); // expected-error {{call to global function 'g1' not configured}}
g1<<<1>>>(42); // expected-error {{too few execution configuration arguments to kernel function call}}
g1<<<1, 1, 0, 0, 0>>>(42); // expected-error {{too many execution configuration arguments to kernel function call}}
t1(1);
h1<<<1, 1>>>(42); // expected-error {{kernel call to non-global function 'h1'}}
int (*fp)(int) = h2;
fp<<<1, 1>>>(42); // expected-error {{must have void return type}}
g1<<<undeclared, 1>>>(42); // expected-error {{use of undeclared identifier 'undeclared'}}
}
|
0961fd286810d605cba7f1969c51a48d8264f445.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/null_mask.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include <hipcub/hipcub.hpp>
#include <algorithm>
#include <numeric>
#include <type_traits>
namespace cudf {
size_type state_null_count(mask_state state, size_type size)
{
switch (state) {
case mask_state::UNALLOCATED: return 0;
case mask_state::UNINITIALIZED: return UNKNOWN_NULL_COUNT;
case mask_state::ALL_NULL: return size;
case mask_state::ALL_VALID: return 0;
default: CUDF_FAIL("Invalid null mask state.");
}
}
// Computes required allocation size of a bitmask
std::size_t bitmask_allocation_size_bytes(size_type number_of_bits, std::size_t padding_boundary)
{
CUDF_EXPECTS(padding_boundary > 0, "Invalid padding boundary");
auto necessary_bytes = cudf::util::div_rounding_up_safe<size_type>(number_of_bits, CHAR_BIT);
auto padded_bytes = padding_boundary * cudf::util::div_rounding_up_safe<size_type>(
necessary_bytes, padding_boundary);
return padded_bytes;
}
// Computes number of *actual* bitmask_type elements needed
size_type num_bitmask_words(size_type number_of_bits)
{
return cudf::util::div_rounding_up_safe<size_type>(number_of_bits,
detail::size_in_bits<bitmask_type>());
}
namespace detail {
// Create a device_buffer for a null mask
rmm::device_buffer create_null_mask(size_type size,
mask_state state,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
size_type mask_size{0};
if (state != mask_state::UNALLOCATED) { mask_size = bitmask_allocation_size_bytes(size); }
rmm::device_buffer mask(mask_size, stream, mr);
if (state != mask_state::UNINITIALIZED) {
uint8_t fill_value = (state == mask_state::ALL_VALID) ? 0xff : 0x00;
CUDA_TRY(hipMemsetAsync(
static_cast<bitmask_type *>(mask.data()), fill_value, mask_size, stream.value()));
}
return mask;
}
namespace {
__global__ void set_null_mask_kernel(bitmask_type *__restrict__ destination,
size_type begin_bit,
size_type end_bit,
bool valid,
size_type number_of_mask_words)
{
auto x = destination + word_index(begin_bit);
const auto last_word = word_index(end_bit) - word_index(begin_bit);
bitmask_type fill_value = (valid == true) ? 0xffffffff : 0x00;
for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x;
destination_word_index < number_of_mask_words;
destination_word_index += blockDim.x * gridDim.x) {
if (destination_word_index == 0 || destination_word_index == last_word) {
bitmask_type mask = ~bitmask_type{0};
if (destination_word_index == 0) {
mask = ~(set_least_significant_bits(intra_word_index(begin_bit)));
}
if (destination_word_index == last_word) {
mask = mask & set_least_significant_bits(intra_word_index(end_bit));
}
x[destination_word_index] =
(valid == true) ? x[destination_word_index] | mask : x[destination_word_index] & ~mask;
} else {
x[destination_word_index] = fill_value;
}
}
}
} // namespace
// Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true,
// or null, otherwise;
void set_null_mask(bitmask_type *bitmask,
size_type begin_bit,
size_type end_bit,
bool valid,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(begin_bit >= 0, "Invalid range.");
CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range.");
if (begin_bit == end_bit) return;
if (bitmask != nullptr) {
auto number_of_mask_words =
num_bitmask_words(end_bit) - begin_bit / detail::size_in_bits<bitmask_type>();
cudf::detail::grid_1d config(number_of_mask_words, 256);
hipLaunchKernelGGL(( set_null_mask_kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(),
static_cast<bitmask_type *>(bitmask), begin_bit, end_bit, valid, number_of_mask_words);
CHECK_CUDA(stream.value());
}
}
} // namespace detail
// Create a device_buffer for a null mask
rmm::device_buffer create_null_mask(size_type size,
mask_state state,
rmm::mr::device_memory_resource *mr)
{
return detail::create_null_mask(size, state, rmm::cuda_stream_default, mr);
}
// Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true,
// or null, otherwise;
void set_null_mask(bitmask_type *bitmask, size_type begin_bit, size_type end_bit, bool valid)
{
return detail::set_null_mask(bitmask, begin_bit, end_bit, valid);
}
namespace {
/**
* @brief Counts the number of non-zero bits in a bitmask in the range
* `[first_bit_index, last_bit_index]`.
*
* Expects `0 <= first_bit_index <= last_bit_index`.
*
* @param[in] bitmask The bitmask whose non-zero bits will be counted.
* @param[in] first_bit_index The index (inclusive) of the first bit to count
* @param[in] last_bit_index The index (inclusive) of the last bit to count
* @param[out] global_count The number of non-zero bits in the specified range
*/
template <size_type block_size>
__global__ void count_set_bits_kernel(bitmask_type const *bitmask,
size_type first_bit_index,
size_type last_bit_index,
size_type *global_count)
{
constexpr auto const word_size{detail::size_in_bits<bitmask_type>()};
auto const first_word_index{word_index(first_bit_index)};
auto const last_word_index{word_index(last_bit_index)};
auto const tid = threadIdx.x + blockIdx.x * blockDim.x;
auto thread_word_index = tid + first_word_index;
size_type thread_count{0};
// First, just count the bits in all words
while (thread_word_index <= last_word_index) {
thread_count += __popc(bitmask[thread_word_index]);
thread_word_index += blockDim.x * gridDim.x;
}
// Subtract any slack bits counted from the first and last word
// Two threads handle this -- one for first word, one for last
if (tid < 2) {
bool const first{tid == 0};
bool const last{not first};
size_type bit_index = (first) ? first_bit_index : last_bit_index;
size_type word_index = (first) ? first_word_index : last_word_index;
size_type num_slack_bits = bit_index % word_size;
if (last) { num_slack_bits = word_size - num_slack_bits - 1; }
if (num_slack_bits > 0) {
bitmask_type word = bitmask[word_index];
auto slack_mask = (first) ? set_least_significant_bits(num_slack_bits)
: set_most_significant_bits(num_slack_bits);
thread_count -= __popc(word & slack_mask);
}
}
using BlockReduce = hipcub::BlockReduce<size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
size_type block_count{BlockReduce(temp_storage).Sum(thread_count)};
if (threadIdx.x == 0) { atomicAdd(global_count, block_count); }
}
/**
* For each range `[first_bit_indices[i], last_bit_indices[i])`
* (where 0 <= i < `num_ranges`), count the number of bits set outside the range
* in the boundary words (i.e. words that include either
* `first_bit_indices[i]'th` bit or `(last_bit_indices[i] - 1)'th` bit) and
* subtract the count from the range's null count.
*
* Expects `0 <= first_bit_indices[i] <= last_bit_indices[i]`.
*
* @param[in] bitmask The bitmask whose non-zero bits outside the range in the
* boundary words will be counted.
* @param[in] num_ranges The number of ranges
* @param[in] first_bit_indices The indices (inclusive) of the first bit in each
* range
* @param[in] last_bit_indices The indices (exclusive) of the last bit in each
* range
* @param[in,out] null_counts The number of non-zero bits in each range to be
* updated
*/
template <typename OffsetIterator, typename OutputIterator>
__global__ void subtract_set_bits_range_boundaries_kerenel(bitmask_type const *bitmask,
size_type num_ranges,
OffsetIterator first_bit_indices,
OffsetIterator last_bit_indices,
OutputIterator null_counts)
{
constexpr size_type const word_size_in_bits{detail::size_in_bits<bitmask_type>()};
cudf::size_type const tid = threadIdx.x + blockIdx.x * blockDim.x;
cudf::size_type range_id = tid;
while (range_id < num_ranges) {
size_type const first_bit_index = *(first_bit_indices + range_id);
size_type const last_bit_index = *(last_bit_indices + range_id);
size_type delta = 0;
size_type num_slack_bits = 0;
// compute delta due to the preceding bits in the first word in the range
num_slack_bits = intra_word_index(first_bit_index);
if (num_slack_bits > 0) {
bitmask_type word = bitmask[word_index(first_bit_index)];
bitmask_type slack_mask = set_least_significant_bits(num_slack_bits);
delta -= __popc(word & slack_mask);
}
// compute delta due to the following bits in the last word in the range
num_slack_bits = (last_bit_index % word_size_in_bits) == 0
? 0
: word_size_in_bits - intra_word_index(last_bit_index);
if (num_slack_bits > 0) {
bitmask_type word = bitmask[word_index(last_bit_index)];
bitmask_type slack_mask = set_most_significant_bits(num_slack_bits);
delta -= __popc(word & slack_mask);
}
size_type updated_null_count = *(null_counts + range_id) + delta;
*(null_counts + range_id) = updated_null_count;
range_id += blockDim.x * gridDim.x;
}
}
/**
* @brief Copies the bits starting at the specified offset from a source
* bitmask into the destination bitmask.
*
* Bit `i` in `destination` will be equal to bit `i + offset` from `source`.
*
* @param destination The mask to copy into
* @param source The mask to copy from
* @param source_begin_bit The offset into `source` from which to begin the copy
* @param source_end_bit The offset into `source` till which copying is done
* @param number_of_mask_words The number of `cudf::bitmask_type` words to copy
*/
// TODO: Also make binops test that uses offset in column_view
__global__ void copy_offset_bitmask(bitmask_type *__restrict__ destination,
bitmask_type const *__restrict__ source,
size_type source_begin_bit,
size_type source_end_bit,
size_type number_of_mask_words)
{
for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x;
destination_word_index < number_of_mask_words;
destination_word_index += blockDim.x * gridDim.x) {
destination[destination_word_index] = detail::get_mask_offset_word(
source, destination_word_index, source_begin_bit, source_end_bit);
}
}
// convert [first_bit_index,last_bit_index) to
// [first_word_index,last_word_index)
struct to_word_index : public thrust::unary_function<size_type, size_type> {
const bool _inclusive = false;
size_type const *const _d_bit_indices = nullptr;
/**
* @brief Constructor of a functor that converts bit indices to bitmask word
* indices.
*
* @param[in] inclusive Flag that indicates whether bit indices are inclusive
* or exclusive.
* @param[in] d_bit_indices Pointer to an array of bit indices
*/
__host__ to_word_index(bool inclusive, size_type const *d_bit_indices)
: _inclusive(inclusive), _d_bit_indices(d_bit_indices)
{
}
__device__ size_type operator()(const size_type &i) const
{
auto bit_index = _d_bit_indices[i];
return word_index(bit_index) + ((_inclusive || intra_word_index(bit_index) == 0) ? 0 : 1);
}
};
} // namespace
namespace detail {
// Create a bitmask from a specific range
rmm::device_buffer copy_bitmask(bitmask_type const *mask,
size_type begin_bit,
size_type end_bit,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(begin_bit >= 0, "Invalid range.");
CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range.");
rmm::device_buffer dest_mask{};
auto num_bytes = bitmask_allocation_size_bytes(end_bit - begin_bit);
if ((mask == nullptr) || (num_bytes == 0)) { return dest_mask; }
if (begin_bit == 0) {
dest_mask = rmm::device_buffer{static_cast<void const *>(mask), num_bytes, stream, mr};
} else {
auto number_of_mask_words = num_bitmask_words(end_bit - begin_bit);
dest_mask = rmm::device_buffer{num_bytes, stream, mr};
cudf::detail::grid_1d config(number_of_mask_words, 256);
hipLaunchKernelGGL(( copy_offset_bitmask), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(),
static_cast<bitmask_type *>(dest_mask.data()),
mask,
begin_bit,
end_bit,
number_of_mask_words);
CHECK_CUDA(stream.value());
}
return dest_mask;
}
// Create a bitmask from a column view
rmm::device_buffer copy_bitmask(column_view const &view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.nullable()) {
null_mask =
copy_bitmask(view.null_mask(), view.offset(), view.offset() + view.size(), stream, mr);
}
return null_mask;
}
// Inplace Bitwise AND of the masks
void inplace_bitmask_and(device_span<bitmask_type> dest_mask,
host_span<bitmask_type const *> masks,
host_span<size_type const> begin_bits,
size_type mask_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
inplace_bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
dest_mask,
masks,
begin_bits,
mask_size,
stream,
mr);
}
// Bitwise AND of the masks
rmm::device_buffer bitmask_and(host_span<bitmask_type const *> masks,
host_span<size_type const> begin_bits,
size_type mask_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
return bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
masks,
begin_bits,
mask_size,
stream,
mr);
}
cudf::size_type count_set_bits(bitmask_type const *bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
if (nullptr == bitmask) { return 0; }
CUDF_EXPECTS(start >= 0, "Invalid range.");
CUDF_EXPECTS(start <= stop, "Invalid bit range.");
std::size_t num_bits_to_count = stop - start;
if (num_bits_to_count == 0) { return 0; }
auto num_words = num_bitmask_words(num_bits_to_count);
constexpr size_type block_size{256};
cudf::detail::grid_1d grid(num_words, block_size);
rmm::device_scalar<size_type> non_zero_count(0, stream);
hipLaunchKernelGGL(( count_set_bits_kernel<block_size>)
, dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(),
bitmask, start, stop - 1, non_zero_count.data());
return non_zero_count.value(stream);
}
cudf::size_type count_unset_bits(bitmask_type const *bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
if (nullptr == bitmask) { return 0; }
auto num_bits = (stop - start);
return (num_bits - detail::count_set_bits(bitmask, start, stop, stream));
}
std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(indices.size() % 2 == 0,
"Array of indices needs to have an even number of elements.");
for (size_t i = 0; i < indices.size() / 2; i++) {
auto begin = indices[i * 2];
auto end = indices[i * 2 + 1];
CUDF_EXPECTS(begin >= 0, "Starting index cannot be negative.");
CUDF_EXPECTS(end >= begin, "End index cannot be smaller than the starting index.");
}
if (indices.empty()) {
return std::vector<size_type>{};
} else if (bitmask == nullptr) {
std::vector<size_type> ret(indices.size() / 2);
for (size_t i = 0; i < indices.size() / 2; i++) {
ret[i] = indices[2 * i + 1] - indices[2 * i];
}
return ret;
}
size_type num_ranges = indices.size() / 2;
std::vector<size_type> h_first_indices(num_ranges);
std::vector<size_type> h_last_indices(num_ranges);
thrust::stable_partition_copy(thrust::seq,
std::begin(indices),
std::end(indices),
thrust::make_counting_iterator(0),
h_first_indices.begin(),
h_last_indices.begin(),
[](auto i) { return (i % 2) == 0; });
auto d_first_indices = make_device_uvector_async(h_first_indices, stream);
auto d_last_indices = make_device_uvector_async(h_last_indices, stream);
rmm::device_uvector<size_type> d_null_counts(num_ranges, stream);
auto word_num_set_bits = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[bitmask] __device__(auto i) { return static_cast<size_type>(__popc(bitmask[i])); });
auto first_word_indices = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
// We cannot use lambda as hipcub::DeviceSegmentedReduce::Sum() requires
// first_word_indices and last_word_indices to have the same type.
to_word_index(true, d_first_indices.data()));
auto last_word_indices = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
// We cannot use lambda as hipcub::DeviceSegmentedReduce::Sum() requires
// first_word_indices and last_word_indices to have the same type.
to_word_index(false, d_last_indices.data()));
// first allocate temporary memory
size_t temp_storage_bytes{0};
CUDA_TRY(hipcub::DeviceSegmentedReduce::Sum(nullptr,
temp_storage_bytes,
word_num_set_bits,
d_null_counts.begin(),
num_ranges,
first_word_indices,
last_word_indices,
stream.value()));
rmm::device_buffer d_temp_storage(temp_storage_bytes, stream);
// second perform segmented reduction
CUDA_TRY(hipcub::DeviceSegmentedReduce::Sum(d_temp_storage.data(),
temp_storage_bytes,
word_num_set_bits,
d_null_counts.begin(),
num_ranges,
first_word_indices,
last_word_indices,
stream.value()));
CHECK_CUDA(stream.value());
// third, adjust counts in segment boundaries (if segments are not
// word-aligned)
constexpr size_type block_size{256};
cudf::detail::grid_1d grid(num_ranges, block_size);
hipLaunchKernelGGL(( subtract_set_bits_range_boundaries_kerenel), dim3(grid.num_blocks),
dim3(grid.num_threads_per_block),
0,
stream.value(),
bitmask, num_ranges, d_first_indices.begin(), d_last_indices.begin(), d_null_counts.begin());
CHECK_CUDA(stream.value());
std::vector<size_type> ret(num_ranges);
CUDA_TRY(hipMemcpyAsync(ret.data(),
d_null_counts.data(),
num_ranges * sizeof(size_type),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize(); // now ret is valid.
return ret;
}
std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream)
{
if (indices.empty()) {
return std::vector<size_type>{};
} else if (bitmask == nullptr) {
return std::vector<size_type>(indices.size() / 2, 0);
}
auto ret = segmented_count_set_bits(bitmask, indices, stream);
for (size_t i = 0; i < ret.size(); i++) {
auto begin = indices[i * 2];
auto end = indices[i * 2 + 1];
ret[i] = (end - begin) - ret[i];
}
return ret;
}
// Returns the bitwise AND of the null masks of all columns in the table view
rmm::device_buffer bitmask_and(table_view const &view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.num_rows() == 0 or view.num_columns() == 0) { return null_mask; }
std::vector<bitmask_type const *> masks;
std::vector<size_type> offsets;
for (auto &&col : view) {
if (col.nullable()) {
masks.push_back(col.null_mask());
offsets.push_back(col.offset());
}
}
if (masks.size() > 0) {
return cudf::detail::bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
masks,
offsets,
view.num_rows(),
stream,
mr);
}
return null_mask;
}
// Returns the bitwise OR of the null masks of all columns in the table view
rmm::device_buffer bitmask_or(table_view const &view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.num_rows() == 0 or view.num_columns() == 0) { return null_mask; }
std::vector<bitmask_type const *> masks;
std::vector<size_type> offsets;
for (auto &&col : view) {
if (col.nullable()) {
masks.push_back(col.null_mask());
offsets.push_back(col.offset());
}
}
if (static_cast<size_type>(masks.size()) == view.num_columns()) {
return cudf::detail::bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left | right; },
masks,
offsets,
view.num_rows(),
stream,
mr);
}
return null_mask;
}
} // namespace detail
// Count non-zero bits in the specified range
cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop)
{
CUDF_FUNC_RANGE();
return detail::count_set_bits(bitmask, start, stop);
}
// Count zero bits in the specified range
cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop)
{
CUDF_FUNC_RANGE();
return detail::count_unset_bits(bitmask, start, stop);
}
// Count non-zero bits in the specified ranges
std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask,
host_span<size_type const> indices)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_set_bits(bitmask, indices, rmm::cuda_stream_default);
}
// Count zero bits in the specified ranges
std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask,
host_span<size_type const> indices)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_unset_bits(bitmask, indices, rmm::cuda_stream_default);
}
// Create a bitmask from a specific range
rmm::device_buffer copy_bitmask(bitmask_type const *mask,
size_type begin_bit,
size_type end_bit,
rmm::mr::device_memory_resource *mr)
{
return detail::copy_bitmask(mask, begin_bit, end_bit, rmm::cuda_stream_default, mr);
}
// Create a bitmask from a column view
rmm::device_buffer copy_bitmask(column_view const &view, rmm::mr::device_memory_resource *mr)
{
return detail::copy_bitmask(view, rmm::cuda_stream_default, mr);
}
rmm::device_buffer bitmask_and(table_view const &view, rmm::mr::device_memory_resource *mr)
{
return detail::bitmask_and(view, rmm::cuda_stream_default, mr);
}
rmm::device_buffer bitmask_or(table_view const &view, rmm::mr::device_memory_resource *mr)
{
return detail::bitmask_or(view, rmm::cuda_stream_default, mr);
}
} // namespace cudf
| 0961fd286810d605cba7f1969c51a48d8264f445.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/null_mask.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include <cub/cub.cuh>
#include <algorithm>
#include <numeric>
#include <type_traits>
namespace cudf {
size_type state_null_count(mask_state state, size_type size)
{
switch (state) {
case mask_state::UNALLOCATED: return 0;
case mask_state::UNINITIALIZED: return UNKNOWN_NULL_COUNT;
case mask_state::ALL_NULL: return size;
case mask_state::ALL_VALID: return 0;
default: CUDF_FAIL("Invalid null mask state.");
}
}
// Computes required allocation size of a bitmask
std::size_t bitmask_allocation_size_bytes(size_type number_of_bits, std::size_t padding_boundary)
{
CUDF_EXPECTS(padding_boundary > 0, "Invalid padding boundary");
auto necessary_bytes = cudf::util::div_rounding_up_safe<size_type>(number_of_bits, CHAR_BIT);
auto padded_bytes = padding_boundary * cudf::util::div_rounding_up_safe<size_type>(
necessary_bytes, padding_boundary);
return padded_bytes;
}
// Computes number of *actual* bitmask_type elements needed
size_type num_bitmask_words(size_type number_of_bits)
{
return cudf::util::div_rounding_up_safe<size_type>(number_of_bits,
detail::size_in_bits<bitmask_type>());
}
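// Worked example for the two sizing helpers above (illustrative; assumes a
// 32-bit bitmask_type and the 64-byte default padding boundary, which is
// declared in the public header rather than here):
//   number_of_bits = 100
//   necessary_bytes = ceil(100 / 8) = 13
//   bitmask_allocation_size_bytes(100) = 64 * ceil(13 / 64) = 64 bytes
//   num_bitmask_words(100) = ceil(100 / 32) = 4 words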
namespace detail {
// Create a device_buffer for a null mask
rmm::device_buffer create_null_mask(size_type size,
mask_state state,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
size_type mask_size{0};
if (state != mask_state::UNALLOCATED) { mask_size = bitmask_allocation_size_bytes(size); }
rmm::device_buffer mask(mask_size, stream, mr);
if (state != mask_state::UNINITIALIZED) {
uint8_t fill_value = (state == mask_state::ALL_VALID) ? 0xff : 0x00;
CUDA_TRY(cudaMemsetAsync(
static_cast<bitmask_type *>(mask.data()), fill_value, mask_size, stream.value()));
}
return mask;
}
namespace {
__global__ void set_null_mask_kernel(bitmask_type *__restrict__ destination,
size_type begin_bit,
size_type end_bit,
bool valid,
size_type number_of_mask_words)
{
auto x = destination + word_index(begin_bit);
const auto last_word = word_index(end_bit) - word_index(begin_bit);
bitmask_type fill_value = (valid == true) ? 0xffffffff : 0x00;
for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x;
destination_word_index < number_of_mask_words;
destination_word_index += blockDim.x * gridDim.x) {
if (destination_word_index == 0 || destination_word_index == last_word) {
bitmask_type mask = ~bitmask_type{0};
if (destination_word_index == 0) {
mask = ~(set_least_significant_bits(intra_word_index(begin_bit)));
}
if (destination_word_index == last_word) {
mask = mask & set_least_significant_bits(intra_word_index(end_bit));
}
x[destination_word_index] =
(valid == true) ? x[destination_word_index] | mask : x[destination_word_index] & ~mask;
} else {
x[destination_word_index] = fill_value;
}
}
}
} // namespace
// Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true,
// or null, otherwise;
void set_null_mask(bitmask_type *bitmask,
size_type begin_bit,
size_type end_bit,
bool valid,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(begin_bit >= 0, "Invalid range.");
CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range.");
if (begin_bit == end_bit) return;
if (bitmask != nullptr) {
auto number_of_mask_words =
num_bitmask_words(end_bit) - begin_bit / detail::size_in_bits<bitmask_type>();
cudf::detail::grid_1d config(number_of_mask_words, 256);
set_null_mask_kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
static_cast<bitmask_type *>(bitmask), begin_bit, end_bit, valid, number_of_mask_words);
CHECK_CUDA(stream.value());
}
}
} // namespace detail
// Create a device_buffer for a null mask
rmm::device_buffer create_null_mask(size_type size,
mask_state state,
rmm::mr::device_memory_resource *mr)
{
return detail::create_null_mask(size, state, rmm::cuda_stream_default, mr);
}
// Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true,
// or null, otherwise;
void set_null_mask(bitmask_type *bitmask, size_type begin_bit, size_type end_bit, bool valid)
{
return detail::set_null_mask(bitmask, begin_bit, end_bit, valid);
}
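// Illustrative usage sketch for the two public helpers above (not part of the
// original file; guarded by #if 0 so it does not affect the build). The
// function name and the sizes are placeholders.
#if 0
void example_mask_usage(rmm::mr::device_memory_resource *mr)
{
  // 1000 rows, all initially valid.
  rmm::device_buffer mask = create_null_mask(1000, mask_state::ALL_VALID, mr);
  // Mark rows [10, 20) as null.
  set_null_mask(static_cast<bitmask_type *>(mask.data()), 10, 20, false);
}
#endif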
namespace {
/**
* @brief Counts the number of non-zero bits in a bitmask in the range
* `[first_bit_index, last_bit_index]`.
*
* Expects `0 <= first_bit_index <= last_bit_index`.
*
* @param[in] bitmask The bitmask whose non-zero bits will be counted.
* @param[in] first_bit_index The index (inclusive) of the first bit to count
* @param[in] last_bit_index The index (inclusive) of the last bit to count
* @param[out] global_count The number of non-zero bits in the specified range
*/
template <size_type block_size>
__global__ void count_set_bits_kernel(bitmask_type const *bitmask,
size_type first_bit_index,
size_type last_bit_index,
size_type *global_count)
{
constexpr auto const word_size{detail::size_in_bits<bitmask_type>()};
auto const first_word_index{word_index(first_bit_index)};
auto const last_word_index{word_index(last_bit_index)};
auto const tid = threadIdx.x + blockIdx.x * blockDim.x;
auto thread_word_index = tid + first_word_index;
size_type thread_count{0};
// First, just count the bits in all words
while (thread_word_index <= last_word_index) {
thread_count += __popc(bitmask[thread_word_index]);
thread_word_index += blockDim.x * gridDim.x;
}
// Subtract any slack bits counted from the first and last word
// Two threads handle this -- one for first word, one for last
if (tid < 2) {
bool const first{tid == 0};
bool const last{not first};
size_type bit_index = (first) ? first_bit_index : last_bit_index;
size_type word_index = (first) ? first_word_index : last_word_index;
size_type num_slack_bits = bit_index % word_size;
if (last) { num_slack_bits = word_size - num_slack_bits - 1; }
if (num_slack_bits > 0) {
bitmask_type word = bitmask[word_index];
auto slack_mask = (first) ? set_least_significant_bits(num_slack_bits)
: set_most_significant_bits(num_slack_bits);
thread_count -= __popc(word & slack_mask);
}
}
using BlockReduce = cub::BlockReduce<size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
size_type block_count{BlockReduce(temp_storage).Sum(thread_count)};
if (threadIdx.x == 0) { atomicAdd(global_count, block_count); }
}
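// Worked example of the slack-bit correction above (illustrative): counting
// the inclusive bit range [5, 70] with 32-bit words gives
//   first_word_index = 0, last_word_index = 2,
// so the __popc loop counts bits 0..95. The two correcting threads then
// subtract 5 % 32 = 5 low bits of the first word (bits 0..4) and
// 32 - (70 % 32) - 1 = 25 high bits of the last word (bits 71..95),
// leaving exactly bits 5..70.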
/**
* For each range `[first_bit_indices[i], last_bit_indices[i])`
* (where 0 <= i < `num_ranges`), count the number of bits set outside the range
* in the boundary words (i.e. words that include either
* `first_bit_indices[i]'th` bit or `(last_bit_indices[i] - 1)'th` bit) and
* subtract the count from the range's null count.
*
* Expects `0 <= first_bit_indices[i] <= last_bit_indices[i]`.
*
* @param[in] bitmask The bitmask whose non-zero bits outside the range in the
* boundary words will be counted.
* @param[in] num_ranges The number of ranges
* @param[in] first_bit_indices The indices (inclusive) of the first bit in each
* range
* @param[in] last_bit_indices The indices (exclusive) of the last bit in each
* range
* @param[in,out] null_counts The number of non-zero bits in each range to be
* updated
*/
template <typename OffsetIterator, typename OutputIterator>
__global__ void subtract_set_bits_range_boundaries_kerenel(bitmask_type const *bitmask,
size_type num_ranges,
OffsetIterator first_bit_indices,
OffsetIterator last_bit_indices,
OutputIterator null_counts)
{
constexpr size_type const word_size_in_bits{detail::size_in_bits<bitmask_type>()};
cudf::size_type const tid = threadIdx.x + blockIdx.x * blockDim.x;
cudf::size_type range_id = tid;
while (range_id < num_ranges) {
size_type const first_bit_index = *(first_bit_indices + range_id);
size_type const last_bit_index = *(last_bit_indices + range_id);
size_type delta = 0;
size_type num_slack_bits = 0;
// compute delta due to the preceding bits in the first word in the range
num_slack_bits = intra_word_index(first_bit_index);
if (num_slack_bits > 0) {
bitmask_type word = bitmask[word_index(first_bit_index)];
bitmask_type slack_mask = set_least_significant_bits(num_slack_bits);
delta -= __popc(word & slack_mask);
}
// compute delta due to the following bits in the last word in the range
num_slack_bits = (last_bit_index % word_size_in_bits) == 0
? 0
: word_size_in_bits - intra_word_index(last_bit_index);
if (num_slack_bits > 0) {
bitmask_type word = bitmask[word_index(last_bit_index)];
bitmask_type slack_mask = set_most_significant_bits(num_slack_bits);
delta -= __popc(word & slack_mask);
}
size_type updated_null_count = *(null_counts + range_id) + delta;
*(null_counts + range_id) = updated_null_count;
range_id += blockDim.x * gridDim.x;
}
}
/**
* @brief Copies the bits starting at the specified offset from a source
* bitmask into the destination bitmask.
*
* Bit `i` in `destination` will be equal to bit `i + offset` from `source`.
*
* @param destination The mask to copy into
* @param source The mask to copy from
* @param source_begin_bit The offset into `source` from which to begin the copy
* @param source_end_bit The offset into `source` till which copying is done
* @param number_of_mask_words The number of `cudf::bitmask_type` words to copy
*/
// TODO: Also make binops test that uses offset in column_view
__global__ void copy_offset_bitmask(bitmask_type *__restrict__ destination,
bitmask_type const *__restrict__ source,
size_type source_begin_bit,
size_type source_end_bit,
size_type number_of_mask_words)
{
for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x;
destination_word_index < number_of_mask_words;
destination_word_index += blockDim.x * gridDim.x) {
destination[destination_word_index] = detail::get_mask_offset_word(
source, destination_word_index, source_begin_bit, source_end_bit);
}
}
// convert [first_bit_index,last_bit_index) to
// [first_word_index,last_word_index)
struct to_word_index : public thrust::unary_function<size_type, size_type> {
const bool _inclusive = false;
size_type const *const _d_bit_indices = nullptr;
/**
* @brief Constructor of a functor that converts bit indices to bitmask word
* indices.
*
* @param[in] inclusive Flag that indicates whether bit indices are inclusive
* or exclusive.
* @param[in] d_bit_indices Pointer to an array of bit indices
*/
__host__ to_word_index(bool inclusive, size_type const *d_bit_indices)
: _inclusive(inclusive), _d_bit_indices(d_bit_indices)
{
}
__device__ size_type operator()(const size_type &i) const
{
auto bit_index = _d_bit_indices[i];
return word_index(bit_index) + ((_inclusive || intra_word_index(bit_index) == 0) ? 0 : 1);
}
};
} // namespace
namespace detail {
// Create a bitmask from a specific range
rmm::device_buffer copy_bitmask(bitmask_type const *mask,
size_type begin_bit,
size_type end_bit,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(begin_bit >= 0, "Invalid range.");
CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range.");
rmm::device_buffer dest_mask{};
auto num_bytes = bitmask_allocation_size_bytes(end_bit - begin_bit);
if ((mask == nullptr) || (num_bytes == 0)) { return dest_mask; }
if (begin_bit == 0) {
dest_mask = rmm::device_buffer{static_cast<void const *>(mask), num_bytes, stream, mr};
} else {
auto number_of_mask_words = num_bitmask_words(end_bit - begin_bit);
dest_mask = rmm::device_buffer{num_bytes, stream, mr};
cudf::detail::grid_1d config(number_of_mask_words, 256);
copy_offset_bitmask<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
static_cast<bitmask_type *>(dest_mask.data()),
mask,
begin_bit,
end_bit,
number_of_mask_words);
CHECK_CUDA(stream.value());
}
return dest_mask;
}
// Create a bitmask from a column view
rmm::device_buffer copy_bitmask(column_view const &view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.nullable()) {
null_mask =
copy_bitmask(view.null_mask(), view.offset(), view.offset() + view.size(), stream, mr);
}
return null_mask;
}
// Inplace Bitwise AND of the masks
void inplace_bitmask_and(device_span<bitmask_type> dest_mask,
host_span<bitmask_type const *> masks,
host_span<size_type const> begin_bits,
size_type mask_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
inplace_bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
dest_mask,
masks,
begin_bits,
mask_size,
stream,
mr);
}
// Bitwise AND of the masks
rmm::device_buffer bitmask_and(host_span<bitmask_type const *> masks,
host_span<size_type const> begin_bits,
size_type mask_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
return bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
masks,
begin_bits,
mask_size,
stream,
mr);
}
cudf::size_type count_set_bits(bitmask_type const *bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
if (nullptr == bitmask) { return 0; }
CUDF_EXPECTS(start >= 0, "Invalid range.");
CUDF_EXPECTS(start <= stop, "Invalid bit range.");
std::size_t num_bits_to_count = stop - start;
if (num_bits_to_count == 0) { return 0; }
auto num_words = num_bitmask_words(num_bits_to_count);
constexpr size_type block_size{256};
cudf::detail::grid_1d grid(num_words, block_size);
rmm::device_scalar<size_type> non_zero_count(0, stream);
count_set_bits_kernel<block_size>
<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>(
bitmask, start, stop - 1, non_zero_count.data());
return non_zero_count.value(stream);
}
cudf::size_type count_unset_bits(bitmask_type const *bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
if (nullptr == bitmask) { return 0; }
auto num_bits = (stop - start);
return (num_bits - detail::count_set_bits(bitmask, start, stop, stream));
}
std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(indices.size() % 2 == 0,
"Array of indices needs to have an even number of elements.");
for (size_t i = 0; i < indices.size() / 2; i++) {
auto begin = indices[i * 2];
auto end = indices[i * 2 + 1];
CUDF_EXPECTS(begin >= 0, "Starting index cannot be negative.");
CUDF_EXPECTS(end >= begin, "End index cannot be smaller than the starting index.");
}
if (indices.empty()) {
return std::vector<size_type>{};
} else if (bitmask == nullptr) {
std::vector<size_type> ret(indices.size() / 2);
for (size_t i = 0; i < indices.size() / 2; i++) {
ret[i] = indices[2 * i + 1] - indices[2 * i];
}
return ret;
}
size_type num_ranges = indices.size() / 2;
std::vector<size_type> h_first_indices(num_ranges);
std::vector<size_type> h_last_indices(num_ranges);
thrust::stable_partition_copy(thrust::seq,
std::begin(indices),
std::end(indices),
thrust::make_counting_iterator(0),
h_first_indices.begin(),
h_last_indices.begin(),
[](auto i) { return (i % 2) == 0; });
auto d_first_indices = make_device_uvector_async(h_first_indices, stream);
auto d_last_indices = make_device_uvector_async(h_last_indices, stream);
rmm::device_uvector<size_type> d_null_counts(num_ranges, stream);
auto word_num_set_bits = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[bitmask] __device__(auto i) { return static_cast<size_type>(__popc(bitmask[i])); });
auto first_word_indices = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
// We cannot use lambda as cub::DeviceSegmentedReduce::Sum() requires
// first_word_indices and last_word_indices to have the same type.
to_word_index(true, d_first_indices.data()));
auto last_word_indices = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
// We cannot use lambda as cub::DeviceSegmentedReduce::Sum() requires
// first_word_indices and last_word_indices to have the same type.
to_word_index(false, d_last_indices.data()));
// first allocate temporary memory
size_t temp_storage_bytes{0};
CUDA_TRY(cub::DeviceSegmentedReduce::Sum(nullptr,
temp_storage_bytes,
word_num_set_bits,
d_null_counts.begin(),
num_ranges,
first_word_indices,
last_word_indices,
stream.value()));
rmm::device_buffer d_temp_storage(temp_storage_bytes, stream);
// second perform segmented reduction
CUDA_TRY(cub::DeviceSegmentedReduce::Sum(d_temp_storage.data(),
temp_storage_bytes,
word_num_set_bits,
d_null_counts.begin(),
num_ranges,
first_word_indices,
last_word_indices,
stream.value()));
CHECK_CUDA(stream.value());
// third, adjust counts in segment boundaries (if segments are not
// word-aligned)
constexpr size_type block_size{256};
cudf::detail::grid_1d grid(num_ranges, block_size);
subtract_set_bits_range_boundaries_kerenel<<<grid.num_blocks,
grid.num_threads_per_block,
0,
stream.value()>>>(
bitmask, num_ranges, d_first_indices.begin(), d_last_indices.begin(), d_null_counts.begin());
CHECK_CUDA(stream.value());
std::vector<size_type> ret(num_ranges);
CUDA_TRY(cudaMemcpyAsync(ret.data(),
d_null_counts.data(),
num_ranges * sizeof(size_type),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize(); // now ret is valid.
return ret;
}
std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream)
{
if (indices.empty()) {
return std::vector<size_type>{};
} else if (bitmask == nullptr) {
return std::vector<size_type>(indices.size() / 2, 0);
}
auto ret = segmented_count_set_bits(bitmask, indices, stream);
for (size_t i = 0; i < ret.size(); i++) {
auto begin = indices[i * 2];
auto end = indices[i * 2 + 1];
ret[i] = (end - begin) - ret[i];
}
return ret;
}
// Returns the bitwise AND of the null masks of all columns in the table view
rmm::device_buffer bitmask_and(table_view const &view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.num_rows() == 0 or view.num_columns() == 0) { return null_mask; }
std::vector<bitmask_type const *> masks;
std::vector<size_type> offsets;
for (auto &&col : view) {
if (col.nullable()) {
masks.push_back(col.null_mask());
offsets.push_back(col.offset());
}
}
if (masks.size() > 0) {
return cudf::detail::bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left & right; },
masks,
offsets,
view.num_rows(),
stream,
mr);
}
return null_mask;
}
// Returns the bitwise OR of the null masks of all columns in the table view
rmm::device_buffer bitmask_or(table_view const &view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_FUNC_RANGE();
rmm::device_buffer null_mask{0, stream, mr};
if (view.num_rows() == 0 or view.num_columns() == 0) { return null_mask; }
std::vector<bitmask_type const *> masks;
std::vector<size_type> offsets;
for (auto &&col : view) {
if (col.nullable()) {
masks.push_back(col.null_mask());
offsets.push_back(col.offset());
}
}
if (static_cast<size_type>(masks.size()) == view.num_columns()) {
return cudf::detail::bitmask_binop(
[] __device__(bitmask_type left, bitmask_type right) { return left | right; },
masks,
offsets,
view.num_rows(),
stream,
mr);
}
return null_mask;
}
} // namespace detail
// Count non-zero bits in the specified range
cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop)
{
CUDF_FUNC_RANGE();
return detail::count_set_bits(bitmask, start, stop);
}
// Count zero bits in the specified range
cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop)
{
CUDF_FUNC_RANGE();
return detail::count_unset_bits(bitmask, start, stop);
}
// Count non-zero bits in the specified ranges
std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask,
host_span<size_type const> indices)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_set_bits(bitmask, indices, rmm::cuda_stream_default);
}
// Count zero bits in the specified ranges
std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask,
host_span<size_type const> indices)
{
CUDF_FUNC_RANGE();
return detail::segmented_count_unset_bits(bitmask, indices, rmm::cuda_stream_default);
}
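// Note on the `indices` layout expected by the two segmented counters above
// (illustrative): flat begin/end pairs, one pair per range, which is why
// detail::segmented_count_set_bits checks for an even number of elements.
// For example, indices = {0, 10, 10, 20} requests counts over the ranges
// [0, 10) and [10, 20), and the returned vector holds one count per pair.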
// Create a bitmask from a specific range
rmm::device_buffer copy_bitmask(bitmask_type const *mask,
size_type begin_bit,
size_type end_bit,
rmm::mr::device_memory_resource *mr)
{
return detail::copy_bitmask(mask, begin_bit, end_bit, rmm::cuda_stream_default, mr);
}
// Create a bitmask from a column view
rmm::device_buffer copy_bitmask(column_view const &view, rmm::mr::device_memory_resource *mr)
{
return detail::copy_bitmask(view, rmm::cuda_stream_default, mr);
}
rmm::device_buffer bitmask_and(table_view const &view, rmm::mr::device_memory_resource *mr)
{
return detail::bitmask_and(view, rmm::cuda_stream_default, mr);
}
rmm::device_buffer bitmask_or(table_view const &view, rmm::mr::device_memory_resource *mr)
{
return detail::bitmask_or(view, rmm::cuda_stream_default, mr);
}
} // namespace cudf
|
c6a19bc99d8181fa31f4048198c4c3190c95c540.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stencil.cuh"
// Reference from lecture 11 notes
__global__ void stencil_kernel(const float* image, const float* mask, float* output, unsigned int n, unsigned int R_) {
extern __shared__ float shMemArray[];
int R = (int)R_;
int t_id = threadIdx.x;
int t_len = blockDim.x;
int i = blockIdx.x * t_len + t_id;
if (i >= n) {
return;
}
// Copy mask to shared memory
float *shared_mask = shMemArray;
if (t_id < 2*R+1) {
shared_mask[t_id] = mask[t_id];
}
// Initialize shared output
float *shared_output = shared_mask + 2*R + 1;
shared_output[t_id] = 0;
// Copy image to shared memory
float *shared_image = shared_output + t_len + R;
shared_image[t_id] = image[i];
if (t_id < R) {
        shared_image[t_id - R] = i - R >= 0 ? image[i - R] : 0;
    } else if (t_len - t_id <= R) {
shared_image[t_id + R] = i + R < n ? image[i + R] : 0;
}
__syncthreads();
for (int j = -R; j <= R; j++) {
shared_output[t_id] += shared_image[t_id + j] * shared_mask[j + R];
}
// copy back
output[i] = shared_output[t_id];
}
__host__ void stencil(const float* image, const float* mask, float* output, unsigned int n, unsigned int R, unsigned int threads_per_block) {
int numBlock = (n - 1 + threads_per_block) / threads_per_block;
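    // Shared memory layout: a (2R+1)-float mask, threads_per_block output slots, and an image tile of
    // threads_per_block + 2R floats (an R-element halo on each side) = 2*threads_per_block + 4R + 1 floats.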
int shared_size = (1 + 4*R + 2*threads_per_block) * sizeof(float);
hipLaunchKernelGGL(( stencil_kernel), dim3(numBlock), dim3(threads_per_block), shared_size, 0, image, mask, output, n, R);
hipDeviceSynchronize();
}
| c6a19bc99d8181fa31f4048198c4c3190c95c540.cu | #include "stencil.cuh"
// Reference from lecture 11 notes
__global__ void stencil_kernel(const float* image, const float* mask, float* output, unsigned int n, unsigned int R_) {
extern __shared__ float shMemArray[];
int R = (int)R_;
int t_id = threadIdx.x;
int t_len = blockDim.x;
int i = blockIdx.x * t_len + t_id;
if (i >= n) {
return;
}
// Copy mask to shared memory
float *shared_mask = shMemArray;
if (t_id < 2*R+1) {
shared_mask[t_id] = mask[t_id];
}
// Initialize shared output
float *shared_output = shared_mask + 2*R + 1;
shared_output[t_id] = 0;
// Copy image to shared memory
float *shared_image = shared_output + t_len + R;
shared_image[t_id] = image[i];
if (t_id < R) {
        shared_image[t_id - R] = i - R >= 0 ? image[i - R] : 0;
    } else if (t_len - t_id <= R) {
shared_image[t_id + R] = i + R < n ? image[i + R] : 0;
}
__syncthreads();
for (int j = -R; j <= R; j++) {
shared_output[t_id] += shared_image[t_id + j] * shared_mask[j + R];
}
// copy back
output[i] = shared_output[t_id];
}
__host__ void stencil(const float* image, const float* mask, float* output, unsigned int n, unsigned int R, unsigned int threads_per_block) {
int numBlock = (n - 1 + threads_per_block) / threads_per_block;
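    // Shared memory layout: a (2R+1)-float mask, threads_per_block output slots, and an image tile of
    // threads_per_block + 2R floats (an R-element halo on each side) = 2*threads_per_block + 4R + 1 floats.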
int shared_size = (1 + 4*R + 2*threads_per_block) * sizeof(float);
stencil_kernel<<<numBlock, threads_per_block, shared_size>>>(image, mask, output, n, R);
cudaDeviceSynchronize();
}
|
4b0bfbb12210f7d8d49d101210dd204f534fbf52.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file ReluLayer_device.cu
* @date 2017-02-15
* @author moonhoen lee
* @brief
* @details
*/
#include "hip/hip_runtime.h"
#include "ReluLayer.h"
#include "Network.h"
#include "SysLog.h"
#include "StdOutLog.h"
#include "ColdLog.h"
#include "Perf.h"
#include "PropMgmt.h"
using namespace std;
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
//
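// Leaky ReLU: f(x) = x for x >= 0 and leaky * x otherwise; the backward kernel scales the
// incoming output gradient by the same per-element factor.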
template <typename Dtype>
__global__ void ApplyLeakyForward(const Dtype* input, Dtype* output, int size, Dtype leaky)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
if (input[idx] < 0)
output[idx] = leaky * input[idx];
else
output[idx] = input[idx];
}
template <typename Dtype>
__global__ void ApplyLeakyBackward(const Dtype* input, const Dtype* outputGrad,
Dtype* inputGrad, int size, const double leaky)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
if (input[idx] < 0)
inputGrad[idx] = leaky * outputGrad[idx];
else
inputGrad[idx] = outputGrad[idx];
}
template <typename Dtype>
void ReluLayer<Dtype>::applyLeakyForward() {
int size = this->_outputData[0]->getCountByAxis(0);
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* outputData = this->_outputData[0]->mutable_device_data();
hipLaunchKernelGGL(( ApplyLeakyForward), dim3(SOOOA_GET_BLOCKS(size)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
inputData, outputData, size, (Dtype)SLPROP(Relu, leaky));
}
template <typename Dtype>
void ReluLayer<Dtype>::applyLeakyBackward() {
int size = this->_outputData[0]->getCountByAxis(0);
const Dtype* inputData = this->_inputData[0]->device_data();
const Dtype* outputGrad = this->_outputData[0]->device_grad();
Dtype* inputGrad = this->_inputData[0]->mutable_device_grad();
const double leaky = SLPROP(Relu, leaky);
hipLaunchKernelGGL(( ApplyLeakyBackward), dim3(SOOOA_GET_BLOCKS(size)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
inputData, outputGrad, inputGrad, size, leaky);
}
template void ReluLayer<float>::applyLeakyForward();
template void ReluLayer<float>::applyLeakyBackward();
| 4b0bfbb12210f7d8d49d101210dd204f534fbf52.cu | /**
* @file ReluLayer_device.cu
* @date 2017-02-15
* @author moonhoen lee
* @brief
* @details
*/
#include "cuda_runtime.h"
#include "ReluLayer.h"
#include "Network.h"
#include "SysLog.h"
#include "StdOutLog.h"
#include "ColdLog.h"
#include "Perf.h"
#include "PropMgmt.h"
using namespace std;
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
//
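// Leaky ReLU: f(x) = x for x >= 0 and leaky * x otherwise; the backward kernel scales the
// incoming output gradient by the same per-element factor.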
template <typename Dtype>
__global__ void ApplyLeakyForward(const Dtype* input, Dtype* output, int size, Dtype leaky)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
if (input[idx] < 0)
output[idx] = leaky * input[idx];
else
output[idx] = input[idx];
}
template <typename Dtype>
__global__ void ApplyLeakyBackward(const Dtype* input, const Dtype* outputGrad,
Dtype* inputGrad, int size, const double leaky)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
if (input[idx] < 0)
inputGrad[idx] = leaky * outputGrad[idx];
else
inputGrad[idx] = outputGrad[idx];
}
template <typename Dtype>
void ReluLayer<Dtype>::applyLeakyForward() {
int size = this->_outputData[0]->getCountByAxis(0);
const Dtype* inputData = this->_inputData[0]->device_data();
Dtype* outputData = this->_outputData[0]->mutable_device_data();
ApplyLeakyForward<<<SOOOA_GET_BLOCKS(size), SOOOA_CUDA_NUM_THREADS>>>(
inputData, outputData, size, (Dtype)SLPROP(Relu, leaky));
}
template <typename Dtype>
void ReluLayer<Dtype>::applyLeakyBackward() {
int size = this->_outputData[0]->getCountByAxis(0);
const Dtype* inputData = this->_inputData[0]->device_data();
const Dtype* outputGrad = this->_outputData[0]->device_grad();
Dtype* inputGrad = this->_inputData[0]->mutable_device_grad();
const double leaky = SLPROP(Relu, leaky);
ApplyLeakyBackward<<<SOOOA_GET_BLOCKS(size), SOOOA_CUDA_NUM_THREADS>>>(
inputData, outputGrad, inputGrad, size, leaky);
}
template void ReluLayer<float>::applyLeakyForward();
template void ReluLayer<float>::applyLeakyBackward();
|
e1dcc27c764899f63713c8c01093376f9cc5d46b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void inpoly(const int N, const float *vertx, const float *verty,const int nv, bool *out )
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x<N&&y<N){
bool inpoly=false;
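        // Even-odd (crossing-number) test: toggle inpoly for every polygon edge (j, i) that a
        // horizontal ray cast in +x from the grid point (x, y) crosses.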
for (int i = 0, j = nv-1; i < nv; j = i++) {
if ( ((verty[i]>y) != (verty[j]>y)) &&
//inside verty range
(x < (vertx[j]-vertx[i]) * (y-verty[i]) / (verty[j]-verty[i]) + vertx[i]) )
{
inpoly = !inpoly;
}
}
out[x*N+y]=inpoly;
}
}
| e1dcc27c764899f63713c8c01093376f9cc5d46b.cu | __global__ void inpoly(const int N, const float *vertx, const float *verty,const int nv, bool *out )
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x<N&&y<N){
bool inpoly=false;
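        // Even-odd (crossing-number) test: toggle inpoly for every polygon edge (j, i) that a
        // horizontal ray cast in +x from the grid point (x, y) crosses.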
for (int i = 0, j = nv-1; i < nv; j = i++) {
if ( ((verty[i]>y) != (verty[j]>y)) &&
//inside verty range
(x < (vertx[j]-vertx[i]) * (y-verty[i]) / (verty[j]-verty[i]) + vertx[i]) )
{
inpoly = !inpoly;
}
}
out[x*N+y]=inpoly;
}
}
|
783478287d555addef86f8db3c2e664b3018907b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cutil.h>
#include "CudaAlgorythmBusiness.cuh"
////////////////////////////////////////////////////////////////////////////////
// Reference convolution on the CPU, rows and columns
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Result, float *h_DataA, float *h_Kernel,
int dataW, int dataH, int kernelR);
void convolutionColumnCPU(float *h_Result, float *h_DataA, float *h_Kernel,
int dataW, int dataH, int kernelR);
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution on the CPU
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Result, float *h_Data, float *h_Kernel,
int dataW, int dataH, int kernelR) {
int x, y, k, d;
float sum;
for (y = 0; y < dataH; y++)
for (x = 0; x < dataW; x++) {
sum = 0;
for (k = -kernelR; k <= kernelR; k++) {
d = x + k;
if (d < 0)
d = 0;
if (d >= dataW)
d = dataW - 1;
sum += h_Data[y * dataW + d] * h_Kernel[kernelR - k];
}
h_Result[y * dataW + x] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution on the CPU
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Result, float *h_Data, float *h_Kernel,
int dataW, int dataH, int kernelR) {
int x, y, k, d;
float sum;
for (y = 0; y < dataH; y++)
for (x = 0; x < dataW; x++) {
sum = 0;
for (k = -kernelR; k <= kernelR; k++) {
d = y + k;
if (d < 0)
d = 0;
if (d >= dataH)
d = dataH - 1;
sum += h_Data[d * dataW + x] * h_Kernel[kernelR - k];
}
h_Result[y * dataW + x] = sum;
}
}
//Fast integer multiplication macro
#define IMUL(a, b) __mul24(a, b)
//Input data texture reference
texture<float, 2, hipReadModeElementType> texData;
////////////////////////////////////////////////////////////////////////////////
// kernel configuration
////////////////////////////////////////////////////////////////////////////////
#define KERNEL_RADIUS 1
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__device__ __constant__ float d_Kernel1[KERNEL_W];
__device__ __constant__ float d_Kernel2[KERNEL_W];
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowGPU(float *d_Result, int dataW, int dataH) {
const int ix = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = IMUL(blockDim.y, blockIdx.y) + threadIdx.y;
const float x = (float) ix + 0.5f;
const float y = (float) iy + 0.5f;
if (ix < dataW && iy < dataH) {
float sum = 0;
for (int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
sum += tex2D(texData, x + k, y) * d_Kernel1[KERNEL_RADIUS - k];
d_Result[IMUL(iy, dataW) + ix] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnGPU(float *d_Result, int dataW, int dataH) {
const int ix = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = IMUL(blockDim.y, blockIdx.y) + threadIdx.y;
const float x = (float) ix + 0.5f;
const float y = (float) iy + 0.5f;
if (ix < dataW && iy < dataH) {
float sum = 0;
for (int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
sum += tex2D(texData, x, y + k) * d_Kernel2[KERNEL_RADIUS - k];
d_Result[IMUL(iy, dataW) + ix] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Common host and device functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b) {
return (a % b != 0) ? (a - a % b + b) : a;
}
////////////////////////////////////////////////////////////////////////////////
// GPU convolution
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Reduce problem size to have reasonable emulation time
int DATA_W;
int DATA_H;
int DATA_SIZE;
unsigned int width, height;
int KERNEL_SIZE = KERNEL_W * sizeof(float);
CudaAlgorythmBusiness::CudaAlgorythmBusiness() {
// TODO Auto-generated constructor stub
}
CudaAlgorythmBusiness::~CudaAlgorythmBusiness() {
// TODO Auto-generated destructor stub
}
float* CudaAlgorythmBusiness::GaussCUDA(float *imagen, int ancho, int alto,
float *h_kernel1, float *h_kernel2) {
float *h_DataA, *h_DataB, *h_ResultGPU, *h_Kernelx1,*h_Kernelx2;
hipArray *a_Data;
hipChannelFormatDesc floatTex = hipCreateChannelDesc<float> ();
float *d_Result;
double L1norm, rCPU, rGPU, sum_delta, sum_ref;
int i, x, y;
DATA_W = ancho;
DATA_H = alto;
DATA_SIZE = DATA_W * DATA_H * sizeof(float);
h_DataA = (float *) malloc(DATA_SIZE);
h_DataB = (float *) malloc(DATA_SIZE);
h_ResultGPU = (float *) malloc(DATA_SIZE);
h_Kernelx1 = (float *) malloc(KERNEL_SIZE);
h_Kernelx2 = (float *) malloc(KERNEL_SIZE);
CUDA_SAFE_CALL(hipMallocArray(&a_Data, &floatTex, DATA_W, DATA_H));
CUDA_SAFE_CALL(hipMalloc((void **) &d_Result, DATA_SIZE));
h_Kernelx1 = h_kernel1;
h_Kernelx2 = h_kernel2;
h_DataA = imagen;
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_Kernel1, h_Kernelx1, KERNEL_SIZE));
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_Kernel2, h_Kernelx2, KERNEL_SIZE));
CUDA_SAFE_CALL(
hipMemcpyToArray(a_Data, 0, 0, h_DataA, DATA_SIZE,
hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipBindTextureToArray(texData, a_Data));
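    // Separable filter: convolve the rows reading from the texture, copy the intermediate
    // result back into the texture array, then convolve the columns.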
    //the block width must be a multiple of the maximum coalesced-write width
    //so that the writes in convolutionRowGPU and convolutionColumnGPU are coalesced
dim3 threadBlock(16, 12);
dim3
blockGrid(iDivUp(DATA_W, threadBlock.x),
iDivUp(DATA_H, threadBlock.y));
CUDA_SAFE_CALL(hipDeviceSynchronize());
hipLaunchKernelGGL(( convolutionRowGPU), dim3(blockGrid), dim3(threadBlock), 0, 0,
d_Result,
DATA_W,
DATA_H
);
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(
hipMemcpyToArray(a_Data, 0, 0, d_Result, DATA_SIZE,
hipMemcpyDeviceToDevice));
CUDA_SAFE_CALL(hipDeviceSynchronize());
hipLaunchKernelGGL(( convolutionColumnGPU), dim3(blockGrid), dim3(threadBlock), 0, 0,
d_Result,
DATA_W,
DATA_H
);
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(
hipMemcpy(h_ResultGPU, d_Result, DATA_SIZE, hipMemcpyDeviceToHost));
convolutionRowCPU(h_DataB, h_DataA, h_Kernelx1, DATA_W, DATA_H,
KERNEL_RADIUS);
CUDA_SAFE_CALL(hipUnbindTexture(texData));
CUDA_SAFE_CALL(hipFree(d_Result));
CUDA_SAFE_CALL(hipFreeArray(a_Data));
free(h_DataB);
free(h_Kernelx1);
free(h_Kernelx2);
return h_ResultGPU;
}
| 783478287d555addef86f8db3c2e664b3018907b.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cutil.h>
#include "CudaAlgorythmBusiness.cuh"
////////////////////////////////////////////////////////////////////////////////
// Reference convolution on the CPU, rows and columns
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Result, float *h_DataA, float *h_Kernel,
int dataW, int dataH, int kernelR);
void convolutionColumnCPU(float *h_Result, float *h_DataA, float *h_Kernel,
int dataW, int dataH, int kernelR);
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution on the CPU
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Result, float *h_Data, float *h_Kernel,
int dataW, int dataH, int kernelR) {
int x, y, k, d;
float sum;
for (y = 0; y < dataH; y++)
for (x = 0; x < dataW; x++) {
sum = 0;
for (k = -kernelR; k <= kernelR; k++) {
d = x + k;
if (d < 0)
d = 0;
if (d >= dataW)
d = dataW - 1;
sum += h_Data[y * dataW + d] * h_Kernel[kernelR - k];
}
h_Result[y * dataW + x] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution on the CPU
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Result, float *h_Data, float *h_Kernel,
int dataW, int dataH, int kernelR) {
int x, y, k, d;
float sum;
for (y = 0; y < dataH; y++)
for (x = 0; x < dataW; x++) {
sum = 0;
for (k = -kernelR; k <= kernelR; k++) {
d = y + k;
if (d < 0)
d = 0;
if (d >= dataH)
d = dataH - 1;
sum += h_Data[d * dataW + x] * h_Kernel[kernelR - k];
}
h_Result[y * dataW + x] = sum;
}
}
//Fast integer multiplication macro
#define IMUL(a, b) __mul24(a, b)
//Input data texture reference
texture<float, 2, cudaReadModeElementType> texData;
////////////////////////////////////////////////////////////////////////////////
// kernel configuration
////////////////////////////////////////////////////////////////////////////////
#define KERNEL_RADIUS 1
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__device__ __constant__ float d_Kernel1[KERNEL_W];
__device__ __constant__ float d_Kernel2[KERNEL_W];
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowGPU(float *d_Result, int dataW, int dataH) {
const int ix = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = IMUL(blockDim.y, blockIdx.y) + threadIdx.y;
const float x = (float) ix + 0.5f;
const float y = (float) iy + 0.5f;
if (ix < dataW && iy < dataH) {
float sum = 0;
for (int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
sum += tex2D(texData, x + k, y) * d_Kernel1[KERNEL_RADIUS - k];
d_Result[IMUL(iy, dataW) + ix] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnGPU(float *d_Result, int dataW, int dataH) {
const int ix = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = IMUL(blockDim.y, blockIdx.y) + threadIdx.y;
const float x = (float) ix + 0.5f;
const float y = (float) iy + 0.5f;
if (ix < dataW && iy < dataH) {
float sum = 0;
for (int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
sum += tex2D(texData, x, y + k) * d_Kernel2[KERNEL_RADIUS - k];
d_Result[IMUL(iy, dataW) + ix] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Common host and device functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b) {
return (a % b != 0) ? (a - a % b + b) : a;
}
////////////////////////////////////////////////////////////////////////////////
// GPU convolution
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Reduce problem size to have reasonable emulation time
int DATA_W;
int DATA_H;
int DATA_SIZE;
unsigned int width, height;
int KERNEL_SIZE = KERNEL_W * sizeof(float);
CudaAlgorythmBusiness::CudaAlgorythmBusiness() {
// TODO Auto-generated constructor stub
}
CudaAlgorythmBusiness::~CudaAlgorythmBusiness() {
// TODO Auto-generated destructor stub
}
float* CudaAlgorythmBusiness::GaussCUDA(float *imagen, int ancho, int alto,
float *h_kernel1, float *h_kernel2) {
float *h_DataA, *h_DataB, *h_ResultGPU, *h_Kernelx1,*h_Kernelx2;
cudaArray *a_Data;
cudaChannelFormatDesc floatTex = cudaCreateChannelDesc<float> ();
float *d_Result;
double L1norm, rCPU, rGPU, sum_delta, sum_ref;
int i, x, y;
DATA_W = ancho;
DATA_H = alto;
DATA_SIZE = DATA_W * DATA_H * sizeof(float);
h_DataA = (float *) malloc(DATA_SIZE);
h_DataB = (float *) malloc(DATA_SIZE);
h_ResultGPU = (float *) malloc(DATA_SIZE);
h_Kernelx1 = (float *) malloc(KERNEL_SIZE);
h_Kernelx2 = (float *) malloc(KERNEL_SIZE);
CUDA_SAFE_CALL(cudaMallocArray(&a_Data, &floatTex, DATA_W, DATA_H));
CUDA_SAFE_CALL(cudaMalloc((void **) &d_Result, DATA_SIZE));
h_Kernelx1 = h_kernel1;
h_Kernelx2 = h_kernel2;
h_DataA = imagen;
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_Kernel1, h_Kernelx1, KERNEL_SIZE));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_Kernel2, h_Kernelx2, KERNEL_SIZE));
CUDA_SAFE_CALL(
cudaMemcpyToArray(a_Data, 0, 0, h_DataA, DATA_SIZE,
cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaBindTextureToArray(texData, a_Data));
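    // Separable filter: convolve the rows reading from the texture, copy the intermediate
    // result back into the texture array, then convolve the columns.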
    //the block width must be a multiple of the maximum coalesced-write width
    //so that the writes in convolutionRowGPU and convolutionColumnGPU are coalesced
dim3 threadBlock(16, 12);
dim3
blockGrid(iDivUp(DATA_W, threadBlock.x),
iDivUp(DATA_H, threadBlock.y));
CUDA_SAFE_CALL(cudaThreadSynchronize());
convolutionRowGPU<<<blockGrid, threadBlock>>>(
d_Result,
DATA_W,
DATA_H
);
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUDA_SAFE_CALL(
cudaMemcpyToArray(a_Data, 0, 0, d_Result, DATA_SIZE,
cudaMemcpyDeviceToDevice));
CUDA_SAFE_CALL(cudaThreadSynchronize());
convolutionColumnGPU<<<blockGrid, threadBlock>>>(
d_Result,
DATA_W,
DATA_H
);
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUDA_SAFE_CALL(
cudaMemcpy(h_ResultGPU, d_Result, DATA_SIZE, cudaMemcpyDeviceToHost));
convolutionRowCPU(h_DataB, h_DataA, h_Kernelx1, DATA_W, DATA_H,
KERNEL_RADIUS);
CUDA_SAFE_CALL(cudaUnbindTexture(texData));
CUDA_SAFE_CALL(cudaFree(d_Result));
CUDA_SAFE_CALL(cudaFreeArray(a_Data));
free(h_DataB);
free(h_Kernelx1);
free(h_Kernelx2);
return h_ResultGPU;
}
|
b88c2af49612a86ee3042b690e103de1c28fa635.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "gpu.h"
namespace fastgm {
namespace regularise_kernel {
    __global__ void update_gradient(device_array<float> grad, device_array<float> w, float lambda, device_scalar<float> result) {
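        // Serial reduction: every launched thread would repeat the full loop and the accumulation
        // into result is not atomic, so this kernel is meant to be launched with a single thread.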
for (int i = 0; i < w.size(); i++) {
result.value() += w[i] * w[i];
grad[i] += 2 * lambda * w[i];
}
result.value() *= lambda;
}
}
}
| b88c2af49612a86ee3042b690e103de1c28fa635.cu | #pragma once
#include "gpu.h"
namespace fastgm {
namespace regularise_kernel {
    __global__ void update_gradient(device_array<float> grad, device_array<float> w, float lambda, device_scalar<float> result) {
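        // Serial reduction: every launched thread would repeat the full loop and the accumulation
        // into result is not atomic, so this kernel is meant to be launched with a single thread.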
for (int i = 0; i < w.size(); i++) {
result.value() += w[i] * w[i];
grad[i] += 2 * lambda * w[i];
}
result.value() *= lambda;
}
}
}
|
70d89ced299b531f347e65c42847f7e86a6235e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/common_cudnn.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_op.h"
#include "caffe2/operators/conv_op_cache_cudnn.h"
#include "caffe2/operators/conv_pool_op_base.h"
namespace caffe2 {
// Modified from TensorFlow,
// https://github.com/tensorflow/tensorflow/blob/4cb482dc3e0424c3d658ba373a6354dded6a32df/tensorflow/core/kernels/depthwise_conv_op_gpu.cu.cc
// A Cuda kernel to compute the depthwise convolution forward pass
// in NCHW format.
struct DepthwiseArgs {
// Input layer dimensions
int batch{0};
int in_rows{0};
int in_cols{0};
int in_depth{0};
int filter_rows{0};
int filter_cols{0};
int stride{0};
int pad_rows{0};
int pad_cols{0};
// Output layer dimensions
int out_rows{0};
int out_cols{0};
int out_depth{0};
};
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight>
__global__ void DepthwiseConv2dGPUKernelNCHW(
const DepthwiseArgs args,
const T* input,
const T* filter,
T* output,
int num_outputs) {
const int in_rows = args.in_rows;
const int in_cols = args.in_cols;
const int in_depth = args.in_depth;
const int filter_rows = kKnownFilterHeight;
const int filter_cols = kKnownFilterWidth;
const int stride = args.stride;
const int pad_rows = args.pad_rows;
const int pad_cols = args.pad_cols;
const int out_rows = args.out_rows;
const int out_cols = args.out_cols;
const int out_depth = args.out_depth;
CUDA_1D_KERNEL_LOOP(thread_id, num_outputs) {
const int OW = thread_id % out_cols;
const int OH = (thread_id / out_cols) % out_rows;
const int OC = (thread_id / out_cols / out_rows) % out_depth;
const int OB = thread_id / out_cols / out_rows / out_depth;
const int in_d = OC;
const int input_offset_temp = (OB * in_depth + OC) * (in_rows * in_cols);
const int input_row_start = OH * stride - pad_rows;
const int input_col_start = OW * stride - pad_cols;
const int input_row_end = input_row_start + filter_rows;
const int input_col_end = input_col_start + filter_cols;
const float* filter_start = filter + in_d * filter_rows * filter_cols;
T sum = 0;
if (input_row_start >= 0 && input_col_start >= 0 &&
input_row_end < in_rows && input_col_end < in_cols) {
// Loop that doesn't need to check for boundary conditions.
#pragma unroll
for (int f_r = 0; f_r < filter_rows; ++f_r) {
const int in_r = input_row_start + f_r;
const float* filter_offset = filter_start + filter_cols * f_r;
#pragma unroll
for (int f_c = 0; f_c < filter_cols; ++f_c) {
const int in_c = input_col_start + f_c;
const int input_offset =
(input_offset_temp) + (in_r * in_cols) + in_c;
#if __CUDA_ARCH__ >= 350
sum += __ldg(input + input_offset) * __ldg(filter_offset + f_c);
#else
sum += input[input_offset] * filter_offset[f_c];
#endif
}
}
} else {
// Loop that needs to check for boundary conditions.
#pragma unroll
for (int f_r = 0; f_r < filter_rows; ++f_r) {
const int in_r = input_row_start + f_r;
const float* filter_offset = filter_start + filter_cols * f_r;
#pragma unroll
for (int f_c = 0; f_c < filter_cols; ++f_c) {
const int in_c = input_col_start + f_c;
if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols) {
const int in_c = input_col_start + f_c;
const int input_offset =
(input_offset_temp) + (in_r * in_cols) + in_c;
#if __CUDA_ARCH__ >= 350
sum += __ldg(input + input_offset) * __ldg(filter_offset + f_c);
#else
sum += input[input_offset] * filter_offset[f_c];
#endif
}
}
}
}
output[thread_id] = sum;
}
}
// A Cuda kernel to compute the depthwise convolution backprop w.r.t. filter.
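// Each thread handles one element of the output gradient and atomically accumulates its
// contributions into filter_backprop, since a filter tap is shared by all spatial positions.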
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight>
__global__ void DepthwiseConv2dBackpropFilterGPUKernelNCHW(
const DepthwiseArgs args,
const T* out_backprop,
const T* input,
T* filter_backprop,
int num_out_backprop) {
const int in_rows = args.in_rows;
const int in_cols = args.in_cols;
const int in_depth = args.in_depth;
const int filter_rows = kKnownFilterHeight;
const int filter_cols = kKnownFilterWidth;
const int stride = args.stride;
const int pad_rows = args.pad_rows;
const int pad_cols = args.pad_cols;
const int out_rows = args.out_rows;
const int out_cols = args.out_cols;
const int out_depth = args.out_depth;
CUDA_1D_KERNEL_LOOP(thread_id, num_out_backprop) {
// Compute the indexes of this thread in the output.
const int OW = thread_id % out_cols;
const int OH = (thread_id / out_cols) % out_rows;
const int OC = (thread_id / out_cols / out_rows) % out_depth;
const int OB = thread_id / out_cols / out_rows / out_depth;
// Compute the input depth and the index of depth multiplier.
const int in_d = OC;
// Decide if all input is valid, if yes, we can skip the boundary checks
// for each input.
const int in_r_start = OH * stride - pad_rows;
const int in_c_start = OW * stride - pad_cols;
const int in_r_end = in_r_start + filter_rows;
const int in_c_end = in_c_start + filter_cols;
const int out_backprop_offset = (OB * out_depth * out_rows * out_cols) +
(OC * out_rows * out_cols) + (OH * out_cols) + (OW);
#if __CUDA_ARCH__ >= 350
const T out_bp = __ldg(out_backprop + out_backprop_offset);
#else
const T out_bp = out_backprop[out_backprop_offset];
#endif
if (in_r_start >= 0 && in_c_start >= 0 && in_r_end < in_rows &&
in_c_end < in_cols) {
#pragma unroll
for (int f_r = 0; f_r < filter_rows; ++f_r) {
const int in_r = in_r_start + f_r;
// Avoid repeated computation.
const int input_offset_temp = (OB * in_depth * in_rows * in_cols) +
(OC * in_rows * in_cols) + (in_r * in_cols);
#pragma unroll
for (int f_c = 0; f_c < filter_cols; ++f_c) {
const int in_c = in_c_start + f_c;
const int input_offset = input_offset_temp + in_c;
#if __CUDA_ARCH__ >= 350
T partial_sum = __ldg(input + input_offset) * out_bp;
#else
T partial_sum = input[input_offset] * out_bp;
#endif
T* addr = filter_backprop + (in_d * filter_rows * filter_cols) +
(f_c + filter_cols * f_r);
atomicAdd(addr, partial_sum);
}
}
} else {
#pragma unroll
for (int f_r = 0; f_r < filter_rows; ++f_r) {
const int in_r = in_r_start + f_r;
// Avoid repeated computation.
const int input_offset_temp = (OB * in_depth * in_rows * in_cols) +
(OC * in_rows * in_cols) + (in_r * in_cols);
#pragma unroll
for (int f_c = 0; f_c < filter_cols; ++f_c) {
const int in_c = in_c_start + f_c;
if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols) {
const int input_offset = input_offset_temp + in_c;
#if __CUDA_ARCH__ >= 350
T partial_sum = __ldg(input + input_offset) * out_bp;
#else
T partial_sum = input[input_offset] * out_bp;
#endif
T* addr = filter_backprop + (in_d * filter_rows * filter_cols) +
(f_c + filter_cols * f_r);
atomicAdd(addr, partial_sum);
}
}
}
}
}
}
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight>
__global__ void DepthwiseConv2dBackpropInputGPUKernelNCHW(
const DepthwiseArgs args,
const T* out_backprop,
const T* filter,
T* in_backprop,
int num_in_backprop) {
const int in_rows = args.in_rows;
const int in_cols = args.in_cols;
const int in_depth = args.in_depth;
const int filter_rows = kKnownFilterHeight;
const int filter_cols = kKnownFilterWidth;
const int stride = args.stride;
const int pad_rows = args.pad_rows;
const int pad_cols = args.pad_cols;
const int out_rows = args.out_rows;
const int out_cols = args.out_cols;
const int out_depth = args.out_depth;
// TODO(vrv): Consider assigning threads to output and using
// atomics for accumulation, similar to the filter case.
CUDA_1D_KERNEL_LOOP(thread_id, num_in_backprop) {
const int IW = thread_id % in_cols;
const int IH = (thread_id / in_cols) % in_rows;
const int IC = (thread_id / in_cols / in_rows) % in_depth;
const int IB = thread_id / in_cols / in_rows / in_depth;
T sum = 0;
const int out_r_start =
max(0, (IH - filter_rows + pad_rows + stride) / stride);
const int out_r_end = min(out_rows - 1, (IH + pad_rows) / stride);
const int out_c_start =
max(0, (IW - filter_cols + pad_cols + stride) / stride);
const int out_c_end = min(out_cols - 1, (IW + pad_cols) / stride);
#pragma unroll
for (int out_r = out_r_start; out_r <= out_r_end; ++out_r) {
const int f_r = IH + pad_rows - out_r * stride;
for (int out_c = out_c_start; out_c <= out_c_end; ++out_c) {
const int f_c = IW + pad_cols - out_c * stride;
const int filter_offset =
IC * filter_rows * filter_cols + f_r * filter_cols + f_c;
const int out_backprop_offset = (IB * out_depth * out_rows * out_cols) +
(IC * out_rows * out_cols) + (out_r * out_cols) + (out_c);
#if __CUDA_ARCH__ >= 350
sum += __ldg(out_backprop + out_backprop_offset) *
__ldg(filter + filter_offset);
#else
sum += out_backprop[out_backprop_offset] * filter[filter_offset];
#endif
}
}
const int in_backprop_offset = (IB * in_rows * in_cols * in_depth) +
(IC * in_rows * in_cols) + (IH * in_cols) + (IW);
in_backprop[in_backprop_offset] = sum;
}
}
class Depthwise3x3ConvOp final : public ConvPoolOpBase<CUDAContext> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);
Depthwise3x3ConvOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
OPERATOR_NEEDS_FEATURE(
this->order_ == StorageOrder::NCHW,
"Depthwise3x3ConvOp only supports NCHW order");
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bias_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_for_bias_));
}
~Depthwise3x3ConvOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bias_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_for_bias_));
}
bool RunOnDeviceWithOrderNCHW() override {
const Tensor& X = Input(0);
auto& filter = Input(1);
const int N = X.dim32(0), C = X.dim32(1);
CAFFE_ENFORCE_EQ(X.dim(), filter.dim());
const int M = filter.dim32(0);
CAFFE_ENFORCE_EQ(M, X.dim32(1));
CAFFE_ENFORCE_EQ(C, X.dim32(1));
CAFFE_ENFORCE_EQ(C, this->group_);
CAFFE_ENFORCE_GT(this->group_, 1);
CAFFE_ENFORCE_EQ(this->kernel_w(), 3);
CAFFE_ENFORCE_EQ(this->kernel_h(), 3);
CAFFE_ENFORCE_EQ(this->stride_h(), this->stride_w());
auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, filter.dim32(0));
Tensor* Y = Output(0, sizes, at::dtype<float>());
DepthwiseArgs args;
args.batch = X.dim32(0);
args.in_rows = X.dim32(2);
args.in_cols = X.dim32(3);
args.in_depth = X.dim32(1);
args.filter_cols = 3;
args.filter_rows = 3;
args.stride = this->stride_w();
args.pad_rows = this->pad_t();
args.pad_cols = this->pad_l();
args.out_rows = Y->dim32(2);
args.out_cols = Y->dim32(3);
args.out_depth = Y->dim32(1);
hipLaunchKernelGGL(( DepthwiseConv2dGPUKernelNCHW<float, 3, 3>)
, dim3(CAFFE_GET_BLOCKS(Y->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
args,
X.data<float>(),
filter.data<float>(),
Y->mutable_data<float>(),
Y->size());
C10_HIP_KERNEL_LAUNCH_CHECK();
if (InputSize() == 3) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
bias_desc_,
GetCudnnTensorFormat(order_),
cudnnTypeWrapper<float>::type,
1,
M,
1,
1));
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
top_desc_for_bias_,
GetCudnnTensorFormat(order_),
cudnnTypeWrapper<float>::type,
Y->dim32(0),
M,
Y->dim32(2),
Y->dim32(3)));
auto& bias = Input(2);
CAFFE_ENFORCE_EQ(bias.dim(), 1);
CAFFE_ENFORCE_EQ(bias.dim32(0), M);
CUDNN_ENFORCE(cudnnAddTensor(
cudnn_wrapper_.inline_cudnn_handle(),
cudnnTypeWrapper<float>::kOne(),
bias_desc_,
bias.data<float>(),
cudnnTypeWrapper<float>::kOne(),
top_desc_for_bias_,
Y->mutable_data<float>()));
}
return true;
}
private:
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bias_desc_;
cudnnTensorDescriptor_t top_desc_for_bias_;
};
class Depthwise3x3ConvGradientOp final : public ConvPoolOpBase<CUDAContext> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);
Depthwise3x3ConvGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_),
no_bias_(OperatorBase::GetSingleArgument<int>("no_bias", 0)) {
CAFFE_ENFORCE(
!(no_bias_ && OutputSize() == 3),
"If bias is not present, you should not have 3 grad output.");
OPERATOR_NEEDS_FEATURE(
this->order_ == StorageOrder::NCHW,
"Depthwise3x3ConvGradientOp only supports NCHW order");
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bias_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_for_bias_));
}
~Depthwise3x3ConvGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bias_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_for_bias_));
}
bool RunOnDeviceWithOrderNCHW() override {
auto& X = Input(INPUT);
auto& filter = Input(FILTER);
auto& dY = Input(OUTPUT_GRAD);
const int N = X.dim32(0), C = X.dim32(1);
const vector<int> input_dims = this->GetDims(X);
ConvPoolOpBase<CUDAContext>::ComputePads(input_dims);
CAFFE_ENFORCE_EQ(X.dim(), filter.dim());
const int M = filter.dim32(0);
CAFFE_ENFORCE(filter.dim32(1) * group_ == C);
CAFFE_ENFORCE(M % group_ == 0);
auto* dfilter = Output(FILTER_GRAD, filter.sizes(), at::dtype<float>());
DepthwiseArgs args;
args.batch = X.dim32(0);
args.in_rows = X.dim32(2);
args.in_cols = X.dim32(3);
args.in_depth = X.dim32(1);
args.filter_cols = 3;
args.filter_rows = 3;
args.stride = this->stride_w();
args.pad_rows = this->pad_t();
args.pad_cols = this->pad_l();
args.out_rows = dY.dim32(2);
args.out_cols = dY.dim32(3);
args.out_depth = dY.dim32(1);
CAFFE_ENFORCE(OutputSize() == 3 || (no_bias_ && (OutputSize() == 2)));
auto* dX = Output(
no_bias_ ? BIAS_OR_INPUT_GRAD : INPUT_GRAD,
X.sizes(),
at::dtype<float>());
math::Set<float, CUDAContext>(
dfilter->size(), 0, dfilter->mutable_data<float>(), &context_);
hipLaunchKernelGGL(( DepthwiseConv2dBackpropFilterGPUKernelNCHW<float, 3, 3>)
, dim3(CAFFE_GET_BLOCKS(dY.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
args,
dY.data<float>(),
X.data<float>(),
dfilter->mutable_data<float>(),
dY.size());
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( DepthwiseConv2dBackpropInputGPUKernelNCHW<float, 3, 3>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
args,
dY.data<float>(),
filter.data<float>(),
dX->mutable_data<float>(),
dX->size());
C10_HIP_KERNEL_LAUNCH_CHECK();
if (!no_bias_) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
bias_desc_,
GetCudnnTensorFormat(order_),
cudnnTypeWrapper<float>::type,
1,
M,
1,
1));
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
top_desc_for_bias_,
GetCudnnTensorFormat(order_),
cudnnTypeWrapper<float>::type,
dY.dim32(0),
M,
dY.dim32(2),
dY.dim32(3)));
auto* dbias = Output(BIAS_OR_INPUT_GRAD, {M}, at::dtype<float>());
CUDNN_ENFORCE(cudnnConvolutionBackwardBias(
cudnn_wrapper_.inline_cudnn_handle(),
cudnnTypeWrapper<float>::kOne(),
top_desc_for_bias_,
dY.data<float>(),
cudnnTypeWrapper<float>::kZero(),
bias_desc_,
dbias->mutable_data<float>()));
}
return true;
}
private:
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bias_desc_;
cudnnTensorDescriptor_t top_desc_for_bias_;
bool no_bias_;
INPUT_TAGS(INPUT, FILTER, OUTPUT_GRAD);
OUTPUT_TAGS(FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD);
};
REGISTER_CUDA_OPERATOR_WITH_ENGINE(Conv, DEPTHWISE_3x3, Depthwise3x3ConvOp);
REGISTER_CUDA_OPERATOR_WITH_ENGINE(
ConvGradient,
DEPTHWISE_3x3,
Depthwise3x3ConvGradientOp);
} // namespace caffe2
| 70d89ced299b531f347e65c42847f7e86a6235e0.cu | #include "caffe2/core/common_cudnn.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_op.h"
#include "caffe2/operators/conv_op_cache_cudnn.h"
#include "caffe2/operators/conv_pool_op_base.h"
namespace caffe2 {
// Modified from TensorFlow,
// https://github.com/tensorflow/tensorflow/blob/4cb482dc3e0424c3d658ba373a6354dded6a32df/tensorflow/core/kernels/depthwise_conv_op_gpu.cu.cc
// A Cuda kernel to compute the depthwise convolution forward pass
// in NCHW format.
struct DepthwiseArgs {
// Input layer dimensions
int batch{0};
int in_rows{0};
int in_cols{0};
int in_depth{0};
int filter_rows{0};
int filter_cols{0};
int stride{0};
int pad_rows{0};
int pad_cols{0};
// Output layer dimensions
int out_rows{0};
int out_cols{0};
int out_depth{0};
};
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight>
__global__ void DepthwiseConv2dGPUKernelNCHW(
const DepthwiseArgs args,
const T* input,
const T* filter,
T* output,
int num_outputs) {
const int in_rows = args.in_rows;
const int in_cols = args.in_cols;
const int in_depth = args.in_depth;
const int filter_rows = kKnownFilterHeight;
const int filter_cols = kKnownFilterWidth;
const int stride = args.stride;
const int pad_rows = args.pad_rows;
const int pad_cols = args.pad_cols;
const int out_rows = args.out_rows;
const int out_cols = args.out_cols;
const int out_depth = args.out_depth;
CUDA_1D_KERNEL_LOOP(thread_id, num_outputs) {
const int OW = thread_id % out_cols;
const int OH = (thread_id / out_cols) % out_rows;
const int OC = (thread_id / out_cols / out_rows) % out_depth;
const int OB = thread_id / out_cols / out_rows / out_depth;
const int in_d = OC;
const int input_offset_temp = (OB * in_depth + OC) * (in_rows * in_cols);
const int input_row_start = OH * stride - pad_rows;
const int input_col_start = OW * stride - pad_cols;
const int input_row_end = input_row_start + filter_rows;
const int input_col_end = input_col_start + filter_cols;
const float* filter_start = filter + in_d * filter_rows * filter_cols;
T sum = 0;
if (input_row_start >= 0 && input_col_start >= 0 &&
input_row_end < in_rows && input_col_end < in_cols) {
// Loop that doesn't need to check for boundary conditions.
#pragma unroll
for (int f_r = 0; f_r < filter_rows; ++f_r) {
const int in_r = input_row_start + f_r;
const float* filter_offset = filter_start + filter_cols * f_r;
#pragma unroll
for (int f_c = 0; f_c < filter_cols; ++f_c) {
const int in_c = input_col_start + f_c;
const int input_offset =
(input_offset_temp) + (in_r * in_cols) + in_c;
#if __CUDA_ARCH__ >= 350
sum += __ldg(input + input_offset) * __ldg(filter_offset + f_c);
#else
sum += input[input_offset] * filter_offset[f_c];
#endif
}
}
} else {
// Loop that needs to check for boundary conditions.
#pragma unroll
for (int f_r = 0; f_r < filter_rows; ++f_r) {
const int in_r = input_row_start + f_r;
const float* filter_offset = filter_start + filter_cols * f_r;
#pragma unroll
for (int f_c = 0; f_c < filter_cols; ++f_c) {
const int in_c = input_col_start + f_c;
if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols) {
const int in_c = input_col_start + f_c;
const int input_offset =
(input_offset_temp) + (in_r * in_cols) + in_c;
#if __CUDA_ARCH__ >= 350
sum += __ldg(input + input_offset) * __ldg(filter_offset + f_c);
#else
sum += input[input_offset] * filter_offset[f_c];
#endif
}
}
}
}
output[thread_id] = sum;
}
}
// A Cuda kernel to compute the depthwise convolution backprop w.r.t. filter.
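// Each thread handles one element of the output gradient and atomically accumulates its
// contributions into filter_backprop, since a filter tap is shared by all spatial positions.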
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight>
__global__ void DepthwiseConv2dBackpropFilterGPUKernelNCHW(
const DepthwiseArgs args,
const T* out_backprop,
const T* input,
T* filter_backprop,
int num_out_backprop) {
const int in_rows = args.in_rows;
const int in_cols = args.in_cols;
const int in_depth = args.in_depth;
const int filter_rows = kKnownFilterHeight;
const int filter_cols = kKnownFilterWidth;
const int stride = args.stride;
const int pad_rows = args.pad_rows;
const int pad_cols = args.pad_cols;
const int out_rows = args.out_rows;
const int out_cols = args.out_cols;
const int out_depth = args.out_depth;
CUDA_1D_KERNEL_LOOP(thread_id, num_out_backprop) {
// Compute the indexes of this thread in the output.
const int OW = thread_id % out_cols;
const int OH = (thread_id / out_cols) % out_rows;
const int OC = (thread_id / out_cols / out_rows) % out_depth;
const int OB = thread_id / out_cols / out_rows / out_depth;
// Compute the input depth and the index of depth multiplier.
const int in_d = OC;
// Decide if all input is valid, if yes, we can skip the boundary checks
// for each input.
const int in_r_start = OH * stride - pad_rows;
const int in_c_start = OW * stride - pad_cols;
const int in_r_end = in_r_start + filter_rows;
const int in_c_end = in_c_start + filter_cols;
const int out_backprop_offset = (OB * out_depth * out_rows * out_cols) +
(OC * out_rows * out_cols) + (OH * out_cols) + (OW);
#if __CUDA_ARCH__ >= 350
const T out_bp = __ldg(out_backprop + out_backprop_offset);
#else
const T out_bp = out_backprop[out_backprop_offset];
#endif
if (in_r_start >= 0 && in_c_start >= 0 && in_r_end < in_rows &&
in_c_end < in_cols) {
#pragma unroll
for (int f_r = 0; f_r < filter_rows; ++f_r) {
const int in_r = in_r_start + f_r;
// Avoid repeated computation.
const int input_offset_temp = (OB * in_depth * in_rows * in_cols) +
(OC * in_rows * in_cols) + (in_r * in_cols);
#pragma unroll
for (int f_c = 0; f_c < filter_cols; ++f_c) {
const int in_c = in_c_start + f_c;
const int input_offset = input_offset_temp + in_c;
#if __CUDA_ARCH__ >= 350
T partial_sum = __ldg(input + input_offset) * out_bp;
#else
T partial_sum = input[input_offset] * out_bp;
#endif
T* addr = filter_backprop + (in_d * filter_rows * filter_cols) +
(f_c + filter_cols * f_r);
atomicAdd(addr, partial_sum);
}
}
} else {
#pragma unroll
for (int f_r = 0; f_r < filter_rows; ++f_r) {
const int in_r = in_r_start + f_r;
// Avoid repeated computation.
const int input_offset_temp = (OB * in_depth * in_rows * in_cols) +
(OC * in_rows * in_cols) + (in_r * in_cols);
#pragma unroll
for (int f_c = 0; f_c < filter_cols; ++f_c) {
const int in_c = in_c_start + f_c;
if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols) {
const int input_offset = input_offset_temp + in_c;
#if __CUDA_ARCH__ >= 350
T partial_sum = __ldg(input + input_offset) * out_bp;
#else
T partial_sum = input[input_offset] * out_bp;
#endif
T* addr = filter_backprop + (in_d * filter_rows * filter_cols) +
(f_c + filter_cols * f_r);
atomicAdd(addr, partial_sum);
}
}
}
}
}
}
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight>
__global__ void DepthwiseConv2dBackpropInputGPUKernelNCHW(
const DepthwiseArgs args,
const T* out_backprop,
const T* filter,
T* in_backprop,
int num_in_backprop) {
const int in_rows = args.in_rows;
const int in_cols = args.in_cols;
const int in_depth = args.in_depth;
const int filter_rows = kKnownFilterHeight;
const int filter_cols = kKnownFilterWidth;
const int stride = args.stride;
const int pad_rows = args.pad_rows;
const int pad_cols = args.pad_cols;
const int out_rows = args.out_rows;
const int out_cols = args.out_cols;
const int out_depth = args.out_depth;
// TODO(vrv): Consider assigning threads to output and using
// atomics for accumulation, similar to the filter case.
CUDA_1D_KERNEL_LOOP(thread_id, num_in_backprop) {
const int IW = thread_id % in_cols;
const int IH = (thread_id / in_cols) % in_rows;
const int IC = (thread_id / in_cols / in_rows) % in_depth;
const int IB = thread_id / in_cols / in_rows / in_depth;
T sum = 0;
const int out_r_start =
max(0, (IH - filter_rows + pad_rows + stride) / stride);
const int out_r_end = min(out_rows - 1, (IH + pad_rows) / stride);
const int out_c_start =
max(0, (IW - filter_cols + pad_cols + stride) / stride);
const int out_c_end = min(out_cols - 1, (IW + pad_cols) / stride);
#pragma unroll
for (int out_r = out_r_start; out_r <= out_r_end; ++out_r) {
const int f_r = IH + pad_rows - out_r * stride;
for (int out_c = out_c_start; out_c <= out_c_end; ++out_c) {
const int f_c = IW + pad_cols - out_c * stride;
const int filter_offset =
IC * filter_rows * filter_cols + f_r * filter_cols + f_c;
const int out_backprop_offset = (IB * out_depth * out_rows * out_cols) +
(IC * out_rows * out_cols) + (out_r * out_cols) + (out_c);
#if __CUDA_ARCH__ >= 350
sum += __ldg(out_backprop + out_backprop_offset) *
__ldg(filter + filter_offset);
#else
sum += out_backprop[out_backprop_offset] * filter[filter_offset];
#endif
}
}
const int in_backprop_offset = (IB * in_rows * in_cols * in_depth) +
(IC * in_rows * in_cols) + (IH * in_cols) + (IW);
in_backprop[in_backprop_offset] = sum;
}
}
class Depthwise3x3ConvOp final : public ConvPoolOpBase<CUDAContext> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);
Depthwise3x3ConvOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
OPERATOR_NEEDS_FEATURE(
this->order_ == StorageOrder::NCHW,
"Depthwise3x3ConvOp only supports NCHW order");
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bias_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_for_bias_));
}
~Depthwise3x3ConvOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bias_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_for_bias_));
}
bool RunOnDeviceWithOrderNCHW() override {
const Tensor& X = Input(0);
auto& filter = Input(1);
const int N = X.dim32(0), C = X.dim32(1);
CAFFE_ENFORCE_EQ(X.dim(), filter.dim());
const int M = filter.dim32(0);
CAFFE_ENFORCE_EQ(M, X.dim32(1));
CAFFE_ENFORCE_EQ(C, X.dim32(1));
CAFFE_ENFORCE_EQ(C, this->group_);
CAFFE_ENFORCE_GT(this->group_, 1);
CAFFE_ENFORCE_EQ(this->kernel_w(), 3);
CAFFE_ENFORCE_EQ(this->kernel_h(), 3);
CAFFE_ENFORCE_EQ(this->stride_h(), this->stride_w());
auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, filter.dim32(0));
Tensor* Y = Output(0, sizes, at::dtype<float>());
DepthwiseArgs args;
args.batch = X.dim32(0);
args.in_rows = X.dim32(2);
args.in_cols = X.dim32(3);
args.in_depth = X.dim32(1);
args.filter_cols = 3;
args.filter_rows = 3;
args.stride = this->stride_w();
args.pad_rows = this->pad_t();
args.pad_cols = this->pad_l();
args.out_rows = Y->dim32(2);
args.out_cols = Y->dim32(3);
args.out_depth = Y->dim32(1);
DepthwiseConv2dGPUKernelNCHW<float, 3, 3>
<<<CAFFE_GET_BLOCKS(Y->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
args,
X.data<float>(),
filter.data<float>(),
Y->mutable_data<float>(),
Y->size());
C10_CUDA_KERNEL_LAUNCH_CHECK();
if (InputSize() == 3) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
bias_desc_,
GetCudnnTensorFormat(order_),
cudnnTypeWrapper<float>::type,
1,
M,
1,
1));
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
top_desc_for_bias_,
GetCudnnTensorFormat(order_),
cudnnTypeWrapper<float>::type,
Y->dim32(0),
M,
Y->dim32(2),
Y->dim32(3)));
auto& bias = Input(2);
CAFFE_ENFORCE_EQ(bias.dim(), 1);
CAFFE_ENFORCE_EQ(bias.dim32(0), M);
CUDNN_ENFORCE(cudnnAddTensor(
cudnn_wrapper_.inline_cudnn_handle(),
cudnnTypeWrapper<float>::kOne(),
bias_desc_,
bias.data<float>(),
cudnnTypeWrapper<float>::kOne(),
top_desc_for_bias_,
Y->mutable_data<float>()));
}
return true;
}
private:
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bias_desc_;
cudnnTensorDescriptor_t top_desc_for_bias_;
};
class Depthwise3x3ConvGradientOp final : public ConvPoolOpBase<CUDAContext> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);
Depthwise3x3ConvGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_),
no_bias_(OperatorBase::GetSingleArgument<int>("no_bias", 0)) {
CAFFE_ENFORCE(
!(no_bias_ && OutputSize() == 3),
"If bias is not present, you should not have 3 grad output.");
OPERATOR_NEEDS_FEATURE(
this->order_ == StorageOrder::NCHW,
"Depthwise3x3ConvGradientOp only supports NCHW order");
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bias_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_for_bias_));
}
~Depthwise3x3ConvGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bias_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_for_bias_));
}
bool RunOnDeviceWithOrderNCHW() override {
auto& X = Input(INPUT);
auto& filter = Input(FILTER);
auto& dY = Input(OUTPUT_GRAD);
const int N = X.dim32(0), C = X.dim32(1);
const vector<int> input_dims = this->GetDims(X);
ConvPoolOpBase<CUDAContext>::ComputePads(input_dims);
CAFFE_ENFORCE_EQ(X.dim(), filter.dim());
const int M = filter.dim32(0);
CAFFE_ENFORCE(filter.dim32(1) * group_ == C);
CAFFE_ENFORCE(M % group_ == 0);
auto* dfilter = Output(FILTER_GRAD, filter.sizes(), at::dtype<float>());
DepthwiseArgs args;
args.batch = X.dim32(0);
args.in_rows = X.dim32(2);
args.in_cols = X.dim32(3);
args.in_depth = X.dim32(1);
args.filter_cols = 3;
args.filter_rows = 3;
args.stride = this->stride_w();
args.pad_rows = this->pad_t();
args.pad_cols = this->pad_l();
args.out_rows = dY.dim32(2);
args.out_cols = dY.dim32(3);
args.out_depth = dY.dim32(1);
CAFFE_ENFORCE(OutputSize() == 3 || (no_bias_ && (OutputSize() == 2)));
auto* dX = Output(
no_bias_ ? BIAS_OR_INPUT_GRAD : INPUT_GRAD,
X.sizes(),
at::dtype<float>());
math::Set<float, CUDAContext>(
dfilter->size(), 0, dfilter->mutable_data<float>(), &context_);
DepthwiseConv2dBackpropFilterGPUKernelNCHW<float, 3, 3>
<<<CAFFE_GET_BLOCKS(dY.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
args,
dY.data<float>(),
X.data<float>(),
dfilter->mutable_data<float>(),
dY.size());
C10_CUDA_KERNEL_LAUNCH_CHECK();
DepthwiseConv2dBackpropInputGPUKernelNCHW<float, 3, 3>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
args,
dY.data<float>(),
filter.data<float>(),
dX->mutable_data<float>(),
dX->size());
C10_CUDA_KERNEL_LAUNCH_CHECK();
if (!no_bias_) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
bias_desc_,
GetCudnnTensorFormat(order_),
cudnnTypeWrapper<float>::type,
1,
M,
1,
1));
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
top_desc_for_bias_,
GetCudnnTensorFormat(order_),
cudnnTypeWrapper<float>::type,
dY.dim32(0),
M,
dY.dim32(2),
dY.dim32(3)));
auto* dbias = Output(BIAS_OR_INPUT_GRAD, {M}, at::dtype<float>());
CUDNN_ENFORCE(cudnnConvolutionBackwardBias(
cudnn_wrapper_.inline_cudnn_handle(),
cudnnTypeWrapper<float>::kOne(),
top_desc_for_bias_,
dY.data<float>(),
cudnnTypeWrapper<float>::kZero(),
bias_desc_,
dbias->mutable_data<float>()));
}
return true;
}
private:
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bias_desc_;
cudnnTensorDescriptor_t top_desc_for_bias_;
bool no_bias_;
INPUT_TAGS(INPUT, FILTER, OUTPUT_GRAD);
OUTPUT_TAGS(FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD);
};
REGISTER_CUDA_OPERATOR_WITH_ENGINE(Conv, DEPTHWISE_3x3, Depthwise3x3ConvOp);
REGISTER_CUDA_OPERATOR_WITH_ENGINE(
ConvGradient,
DEPTHWISE_3x3,
Depthwise3x3ConvGradientOp);
} // namespace caffe2
|
1b19dbb269868db200f3f363c31cc0db17e98bd8.hip | // !!! This is a file automatically generated by hipify!!!
//jacobi7.cu
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <jacobi7_cuda_shared_only.h>
#include <jacobi7.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
int main(int argc, char* *argv){
if(argc != 7) {
printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>\n", argv[0]);
return 1;
}
    // program parameters parsed from the command line
    // st_cached selects whether the computed results are cached in shared memory
// 0: no; 1: yes
const int nx = atoi(argv[1]);
const int ny = atoi(argv[2]);
const int nz = atoi(argv[3]);
const int tx = atoi(argv[4]);
const int ty = atoi(argv[5]);
const int timesteps = atoi(argv[6]);
const int xyz = nx * ny * nz;
const int xyz_bytes = xyz * sizeof(float);
float *h_A, *h_A1;
float *h_B, *h_B1;
float *d_A;
float *d_B;
int devId = 0;
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, devId));
//printf("Device : %s\n", prop.name);
checkCuda( hipSetDevice(devId));
// Allocate host buffers
checkCuda(hipHostMalloc((void**)&h_A, xyz_bytes)); // host pinned
checkCuda(hipHostMalloc((void**)&h_B, xyz_bytes));
// for comparison btw CPU and GPU version
checkCuda(hipHostMalloc((void**)&h_A1, xyz_bytes));
checkCuda(hipHostMalloc((void**)&h_B1, xyz_bytes));
    // grid data initialization
    // randomly generated test data
srand(time(NULL));
int i = 0;
for(; i < xyz; i++) {
h_A[i] = 1 + (float)rand() / (float)RAND_MAX;
h_A1[i] = h_B1[i] = h_B[i] = h_A[i];
}
// A simple comparison of the result
/*int testIndex = 3 + 3*nx+ 3*nx*ny;
printf("Iniatialized data[%d]=%f\n", testIndex , h_A[testIndex]);
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
*/
const float fac = 6.0/(h_A[0] * h_A[0]);
float *tmp;
// modify nx/tx and ny/ty to (nx+tx-1)/tx and (ny+ty-1)/ty
    // in order to avoid a wrong configuration
dim3 grid((nx+tx-1)/tx, (ny+ty-1)/ty);
dim3 block(tx, ty);
printf("grid:(%d, %d)\n", grid.x, grid.y);
printf("block:(%d, %d)\n", tx, ty);
float ms, ms1; // elapsed time in milliseconds
//printf("Start computing...\n");
/* set the ratio of cache/shared memory
hipFuncCachePreferNone: Default function cache configuration, no preference
hipFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache
hipFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory
checkCuda(hipDeviceSetCacheConfig(hipFuncCachePreferShared));
*/
// set the shared memory bank size to eight bytes
//checkCuda(hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte));
// ****** front and back not in shmem
const int sharedMemSize = 3 * (block.x + 2) * (block.y + 2) * sizeof(float);
printf("Shared Memory Size: %dKB\n", sharedMemSize>>10);
// create events and streams
hipEvent_t startEvent, stopEvent, startEvent1, stopEvent1;
checkCuda( hipEventCreate(&startEvent));
checkCuda( hipEventCreate(&stopEvent));
checkCuda( hipEventCreate(&startEvent1));
checkCuda( hipEventCreate(&stopEvent1));
// timing start include data transfer and memory allocation
checkCuda( hipEventRecord(startEvent,0));
// Allocate device buffers
checkCuda(hipMalloc((void**)&d_A, xyz_bytes)); // device
checkCuda(hipMalloc((void**)&d_B, xyz_bytes));
float* input = d_A;
float* output = d_B;
// copy data to device
checkCuda(hipMemcpy(d_A, h_A, xyz_bytes, hipMemcpyHostToDevice));
checkCuda(hipMemcpy(d_B, d_A, xyz_bytes, hipMemcpyDeviceToDevice));
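// d_B is initialized to the same contents as d_A (mirroring h_B == h_A on the host), so both
// buffers start from identical data before the pointer-swapping time loop below.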
// timing start pure gpu computing
checkCuda(hipEventRecord(startEvent1, 0));
// Run the GPU kernel
for(int t = 0; t < timesteps; t += 1) {
hipLaunchKernelGGL(( jacobi3d_7p_shmem_only), dim3(grid), dim3(block), sharedMemSize, 0, input, output, nx, ny, nz, fac);
tmp = input;
input = output;
output = tmp;
}
// timing end pure gpu computing
checkCuda( hipEventRecord(stopEvent1, 0));
checkCuda( hipEventSynchronize(stopEvent1));
checkCuda( hipEventElapsedTime(&ms1, startEvent1, stopEvent1));
printf("Time of shared memory version (pure GPU) (ms): %f\n", ms1);
double gflop = (xyz * 1e-9) * 7.0 * timesteps;
double gflop_per_sec = gflop * 1e3 / ms1;
printf("(GPU) %lf GFlop/s\n", gflop_per_sec);
double mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms1;
printf("(GPU) %lf M updates/s\n", mupdate_per_sec);
checkCuda( hipMemcpy(h_A, input, xyz_bytes, hipMemcpyDeviceToHost));
checkCuda( hipEventRecord(stopEvent, 0));
checkCuda( hipEventSynchronize(stopEvent));
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent));
float *gpuResult = h_A;
printf("Time of shared memory version (ms): %f\n", ms);
printf("(including data transfer and memory allocation in GPU.)\n");
gflop = (xyz * 1e-9) * 7.0 * timesteps;
gflop_per_sec = gflop * 1e3 / ms;
printf("(GPU) %lf GFlop/s\n", gflop_per_sec);
mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms;
printf("(GPU) %lf M updates/s\n", mupdate_per_sec);
// Run the CPU version
//float startTime = rtclock();
float *tmp1;
for(int t = 0; t < timesteps; t += 1) {
jacobi7(nx, ny, nz, h_A1, h_B1, fac);
tmp1 = h_A1;
h_A1 = h_B1;
h_B1 = tmp1;
}
float *cpuResult;
cpuResult = h_A1;
/*float endTime = rtclock();
double elapsedTimeC = endTime - startTime;
printf("Elapsed Time:%lf\n", elapsedTimeC);
flops = xyz * 7.0 * timesteps;
gflops = flops / elapsedTimeC / 1e9;
printf("(CPU) %lf GFlop/s\n", gflops);
*/
// compare the results btw CPU and GPU version
double errorNorm, refNorm, diff;
errorNorm = 0.0;
refNorm = 0.0;
i = 0;
for (; i < xyz; ++i){
diff = cpuResult[i] - gpuResult[i];
errorNorm += diff * diff;
refNorm += cpuResult[i] * cpuResult[i];
/*if (abs(diff)> 1e-4)
{
printf("GPU[%d]=%f\n", i, gpuResult[i]);
printf("CPU[%d]=%f\n", i, cpuResult[i]);
}*/
}
errorNorm = sqrt(errorNorm);
refNorm = sqrt(refNorm);
printf("Error Norm:%lf\n", errorNorm);
printf("Ref Norm:%lf\n", refNorm);
if(abs(refNorm) < 1e-7) {
printf("Correctness, FAILED\n");
}
else if((errorNorm / refNorm) > 1e-2) {
printf("Correctness, FAILED\n");
}
else {
printf("Correctness, PASSED\n");
}
/*printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]);
printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]);
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
testIndex = 2 + 2*nx+ 2*nx*ny;
printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]);
printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]);
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
*/
// cleanup
checkCuda( hipEventDestroy(startEvent));
checkCuda( hipEventDestroy(stopEvent));
checkCuda( hipEventDestroy(startEvent1));
checkCuda( hipEventDestroy(stopEvent1));
hipHostFree(h_A);
hipHostFree(h_B);
hipHostFree(h_A1);
hipHostFree(h_B1);
hipFree(d_A);
hipFree(d_B);
return 0;
} | 1b19dbb269868db200f3f363c31cc0db17e98bd8.cu | //jacobi7.cu
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <jacobi7_cuda_shared_only.h>
#include <jacobi7.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
int main(int argc, char* *argv){
if(argc != 7) {
printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>\n", argv[0]);
return 1;
}
// program parameters trans
// st_cached indicates whether the computed results are cached in shared memory
// 0: no; 1: yes
const int nx = atoi(argv[1]);
const int ny = atoi(argv[2]);
const int nz = atoi(argv[3]);
const int tx = atoi(argv[4]);
const int ty = atoi(argv[5]);
const int timesteps = atoi(argv[6]);
const int xyz = nx * ny * nz;
const int xyz_bytes = xyz * sizeof(float);
float *h_A, *h_A1;
float *h_B, *h_B1;
float *d_A;
float *d_B;
int devId = 0;
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, devId));
//printf("Device : %s\n", prop.name);
checkCuda( cudaSetDevice(devId));
// Allocate host buffers
checkCuda(cudaMallocHost((void**)&h_A, xyz_bytes)); // host pinned
checkCuda(cudaMallocHost((void**)&h_B, xyz_bytes));
// for comparison btw CPU and GPU version
checkCuda(cudaMallocHost((void**)&h_A1, xyz_bytes));
checkCuda(cudaMallocHost((void**)&h_B1, xyz_bytes));
// grid data initialization
// randomly generated test data
srand(time(NULL));
int i = 0;
for(; i < xyz; i++) {
h_A[i] = 1 + (float)rand() / (float)RAND_MAX;
h_A1[i] = h_B1[i] = h_B[i] = h_A[i];
}
// A simple comparison of the result
/*int testIndex = 3 + 3*nx+ 3*nx*ny;
printf("Iniatialized data[%d]=%f\n", testIndex , h_A[testIndex]);
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
*/
const float fac = 6.0/(h_A[0] * h_A[0]);
float *tmp;
// use (nx+tx-1)/tx and (ny+ty-1)/ty instead of nx/tx and ny/ty
// in order to avoid a grid that fails to cover the whole domain when nx or ny is not a multiple of the block size
dim3 grid((nx+tx-1)/tx, (ny+ty-1)/ty);
dim3 block(tx, ty);
printf("grid:(%d, %d)\n", grid.x, grid.y);
printf("block:(%d, %d)\n", tx, ty);
float ms, ms1; // elapsed time in milliseconds
//printf("Start computing...\n");
/* set the ratio of cache/shared memory
cudaFuncCachePreferNone: Default function cache configuration, no preference
cudaFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache
cudaFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory
checkCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
*/
// set the shared memory bank size to eight bytes
//checkCuda(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte));
// ****** front and back not in shmem
const int sharedMemSize = 3 * (block.x + 2) * (block.y + 2) * sizeof(float);
printf("Shared Memory Size: %dKB\n", sharedMemSize>>10);
// create events and streams
cudaEvent_t startEvent, stopEvent, startEvent1, stopEvent1;
checkCuda( cudaEventCreate(&startEvent));
checkCuda( cudaEventCreate(&stopEvent));
checkCuda( cudaEventCreate(&startEvent1));
checkCuda( cudaEventCreate(&stopEvent1));
// timing start include data transfer and memory allocation
checkCuda( cudaEventRecord(startEvent,0));
// Allocate device buffers
checkCuda(cudaMalloc((void**)&d_A, xyz_bytes)); // device
checkCuda(cudaMalloc((void**)&d_B, xyz_bytes));
float* input = d_A;
float* output = d_B;
// copy data to device
checkCuda(cudaMemcpy(d_A, h_A, xyz_bytes, cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(d_B, d_A, xyz_bytes, cudaMemcpyDeviceToDevice));
// timing start pure gpu computing
checkCuda(cudaEventRecord(startEvent1, 0));
// Run the GPU kernel
for(int t = 0; t < timesteps; t += 1) {
jacobi3d_7p_shmem_only<<<grid, block, sharedMemSize>>>(input, output, nx, ny, nz, fac);
tmp = input;
input = output;
output = tmp;
}
// timing end pure gpu computing
checkCuda( cudaEventRecord(stopEvent1, 0));
checkCuda( cudaEventSynchronize(stopEvent1));
checkCuda( cudaEventElapsedTime(&ms1, startEvent1, stopEvent1));
printf("Time of shared memory version (pure GPU) (ms): %f\n", ms1);
double gflop = (xyz * 1e-9) * 7.0 * timesteps;
double gflop_per_sec = gflop * 1e3 / ms1;
printf("(GPU) %lf GFlop/s\n", gflop_per_sec);
double mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms1;
printf("(GPU) %lf M updates/s\n", mupdate_per_sec);
checkCuda( cudaMemcpy(h_A, input, xyz_bytes, cudaMemcpyDeviceToHost));
checkCuda( cudaEventRecord(stopEvent, 0));
checkCuda( cudaEventSynchronize(stopEvent));
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent));
float *gpuResult = h_A;
printf("Time of shared memory version (ms): %f\n", ms);
printf("(including data transfer and memory allocation in GPU.)\n");
gflop = (xyz * 1e-9) * 7.0 * timesteps;
gflop_per_sec = gflop * 1e3 / ms;
printf("(GPU) %lf GFlop/s\n", gflop_per_sec);
mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms;
printf("(GPU) %lf M updates/s\n", mupdate_per_sec);
// Run the CPU version
//float startTime = rtclock();
float *tmp1;
for(int t = 0; t < timesteps; t += 1) {
jacobi7(nx, ny, nz, h_A1, h_B1, fac);
tmp1 = h_A1;
h_A1 = h_B1;
h_B1 = tmp1;
}
float *cpuResult;
cpuResult = h_A1;
/*float endTime = rtclock();
double elapsedTimeC = endTime - startTime;
printf("Elapsed Time:%lf\n", elapsedTimeC);
flops = xyz * 7.0 * timesteps;
gflops = flops / elapsedTimeC / 1e9;
printf("(CPU) %lf GFlop/s\n", gflops);
*/
// compare the results btw CPU and GPU version
double errorNorm, refNorm, diff;
errorNorm = 0.0;
refNorm = 0.0;
i = 0;
for (; i < xyz; ++i){
diff = cpuResult[i] - gpuResult[i];
errorNorm += diff * diff;
refNorm += cpuResult[i] * cpuResult[i];
/*if (abs(diff)> 1e-4)
{
printf("GPU[%d]=%f\n", i, gpuResult[i]);
printf("CPU[%d]=%f\n", i, cpuResult[i]);
}*/
}
errorNorm = sqrt(errorNorm);
refNorm = sqrt(refNorm);
printf("Error Norm:%lf\n", errorNorm);
printf("Ref Norm:%lf\n", refNorm);
if(abs(refNorm) < 1e-7) {
printf("Correctness, FAILED\n");
}
else if((errorNorm / refNorm) > 1e-2) {
printf("Correctness, FAILED\n");
}
else {
printf("Correctness, PASSED\n");
}
/*printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]);
printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]);
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
testIndex = 2 + 2*nx+ 2*nx*ny;
printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]);
printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]);
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
*/
// cleanup
checkCuda( cudaEventDestroy(startEvent));
checkCuda( cudaEventDestroy(stopEvent));
checkCuda( cudaEventDestroy(startEvent1));
checkCuda( cudaEventDestroy(stopEvent1));
cudaFreeHost(h_A);
cudaFreeHost(h_B);
cudaFreeHost(h_A1);
cudaFreeHost(h_B1);
cudaFree(d_A);
cudaFree(d_B);
return 0;
} |
9087ef7abbcb8ce809f474c83214c5ae9859b782.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <helper_cuda.h>
#define cudamalloc(p, size) { \
hipMalloc(&p, size); \
if (p) \
printf("Allocated %zu bytes from %p \n", size, p); \
else \
printf("Failed to allocate %zu bytes\n", size); \
}
int main()
{
size_t step = 0x1000000;
size_t size = step;
static size_t best = 0;
hipError_t e;
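// Search strategy: grow the requested size by `step`; whenever an allocation fails,
// shrink the step by a factor of 0x10 and keep probing until the step reaches zero.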
while (step > 0)
{
void *p;
//Try allocating Memory
cudamalloc(p, size);
e = hipGetLastError();
//Check if successful
if (e==hipSuccess) {
hipFree(p);
best = size;
}
else {
step /= 0x10;
}
size += step;
}
void *p;
//Confirm
cudamalloc(p, best);
e = hipGetLastError();
if (e==hipSuccess)
{
printf("\nBest possible allocatable block size is %.4f GB\n",
(float)best/1000000000.0);
hipFree(p);
return 0;
}
else
return 1;
}
| 9087ef7abbcb8ce809f474c83214c5ae9859b782.cu | #include <stdio.h>
#include <helper_cuda.h>
#define cudamalloc(p, size) { \
cudaMalloc(&p, size); \
if (p) \
printf("Allocated %zu bytes from %p \n", size, p); \
else \
printf("Failed to allocate %zu bytes\n", size); \
}
int main()
{
size_t step = 0x1000000;
size_t size = step;
static size_t best = 0;
cudaError_t e;
while (step > 0)
{
void *p;
//Try allocating Memory
cudamalloc(p, size);
e = cudaGetLastError();
//Check if successful
if (e==cudaSuccess) {
cudaFree(p);
best = size;
}
else {
step /= 0x10;
}
size += step;
}
void *p;
//Confirm
cudamalloc(p, best);
e = cudaGetLastError();
if (e==cudaSuccess)
{
printf("\nBest possible allocatable block size is %.4f GB\n",
(float)best/1000000000.0);
cudaFree(p);
return 0;
}
else
return 1;
}
|
9d1b947ab04314e9bbdb510c98805c078dc13a43.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPSolver.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPEvent.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
namespace at {
namespace native {
// Some cuBLAS and cuSOLVER batched routines require input to be a device array of pointers to device individual matrices
// 'input' must be a contiguous tensor
template <typename scalar_t>
static Tensor get_device_pointers(const Tensor& input) {
auto input_data = input.data_ptr<scalar_t>();
int64_t input_mat_stride = matrixStride(input);
// cublas/cusolver interface requires 'int'
int batch_size = cuda_int_cast(batchCount(input), "batch_size");
// if batch_size==0, then start=0 and end=0
// if input_mat_stride==0, then step=sizeof(scalar_t)
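// e.g. for a batch of 3 matrices starting at device address P with a stride of S bytes, the
// result holds {P, P + S, P + 2*S} as int64 values, which callers reinterpret as scalar_t** on the device.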
return at::arange(
/*start=*/reinterpret_cast<int64_t>(input_data),
/*end=*/reinterpret_cast<int64_t>(input_data + batch_size * input_mat_stride),
/*step=*/static_cast<int64_t>(std::max<int64_t>(input_mat_stride, 1) * sizeof(scalar_t)),
input.options().dtype(at::kLong));
}
template <typename scalar_t>
void apply_geqrf_batched(const Tensor& input, const Tensor& tau) {
// AMD ROCm backend is implemented via rewriting all CUDA calls to HIP
// rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER
// rocSOLVER is currently not used in ATen, therefore we raise an error in this case
#ifndef CUDART_VERSION
TORCH_CHECK(false, "geqrf: Batched version is supported only with cuBLAS backend.")
#else
auto batch_size = cuda_int_cast(batchCount(input), "batch_size");
auto m = cuda_int_cast(input.size(-2), "m");
auto n = cuda_int_cast(input.size(-1), "n");
auto lda = std::max<int>(1, m);
// cuBLAS batched geqrf requires input to be the device array of pointers to device single matrices
Tensor input_ptr_array = get_device_pointers<scalar_t>(input);
Tensor tau_ptr_array = get_device_pointers<scalar_t>(tau.unsqueeze(-1));
auto input_ptr_array_data = reinterpret_cast<scalar_t**>(input_ptr_array.data_ptr());
auto tau_ptr_array_data = reinterpret_cast<scalar_t**>(tau_ptr_array.data_ptr());
int info;
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::geqrfBatched(handle, m, n, input_ptr_array_data, lda, tau_ptr_array_data, &info, batch_size);
// info only indicates wrong arguments to geqrfBatched call
// info is a host variable, we can check it without device synchronization
TORCH_INTERNAL_ASSERT(info == 0);
#endif
}
void geqrf_batched_cublas(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_batched_cuda", [&]{
apply_geqrf_batched<scalar_t>(input, tau);
});
}
template <typename scalar_t>
static void apply_lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots, hipblasOperation_t trans) {
#ifndef CUDART_VERSION
TORCH_CHECK(false, "lu_solve: cuBLAS backend for lu_solve is not available.")
#else
auto pivots_data = pivots.data_ptr<int>();
auto batch_size = cuda_int_cast(batchCount(lu), "batch_size");
auto m = cuda_int_cast(lu.size(-2), "m");
auto nrhs = cuda_int_cast(b.size(-1), "nrhs");
auto lda = cuda_int_cast(std::max<int>(1, m), "lda");
int info = 0;
Tensor lu_ptr_array = get_device_pointers<scalar_t>(lu);
Tensor b_ptr_array = get_device_pointers<scalar_t>(b);
auto lu_ptr_array_data = reinterpret_cast<scalar_t**>(lu_ptr_array.data_ptr());
auto b_ptr_array_data = reinterpret_cast<scalar_t**>(b_ptr_array.data_ptr());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::getrsBatched(handle, trans, m, nrhs, lu_ptr_array_data,
lda, pivots_data, b_ptr_array_data, lda, &info, batch_size);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
#endif
}
void lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots, hipblasOperation_t trans) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(lu.scalar_type(), "lu_solve_cublas", [&]{
apply_lu_solve_batched_cublas<scalar_t>(b, lu, pivots, trans);
});
}
template <typename scalar_t>
static void apply_triangular_solve(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
hipblasOperation_t trans = transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N;
trans = conjugate_transpose ? HIPBLAS_OP_C : trans;
hipblasDiagType_t diag = unitriangular ? HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT;
hipblasSideMode_t side = HIPBLAS_SIDE_LEFT;
auto A_data = A.data_ptr<scalar_t>();
auto B_data = B.data_ptr<scalar_t>();
auto A_mat_stride = matrixStride(A);
auto B_mat_stride = matrixStride(B);
auto batch_size = batchCount(A);
auto m = cuda_int_cast(A.size(-2), "m");
auto n = cuda_int_cast(A.size(-1), "n");
auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
auto lda = std::max<int>(1, m);
auto alpha = scalar_t{1};
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* A_working_ptr = &A_data[i * A_mat_stride];
scalar_t* B_working_ptr = &B_data[i * B_mat_stride];
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::trsm(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_working_ptr, lda, B_working_ptr, lda);
}
}
void triangular_solve_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
hipblasOperation_t trans = transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N;
trans = conjugate_transpose ? HIPBLAS_OP_C : trans;
hipblasDiagType_t diag = unitriangular ? HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT;
hipblasSideMode_t side = HIPBLAS_SIDE_LEFT;
auto A_data = A.data_ptr<scalar_t>();
auto B_data = B.data_ptr<scalar_t>();
auto A_mat_stride = matrixStride(A);
auto B_mat_stride = matrixStride(B);
auto batch_size = cuda_int_cast(batchCount(A), "batch_size");
auto m = cuda_int_cast(A.size(-2), "m");
auto n = cuda_int_cast(A.size(-1), "n");
auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
auto lda = std::max<int>(1, m);
auto alpha = scalar_t{1};
// cuBLAS batched trsm requires input to be the device array of pointers to device single matrices
Tensor A_ptr_array = get_device_pointers<scalar_t>(A);
Tensor B_ptr_array = get_device_pointers<scalar_t>(B);
auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr());
auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::trsmBatched(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_ptr_array_data, lda, B_ptr_array_data, lda, batch_size);
}
void triangular_solve_batched_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
template <typename scalar_t>
inline void apply_gels_batched(const Tensor& A, Tensor& B, Tensor& infos) {
// AMD ROCm backend is implemented via rewriting all CUDA calls to HIP
// rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER
// rocSOLVER is currently not used in ATen, therefore we raise an error in this case
#ifndef CUDART_VERSION
TORCH_CHECK(false, "torch.linalg.lstsq: Batched version is supported only with cuBLAS backend.")
#else
auto trans = HIPBLAS_OP_N;
auto m = cuda_int_cast(A.size(-2), "m");
auto n = cuda_int_cast(A.size(-1), "n");
auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
// cuBLAS from cuda10 and older doesn't work with nrhs == 0 (cuda11 works)
// so we need to put this early return
if (nrhs == 0) {
return;
}
auto batch_size = cuda_int_cast(batchCount(B), "batch_size");
auto lda = std::max<int>(1, m);
auto ldb = std::max<int>(1, m);
// cuBLAS's requirement
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA with cuBLAS backend.");
// cuBLAS documentation says:
// Matrices Aarray[i] should not overlap; otherwise, undefined behavior is expected.
// explicitly broadcast the batch dimensions of A
IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2);
IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2);
std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes);
expand_batch_portion.insert(expand_batch_portion.end(), {A.size(-2), A.size(-1)});
Tensor A_expanded = A.expand({expand_batch_portion});
Tensor A_broadcasted = cloneBatchedColumnMajor(A_expanded);
// cuBLAS batched gels requires input to be the device array of pointers to device single matrices
Tensor A_ptr_array = get_device_pointers<scalar_t>(A_broadcasted);
Tensor B_ptr_array = get_device_pointers<scalar_t>(B);
auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr());
auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr());
auto infos_data = infos.data_ptr<int>();
auto handle = at::cuda::getCurrentCUDABlasHandle();
int info;
at::cuda::blas::gelsBatched<scalar_t>(
handle, trans, m, n, nrhs,
A_ptr_array_data, lda,
B_ptr_array_data, ldb,
&info,
infos_data,
batch_size);
// negative info indicates that an argument to gelsBatched call is invalid
TORCH_INTERNAL_ASSERT(info == 0);
#endif
}
// This is a type dispatching helper function for 'apply_gels_batched'
void gels_batched_cublas(const Tensor& a, Tensor& b, Tensor& infos) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_batched_cublas", [&]{
apply_gels_batched<scalar_t>(a, b, infos);
});
}
#ifdef USE_CUSOLVER
inline static Tensor column_major_identity_matrix_like(const Tensor& self) {
auto size = self.sizes();
auto size_slice = IntArrayRef(size.data(), size.size()-1);
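// ones(size_slice).diag_embed() yields a batch of identity matrices; transposing the last two
// dims makes each matrix column-major, the layout the cuSOLVER/cuBLAS calls in this file expect.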
return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1);
}
template <typename scalar_t>
inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_getrf_ptr, int* info_getrs_ptr, int n, int lda) {
// self_inv_ptr should already be an identity matrix
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, info_getrf_ptr);
at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, self_inv_ptr, lda, info_getrs_ptr, HIPBLAS_OP_N);
}
template <typename scalar_t>
static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
const int batch_size = cuda_int_cast(batchCount(self), "batchCount");
const int n = cuda_int_cast(self.size(-2), "self.size(-2)");
const int lda = std::max<int>(1, n);
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_getrf_data = infos_getrf.data_ptr<int>();
auto infos_getrs_data = infos_getrs.data_ptr<int>();
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
// Heuristic: For small batch size or large matrix size, we use for-loop to iterate over the batches instead of
// calling the batched cublas routine.
if (batch_size <= 8 || /* batch_size > 8 && */ n >= 512) {
for (int64_t i = 0; i < batch_size; i++) {
auto dataPtr = allocator.allocate(sizeof(int) * lda);
int* pivot = reinterpret_cast<int*>(dataPtr.get());
int* infos_getrf_working_ptr = &infos_getrf_data[i];
int* infos_getrs_working_ptr = &infos_getrs_data[i];
_apply_single_inverse_helper<scalar_t>(
&self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, infos_getrf_working_ptr, infos_getrs_working_ptr, n, lda);
}
} else {
// cublas batched kernels require the input to be a "device array of device pointers"
Tensor self_array = at::arange(
reinterpret_cast<int64_t>(self_data),
reinterpret_cast<int64_t>(&self_data[(batch_size-1) * self_mat_stride]) + 1,
static_cast<int64_t>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong));
Tensor self_inv_array = at::arange(
reinterpret_cast<int64_t>(self_inv_data),
reinterpret_cast<int64_t>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1,
static_cast<int64_t>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong));
auto dataPtr = allocator.allocate(sizeof(int)*batch_size*lda);
int* ipiv_array = reinterpret_cast<int*>(dataPtr.get());
at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
ipiv_array, infos_getrf_data, batch_size);
at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
ipiv_array, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr()), lda, infos_getrs_data, batch_size);
}
}
template <typename scalar_t>
static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
int n = cuda_int_cast(self.size(-2), "self.size(-2)");
int lda = std::max<int>(1, n);
Tensor ipiv = at::empty({lda}, self.options().dtype(at::kInt));
_apply_single_inverse_helper<scalar_t>(
self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), infos_getrf.data_ptr<int>(), infos_getrs.data_ptr<int>(), n, lda);
}
// This is a type dispatching helper function for 'apply_batched_inverse_lib' and 'apply_single_inverse_lib'
Tensor& _linalg_inv_out_helper_cuda_lib(Tensor& result, Tensor& infos_getrf, Tensor& infos_getrs) {
// assuming result is in column major order and contains the matrices to invert
Tensor input_working_copy = cloneBatchedColumnMajor(result);
// for getrf + getrs (cusolver path)
// result should be filled with identity matrices
result.zero_();
result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
const int batch_size = cuda_int_cast(batchCount(result), "batchCount");
if (result.dim() > 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse_lib<scalar_t>(
input_working_copy, result, infos_getrf, infos_getrs);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse_lib<scalar_t>(input_working_copy, result, infos_getrf, infos_getrs);
});
}
return result;
}
// entrance of calculations of `inverse` using cusolver getrf + getrs, cublas getrfBatched + getriBatched
Tensor _inverse_helper_cuda_lib(const Tensor& self) {
Tensor self_working_copy = cloneBatchedColumnMajor(self);
Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy);
const int batch_size = cuda_int_cast(batchCount(self), "batchCount");
if (self.dim() > 2 && batch_size > 1) {
Tensor infos_getrf = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
Tensor infos_getrs = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse_lib<scalar_t>(
self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
});
batchCheckErrors(infos_getrf, "inverse_cuda");
batchCheckErrors(infos_getrs, "inverse_cuda");
} else {
Tensor infos_getrf = at::zeros({1}, self.options().dtype(kInt));
Tensor infos_getrs = at::zeros({1}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
});
batchCheckErrors(infos_getrf, "inverse_cuda");
batchCheckErrors(infos_getrs, "inverse_cuda");
}
return self_inv_working_copy;
}
// call cusolver gesvdj function to calculate svd
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
int batchsize = cuda_int_cast(batchCount(self), "batch size");
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int ldvt = std::max<int>(1, n);
for(int i = 0; i < batchsize; i++){
// gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
hipsolverGesvdjInfo_t gesvdj_params;
TORCH_CUSOLVER_CHECK(hipsolverDnCreateGesvdjInfo(&gesvdj_params));
// TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
// TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
auto jobz = compute_uv ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
at::cuda::solver::gesvdj<scalar_t>(
handle, jobz, /*econ=*/ some ? 1 : 0, m, n,
self_data + i * self_stride,
lda,
S_data + i * S_stride,
U_data + i * U_stride,
lda,
VT_data + i * VT_stride,
ldvt,
infos.data_ptr<int>() + i,
gesvdj_params
);
TORCH_CUSOLVER_CHECK(hipsolverDnDestroyGesvdjInfo(gesvdj_params));
}
}
// wrapper around _apply_svd_lib_gesvdj that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
Tensor self_working_copy = cloneBatchedColumnMajor(self);
VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdj", [&] {
_apply_svd_lib_gesvdj<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv, some);
});
}
// call cusolver gesvdj batched function to calculate svd
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
int batchsize = cuda_int_cast(batchCount(self), "batch size");
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int ldvt = std::max<int>(1, n);
TORCH_INTERNAL_ASSERT(m <= 32 && n <= 32, "gesvdjBatched requires both matrix dimensions not greater than 32, but got "
"m = ", m, " n = ", n);
// gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
hipsolverGesvdjInfo_t gesvdj_params;
TORCH_CUSOLVER_CHECK(hipsolverDnCreateGesvdjInfo(&gesvdj_params));
// TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
// TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetSortEig(gesvdj_params, 1));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
auto jobz = compute_uv ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
at::cuda::solver::gesvdjBatched<scalar_t>(
handle, jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt,
infos.data_ptr<int>(), gesvdj_params, batchsize
);
TORCH_CUSOLVER_CHECK(hipsolverDnDestroyGesvdjInfo(gesvdj_params));
}
// wrapper around _apply_svd_lib_gesvdjBatched that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
Tensor self_working_copy = cloneBatchedColumnMajor(self);
VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdjBatched", [&] {
_apply_svd_lib_gesvdjBatched<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv);
});
}
// entrance of calculations of `svd` using cusolver gesvdj and gesvdjBatched
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_lib(const Tensor& self, bool some, bool compute_uv) {
const int64_t batch_size = batchCount(self);
at::Tensor infos = at::zeros({batch_size}, self.options().dtype(at::kInt));
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
const int64_t k = ::min(m, n);
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = \
_create_U_S_VT(self, some, compute_uv, /* svd_use_cusolver = */ true);
// U, S, V working copies are already column majored now
// heuristic for using `gesvdjBatched` over `gesvdj`
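// gesvdjBatched only handles matrices up to 32x32 and has no economy ("thin") mode,
// hence the extra m == n requirement when `some` is true.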
if (m <= 32 && n <= 32 && batch_size > 1 && (!some || m == n)) {
apply_svd_lib_gesvdjBatched(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv);
} else {
apply_svd_lib_gesvdj(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv, some);
}
// A device-host sync will be performed.
batchCheckErrors(infos, "svd_cuda");
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// Implementation of Cholesky decomposition using looped cusolverDn<T>potrf or hipsolverDnXpotrf (64-bit)
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrf_looped(const Tensor& self_working_copy, bool upper, const Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
const int64_t n = self_working_copy.size(-1);
const int64_t lda = std::max<int64_t>(1, n);
const int64_t batch_size = batchCount(self_working_copy);
const int64_t matrix_stride = matrixStride(self_working_copy);
scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();
int* infos_ptr = infos.data_ptr<int>();
#ifdef USE_CUSOLVER_64_BIT
size_t worksize_device;
size_t worksize_host;
hipsolverDnParams_t params;
hipDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>();
TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params));
at::cuda::solver::xpotrf_buffersize(handle, params, uplo, n, datatype, nullptr, lda, datatype, &worksize_device, &worksize_host);
// allocate workspace storage
auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
auto workdata_device = device_allocator.allocate(worksize_device * batch_size);
void* workdata_device_ptr = workdata_device.get();
auto& host_allocator = *at::getCPUAllocator();
auto workdata_host = host_allocator.allocate(worksize_host * batch_size);
void* workdata_host_ptr = workdata_host.get();
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::xpotrf(
handle, params, uplo, n, datatype,
self_working_copy_ptr + i * matrix_stride,
lda, datatype,
(char*)workdata_device_ptr + i * worksize_device, worksize_device,
(char*)workdata_host_ptr + i * worksize_host, worksize_host,
infos_ptr + i
);
}
TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params));
#else // USE_CUSOLVER_64_BIT
int n_32 = cuda_int_cast(n, "n");
int lda_32 = cuda_int_cast(lda, "lda");
int lwork;
at::cuda::solver::potrf_buffersize<scalar_t>(
handle, uplo, n_32, nullptr, lda_32, &lwork);
// allocate workspace storage
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t)*lwork * batch_size);
scalar_t* work_data_ptr = static_cast<scalar_t*>(work_data.get());
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::potrf<scalar_t>(
handle, uplo, n_32,
self_working_copy_ptr + i * matrix_stride,
lda_32,
work_data_ptr + i * lwork,
lwork,
infos_ptr + i
);
}
#endif // USE_CUSOLVER_64_BIT
}
// Implementation of Cholesky decomposition using batched cusolverDn<T>potrfBatched
// Warning: cusolverDn<T>potrfBatched doesn't work quite well when matrix size or batch size is zero.
// If you write your own C++ extension and use this function, make sure you do a zero numel check for the input.
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrfBatched(const Tensor& self_working_copy, bool upper, const Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
const int n = cuda_int_cast(self_working_copy.size(-1), "n");
const int lda = std::max<int>(1, n);
const int batch_size = cuda_int_cast(batchCount(self_working_copy), "batch_size");
// cusolver batched kernels require the input to be a "device array of device pointers"
Tensor self_working_copy_array = get_device_pointers<scalar_t>(self_working_copy);
at::cuda::solver::potrfBatched<scalar_t>(
handle, uplo, n,
reinterpret_cast<scalar_t**>(self_working_copy_array.data_ptr()),
lda, infos.data_ptr<int>(), batch_size);
}
void cholesky_helper_cusolver(const Tensor& input, bool upper, const Tensor& info) {
if (input.numel() == 0) {
return;
}
if (use_cusolver_potrf_batched_ && batchCount(input) > 1) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] {
apply_cholesky_cusolver_potrfBatched<scalar_t>(input, upper, info);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] {
apply_cholesky_cusolver_potrf_looped<scalar_t>(input, upper, info);
});
}
}
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrs(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
const int64_t n = self_working_copy.size(-2);
const int64_t nrhs = self_working_copy.size(-1);
const int64_t lda = std::max<int64_t>(1, n);
const int64_t batch_size = batchCount(self_working_copy);
const int64_t self_matrix_stride = matrixStride(self_working_copy);
scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();
const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>();
const int64_t A_matrix_stride = matrixStride(A_column_major_copy);
const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1));
int* infos_ptr = infos.data_ptr<int>();
#ifdef USE_CUSOLVER_64_BIT
hipsolverDnParams_t params;
hipDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>();
TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params));
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::xpotrs(
handle, params, uplo, n, nrhs, datatype,
A_ptr + i * A_matrix_stride,
lda, datatype,
self_working_copy_ptr + i * self_matrix_stride,
ldb,
infos_ptr
);
}
TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params));
#else // USE_CUSOLVER_64_BIT
int n_32 = cuda_int_cast(n, "n");
int nrhs_32 = cuda_int_cast(nrhs, "nrhs");
int lda_32 = cuda_int_cast(lda, "lda");
int ldb_32 = cuda_int_cast(ldb, "ldb");
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::potrs<scalar_t>(
handle, uplo, n_32, nrhs_32,
A_ptr + i * A_matrix_stride,
lda_32,
self_working_copy_ptr + i * self_matrix_stride,
ldb_32,
infos_ptr
);
}
#endif // USE_CUSOLVER_64_BIT
}
// This code path is only dispatched to if MAGMA is not linked in the pytorch build.
// cusolverDn<t>potrsBatched only supports nrhs == 1
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrsBatched(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
const int64_t n = self_working_copy.size(-2);
const int64_t nrhs = self_working_copy.size(-1);
const int64_t lda = std::max<int64_t>(1, n);
const int64_t batch_size = batchCount(self_working_copy);
const int64_t self_matrix_stride = matrixStride(self_working_copy);
scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();
const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>();
const int64_t A_matrix_stride = matrixStride(A_column_major_copy);
const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1));
int* infos_ptr = infos.data_ptr<int>();
auto self_ptr_array = get_device_pointers<scalar_t>(self_working_copy);
auto A_ptr_array = get_device_pointers<scalar_t>(A_column_major_copy);
at::cuda::solver::potrsBatched(
handle, uplo,
cuda_int_cast(n, "n"),
cuda_int_cast(nrhs, "nrhs"),
reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()),
cuda_int_cast(lda, "lda"),
reinterpret_cast<scalar_t**>(self_ptr_array.data_ptr()),
cuda_int_cast(ldb, "ldb"),
infos_ptr,
cuda_int_cast(batch_size, "batch_size")
);
}
Tensor _cholesky_solve_helper_cuda_cusolver(const Tensor& self, const Tensor& A, bool upper) {
const int64_t batch_size = batchCount(self);
at::Tensor infos = at::zeros({1}, self.options().dtype(at::kInt));
at::Tensor self_working_copy = cloneBatchedColumnMajor(self);
at::Tensor A_column_major_copy = cloneBatchedColumnMajor(A);
const int64_t nrhs = self_working_copy.size(-1);
// cusolverDn<t>potrsBatched only supports nrhs == 1
if (batch_size > 1 && nrhs == 1) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs_batched", [&] {
apply_cholesky_cusolver_potrsBatched<scalar_t>(self_working_copy, A_column_major_copy, upper, infos);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs", [&] {
apply_cholesky_cusolver_potrs<scalar_t>(self_working_copy, A_column_major_copy, upper, infos);
});
}
// info from potrs and potrsBatched only report if the i-th parameter is wrong, not about the matrix singularity, etc.
// So we don't need to check it all the time.
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0);
return self_working_copy;
}
void _cholesky_inverse_cusolver_potrs_based(Tensor& result, Tensor& infos, bool upper) {
at::Tensor input_working_copy = cloneBatchedColumnMajor(result);
at::Tensor infos_gpu = at::zeros({1}, result.options().dtype(at::kInt));
result.fill_(0);
result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_cuda_potri", [&] {
apply_cholesky_cusolver_potrs<scalar_t>(result, input_working_copy, upper, infos_gpu);
});
// Debug only: info of cusolver potrs only check if the i-th parameter is wrong
// Function argument `infos` is a CPU tensor, the following copy will cause a device-host sync.
// infos.copy_(infos_gpu);
}
Tensor& cholesky_inverse_kernel_impl_cusolver(Tensor &result, Tensor& infos, bool upper) {
_cholesky_inverse_cusolver_potrs_based(result, infos, upper);
return result;
}
/*
The geqrf function computes the QR decomposition of a m x n matrix A.
Args:
* `A` - [in] Tensor with matrices for QR decomposition,
[out] Tensor containing R in the upper triangle of A
and elementary reflectors below the main diagonal of A
* `tau` - Tensor containing the magnitudes of the elementary reflectors
* `m` - The number of rows of `input` to consider
* `n` - The number of columns of `input` to consider (actual sizes of `input` could be larger)
For further details, please see the cuSOLVER documentation for GEQRF.
*/
template <typename scalar_t>
static void apply_geqrf(const Tensor& A, const Tensor& tau) {
int64_t m = A.size(-2);
int64_t n = A.size(-1);
int64_t lda = std::max<int64_t>(1, m);
int64_t batch_size = batchCount(A);
auto A_stride = matrixStride(A);
auto tau_stride = tau.size(-1);
auto A_data = A.data_ptr<scalar_t>();
auto tau_data = tau.data_ptr<scalar_t>();
auto infos = at::zeros({1}, A.options().dtype(at::kInt));
auto infos_data = infos.data_ptr<int>();
// get the optimal work size and allocate workspace tensor
#ifdef USE_CUSOLVER_64_BIT
size_t worksize_device; // workspaceInBytesOnDevice
size_t worksize_host; // workspaceInBytesOnHost
hipsolverDnParams_t params = NULL; // use default algorithm (currently it's the only option)
at::cuda::solver::xgeqrf_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(),
params,
m,
n,
A_data,
lda,
tau_data,
&worksize_device,
&worksize_host);
#else
int lwork;
int m_32 = cuda_int_cast(m, "m");
int n_32 = cuda_int_cast(n, "n");
int lda_32 = cuda_int_cast(lda, "lda");
at::cuda::solver::geqrf_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), m_32, n_32, A_data, lda_32, &lwork);
#endif // USE_CUSOLVER_64_BIT
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* A_working_ptr = &A_data[i * A_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
#ifdef USE_CUSOLVER_64_BIT
// allocate workspace storage on device and host
auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
auto work_device_data = device_allocator.allocate(worksize_device);
auto& host_allocator = *at::getCPUAllocator();
auto work_host_data = host_allocator.allocate(worksize_host);
at::cuda::solver::xgeqrf<scalar_t>(
handle,
params,
m,
n,
A_working_ptr,
lda,
tau_working_ptr,
static_cast<scalar_t*>(work_device_data.get()),
worksize_device,
static_cast<scalar_t*>(work_host_data.get()),
worksize_host,
infos_data);
#else
// allocate workspace storage on device
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t) * std::max<int>(1, lwork));
at::cuda::solver::geqrf<scalar_t>(
handle,
m_32,
n_32,
A_working_ptr,
lda_32,
tau_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
infos_data);
#endif // USE_CUSOLVER_64_BIT
}
// info from geqrf only reports if the i-th parameter is wrong, not about the matrix singularity
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0);
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_cusolver(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_cuda", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
/*
The ormqr function multiplies Q with another matrix from a sequence of
elementary reflectors, such as is produced by the geqrf function.
Args:
* `input` - Tensor with elementary reflectors below the diagonal,
encoding the matrix Q.
* `tau` - Tensor containing the magnitudes of the elementary
reflectors.
* `other` - [in] Tensor containing the matrix to be multiplied.
[out] result of the matrix multiplication with Q.
* `left` - bool, determining whether `other` is left- or right-multiplied with Q.
* `transpose` - bool, determining whether to transpose (or conjugate transpose) Q before multiplying.
For further details, please see the cuSOLVER documentation for ORMQR and UNMQR.
*/
template <typename scalar_t>
static void apply_ormqr(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto side = left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT;
auto trans = transpose ? (input.is_complex() ? HIPBLAS_OP_C : HIPBLAS_OP_T) : HIPBLAS_OP_N;
auto input_data = input.data_ptr<scalar_t>();
auto tau_data = tau.data_ptr<scalar_t>();
auto other_data = other.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto other_matrix_stride = matrixStride(other);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto m = cuda_int_cast(other.size(-2), "m");
auto n = cuda_int_cast(other.size(-1), "n");
auto k = cuda_int_cast(tau.size(-1), "k");
auto lda = std::max<int>(1, left ? m : n);
auto ldc = std::max<int>(1, m);
// get the optimal work size and allocate workspace tensor
int lwork;
at::cuda::solver::ormqr_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), side, trans, m, n, k, input_data, lda, tau_data, other_data, ldc, &lwork);
auto info = at::zeros({1}, input.options().dtype(at::kInt));
auto info_data = info.data_ptr<int>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* other_working_ptr = &other_data[i * other_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
// allocate workspace storage
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t)*lwork);
at::cuda::solver::ormqr<scalar_t>(
handle, side, trans, m, n, k,
input_working_ptr,
lda,
tau_working_ptr,
other_working_ptr,
ldc,
static_cast<scalar_t*>(work_data.get()),
lwork,
info_data
);
// info from ormqr only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
}
}
// This is a type dispatching helper function for 'apply_ormqr'
void ormqr_cusolver(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "orgmr_cuda", [&]{
apply_ormqr<scalar_t>(input, tau, other, left, transpose);
});
}
/*
The orgqr function allows reconstruction of an orthogonal (or unitary) matrix Q,
from a sequence of elementary reflectors, such as produced by the geqrf function.
Args:
* `self` - Tensor with the directions of the elementary reflectors below the diagonal,
it will be overwritten with the result
* `tau` - Tensor containing the magnitudes of the elementary reflectors
For further details, please see the cuSOLVER documentation for ORGQR and UNGQR.
*/
template <typename scalar_t>
inline static void apply_orgqr(Tensor& self, const Tensor& tau) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto tau_data = tau.data_ptr<scalar_t>();
auto self_matrix_stride = matrixStride(self);
auto batchsize = cuda_int_cast(batchCount(self), "batch size");
auto m = cuda_int_cast(self.size(-2), "m");
auto n = cuda_int_cast(self.size(-1), "n");
auto k = cuda_int_cast(tau.size(-1), "k");
auto tau_stride = std::max<int>(1, k);
auto lda = std::max<int>(1, m);
// LAPACK's requirement
TORCH_INTERNAL_ASSERT(m >= n);
TORCH_INTERNAL_ASSERT(n >= k);
// cuSOLVER doesn't compute anything for this case, which is wrong
// the result should be a matrix with 1 on the diagonal
if (k == 0) {
self.fill_(0);
self.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
return;
}
// get the optimal work size and allocate workspace tensor
int lwork;
at::cuda::solver::orgqr_buffersize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), m, n, k, self_data, lda, tau_data, &lwork);
auto info = at::zeros({1}, self.options().dtype(at::kInt));
auto info_data = info.data_ptr<int>();
for (auto i = decltype(batchsize){0}; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
// allocate workspace storage
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t)*lwork);
at::cuda::solver::orgqr<scalar_t>(
handle, m, n, k,
self_working_ptr,
lda,
tau_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
info_data
);
// info from orgqr only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
}
}
// This is a type dispatching helper function for 'apply_orgqr'
Tensor& orgqr_helper_cusolver(Tensor& result, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "orgqr_cuda", [&]{
apply_orgqr<scalar_t>(result, tau);
});
return result;
}
template <typename scalar_t>
static void apply_syevd(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
hipsolverEigMode_t jobz = compute_eigenvectors ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
int64_t n = vectors.size(-1);
int64_t lda = std::max<int64_t>(1, n);
int64_t batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<int>();
// get the optimal work size and allocate workspace tensor
#ifdef USE_CUSOLVER_64_BIT
size_t worksize_device; // workspaceInBytesOnDevice
size_t worksize_host; // workspaceInBytesOnHost
hipsolverDnParams_t params = NULL; // use default algorithm (currently it's the only option)
at::cuda::solver::xsyevd_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(),
params,
jobz,
uplo,
n,
vectors_data,
lda,
values_data,
&worksize_device,
&worksize_host);
#else
int lwork;
int n_32 = cuda_int_cast(n, "n");
int lda_32 = cuda_int_cast(lda, "lda");
at::cuda::solver::syevd_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n_32, vectors_data, lda_32, values_data, &lwork);
#endif // USE_CUSOLVER_64_BIT
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
int* info_working_ptr = &infos_data[i];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
#ifdef USE_CUSOLVER_64_BIT
// allocate workspace storage on device and host
auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
auto work_device_data = device_allocator.allocate(worksize_device);
auto& host_allocator = *at::getCPUAllocator();
auto work_host_data = host_allocator.allocate(worksize_host);
at::cuda::solver::xsyevd<scalar_t>(
handle,
params,
jobz,
uplo,
n,
vectors_working_ptr,
lda,
values_working_ptr,
static_cast<scalar_t*>(work_device_data.get()),
worksize_device,
static_cast<scalar_t*>(work_host_data.get()),
worksize_host,
info_working_ptr);
#else
// allocate workspace storage on device
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
at::cuda::solver::syevd<scalar_t>(
handle,
jobz,
uplo,
n_32,
vectors_working_ptr,
lda_32,
values_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
info_working_ptr);
#endif // USE_CUSOLVER_64_BIT
}
}
template <typename scalar_t>
static void apply_syevj(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
hipsolverEigMode_t jobz = compute_eigenvectors ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
int n = cuda_int_cast(vectors.size(-1), "n");
int lda = std::max<int>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<int>();
// syevj_params controls the numerical accuracy of syevj
// by default the tolerance is set to machine accuracy
// the maximum number of iteration of Jacobi method by default is 100
// cuSOLVER documentation says: "15 sweeps are good enough to converge to machine accuracy"
// LAPACK has an SVD routine based on a similar Jacobi algorithm (gesvj), where the maximum number of iterations is set to 30
// Let's use the default values for now
hipsolverSyevjInfo_t syevj_params;
TORCH_CUSOLVER_CHECK(hipsolverDnCreateSyevjInfo(&syevj_params));
// get the optimal work size and allocate workspace tensor
int lwork;
at::cuda::solver::syevj_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params);
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
int* info_working_ptr = &infos_data[i];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
// allocate workspace storage on device
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
at::cuda::solver::syevj<scalar_t>(
handle,
jobz,
uplo,
n,
vectors_working_ptr,
lda,
values_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
info_working_ptr,
syevj_params);
}
TORCH_CUSOLVER_CHECK(hipsolverDnDestroySyevjInfo(syevj_params));
}
template <typename scalar_t>
static void apply_syevj_batched(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
hipsolverEigMode_t jobz = compute_eigenvectors ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
int n = cuda_int_cast(vectors.size(-1), "n");
int lda = std::max<int>(1, n);
int batch_size = cuda_int_cast(batchCount(vectors), "batch_size");
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<int>();
// syevj_params controls the numerical accuracy of syevj
// by default the tolerance is set to machine accuracy
// the maximum number of iteration of Jacobi method by default is 100
// cuSOLVER documentation says: "15 sweeps are good enough to converge to machine accuracy"
// LAPACK has an SVD routine based on a similar Jacobi algorithm (gesvj), where the maximum number of iterations is set to 30
// Let's use the default values for now
hipsolverSyevjInfo_t syevj_params;
TORCH_CUSOLVER_CHECK(hipsolverDnCreateSyevjInfo(&syevj_params));
TORCH_CUSOLVER_CHECK(hipsolverDnXsyevjSetSortEig(syevj_params, 1));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
// get the optimal work size and allocate workspace tensor
int lwork;
at::cuda::solver::syevjBatched_bufferSize<scalar_t>(
handle,
jobz,
uplo,
n,
vectors_data,
lda,
values_data,
&lwork,
syevj_params,
batch_size);
// allocate workspace storage on device
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
at::cuda::solver::syevjBatched<scalar_t>(
handle,
jobz,
uplo,
n,
vectors_data,
lda,
values_data,
static_cast<scalar_t*>(work_data.get()),
lwork,
infos_data,
syevj_params,
batch_size);
TORCH_CUSOLVER_CHECK(hipsolverDnDestroySyevjInfo(syevj_params));
}
static void linalg_eigh_cusolver_syevd(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] {
apply_syevd<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
});
}
static void linalg_eigh_cusolver_syevj(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] {
apply_syevj<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
});
}
static void linalg_eigh_cusolver_syevj_batched(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] {
apply_syevj_batched<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
});
}
void linalg_eigh_cusolver(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
if (use_cusolver_syevj_batched_ && batchCount(eigenvectors) > 1 && eigenvectors.size(-1) <= 32) {
// Use syevjBatched for batched matrix operations when the matrix size is <= 32
// See https://github.com/pytorch/pytorch/pull/53040#issuecomment-788264724
linalg_eigh_cusolver_syevj_batched(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
} else if (eigenvectors.scalar_type() == at::kFloat && eigenvectors.size(-1) >= 32 && eigenvectors.size(-1) <= 512) {
// syevj is better than syevd for float32 dtype and matrix sizes 32x32 - 512x512
// See https://github.com/pytorch/pytorch/pull/53040#issuecomment-788264724
linalg_eigh_cusolver_syevj(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
} else {
linalg_eigh_cusolver_syevd(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
}
}
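// Illustrative usage sketch (not part of this file): on the ATen side this dispatcher is
// typically reached through the public eigh API, roughly as follows (assuming a ROCm/CUDA
// build where linalg_eigh routes to this hipSOLVER/cuSOLVER path):
//
//   at::Tensor a = at::randn({8, 32, 32}, at::kCUDA);
//   a = a + a.transpose(-2, -1); // make the input symmetric
//   at::Tensor w, v;
//   std::tie(w, v) = at::linalg_eigh(a, "L"); // eigenvalues w, eigenvectors v
//
// With batched 32x32 inputs like this, the heuristic above selects the syevjBatched branch
// (subject to the use_cusolver_syevj_batched_ flag).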
// The 'apply_' word is used for templated by dtype functions that call an API routine
// underneath. Since the cusolver API has a slightly different structure we do not prepend
// apply_ to this function.
void lu_looped_cusolver(const Tensor& self, const Tensor& pivots, const Tensor& infos, bool get_pivots) {
// Fill the pivots tensor with indices using 1-based (Fortran) indexing. This
// is needed for maintaining the same results with MAGMA.
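// For example, with k = 3 and a batch of two matrices the prefilled pivots are
// [[1, 2, 3], [1, 2, 3]], i.e. the pivots getrf would report for inputs that need
// no row interchanges; they are overwritten below only when get_pivots is true.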
auto k = ::min(self.size(-2), self.size(-1));
Tensor pivots_tmp = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
AT_DISPATCH_FLOATING_TYPES(
self.scalar_type(),
"lu_cusolver",
[&self,
&pivots,
&infos,
&get_pivots]() {
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int64_t self_stride = matrixStride(self);
int64_t batch_size = batchCount(self);
scalar_t* self_data = self.data_ptr<scalar_t>();
int* infos_data = infos.data_ptr<int>();
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) {
if (get_pivots) {
auto pivots_data = pivots.data_ptr<int>();
auto pivots_stride = pivots.size(-1);
at::cuda::solver::getrf<scalar_t>(
handle, m, n,
self_data + batch * self_stride,
lda,
pivots_data + batch * pivots_stride,
infos_data + batch
);
}
else {
at::cuda::solver::getrf<scalar_t>(
handle, m, n,
self_data + batch * self_stride,
lda,
nullptr,
infos_data + batch
);
}
}
});
// Necessary because cuSOLVER uses nan for outputs that correspond to 0 in MAGMA for non-pivoted LU.
// See https://github.com/pytorch/pytorch/issues/53879 for more details.
if (!get_pivots) {
at::nan_to_num_(const_cast<Tensor&>(self), 0, std::numeric_limits<double>::infinity(),
-std::numeric_limits<double>::infinity());
}
}
void lu_solve_looped_cusolver(const Tensor& b, const Tensor& lu, const Tensor& pivots, hipblasOperation_t trans) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_cusolver", [&] {
int n = cuda_int_cast(lu.size(-2), "n");
int nrhs = cuda_int_cast(b.size(-1), "nrhs");
auto batch_size = batchCount(lu);
auto info = at::zeros({1}, lu.options().dtype(kInt));
auto info_data = info.data_ptr<int>();
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto pivots_data = pivots.data_ptr<int>();
auto pivots_stride = pivots.size(-1);
auto lu_stride = matrixStride(lu);
auto b_stride = matrixStride(b);
int leading_dimension = cuda_int_cast(std::max<int>(1, n), "leading_dimension");
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) {
at::cuda::solver::getrs<scalar_t>(
handle,
n,
nrhs,
lu_data + batch * lu_stride,
leading_dimension,
pivots_data + batch * pivots_stride,
b_data + batch * b_stride,
leading_dimension,
info_data,
trans);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
}
});
}
#endif // USE_CUSOLVER
}} // namespace at::native
| 9d1b947ab04314e9bbdb510c98805c078dc13a43.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDASolver.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAEvent.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
namespace at {
namespace native {
// Some cuBLAS and cuSOLVER batched routines require the input to be a device array of pointers to the individual device matrices
// 'input' must be a contiguous tensor
template <typename scalar_t>
static Tensor get_device_pointers(const Tensor& input) {
auto input_data = input.data_ptr<scalar_t>();
int64_t input_mat_stride = matrixStride(input);
// cublas/cusolver interface requires 'int'
int batch_size = cuda_int_cast(batchCount(input), "batch_size");
// if batch_size==0, then start=0 and end=0
// if input_mat_stride==0, then step=sizeof(scalar_t)
return at::arange(
/*start=*/reinterpret_cast<int64_t>(input_data),
/*end=*/reinterpret_cast<int64_t>(input_data + batch_size * input_mat_stride),
/*step=*/static_cast<int64_t>(std::max<int64_t>(input_mat_stride, 1) * sizeof(scalar_t)),
input.options().dtype(at::kLong));
}
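// Illustrative sketch of what the helper above produces (nothing is executed here): for a
// contiguous batch of three matrices whose data starts at device address p with a per-matrix
// stride of s elements, the returned int64 tensor holds the raw addresses
//   { p, p + s * sizeof(scalar_t), p + 2 * s * sizeof(scalar_t) },
// which is exactly the "array of device pointers" layout the batched routines below expect.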
template <typename scalar_t>
void apply_geqrf_batched(const Tensor& input, const Tensor& tau) {
// AMD ROCm backend is implemented via rewriting all CUDA calls to HIP
// rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER
// rocSOLVER is currently not used in ATen, therefore we raise an error in this case
#ifndef CUDART_VERSION
TORCH_CHECK(false, "geqrf: Batched version is supported only with cuBLAS backend.")
#else
auto batch_size = cuda_int_cast(batchCount(input), "batch_size");
auto m = cuda_int_cast(input.size(-2), "m");
auto n = cuda_int_cast(input.size(-1), "n");
auto lda = std::max<int>(1, m);
// cuBLAS batched geqrf requires the input to be a device array of pointers to the individual device matrices
Tensor input_ptr_array = get_device_pointers<scalar_t>(input);
Tensor tau_ptr_array = get_device_pointers<scalar_t>(tau.unsqueeze(-1));
auto input_ptr_array_data = reinterpret_cast<scalar_t**>(input_ptr_array.data_ptr());
auto tau_ptr_array_data = reinterpret_cast<scalar_t**>(tau_ptr_array.data_ptr());
int info;
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::geqrfBatched(handle, m, n, input_ptr_array_data, lda, tau_ptr_array_data, &info, batch_size);
// info only indicates wrong arguments to geqrfBatched call
// info is a host variable, we can check it without device synchronization
TORCH_INTERNAL_ASSERT(info == 0);
#endif
}
void geqrf_batched_cublas(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_batched_cuda", [&]{
apply_geqrf_batched<scalar_t>(input, tau);
});
}
template <typename scalar_t>
static void apply_lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots, cublasOperation_t trans) {
#ifndef CUDART_VERSION
TORCH_CHECK(false, "lu_solve: cuBLAS backend for lu_solve is not available.")
#else
auto pivots_data = pivots.data_ptr<int>();
auto batch_size = cuda_int_cast(batchCount(lu), "batch_size");
auto m = cuda_int_cast(lu.size(-2), "m");
auto nrhs = cuda_int_cast(b.size(-1), "nrhs");
auto lda = cuda_int_cast(std::max<int>(1, m), "lda");
int info = 0;
Tensor lu_ptr_array = get_device_pointers<scalar_t>(lu);
Tensor b_ptr_array = get_device_pointers<scalar_t>(b);
auto lu_ptr_array_data = reinterpret_cast<scalar_t**>(lu_ptr_array.data_ptr());
auto b_ptr_array_data = reinterpret_cast<scalar_t**>(b_ptr_array.data_ptr());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::getrsBatched(handle, trans, m, nrhs, lu_ptr_array_data,
lda, pivots_data, b_ptr_array_data, lda, &info, batch_size);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
#endif
}
void lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots, cublasOperation_t trans) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(lu.scalar_type(), "lu_solve_cublas", [&]{
apply_lu_solve_batched_cublas<scalar_t>(b, lu, pivots, trans);
});
}
template <typename scalar_t>
static void apply_triangular_solve(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
cublasOperation_t trans = transpose ? CUBLAS_OP_T : CUBLAS_OP_N;
trans = conjugate_transpose ? CUBLAS_OP_C : trans;
cublasDiagType_t diag = unitriangular ? CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT;
cublasSideMode_t side = CUBLAS_SIDE_LEFT;
auto A_data = A.data_ptr<scalar_t>();
auto B_data = B.data_ptr<scalar_t>();
auto A_mat_stride = matrixStride(A);
auto B_mat_stride = matrixStride(B);
auto batch_size = batchCount(A);
auto m = cuda_int_cast(A.size(-2), "m");
auto n = cuda_int_cast(A.size(-1), "n");
auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
auto lda = std::max<int>(1, m);
auto alpha = scalar_t{1};
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* A_working_ptr = &A_data[i * A_mat_stride];
scalar_t* B_working_ptr = &B_data[i * B_mat_stride];
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::trsm(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_working_ptr, lda, B_working_ptr, lda);
}
}
void triangular_solve_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
cublasOperation_t trans = transpose ? CUBLAS_OP_T : CUBLAS_OP_N;
trans = conjugate_transpose ? CUBLAS_OP_C : trans;
cublasDiagType_t diag = unitriangular ? CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT;
cublasSideMode_t side = CUBLAS_SIDE_LEFT;
auto A_data = A.data_ptr<scalar_t>();
auto B_data = B.data_ptr<scalar_t>();
auto A_mat_stride = matrixStride(A);
auto B_mat_stride = matrixStride(B);
auto batch_size = cuda_int_cast(batchCount(A), "batch_size");
auto m = cuda_int_cast(A.size(-2), "m");
auto n = cuda_int_cast(A.size(-1), "n");
auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
auto lda = std::max<int>(1, m);
auto alpha = scalar_t{1};
// cuBLAS batched trsm requires the input to be a device array of pointers to the individual device matrices
Tensor A_ptr_array = get_device_pointers<scalar_t>(A);
Tensor B_ptr_array = get_device_pointers<scalar_t>(B);
auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr());
auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::trsmBatched(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_ptr_array_data, lda, B_ptr_array_data, lda, batch_size);
}
void triangular_solve_batched_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
template <typename scalar_t>
inline void apply_gels_batched(const Tensor& A, Tensor& B, Tensor& infos) {
// AMD ROCm backend is implemented via rewriting all CUDA calls to HIP
// rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER
// rocSOLVER is currently not used in ATen, therefore we raise an error in this case
#ifndef CUDART_VERSION
TORCH_CHECK(false, "torch.linalg.lstsq: Batched version is supported only with cuBLAS backend.")
#else
auto trans = CUBLAS_OP_N;
auto m = cuda_int_cast(A.size(-2), "m");
auto n = cuda_int_cast(A.size(-1), "n");
auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
// cuBLAS from cuda10 and older doesn't work with nrhs == 0 (cuda11 works)
// so we need to put this early return
if (nrhs == 0) {
return;
}
auto batch_size = cuda_int_cast(batchCount(B), "batch_size");
auto lda = std::max<int>(1, m);
auto ldb = std::max<int>(1, m);
// cuBLAS's requirement
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA with cuBLAS backend.");
// cuBLAS documentation says:
// Matrices Aarray[i] should not overlap; otherwise, undefined behavior is expected.
// explicitly broadcast the batch dimensions of A
IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2);
IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2);
std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes);
expand_batch_portion.insert(expand_batch_portion.end(), {A.size(-2), A.size(-1)});
Tensor A_expanded = A.expand({expand_batch_portion});
Tensor A_broadcasted = cloneBatchedColumnMajor(A_expanded);
// cuBLAS batched gels requires the input to be a device array of pointers to the individual device matrices
Tensor A_ptr_array = get_device_pointers<scalar_t>(A_broadcasted);
Tensor B_ptr_array = get_device_pointers<scalar_t>(B);
auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr());
auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr());
auto infos_data = infos.data_ptr<int>();
auto handle = at::cuda::getCurrentCUDABlasHandle();
int info;
at::cuda::blas::gelsBatched<scalar_t>(
handle, trans, m, n, nrhs,
A_ptr_array_data, lda,
B_ptr_array_data, ldb,
&info,
infos_data,
batch_size);
// negative info indicates that an argument to gelsBatched call is invalid
TORCH_INTERNAL_ASSERT(info == 0);
#endif
}
// This is a type dispatching helper function for 'apply_gels_batched'
void gels_batched_cublas(const Tensor& a, Tensor& b, Tensor& infos) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_batched_cublas", [&]{
apply_gels_batched<scalar_t>(a, b, infos);
});
}
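// Illustrative usage sketch (not part of this file; assumes the public at::linalg_lstsq API,
// which may route to this cuBLAS backend for batched CUDA inputs):
//
//   at::Tensor a = at::randn({4, 5, 3}, at::kCUDA); // overdetermined: m >= n
//   at::Tensor b = at::randn({4, 5, 2}, at::kCUDA);
//   at::Tensor solution = std::get<0>(at::linalg_lstsq(a, b)); // shape (4, 3, 2)
//
// Underdetermined systems (m < n) are rejected by the TORCH_CHECK above.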
#ifdef USE_CUSOLVER
inline static Tensor column_major_identity_matrix_like(const Tensor& self) {
auto size = self.sizes();
auto size_slice = IntArrayRef(size.data(), size.size()-1);
return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1);
}
template <typename scalar_t>
inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_getrf_ptr, int* info_getrs_ptr, int n, int lda) {
// self_inv_ptr should already be an identity matrix
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, info_getrf_ptr);
at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, self_inv_ptr, lda, info_getrs_ptr, CUBLAS_OP_N);
}
template <typename scalar_t>
static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
const int batch_size = cuda_int_cast(batchCount(self), "batchCount");
const int n = cuda_int_cast(self.size(-2), "self.size(-2)");
const int lda = std::max<int>(1, n);
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_getrf_data = infos_getrf.data_ptr<int>();
auto infos_getrs_data = infos_getrs.data_ptr<int>();
auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
// Heuristic: for small batch sizes or large matrix sizes, we use a for-loop to iterate over the batches instead of
// calling the batched cublas routine.
if (batch_size <= 8 || /* batch_size > 8 && */ n >= 512) {
for (int64_t i = 0; i < batch_size; i++) {
auto dataPtr = allocator.allocate(sizeof(int) * lda);
int* pivot = reinterpret_cast<int*>(dataPtr.get());
int* infos_getrf_working_ptr = &infos_getrf_data[i];
int* infos_getrs_working_ptr = &infos_getrs_data[i];
_apply_single_inverse_helper<scalar_t>(
&self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, infos_getrf_working_ptr, infos_getrs_working_ptr, n, lda);
}
} else {
// cublas batched kernels require the input to be a "device array of device pointers"
Tensor self_array = at::arange(
reinterpret_cast<int64_t>(self_data),
reinterpret_cast<int64_t>(&self_data[(batch_size-1) * self_mat_stride]) + 1,
static_cast<int64_t>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong));
Tensor self_inv_array = at::arange(
reinterpret_cast<int64_t>(self_inv_data),
reinterpret_cast<int64_t>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1,
static_cast<int64_t>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong));
auto dataPtr = allocator.allocate(sizeof(int)*batch_size*lda);
int* ipiv_array = reinterpret_cast<int*>(dataPtr.get());
at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
ipiv_array, infos_getrf_data, batch_size);
at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
ipiv_array, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr()), lda, infos_getrs_data, batch_size);
}
}
template <typename scalar_t>
static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
int n = cuda_int_cast(self.size(-2), "self.size(-2)");
int lda = std::max<int>(1, n);
Tensor ipiv = at::empty({lda}, self.options().dtype(at::kInt));
_apply_single_inverse_helper<scalar_t>(
self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), infos_getrf.data_ptr<int>(), infos_getrs.data_ptr<int>(), n, lda);
}
// This is a type dispatching helper function for 'apply_batched_inverse_lib' and 'apply_single_inverse_lib'
Tensor& _linalg_inv_out_helper_cuda_lib(Tensor& result, Tensor& infos_getrf, Tensor& infos_getrs) {
// assuming result is in column major order and contains the matrices to invert
Tensor input_working_copy = cloneBatchedColumnMajor(result);
// for getrf + getrs (cusolver path)
// result should be filled with identity matrices
result.zero_();
result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
const int batch_size = cuda_int_cast(batchCount(result), "batchCount");
if (result.dim() > 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse_lib<scalar_t>(
input_working_copy, result, infos_getrf, infos_getrs);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse_lib<scalar_t>(input_working_copy, result, infos_getrf, infos_getrs);
});
}
return result;
}
// Entry point for computing `inverse` using cusolver getrf + getrs or cublas getrfBatched + getriBatched
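// Illustrative usage sketch (not part of this file; assumes a build where at::inverse /
// at::linalg_inv dispatches to this cuSOLVER/cuBLAS helper for CUDA tensors):
//
//   at::Tensor a = at::randn({16, 64, 64}, at::kCUDA);
//   at::Tensor a_inv = at::linalg_inv(a); // batched inverse computed on the GPU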
Tensor _inverse_helper_cuda_lib(const Tensor& self) {
Tensor self_working_copy = cloneBatchedColumnMajor(self);
Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy);
const int batch_size = cuda_int_cast(batchCount(self), "batchCount");
if (self.dim() > 2 && batch_size > 1) {
Tensor infos_getrf = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
Tensor infos_getrs = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse_lib<scalar_t>(
self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
});
batchCheckErrors(infos_getrf, "inverse_cuda");
batchCheckErrors(infos_getrs, "inverse_cuda");
} else {
Tensor infos_getrf = at::zeros({1}, self.options().dtype(kInt));
Tensor infos_getrs = at::zeros({1}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
});
batchCheckErrors(infos_getrf, "inverse_cuda");
batchCheckErrors(infos_getrs, "inverse_cuda");
}
return self_inv_working_copy;
}
// call cusolver gesvdj function to calculate svd
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
int batchsize = cuda_int_cast(batchCount(self), "batch size");
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int ldvt = std::max<int>(1, n);
for(int i = 0; i < batchsize; i++){
// gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
gesvdjInfo_t gesvdj_params;
TORCH_CUSOLVER_CHECK(cusolverDnCreateGesvdjInfo(&gesvdj_params));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
auto jobz = compute_uv ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
at::cuda::solver::gesvdj<scalar_t>(
handle, jobz, /*econ=*/ some ? 1 : 0, m, n,
self_data + i * self_stride,
lda,
S_data + i * S_stride,
U_data + i * U_stride,
lda,
VT_data + i * VT_stride,
ldvt,
infos.data_ptr<int>() + i,
gesvdj_params
);
TORCH_CUSOLVER_CHECK(cusolverDnDestroyGesvdjInfo(gesvdj_params));
}
}
// wrapper around _apply_svd_lib_gesvdj that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
Tensor self_working_copy = cloneBatchedColumnMajor(self);
VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdj", [&] {
_apply_svd_lib_gesvdj<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv, some);
});
}
// call cusolver gesvdj batched function to calculate svd
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
int batchsize = cuda_int_cast(batchCount(self), "batch size");
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int ldvt = std::max<int>(1, n);
TORCH_INTERNAL_ASSERT(m <= 32 && n <= 32, "gesvdjBatched requires both matrix dimensions not greater than 32, but got "
"m = ", m, " n = ", n);
// gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
gesvdjInfo_t gesvdj_params;
TORCH_CUSOLVER_CHECK(cusolverDnCreateGesvdjInfo(&gesvdj_params));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetSortEig(gesvdj_params, 1));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
auto jobz = compute_uv ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
at::cuda::solver::gesvdjBatched<scalar_t>(
handle, jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt,
infos.data_ptr<int>(), gesvdj_params, batchsize
);
TORCH_CUSOLVER_CHECK(cusolverDnDestroyGesvdjInfo(gesvdj_params));
}
// wrapper around _apply_svd_lib_gesvdjBatched that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
Tensor self_working_copy = cloneBatchedColumnMajor(self);
VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdjBatched", [&] {
_apply_svd_lib_gesvdjBatched<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv);
});
}
// Entry point for computing `svd` using cusolver gesvdj and gesvdjBatched
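// Illustrative usage sketch (not part of this file; assumes a build where at::svd on CUDA
// tensors reaches this helper):
//
//   at::Tensor a = at::randn({8, 16, 16}, at::kCUDA);
//   at::Tensor u, s, v;
//   std::tie(u, s, v) = at::svd(a, /*some=*/true, /*compute_uv=*/true);
//
// Per the heuristic below, small batched inputs (m, n <= 32) go through gesvdjBatched,
// everything else through the looped gesvdj path.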
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_lib(const Tensor& self, bool some, bool compute_uv) {
const int64_t batch_size = batchCount(self);
at::Tensor infos = at::zeros({batch_size}, self.options().dtype(at::kInt));
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
const int64_t k = std::min(m, n);
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = \
_create_U_S_VT(self, some, compute_uv, /* svd_use_cusolver = */ true);
// U, S, V working copies are already in column-major order now
// heuristic for using `gesvdjBatched` over `gesvdj`
if (m <= 32 && n <= 32 && batch_size > 1 && (!some || m == n)) {
apply_svd_lib_gesvdjBatched(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv);
} else {
apply_svd_lib_gesvdj(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv, some);
}
// A device-host sync will be performed.
batchCheckErrors(infos, "svd_cuda");
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// Implementation of Cholesky decomposition using looped cusolverDn<T>potrf or cusolverDnXpotrf (64-bit)
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrf_looped(const Tensor& self_working_copy, bool upper, const Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
const int64_t n = self_working_copy.size(-1);
const int64_t lda = std::max<int64_t>(1, n);
const int64_t batch_size = batchCount(self_working_copy);
const int64_t matrix_stride = matrixStride(self_working_copy);
scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();
int* infos_ptr = infos.data_ptr<int>();
#ifdef USE_CUSOLVER_64_BIT
size_t worksize_device;
size_t worksize_host;
cusolverDnParams_t params;
cudaDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>();
TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(¶ms));
at::cuda::solver::xpotrf_buffersize(handle, params, uplo, n, datatype, nullptr, lda, datatype, &worksize_device, &worksize_host);
// allocate workspace storage
auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
auto workdata_device = device_allocator.allocate(worksize_device * batch_size);
void* workdata_device_ptr = workdata_device.get();
auto& host_allocator = *at::getCPUAllocator();
auto workdata_host = host_allocator.allocate(worksize_host * batch_size);
void* workdata_host_ptr = workdata_host.get();
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::xpotrf(
handle, params, uplo, n, datatype,
self_working_copy_ptr + i * matrix_stride,
lda, datatype,
(char*)workdata_device_ptr + i * worksize_device, worksize_device,
(char*)workdata_host_ptr + i * worksize_host, worksize_host,
infos_ptr + i
);
}
TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params));
#else // USE_CUSOLVER_64_BIT
int n_32 = cuda_int_cast(n, "n");
int lda_32 = cuda_int_cast(lda, "lda");
int lwork;
at::cuda::solver::potrf_buffersize<scalar_t>(
handle, uplo, n_32, nullptr, lda_32, &lwork);
// allocate workspace storage
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t)*lwork * batch_size);
scalar_t* work_data_ptr = static_cast<scalar_t*>(work_data.get());
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::potrf<scalar_t>(
handle, uplo, n_32,
self_working_copy_ptr + i * matrix_stride,
lda_32,
work_data_ptr + i * lwork,
lwork,
infos_ptr + i
);
}
#endif // USE_CUSOLVER_64_BIT
}
// Implementation of Cholesky decomposition using batched cusolverDn<T>potrfBatched
// Warning: cusolverDn<T>potrfBatched does not behave well when the matrix size or batch size is zero.
// If you write your own C++ extension and use this function, make sure you do a zero numel check for the input.
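// A minimal guard, mirroring the check done in cholesky_helper_cusolver below:
//
//   if (input.numel() == 0) {
//     return; // nothing to factorize; skip the potrfBatched call entirely
//   }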
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrfBatched(const Tensor& self_working_copy, bool upper, const Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
const int n = cuda_int_cast(self_working_copy.size(-1), "n");
const int lda = std::max<int>(1, n);
const int batch_size = cuda_int_cast(batchCount(self_working_copy), "batch_size");
// cusolver batched kernels require the input to be a "device array of device pointers"
Tensor self_working_copy_array = get_device_pointers<scalar_t>(self_working_copy);
at::cuda::solver::potrfBatched<scalar_t>(
handle, uplo, n,
reinterpret_cast<scalar_t**>(self_working_copy_array.data_ptr()),
lda, infos.data_ptr<int>(), batch_size);
}
void cholesky_helper_cusolver(const Tensor& input, bool upper, const Tensor& info) {
if (input.numel() == 0) {
return;
}
if (use_cusolver_potrf_batched_ && batchCount(input) > 1) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] {
apply_cholesky_cusolver_potrfBatched<scalar_t>(input, upper, info);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] {
apply_cholesky_cusolver_potrf_looped<scalar_t>(input, upper, info);
});
}
}
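// Illustrative usage sketch (not part of this file; assumes a build where at::linalg_cholesky
// on CUDA tensors is backed by this helper):
//
//   at::Tensor a = at::randn({4, 8, 8}, at::kCUDA);
//   a = at::matmul(a, a.transpose(-2, -1)) + 1e-3 * at::eye(8, a.options()); // make it SPD
//   at::Tensor L = at::linalg_cholesky(a); // lower-triangular Cholesky factors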
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrs(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
const int64_t n = self_working_copy.size(-2);
const int64_t nrhs = self_working_copy.size(-1);
const int64_t lda = std::max<int64_t>(1, n);
const int64_t batch_size = batchCount(self_working_copy);
const int64_t self_matrix_stride = matrixStride(self_working_copy);
scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();
const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>();
const int64_t A_matrix_stride = matrixStride(A_column_major_copy);
const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1));
int* infos_ptr = infos.data_ptr<int>();
#ifdef USE_CUSOLVER_64_BIT
cusolverDnParams_t params;
cudaDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>();
TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(¶ms));
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::xpotrs(
handle, params, uplo, n, nrhs, datatype,
A_ptr + i * A_matrix_stride,
lda, datatype,
self_working_copy_ptr + i * self_matrix_stride,
ldb,
infos_ptr
);
}
TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params));
#else // USE_CUSOLVER_64_BIT
int n_32 = cuda_int_cast(n, "n");
int nrhs_32 = cuda_int_cast(nrhs, "nrhs");
int lda_32 = cuda_int_cast(lda, "lda");
int ldb_32 = cuda_int_cast(ldb, "ldb");
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::potrs<scalar_t>(
handle, uplo, n_32, nrhs_32,
A_ptr + i * A_matrix_stride,
lda_32,
self_working_copy_ptr + i * self_matrix_stride,
ldb_32,
infos_ptr
);
}
#endif // USE_CUSOLVER_64_BIT
}
// This code path is only dispatched to if MAGMA is not linked in the pytorch build.
// cusolverDn<t>potrsBatched only supports nrhs == 1
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrsBatched(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
const int64_t n = self_working_copy.size(-2);
const int64_t nrhs = self_working_copy.size(-1);
const int64_t lda = std::max<int64_t>(1, n);
const int64_t batch_size = batchCount(self_working_copy);
const int64_t self_matrix_stride = matrixStride(self_working_copy);
scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();
const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>();
const int64_t A_matrix_stride = matrixStride(A_column_major_copy);
const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1));
int* infos_ptr = infos.data_ptr<int>();
auto self_ptr_array = get_device_pointers<scalar_t>(self_working_copy);
auto A_ptr_array = get_device_pointers<scalar_t>(A_column_major_copy);
at::cuda::solver::potrsBatched(
handle, uplo,
cuda_int_cast(n, "n"),
cuda_int_cast(nrhs, "nrhs"),
reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()),
cuda_int_cast(lda, "lda"),
reinterpret_cast<scalar_t**>(self_ptr_array.data_ptr()),
cuda_int_cast(ldb, "ldb"),
infos_ptr,
cuda_int_cast(batch_size, "batch_size")
);
}
Tensor _cholesky_solve_helper_cuda_cusolver(const Tensor& self, const Tensor& A, bool upper) {
const int64_t batch_size = batchCount(self);
at::Tensor infos = at::zeros({1}, self.options().dtype(at::kInt));
at::Tensor self_working_copy = cloneBatchedColumnMajor(self);
at::Tensor A_column_major_copy = cloneBatchedColumnMajor(A);
const int64_t nrhs = self_working_copy.size(-1);
// cusolverDn<t>potrsBatched only supports nrhs == 1
if (batch_size > 1 && nrhs == 1) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs_batched", [&] {
apply_cholesky_cusolver_potrsBatched<scalar_t>(self_working_copy, A_column_major_copy, upper, infos);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs", [&] {
apply_cholesky_cusolver_potrs<scalar_t>(self_working_copy, A_column_major_copy, upper, infos);
});
}
// info from potrs and potrsBatched only reports if the i-th parameter is wrong, not about the matrix singularity, etc.
// So we don't need to check it all the time.
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0);
return self_working_copy;
}
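// Illustrative usage sketch (not part of this file; assumes at::cholesky_solve reaches this
// helper when MAGMA is not used):
//
//   at::Tensor L = at::linalg_cholesky(a); // Cholesky factor, as in the sketch above
//   at::Tensor b = at::randn({4, 8, 1}, at::kCUDA); // nrhs == 1, so potrsBatched is eligible
//   at::Tensor x = at::cholesky_solve(b, L, /*upper=*/false);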
void _cholesky_inverse_cusolver_potrs_based(Tensor& result, Tensor& infos, bool upper) {
at::Tensor input_working_copy = cloneBatchedColumnMajor(result);
at::Tensor infos_gpu = at::zeros({1}, result.options().dtype(at::kInt));
result.fill_(0);
result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_cuda_potri", [&] {
apply_cholesky_cusolver_potrs<scalar_t>(result, input_working_copy, upper, infos_gpu);
});
// Debug only: info of cusolver potrs only check if the i-th parameter is wrong
// Function argument `infos` is a CPU tensor, the following copy will cause a device-host sync.
// infos.copy_(infos_gpu);
}
Tensor& cholesky_inverse_kernel_impl_cusolver(Tensor &result, Tensor& infos, bool upper) {
_cholesky_inverse_cusolver_potrs_based(result, infos, upper);
return result;
}
/*
The geqrf function computes the QR decomposition of an m x n matrix A.
Args:
* `A` - [in] Tensor with matrices for QR decomposition,
[out] Tensor containing R in the upper triangle of A
and elementary reflectors below the main diagonal of A
* `tau` - Tensor containing the magnitudes of the elementary reflectors
* `m` - The number of rows of `input` to consider
* `n` - The number of columns of `input` to consider (actual sizes of `input` could be larger)
For further details, please see the cuSOLVER documentation for GEQRF.
*/
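// Illustrative usage sketch (not part of this file; assumes the public at::geqrf API, which
// dispatches to geqrf_cusolver for CUDA tensors in this build):
//
//   at::Tensor a = at::randn({5, 3}, at::kCUDA);
//   at::Tensor qr, tau;
//   std::tie(qr, tau) = at::geqrf(a); // R above the diagonal, Householder reflectors below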
template <typename scalar_t>
static void apply_geqrf(const Tensor& A, const Tensor& tau) {
int64_t m = A.size(-2);
int64_t n = A.size(-1);
int64_t lda = std::max<int64_t>(1, m);
int64_t batch_size = batchCount(A);
auto A_stride = matrixStride(A);
auto tau_stride = tau.size(-1);
auto A_data = A.data_ptr<scalar_t>();
auto tau_data = tau.data_ptr<scalar_t>();
auto infos = at::zeros({1}, A.options().dtype(at::kInt));
auto infos_data = infos.data_ptr<int>();
// get the optimal work size and allocate workspace tensor
#ifdef USE_CUSOLVER_64_BIT
size_t worksize_device; // workspaceInBytesOnDevice
size_t worksize_host; // workspaceInBytesOnHost
cusolverDnParams_t params = NULL; // use default algorithm (currently it's the only option)
at::cuda::solver::xgeqrf_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(),
params,
m,
n,
A_data,
lda,
tau_data,
&worksize_device,
&worksize_host);
#else
int lwork;
int m_32 = cuda_int_cast(m, "m");
int n_32 = cuda_int_cast(n, "n");
int lda_32 = cuda_int_cast(lda, "lda");
at::cuda::solver::geqrf_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), m_32, n_32, A_data, lda_32, &lwork);
#endif // USE_CUSOLVER_64_BIT
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* A_working_ptr = &A_data[i * A_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
#ifdef USE_CUSOLVER_64_BIT
// allocate workspace storage on device and host
auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
auto work_device_data = device_allocator.allocate(worksize_device);
auto& host_allocator = *at::getCPUAllocator();
auto work_host_data = host_allocator.allocate(worksize_host);
at::cuda::solver::xgeqrf<scalar_t>(
handle,
params,
m,
n,
A_working_ptr,
lda,
tau_working_ptr,
static_cast<scalar_t*>(work_device_data.get()),
worksize_device,
static_cast<scalar_t*>(work_host_data.get()),
worksize_host,
infos_data);
#else
// allocate workspace storage on device
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t) * std::max<int>(1, lwork));
at::cuda::solver::geqrf<scalar_t>(
handle,
m_32,
n_32,
A_working_ptr,
lda_32,
tau_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
infos_data);
#endif // USE_CUSOLVER_64_BIT
}
// info from geqrf only reports if the i-th parameter is wrong, not about the matrix singularity
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0);
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_cusolver(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_cuda", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
/*
The ormqr function multiplies Q with another matrix from a sequence of
elementary reflectors, such as is produced by the geqrf function.
Args:
* `input` - Tensor with elementary reflectors below the diagonal,
encoding the matrix Q.
* `tau` - Tensor containing the magnitudes of the elementary
reflectors.
* `other` - [in] Tensor containing the matrix to be multiplied.
[out] result of the matrix multiplication with Q.
* `left` - bool, determining whether `other` is left- or right-multiplied with Q.
* `transpose` - bool, determining whether to transpose (or conjugate transpose) Q before multiplying.
For further details, please see the cuSOLVER documentation for ORMQR and UNMQR.
*/
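// Illustrative usage sketch (not part of this file; assumes the public at::ormqr API and the
// `qr`/`tau` pair produced in the geqrf sketch above):
//
//   at::Tensor other = at::randn({5, 2}, at::kCUDA);
//   // multiplies by Q from the left without materializing Q explicitly
//   at::Tensor prod = at::ormqr(qr, tau, other, /*left=*/true, /*transpose=*/false);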
template <typename scalar_t>
static void apply_ormqr(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto side = left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT;
auto trans = transpose ? (input.is_complex() ? CUBLAS_OP_C : CUBLAS_OP_T) : CUBLAS_OP_N;
auto input_data = input.data_ptr<scalar_t>();
auto tau_data = tau.data_ptr<scalar_t>();
auto other_data = other.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto other_matrix_stride = matrixStride(other);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto m = cuda_int_cast(other.size(-2), "m");
auto n = cuda_int_cast(other.size(-1), "n");
auto k = cuda_int_cast(tau.size(-1), "k");
auto lda = std::max<int>(1, left ? m : n);
auto ldc = std::max<int>(1, m);
// get the optimal work size and allocate workspace tensor
int lwork;
at::cuda::solver::ormqr_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), side, trans, m, n, k, input_data, lda, tau_data, other_data, ldc, &lwork);
auto info = at::zeros({1}, input.options().dtype(at::kInt));
auto info_data = info.data_ptr<int>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* other_working_ptr = &other_data[i * other_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
// allocate workspace storage
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t)*lwork);
at::cuda::solver::ormqr<scalar_t>(
handle, side, trans, m, n, k,
input_working_ptr,
lda,
tau_working_ptr,
other_working_ptr,
ldc,
static_cast<scalar_t*>(work_data.get()),
lwork,
info_data
);
// info from ormqr only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
}
}
// This is a type dispatching helper function for 'apply_ormqr'
void ormqr_cusolver(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "orgmr_cuda", [&]{
apply_ormqr<scalar_t>(input, tau, other, left, transpose);
});
}
/*
The orgqr function allows reconstruction of an orthogonal (or unitary) matrix Q,
from a sequence of elementary reflectors, such as produced by the geqrf function.
Args:
* `self` - Tensor with the directions of the elementary reflectors below the diagonal,
it will be overwritten with the result
* `tau` - Tensor containing the magnitudes of the elementary reflectors
For further details, please see the cuSOLVER documentation for ORGQR and UNGQR.
*/
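// Illustrative usage sketch (not part of this file; assumes the public at::orgqr API and the
// `qr`/`tau` pair from the geqrf sketch above):
//
//   at::Tensor q = at::orgqr(qr, tau); // reconstructs the explicit orthonormal factor Q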
template <typename scalar_t>
inline static void apply_orgqr(Tensor& self, const Tensor& tau) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto tau_data = tau.data_ptr<scalar_t>();
auto self_matrix_stride = matrixStride(self);
auto batchsize = cuda_int_cast(batchCount(self), "batch size");
auto m = cuda_int_cast(self.size(-2), "m");
auto n = cuda_int_cast(self.size(-1), "n");
auto k = cuda_int_cast(tau.size(-1), "k");
auto tau_stride = std::max<int>(1, k);
auto lda = std::max<int>(1, m);
// LAPACK's requirement
TORCH_INTERNAL_ASSERT(m >= n);
TORCH_INTERNAL_ASSERT(n >= k);
// cuSOLVER doesn't compute anything for this case, which is wrong
// the result should be a matrix with 1 on the diagonal
if (k == 0) {
self.fill_(0);
self.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
return;
}
// get the optimal work size and allocate workspace tensor
int lwork;
at::cuda::solver::orgqr_buffersize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), m, n, k, self_data, lda, tau_data, &lwork);
auto info = at::zeros({1}, self.options().dtype(at::kInt));
auto info_data = info.data_ptr<int>();
for (auto i = decltype(batchsize){0}; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
// allocate workspace storage
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t)*lwork);
at::cuda::solver::orgqr<scalar_t>(
handle, m, n, k,
self_working_ptr,
lda,
tau_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
info_data
);
// info from orgqr only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
}
}
// This is a type dispatching helper function for 'apply_orgqr'
Tensor& orgqr_helper_cusolver(Tensor& result, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "orgqr_cuda", [&]{
apply_orgqr<scalar_t>(result, tau);
});
return result;
}
template <typename scalar_t>
static void apply_syevd(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
cusolverEigMode_t jobz = compute_eigenvectors ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
int64_t n = vectors.size(-1);
int64_t lda = std::max<int64_t>(1, n);
int64_t batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<int>();
// get the optimal work size and allocate workspace tensor
#ifdef USE_CUSOLVER_64_BIT
size_t worksize_device; // workspaceInBytesOnDevice
size_t worksize_host; // workspaceInBytesOnHost
cusolverDnParams_t params = NULL; // use default algorithm (currently it's the only option)
at::cuda::solver::xsyevd_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(),
params,
jobz,
uplo,
n,
vectors_data,
lda,
values_data,
&worksize_device,
&worksize_host);
#else
int lwork;
int n_32 = cuda_int_cast(n, "n");
int lda_32 = cuda_int_cast(lda, "lda");
at::cuda::solver::syevd_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n_32, vectors_data, lda_32, values_data, &lwork);
#endif // USE_CUSOLVER_64_BIT
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
int* info_working_ptr = &infos_data[i];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
#ifdef USE_CUSOLVER_64_BIT
// allocate workspace storage on device and host
auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
auto work_device_data = device_allocator.allocate(worksize_device);
auto& host_allocator = *at::getCPUAllocator();
auto work_host_data = host_allocator.allocate(worksize_host);
at::cuda::solver::xsyevd<scalar_t>(
handle,
params,
jobz,
uplo,
n,
vectors_working_ptr,
lda,
values_working_ptr,
static_cast<scalar_t*>(work_device_data.get()),
worksize_device,
static_cast<scalar_t*>(work_host_data.get()),
worksize_host,
info_working_ptr);
#else
// allocate workspace storage on device
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
at::cuda::solver::syevd<scalar_t>(
handle,
jobz,
uplo,
n_32,
vectors_working_ptr,
lda_32,
values_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
info_working_ptr);
#endif // USE_CUSOLVER_64_BIT
}
}
template <typename scalar_t>
static void apply_syevj(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
cusolverEigMode_t jobz = compute_eigenvectors ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
int n = cuda_int_cast(vectors.size(-1), "n");
int lda = std::max<int>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<int>();
// syevj_params controls the numerical accuracy of syevj
// by default the tolerance is set to machine accuracy
// the maximum number of iteration of Jacobi method by default is 100
// cuSOLVER documentation says: "15 sweeps are good enough to converge to machine accuracy"
// LAPACK has an SVD routine based on a similar Jacobi algorithm (gesvj), where the maximum number of iterations is set to 30
// Let's use the default values for now
syevjInfo_t syevj_params;
TORCH_CUSOLVER_CHECK(cusolverDnCreateSyevjInfo(&syevj_params));
// get the optimal work size and allocate workspace tensor
int lwork;
at::cuda::solver::syevj_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params);
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
int* info_working_ptr = &infos_data[i];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
// allocate workspace storage on device
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
at::cuda::solver::syevj<scalar_t>(
handle,
jobz,
uplo,
n,
vectors_working_ptr,
lda,
values_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
info_working_ptr,
syevj_params);
}
TORCH_CUSOLVER_CHECK(cusolverDnDestroySyevjInfo(syevj_params));
}
template <typename scalar_t>
static void apply_syevj_batched(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
cusolverEigMode_t jobz = compute_eigenvectors ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
int n = cuda_int_cast(vectors.size(-1), "n");
int lda = std::max<int>(1, n);
int batch_size = cuda_int_cast(batchCount(vectors), "batch_size");
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<int>();
// syevj_params controls the numerical accuracy of syevj
// by default the tolerance is set to machine accuracy
// the maximum number of iteration of Jacobi method by default is 100
// cuSOLVER documentation says: "15 sweeps are good enough to converge to machine accuracy"
// LAPACK has an SVD routine based on a similar Jacobi algorithm (gesvj), where the maximum number of iterations is set to 30
// Let's use the default values for now
syevjInfo_t syevj_params;
TORCH_CUSOLVER_CHECK(cusolverDnCreateSyevjInfo(&syevj_params));
TORCH_CUSOLVER_CHECK(cusolverDnXsyevjSetSortEig(syevj_params, 1));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
// get the optimal work size and allocate workspace tensor
int lwork;
at::cuda::solver::syevjBatched_bufferSize<scalar_t>(
handle,
jobz,
uplo,
n,
vectors_data,
lda,
values_data,
&lwork,
syevj_params,
batch_size);
// allocate workspace storage on device
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
at::cuda::solver::syevjBatched<scalar_t>(
handle,
jobz,
uplo,
n,
vectors_data,
lda,
values_data,
static_cast<scalar_t*>(work_data.get()),
lwork,
infos_data,
syevj_params,
batch_size);
TORCH_CUSOLVER_CHECK(cusolverDnDestroySyevjInfo(syevj_params));
}
static void linalg_eigh_cusolver_syevd(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] {
apply_syevd<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
});
}
static void linalg_eigh_cusolver_syevj(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] {
apply_syevj<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
});
}
static void linalg_eigh_cusolver_syevj_batched(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] {
apply_syevj_batched<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
});
}
void linalg_eigh_cusolver(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
if (use_cusolver_syevj_batched_ && batchCount(eigenvectors) > 1 && eigenvectors.size(-1) <= 32) {
// Use syevjBatched for batched matrix operations when matrix size <= 32
// See https://github.com/pytorch/pytorch/pull/53040#issuecomment-788264724
linalg_eigh_cusolver_syevj_batched(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
} else if (eigenvectors.scalar_type() == at::kFloat && eigenvectors.size(-1) >= 32 && eigenvectors.size(-1) <= 512) {
// syevj is better than syevd for float32 dtype and matrix sizes 32x32 - 512x512
// See https://github.com/pytorch/pytorch/pull/53040#issuecomment-788264724
linalg_eigh_cusolver_syevj(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
} else {
linalg_eigh_cusolver_syevd(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
}
}
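// Illustrative examples of the dispatch above (hypothetical inputs, not from the linked benchmarks),
// assuming use_cusolver_syevj_batched_ is enabled:
//   eigenvectors of shape (8, 16, 16), double     -> syevjBatched (batched and n <= 32)
//   eigenvectors of shape (1, 256, 256), float    -> syevj        (float32 with 32 <= n <= 512)
//   eigenvectors of shape (1, 1024, 1024), double -> syevd        (general fallback)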
// The 'apply_' prefix is used for functions templated by dtype that call an API routine
// underneath. Since the cusolver API has a slightly different structure we do not prepend
// apply_ to this function.
void lu_looped_cusolver(const Tensor& self, const Tensor& pivots, const Tensor& infos, bool get_pivots) {
// Fill the pivots tensor with indices using 1-based (Fortran) indexing. This
// is needed for maintaining the same results with MAGMA.
auto k = std::min(self.size(-2), self.size(-1));
Tensor pivots_tmp = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
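  // e.g. for a batch of 5x3 inputs, k == 3 and every pivot row starts out as {1, 2, 3};
  // getrf overwrites these below when get_pivots is true, and they are kept as-is otherwise.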
AT_DISPATCH_FLOATING_TYPES(
self.scalar_type(),
"lu_cusolver",
[&self,
&pivots,
&infos,
&get_pivots]() {
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int64_t self_stride = matrixStride(self);
int64_t batch_size = batchCount(self);
scalar_t* self_data = self.data_ptr<scalar_t>();
int* infos_data = infos.data_ptr<int>();
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) {
if (get_pivots) {
auto pivots_data = pivots.data_ptr<int>();
auto pivots_stride = pivots.size(-1);
at::cuda::solver::getrf<scalar_t>(
handle, m, n,
self_data + batch * self_stride,
lda,
pivots_data + batch * pivots_stride,
infos_data + batch
);
}
else {
at::cuda::solver::getrf<scalar_t>(
handle, m, n,
self_data + batch * self_stride,
lda,
nullptr,
infos_data + batch
);
}
}
});
// Necessary because cuSOLVER uses nan for outputs that correspond to 0 in MAGMA for non-pivoted LU.
// See https://github.com/pytorch/pytorch/issues/53879 for more details.
if (!get_pivots) {
at::nan_to_num_(const_cast<Tensor&>(self), 0, std::numeric_limits<double>::infinity(),
-std::numeric_limits<double>::infinity());
}
}
void lu_solve_looped_cusolver(const Tensor& b, const Tensor& lu, const Tensor& pivots, cublasOperation_t trans) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_cusolver", [&] {
int n = cuda_int_cast(lu.size(-2), "n");
int nrhs = cuda_int_cast(b.size(-1), "nrhs");
auto batch_size = batchCount(lu);
auto info = at::zeros({1}, lu.options().dtype(kInt));
auto info_data = info.data_ptr<int>();
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto pivots_data = pivots.data_ptr<int>();
auto pivots_stride = pivots.size(-1);
auto lu_stride = matrixStride(lu);
auto b_stride = matrixStride(b);
int leading_dimension = cuda_int_cast(std::max<int>(1, n), "leading_dimension");
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) {
at::cuda::solver::getrs<scalar_t>(
handle,
n,
nrhs,
lu_data + batch * lu_stride,
leading_dimension,
pivots_data + batch * pivots_stride,
b_data + batch * b_stride,
leading_dimension,
info_data,
trans);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
}
});
}
#endif // USE_CUSOLVER
}} // namespace at::native
|
b8a595fb57ba8908a0ff6bd2580a20115148eac8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* File common.cu contains definitions used by both sequential and parallel systematic resampling.
*/
#include <random>
#include <time.h>
#include "common_hip.cuh"
#include "inference/smc/smc.cuh"
#include "utils/misc.cuh"
#include "inference/smc/particles_memory_handler.cuh"
#ifdef __NVCC__
std::default_random_engine generatorRes;
std::uniform_real_distribution<floating_t> uniformCPU(0.0, 1.0);
#endif
resampler_t initResampler(int numParticles, size_t progStateSize) {
resampler_t resampler;
#ifdef __NVCC__
generatorRes.seed(time(NULL) * 3); // Multiply by 3 to avoid same seed as distributions.
allocateMemory<floating_t>(&resampler.wSquared, numParticles);
#endif
allocateMemory<int>(&resampler.ancestor, numParticles);
allocateMemory<int>(&resampler.cumulativeOffspring, numParticles);
allocateMemory<floating_t>(&resampler.prefixSum, numParticles);
resampler.auxParticles = allocateParticles(numParticles, progStateSize);
resampler.progStateSize = progStateSize;
return resampler;
}
void destResampler(resampler_t resampler) {
freeMemory<int>(resampler.ancestor);
freeMemory<int>(resampler.cumulativeOffspring);
freeMemory<floating_t>(resampler.prefixSum);
#ifdef __NVCC__
freeMemory<floating_t>(resampler.wSquared);
#endif
freeParticles(resampler.auxParticles);
}
/*
// Obsolete
HOST DEV resampler_t initResamplerNested(int numParticles, size_t progStateSize) {
resampler_t resampler;
resampler.ancestor = new int[numParticles];
resampler.cumulativeOffspring = new int[numParticles];
resampler.prefixSum = new floating_t[numParticles];
resampler.auxParticles = allocateParticlesNested(numParticles, progStateSize);
return resampler;
}
HOST DEV void destResamplerNested(resampler_t resampler) {
delete[] resampler.ancestor;
delete[] resampler.cumulativeOffspring;
delete[] resampler.prefixSum;
freeParticlesNested(resampler.auxParticles);
}
*/
HOST DEV void copyParticle(const particles_t particlesDst, const particles_t particlesSrc, int dstIdx, int srcIdx, size_t progStateSize) {
// Program states
#ifdef STACK_SIZE_PROGSTATE
progStateStack_t* dstProgState = particlesDst.progStates + dstIdx;
progStateStack_t* srcProgState = particlesSrc.progStates + srcIdx;
copyStack(dstProgState, srcProgState);
#else
char* psDstAddress = static_cast<char*>(particlesDst.progStates) + progStateSize * dstIdx;
char* psSrcAddress = static_cast<char*>(particlesSrc.progStates) + progStateSize * srcIdx;
copyChunk(psDstAddress, psSrcAddress, progStateSize);
#endif
// Generic particle stuff
particlesDst.next[dstIdx] = particlesSrc.next[srcIdx];
particlesDst.weights[dstIdx] = 0;
}
#ifdef STACK_SIZE_PROGSTATE
HOST DEV void copyStack(progStateStack_t* dst, progStateStack_t* src) {
dst->stackPtr = src->stackPtr;
size_t stackSpaceUsed = src->stackPtr;
// Try to round up copy size to nearest multiple of sizeof(long), this can speed up GPU copying
#ifdef __NVCC__
int remainder = stackSpaceUsed % sizeof(long);
if (remainder > 0)
stackSpaceUsed = MIN(stackSpaceUsed + sizeof(long) - remainder, STACK_SIZE_PROGSTATE);
#endif
copyChunk(dst->stack, src->stack, stackSpaceUsed);
}
#endif
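// copyChunk (below) picks the widest copy loop that the size and alignment allow. For example,
// assuming sizeof(long) == 8 on the target: 24 bytes with 8-byte-aligned pointers -> three long
// copies; 20 bytes with 4-byte-aligned pointers -> five int copies; 7 bytes -> memcpy fallback.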
HOST DEV void copyChunk(void* dst, void* src, size_t bytes) {
#ifdef __NVCC__
// Manual loop copying can be much faster on GPU than device memcpy
// If the struct is aligned in the correct way, the loop long copying can give huge speedups compared to memcpy on GPU
bool longAligned = bytes % sizeof(long) == 0
&& ((std::uintptr_t)dst) % sizeof(long) == 0
&& ((std::uintptr_t)src) % sizeof(long) == 0;
if(longAligned) {
long* dstLong = (long*)(dst);
long* srcLong = (long*)(src);
int numDblWords = bytes / sizeof(long);
for(int i = 0; i < numDblWords; i++) {
dstLong[i] = srcLong[i];
}
} else {
bool intAligned = bytes % sizeof(int) == 0
&& ((std::uintptr_t)dst) % sizeof(int) == 0
&& ((std::uintptr_t)src) % sizeof(int) == 0;
if(intAligned) {
int* dstInt = (int*)(dst);
int* srcInt = (int*)(src);
int numWords = bytes / sizeof(int);
for(int i = 0; i < numWords; i++) {
dstInt[i] = srcInt[i];
}
} else {
// Not aligned, fall back to memcpy
memcpy(dst, src, bytes);
}
}
#else
// On CPU, memcpy seems to perform much better. Seems to be true with OpenMP as well
memcpy(dst, src, bytes);
#endif
}
// This could probably be optimized by sorting the particles by descending weights first
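// Worked example: w = {0.5, 0.3, 0.2} with logWeightSum = 1.0; a draw of u = 0.6 walks the
// running sums 0.5, 0.8, ... and returns index 1, where the accumulated weight first reaches u.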
DEV int sampleAncestor(RAND_STATE_DECLARE const floating_t* w, const floating_t logWeightSum, const int numParticles) {
floating_t u = SAMPLE(uniform, 0.0f, logWeightSum);
floating_t accLogWeightSum = 0;
for (int i = 0; i < numParticles; i++) {
accLogWeightSum += w[i];
if (accLogWeightSum >= u) {
return i;
}
}
return numParticles - 1;
}
| b8a595fb57ba8908a0ff6bd2580a20115148eac8.cu |
/*
* File common.cu contains definitions used by both sequential and parallel systematic resampling.
*/
#include <random>
#include <time.h>
#include "common.cuh"
#include "inference/smc/smc.cuh"
#include "utils/misc.cuh"
#include "inference/smc/particles_memory_handler.cuh"
#ifdef __NVCC__
std::default_random_engine generatorRes;
std::uniform_real_distribution<floating_t> uniformCPU(0.0, 1.0);
#endif
resampler_t initResampler(int numParticles, size_t progStateSize) {
resampler_t resampler;
#ifdef __NVCC__
generatorRes.seed(time(NULL) * 3); // Multiply by 3 to avoid same seed as distributions.
allocateMemory<floating_t>(&resampler.wSquared, numParticles);
#endif
allocateMemory<int>(&resampler.ancestor, numParticles);
allocateMemory<int>(&resampler.cumulativeOffspring, numParticles);
allocateMemory<floating_t>(&resampler.prefixSum, numParticles);
resampler.auxParticles = allocateParticles(numParticles, progStateSize);
resampler.progStateSize = progStateSize;
return resampler;
}
void destResampler(resampler_t resampler) {
freeMemory<int>(resampler.ancestor);
freeMemory<int>(resampler.cumulativeOffspring);
freeMemory<floating_t>(resampler.prefixSum);
#ifdef __NVCC__
freeMemory<floating_t>(resampler.wSquared);
#endif
freeParticles(resampler.auxParticles);
}
/*
// Obsolete
HOST DEV resampler_t initResamplerNested(int numParticles, size_t progStateSize) {
resampler_t resampler;
resampler.ancestor = new int[numParticles];
resampler.cumulativeOffspring = new int[numParticles];
resampler.prefixSum = new floating_t[numParticles];
resampler.auxParticles = allocateParticlesNested(numParticles, progStateSize);
return resampler;
}
HOST DEV void destResamplerNested(resampler_t resampler) {
delete[] resampler.ancestor;
delete[] resampler.cumulativeOffspring;
delete[] resampler.prefixSum;
freeParticlesNested(resampler.auxParticles);
}
*/
HOST DEV void copyParticle(const particles_t particlesDst, const particles_t particlesSrc, int dstIdx, int srcIdx, size_t progStateSize) {
// Program states
#ifdef STACK_SIZE_PROGSTATE
progStateStack_t* dstProgState = particlesDst.progStates + dstIdx;
progStateStack_t* srcProgState = particlesSrc.progStates + srcIdx;
copyStack(dstProgState, srcProgState);
#else
char* psDstAddress = static_cast<char*>(particlesDst.progStates) + progStateSize * dstIdx;
char* psSrcAddress = static_cast<char*>(particlesSrc.progStates) + progStateSize * srcIdx;
copyChunk(psDstAddress, psSrcAddress, progStateSize);
#endif
// Generic particle stuff
particlesDst.next[dstIdx] = particlesSrc.next[srcIdx];
particlesDst.weights[dstIdx] = 0;
}
#ifdef STACK_SIZE_PROGSTATE
HOST DEV void copyStack(progStateStack_t* dst, progStateStack_t* src) {
dst->stackPtr = src->stackPtr;
size_t stackSpaceUsed = src->stackPtr;
// Try to round up copy size to nearest multiple of sizeof(long), this can speed up GPU copying
#ifdef __NVCC__
int remainder = stackSpaceUsed % sizeof(long);
if (remainder > 0)
stackSpaceUsed = MIN(stackSpaceUsed + sizeof(long) - remainder, STACK_SIZE_PROGSTATE);
#endif
copyChunk(dst->stack, src->stack, stackSpaceUsed);
}
#endif
HOST DEV void copyChunk(void* dst, void* src, size_t bytes) {
#ifdef __NVCC__
// Manual loop copying can be much faster on GPU than device memcpy
// If the struct is aligned in the correct way, the loop long copying can give huge speedups compared to memcpy on GPU
bool longAligned = bytes % sizeof(long) == 0
&& ((std::uintptr_t)dst) % sizeof(long) == 0
&& ((std::uintptr_t)src) % sizeof(long) == 0;
if(longAligned) {
long* dstLong = (long*)(dst);
long* srcLong = (long*)(src);
int numDblWords = bytes / sizeof(long);
for(int i = 0; i < numDblWords; i++) {
dstLong[i] = srcLong[i];
}
} else {
bool intAligned = bytes % sizeof(int) == 0
&& ((std::uintptr_t)dst) % sizeof(int) == 0
&& ((std::uintptr_t)src) % sizeof(int) == 0;
if(intAligned) {
int* dstInt = (int*)(dst);
int* srcInt = (int*)(src);
int numWords = bytes / sizeof(int);
for(int i = 0; i < numWords; i++) {
dstInt[i] = srcInt[i];
}
} else {
// Not aligned, fall back to memcpy
memcpy(dst, src, bytes);
}
}
#else
// On CPU, memcpy seems to perform much better. Seems to be true with OpenMP as well
memcpy(dst, src, bytes);
#endif
}
// This could probably be optimized by sorting the particles by descending weights first
DEV int sampleAncestor(RAND_STATE_DECLARE const floating_t* w, const floating_t logWeightSum, const int numParticles) {
floating_t u = SAMPLE(uniform, 0.0f, logWeightSum);
floating_t accLogWeightSum = 0;
for (int i = 0; i < numParticles; i++) {
accLogWeightSum += w[i];
if (accLogWeightSum >= u) {
return i;
}
}
return numParticles - 1;
}
|
1c65256030a0093c14a1958484dc09e715fd52aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/HIPGraphsUtils.cuh>
#include <c10/macros/Macros.h>
#include <hiprand/hiprand_kernel.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <THH/THHGeneral.h>
namespace at{
namespace native{
namespace {
// philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4
// for all members of float4 to be consumed UNROLL has to be 4. Don't change!
// Note: VEC <= 4 (and in most real-world cases will be 4), so same logic applies.
const int UNROLL = 4;
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims,
int VEC>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void fused_dropout_kernel_vec(
at::cuda::detail::TensorInfo<scalar_t, IndexType> a,
at::cuda::detail::TensorInfo<scalar_t, IndexType> b,
at::cuda::detail::TensorInfo<uint8_t, IndexType> c,
IndexType totalElements,
accscalar_t p,
PhiloxCudaState philox_args) {
// make sure we don't break assumption that we can't have > 4 elements / thread
static_assert(VEC <= 4, "Value of VEC must be in [2, 4]");
using LoadT = memory::aligned_vector<scalar_t, VEC>;
using MaskLoadT = memory::aligned_vector<uint8_t, VEC>;
auto seeds = at::cuda::philox::unpack(philox_args);
IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
accscalar_t pinv = accscalar_t(1)/p;
// Helps align the total number of times hiprand_uniform4 is called by each thread for the same totalElements
// in the vec=2 and vec=4 cases.
bool gridxvec_loop_state = 0;
float4 rand;
// Note: Vectorized loads means we'll stride each thread by an additional VEC factor, as we'll load VEC elements at a time
for (IndexType linearIndex = idx * VEC;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x * VEC) {
// local storage
scalar_t src[VEC];
// We'll use this to actually cause vectorized loads later
LoadT *value = reinterpret_cast<LoadT*>(&src);
//hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
// Note: need a new set of random values per 4 elements -- we'll handle VEC elements in this thread, so need ceil(VEC / 4)
// sets of rand.
if ((VEC == 4) || (gridxvec_loop_state == 0)) {
rand = hiprand_uniform4(&state);
} else {
// sets up the last two values we generated last iteration to be used this iteration.
rand.x = rand.z;
rand.y = rand.w;
gridxvec_loop_state ^= 1;
}
rand.x = rand.x < p;
rand.y = rand.y < p;
if (VEC == 4) {
rand.z = rand.z < p;
rand.w = rand.w < p;
}
// Note: We explicitly check for is_contiguous() before launching the vectorized kernel
// and replace IndexToOffset call with linearIndex to allow vectorization of NHWC (or other)
// ordering.
// Single vectorized load
*value = *reinterpret_cast<LoadT*>(&a.data[linearIndex]);
scalar_t r[VEC];
uint8_t mask[VEC];
// Perform the actual computation
#pragma unroll
for (int ii = 0; ii < VEC; ii++) {
r[ii] = src[ii]*(&rand.x)[ii]*pinv;
mask[ii] = (uint8_t)(&rand.x)[ii];
}
// Vectorized writes for both mask & result
*(reinterpret_cast<LoadT*>(&b.data[linearIndex])) = *reinterpret_cast<LoadT*>(&r[0]);
*(reinterpret_cast<MaskLoadT*>(&c.data[linearIndex])) = *reinterpret_cast<MaskLoadT*>(&mask[0]);
__syncthreads();
}
}
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims,
int BDims = ADims>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void fused_dropout_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> a,
cuda::detail::TensorInfo<scalar_t, IndexType> b,
cuda::detail::TensorInfo<uint8_t, IndexType> c,
IndexType totalElements,
accscalar_t p,
PhiloxCudaState philox_args) {
auto seeds = at::cuda::philox::unpack(philox_args);
IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
accscalar_t pinv = accscalar_t(1)/p;
IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) *
blockDim.x * gridDim.x * UNROLL;
for (IndexType linearIndex = idx;
linearIndex < rounded_size;
linearIndex += gridDim.x * blockDim.x*UNROLL) {
//hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
float4 rand = hiprand_uniform4(&state);
scalar_t src[UNROLL];
rand.x = rand.x < p;
rand.y = rand.y < p;
rand.z = rand.z < p;
rand.w = rand.w < p;
for (int ii = 0; ii < UNROLL; ii++) {
IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
if (li < totalElements) {
// Convert `linearIndex` into an offset of `a`
const IndexType aOffset =
cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a);
src[ii] = a.data[aOffset];
}
}
for (int ii = 0; ii < UNROLL; ii++) {
IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
if (li < totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
cuda::detail::IndexToOffset<scalar_t, IndexType, BDims>::get(li, b);
b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv;
c.data[bOffset] = (uint8_t)(&rand.x)[ii];
}
}
__syncthreads();
}
}
template<typename scalar_t, typename accscalar_t>
void masked_scale_kernel(at::Tensor& ret, const at::Tensor& src, const at::Tensor& mask, accscalar_t scale){
auto iter = at::TensorIteratorConfig()
.check_all_same_dtype(false)
.add_borrowed_output(ret)
.add_borrowed_input(src)
.add_borrowed_input(mask)
.build();
at::native::gpu_kernel(
iter,
[=]GPU_LAMBDA(const scalar_t src_val, const uint8_t mask_val) -> scalar_t {
return (float)mask_val * src_val * scale;
});
}
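// get_vector_size (below) picks the widest vector width that the pointer alignment and the element
// counts allow. For example, a dense, suitably aligned float tensor of 1000 elements typically
// vectorizes by 4; 1002 elements fall back to 2 (1002 % 4 != 0); 1001 elements disable vectorization.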
template <typename scalar_t>
int get_vector_size(at::Tensor self, at::Tensor ret, at::Tensor mask) {
int vec_size = 4;
// get the vector size
if (!self.is_non_overlapping_and_dense() || !ret.is_non_overlapping_and_dense() || !mask.is_non_overlapping_and_dense()) {
vec_size = 1;
} else {
vec_size = memory::can_vectorize_up_to<scalar_t>((char*)self.data_ptr());
}
// check that we'd have no remainders - prefer a smaller vector size with no remainders over a larger vector and remainder.
bool can_vectorize = true;
do {
can_vectorize = self.numel() % vec_size == 0 && ret.numel() % vec_size == 0 && mask.numel() % vec_size == 0;
if (!can_vectorize) vec_size /= 2;
} while (vec_size > 1 && !can_vectorize);
return can_vectorize ? vec_size : 1;
}
template <typename index_type>
inline void launcher(
const Tensor& self,
Tensor& ret,
Tensor& mask,
double p,
const int64_t nelem,
const PhiloxCudaState rng_engine_inputs,
dim3 grid,
dim3 dim_block) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
self.scalar_type(),
"fused_dropout",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(p);
auto self_info =
cuda::detail::getTensorInfo<scalar_t, index_type>(self);
auto ret_info =
cuda::detail::getTensorInfo<scalar_t, index_type>(ret);
auto mask_info =
cuda::detail::getTensorInfo<uint8_t, index_type>(mask);
self_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); // ret and mask are collapsed to 1d
// contiguous tensor
int vec_size = get_vector_size<scalar_t>(self, ret, mask);
if (vec_size > 1) {
switch (vec_size) {
case 4:
hipLaunchKernelGGL(( fused_dropout_kernel_vec<
scalar_t,
accscalar_t,
index_type,
1,
4>)
, dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self_info,
ret_info,
mask_info,
nelem,
pa,
rng_engine_inputs);
C10_HIP_KERNEL_LAUNCH_CHECK();
break;
case 2:
hipLaunchKernelGGL(( fused_dropout_kernel_vec<
scalar_t,
accscalar_t,
index_type,
1,
2>)
, dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self_info,
ret_info,
mask_info,
nelem,
pa,
rng_engine_inputs);
C10_HIP_KERNEL_LAUNCH_CHECK();
break;
}
} else {
switch (self_info.dims) {
case 1:
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, index_type, 1>)
, dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self_info,
ret_info,
mask_info,
nelem,
pa,
rng_engine_inputs);
C10_HIP_KERNEL_LAUNCH_CHECK();
break;
default:
if (!self.is_contiguous() && ret.is_contiguous() &&
mask.is_contiguous()) {
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, index_type, -1, 1>)
, dim3(grid),
dim3(dim_block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self_info,
ret_info,
mask_info,
nelem,
pa,
rng_engine_inputs);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, index_type, -1>)
, dim3(grid),
dim3(dim_block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self_info,
ret_info,
mask_info,
nelem,
pa,
rng_engine_inputs);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
}
});
}
} //anonymous namespace
std::tuple<Tensor,Tensor>
fused_dropout_cuda(const Tensor& self, double p, c10::optional<Generator> gen_){
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
Tensor ret = at::empty_like(self);
Tensor mask = at::empty_like(self, self.options().dtype(kByte));
const int64_t nelem = self.numel();
//empty tensors should not get here, but just in case, avoid FPE
if (nelem==0) return std::tuple<Tensor,Tensor>(self, mask);
const int64_t block_size = 256;
unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size;
dim3 dim_block(block_size);
dim3 grid((nelem + block_size -1)/block_size);
grid.x = ::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x);
//number of times random will be generated per thread, to offset philox counter in thc random state
int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL;
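  // e.g. with nelem = 1,000,000, block_size = 256 and grid.x capped at 160 (hypothetical device),
  // each thread strides over 256*160*4 = 163,840 elements per pass, so
  // counter_offset = ((1,000,000 - 1)/163,840 + 1)*4 = 28 random values are reserved per thread.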
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(counter_offset);
}
if (cuda::detail::canUse32BitIndexMath(self)){
launcher<unsigned int>(
self, ret, mask, p, nelem, rng_engine_inputs, grid, dim_block);
} else {
launcher<uint64_t>(
self, ret, mask, p, nelem, rng_engine_inputs, grid, dim_block);
}
return std::tuple<Tensor,Tensor>(ret, mask);
}
Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){
Tensor ret = at::empty_like(self, self.suggest_memory_format());
TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype");
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "masked_scale", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(scale);
masked_scale_kernel<scalar_t>(ret, self, mask, pa);
});
return ret;
}
}
}
| 1c65256030a0093c14a1958484dc09e715fd52aa.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#include <c10/macros/Macros.h>
#include <curand_kernel.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <THC/THCGeneral.h>
namespace at{
namespace native{
namespace {
// philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4
// for all members of float4 to be consumed UNROLL has to be 4. Don't change!
// Note: VEC <= 4 (and in most real-world cases will be 4), so same logic applies.
const int UNROLL = 4;
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims,
int VEC>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void fused_dropout_kernel_vec(
at::cuda::detail::TensorInfo<scalar_t, IndexType> a,
at::cuda::detail::TensorInfo<scalar_t, IndexType> b,
at::cuda::detail::TensorInfo<uint8_t, IndexType> c,
IndexType totalElements,
accscalar_t p,
PhiloxCudaState philox_args) {
// make sure we don't break assumption that we can't have > 4 elements / thread
static_assert(VEC <= 4, "Value of VEC must be in [2, 4]");
using LoadT = memory::aligned_vector<scalar_t, VEC>;
using MaskLoadT = memory::aligned_vector<uint8_t, VEC>;
auto seeds = at::cuda::philox::unpack(philox_args);
IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
accscalar_t pinv = accscalar_t(1)/p;
// Helps align the total number of times curand_uniform4 is called by each thread for the same totalElements
// in the vec=2 and vec=4 cases.
bool gridxvec_loop_state = 0;
float4 rand;
// Note: Vectorized loads means we'll stride each thread by an additional VEC factor, as we'll load VEC elements at a time
for (IndexType linearIndex = idx * VEC;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x * VEC) {
// local storage
scalar_t src[VEC];
// We'll use this to actually cause vectorized loads later
LoadT *value = reinterpret_cast<LoadT*>(&src);
//curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
// Note: need a new set of random values per 4 elements -- we'll handle VEC elements in this thread, so need ceil(VEC / 4)
// sets of rand.
if ((VEC == 4) || (gridxvec_loop_state == 0)) {
rand = curand_uniform4(&state);
} else {
// sets up the last two values we generated last iteration to be used this iteration.
rand.x = rand.z;
rand.y = rand.w;
gridxvec_loop_state ^= 1;
}
rand.x = rand.x < p;
rand.y = rand.y < p;
if (VEC == 4) {
rand.z = rand.z < p;
rand.w = rand.w < p;
}
// Note: We explicitly check for is_contiguous() before launching the vectorized kernel
// and replace IndexToOffset call with linearIndex to allow vectorization of NHWC (or other)
// ordering.
// Single vectorized load
*value = *reinterpret_cast<LoadT*>(&a.data[linearIndex]);
scalar_t r[VEC];
uint8_t mask[VEC];
// Perform the actual computation
#pragma unroll
for (int ii = 0; ii < VEC; ii++) {
r[ii] = src[ii]*(&rand.x)[ii]*pinv;
mask[ii] = (uint8_t)(&rand.x)[ii];
}
// Vectorized writes for both mask & result
*(reinterpret_cast<LoadT*>(&b.data[linearIndex])) = *reinterpret_cast<LoadT*>(&r[0]);
*(reinterpret_cast<MaskLoadT*>(&c.data[linearIndex])) = *reinterpret_cast<MaskLoadT*>(&mask[0]);
__syncthreads();
}
}
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims,
int BDims = ADims>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void fused_dropout_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> a,
cuda::detail::TensorInfo<scalar_t, IndexType> b,
cuda::detail::TensorInfo<uint8_t, IndexType> c,
IndexType totalElements,
accscalar_t p,
PhiloxCudaState philox_args) {
auto seeds = at::cuda::philox::unpack(philox_args);
IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
accscalar_t pinv = accscalar_t(1)/p;
IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) *
blockDim.x * gridDim.x * UNROLL;
for (IndexType linearIndex = idx;
linearIndex < rounded_size;
linearIndex += gridDim.x * blockDim.x*UNROLL) {
//curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
float4 rand = curand_uniform4(&state);
scalar_t src[UNROLL];
rand.x = rand.x < p;
rand.y = rand.y < p;
rand.z = rand.z < p;
rand.w = rand.w < p;
for (int ii = 0; ii < UNROLL; ii++) {
IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
if (li < totalElements) {
// Convert `linearIndex` into an offset of `a`
const IndexType aOffset =
cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a);
src[ii] = a.data[aOffset];
}
}
for (int ii = 0; ii < UNROLL; ii++) {
IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
if (li < totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
cuda::detail::IndexToOffset<scalar_t, IndexType, BDims>::get(li, b);
b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv;
c.data[bOffset] = (uint8_t)(&rand.x)[ii];
}
}
__syncthreads();
}
}
template<typename scalar_t, typename accscalar_t>
void masked_scale_kernel(at::Tensor& ret, const at::Tensor& src, const at::Tensor& mask, accscalar_t scale){
auto iter = at::TensorIteratorConfig()
.check_all_same_dtype(false)
.add_borrowed_output(ret)
.add_borrowed_input(src)
.add_borrowed_input(mask)
.build();
at::native::gpu_kernel(
iter,
[=]GPU_LAMBDA(const scalar_t src_val, const uint8_t mask_val) -> scalar_t {
return (float)mask_val * src_val * scale;
});
}
template <typename scalar_t>
int get_vector_size(at::Tensor self, at::Tensor ret, at::Tensor mask) {
int vec_size = 4;
// get the vector size
if (!self.is_non_overlapping_and_dense() || !ret.is_non_overlapping_and_dense() || !mask.is_non_overlapping_and_dense()) {
vec_size = 1;
} else {
vec_size = memory::can_vectorize_up_to<scalar_t>((char*)self.data_ptr());
}
// check that we'd have no remainders - prefer a smaller vector size with no remainders over a larger vector and remainder.
bool can_vectorize = true;
do {
can_vectorize = self.numel() % vec_size == 0 && ret.numel() % vec_size == 0 && mask.numel() % vec_size == 0;
if (!can_vectorize) vec_size /= 2;
} while (vec_size > 1 && !can_vectorize);
return can_vectorize ? vec_size : 1;
}
template <typename index_type>
inline void launcher(
const Tensor& self,
Tensor& ret,
Tensor& mask,
double p,
const int64_t nelem,
const PhiloxCudaState rng_engine_inputs,
dim3 grid,
dim3 dim_block) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
self.scalar_type(),
"fused_dropout",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(p);
auto self_info =
cuda::detail::getTensorInfo<scalar_t, index_type>(self);
auto ret_info =
cuda::detail::getTensorInfo<scalar_t, index_type>(ret);
auto mask_info =
cuda::detail::getTensorInfo<uint8_t, index_type>(mask);
self_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); // ret and mask are collapsed to 1d
// contiguous tensor
int vec_size = get_vector_size<scalar_t>(self, ret, mask);
if (vec_size > 1) {
switch (vec_size) {
case 4:
fused_dropout_kernel_vec<
scalar_t,
accscalar_t,
index_type,
1,
4>
<<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
self_info,
ret_info,
mask_info,
nelem,
pa,
rng_engine_inputs);
C10_CUDA_KERNEL_LAUNCH_CHECK();
break;
case 2:
fused_dropout_kernel_vec<
scalar_t,
accscalar_t,
index_type,
1,
2>
<<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
self_info,
ret_info,
mask_info,
nelem,
pa,
rng_engine_inputs);
C10_CUDA_KERNEL_LAUNCH_CHECK();
break;
}
} else {
switch (self_info.dims) {
case 1:
fused_dropout_kernel<scalar_t, accscalar_t, index_type, 1>
<<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
self_info,
ret_info,
mask_info,
nelem,
pa,
rng_engine_inputs);
C10_CUDA_KERNEL_LAUNCH_CHECK();
break;
default:
if (!self.is_contiguous() && ret.is_contiguous() &&
mask.is_contiguous()) {
fused_dropout_kernel<scalar_t, accscalar_t, index_type, -1, 1>
<<<grid,
dim_block,
0,
at::cuda::getCurrentCUDAStream()>>>(
self_info,
ret_info,
mask_info,
nelem,
pa,
rng_engine_inputs);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
fused_dropout_kernel<scalar_t, accscalar_t, index_type, -1>
<<<grid,
dim_block,
0,
at::cuda::getCurrentCUDAStream()>>>(
self_info,
ret_info,
mask_info,
nelem,
pa,
rng_engine_inputs);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
});
}
} //anonymous namespace
std::tuple<Tensor,Tensor>
fused_dropout_cuda(const Tensor& self, double p, c10::optional<Generator> gen_){
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
Tensor ret = at::empty_like(self);
Tensor mask = at::empty_like(self, self.options().dtype(kByte));
const int64_t nelem = self.numel();
//empty tensors should not get here, but just in case, avoid FPE
if (nelem==0) return std::tuple<Tensor,Tensor>(self, mask);
const int64_t block_size = 256;
unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size;
dim3 dim_block(block_size);
dim3 grid((nelem + block_size -1)/block_size);
grid.x = std::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x);
//number of times random will be generated per thread, to offset philox counter in thc random state
int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL;
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(counter_offset);
}
if (cuda::detail::canUse32BitIndexMath(self)){
launcher<unsigned int>(
self, ret, mask, p, nelem, rng_engine_inputs, grid, dim_block);
} else {
launcher<uint64_t>(
self, ret, mask, p, nelem, rng_engine_inputs, grid, dim_block);
}
return std::tuple<Tensor,Tensor>(ret, mask);
}
Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){
Tensor ret = at::empty_like(self, self.suggest_memory_format());
TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype");
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "masked_scale", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(scale);
masked_scale_kernel<scalar_t>(ret, self, mask, pa);
});
return ret;
}
}
}
|
a7faecc649b8134c2edd4741823771c3910cd4c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
#include <math.h>
#include <assert.h>
#include <xmmintrin.h>
#include <immintrin.h>
#include "cudnn.h"
#include "util.h"
#include "Kernel256_one.h"
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d:'%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
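// kernel_1024_one_256 fuses the 1024 -> 256 1x1 convolution with the batch-norm affine transform
// and ReLU. Each of the 49 blocks (blockDim 256x4) reduces one 4096-element tile of A into a
// 1024-element tile of C, staging in shared memory the 4x1024 input tile, the current 16x256 slab
// of B, the 4x256 partial outputs, and the 256-entry bias/scale vectors — exactly what the
// (4*1024 + 16*256 + 4*256 + 2*256)<<2 byte dynamic shared-memory launch argument provides.
// The scale*x + bias and max(x, 0) are applied right before the store to C.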
__global__ void kernel_1024_one_256(float *A, float *B, float *bnBias, float *bnScale, float *C) {
int tile = blockIdx.x, in_channel = threadIdx.x, line = threadIdx.y;
int ind = line*256 + in_channel;
extern __shared__ float shared_[];
float *weights = shared_ + 1024*4, *output = weights + 256*16, *input = shared_;
float *bias = output + 4*256, *scale = bias + 256;
for (int i = 0; i < 4; i++)
input[ind + i*1024] = A[tile*4096 + i*1024 + ind];
bias[in_channel] = bnBias[in_channel];
scale[in_channel] = bnScale[in_channel];
output[ind] = 0.0f;
__syncthreads();
for (int k = 0; k < 256; k += 4) {
for (int i = 0; i < 4; i++)
weights[ind + i*1024] = B[(k+i)*1024 + ind];
__syncthreads();
float *A_start = input + k*16;
for (int p = 0; p < 16; p++) {
output[ind] += A_start[line*1024 + p] * weights[in_channel + p*256];
}
__syncthreads();
}
float *C_start = C + tile*1024, res = scale[in_channel] * output[ind] + bias[in_channel];
C_start[ind] = res > 0 ? res : 0;
}
int kernel_256_1_in() {
float *input = get_parameter(inputName256one, 14*14*1024);
float *weight = get_parameter(weightName256one, 256*1024);
float *bnBias = get_parameter(bnBiasName256one, 256);
float *bnScale = get_parameter(bnScaleName256one, 256);
float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 256);
float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 256);
float *eMeanName = get_parameter(eMeanName256one, 256);
float *eVarName = get_parameter(eVarName256one, 256);
float *input_, *output_, *weight_, *bnBias_, *bnScale_, *eMeanName_, *eVarName_;
int nInput = 14*14*1024, nOutput = 14*14*256, nWeights = 256*1024;
float tmp[nOutput], tmp_cudnn[nOutput];
uint64_t nT1 = 0, nT2 = 0, nT1_cudnn = 0, nT2_cudnn = 0;
hipError_t s;
/////////////////////////////////
// My Kernel
/////////////////////////////////
/* 1. Data preparation */
hipMalloc((void **) &input_, nInput<<3);
hipMalloc((void **) &output_, nOutput<<2);
hipMalloc((void **) &weight_, nWeights<<2);
hipMalloc((void **) &bnBias_, 256<<2);
hipMalloc((void **) &bnScale_, 256<<2);
hipMemcpy(input_, input, nInput<<2, hipMemcpyHostToDevice);
hipMemcpy(weight_, weight, nWeights<<2, hipMemcpyHostToDevice);
hipMemcpy(bnBias_, bnBias_myKernel, 256<<2, hipMemcpyHostToDevice);
hipMemcpy(bnScale_, bnScale_myKernel, 256<<2, hipMemcpyHostToDevice);
/* 2. Computing */
nT1 = getTimeMicroseconds64();
hipLaunchKernelGGL(( kernel_1024_one_256) , dim3(dim3(49)), dim3(dim3(256, 4)), (4*1024 + 16*256 + 4*256 + 2*256)<<2 , 0, 0, input_, weight_, bnBias_, bnScale_, output_);
//cudaCheckError();
hipDeviceSynchronize();
nT2 = getTimeMicroseconds64();
printf("TotalTime = %d us\n", nT2-nT1);
/* 3. Copy back and free */
s = hipMemcpy(tmp, output_, nOutput<<2, hipMemcpyDeviceToHost);
printf("%s\n", hipGetErrorName(s));
cudaCheckError();
free(bnBias_myKernel);
free(bnScale_myKernel);
/////////////////////////////////
// cuDNN
/////////////////////////////////
/* 1. Data preparation */
hipMalloc((void **) &eMeanName_, 256<<2);
hipMalloc((void **) &eVarName_, 256<<2);
hipMemcpy(bnBias_, bnBias, 256<<2, hipMemcpyHostToDevice);
hipMemcpy(bnScale_, bnScale, 256<<2, hipMemcpyHostToDevice);
hipMemcpy(eMeanName_, eMeanName, 256<<2, hipMemcpyHostToDevice);
hipMemcpy(eVarName_, eVarName, 256<<2, hipMemcpyHostToDevice);
/* 2. cuDNN preparation */
cudnnStatus_t status;
float one = 1.0, zero = 0.0;
int size;
cudnnHandle_t handle;
status = cudnnCreate(&handle);
if (status != CUDNN_STATUS_SUCCESS) printf("failed1\n");
cudnnTensorDescriptor_t xdesc, ydesc;
cudnnFilterDescriptor_t wdesc; // CUDNN_TENSOR_NHWC, CUDNN_TENSOR_NCHW
status = cudnnCreateTensorDescriptor(&xdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed2\n");
status = cudnnSetTensor4dDescriptor(xdesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 1024, 14, 14);
if (status != CUDNN_STATUS_SUCCESS) printf("failed3\n");
status = cudnnCreateTensorDescriptor(&ydesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed4\n");
status = cudnnSetTensor4dDescriptor(ydesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 14, 14);
if (status != CUDNN_STATUS_SUCCESS) printf("failed5\n");
status = cudnnCreateFilterDescriptor(&wdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed6\n");
status = cudnnSetFilter4dDescriptor(wdesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed7\n");
cudnnConvolutionDescriptor_t conv_desc;
status = cudnnCreateConvolutionDescriptor(&conv_desc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed10\n");
status = cudnnSetConvolution2dDescriptor(conv_desc, 0,0, 1,1,1,1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); //CUDNN_DATA_FLOAT
if (status != CUDNN_STATUS_SUCCESS) printf("failed11\n");
cudnnActivationDescriptor_t act_desc;
status = cudnnCreateActivationDescriptor(&act_desc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed12\n");
status = cudnnSetActivationDescriptor(act_desc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0);
if (status != CUDNN_STATUS_SUCCESS) printf("failed13\n");
cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc;
status = cudnnCreateTensorDescriptor(&bnScaleBiasMeanVarDesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed14\n");
status = cudnnSetTensor4dDescriptor(bnScaleBiasMeanVarDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed15\n");
cudnnConvolutionFwdAlgo_t algo = (cudnnConvolutionFwdAlgo_t)0;
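  // (cudnnConvolutionFwdAlgo_t)0 is CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; the algorithm is
  // pinned here rather than selected via cudnnGetConvolutionForwardAlgorithm/FindAlgorithm.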
status = cudnnGetConvolutionForwardWorkspaceSize(handle,
xdesc,
wdesc,
conv_desc,
ydesc,
algo,
(size_t *)&(size));
float *extra;
hipMalloc((void **) &extra, size);
/* 3. Computing */
nT1_cudnn = getTimeMicroseconds64();
status = cudnnConvolutionForward(handle, &one,
xdesc, input_, wdesc, weight_,
conv_desc, algo,
extra, size, &zero,
ydesc, output_);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed1\n");
status = cudnnBatchNormalizationForwardInference(handle, CUDNN_BATCHNORM_SPATIAL,
&one, &zero,
ydesc, output_, ydesc, output_,
bnScaleBiasMeanVarDesc, bnScale_, bnBias_, eMeanName_, eVarName_, CUDNN_BN_MIN_EPSILON);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed2\n");
status = cudnnActivationForward(handle, act_desc, &one,
ydesc, output_, &zero,
ydesc, output_);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed3\n");
hipDeviceSynchronize();
nT2_cudnn = getTimeMicroseconds64();
printf("cuDNN TotalTime = %d us\n", nT2_cudnn-nT1_cudnn);
/* 4. Copy back and free */
s = hipMemcpy(tmp_cudnn, output_, nOutput<<2, hipMemcpyDeviceToHost);
printf("%s\n", hipGetErrorName(s));
hipFree(extra);
hipFree(input_);
hipFree(output_);
hipFree(weight_);
hipFree(bnScale_);
hipFree(bnBias_);
hipFree(eMeanName_);
hipFree(eVarName_);
free(input);
free(weight);
free(bnScale);
free(bnBias);
free(eMeanName);
free(eVarName);
output_checker(tmp, tmp_cudnn, 14, 256, 0);
return ((nT2-nT1) << 16) | (nT2_cudnn-nT1_cudnn);
}
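// kernel_256_one_1024 is the expanding 1x1 convolution (256 -> 1024 channels) of the same block.
// The grid is (49, 4): each block stages one 1024-element tile of A (4 rows of 256), and blockIdx.y
// selects which 256-wide slice of the 1024 output channels the block produces. Note there is no
// ReLU here, only the scale/bias, matching the cuDNN reference path below which also skips the
// activation step.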
__global__ void kernel_256_one_1024(float *A, float *B, float *bnBias, float *bnScale, float *C) {
int tile = blockIdx.x, part = blockIdx.y, in_channel = threadIdx.x, line = threadIdx.y;
int ind = line*256 + in_channel;
extern __shared__ float shared_[];
float *weights = shared_ + 256*4, *output = weights + 256*32, *input = shared_;
float *bias = output + 4*256, *scale = bias + 256;
input[ind] = A[tile * 1024 + ind];
bias[in_channel] = bnBias[part*256 + in_channel];
scale[in_channel] = bnScale[part*256+ in_channel];
output[ind] = 0.0f;
__syncthreads();
for (int k = 0; k < 64; k += 8) {
for (int i = 0; i < 8; i++)
weights[ind + 1024*i] = B[(k+i)*4096 + part*256 + in_channel + line*1024];
__syncthreads();
float *A_start = input + k*32;
for (int p = 0; p < 32; p++) {
output[ind] += A_start[line*256 + p] * weights[in_channel + p*256];
}
__syncthreads();
}
float *C_start = C + tile*4096 + part*256;
C_start[line * 1024 + in_channel] = scale[in_channel] * output[ind] + bias[in_channel];
}
int kernel_256_1_out() {
float *input = get_parameter(inputName256one, 14*14*256);
float *weight = get_parameter(weightName256one, 256*1024);
float *bnBias = get_parameter(bnBiasName256one, 1024);
float *bnScale = get_parameter(bnScaleName256one, 1024);
float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 1024);
float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 1024);
float *eMeanName = get_parameter(eMeanName256one, 1024);
float *eVarName = get_parameter(eVarName256one, 1024);
float *input_, *output_, *weight_, *bnBias_, *bnScale_, *eMeanName_, *eVarName_;
int nInput = 14*14*256, nOutput = 14*14*1024, nWeights = 256*1024;
float tmp[nOutput], tmp_cudnn[nOutput];
uint64_t nT1 = 0, nT2 = 0, nT1_cudnn = 0, nT2_cudnn = 0;
hipError_t s;
/////////////////////////////////
// My Kernel
/////////////////////////////////
/* 1. Data preparation */
hipMalloc((void **) &input_, nInput<<3);
hipMalloc((void **) &output_, nOutput<<2);
hipMalloc((void **) &weight_, nWeights<<2);
hipMalloc((void **) &bnBias_, 1024<<2);
hipMalloc((void **) &bnScale_, 1024<<2);
hipMemcpy(input_, input, nInput<<2, hipMemcpyHostToDevice);
hipMemcpy(weight_, weight, nWeights<<2, hipMemcpyHostToDevice);
hipMemcpy(bnBias_, bnBias_myKernel, 1024<<2, hipMemcpyHostToDevice);
hipMemcpy(bnScale_, bnScale_myKernel, 1024<<2, hipMemcpyHostToDevice);
/* 2. Computing */
nT1 = getTimeMicroseconds64();
hipLaunchKernelGGL(( kernel_256_one_1024) , dim3(dim3(49, 4)), dim3(dim3(256, 4)), (4*256 + 32*256 + 4*256 + 2*256)<<2 , 0, 0, input_, weight_, bnBias_, bnScale_, output_);
cudaCheckError();
hipDeviceSynchronize();
nT2 = getTimeMicroseconds64();
printf("TotalTime = %d us\n", nT2-nT1);
/* 3. Copy back and free */
s = hipMemcpy(tmp, output_, nOutput<<2, hipMemcpyDeviceToHost);
printf("%s\n", hipGetErrorName(s));
cudaCheckError();
free(bnBias_myKernel);
free(bnScale_myKernel);
/////////////////////////////////
// cuDNN
/////////////////////////////////
/* 1. Data preparation */
hipMalloc((void **) &eMeanName_, 1024<<2);
hipMalloc((void **) &eVarName_, 1024<<2);
hipMemcpy(bnBias_, bnBias, 1024<<2, hipMemcpyHostToDevice);
hipMemcpy(bnScale_, bnScale, 1024<<2, hipMemcpyHostToDevice);
hipMemcpy(eMeanName_, eMeanName, 1024<<2, hipMemcpyHostToDevice);
hipMemcpy(eVarName_, eVarName, 1024<<2, hipMemcpyHostToDevice);
/* 2. cuDNN preparation */
cudnnStatus_t status;
float one = 1.0, zero = 0.0;
int size;
cudnnHandle_t handle;
status = cudnnCreate(&handle);
if (status != CUDNN_STATUS_SUCCESS) printf("failed1\n");
cudnnTensorDescriptor_t xdesc, ydesc;
cudnnFilterDescriptor_t wdesc; // CUDNN_TENSOR_NHWC, CUDNN_TENSOR_NCHW
status = cudnnCreateTensorDescriptor(&xdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed2\n");
status = cudnnSetTensor4dDescriptor(xdesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 14, 14);
if (status != CUDNN_STATUS_SUCCESS) printf("failed3\n");
status = cudnnCreateTensorDescriptor(&ydesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed4\n");
status = cudnnSetTensor4dDescriptor(ydesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 1024, 14, 14);
if (status != CUDNN_STATUS_SUCCESS) printf("failed5\n");
status = cudnnCreateFilterDescriptor(&wdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed6\n");
status = cudnnSetFilter4dDescriptor(wdesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed7\n");
cudnnConvolutionDescriptor_t conv_desc;
status = cudnnCreateConvolutionDescriptor(&conv_desc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed10\n");
status = cudnnSetConvolution2dDescriptor(conv_desc, 0,0, 1,1,1,1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); //CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT
if (status != CUDNN_STATUS_SUCCESS) printf("failed11\n");
cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc;
status = cudnnCreateTensorDescriptor(&bnScaleBiasMeanVarDesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed14\n");
status = cudnnSetTensor4dDescriptor(bnScaleBiasMeanVarDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed15\n");
cudnnConvolutionFwdAlgo_t algo = (cudnnConvolutionFwdAlgo_t)0;
status = cudnnGetConvolutionForwardWorkspaceSize(handle,
xdesc,
wdesc,
conv_desc,
ydesc,
algo,
(size_t *)&(size));
float *extra;
hipMalloc((void **) &extra, size);
/* 3. Computing */
nT1_cudnn = getTimeMicroseconds64();
status = cudnnConvolutionForward(handle, &one,
xdesc, input_, wdesc, weight_,
conv_desc, algo,
extra, size, &zero,
ydesc, output_);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed1\n");
status = cudnnBatchNormalizationForwardInference(handle, CUDNN_BATCHNORM_SPATIAL,
&one, &zero,
ydesc, output_, ydesc, output_,
bnScaleBiasMeanVarDesc, bnScale_, bnBias_, eMeanName_, eVarName_, CUDNN_BN_MIN_EPSILON);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed2\n");
hipDeviceSynchronize();
nT2_cudnn = getTimeMicroseconds64();
printf("cuDNN TotalTime = %d us\n", nT2_cudnn-nT1_cudnn);
/* 4. Copy back and free */
s = hipMemcpy(tmp_cudnn, output_, nOutput<<2, hipMemcpyDeviceToHost);
printf("%s\n", hipGetErrorName(s));
hipFree(extra);
hipFree(input_);
hipFree(output_);
hipFree(weight_);
hipFree(bnScale_);
hipFree(bnBias_);
hipFree(eMeanName_);
hipFree(eVarName_);
free(input);
free(weight);
free(bnScale);
free(bnBias);
free(eMeanName);
free(eVarName);
output_checker(tmp, tmp_cudnn, 14, 1024, 0);
return ((nT2-nT1) << 16) | (nT2_cudnn-nT1_cudnn);
}
| a7faecc649b8134c2edd4741823771c3910cd4c7.cu | #include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
#include <math.h>
#include <assert.h>
#include <xmmintrin.h>
#include <immintrin.h>
#include "cudnn.h"
#include "util.h"
#include "Kernel256_one.h"
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d:'%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
__global__ void kernel_1024_one_256(float *A, float *B, float *bnBias, float *bnScale, float *C) {
int tile = blockIdx.x, in_channel = threadIdx.x, line = threadIdx.y;
int ind = line*256 + in_channel;
extern __shared__ float shared_[];
float *weights = shared_ + 1024*4, *output = weights + 256*16, *input = shared_;
float *bias = output + 4*256, *scale = bias + 256;
for (int i = 0; i < 4; i++)
input[ind + i*1024] = A[tile*4096 + i*1024 + ind];
bias[in_channel] = bnBias[in_channel];
scale[in_channel] = bnScale[in_channel];
output[ind] = 0.0f;
__syncthreads();
for (int k = 0; k < 256; k += 4) {
for (int i = 0; i < 4; i++)
weights[ind + i*1024] = B[(k+i)*1024 + ind];
__syncthreads();
float *A_start = input + k*16;
for (int p = 0; p < 16; p++) {
output[ind] += A_start[line*1024 + p] * weights[in_channel + p*256];
}
__syncthreads();
}
float *C_start = C + tile*1024, res = scale[in_channel] * output[ind] + bias[in_channel];
C_start[ind] = res > 0 ? res : 0;
}
int kernel_256_1_in() {
float *input = get_parameter(inputName256one, 14*14*1024);
float *weight = get_parameter(weightName256one, 256*1024);
float *bnBias = get_parameter(bnBiasName256one, 256);
float *bnScale = get_parameter(bnScaleName256one, 256);
float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 256);
float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 256);
float *eMeanName = get_parameter(eMeanName256one, 256);
float *eVarName = get_parameter(eVarName256one, 256);
float *input_, *output_, *weight_, *bnBias_, *bnScale_, *eMeanName_, *eVarName_;
int nInput = 14*14*1024, nOutput = 14*14*256, nWeights = 256*1024;
float tmp[nOutput], tmp_cudnn[nOutput];
uint64_t nT1 = 0, nT2 = 0, nT1_cudnn = 0, nT2_cudnn = 0;
cudaError_t s;
/////////////////////////////////
// My Kernel
/////////////////////////////////
/* 1. Data preparation */
cudaMalloc((void **) &input_, nInput<<3);
cudaMalloc((void **) &output_, nOutput<<2);
cudaMalloc((void **) &weight_, nWeights<<2);
cudaMalloc((void **) &bnBias_, 256<<2);
cudaMalloc((void **) &bnScale_, 256<<2);
cudaMemcpy(input_, input, nInput<<2, cudaMemcpyHostToDevice);
cudaMemcpy(weight_, weight, nWeights<<2, cudaMemcpyHostToDevice);
cudaMemcpy(bnBias_, bnBias_myKernel, 256<<2, cudaMemcpyHostToDevice);
cudaMemcpy(bnScale_, bnScale_myKernel, 256<<2, cudaMemcpyHostToDevice);
/* 2. Computing */
nT1 = getTimeMicroseconds64();
kernel_1024_one_256 <<<dim3(49), dim3(256, 4), (4*1024 + 16*256 + 4*256 + 2*256)<<2 >>> (input_, weight_, bnBias_, bnScale_, output_);
//cudaCheckError();
cudaDeviceSynchronize();
nT2 = getTimeMicroseconds64();
printf("TotalTime = %d us\n", nT2-nT1);
/* 3. Copy back and free */
s = cudaMemcpy(tmp, output_, nOutput<<2, cudaMemcpyDeviceToHost);
printf("%s\n", cudaGetErrorName(s));
cudaCheckError();
free(bnBias_myKernel);
free(bnScale_myKernel);
/////////////////////////////////
// cuDNN
/////////////////////////////////
/* 1. Data preparation */
cudaMalloc((void **) &eMeanName_, 256<<2);
cudaMalloc((void **) &eVarName_, 256<<2);
cudaMemcpy(bnBias_, bnBias, 256<<2, cudaMemcpyHostToDevice);
cudaMemcpy(bnScale_, bnScale, 256<<2, cudaMemcpyHostToDevice);
cudaMemcpy(eMeanName_, eMeanName, 256<<2, cudaMemcpyHostToDevice);
cudaMemcpy(eVarName_, eVarName, 256<<2, cudaMemcpyHostToDevice);
/* 2. cuDNN preparation */
cudnnStatus_t status;
float one = 1.0, zero = 0.0;
int size;
cudnnHandle_t handle;
status = cudnnCreate(&handle);
if (status != CUDNN_STATUS_SUCCESS) printf("failed1\n");
cudnnTensorDescriptor_t xdesc, ydesc;
cudnnFilterDescriptor_t wdesc; // CUDNN_TENSOR_NHWC, CUDNN_TENSOR_NCHW
status = cudnnCreateTensorDescriptor(&xdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed2\n");
status = cudnnSetTensor4dDescriptor(xdesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 1024, 14, 14);
if (status != CUDNN_STATUS_SUCCESS) printf("failed3\n");
status = cudnnCreateTensorDescriptor(&ydesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed4\n");
status = cudnnSetTensor4dDescriptor(ydesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 14, 14);
if (status != CUDNN_STATUS_SUCCESS) printf("failed5\n");
status = cudnnCreateFilterDescriptor(&wdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed6\n");
status = cudnnSetFilter4dDescriptor(wdesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed7\n");
cudnnConvolutionDescriptor_t conv_desc;
status = cudnnCreateConvolutionDescriptor(&conv_desc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed10\n");
status = cudnnSetConvolution2dDescriptor(conv_desc, 0,0, 1,1,1,1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); //CUDNN_DATA_FLOAT
if (status != CUDNN_STATUS_SUCCESS) printf("failed11\n");
cudnnActivationDescriptor_t act_desc;
status = cudnnCreateActivationDescriptor(&act_desc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed12\n");
status = cudnnSetActivationDescriptor(act_desc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0);
if (status != CUDNN_STATUS_SUCCESS) printf("failed13\n");
cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc;
status = cudnnCreateTensorDescriptor(&bnScaleBiasMeanVarDesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed14\n");
status = cudnnSetTensor4dDescriptor(bnScaleBiasMeanVarDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed15\n");
cudnnConvolutionFwdAlgo_t algo = (cudnnConvolutionFwdAlgo_t)0;
status = cudnnGetConvolutionForwardWorkspaceSize(handle,
xdesc,
wdesc,
conv_desc,
ydesc,
algo,
(size_t *)&(size));
float *extra;
cudaMalloc((void **) &extra, size);
/* 3. Computing */
nT1_cudnn = getTimeMicroseconds64();
status = cudnnConvolutionForward(handle, &one,
xdesc, input_, wdesc, weight_,
conv_desc, algo,
extra, size, &zero,
ydesc, output_);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed1\n");
status = cudnnBatchNormalizationForwardInference(handle, CUDNN_BATCHNORM_SPATIAL,
&one, &zero,
ydesc, output_, ydesc, output_,
bnScaleBiasMeanVarDesc, bnScale_, bnBias_, eMeanName_, eVarName_, CUDNN_BN_MIN_EPSILON);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed2\n");
status = cudnnActivationForward(handle, act_desc, &one,
ydesc, output_, &zero,
ydesc, output_);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed3\n");
cudaDeviceSynchronize();
nT2_cudnn = getTimeMicroseconds64();
printf("cuDNN TotalTime = %d us\n", nT2_cudnn-nT1_cudnn);
/* 4. Copy back and free */
s = cudaMemcpy(tmp_cudnn, output_, nOutput<<2, cudaMemcpyDeviceToHost);
printf("%s\n", cudaGetErrorName(s));
cudaFree(extra);
cudaFree(input_);
cudaFree(output_);
cudaFree(weight_);
cudaFree(bnScale_);
cudaFree(bnBias_);
cudaFree(eMeanName_);
cudaFree(eVarName_);
free(input);
free(weight);
free(bnScale);
free(bnBias);
free(eMeanName);
free(eVarName);
output_checker(tmp, tmp_cudnn, 14, 256, 0);
return ((nT2-nT1) << 16) | (nT2_cudnn-nT1_cudnn);
}
__global__ void kernel_256_one_1024(float *A, float *B, float *bnBias, float *bnScale, float *C) {
int tile = blockIdx.x, part = blockIdx.y, in_channel = threadIdx.x, line = threadIdx.y;
int ind = line*256 + in_channel;
extern __shared__ float shared_[];
float *weights = shared_ + 256*4, *output = weights + 256*32, *input = shared_;
float *bias = output + 4*256, *scale = bias + 256;
input[ind] = A[tile * 1024 + ind];
bias[in_channel] = bnBias[part*256 + in_channel];
scale[in_channel] = bnScale[part*256+ in_channel];
output[ind] = 0.0f;
__syncthreads();
for (int k = 0; k < 64; k += 8) {
for (int i = 0; i < 8; i++)
weights[ind + 1024*i] = B[(k+i)*4096 + part*256 + in_channel + line*1024];
__syncthreads();
float *A_start = input + k*32;
for (int p = 0; p < 32; p++) {
output[ind] += A_start[line*256 + p] * weights[in_channel + p*256];
}
__syncthreads();
}
float *C_start = C + tile*4096 + part*256;
C_start[line * 1024 + in_channel] = scale[in_channel] * output[ind] + bias[in_channel];
}
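// Host driver: runs the fused kernel above and a cuDNN conv+BN reference on the
// same 14x14x256 input, compares the two outputs, and returns both timings packed
// into one int (custom-kernel time in the upper 16 bits, cuDNN time in the lower bits).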
int kernel_256_1_out() {
float *input = get_parameter(inputName256one, 14*14*256);
float *weight = get_parameter(weightName256one, 256*1024);
float *bnBias = get_parameter(bnBiasName256one, 1024);
float *bnScale = get_parameter(bnScaleName256one, 1024);
float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 1024);
float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 1024);
float *eMeanName = get_parameter(eMeanName256one, 1024);
float *eVarName = get_parameter(eVarName256one, 1024);
float *input_, *output_, *weight_, *bnBias_, *bnScale_, *eMeanName_, *eVarName_;
int nInput = 14*14*256, nOutput = 14*14*1024, nWeights = 256*1024;
float tmp[nOutput], tmp_cudnn[nOutput];
uint64_t nT1 = 0, nT2 = 0, nT1_cudnn = 0, nT2_cudnn = 0;
cudaError_t s;
/////////////////////////////////
// My Kernel
/////////////////////////////////
/* 1. Data preparation */
cudaMalloc((void **) &input_, nInput<<3);
cudaMalloc((void **) &output_, nOutput<<2);
cudaMalloc((void **) &weight_, nWeights<<2);
cudaMalloc((void **) &bnBias_, 1024<<2);
cudaMalloc((void **) &bnScale_, 1024<<2);
cudaMemcpy(input_, input, nInput<<2, cudaMemcpyHostToDevice);
	cudaMemcpy(weight_, weight, nWeights<<2, cudaMemcpyHostToDevice);
cudaMemcpy(bnBias_, bnBias_myKernel, 1024<<2, cudaMemcpyHostToDevice);
cudaMemcpy(bnScale_, bnScale_myKernel, 1024<<2, cudaMemcpyHostToDevice);
/* 2. Computing */
nT1 = getTimeMicroseconds64();
kernel_256_one_1024 <<<dim3(49, 4), dim3(256, 4), (4*256 + 32*256 + 4*256 + 2*256)<<2 >>> (input_, weight_, bnBias_, bnScale_, output_);
cudaCheckError();
cudaDeviceSynchronize();
nT2 = getTimeMicroseconds64();
printf("TotalTime = %d us\n", nT2-nT1);
/* 3. Copy back and free */
s = cudaMemcpy(tmp, output_, nOutput<<2, cudaMemcpyDeviceToHost);
printf("%s\n", cudaGetErrorName(s));
cudaCheckError();
free(bnBias_myKernel);
free(bnScale_myKernel);
/////////////////////////////////
// cuDNN
/////////////////////////////////
/* 1. Data preparation */
cudaMalloc((void **) &eMeanName_, 1024<<2);
cudaMalloc((void **) &eVarName_, 1024<<2);
cudaMemcpy(bnBias_, bnBias, 1024<<2, cudaMemcpyHostToDevice);
cudaMemcpy(bnScale_, bnScale, 1024<<2, cudaMemcpyHostToDevice);
cudaMemcpy(eMeanName_, eMeanName, 1024<<2, cudaMemcpyHostToDevice);
cudaMemcpy(eVarName_, eVarName, 1024<<2, cudaMemcpyHostToDevice);
/* 2. cuDNN preparation */
cudnnStatus_t status;
float one = 1.0, zero = 0.0;
	size_t size;
cudnnHandle_t handle;
status = cudnnCreate(&handle);
if (status != CUDNN_STATUS_SUCCESS) printf("failed1\n");
cudnnTensorDescriptor_t xdesc, ydesc;
cudnnFilterDescriptor_t wdesc; // CUDNN_TENSOR_NHWC, CUDNN_TENSOR_NCHW
status = cudnnCreateTensorDescriptor(&xdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed2\n");
status = cudnnSetTensor4dDescriptor(xdesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 14, 14);
if (status != CUDNN_STATUS_SUCCESS) printf("failed3\n");
status = cudnnCreateTensorDescriptor(&ydesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed4\n");
status = cudnnSetTensor4dDescriptor(ydesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 1024, 14, 14);
if (status != CUDNN_STATUS_SUCCESS) printf("failed5\n");
status = cudnnCreateFilterDescriptor(&wdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed6\n");
status = cudnnSetFilter4dDescriptor(wdesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed7\n");
cudnnConvolutionDescriptor_t conv_desc;
status = cudnnCreateConvolutionDescriptor(&conv_desc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed10\n");
status = cudnnSetConvolution2dDescriptor(conv_desc, 0,0, 1,1,1,1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); //CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT
if (status != CUDNN_STATUS_SUCCESS) printf("failed11\n");
cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc;
status = cudnnCreateTensorDescriptor(&bnScaleBiasMeanVarDesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed14\n");
status = cudnnSetTensor4dDescriptor(bnScaleBiasMeanVarDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed15\n");
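	// forward algorithm 0 is CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM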
cudnnConvolutionFwdAlgo_t algo = (cudnnConvolutionFwdAlgo_t)0;
status = cudnnGetConvolutionForwardWorkspaceSize(handle,
xdesc,
wdesc,
conv_desc,
ydesc,
algo,
(size_t *)&(size));
float *extra;
cudaMalloc((void **) &extra, size);
/* 3. Computing */
nT1_cudnn = getTimeMicroseconds64();
status = cudnnConvolutionForward(handle, &one,
xdesc, input_, wdesc, weight_,
conv_desc, algo,
extra, size, &zero,
ydesc, output_);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed1\n");
status = cudnnBatchNormalizationForwardInference(handle, CUDNN_BATCHNORM_SPATIAL,
&one, &zero,
ydesc, output_, ydesc, output_,
bnScaleBiasMeanVarDesc, bnScale_, bnBias_, eMeanName_, eVarName_, CUDNN_BN_MIN_EPSILON);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed2\n");
cudaDeviceSynchronize();
nT2_cudnn = getTimeMicroseconds64();
printf("cuDNN TotalTime = %d us\n", nT2_cudnn-nT1_cudnn);
/* 4. Copy back and free */
s = cudaMemcpy(tmp_cudnn, output_, nOutput<<2, cudaMemcpyDeviceToHost);
printf("%s\n", cudaGetErrorName(s));
cudaFree(extra);
cudaFree(input_);
cudaFree(output_);
cudaFree(weight_);
cudaFree(bnScale_);
cudaFree(bnBias_);
cudaFree(eMeanName_);
cudaFree(eVarName_);
free(input);
free(weight);
free(bnScale);
free(bnBias);
free(eMeanName);
free(eVarName);
output_checker(tmp, tmp_cudnn, 14, 1024, 0);
return ((nT2-nT1) << 16) | (nT2_cudnn-nT1_cudnn);
}
|
356546a33619038c4233d3949519994bec1deb67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void print_details()
{
printf("blockIdx.x: %d, blockIdx.y: %d, blockIdx: %d, \
blockDim.x: %d, blockDim.y: %d, blockDim: %d, \
gridDim.x: %d, gridDim.y: %d, girdDim: %d\n", \
blockIdx.x, blockIdx.y, blockIdx.z, \
blockDim.x, blockDim.y, blockDim.z, \
gridDim.x, gridDim.y, gridDim.z);
} | 356546a33619038c4233d3949519994bec1deb67.cu | #include "includes.h"
__global__ void print_details()
{
printf("blockIdx.x: %d, blockIdx.y: %d, blockIdx: %d, \
blockDim.x: %d, blockDim.y: %d, blockDim: %d, \
gridDim.x: %d, gridDim.y: %d, girdDim: %d\n", \
blockIdx.x, blockIdx.y, blockIdx.z, \
blockDim.x, blockDim.y, blockDim.z, \
gridDim.x, gridDim.y, gridDim.z);
} |
169a1f4da228dfa26462c3d9299617f36da0bf2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_space_model.h"
#include "helper_cuda.h"
#include "space_model.h"
#include "stdio.h"
#include "stdlib.h"
// input - body nodes
__constant__ volatile float *mass_const;
__constant__ volatile float *point_x_const, *point_y_const;
__constant__ volatile float *speed_x_const, *speed_y_const;
__constant__ volatile float *acc_x_const, *acc_y_const;
__constant__ int capacity_const;
__device__ volatile int object_len;
__device__ volatile float length_const;
// cell nodes
__constant__ volatile int *node_cnt_const, *node_child_const;
__constant__ volatile float *node_mass_const;
__constant__ volatile float *node_point_x_const, *node_point_y_const;
// sorting related
__constant__ volatile int *inorder_const, *sort_const;
// calculation related
__constant__ float dt_const;
#define NUM_BLOCK 12
#define LOCK -2
#define NULL_POINTER -1
__device__ __inline__ int getQuadrant(float root_x, float root_y, float x,
float y) {
int idx = 0;
if (root_x < x) {
idx += 1;
}
if (root_y < y) {
idx += 2;
}
return idx;
}
__device__ __inline__ void shuffleNonNullPointer(int quad_idx, int nonnull_idx,
int child_idx, int cell_idx) {
if (quad_idx != nonnull_idx) {
node_child_const[cell_idx * 4 + nonnull_idx] = child_idx;
node_child_const[cell_idx * 4 + quad_idx] = -1;
}
return;
}
// compute 1 / sqrt of the displacement
__device__ __inline__ float getDistance(float x, float y) {
return rsqrtf(x * x + y * y + SOFT_CONST);
}
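// accumulate one body's/cell's gravitational pull: a += G * m * dr / |dr|^3,
// using the (softened) reciprocal distance r_inv computed by getDistance()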
__device__ void update_acc(float mass, float r_inv, float dr_x, float dr_y, float *acc_x, float *acc_y) {
float F = mass * G_CONST * pow(r_inv, 3.0f);
*acc_x += dr_x * F;
*acc_y += dr_y * F;
}
// kernel1: Init root
__global__ void kernel1Init() {
int root_idx = 0;
for (int i = 0; i < 4; i++) {
node_child_const[4 * root_idx + i] = NULL_POINTER;
}
node_mass_const[root_idx] = -1.f; // -1.f represents no mass computed yet
node_point_x_const[root_idx] = WINDOW_W / 2;
node_point_y_const[root_idx] = WINDOW_H / 2;
object_len = 0;
length_const = WINDOW_H / 2 + BORDER;
inorder_const[root_idx] = 0;
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0) {
// printf("Hello from kernel 1\n");
// }
#endif
}
// kernel2: Build hierarchical decomposition by inserting each body into
// quadtree
__global__ void kernel2BuildTree() {
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0)
// printf("Hello from kernel 2\n");
// caches the root's data in the register file
register float length = length_const;
register float root_point_x = node_point_x_const[0];
register float root_point_y = node_point_y_const[0];
register int idx = blockIdx.x * blockDim.x + threadIdx.x;
register int step = blockDim.x * gridDim.x;
register float body_point_x = 0.f;
register float body_point_y = 0.f;
register float cell_point_x = 0.f;
register float cell_point_y = 0.f;
register int new_node_idx = 0;
register int insert_idx = 0;
register int quad_idx = 0;
register float curr_length;
register int child;
register int target = 0;
register int new_cell_idx;
register bool first_insert = true;
register int quad_iter = 0;
while (idx < capacity_const) {
body_point_x = point_x_const[idx];
body_point_y = point_y_const[idx];
// reset for each iter
curr_length = length;
insert_idx = 0;
first_insert = true;
// find the cell
while (true) {
quad_idx = getQuadrant(node_point_x_const[insert_idx],
node_point_y_const[insert_idx], body_point_x,
body_point_y);
child = node_child_const[insert_idx * 4 + quad_idx];
if (child < capacity_const)
break;
curr_length *= 0.5;
insert_idx = child-capacity_const;
}
if (child != LOCK) {
target = insert_idx * 4 + quad_idx;
if (child == atomicCAS((int *)&node_child_const[target], child, LOCK)) {
if (child == NULL_POINTER) {
node_child_const[target] = idx; // insert body and release lock
} else { // collided with another body
do {
new_node_idx = atomicAdd((int *)&object_len, 1) + 1; // atomically get the next unused cell
if (first_insert) {
new_cell_idx = new_node_idx;
}
// find the center coordinate of the new cell
curr_length *= 0.5f;
cell_point_x = node_point_x_const[insert_idx] -
pow((double)-1.f, (double)((quad_idx) & 1)) * curr_length;
cell_point_y = node_point_y_const[insert_idx] -
pow((double)-1.f, (double)((quad_idx >> 1) & 1)) * curr_length;
// init new cell
node_point_x_const[new_node_idx] = cell_point_x;
node_point_y_const[new_node_idx] = cell_point_y;
node_mass_const[new_node_idx] = -1.0f;
inorder_const[new_node_idx] = -1;
for (quad_iter = 0; quad_iter < 4; quad_iter++) {
node_child_const[new_node_idx * 4 + quad_iter] = NULL_POINTER;
}
// insert the new cell if this is not the first insert;
// do not insert the first new cell to avoid releasing lock too early
if (!first_insert) {
node_child_const[insert_idx * 4 + quad_idx] = new_node_idx + capacity_const;
} else {
first_insert = false;
}
// update collided body to the new cell
quad_idx = getQuadrant(cell_point_x, cell_point_y,
point_x_const[child], point_y_const[child]);
node_child_const[new_node_idx * 4 + quad_idx] = child;
// check for further collisions
insert_idx = new_node_idx;
quad_idx = getQuadrant(cell_point_x, cell_point_y, body_point_x,
body_point_y);
child = node_child_const[insert_idx * 4 + quad_idx];
} while (child >= 0);
// insert new body
node_child_const[insert_idx * 4 + quad_idx] = idx;
// make sure newcell subtree is visible
__threadfence();
// insert new_cell and release lock
node_child_const[target] = new_cell_idx + capacity_const;
}
idx += step;
}
}
// wait for other warps to finish insertion
__syncthreads();
}
#endif
}
// kernel3: Summarize body information in each internal quadtree node
__global__ void kernel3Summarize() {
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0)
// printf("Hello from kernel 3\n");
register int idx = object_len - blockIdx.x * blockDim.x - threadIdx.x;
register int step = blockDim.x * gridDim.x;
register int quad_iter;
register int nonnull_idx; // keep track of non-null pointer
register int cnt;
register int child_idx;
register int missing = 0;
register float mass;
register float cumulative_mass;
register float sum_x; // sum of product of mass * point_x
register float sum_y; // sum of product of mass * point_y
__shared__ volatile int cache[256 * 4]; // NUM_THREAD * 4
while (idx >= 0) {
// initialize default settings for each cell
nonnull_idx = 0;
sum_x = 0.f;
sum_y = 0.f;
if (missing == 0) {
cumulative_mass = 0.f;
cnt = 0;
// initialize center of gravity
for (quad_iter = 0; quad_iter < 4; quad_iter++) {
child_idx = node_child_const[idx * 4 + quad_iter];
// iterate over existing children ONLY
if (child_idx >= 0) {
// reset mass for each child
mass = 0.f;
// move the existing children to the front of child array in each
// cell and move all the nulls to the end
shuffleNonNullPointer(quad_iter, nonnull_idx, child_idx, idx);
if (child_idx < capacity_const) { // body
mass = mass_const[child_idx];
} else { // cell
mass = node_mass_const[child_idx - capacity_const];
}
if (mass >= 0.f) { // child is ready
// add its contribution to center of gravity
cumulative_mass += mass;
if (child_idx < capacity_const) { // body
sum_x += point_x_const[child_idx] * mass;
sum_y += point_y_const[child_idx] * mass;
cnt++;
} else { // cell
sum_x += node_point_x_const[child_idx - capacity_const] * mass;
sum_y += node_point_y_const[child_idx - capacity_const] * mass;
cnt += node_cnt_const[child_idx - capacity_const];
}
} else {
// cache child index
cache[missing * 256 + threadIdx.x] = child_idx;
missing++;
}
nonnull_idx++;
}
}
}
if (missing != 0) {
do {
child_idx = cache[(missing - 1) * 256 + threadIdx.x];
mass = node_mass_const[child_idx - capacity_const];
// check if child in cache is ready
// if not, break out of the loop
// "thread divergence deliberately forces the thread to wait for
// a while before trying again to throttle polling requests."
if (mass >= 0.f) {
// remove from cache and add its contribution to center of gravity
missing--;
cumulative_mass += mass;
sum_x += node_point_x_const[child_idx - capacity_const] * mass;
sum_y += node_point_y_const[child_idx - capacity_const] * mass;
cnt += node_cnt_const[child_idx - capacity_const];
}
} while (mass >= 0.f && missing != 0);
}
if (missing == 0) {
// store center of gravity
node_point_x_const[idx] = sum_x / cumulative_mass;
node_point_y_const[idx] = sum_y / cumulative_mass;
// store cumulative count
node_cnt_const[idx] = cnt;
__threadfence(); // make sure center of gravity is visible
// store cumulative mass
node_mass_const[idx] = cumulative_mass;
__threadfence(); // make sure to sync before next iteration
idx -= step;
}
}
#endif
}
// kernel4: Approximately sort the bodies by spatial distance
__global__ void kernel4Sort() {
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0)
// printf("Hello from kernel 4 \n");
register int idx = blockIdx.x * blockDim.x + threadIdx.x;
register int step = blockDim.x * gridDim.x;
register int child_idx;
register int quad_iter; // traverse 4 child
register int inorder_rank;
// top-down traversal of cell nodes
while (idx <= object_len) {
inorder_rank = inorder_const[idx];
// check if rank has been assigned
if (inorder_rank >= 0) {
for (quad_iter = 0; quad_iter < 4; quad_iter++) {
child_idx = node_child_const[idx * 4 + quad_iter];
if (child_idx >= capacity_const) { // cell
child_idx -= capacity_const;
inorder_const[child_idx] = inorder_rank;
inorder_rank += node_cnt_const[child_idx];
} else if (child_idx >= 0) { // body
sort_const[inorder_rank] = child_idx;
inorder_rank++;
}
}
idx += step;
}
__threadfence();
}
__syncthreads();
#endif
}
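// Barnes-Hut force evaluation for one body: iterative depth-first traversal of the
// quadtree with an explicit (cell length, node index) stack; sufficiently distant
// cells are approximated by their center of mass, otherwise their children are
// pushed onto the stack.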
__device__ float2 kernel5HelperComputeForNode(int node_idx) {
register int stack[2048];
register int depth = 0;
// push root node onto the stack
stack[depth++] = length_const;
stack[depth++] = capacity_const;
// cache
register int curr_idx = 0;
register int curr_length = 0;
register float r_inv = 0.f;
register float dr_x = 0.f, dr_y = 0.f;
register float acc_x = 0.f, acc_y = 0.f;
register float mass;
register float point_x = point_x_const[node_idx];
register float point_y = point_y_const[node_idx];
while (depth > 0) {
curr_idx = stack[--depth];
curr_length = stack[--depth];
if (curr_idx >= 0 && curr_idx < capacity_const) { // body node
if (curr_idx != node_idx) {
dr_x = point_x_const[curr_idx] - point_x;
dr_y = point_y_const[curr_idx] - point_y;
mass = mass_const[curr_idx];
r_inv = getDistance(dr_x, dr_y);
update_acc(mass, r_inv, dr_x, dr_y, &acc_x, &acc_y);
}
} else { // cell node
curr_idx -= capacity_const;
dr_x = node_point_x_const[curr_idx] - point_x;
dr_y = node_point_y_const[curr_idx] - point_y;
mass = node_mass_const[curr_idx];
// if the cell distance is sufficiently far way
if (curr_length * r_inv < SD_TRESHOLD) {
r_inv = getDistance(dr_x, dr_y);
update_acc(mass, r_inv, dr_x, dr_y, &acc_x, &acc_y);
} else {
for (int quad_iter = 0; quad_iter < 4; quad_iter++) {
// add the length and child_idx of children that are not null
if (node_child_const[4 * curr_idx + quad_iter] != NULL_POINTER) {
stack[depth++] = curr_length * 0.5;
stack[depth++] = node_child_const[4 * curr_idx + quad_iter];
} else {
break; // early return
}
}
}
}
}
__syncthreads();
return make_float2(acc_x, acc_y);
}
// kernel5: Compute forces acting on each body with help of quadtree
__global__ void kernel5Compute() {
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0)
// printf("Hello from kernel 5\n");
register int idx;
register int step = blockDim.x * gridDim.x;
register int node_idx;
register float2 acc;
for (idx = blockIdx.x * blockDim.x + threadIdx.x; idx < capacity_const; idx += step) {
node_idx = sort_const[idx];
// precompute and cache info
acc = kernel5HelperComputeForNode(node_idx);
acc_x_const[node_idx] = acc.x;
acc_y_const[node_idx] = acc.y;
}
#endif
}
// kernel 6: Update body positions and velocities
__global__ void kernel6Update() {
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0)
// printf("Hello from kernel 6\n");
register int idx;
register int step = blockDim.x * gridDim.x;
register float delta_speed_x, delta_speed_y;
register float speed_x, speed_y;
for (idx = blockIdx.x * blockDim.x + threadIdx.x; idx < capacity_const; idx += step) {
delta_speed_x = acc_x_const[idx] * dt_const;
delta_speed_y = acc_y_const[idx] * dt_const;
speed_x = speed_x_const[idx] + delta_speed_x;
speed_y = speed_y_const[idx] + delta_speed_y;
speed_x_const[idx] = speed_x;
speed_y_const[idx] = speed_y;
point_x_const[idx] += speed_x * dt_const;
point_y_const[idx] += speed_y * dt_const;
}
#endif
}
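// Host driver for one simulation step: flattens the object array into per-field
// buffers, uploads them together with the cell/sort scratch space, runs the six
// Barnes-Hut kernels in sequence, and copies the updated positions and speeds back.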
void main_update(SpaceModel *m, SimulationConfig config, GS_FLOAT dt) {
float *mass, *point_x, *point_y, *speed_x, *speed_y; // host vars
float *mass_dev, *point_x_dev, *point_y_dev, *speed_x_dev, *speed_y_dev,
*acc_x, *acc_y; // device vars
size_t capacity;
int cell_capacity;
float max_split;
int len;
int *len_dev;
len = m->objects->len;
capacity = len;
// calculate the max # of potential splits in a galaxy to estimate the size of the cell array
max_split = log2f(config.view_bounds.size.x / config.galaxy_size) * config.objects_n;
cell_capacity = config.galaxies_n * round(max_split);
hipMalloc((void **)&len_dev, sizeof(int));
hipMemcpy(len_dev, &len, sizeof(int), hipMemcpyHostToDevice);
hipMemcpyToSymbol(object_len, &len_dev, sizeof(int));
mass = (float *)malloc(sizeof(float) * capacity);
point_x = (float *)malloc(sizeof(float) * capacity);
point_y = (float *)malloc(sizeof(float) * capacity);
speed_x = (float *)malloc(sizeof(float) * capacity);
speed_y = (float *)malloc(sizeof(float) * capacity);
// Flattening out the Object struct into multiple arrays
// One array per field
for (int i = 0; i < capacity; i++) {
mass[i] = m->objects->objects[i].mass;
point_x[i] = m->objects->objects[i].position.x;
point_y[i] = m->objects->objects[i].position.y;
speed_x[i] = m->objects->objects[i].speed.x;
speed_y[i] = m->objects->objects[i].speed.y;
}
hipMalloc(&mass_dev, sizeof(float) * (capacity + 1));
hipMalloc(&point_x_dev, sizeof(float) * (capacity + 1));
hipMalloc(&point_y_dev, sizeof(float) * (capacity + 1));
hipMalloc(&speed_x_dev, sizeof(float) * (capacity + 1));
hipMalloc(&speed_y_dev, sizeof(float) * (capacity + 1));
hipMalloc(&acc_x, sizeof(float) * (capacity + 1));
hipMalloc(&acc_y, sizeof(float) * (capacity + 1));
hipMemcpy(mass_dev, mass, sizeof(float) * capacity, hipMemcpyHostToDevice);
hipMemcpy(point_x_dev, point_x, sizeof(float) * capacity,
hipMemcpyHostToDevice);
hipMemcpy(point_y_dev, point_y, sizeof(float) * capacity,
hipMemcpyHostToDevice);
hipMemcpy(speed_x_dev, speed_x, sizeof(float) * capacity,
hipMemcpyHostToDevice);
hipMemcpy(speed_y_dev, speed_y, sizeof(float) * capacity,
hipMemcpyHostToDevice);
// Copy device memory to constant memory
hipMemcpyToSymbol(mass_const, &mass_dev, sizeof(void *));
hipMemcpyToSymbol(point_x_const, &point_x_dev, sizeof(void *));
hipMemcpyToSymbol(point_y_const, &point_y_dev, sizeof(void *));
hipMemcpyToSymbol(speed_x_const, &speed_x_dev, sizeof(void *));
hipMemcpyToSymbol(speed_y_const, &speed_y_dev, sizeof(void *));
hipMemcpyToSymbol(acc_x_const, &acc_x, sizeof(void *));
hipMemcpyToSymbol(acc_y_const, &acc_y, sizeof(void *));
// cell vars
int *node_cnt, *node_child;
float *node_mass, *node_point_x, *node_point_y;
hipMalloc(&node_cnt, sizeof(int) * (cell_capacity + 1));
hipMalloc(&node_child, sizeof(int) * (cell_capacity + 1) * 4);
hipMalloc(&node_mass, sizeof(float) * (cell_capacity + 1));
hipMalloc(&node_point_x, sizeof(float) * (cell_capacity + 1));
hipMalloc(&node_point_y, sizeof(float) * (cell_capacity + 1));
// initialize all counts to 0
hipMemset(node_cnt, 0, sizeof(int) * (cell_capacity + 1));
hipMemcpyToSymbol(node_cnt_const, &node_cnt, sizeof(void *));
hipMemcpyToSymbol(node_child_const, &node_child, sizeof(void *));
hipMemcpyToSymbol(node_mass_const, &node_mass, sizeof(void *));
hipMemcpyToSymbol(node_point_x_const, &node_point_x, sizeof(void *));
hipMemcpyToSymbol(node_point_y_const, &node_point_y, sizeof(void *));
// for sorting
int *inorder_rank, *sort;
hipMalloc((void **)&inorder_rank, sizeof(int) * (cell_capacity + 1));
hipMalloc((void **)&sort, sizeof(int) * (capacity + 1));
hipMemcpyToSymbol(inorder_const, &inorder_rank, sizeof(void *));
hipMemcpyToSymbol(sort_const, &sort, sizeof(void *));
hipMemcpyToSymbol(capacity_const, &capacity, sizeof(size_t));
hipMemcpyToSymbol(dt_const, &dt, sizeof(float));
// alternative to kernel1
hipEvent_t start, stop;
float time;
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0);
hipLaunchKernelGGL(( kernel1Init), dim3(NUM_BLOCK * 8), dim3(256), 0, 0, );
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
hipEventElapsedTime (&time, start, stop);
hipEventDestroy (start);
hipEventDestroy (stop);
// printf("Kernel 1 time: %3.4f \n", time);
// kernel2: Build hierarchical decomposition by inserting each body into
// quadtree
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0);
hipLaunchKernelGGL(( kernel2BuildTree), dim3(NUM_BLOCK * 8), dim3(256), 0, 0, );
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
hipEventElapsedTime (&time, start, stop);
hipEventDestroy (start);
hipEventDestroy (stop);
// printf("Kernel 2 time: %3.4f \n", time);
// kernel3: Summarize body information in each internal quadtree node
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0);
hipLaunchKernelGGL(( kernel3Summarize), dim3(NUM_BLOCK * 8), dim3(256), 0, 0, );
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
hipEventElapsedTime (&time, start, stop);
hipEventDestroy (start);
hipEventDestroy (stop);
// printf("Kernel 3 time: %3.4f \n", time);
// kernel4: Approximately sort the bodies by spatial distance
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0);
hipLaunchKernelGGL(( kernel4Sort), dim3(NUM_BLOCK * 8), dim3(256), 0, 0, );
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
hipEventElapsedTime (&time, start, stop);
hipEventDestroy (start);
hipEventDestroy (stop);
// printf("Kernel 4 time: %3.4f \n", time);
// kernel5: Compute forces acting on each body with help of quadtree
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0);
hipLaunchKernelGGL(( kernel5Compute), dim3(NUM_BLOCK * 8), dim3(256), 0, 0, );
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
hipEventElapsedTime (&time, start, stop);
hipEventDestroy (start);
hipEventDestroy (stop);
// printf("Kernel 5 time: %3.4f \n", time);
// kernel 6: Update body positions and velocities
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0);
hipLaunchKernelGGL(( kernel6Update), dim3(NUM_BLOCK * 8), dim3(256), 0, 0, );
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
hipEventElapsedTime (&time, start, stop);
hipEventDestroy (start);
hipEventDestroy (stop);
// printf("Kernel 6 time: %3.4f \n", time);
// hipError_t error = hipPeekAtLastError();
// if (error != hipSuccess) {
// printLastCudaError(hipGetErrorString(error));
// exit(-1);
// }
// GPU to CPU
hipMemcpy(point_x, point_x_dev, sizeof(float) * capacity,
hipMemcpyDeviceToHost);
hipMemcpy(point_y, point_y_dev, sizeof(float) * capacity,
hipMemcpyDeviceToHost);
hipMemcpy(speed_x, speed_x_dev, sizeof(float) * capacity,
hipMemcpyDeviceToHost);
hipMemcpy(speed_y, speed_y_dev, sizeof(float) * capacity,
hipMemcpyDeviceToHost);
for (int i = 0; i < capacity; i++) {
m->objects->objects[i].position.x = point_x[i];
m->objects->objects[i].position.y = point_y[i];
m->objects->objects[i].speed.x = speed_x[i];
m->objects->objects[i].speed.y = speed_y[i];
}
// remove out of bounds bodies
spacemodel_remove_objects_outside_bounds(m);
free(mass);
free(point_x);
free(point_y);
free(speed_x);
free(speed_y);
hipFree(len_dev);
hipFree(mass_dev);
hipFree(point_x_dev);
hipFree(point_y_dev);
hipFree(speed_x_dev);
hipFree(speed_y_dev);
hipFree(inorder_rank);
hipFree(sort);
hipFree(acc_x);
hipFree(acc_y);
hipFree(node_cnt);
hipFree(node_child);
hipFree(node_mass);
hipFree(node_point_x);
hipFree(node_point_y);
}
void gpu_timer_start(hipEvent_t start, hipEvent_t stop) {
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0);
}
float gpu_timer_stop(hipEvent_t start, hipEvent_t stop) {
float time;
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
hipEventElapsedTime (&time, start, stop);
hipEventDestroy (start);
hipEventDestroy (stop);
return time;
}
| 169a1f4da228dfa26462c3d9299617f36da0bf2a.cu | #include "cuda_space_model.h"
#include "helper_cuda.h"
#include "space_model.h"
#include "stdio.h"
#include "stdlib.h"
// input - body nodes
__constant__ volatile float *mass_const;
__constant__ volatile float *point_x_const, *point_y_const;
__constant__ volatile float *speed_x_const, *speed_y_const;
__constant__ volatile float *acc_x_const, *acc_y_const;
__constant__ int capacity_const;
__device__ volatile int object_len;
__device__ volatile float length_const;
// cell nodes
__constant__ volatile int *node_cnt_const, *node_child_const;
__constant__ volatile float *node_mass_const;
__constant__ volatile float *node_point_x_const, *node_point_y_const;
// sorting related
__constant__ volatile int *inorder_const, *sort_const;
// calculation related
__constant__ float dt_const;
#define NUM_BLOCK 12
#define LOCK -2
#define NULL_POINTER -1
__device__ __inline__ int getQuadrant(float root_x, float root_y, float x,
float y) {
int idx = 0;
if (root_x < x) {
idx += 1;
}
if (root_y < y) {
idx += 2;
}
return idx;
}
__device__ __inline__ void shuffleNonNullPointer(int quad_idx, int nonnull_idx,
int child_idx, int cell_idx) {
if (quad_idx != nonnull_idx) {
node_child_const[cell_idx * 4 + nonnull_idx] = child_idx;
node_child_const[cell_idx * 4 + quad_idx] = -1;
}
return;
}
// compute 1 / sqrt of the displacement
__device__ __inline__ float getDistance(float x, float y) {
return rsqrtf(x * x + y * y + SOFT_CONST);
}
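// accumulate one body's/cell's gravitational pull: a += G * m * dr / |dr|^3,
// using the (softened) reciprocal distance r_inv computed by getDistance()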
__device__ void update_acc(float mass, float r_inv, float dr_x, float dr_y, float *acc_x, float *acc_y) {
float F = mass * G_CONST * pow(r_inv, 3.0f);
*acc_x += dr_x * F;
*acc_y += dr_y * F;
}
// kernel1: Init root
__global__ void kernel1Init() {
int root_idx = 0;
for (int i = 0; i < 4; i++) {
node_child_const[4 * root_idx + i] = NULL_POINTER;
}
node_mass_const[root_idx] = -1.f; // -1.f represents no mass computed yet
node_point_x_const[root_idx] = WINDOW_W / 2;
node_point_y_const[root_idx] = WINDOW_H / 2;
object_len = 0;
length_const = WINDOW_H / 2 + BORDER;
inorder_const[root_idx] = 0;
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0) {
// printf("Hello from kernel 1\n");
// }
#endif
}
// kernel2: Build hierarchical decomposition by inserting each body into
// quadtree
__global__ void kernel2BuildTree() {
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0)
// printf("Hello from kernel 2\n");
// caches the root’s data in the register file
register float length = length_const;
register float root_point_x = node_point_x_const[0];
register float root_point_y = node_point_y_const[0];
register int idx = blockIdx.x * blockDim.x + threadIdx.x;
register int step = blockDim.x * gridDim.x;
register float body_point_x = 0.f;
register float body_point_y = 0.f;
register float cell_point_x = 0.f;
register float cell_point_y = 0.f;
register int new_node_idx = 0;
register int insert_idx = 0;
register int quad_idx = 0;
register float curr_length;
register int child;
register int target = 0;
register int new_cell_idx;
register bool first_insert = true;
register int quad_iter = 0;
while (idx < capacity_const) {
body_point_x = point_x_const[idx];
body_point_y = point_y_const[idx];
// reset for each iter
curr_length = length;
insert_idx = 0;
first_insert = true;
// find the cell
while (true) {
quad_idx = getQuadrant(node_point_x_const[insert_idx],
node_point_y_const[insert_idx], body_point_x,
body_point_y);
child = node_child_const[insert_idx * 4 + quad_idx];
if (child < capacity_const)
break;
curr_length *= 0.5;
insert_idx = child-capacity_const;
}
if (child != LOCK) {
target = insert_idx * 4 + quad_idx;
if (child == atomicCAS((int *)&node_child_const[target], child, LOCK)) {
if (child == NULL_POINTER) {
node_child_const[target] = idx; // insert body and release lock
} else { // collided with another body
do {
new_node_idx = atomicAdd((int *)&object_len, 1) + 1; // atomically get the next unused cell
if (first_insert) {
new_cell_idx = new_node_idx;
}
// find the center coordinate of the new cell
curr_length *= 0.5f;
cell_point_x = node_point_x_const[insert_idx] -
pow((double)-1.f, (double)((quad_idx) & 1)) * curr_length;
cell_point_y = node_point_y_const[insert_idx] -
pow((double)-1.f, (double)((quad_idx >> 1) & 1)) * curr_length;
// init new cell
node_point_x_const[new_node_idx] = cell_point_x;
node_point_y_const[new_node_idx] = cell_point_y;
node_mass_const[new_node_idx] = -1.0f;
inorder_const[new_node_idx] = -1;
for (quad_iter = 0; quad_iter < 4; quad_iter++) {
node_child_const[new_node_idx * 4 + quad_iter] = NULL_POINTER;
}
// insert the new cell if this is not the first insert;
// do not insert the first new cell to avoid releasing lock too early
if (!first_insert) {
node_child_const[insert_idx * 4 + quad_idx] = new_node_idx + capacity_const;
} else {
first_insert = false;
}
// update collided body to the new cell
quad_idx = getQuadrant(cell_point_x, cell_point_y,
point_x_const[child], point_y_const[child]);
node_child_const[new_node_idx * 4 + quad_idx] = child;
// check for further collisions
insert_idx = new_node_idx;
quad_idx = getQuadrant(cell_point_x, cell_point_y, body_point_x,
body_point_y);
child = node_child_const[insert_idx * 4 + quad_idx];
} while (child >= 0);
// insert new body
node_child_const[insert_idx * 4 + quad_idx] = idx;
// make sure newcell subtree is visible
__threadfence();
// insert new_cell and release lock
node_child_const[target] = new_cell_idx + capacity_const;
}
idx += step;
}
}
// wait for other warps to finish insertion
__syncthreads();
}
#endif
}
// kernel3: Summarize body information in each internal quadtree node
__global__ void kernel3Summarize() {
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0)
// printf("Hello from kernel 3\n");
register int idx = object_len - blockIdx.x * blockDim.x - threadIdx.x;
register int step = blockDim.x * gridDim.x;
register int quad_iter;
register int nonnull_idx; // keep track of non-null pointer
register int cnt;
register int child_idx;
register int missing = 0;
register float mass;
register float cumulative_mass;
register float sum_x; // sum of product of mass * point_x
register float sum_y; // sum of product of mass * point_y
__shared__ volatile int cache[256 * 4]; // NUM_THREAD * 4
while (idx >= 0) {
// initialize default settings for each cell
nonnull_idx = 0;
sum_x = 0.f;
sum_y = 0.f;
if (missing == 0) {
cumulative_mass = 0.f;
cnt = 0;
// initialize center of gravity
for (quad_iter = 0; quad_iter < 4; quad_iter++) {
child_idx = node_child_const[idx * 4 + quad_iter];
// iterate over existing children ONLY
if (child_idx >= 0) {
// reset mass for each child
mass = 0.f;
// move the existing children to the front of child array in each
// cell and move all the nulls to the end
shuffleNonNullPointer(quad_iter, nonnull_idx, child_idx, idx);
if (child_idx < capacity_const) { // body
mass = mass_const[child_idx];
} else { // cell
mass = node_mass_const[child_idx - capacity_const];
}
if (mass >= 0.f) { // child is ready
// add its contribution to center of gravity
cumulative_mass += mass;
if (child_idx < capacity_const) { // body
sum_x += point_x_const[child_idx] * mass;
sum_y += point_y_const[child_idx] * mass;
cnt++;
} else { // cell
sum_x += node_point_x_const[child_idx - capacity_const] * mass;
sum_y += node_point_y_const[child_idx - capacity_const] * mass;
cnt += node_cnt_const[child_idx - capacity_const];
}
} else {
// cache child index
cache[missing * 256 + threadIdx.x] = child_idx;
missing++;
}
nonnull_idx++;
}
}
}
if (missing != 0) {
do {
child_idx = cache[(missing - 1) * 256 + threadIdx.x];
mass = node_mass_const[child_idx - capacity_const];
// check if child in cache is ready
// if not, break out of the loop
// "thread divergence deliberately forces the thread to wait for
// a while before trying again to throttle polling requests."
if (mass >= 0.f) {
// remove from cache and add its contribution to center of gravity
missing--;
cumulative_mass += mass;
sum_x += node_point_x_const[child_idx - capacity_const] * mass;
sum_y += node_point_y_const[child_idx - capacity_const] * mass;
cnt += node_cnt_const[child_idx - capacity_const];
}
} while (mass >= 0.f && missing != 0);
}
if (missing == 0) {
// store center of gravity
node_point_x_const[idx] = sum_x / cumulative_mass;
node_point_y_const[idx] = sum_y / cumulative_mass;
// store cumulative count
node_cnt_const[idx] = cnt;
__threadfence(); // make sure center of gravity is visible
// store cumulative mass
node_mass_const[idx] = cumulative_mass;
__threadfence(); // make sure to sync before next iteration
idx -= step;
}
}
#endif
}
// kernel4: Approximately sort the bodies by spatial distance
__global__ void kernel4Sort() {
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0)
// printf("Hello from kernel 4 \n");
register int idx = blockIdx.x * blockDim.x + threadIdx.x;
register int step = blockDim.x * gridDim.x;
register int child_idx;
register int quad_iter; // traverse 4 child
register int inorder_rank;
// top-down traversal of cell nodes
while (idx <= object_len) {
inorder_rank = inorder_const[idx];
// check if rank has been assigned
if (inorder_rank >= 0) {
for (quad_iter = 0; quad_iter < 4; quad_iter++) {
child_idx = node_child_const[idx * 4 + quad_iter];
if (child_idx >= capacity_const) { // cell
child_idx -= capacity_const;
inorder_const[child_idx] = inorder_rank;
inorder_rank += node_cnt_const[child_idx];
} else if (child_idx >= 0) { // body
sort_const[inorder_rank] = child_idx;
inorder_rank++;
}
}
idx += step;
}
__threadfence();
}
__syncthreads();
#endif
}
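// Barnes-Hut force evaluation for one body: iterative depth-first traversal of the
// quadtree with an explicit (cell length, node index) stack; sufficiently distant
// cells are approximated by their center of mass, otherwise their children are
// pushed onto the stack.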
__device__ float2 kernel5HelperComputeForNode(int node_idx) {
register int stack[2048];
register int depth = 0;
// push root node onto the stack
stack[depth++] = length_const;
stack[depth++] = capacity_const;
// cache
register int curr_idx = 0;
register int curr_length = 0;
register float r_inv = 0.f;
register float dr_x = 0.f, dr_y = 0.f;
register float acc_x = 0.f, acc_y = 0.f;
register float mass;
register float point_x = point_x_const[node_idx];
register float point_y = point_y_const[node_idx];
while (depth > 0) {
curr_idx = stack[--depth];
curr_length = stack[--depth];
if (curr_idx >= 0 && curr_idx < capacity_const) { // body node
if (curr_idx != node_idx) {
dr_x = point_x_const[curr_idx] - point_x;
dr_y = point_y_const[curr_idx] - point_y;
mass = mass_const[curr_idx];
r_inv = getDistance(dr_x, dr_y);
update_acc(mass, r_inv, dr_x, dr_y, &acc_x, &acc_y);
}
} else { // cell node
curr_idx -= capacity_const;
dr_x = node_point_x_const[curr_idx] - point_x;
dr_y = node_point_y_const[curr_idx] - point_y;
mass = node_mass_const[curr_idx];
// if the cell distance is sufficiently far way
if (curr_length * r_inv < SD_TRESHOLD) {
r_inv = getDistance(dr_x, dr_y);
update_acc(mass, r_inv, dr_x, dr_y, &acc_x, &acc_y);
} else {
for (int quad_iter = 0; quad_iter < 4; quad_iter++) {
// add the length and child_idx of children that are not null
if (node_child_const[4 * curr_idx + quad_iter] != NULL_POINTER) {
stack[depth++] = curr_length * 0.5;
stack[depth++] = node_child_const[4 * curr_idx + quad_iter];
} else {
break; // early return
}
}
}
}
}
__syncthreads();
return make_float2(acc_x, acc_y);
}
// kernel5: Compute forces acting on each body with help of quadtree
__global__ void kernel5Compute() {
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0)
// printf("Hello from kernel 5\n");
register int idx;
register int step = blockDim.x * gridDim.x;
register int node_idx;
register float2 acc;
for (idx = blockIdx.x * blockDim.x + threadIdx.x; idx < capacity_const; idx += step) {
node_idx = sort_const[idx];
// precompute and cache info
acc = kernel5HelperComputeForNode(node_idx);
acc_x_const[node_idx] = acc.x;
acc_y_const[node_idx] = acc.y;
}
#endif
}
// kernel 6: Update body positions and velocities
__global__ void kernel6Update() {
#if __CUDA_ARCH__ >= 200
// if (blockIdx.x * blockDim.x + threadIdx.x == 0)
// printf("Hello from kernel 6\n");
register int idx;
register int step = blockDim.x * gridDim.x;
register float delta_speed_x, delta_speed_y;
register float speed_x, speed_y;
for (idx = blockIdx.x * blockDim.x + threadIdx.x; idx < capacity_const; idx += step) {
delta_speed_x = acc_x_const[idx] * dt_const;
delta_speed_y = acc_y_const[idx] * dt_const;
speed_x = speed_x_const[idx] + delta_speed_x;
speed_y = speed_y_const[idx] + delta_speed_y;
speed_x_const[idx] = speed_x;
speed_y_const[idx] = speed_y;
point_x_const[idx] += speed_x * dt_const;
point_y_const[idx] += speed_y * dt_const;
}
#endif
}
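// Host driver for one simulation step: flattens the object array into per-field
// buffers, uploads them together with the cell/sort scratch space, runs the six
// Barnes-Hut kernels in sequence, and copies the updated positions and speeds back.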
void main_update(SpaceModel *m, SimulationConfig config, GS_FLOAT dt) {
float *mass, *point_x, *point_y, *speed_x, *speed_y; // host vars
float *mass_dev, *point_x_dev, *point_y_dev, *speed_x_dev, *speed_y_dev,
*acc_x, *acc_y; // device vars
size_t capacity;
int cell_capacity;
float max_split;
int len;
int *len_dev;
len = m->objects->len;
capacity = len;
// calculate the max # of potential splits in a galaxy to estimate the size of the cell array
max_split = log2f(config.view_bounds.size.x / config.galaxy_size) * config.objects_n;
cell_capacity = config.galaxies_n * round(max_split);
cudaMalloc((void **)&len_dev, sizeof(int));
cudaMemcpy(len_dev, &len, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(object_len, &len_dev, sizeof(int));
mass = (float *)malloc(sizeof(float) * capacity);
point_x = (float *)malloc(sizeof(float) * capacity);
point_y = (float *)malloc(sizeof(float) * capacity);
speed_x = (float *)malloc(sizeof(float) * capacity);
speed_y = (float *)malloc(sizeof(float) * capacity);
// Flattening out the Object struct into multiple arrays
// One array per field
for (int i = 0; i < capacity; i++) {
mass[i] = m->objects->objects[i].mass;
point_x[i] = m->objects->objects[i].position.x;
point_y[i] = m->objects->objects[i].position.y;
speed_x[i] = m->objects->objects[i].speed.x;
speed_y[i] = m->objects->objects[i].speed.y;
}
cudaMalloc(&mass_dev, sizeof(float) * (capacity + 1));
cudaMalloc(&point_x_dev, sizeof(float) * (capacity + 1));
cudaMalloc(&point_y_dev, sizeof(float) * (capacity + 1));
cudaMalloc(&speed_x_dev, sizeof(float) * (capacity + 1));
cudaMalloc(&speed_y_dev, sizeof(float) * (capacity + 1));
cudaMalloc(&acc_x, sizeof(float) * (capacity + 1));
cudaMalloc(&acc_y, sizeof(float) * (capacity + 1));
cudaMemcpy(mass_dev, mass, sizeof(float) * capacity, cudaMemcpyHostToDevice);
cudaMemcpy(point_x_dev, point_x, sizeof(float) * capacity,
cudaMemcpyHostToDevice);
cudaMemcpy(point_y_dev, point_y, sizeof(float) * capacity,
cudaMemcpyHostToDevice);
cudaMemcpy(speed_x_dev, speed_x, sizeof(float) * capacity,
cudaMemcpyHostToDevice);
cudaMemcpy(speed_y_dev, speed_y, sizeof(float) * capacity,
cudaMemcpyHostToDevice);
// Copy device memory to constant memory
cudaMemcpyToSymbol(mass_const, &mass_dev, sizeof(void *));
cudaMemcpyToSymbol(point_x_const, &point_x_dev, sizeof(void *));
cudaMemcpyToSymbol(point_y_const, &point_y_dev, sizeof(void *));
cudaMemcpyToSymbol(speed_x_const, &speed_x_dev, sizeof(void *));
cudaMemcpyToSymbol(speed_y_const, &speed_y_dev, sizeof(void *));
cudaMemcpyToSymbol(acc_x_const, &acc_x, sizeof(void *));
cudaMemcpyToSymbol(acc_y_const, &acc_y, sizeof(void *));
// cell vars
int *node_cnt, *node_child;
float *node_mass, *node_point_x, *node_point_y;
cudaMalloc(&node_cnt, sizeof(int) * (cell_capacity + 1));
cudaMalloc(&node_child, sizeof(int) * (cell_capacity + 1) * 4);
cudaMalloc(&node_mass, sizeof(float) * (cell_capacity + 1));
cudaMalloc(&node_point_x, sizeof(float) * (cell_capacity + 1));
cudaMalloc(&node_point_y, sizeof(float) * (cell_capacity + 1));
// initialize all counts to 0
cudaMemset(node_cnt, 0, sizeof(int) * (cell_capacity + 1));
cudaMemcpyToSymbol(node_cnt_const, &node_cnt, sizeof(void *));
cudaMemcpyToSymbol(node_child_const, &node_child, sizeof(void *));
cudaMemcpyToSymbol(node_mass_const, &node_mass, sizeof(void *));
cudaMemcpyToSymbol(node_point_x_const, &node_point_x, sizeof(void *));
cudaMemcpyToSymbol(node_point_y_const, &node_point_y, sizeof(void *));
// for sorting
int *inorder_rank, *sort;
cudaMalloc((void **)&inorder_rank, sizeof(int) * (cell_capacity + 1));
cudaMalloc((void **)&sort, sizeof(int) * (capacity + 1));
cudaMemcpyToSymbol(inorder_const, &inorder_rank, sizeof(void *));
cudaMemcpyToSymbol(sort_const, &sort, sizeof(void *));
cudaMemcpyToSymbol(capacity_const, &capacity, sizeof(size_t));
cudaMemcpyToSymbol(dt_const, &dt, sizeof(float));
// alternative to kernel1
cudaEvent_t start, stop;
float time;
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
kernel1Init<<<NUM_BLOCK * 8, 256>>>();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&time, start, stop);
cudaEventDestroy (start);
cudaEventDestroy (stop);
// printf("Kernel 1 time: %3.4f \n", time);
// kernel2: Build hierarchical decomposition by inserting each body into
// quadtree
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
kernel2BuildTree<<<NUM_BLOCK * 8, 256>>>();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&time, start, stop);
cudaEventDestroy (start);
cudaEventDestroy (stop);
// printf("Kernel 2 time: %3.4f \n", time);
// kernel3: Summarize body information in each internal quadtree node
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
kernel3Summarize<<<NUM_BLOCK * 8, 256>>>();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&time, start, stop);
cudaEventDestroy (start);
cudaEventDestroy (stop);
// printf("Kernel 3 time: %3.4f \n", time);
// kernel4: Approximately sort the bodies by spatial distance
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
kernel4Sort<<<NUM_BLOCK * 8, 256>>>();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&time, start, stop);
cudaEventDestroy (start);
cudaEventDestroy (stop);
// printf("Kernel 4 time: %3.4f \n", time);
// kernel5: Compute forces acting on each body with help of quadtree
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
kernel5Compute<<<NUM_BLOCK * 8, 256>>>();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&time, start, stop);
cudaEventDestroy (start);
cudaEventDestroy (stop);
// printf("Kernel 5 time: %3.4f \n", time);
// kernel 6: Update body positions and velocities
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
kernel6Update<<<NUM_BLOCK * 8, 256>>>();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&time, start, stop);
cudaEventDestroy (start);
cudaEventDestroy (stop);
// printf("Kernel 6 time: %3.4f \n", time);
// cudaError_t error = cudaPeekAtLastError();
// if (error != cudaSuccess) {
// printLastCudaError(cudaGetErrorString(error));
// exit(-1);
// }
// GPU to CPU
cudaMemcpy(point_x, point_x_dev, sizeof(float) * capacity,
cudaMemcpyDeviceToHost);
cudaMemcpy(point_y, point_y_dev, sizeof(float) * capacity,
cudaMemcpyDeviceToHost);
cudaMemcpy(speed_x, speed_x_dev, sizeof(float) * capacity,
cudaMemcpyDeviceToHost);
cudaMemcpy(speed_y, speed_y_dev, sizeof(float) * capacity,
cudaMemcpyDeviceToHost);
for (int i = 0; i < capacity; i++) {
m->objects->objects[i].position.x = point_x[i];
m->objects->objects[i].position.y = point_y[i];
m->objects->objects[i].speed.x = speed_x[i];
m->objects->objects[i].speed.y = speed_y[i];
}
// remove out of bounds bodies
spacemodel_remove_objects_outside_bounds(m);
free(mass);
free(point_x);
free(point_y);
free(speed_x);
free(speed_y);
cudaFree(len_dev);
cudaFree(mass_dev);
cudaFree(point_x_dev);
cudaFree(point_y_dev);
cudaFree(speed_x_dev);
cudaFree(speed_y_dev);
cudaFree(inorder_rank);
cudaFree(sort);
cudaFree(acc_x);
cudaFree(acc_y);
cudaFree(node_cnt);
cudaFree(node_child);
cudaFree(node_mass);
cudaFree(node_point_x);
cudaFree(node_point_y);
}
void gpu_timer_start(cudaEvent_t start, cudaEvent_t stop) {
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
}
float gpu_timer_stop(cudaEvent_t start, cudaEvent_t stop) {
float time;
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&time, start, stop);
cudaEventDestroy (start);
cudaEventDestroy (stop);
return time;
}
|
31c917f44505b0803d0299026dc15a9984175531.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This experimental software is provided AS IS.
* Feel free to use/modify/distribute,
* If used, please retain this disclaimer and cite
* "GPUfs: Integrating a file system with GPUs",
* M Silberstein,B Ford,I Keidar,E Witchel
* ASPLOS13, March 2013, Houston,USA
*/
//#ifndef MAIN_FS_FILE
//#error "This file must be included in the fs.cu"
//#endif
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include "fs_constants.h"
#include "fs_debug.cu.h"
#include "util.cu.h"
#include "cpu_ipc.cu.h"
#include "mallocfree.cu.h"
#include "fs_structures.cu.h"
#include "timer.h"
#include "fs_globals.cu.h"
#include "async_ipc.cu.h"
#include "fs_initializer.cu.h"
#include <roctracer/roctx.h>
/************GLOBALS********/
// CPU Write-shared memory //
__device__ volatile CPU_IPC_OPEN_Queue* g_cpu_ipcOpenQueue;
__device__ volatile CPU_IPC_RW_Queue* g_cpu_ipcRWQueue;
__device__ volatile CPU_IPC_RW_Flags* g_cpu_ipcRWFlags;
//
// manager for rw RPC queue
__device__ volatile GPU_IPC_RW_Manager* g_ipcRWManager;
// Memory pool
__device__ volatile PPool* g_ppool;
// File table with block pointers
__device__ volatile FTable* g_ftable;
// Hash table with all the previously opened files indexed by their inodes
//__device__ volatile hash_table g_closed_ftable;
// HashMap with mapping from <fd, offset> to pframes
__device__ volatile HashMap* g_hashMap;
// file_id uniq counter
__device__ int g_file_id;
// a ring buffer for write back
__device__ async_close_rb_t* g_async_close_rb;
__device__ volatile uchar* g_stagingArea[RW_HOST_WORKERS][RW_SCRATCH_PER_WORKER];
__global__ void init_fs(volatile CPU_IPC_OPEN_Queue* _ipcOpenQueue,
volatile CPU_IPC_RW_Queue* _ipcRWQueue,
volatile GPU_IPC_RW_Manager* _ipcRWManager,
volatile PPool* _ppool,
volatile Page* _rawStorage,
volatile FTable* _ftable,
volatile HashMap* _hashMap,
volatile void** _stagingArea)
{
g_cpu_ipcOpenQueue=_ipcOpenQueue;
g_cpu_ipcRWQueue=_ipcRWQueue;
g_ipcRWManager=_ipcRWManager;
g_ipcRWManager->init_thread();
g_ppool=_ppool;
g_ppool->init_thread(_rawStorage);
g_hashMap=_hashMap;
g_hashMap->init_thread();
g_ftable=_ftable;
g_file_id=0;
for( int i = 0; i < RW_HOST_WORKERS; ++i )
{
for( int j = 0; j < RW_SCRATCH_PER_WORKER; ++j )
{
g_stagingArea[i][j] = (volatile uchar*)_stagingArea[i * RW_SCRATCH_PER_WORKER + j];
}
}
INIT_ALL_STATS
INIT_TIMING_STATS
//INIT_DEBUG
}
typedef volatile GPUGlobals* GPUGlobals_ptr;
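// Host-side setup: enables mapped host memory, allocates the shared GPU globals,
// copies the staging-area block addresses to the device, and launches init_fs from
// a single thread to initialize the GPU-resident file-system state.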
void initializer(GPUGlobals_ptr* globals)
{
CUDA_SAFE_CALL(hipSetDeviceFlags(hipDeviceMapHost));
*globals=new GPUGlobals();
//ssd_init( (*globals)->stagingArea, sizeof(uchar) * RW_HOST_WORKERS * RW_SCRATCH_PER_WORKER * FS_BLOCKSIZE * RW_SLOTS_PER_WORKER );
volatile void** temp;
CUDA_SAFE_CALL(hipMalloc(&temp,sizeof(void*) * RW_HOST_WORKERS * RW_SCRATCH_PER_WORKER));
for( int i = 0; i < RW_HOST_WORKERS; ++i )
{
for( int j = 0; j < RW_SCRATCH_PER_WORKER; ++j )
{
void* blockAddress = getStagingAreaOffset((*globals)->stagingArea, i, j);
CUDA_SAFE_CALL(
hipMemcpy(&(temp[i * RW_SCRATCH_PER_WORKER + j]), &blockAddress, sizeof(void*), hipMemcpyHostToDevice) );
}
}
// this must be done from a single thread!
hipLaunchKernelGGL(( init_fs), dim3(1),dim3(1), 0, 0, (*globals)->cpu_ipcOpenQueue,
(*globals)->cpu_ipcRWQueue,
(*globals)->ipcRWManager,
(*globals)->ppool,
(*globals)->rawStorage,
(*globals)->ftable,
(*globals)->hashMap,
temp);
hipDeviceSynchronize();
CUDA_SAFE_CALL(hipPeekAtLastError());
// pthread_attr_t attr;
// pthread_attr_init( &attr );
// pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
//
// (*globals)->done = 0;
//
// for( int i = 0; i < RW_HOST_WORKERS; ++i )
// {
// (*globals)->rwLoopTasksData[i].id = i;
// (*globals)->rwLoopTasksData[i].gpuGlobals = *globals;
// (*globals)->rwLoopTasksData[i].gpuid = 0;
//
// pthread_create( (pthread_t*)&((*globals)->rwLoopTasksIDs[i]), &attr, rw_task, (TaskData*)&((*globals)->rwLoopTasksData[i]) );
// }
//
// pthread_attr_destroy( &attr );
}
| 31c917f44505b0803d0299026dc15a9984175531.cu | /*
* This experimental software is provided AS IS.
* Feel free to use/modify/distribute,
* If used, please retain this disclaimer and cite
* "GPUfs: Integrating a file system with GPUs",
* M Silberstein,B Ford,I Keidar,E Witchel
* ASPLOS13, March 2013, Houston,USA
*/
//#ifndef MAIN_FS_FILE
//#error "This file must be included in the fs.cu"
//#endif
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include "fs_constants.h"
#include "fs_debug.cu.h"
#include "util.cu.h"
#include "cpu_ipc.cu.h"
#include "mallocfree.cu.h"
#include "fs_structures.cu.h"
#include "timer.h"
#include "fs_globals.cu.h"
#include "async_ipc.cu.h"
#include "fs_initializer.cu.h"
#include <nvToolsExt.h>
/************GLOBALS********/
// CPU Write-shared memory //
__device__ volatile CPU_IPC_OPEN_Queue* g_cpu_ipcOpenQueue;
__device__ volatile CPU_IPC_RW_Queue* g_cpu_ipcRWQueue;
__device__ volatile CPU_IPC_RW_Flags* g_cpu_ipcRWFlags;
//
// manager for rw RPC queue
__device__ volatile GPU_IPC_RW_Manager* g_ipcRWManager;
// Memory pool
__device__ volatile PPool* g_ppool;
// File table with block pointers
__device__ volatile FTable* g_ftable;
// Hash table with all the previously opened files indexed by their inodes
//__device__ volatile hash_table g_closed_ftable;
// HashMap with mapping from <fd, offset> to pframes
__device__ volatile HashMap* g_hashMap;
// file_id uniq counter
__device__ int g_file_id;
// a ring buffer for write back
__device__ async_close_rb_t* g_async_close_rb;
__device__ volatile uchar* g_stagingArea[RW_HOST_WORKERS][RW_SCRATCH_PER_WORKER];
__global__ void init_fs(volatile CPU_IPC_OPEN_Queue* _ipcOpenQueue,
volatile CPU_IPC_RW_Queue* _ipcRWQueue,
volatile GPU_IPC_RW_Manager* _ipcRWManager,
volatile PPool* _ppool,
volatile Page* _rawStorage,
volatile FTable* _ftable,
volatile HashMap* _hashMap,
volatile void** _stagingArea)
{
g_cpu_ipcOpenQueue=_ipcOpenQueue;
g_cpu_ipcRWQueue=_ipcRWQueue;
g_ipcRWManager=_ipcRWManager;
g_ipcRWManager->init_thread();
g_ppool=_ppool;
g_ppool->init_thread(_rawStorage);
g_hashMap=_hashMap;
g_hashMap->init_thread();
g_ftable=_ftable;
g_file_id=0;
for( int i = 0; i < RW_HOST_WORKERS; ++i )
{
for( int j = 0; j < RW_SCRATCH_PER_WORKER; ++j )
{
g_stagingArea[i][j] = (volatile uchar*)_stagingArea[i * RW_SCRATCH_PER_WORKER + j];
}
}
INIT_ALL_STATS
INIT_TIMING_STATS
//INIT_DEBUG
}
typedef volatile GPUGlobals* GPUGlobals_ptr;
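// Host-side setup: enables mapped host memory, allocates the shared GPU globals,
// copies the staging-area block addresses to the device, and launches init_fs from
// a single thread to initialize the GPU-resident file-system state.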
void initializer(GPUGlobals_ptr* globals)
{
CUDA_SAFE_CALL(cudaSetDeviceFlags(cudaDeviceMapHost));
*globals=new GPUGlobals();
//ssd_init( (*globals)->stagingArea, sizeof(uchar) * RW_HOST_WORKERS * RW_SCRATCH_PER_WORKER * FS_BLOCKSIZE * RW_SLOTS_PER_WORKER );
volatile void** temp;
CUDA_SAFE_CALL(cudaMalloc(&temp,sizeof(void*) * RW_HOST_WORKERS * RW_SCRATCH_PER_WORKER));
for( int i = 0; i < RW_HOST_WORKERS; ++i )
{
for( int j = 0; j < RW_SCRATCH_PER_WORKER; ++j )
{
void* blockAddress = getStagingAreaOffset((*globals)->stagingArea, i, j);
CUDA_SAFE_CALL(
cudaMemcpy(&(temp[i * RW_SCRATCH_PER_WORKER + j]), &blockAddress, sizeof(void*), cudaMemcpyHostToDevice) );
}
}
// this must be done from a single thread!
init_fs<<<1,1>>>((*globals)->cpu_ipcOpenQueue,
(*globals)->cpu_ipcRWQueue,
(*globals)->ipcRWManager,
(*globals)->ppool,
(*globals)->rawStorage,
(*globals)->ftable,
(*globals)->hashMap,
temp);
cudaThreadSynchronize();
CUDA_SAFE_CALL(cudaPeekAtLastError());
// pthread_attr_t attr;
// pthread_attr_init( &attr );
// pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
//
// (*globals)->done = 0;
//
// for( int i = 0; i < RW_HOST_WORKERS; ++i )
// {
// (*globals)->rwLoopTasksData[i].id = i;
// (*globals)->rwLoopTasksData[i].gpuGlobals = *globals;
// (*globals)->rwLoopTasksData[i].gpuid = 0;
//
// pthread_create( (pthread_t*)&((*globals)->rwLoopTasksIDs[i]), &attr, rw_task, (TaskData*)&((*globals)->rwLoopTasksData[i]) );
// }
//
// pthread_attr_destroy( &attr );
}
|
e34239789e4b961a3f2f187637b9950afebae12e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "cudaGFlopTimer.cuh"
using namespace std;
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
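// Forms the d x d product of the matrix with its own transpose via a single SGEMM
// (OP_T x OP_N on the same device buffer); note beta = 1.0f, so the product is
// accumulated onto whatever h_r initially contains.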
float cuBLAS_MxMT(float *h_m, float *h_r, int d){
cudaGFlopTimer *tr = new cudaGFlopTimer();
float *d_m, *d_r;
hipMalloc((void **) &d_m, d*d*sizeof(float));
hipMalloc((void **) &d_r, d*d*sizeof(float));
hipMemcpy(d_m, h_m, d*d*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_r, h_r, d*d*sizeof(float), hipMemcpyHostToDevice);
hipblasHandle_t handle;
hipblasCreate(&handle);
float alpha = 1.0f, beta = 1.0f;
// calling the built-in BLAS library to execute the computation
tr->start();
hipblasSgemm(handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
d, d ,d,
&alpha,
d_m, d,
d_m, d,
&beta,
d_r, d);
//timer->stop();
hipMemcpy(h_r, d_r, d*d*sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(d_m);
hipFree(d_r);
float Gflops = 0; //= timer->getGFlops(d);
return Gflops;
}
int main(int argc, char *argv[]){
Hi *hi = new Hi();
cudaGFlopTimer *cgt = new cudaGFlopTimer();
cgt->start();
cgt->stop();
cgt->getElapsedTime();
cgt->getGFlops(100);
hi->x = 2;
delete hi;
delete cgt;
return 0;
}
| e34239789e4b961a3f2f187637b9950afebae12e.cu | #include <iostream>
#include "cudaGFlopTimer.cuh"
using namespace std;
#include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
float cuBLAS_MxMT(float *h_m, float *h_r, int d){
cudaGFlopTimer *tr = new cudaGFlopTimer();
float *d_m, *d_r;
cudaMalloc((void **) &d_m, d*d*sizeof(float));
cudaMalloc((void **) &d_r, d*d*sizeof(float));
cudaMemcpy(d_m, h_m, d*d*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_r, h_r, d*d*sizeof(float), cudaMemcpyHostToDevice);
cublasHandle_t handle;
cublasCreate(&handle);
float alpha = 1.0f, beta = 1.0f;
// calling the CUDA built-in BLAS library (cuBLAS) to execute the computation
tr->start();
cublasSgemm_v2(handle,
CUBLAS_OP_T, CUBLAS_OP_N,
d, d ,d,
&alpha,
d_m, d,
d_m, d,
&beta,
d_r, d);
//timer->stop();
cudaMemcpy(h_r, d_r, d*d*sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(d_m);
cudaFree(d_r);
float Gflops = 0; //= timer->getGFlops(d);
return Gflops;
}
int main(int argc, char *argv[]){
Hi *hi = new Hi();
cudaGFlopTimer *cgt = new cudaGFlopTimer();
cgt->start();
cgt->stop();
cgt->getElapsedTime();
cgt->getGFlops(100);
hi->x = 2;
delete hi;
delete cgt;
return 0;
}
|
35f4269d5430ce604c0f8c377313543789657992.hip | // !!! This is a file automatically generated by hipify!!!
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* File: cuda_kmeans.cu (CUDA version) */
/* Description: Implementation of simple k-means clustering algorithm */
/* This program takes an array of N data objects, each with */
/* M coordinates and performs a k-means clustering given a */
/* user-provided value of the number of clusters (K). The */
/* clustering results are saved in 2 arrays: */
/* 1. a returned array of size [K][N] indicating the center */
/* coordinates of K clusters */
/* 2. membership[N] stores the cluster center ids, each */
/* corresponding to the cluster a data object is assigned */
/* */
/* Author: Wei-keng Liao */
/* ECE Department, Northwestern University */
/* email: [email protected] */
/* Copyright, 2005, Wei-keng Liao */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
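/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Editor's illustrative usage sketch (assumption, not part of the original */
/* file), based on the cuda_kmeans() signature defined below: */
/* */
/* int *membership = (int*) malloc(numObjs * sizeof(int)); */
/* int loops; */
/* float **clusters = cuda_kmeans(objects, numCoords, numObjs, */
/* numClusters, threshold, */
/* membership, &loops); */
/* */
/* clusters is [numClusters][numCoords]; membership[i] holds the id of the */
/* cluster that object i was assigned to. */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */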
// Copyright (c) 2005 Wei-keng Liao
// Copyright (c) 2011 Serban Giuroiu
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "kmeans.h"
static inline int nextPowerOfTwo(int n) {
n--;
n = n >> 1 | n;
n = n >> 2 | n;
n = n >> 4 | n;
n = n >> 8 | n;
n = n >> 16 | n;
// n = n >> 32 | n; // For 64-bit ints
return ++n;
}
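/* Editor's note (worked example, not part of the original file): the bit-smearing above
rounds n up to the next power of two, e.g. nextPowerOfTwo(5) == 8 and
nextPowerOfTwo(64) == 64, which is what compute_delta() below requires for its
tree reduction. */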
/*----< euclid_dist_2() >----------------------------------------------------*/
/* square of Euclid distance between two multi-dimensional points */
__host__ __device__ inline static
float euclid_dist_2(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *clusters, // [numCoords][numClusters]
int objectId,
int clusterId)
{
int i;
float ans=0.0;
for (i = 0; i < numCoords; i++) {
ans += (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]) *
(objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]);
}
return(ans);
}
/*----< find_nearest_cluster() >---------------------------------------------*/
__global__ static
void find_nearest_cluster(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *deviceClusters, // [numCoords][numClusters]
int *membership, // [numObjs]
int *intermediates)
{
extern __shared__ char sharedMemory[];
// The type chosen for membershipChanged must be large enough to support
// reductions! There are blockDim.x elements, one for each thread in the
// block. See numThreadsPerClusterBlock in cuda_kmeans().
unsigned char *membershipChanged = (unsigned char *)sharedMemory;
#if BLOCK_SHARED_MEM_OPTIMIZATION
float *clusters = (float *)(sharedMemory + blockDim.x);
#else
float *clusters = deviceClusters;
#endif
membershipChanged[threadIdx.x] = 0;
#if BLOCK_SHARED_MEM_OPTIMIZATION
// BEWARE: We can overrun our shared memory here if there are too many
// clusters or too many coordinates! For reference, a Tesla C1060 has 16
// KiB of shared memory per block, and a GeForce GTX 480 has 48 KiB of
// shared memory per block.
for (int i = threadIdx.x; i < numClusters; i += blockDim.x) {
for (int j = 0; j < numCoords; j++) {
clusters[numClusters * j + i] = deviceClusters[numClusters * j + i];
}
}
__syncthreads();
#endif
int objectId = blockDim.x * blockIdx.x + threadIdx.x;
if (objectId < numObjs) {
int index, i;
float dist, min_dist;
/* find the cluster id that has min distance to object */
index = 0;
min_dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, 0);
for (i=1; i<numClusters; i++) {
dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, i);
/* no need square root */
if (dist < min_dist) { /* find the min and its array index */
min_dist = dist;
index = i;
}
}
if (membership[objectId] != index) {
membershipChanged[threadIdx.x] = 1;
}
/* assign the membership to object objectId */
membership[objectId] = index;
__syncthreads(); // For membershipChanged[]
// blockDim.x *must* be a power of two!
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
membershipChanged[threadIdx.x] +=
membershipChanged[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
intermediates[blockIdx.x] = membershipChanged[0];
}
}
}
__global__ static
void compute_delta(int *deviceIntermediates,
int numIntermediates, // The actual number of intermediates
int numIntermediates2) // The next power of two
{
// The number of elements in this array should be equal to
// numIntermediates2, the number of threads launched. It *must* be a power
// of two!
extern __shared__ unsigned int intermediates[];
// Copy global intermediate values into shared memory.
intermediates[threadIdx.x] =
(threadIdx.x < numIntermediates) ? deviceIntermediates[threadIdx.x] : 0;
__syncthreads();
// numIntermediates2 *must* be a power of two!
for (unsigned int s = numIntermediates2 / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
intermediates[threadIdx.x] += intermediates[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
deviceIntermediates[0] = intermediates[0];
}
}
/*----< cuda_kmeans() >-------------------------------------------------------*/
//
// ----------------------------------------
// DATA LAYOUT
//
// objects [numObjs][numCoords]
// clusters [numClusters][numCoords]
// dimObjects [numCoords][numObjs]
// dimClusters [numCoords][numClusters]
// newClusters [numCoords][numClusters]
// deviceObjects [numCoords][numObjs]
// deviceClusters [numCoords][numClusters]
// ----------------------------------------
//
/* return an array of cluster centers of size [numClusters][numCoords] */
float** cuda_kmeans(float **objects, /* in: [numObjs][numCoords] */
int numCoords, /* no. features */
int numObjs, /* no. objects */
int numClusters, /* no. clusters */
float threshold, /* % objects change membership */
int *membership, /* out: [numObjs] */
int *loop_iterations)
{
hipSetDevice(0);
int i, j, index, loop=0;
int *newClusterSize; /* [numClusters]: no. objects assigned in each
new cluster */
float delta; /* % of objects change their clusters */
float **dimObjects;
float **clusters; /* out: [numClusters][numCoords] */
float **dimClusters;
float **newClusters; /* [numCoords][numClusters] */
float *deviceObjects;
float *deviceClusters;
int *deviceMembership;
int *deviceIntermediates;
// Copy objects given in [numObjs][numCoords] layout to new
// [numCoords][numObjs] layout
printf("before memory allocation\n");
malloc2D(dimObjects, numCoords, numObjs, float);
printf("after memory allocation\n");
for (i = 0; i < numCoords; i++) {
for (j = 0; j < numObjs; j++) {
dimObjects[i][j] = objects[j][i];
}
}
/* pick first numClusters elements of objects[] as initial cluster centers*/
malloc2D(dimClusters, numCoords, numClusters, float);
for (i = 0; i < numCoords; i++) {
for (j = 0; j < numClusters; j++) {
dimClusters[i][j] = dimObjects[i][j];
}
}
/* initialize membership[] */
for (i=0; i<numObjs; i++) membership[i] = -1;
/* need to initialize newClusterSize and newClusters[0] to all 0 */
newClusterSize = (int*) calloc(numClusters, sizeof(int));
assert(newClusterSize != NULL);
malloc2D(newClusters, numCoords, numClusters, float);
memset(newClusters[0], 0, numCoords * numClusters * sizeof(float));
// To support reduction, numThreadsPerClusterBlock *must* be a power of
// two, and it *must* be no larger than the number of bits that will
// fit into an unsigned char, the type used to keep track of membership
// changes in the kernel.
const unsigned int numThreadsPerClusterBlock = 128;
const unsigned int numClusterBlocks =
(numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock;
#if BLOCK_SHARED_MEM_OPTIMIZATION
const unsigned int clusterBlockSharedDataSize =
numThreadsPerClusterBlock * sizeof(unsigned char) +
numClusters * numCoords * sizeof(float);
hipDeviceProp_t deviceProp;
int deviceNum;
hipGetDevice(&deviceNum);
hipGetDeviceProperties(&deviceProp, deviceNum);
if (clusterBlockSharedDataSize > deviceProp.sharedMemPerBlock) {
err("WARNING: Your CUDA hardware has insufficient block shared memory. "
"You need to recompile with BLOCK_SHARED_MEM_OPTIMIZATION=0. "
"See the README for details.\n");
}
#else
const unsigned int clusterBlockSharedDataSize =
numThreadsPerClusterBlock * sizeof(unsigned char);
#endif
const unsigned int numReductionThreads =
nextPowerOfTwo(numClusterBlocks);
const unsigned int reductionBlockSharedDataSize =
numReductionThreads * sizeof(unsigned int);
checkCuda(hipMalloc(&deviceObjects, numObjs*numCoords*sizeof(float)));
checkCuda(hipMalloc(&deviceClusters, numClusters*numCoords*sizeof(float)));
checkCuda(hipMalloc(&deviceMembership, numObjs*sizeof(int)));
checkCuda(hipMalloc(&deviceIntermediates, numReductionThreads*sizeof(unsigned int)));
checkCuda(hipMemcpy(deviceObjects, dimObjects[0],
numObjs*numCoords*sizeof(float), hipMemcpyHostToDevice));
checkCuda(hipMemcpy(deviceMembership, membership,
numObjs*sizeof(int), hipMemcpyHostToDevice));
do {
checkCuda(hipMemcpy(deviceClusters, dimClusters[0],
numClusters*numCoords*sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( find_nearest_cluster)
, dim3(numClusterBlocks), dim3(numThreadsPerClusterBlock), clusterBlockSharedDataSize , 0,
numCoords, numObjs, numClusters,
deviceObjects, deviceClusters, deviceMembership, deviceIntermediates);
hipDeviceSynchronize(); checkLastCudaError();
hipLaunchKernelGGL(( compute_delta) , dim3(1), dim3(numReductionThreads), reductionBlockSharedDataSize , 0,
deviceIntermediates, numClusterBlocks, numReductionThreads);
hipDeviceSynchronize(); checkLastCudaError();
int d;
checkCuda(hipMemcpy(&d, deviceIntermediates,
sizeof(int), hipMemcpyDeviceToHost));
delta = (float)d;
checkCuda(hipMemcpy(membership, deviceMembership,
numObjs*sizeof(int), hipMemcpyDeviceToHost));
for (i=0; i<numObjs; i++) {
/* find the array index of the nearest cluster center */
index = membership[i];
/* update new cluster centers : sum of objects located within */
newClusterSize[index]++;
for (j=0; j<numCoords; j++)
newClusters[j][index] += objects[i][j];
}
// TODO: Flip the nesting order
// TODO: Change layout of newClusters to [numClusters][numCoords]
/* average the sum and replace old cluster centers with newClusters */
for (i=0; i<numClusters; i++) {
for (j=0; j<numCoords; j++) {
if (newClusterSize[i] > 0)
dimClusters[j][i] = newClusters[j][i] / newClusterSize[i];
newClusters[j][i] = 0.0; /* set back to 0 */
}
newClusterSize[i] = 0; /* set back to 0 */
}
delta /= numObjs;
printf("%d\n",loop);
} while (delta > threshold && loop++ < 2000);
*loop_iterations = loop + 1;
/* allocate a 2D space for returning variable clusters[] (coordinates
of cluster centers) */
malloc2D(clusters, numClusters, numCoords, float);
for (i = 0; i < numClusters; i++) {
for (j = 0; j < numCoords; j++) {
clusters[i][j] = dimClusters[j][i];
}
}
checkCuda(hipFree(deviceObjects));
checkCuda(hipFree(deviceClusters));
checkCuda(hipFree(deviceMembership));
checkCuda(hipFree(deviceIntermediates));
free(dimObjects[0]);
free(dimObjects);
free(dimClusters[0]);
free(dimClusters);
free(newClusters[0]);
free(newClusters);
free(newClusterSize);
return clusters;
}
| 35f4269d5430ce604c0f8c377313543789657992.cu | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* File: cuda_kmeans.cu (CUDA version) */
/* Description: Implementation of simple k-means clustering algorithm */
/* This program takes an array of N data objects, each with */
/* M coordinates and performs a k-means clustering given a */
/* user-provided value of the number of clusters (K). The */
/* clustering results are saved in 2 arrays: */
/* 1. a returned array of size [K][N] indicating the center */
/* coordinates of K clusters */
/* 2. membership[N] stores the cluster center ids, each */
/* corresponding to the cluster a data object is assigned */
/* */
/* Author: Wei-keng Liao */
/* ECE Department, Northwestern University */
/* email: [email protected] */
/* Copyright, 2005, Wei-keng Liao */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Copyright (c) 2005 Wei-keng Liao
// Copyright (c) 2011 Serban Giuroiu
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "kmeans.h"
static inline int nextPowerOfTwo(int n) {
n--;
n = n >> 1 | n;
n = n >> 2 | n;
n = n >> 4 | n;
n = n >> 8 | n;
n = n >> 16 | n;
// n = n >> 32 | n; // For 64-bit ints
return ++n;
}
/*----< euclid_dist_2() >----------------------------------------------------*/
/* square of Euclid distance between two multi-dimensional points */
__host__ __device__ inline static
float euclid_dist_2(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *clusters, // [numCoords][numClusters]
int objectId,
int clusterId)
{
int i;
float ans=0.0;
for (i = 0; i < numCoords; i++) {
ans += (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]) *
(objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]);
}
return(ans);
}
/*----< find_nearest_cluster() >---------------------------------------------*/
__global__ static
void find_nearest_cluster(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *deviceClusters, // [numCoords][numClusters]
int *membership, // [numObjs]
int *intermediates)
{
extern __shared__ char sharedMemory[];
// The type chosen for membershipChanged must be large enough to support
// reductions! There are blockDim.x elements, one for each thread in the
// block. See numThreadsPerClusterBlock in cuda_kmeans().
unsigned char *membershipChanged = (unsigned char *)sharedMemory;
#if BLOCK_SHARED_MEM_OPTIMIZATION
float *clusters = (float *)(sharedMemory + blockDim.x);
#else
float *clusters = deviceClusters;
#endif
membershipChanged[threadIdx.x] = 0;
#if BLOCK_SHARED_MEM_OPTIMIZATION
// BEWARE: We can overrun our shared memory here if there are too many
// clusters or too many coordinates! For reference, a Tesla C1060 has 16
// KiB of shared memory per block, and a GeForce GTX 480 has 48 KiB of
// shared memory per block.
for (int i = threadIdx.x; i < numClusters; i += blockDim.x) {
for (int j = 0; j < numCoords; j++) {
clusters[numClusters * j + i] = deviceClusters[numClusters * j + i];
}
}
__syncthreads();
#endif
int objectId = blockDim.x * blockIdx.x + threadIdx.x;
if (objectId < numObjs) {
int index, i;
float dist, min_dist;
/* find the cluster id that has min distance to object */
index = 0;
min_dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, 0);
for (i=1; i<numClusters; i++) {
dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, i);
/* no need square root */
if (dist < min_dist) { /* find the min and its array index */
min_dist = dist;
index = i;
}
}
if (membership[objectId] != index) {
membershipChanged[threadIdx.x] = 1;
}
/* assign the membership to object objectId */
membership[objectId] = index;
__syncthreads(); // For membershipChanged[]
// blockDim.x *must* be a power of two!
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
membershipChanged[threadIdx.x] +=
membershipChanged[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
intermediates[blockIdx.x] = membershipChanged[0];
}
}
}
__global__ static
void compute_delta(int *deviceIntermediates,
int numIntermediates, // The actual number of intermediates
int numIntermediates2) // The next power of two
{
// The number of elements in this array should be equal to
// numIntermediates2, the number of threads launched. It *must* be a power
// of two!
extern __shared__ unsigned int intermediates[];
// Copy global intermediate values into shared memory.
intermediates[threadIdx.x] =
(threadIdx.x < numIntermediates) ? deviceIntermediates[threadIdx.x] : 0;
__syncthreads();
// numIntermediates2 *must* be a power of two!
for (unsigned int s = numIntermediates2 / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
intermediates[threadIdx.x] += intermediates[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
deviceIntermediates[0] = intermediates[0];
}
}
/*----< cuda_kmeans() >-------------------------------------------------------*/
//
// ----------------------------------------
// DATA LAYOUT
//
// objects [numObjs][numCoords]
// clusters [numClusters][numCoords]
// dimObjects [numCoords][numObjs]
// dimClusters [numCoords][numClusters]
// newClusters [numCoords][numClusters]
// deviceObjects [numCoords][numObjs]
// deviceClusters [numCoords][numClusters]
// ----------------------------------------
//
/* return an array of cluster centers of size [numClusters][numCoords] */
float** cuda_kmeans(float **objects, /* in: [numObjs][numCoords] */
int numCoords, /* no. features */
int numObjs, /* no. objects */
int numClusters, /* no. clusters */
float threshold, /* % objects change membership */
int *membership, /* out: [numObjs] */
int *loop_iterations)
{
cudaSetDevice(0);
int i, j, index, loop=0;
int *newClusterSize; /* [numClusters]: no. objects assigned in each
new cluster */
float delta; /* % of objects change their clusters */
float **dimObjects;
float **clusters; /* out: [numClusters][numCoords] */
float **dimClusters;
float **newClusters; /* [numCoords][numClusters] */
float *deviceObjects;
float *deviceClusters;
int *deviceMembership;
int *deviceIntermediates;
// Copy objects given in [numObjs][numCoords] layout to new
// [numCoords][numObjs] layout
printf("before memory allocation\n");
malloc2D(dimObjects, numCoords, numObjs, float);
printf("after memory allocation\n");
for (i = 0; i < numCoords; i++) {
for (j = 0; j < numObjs; j++) {
dimObjects[i][j] = objects[j][i];
}
}
/* pick first numClusters elements of objects[] as initial cluster centers*/
malloc2D(dimClusters, numCoords, numClusters, float);
for (i = 0; i < numCoords; i++) {
for (j = 0; j < numClusters; j++) {
dimClusters[i][j] = dimObjects[i][j];
}
}
/* initialize membership[] */
for (i=0; i<numObjs; i++) membership[i] = -1;
/* need to initialize newClusterSize and newClusters[0] to all 0 */
newClusterSize = (int*) calloc(numClusters, sizeof(int));
assert(newClusterSize != NULL);
malloc2D(newClusters, numCoords, numClusters, float);
memset(newClusters[0], 0, numCoords * numClusters * sizeof(float));
// To support reduction, numThreadsPerClusterBlock *must* be a power of
// two, and it *must* be no larger than the number of bits that will
// fit into an unsigned char, the type used to keep track of membership
// changes in the kernel.
const unsigned int numThreadsPerClusterBlock = 128;
const unsigned int numClusterBlocks =
(numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock;
#if BLOCK_SHARED_MEM_OPTIMIZATION
const unsigned int clusterBlockSharedDataSize =
numThreadsPerClusterBlock * sizeof(unsigned char) +
numClusters * numCoords * sizeof(float);
cudaDeviceProp deviceProp;
int deviceNum;
cudaGetDevice(&deviceNum);
cudaGetDeviceProperties(&deviceProp, deviceNum);
if (clusterBlockSharedDataSize > deviceProp.sharedMemPerBlock) {
err("WARNING: Your CUDA hardware has insufficient block shared memory. "
"You need to recompile with BLOCK_SHARED_MEM_OPTIMIZATION=0. "
"See the README for details.\n");
}
#else
const unsigned int clusterBlockSharedDataSize =
numThreadsPerClusterBlock * sizeof(unsigned char);
#endif
const unsigned int numReductionThreads =
nextPowerOfTwo(numClusterBlocks);
const unsigned int reductionBlockSharedDataSize =
numReductionThreads * sizeof(unsigned int);
checkCuda(cudaMalloc(&deviceObjects, numObjs*numCoords*sizeof(float)));
checkCuda(cudaMalloc(&deviceClusters, numClusters*numCoords*sizeof(float)));
checkCuda(cudaMalloc(&deviceMembership, numObjs*sizeof(int)));
checkCuda(cudaMalloc(&deviceIntermediates, numReductionThreads*sizeof(unsigned int)));
checkCuda(cudaMemcpy(deviceObjects, dimObjects[0],
numObjs*numCoords*sizeof(float), cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(deviceMembership, membership,
numObjs*sizeof(int), cudaMemcpyHostToDevice));
do {
checkCuda(cudaMemcpy(deviceClusters, dimClusters[0],
numClusters*numCoords*sizeof(float), cudaMemcpyHostToDevice));
find_nearest_cluster
<<< numClusterBlocks, numThreadsPerClusterBlock, clusterBlockSharedDataSize >>>
(numCoords, numObjs, numClusters,
deviceObjects, deviceClusters, deviceMembership, deviceIntermediates);
cudaDeviceSynchronize(); checkLastCudaError();
compute_delta <<< 1, numReductionThreads, reductionBlockSharedDataSize >>>
(deviceIntermediates, numClusterBlocks, numReductionThreads);
cudaDeviceSynchronize(); checkLastCudaError();
int d;
checkCuda(cudaMemcpy(&d, deviceIntermediates,
sizeof(int), cudaMemcpyDeviceToHost));
delta = (float)d;
checkCuda(cudaMemcpy(membership, deviceMembership,
numObjs*sizeof(int), cudaMemcpyDeviceToHost));
for (i=0; i<numObjs; i++) {
/* find the array index of the nearest cluster center */
index = membership[i];
/* update new cluster centers : sum of objects located within */
newClusterSize[index]++;
for (j=0; j<numCoords; j++)
newClusters[j][index] += objects[i][j];
}
// TODO: Flip the nesting order
// TODO: Change layout of newClusters to [numClusters][numCoords]
/* average the sum and replace old cluster centers with newClusters */
for (i=0; i<numClusters; i++) {
for (j=0; j<numCoords; j++) {
if (newClusterSize[i] > 0)
dimClusters[j][i] = newClusters[j][i] / newClusterSize[i];
newClusters[j][i] = 0.0; /* set back to 0 */
}
newClusterSize[i] = 0; /* set back to 0 */
}
delta /= numObjs;
printf("%d\n",loop);
} while (delta > threshold && loop++ < 2000);
*loop_iterations = loop + 1;
/* allocate a 2D space for returning variable clusters[] (coordinates
of cluster centers) */
malloc2D(clusters, numClusters, numCoords, float);
for (i = 0; i < numClusters; i++) {
for (j = 0; j < numCoords; j++) {
clusters[i][j] = dimClusters[j][i];
}
}
checkCuda(cudaFree(deviceObjects));
checkCuda(cudaFree(deviceClusters));
checkCuda(cudaFree(deviceMembership));
checkCuda(cudaFree(deviceIntermediates));
free(dimObjects[0]);
free(dimObjects);
free(dimClusters[0]);
free(dimClusters);
free(newClusters[0]);
free(newClusters);
free(newClusterSize);
return clusters;
}
|
b2848fdf068e814dcb013eb54dc32ba9b8c130fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <stdint.h>
#include <sys/time.h>
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 128
#endif
#ifndef BLOCK_COUNT
#define BLOCK_COUNT 128
#endif
#define CENTER_X -0.75
#define CENTER_Y 0.0
#define ZOOM (float(height) / 2.5)
__global__ void mandelbrot(unsigned* dim, float* output, unsigned iterations) {
unsigned width = dim[0];
unsigned height = dim[1];
unsigned tid = blockDim.x * blockIdx.x + threadIdx.x;
for(; tid < width * height; tid += blockDim.x * gridDim.x) {
float x = tid % width;
float y = tid / width;
x -= width / 2.0;
y -= height / 2.0;
x /= ZOOM;
y /= ZOOM;
x += CENTER_X;
y += CENTER_Y;
float a = 0.0, b = 0.0;
for(unsigned i = 0; i < iterations; i++) {
float tmp_a = a * a - b * b + x;
b = 2.0 * a * b + y;
a = tmp_a;
}
output[tid] = a * a + b * b;
}
}
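// Editor's illustrative sketch (assumption, not part of the original file): a scalar CPU
// reference for a single point (x, y) of the complex plane (i.e. after the CENTER/ZOOM
// mapping done in the kernel), mirroring the escape-time iteration z_{n+1} = z_n^2 + c
// performed above; handy for spot-checking a few values of out[].
static float mandelbrot_reference(float x, float y, unsigned iterations) {
float a = 0.0f, b = 0.0f; // z_0 = 0
for(unsigned i = 0; i < iterations; i++) {
float tmp_a = a * a - b * b + x; // Re(z^2 + c)
b = 2.0f * a * b + y; // Im(z^2 + c)
a = tmp_a;
}
return a * a + b * b; // |z_N|^2, compared against 2.0 in main()
}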
int main(int argc, char* argv[]) {
assert(argc == 4);
unsigned WIDTH, HEIGHT, ITERATIONS;
WIDTH = atoi(argv[1]);
HEIGHT = atoi(argv[2]);
ITERATIONS = atoi(argv[3]);
unsigned* host_dim;
float* host_output;
unsigned* device_dim;
float* device_output;
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
host_dim = (unsigned*)malloc(2 * sizeof(unsigned));
assert(host_dim);
host_output = (float*)malloc(WIDTH * HEIGHT * sizeof(float));
assert(host_output);
hipMalloc(&device_dim, 2 * sizeof(unsigned));
hipMalloc(&device_output, WIDTH * HEIGHT * sizeof(float));
host_dim[0] = WIDTH;
host_dim[1] = HEIGHT;
hipMemcpy(device_dim, host_dim, 2 * sizeof(unsigned), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mandelbrot), dim3(BLOCK_COUNT), dim3(BLOCK_SIZE), 0, 0, device_dim, device_output, ITERATIONS);
hipMemcpy(host_output, device_output, WIDTH * HEIGHT * sizeof(float), hipMemcpyDeviceToHost);
hipFree(device_output);
hipFree(device_dim);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
FILE* output = fopen("out.ppm", "w");
fprintf(output, "P2\n%u %u\n255\n", host_dim[0], host_dim[1]);
for(unsigned i = 0; i < WIDTH * HEIGHT; i++) {
fprintf(output, "%d\n", (host_output[i] <= 2.0) ? (0) : (255));
}
fclose(output);
free(host_dim);
free(host_output);
uint64_t delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
double delta = double(delta_us) / 1e6;
printf("%.9lf\n", delta);
return 0;
}
| b2848fdf068e814dcb013eb54dc32ba9b8c130fb.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <stdint.h>
#include <sys/time.h>
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 128
#endif
#ifndef BLOCK_COUNT
#define BLOCK_COUNT 128
#endif
#define CENTER_X -0.75
#define CENTER_Y 0.0
#define ZOOM (float(height) / 2.5)
__global__ void mandelbrot(unsigned* dim, float* output, unsigned iterations) {
unsigned width = dim[0];
unsigned height = dim[1];
unsigned tid = blockDim.x * blockIdx.x + threadIdx.x;
for(; tid < width * height; tid += blockDim.x * gridDim.x) {
float x = tid % width;
float y = tid / width;
x -= width / 2.0;
y -= height / 2.0;
x /= ZOOM;
y /= ZOOM;
x += CENTER_X;
y += CENTER_Y;
float a = 0.0, b = 0.0;
for(unsigned i = 0; i < iterations; i++) {
float tmp_a = a * a - b * b + x;
b = 2.0 * a * b + y;
a = tmp_a;
}
output[tid] = a * a + b * b;
}
}
int main(int argc, char* argv[]) {
assert(argc == 4);
unsigned WIDTH, HEIGHT, ITERATIONS;
WIDTH = atoi(argv[1]);
HEIGHT = atoi(argv[2]);
ITERATIONS = atoi(argv[3]);
unsigned* host_dim;
float* host_output;
unsigned* device_dim;
float* device_output;
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
host_dim = (unsigned*)malloc(2 * sizeof(unsigned));
assert(host_dim);
host_output = (float*)malloc(WIDTH * HEIGHT * sizeof(float));
assert(host_output);
cudaMalloc(&device_dim, 2 * sizeof(unsigned));
cudaMalloc(&device_output, WIDTH * HEIGHT * sizeof(float));
host_dim[0] = WIDTH;
host_dim[1] = HEIGHT;
cudaMemcpy(device_dim, host_dim, 2 * sizeof(unsigned), cudaMemcpyHostToDevice);
mandelbrot<<<BLOCK_COUNT, BLOCK_SIZE>>>(device_dim, device_output, ITERATIONS);
cudaMemcpy(host_output, device_output, WIDTH * HEIGHT * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(device_output);
cudaFree(device_dim);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
FILE* output = fopen("out.ppm", "w");
fprintf(output, "P2\n%u %u\n255\n", host_dim[0], host_dim[1]);
for(unsigned i = 0; i < WIDTH * HEIGHT; i++) {
fprintf(output, "%d\n", (host_output[i] <= 2.0) ? (0) : (255));
}
fclose(output);
free(host_dim);
free(host_output);
uint64_t delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
double delta = double(delta_us) / 1e6;
printf("%.9lf\n", delta);
return 0;
}
|
d45b194649833d45df7e756cc1cae6559c1c2af9.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdlib.h>
#include<stdio.h>
#include<string.h>
#include<hip/hip_runtime_api.h>
#include<cusparse_v2.h>
#include<rocblas.h>
#include"CG.h"
int gpu_solver(const struct N n, const REAL *b, const REAL* x, REAL resreduction){
unsigned int maxiter=20;
unsigned int k;
unsigned long n_lin=n.x*n.y*n.z;
REAL* r;
REAL* z;
REAL* p;
REAL* q;
REAL* tq;
const REAL negone=-1.0;
N *dev_n,*l_n;
hipHostMalloc(&r, n_lin*sizeof(REAL));
hipHostMalloc(&z, n_lin*sizeof(REAL));
hipHostMalloc(&p, n_lin*sizeof(REAL));
hipHostMalloc(&q, n_lin*sizeof(REAL));
hipHostMalloc(&l_n, n_lin*sizeof(N));
hipHostMalloc(&tq,n_lin*sizeof(REAL)); // for testing the apply() function
REAL alpha, beta, temp;
REAL rnorm, rnorm0, rnorm_old,rz, rznew;
//GPU Memory Allocation
REAL *dev_x,*dev_b,*dev_r,*dev_z,*dev_p,*dev_q;
hipMalloc((void**)&dev_x,n_lin*sizeof(REAL));
hipMalloc((void**)&dev_b,n_lin*sizeof(REAL));
hipMalloc((void**)&dev_r,n_lin*sizeof(REAL));
hipMalloc((void**)&dev_z,n_lin*sizeof(REAL));
hipMalloc((void**)&dev_p,n_lin*sizeof(REAL));
hipMalloc((void**)&dev_q,n_lin*sizeof(REAL));
hipMalloc((void**)&dev_n,sizeof(N));
//Memory copy
l_n->x=n.x;
l_n->y=n.y;
l_n->z=n.z;
hipMemcpy(dev_n,l_n,sizeof(N),hipMemcpyHostToDevice);
hipMemcpy(dev_x,x,n_lin*sizeof(REAL),hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,n_lin*sizeof(REAL),hipMemcpyHostToDevice);
//Initialise CUBLAS
hipblasHandle_t cublasHandle=0;
hipblasCreate(&cublasHandle);
/* Initialise CG solver (Iteration 0) */
hipblasScopy(cublasHandle,n_lin,dev_b,1,dev_r,1);
gpu_apply(l_n,dev_x,dev_q);
hipblasSaxpy(cublasHandle,n_lin,&negone,dev_q,1,dev_r,1);//r_0=b_0-Ax_0
if(use_prec){
// gpu_bj(l_n,dev_r,dev_z);
}
else
hipblasScopy(cublasHandle,n_lin,dev_r,1,dev_z,1);
hipblasScopy(cublasHandle,n_lin,dev_z,1,dev_p,1);//r_0->p_0
hipblasSdot(cublasHandle,n_lin,dev_r,1,dev_z,1,&rz);
hipblasSnrm2(cublasHandle,n_lin,dev_r,1,&rnorm0);
rnorm_old=rnorm0;
printf("CG initial residual %8.4e\n",rnorm0);
/*
*CG Iteration
*/
for(k=1;k<2;k++){
gpu_apply(l_n,dev_p,dev_q);
///////////////////////////////////////////////////////////////////////////
hipMemcpy(p,dev_p,n_lin*sizeof(REAL),hipMemcpyDeviceToHost);
hipMemcpy(q,dev_q,n_lin*sizeof(REAL),hipMemcpyDeviceToHost);
apply(n,p,tq);
int err=0;
int j;
for(j=0;j<n_lin;j++){
if(tq[j]!=q[j]){
err++;
printf("%f---%f\n",tq[j],q[j]);
}
}
printf("apply(%d) error=%d\n",k,err);
/////////////////////////////////////////////////////////////////////////
hipblasSdot(cublasHandle,n_lin,dev_p,1,dev_q,1,&temp);
alpha=rz/temp;
float negalpha=0-alpha;
hipblasSaxpy(cublasHandle,n_lin,&alpha,dev_p,1,dev_x,1);
hipblasSaxpy(cublasHandle,n_lin,&negalpha,dev_q,1,dev_r,1);
hipblasSnrm2(cublasHandle,n_lin,dev_r,1,&rnorm);
// printf("iteration %d||r||=%8.3e rho_r=%6.3f, beta=%f, alpha=%f\n",k,rnorm,rnorm/rnorm_old,beta,alpha);
if(rnorm/rnorm0<resreduction) break;
if(use_prec){
//gpu_bj(l_n,dev_r,dev_z);
}
else
hipblasScopy(cublasHandle,n_lin,dev_r,1,dev_z,1);
hipblasSdot(cublasHandle,n_lin,dev_r,1,dev_z,1,&rznew);
beta=rznew/rz;
hipblasSaxpy(cublasHandle,n_lin,&beta,dev_z,1,dev_p,1);
rz=rznew;
rnorm_old=rnorm;
}
hipblasDestroy(cublasHandle);
hipFree(dev_r);
hipFree(dev_z);
hipFree(dev_p);
hipFree(dev_q);
return 0;
}
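/*
* Editor's note (reference only, not part of the original file). For comparison, the
* textbook preconditioned conjugate-gradient recurrences are
*
* alpha_k = (r_k, z_k) / (p_k, A p_k)
* x_{k+1} = x_k + alpha_k p_k r_{k+1} = r_k - alpha_k A p_k
* z_{k+1} = M^{-1} r_{k+1} (z = r when use_prec is off)
* beta_k = (r_{k+1}, z_{k+1}) / (r_k, z_k)
* p_{k+1} = z_{k+1} + beta_k p_k
*
* In the solver above, A p is produced by gpu_apply() and the dot products, copies and
* axpy updates are delegated to the (cu/hip)BLAS calls.
*/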
| d45b194649833d45df7e756cc1cae6559c1c2af9.cu | #include<stdlib.h>
#include<stdio.h>
#include<string.h>
#include<cuda_runtime_api.h>
#include<cusparse_v2.h>
#include<cublas_v2.h>
#include"CG.h"
int gpu_solver(const struct N n, const REAL *b, const REAL* x, REAL resreduction){
unsigned int maxiter=20;
unsigned int k;
unsigned long n_lin=n.x*n.y*n.z;
REAL* r;
REAL* z;
REAL* p;
REAL* q;
REAL* tq;
const REAL negone=-1.0;
N *dev_n,*l_n;
cudaMallocHost(&r, n_lin*sizeof(REAL));
cudaMallocHost(&z, n_lin*sizeof(REAL));
cudaMallocHost(&p, n_lin*sizeof(REAL));
cudaMallocHost(&q, n_lin*sizeof(REAL));
cudaMallocHost(&l_n, n_lin*sizeof(N));
cudaMallocHost(&tq,n_lin*sizeof(REAL)); // for testing the apply() function
REAL alpha, beta, temp;
REAL rnorm, rnorm0, rnorm_old,rz, rznew;
//GPU Memory Allocation
REAL *dev_x,*dev_b,*dev_r,*dev_z,*dev_p,*dev_q;
cudaMalloc((void**)&dev_x,n_lin*sizeof(REAL));
cudaMalloc((void**)&dev_b,n_lin*sizeof(REAL));
cudaMalloc((void**)&dev_r,n_lin*sizeof(REAL));
cudaMalloc((void**)&dev_z,n_lin*sizeof(REAL));
cudaMalloc((void**)&dev_p,n_lin*sizeof(REAL));
cudaMalloc((void**)&dev_q,n_lin*sizeof(REAL));
cudaMalloc((void**)&dev_n,sizeof(N));
//Memory copy
l_n->x=n.x;
l_n->y=n.y;
l_n->z=n.z;
cudaMemcpy(dev_n,l_n,sizeof(N),cudaMemcpyHostToDevice);
cudaMemcpy(dev_x,x,n_lin*sizeof(REAL),cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,n_lin*sizeof(REAL),cudaMemcpyHostToDevice);
//Initialise CUBLAS
cublasHandle_t cublasHandle=0;
cublasCreate(&cublasHandle);
/* Initialise CG solver (Iteration 0) */
cublasScopy(cublasHandle,n_lin,dev_b,1,dev_r,1);
gpu_apply(l_n,dev_x,dev_q);
cublasSaxpy(cublasHandle,n_lin,&negone,dev_q,1,dev_r,1);//r_0=b_0-Ax_0
if(use_prec){
// gpu_bj(l_n,dev_r,dev_z);
}
else
cublasScopy(cublasHandle,n_lin,dev_r,1,dev_z,1);
cublasScopy(cublasHandle,n_lin,dev_z,1,dev_p,1);//r_0->p_0
cublasSdot(cublasHandle,n_lin,dev_r,1,dev_z,1,&rz);
cublasSnrm2(cublasHandle,n_lin,dev_r,1,&rnorm0);
rnorm_old=rnorm0;
printf("CG initial residual %8.4e\n",rnorm0);
/*
*CG Iteration
*/
for(k=1;k<2;k++){
gpu_apply(l_n,dev_p,dev_q);
///////////////////////////////////////////////////////////////////////////
cudaMemcpy(p,dev_p,n_lin*sizeof(REAL),cudaMemcpyDeviceToHost);
cudaMemcpy(q,dev_q,n_lin*sizeof(REAL),cudaMemcpyDeviceToHost);
apply(n,p,tq);
int err=0;
int j;
for(j=0;j<n_lin;j++){
if(tq[j]!=q[j]){
err++;
printf("%f---%f\n",tq[j],q[j]);
}
}
printf("apply(%d) error=%d\n",k,err);
/////////////////////////////////////////////////////////////////////////
cublasSdot(cublasHandle,n_lin,dev_p,1,dev_q,1,&temp);
alpha=rz/temp;
float negalpha=0-alpha;
cublasSaxpy(cublasHandle,n_lin,&alpha,dev_p,1,dev_x,1);
cublasSaxpy(cublasHandle,n_lin,&negalpha,dev_q,1,dev_r,1);
cublasSnrm2(cublasHandle,n_lin,dev_r,1,&rnorm);
// printf("iteration %d||r||=%8.3e rho_r=%6.3f, beta=%f, alpha=%f\n",k,rnorm,rnorm/rnorm_old,beta,alpha);
if(rnorm/rnorm0<resreduction) break;
if(use_prec){
//gpu_bj(l_n,dev_r,dev_z);
}
else
cublasScopy(cublasHandle,n_lin,dev_r,1,dev_z,1);
cublasSdot(cublasHandle,n_lin,dev_r,1,dev_z,1,&rznew);
beta=rznew/rz;
cublasSaxpy(cublasHandle,n_lin,&beta,dev_z,1,dev_p,1);
rz=rznew;
rnorm_old=rnorm;
}
cublasDestroy(cublasHandle);
cudaFree(dev_r);
cudaFree(dev_z);
cudaFree(dev_p);
cudaFree(dev_q);
return 0;
}
|
6baeb841651d622f66a437a41f0320e8fb3260de.hip | // !!! This is a file automatically generated by hipify!!!
#if defined(__HIPCC__)
#ifndef BOOST_NOINLINE
# define BOOST_NOINLINE __attribute__ ((noinline))
#endif //BOOST_NOINLINE
#endif //__HIPCC__
#include <cugip/advanced_operations/graph_cut.hpp>
#include <vector>
void
test_graph_cut()
{
using namespace cugip;
cugip::Graph<float> graph;
graph.set_vertex_count(16);
//std::vector<int>
int nlinksVertices1[24] = {
0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
};
//std::vector<int>
int nlinksVertices2[24] = {
1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
EdgeRecord edges[24];
for (int i = 0; i < 24; ++i) {
edges[i] = EdgeRecord(nlinksVertices1[i], nlinksVertices2[i]);
}
//std::vector<float>
float nlinksWeights[24] = {
5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f,
5.0f, 5.0f, 5.0f, 5.0f, 1.0f, 1.0f, 1.0f, 1.0f, 5.0f, 5.0f, 5.0f, 5.0f
};
float tlinksSource[16] = {
100.0f, 100.0f, 100.0f, 100.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
};
float tlinksSink[16] = {
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
100.0f, 100.0f, 100.0f, 100.0f
};
graph.set_nweights(
24,
/*nlinksVertices1,
nlinksVertices2,*/
edges,
nlinksWeights,
nlinksWeights);
graph.set_tweights(
tlinksSource,
tlinksSink
);
float flow = graph.max_flow();
CUGIP_DPRINT("Max flow = " << flow);
}
| 6baeb841651d622f66a437a41f0320e8fb3260de.cu | #if defined(__CUDACC__)
#ifndef BOOST_NOINLINE
# define BOOST_NOINLINE __attribute__ ((noinline))
#endif //BOOST_NOINLINE
#endif //__CUDACC__
#include <cugip/advanced_operations/graph_cut.hpp>
#include <vector>
void
test_graph_cut()
{
using namespace cugip;
cugip::Graph<float> graph;
graph.set_vertex_count(16);
//std::vector<int>
int nlinksVertices1[24] = {
0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
};
//std::vector<int>
int nlinksVertices2[24] = {
1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
EdgeRecord edges[24];
for (int i = 0; i < 24; ++i) {
edges[i] = EdgeRecord(nlinksVertices1[i], nlinksVertices2[i]);
}
//std::vector<float>
float nlinksWeights[24] = {
5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f,
5.0f, 5.0f, 5.0f, 5.0f, 1.0f, 1.0f, 1.0f, 1.0f, 5.0f, 5.0f, 5.0f, 5.0f
};
float tlinksSource[16] = {
100.0f, 100.0f, 100.0f, 100.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
};
float tlinksSink[16] = {
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
100.0f, 100.0f, 100.0f, 100.0f
};
graph.set_nweights(
24,
/*nlinksVertices1,
nlinksVertices2,*/
edges,
nlinksWeights,
nlinksWeights);
graph.set_tweights(
tlinksSource,
tlinksSink
);
float flow = graph.max_flow();
CUGIP_DPRINT("Max flow = " << flow);
}
|
3051b89e9c8c6b7c31e34d06bf25e2f1092b18c5.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=32 --gridDim=1
#include <hip/hip_runtime.h>
__global__ void test_Prog(int *A, int N) {
const int tid = threadIdx.x;
int tmp=A[tid+1];
tmp=tmp+11;
A[tid]+=tmp;
} | 3051b89e9c8c6b7c31e34d06bf25e2f1092b18c5.cu | //pass
//--blockDim=32 --gridDim=1
#include <cuda.h>
__global__ void test_Prog(int *A, int N) {
const int tid = threadIdx.x;
int tmp=A[tid+1];
tmp=tmp+11;
A[tid]+=tmp;
} |
2a173883abadb636214229bb566ade859a2021aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "jacobi_init.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int x_inner = 1;
const int y_inner = 1;
const int halo_depth = 1;
const double *density = NULL;
hipMalloc(&density, XSIZE*YSIZE);
const double *energy = NULL;
hipMalloc(&energy, XSIZE*YSIZE);
const double rx = 1;
const double ry = 1;
double *kx = NULL;
hipMalloc(&kx, XSIZE*YSIZE);
double *ky = NULL;
hipMalloc(&ky, XSIZE*YSIZE);
double *u0 = NULL;
hipMalloc(&u0, XSIZE*YSIZE);
double *u = NULL;
hipMalloc(&u, XSIZE*YSIZE);
const int coefficient = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( jacobi_init), dim3(gridBlock), dim3(threadBlock), 0, 0, x_inner, y_inner, halo_depth, density, energy, rx, ry, kx, ky, u0, u, coefficient);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( jacobi_init), dim3(gridBlock), dim3(threadBlock), 0, 0, x_inner, y_inner, halo_depth, density, energy, rx, ry, kx, ky, u0, u, coefficient);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( jacobi_init), dim3(gridBlock), dim3(threadBlock), 0, 0, x_inner, y_inner, halo_depth, density, energy, rx, ry, kx, ky, u0, u, coefficient);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2a173883abadb636214229bb566ade859a2021aa.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "jacobi_init.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int x_inner = 1;
const int y_inner = 1;
const int halo_depth = 1;
const double *density = NULL;
cudaMalloc(&density, XSIZE*YSIZE);
const double *energy = NULL;
cudaMalloc(&energy, XSIZE*YSIZE);
const double rx = 1;
const double ry = 1;
double *kx = NULL;
cudaMalloc(&kx, XSIZE*YSIZE);
double *ky = NULL;
cudaMalloc(&ky, XSIZE*YSIZE);
double *u0 = NULL;
cudaMalloc(&u0, XSIZE*YSIZE);
double *u = NULL;
cudaMalloc(&u, XSIZE*YSIZE);
const int coefficient = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
jacobi_init<<<gridBlock,threadBlock>>>(x_inner,y_inner,halo_depth,density,energy,rx,ry,kx,ky,u0,u,coefficient);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
jacobi_init<<<gridBlock,threadBlock>>>(x_inner,y_inner,halo_depth,density,energy,rx,ry,kx,ky,u0,u,coefficient);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
jacobi_init<<<gridBlock,threadBlock>>>(x_inner,y_inner,halo_depth,density,energy,rx,ry,kx,ky,u0,u,coefficient);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ffd0db8fdc3775aeabfe941ee17855a72b4d4d6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/smooth_l1_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out,
Dtype sigma2) {
// f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
// |x| - 0.5 / sigma / sigma otherwise
CUDA_KERNEL_LOOP(index, n) {
Dtype val = in[index];
Dtype abs_val = abs(val);
if (abs_val < 1.0 / sigma2) {
out[index] = 0.5 * val * val * sigma2;
} else {
out[index] = abs_val - 0.5 / sigma2;
}
}
}
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(),
bottom[1]->gpu_data(),
diff_.mutable_gpu_data()); // d := b0 - b1
if (has_weights_) {
// apply "inside" weights
caffe_gpu_mul(
count,
bottom[2]->gpu_data(),
diff_.gpu_data(),
diff_.mutable_gpu_data()); // d := w_in * (b0 - b1)
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SmoothL1Forward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, diff_.gpu_data(), errors_.mutable_gpu_data(), sigma2_);
CUDA_POST_KERNEL_CHECK;
if (has_weights_) {
// apply "outside" weights
caffe_gpu_mul(
count,
bottom[3]->gpu_data(),
errors_.gpu_data(),
errors_.mutable_gpu_data()); // d := w_out * SmoothL1(w_in * (b0 - b1))
}
Dtype loss;
caffe_gpu_dot(count, ones_.gpu_data(), errors_.gpu_data(), &loss);
top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num();
}
template <typename Dtype>
__global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out,
Dtype sigma2) {
// f'(x) = sigma * sigma * x if |x| < 1 / sigma / sigma
// = sign(x) otherwise
CUDA_KERNEL_LOOP(index, n) {
Dtype val = in[index];
Dtype abs_val = abs(val);
if (abs_val < 1.0 / sigma2) {
out[index] = sigma2 * val;
} else {
out[index] = (Dtype(0) < val) - (val < Dtype(0));
}
}
}
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// after forwards, diff_ holds w_in * (b0 - b1)
int count = diff_.count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SmoothL1Backward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
count, diff_.gpu_data(), diff_.mutable_gpu_data(), sigma2_);
CUDA_POST_KERNEL_CHECK;
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
caffe_gpu_axpby(
count, // count
alpha, // alpha
diff_.gpu_data(), // x
Dtype(0), // beta
bottom[i]->mutable_gpu_diff()); // y
if (has_weights_) {
// Scale by "inside" weight
caffe_gpu_mul(
count,
bottom[2]->gpu_data(),
bottom[i]->gpu_diff(),
bottom[i]->mutable_gpu_diff());
// Scale by "outside" weight
caffe_gpu_mul(
count,
bottom[3]->gpu_data(),
bottom[i]->gpu_diff(),
bottom[i]->mutable_gpu_diff());
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer);
} // namespace caffe
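// Editor's illustrative sketch (assumption, not part of the original Caffe source): a scalar
// reference of the piecewise Huber-like function that SmoothL1Forward evaluates element-wise.
static float smooth_l1_reference(float x, float sigma2) {
  float abs_x = x < 0 ? -x : x;
  return (abs_x < 1.0f / sigma2) ? 0.5f * x * x * sigma2  // quadratic near zero
                                 : abs_x - 0.5f / sigma2; // linear tail
}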
| ffd0db8fdc3775aeabfe941ee17855a72b4d4d6a.cu | #include <vector>
#include "caffe/layers/smooth_l1_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out,
Dtype sigma2) {
// f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
// |x| - 0.5 / sigma / sigma otherwise
CUDA_KERNEL_LOOP(index, n) {
Dtype val = in[index];
Dtype abs_val = abs(val);
if (abs_val < 1.0 / sigma2) {
out[index] = 0.5 * val * val * sigma2;
} else {
out[index] = abs_val - 0.5 / sigma2;
}
}
}
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(),
bottom[1]->gpu_data(),
diff_.mutable_gpu_data()); // d := b0 - b1
if (has_weights_) {
// apply "inside" weights
caffe_gpu_mul(
count,
bottom[2]->gpu_data(),
diff_.gpu_data(),
diff_.mutable_gpu_data()); // d := w_in * (b0 - b1)
}
// NOLINT_NEXT_LINE(whitespace/operators)
SmoothL1Forward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, diff_.gpu_data(), errors_.mutable_gpu_data(), sigma2_);
CUDA_POST_KERNEL_CHECK;
if (has_weights_) {
// apply "outside" weights
caffe_gpu_mul(
count,
bottom[3]->gpu_data(),
errors_.gpu_data(),
errors_.mutable_gpu_data()); // d := w_out * SmoothL1(w_in * (b0 - b1))
}
Dtype loss;
caffe_gpu_dot(count, ones_.gpu_data(), errors_.gpu_data(), &loss);
top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num();
}
template <typename Dtype>
__global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out,
Dtype sigma2) {
// f'(x) = sigma * sigma * x if |x| < 1 / sigma / sigma
// = sign(x) otherwise
CUDA_KERNEL_LOOP(index, n) {
Dtype val = in[index];
Dtype abs_val = abs(val);
if (abs_val < 1.0 / sigma2) {
out[index] = sigma2 * val;
} else {
out[index] = (Dtype(0) < val) - (val < Dtype(0));
}
}
}
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// after forwards, diff_ holds w_in * (b0 - b1)
int count = diff_.count();
// NOLINT_NEXT_LINE(whitespace/operators)
SmoothL1Backward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >>>(
count, diff_.gpu_data(), diff_.mutable_gpu_data(), sigma2_);
CUDA_POST_KERNEL_CHECK;
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
caffe_gpu_axpby(
count, // count
alpha, // alpha
diff_.gpu_data(), // x
Dtype(0), // beta
bottom[i]->mutable_gpu_diff()); // y
if (has_weights_) {
// Scale by "inside" weight
caffe_gpu_mul(
count,
bottom[2]->gpu_data(),
bottom[i]->gpu_diff(),
bottom[i]->mutable_gpu_diff());
// Scale by "outside" weight
caffe_gpu_mul(
count,
bottom[3]->gpu_data(),
bottom[i]->gpu_diff(),
bottom[i]->mutable_gpu_diff());
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer);
} // namespace caffe
|
975757a0d48647462450ad08af739d0c90eb01c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------------------------------------------------------------------------
// prefix "g" for pointer pointing to "Global" memory space in device
// prefix "s" for pointer pointing to "Shared" memory space in device
//
// prefix "I/J" for varialbe of "I/J" particles
// each thread calculates the potential of "(I_Base+tx)th" "I" particle
// from all "J" particles
//
// "I" particles are packed into "N/GroupSize_I" "IGroup"s with "GroupSize_I" particles within each group
// "J" particles are packed into "N/GroupSize_J" "JGroup"s with "GroupSize_J" particles within each group
//---------------------------------------------------------------------------------------------------
#include "Dori.h"
#define I_Start bx * BLOCK_SIZE
#define J_Start 0
#define GroupSize_I GRID_SIZE * BLOCK_SIZE
#define GroupSize_J BLOCK_SIZE
__global__ void CUCAL_Pot( const int Nj, real gJ_Mass[], real gJ_Pos[][3],
const int Ni, real gI_Pos[][3], real gI_Pot[], const real Eps2 )
{
const unsigned int tx = threadIdx.x;
const unsigned int bx = blockIdx.x;
__shared__ real sJ_Mass [BLOCK_SIZE];
__shared__ real sJ_Pos_x[BLOCK_SIZE];
__shared__ real sJ_Pos_y[BLOCK_SIZE];
__shared__ real sJ_Pos_z[BLOCK_SIZE];
// (I/J)_Base : Base Address for (I/J)Group
for (int I_Base=I_Start; I_Base<Ni; I_Base+=GroupSize_I)
{
real_acc Pot = (real_acc)0.0;
int ii = I_Base+tx;
int i = ii%Ni;
real I_Pos_x = gI_Pos[i][0];
real I_Pos_y = gI_Pos[i][1];
real I_Pos_z = gI_Pos[i][2];
for (int J_Base=J_Start; J_Base<Nj; J_Base+=GroupSize_J)
{
int jj = J_Base+tx;
int j = jj%Nj;
sJ_Mass [tx] = gJ_Mass[j];
sJ_Pos_x[tx] = gJ_Pos [j][0];
sJ_Pos_y[tx] = gJ_Pos [j][1];
sJ_Pos_z[tx] = gJ_Pos [j][2];
__syncthreads();
// k : kth particle in JGroup
for (int k=0; k<GroupSize_J; k++)
{
# ifndef N_IS_MULTIPLE_OF_BS
int kk = J_Base+k;
# endif
// evaluate the gravitational potential
//---------------------------------------------------------------------
real dx = sJ_Pos_x[k] - I_Pos_x;
real dy = sJ_Pos_y[k] - I_Pos_y;
real dz = sJ_Pos_z[k] - I_Pos_z;
# ifdef SOFTEN
real R2 = dx*dx + Eps2;
# else
real R2 = dx*dx;
# endif
R2 += dy*dy;
R2 += dz*dz;
real mRinv = -(real)1.0 / SQRT(R2);
# ifndef N_IS_MULTIPLE_OF_BS
if ( kk < Nj )
{
# endif
// exclude contribution from itself
# ifdef SOFTEN
if ( R2 != Eps2 )
# else
if ( R2 != (real)0.0 )
#endif
Pot += sJ_Mass[k]*mRinv;
# ifndef N_IS_MULTIPLE_OF_BS
}
# endif
} // for (int k=0; k<GroupSize_J; k++)
__syncthreads();
} // for (int J_Base=J_Start; J_Base<Nj; J_Base+=GroupSize_J)
if ( ii < Ni )
{
gI_Pot [i] = Pot;
}
} // for (int I_Base=I_Start; I_Base<Ni; I_Base+=GroupSize_I)
}
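//---------------------------------------------------------------------------------------------------
// Editor's note (reference only, not part of the original file): per "I" particle the kernel
// above accumulates the (optionally softened) gravitational potential
//
// Pot_i = - sum_{j != i} m_j / sqrt( |r_i - r_j|^2 + Eps2 )
//
// (the Eps2 term only when SOFTEN is defined), streaming the "J" particles through shared
// memory in tiles of GroupSize_J = BLOCK_SIZE.
//---------------------------------------------------------------------------------------------------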
| 975757a0d48647462450ad08af739d0c90eb01c1.cu | //---------------------------------------------------------------------------------------------------
// prefix "g" for pointer pointing to "Global" memory space in device
// prefix "s" for pointer pointing to "Shared" memory space in device
//
// prefix "I/J" for varialbe of "I/J" particles
// each thread calculates the potential of "(I_Base+tx)th" "I" particle
// from all "J" particles
//
// "I" particles are packed into "N/GroupSize_I" "IGroup"s with "GroupSize_I" particles within each group
// "J" particles are packed into "N/GroupSize_J" "JGroup"s with "GroupSize_J" particles within each group
//---------------------------------------------------------------------------------------------------
#include "Dori.h"
#define I_Start bx * BLOCK_SIZE
#define J_Start 0
#define GroupSize_I GRID_SIZE * BLOCK_SIZE
#define GroupSize_J BLOCK_SIZE
__global__ void CUCAL_Pot( const int Nj, real gJ_Mass[], real gJ_Pos[][3],
const int Ni, real gI_Pos[][3], real gI_Pot[], const real Eps2 )
{
const unsigned int tx = threadIdx.x;
const unsigned int bx = blockIdx.x;
__shared__ real sJ_Mass [BLOCK_SIZE];
__shared__ real sJ_Pos_x[BLOCK_SIZE];
__shared__ real sJ_Pos_y[BLOCK_SIZE];
__shared__ real sJ_Pos_z[BLOCK_SIZE];
// (I/J)_Base : Base Address for (I/J)Group
for (int I_Base=I_Start; I_Base<Ni; I_Base+=GroupSize_I)
{
real_acc Pot = (real_acc)0.0;
int ii = I_Base+tx;
int i = ii%Ni;
real I_Pos_x = gI_Pos[i][0];
real I_Pos_y = gI_Pos[i][1];
real I_Pos_z = gI_Pos[i][2];
for (int J_Base=J_Start; J_Base<Nj; J_Base+=GroupSize_J)
{
int jj = J_Base+tx;
int j = jj%Nj;
sJ_Mass [tx] = gJ_Mass[j];
sJ_Pos_x[tx] = gJ_Pos [j][0];
sJ_Pos_y[tx] = gJ_Pos [j][1];
sJ_Pos_z[tx] = gJ_Pos [j][2];
__syncthreads();
// k : kth particle in JGroup
for (int k=0; k<GroupSize_J; k++)
{
# ifndef N_IS_MULTIPLE_OF_BS
int kk = J_Base+k;
# endif
// evaluate the gravitational potential
//---------------------------------------------------------------------
real dx = sJ_Pos_x[k] - I_Pos_x;
real dy = sJ_Pos_y[k] - I_Pos_y;
real dz = sJ_Pos_z[k] - I_Pos_z;
# ifdef SOFTEN
real R2 = dx*dx + Eps2;
# else
real R2 = dx*dx;
# endif
R2 += dy*dy;
R2 += dz*dz;
real mRinv = -(real)1.0 / SQRT(R2);
# ifndef N_IS_MULTIPLE_OF_BS
if ( kk < Nj )
{
# endif
// exclude contribution from itself
# ifdef SOFTEN
if ( R2 != Eps2 )
# else
if ( R2 != (real)0.0 )
# endif
Pot += sJ_Mass[k]*mRinv;
# ifndef N_IS_MULTIPLE_OF_BS
}
# endif
} // for (int k=0; k<GroupSize_J; k++)
__syncthreads();
} // for (int J_Base=J_Start; J_Base<Nj; J_Base+=GroupSize_J)
if ( ii < Ni )
{
gI_Pot [i] = Pot;
}
} // for (int I_Base=I_Start; I_Base<Ni; I_Base+=GroupSize_I)
}
|
a61f75e4a2abefe8c1e15c39b691cc7f4fc7e61e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
} | a61f75e4a2abefe8c1e15c39b691cc7f4fc7e61e.cu | #include "includes.h"
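// Gathers point features by index: for each batch (one thread block per batch) and each of
// the m query points, the c feature channels of its nsample neighbours listed in idx are
// copied into out, giving an (m, nsample, c) block per batch.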
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
} |
2f7bc066c7bcf466fa41aab911497b9f39152d17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define MYDEBUG
#ifdef MYDEBUG
#define DEBUG_PRINT printf("here: %d\n", __LINE__); fflush(stdout);
#else
#define DEBUG_PRINT
#endif
#define SIZE 10240
__global__ void MyKernel(int *a, int *b, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size)
b[id] = (a[id] >> 1) + a[id];
}
int main()
{
int i;
int size = SIZE;
int BlockSize = 256;
int BlockNum = (size + BlockSize - 1) / BlockSize;
int *d_a, *d_b;
int sum_a, sum_b;
sum_a = sum_b = 0;
DEBUG_PRINT
hipMallocManaged((void **)&d_a, size*sizeof(int));
hipMallocManaged((void **)&d_b, size*sizeof(int));
DEBUG_PRINT
for(i = 0; i < size; i++) {
d_a[i] = rand() % 100;
sum_a += d_a[i];
}
DEBUG_PRINT
hipLaunchKernelGGL(( MyKernel), dim3(BlockNum), dim3(BlockSize), 0, 0, d_a, d_b, size);
hipDeviceSynchronize();
DEBUG_PRINT
for(i = 0; i < size; i++)
sum_b += d_b[i];
DEBUG_PRINT
hipFree(d_a);
hipFree(d_b);
DEBUG_PRINT
printf("sum_a: %d, sum_b: %d\n", sum_a, sum_b);
return 0;
}
| 2f7bc066c7bcf466fa41aab911497b9f39152d17.cu | #include <stdio.h>
#include <stdlib.h>
#define MYDEBUG
#ifdef MYDEBUG
#define DEBUG_PRINT printf("here: %d\n", __LINE__); fflush(stdout);
#else
#define DEBUG_PRINT
#endif
#define SIZE 10240
__global__ void MyKernel(int *a, int *b, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size)
b[id] = (a[id] >> 1) + a[id];
}
int main()
{
int i;
int size = SIZE;
int BlockSize = 256;
int BlockNum = (size + BlockSize - 1) / BlockSize;
int *d_a, *d_b;
int sum_a, sum_b;
sum_a = sum_b = 0;
DEBUG_PRINT
cudaMallocManaged((void **)&d_a, size*sizeof(int));
cudaMallocManaged((void **)&d_b, size*sizeof(int));
DEBUG_PRINT
for(i = 0; i < size; i++) {
d_a[i] = rand() % 100;
sum_a += d_a[i];
}
DEBUG_PRINT
MyKernel<<<BlockNum, BlockSize>>>(d_a, d_b, size);
cudaDeviceSynchronize();
DEBUG_PRINT
for(i = 0; i < size; i++)
sum_b += d_b[i];
DEBUG_PRINT
cudaFree(d_a);
cudaFree(d_b);
DEBUG_PRINT
printf("sum_a: %d, sum_b: %d\n", sum_a, sum_b);
return 0;
}
|
092113c9766f08fd3ca46be9c900e48c9e30e3d6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* hemisphere_generator.cu - entry point for geometry sampling after the first bounce on GPUs.
*/
#include "accelerad_copyright.h"
#include <optix_world.h>
#include "optix_shader_common.h"
#include "optix_point_common.h"
using namespace optix;
/* Context variables */
//rtBuffer<PointDirection, 1> cluster_buffer; /* input */
rtDeclareVariable(PointDirectionBuffer, cluster_buffer, , ); /* input */
rtBuffer<PointDirection, 3> seed_buffer; /* output */
rtDeclareVariable(rtObject, top_object, , );
rtDeclareVariable(unsigned int, segment_offset, , ) = 0u; /* Offset into data if computed with multiple segments */
/* OptiX variables */
rtDeclareVariable(uint3, launch_index, rtLaunchIndex, );
rtDeclareVariable(uint3, launch_dim, rtLaunchDim, );
RT_PROGRAM void hemisphere_camera()
{
PerRayData_point_cloud prd;
clear(seed_buffer[launch_index]);
PointDirection eye = cluster_buffer[launch_index.z + segment_offset];
// Check for valid input
if ( isfinite( eye.pos ) && isfinite( eye.dir ) && dot( eye.dir, eye.dir ) > FTINY ) { // NaN values will be false
// Init random state
init_rand(&prd.state, launch_index.x + launch_dim.x * (launch_index.y + launch_dim.y * launch_index.z));
// Make axes
float3 uz = normalize(eye.dir);
float3 ux = getperpendicular(uz, prd.state);
float3 uy = cross(uz, ux);
/* avoid coincident samples */
float2 spt = 0.1f + 0.8f * make_float2(hiprand_uniform(prd.state), hiprand_uniform(prd.state));
SDsquare2disk(spt, (launch_index.y + spt.y) / launch_dim.y, (launch_index.x + spt.x) / launch_dim.x);
float zd = sqrtf(1.0f - dot(spt, spt));
float3 rdir = normalize(spt.x * ux + spt.y * uy + zd * uz);
prd.index = launch_index;
prd.seeds = launch_index.z + 1;
#ifdef ANTIMATTER
prd.mask = 0u;
prd.inside = 0;
#endif
// Trace the current ray
Ray ray = make_Ray(eye.pos, rdir, POINT_CLOUD_RAY, ray_start( eye.pos, rdir, uz, RAY_START ), RAY_END);
rtTrace(top_object, ray, prd);
}
}
RT_PROGRAM void exception()
{
#ifdef PRINT_OPTIX
rtPrintExceptionDetails();
#endif
seed_buffer[launch_index].pos = exceptionToFloat3(rtGetExceptionCode());
seed_buffer[launch_index].dir = make_float3( 0.0f );
#ifdef AMBIENT_CELL
seed_buffer[launch_index].cell = make_uint2(0);
#endif
}
| 092113c9766f08fd3ca46be9c900e48c9e30e3d6.cu | /*
* hemisphere_generator.cu - entry point for geometry sampling after the first bounce on GPUs.
*/
#include "accelerad_copyright.h"
#include <optix_world.h>
#include "optix_shader_common.h"
#include "optix_point_common.h"
using namespace optix;
/* Context variables */
//rtBuffer<PointDirection, 1> cluster_buffer; /* input */
rtDeclareVariable(PointDirectionBuffer, cluster_buffer, , ); /* input */
rtBuffer<PointDirection, 3> seed_buffer; /* output */
rtDeclareVariable(rtObject, top_object, , );
rtDeclareVariable(unsigned int, segment_offset, , ) = 0u; /* Offset into data if computed with multiple segments */
/* OptiX variables */
rtDeclareVariable(uint3, launch_index, rtLaunchIndex, );
rtDeclareVariable(uint3, launch_dim, rtLaunchDim, );
RT_PROGRAM void hemisphere_camera()
{
PerRayData_point_cloud prd;
clear(seed_buffer[launch_index]);
PointDirection eye = cluster_buffer[launch_index.z + segment_offset];
// Check for valid input
if ( isfinite( eye.pos ) && isfinite( eye.dir ) && dot( eye.dir, eye.dir ) > FTINY ) { // NaN values will be false
// Init random state
init_rand(&prd.state, launch_index.x + launch_dim.x * (launch_index.y + launch_dim.y * launch_index.z));
// Make axes
float3 uz = normalize(eye.dir);
float3 ux = getperpendicular(uz, prd.state);
float3 uy = cross(uz, ux);
/* avoid coincident samples */
float2 spt = 0.1f + 0.8f * make_float2(curand_uniform(prd.state), curand_uniform(prd.state));
SDsquare2disk(spt, (launch_index.y + spt.y) / launch_dim.y, (launch_index.x + spt.x) / launch_dim.x);
float zd = sqrtf(1.0f - dot(spt, spt));
float3 rdir = normalize(spt.x * ux + spt.y * uy + zd * uz);
prd.index = launch_index;
prd.seeds = launch_index.z + 1;
#ifdef ANTIMATTER
prd.mask = 0u;
prd.inside = 0;
#endif
// Trace the current ray
Ray ray = make_Ray(eye.pos, rdir, POINT_CLOUD_RAY, ray_start( eye.pos, rdir, uz, RAY_START ), RAY_END);
rtTrace(top_object, ray, prd);
}
}
RT_PROGRAM void exception()
{
#ifdef PRINT_OPTIX
rtPrintExceptionDetails();
#endif
seed_buffer[launch_index].pos = exceptionToFloat3(rtGetExceptionCode());
seed_buffer[launch_index].dir = make_float3( 0.0f );
#ifdef AMBIENT_CELL
seed_buffer[launch_index].cell = make_uint2(0);
#endif
}
|
affddf4d50582d5370ffebaea4d79364b98b1144.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include "cuda-multiply.cu"
// Kernel function to add the elements of two arrays
/*__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}*/
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
hipLaunchKernelGGL(( multiply), dim3(1), dim3(1), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| affddf4d50582d5370ffebaea4d79364b98b1144.cu | #include <iostream>
#include <math.h>
#include "cuda-multiply.cu"
// Kernel function to add the elements of two arrays
/*__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}*/
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
multiply<<<1, 1>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
3ed79be5154fcfa96b9ecc099ab8fb454e2fe7f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hamiltonian.h"
__device__ float HOffBondXHeisenberg(const int si, const int bra, const float JJ)
{
float valH;
//int S0, S1;
//int T0, T1;
valH = JJ*0.5; //contribution from the J part of the Hamiltonian
return valH;
}
__device__ float HOffBondYHeisenberg(const int si, const int bra, const float JJ)
{
float valH;
//int S0, S1;
//int T0, T1;
valH = JJ*0.5; //contribution from the J part of the Hamiltonian
return valH;
}
__device__ float HDiagPartHeisenberg1D(const int bra, int latticeSize, int3* d_Bond, const float JJ)
{
int S0b,S1b ; //spins (bra)
int T0,T1; //site
//int P0, P1, P2, P3; //sites for plaquette (Q)
//int s0p, s1p, s2p, s3p;
float valH = 0.f;
for (int Ti=0; Ti<latticeSize; Ti++)
{
//***HEISENBERG PART
T0 = (d_Bond[Ti]).x; //lower left spin
S0b = (bra>>T0)&1;
//if (T0 != Ti) cout<<"Square error 3\n";
T1 = (d_Bond[Ti]).y; //first bond
S1b = (bra>>T1)&1; //unpack bra
valH += JJ*(S0b-0.5)*(S1b-0.5);
}//T0
//cout<<bra<<" "<<valH<<endl;
return valH;
}//HdiagPart
__device__ float HDiagPartHeisenberg2D(const int bra, int latticeSize, int3* d_Bond, const float JJ)
{
int S0b,S1b ; //spins (bra)
int T0,T1; //site
//int P0, P1, P2, P3; //sites for plaquette (Q)
//int s0p, s1p, s2p, s3p;
float valH = 0.f;
for (int Ti=0; Ti<latticeSize; Ti++)
{
//***HEISENBERG PART
T0 = (d_Bond[Ti]).x; //lower left spin
S0b = (bra>>T0)&1;
//if (T0 != Ti) cout<<"Square error 3\n";
T1 = (d_Bond[Ti]).y; //first bond
S1b = (bra>>T1)&1; //unpack bra
valH += JJ*(S0b-0.5)*(S1b-0.5);
T1 = (d_Bond[Ti]).z; //second bond
S1b = (bra>>T1)&1; //unpack bra
valH += JJ*(S0b-0.5)*(S1b-0.5);
}//T0
//cout<<bra<<" "<<valH<<endl;
return valH;
}//HdiagPart
__global__ void FillDiagonalsHeisenberg(int* d_basis, f_hamiltonian H, int* d_Bond, parameters data)
{
int row = blockIdx.x*blockDim.x + threadIdx.x;
int latticeSize = data.nsite;
int site = threadIdx.x%(latticeSize);
int dim = H.sectorDim;
unsigned int tempi;
__shared__ int3 tempBond[32];
if (row < dim)
{
tempi = d_basis[row];
(tempBond[site]).x = d_Bond[site];
(tempBond[site]).y = d_Bond[latticeSize + site];
switch( data.dimension )
{
case 1 :
H.vals[row] = HDiagPartHeisenberg1D(tempi, latticeSize, tempBond, data.J1);
break;
case 2 :
(tempBond[site]).z = d_Bond[2*latticeSize + site];
H.vals[row] = HDiagPartHeisenberg2D(tempi, latticeSize, tempBond, data.J1);
break;
}
H.rows[row] = row;
H.cols[row] = row;
H.set[row] = 1;
}
else
{
H.rows[row] = 2*dim;
H.cols[row] = 2*dim;
H.set[row] = 0;
}
}
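// Fills the off-diagonal (spin-flip) part of the Heisenberg Hamiltonian in sparse coordinate
// form: each group of 2*latticeSize threads handles one basis state ii, and within the group
// each thread takes one lattice site and writes either the (ii, j) entry or its transposed
// (j, ii) copy (selected by T0/latticeSize), toggling the two spins of the horizontal bond
// and, in two dimensions, of the vertical bond as well.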
__global__ void FillSparseHeisenberg(int* d_basisPosition, int* d_basis, f_hamiltonian H, int* d_Bond, parameters data, int offset)
{
int latticeSize = data.nsite;
int dim = H.sectorDim;
int ii = (blockDim.x/(2*latticeSize))*blockIdx.x + threadIdx.x/(2*latticeSize) + offset*(blockDim.x/(2*latticeSize));
int T0 = threadIdx.x%(2*latticeSize);
#if __CUDA_ARCH__ < 200
const int arraySize = 512;
#elif __CUDA_ARCH__ >= 200
const int arraySize = 1024;
#else
#error Could not detect GPU architecture
#endif
__shared__ int3 tempBond[32];
int count;
__shared__ int tempPos[arraySize];
__shared__ float tempVal[arraySize];
//__shared__ uint tempi[arraySize];
unsigned int tempi;
__shared__ uint tempod[arraySize];
int stride = 2*data.dimension*latticeSize;
//int tempcount;
int site = T0%(latticeSize);
count = 0;
int rowTemp;
int start = (bool)(dim%arraySize) ? (dim/arraySize + 1)*arraySize : dim/arraySize;
int s;
//int si, sj;//sk,sl; //spin operators
//unsigned int tempi;// tempod; //tempj;
//hipDoubleComplex tempD;
bool compare;
if( ii < dim )
{
tempi = d_basis[ii];
if (T0 < 2*latticeSize)
{
//Putting bond info in shared memory
(tempBond[site]).x = d_Bond[site];
(tempBond[site]).y = d_Bond[latticeSize + site];
__syncthreads();
//Horizontal bond ---------------
s = (tempBond[site]).x;
tempod[threadIdx.x] = tempi;
tempod[threadIdx.x] ^= (1<<s);
s = (tempBond[site]).y;
tempod[threadIdx.x] ^= (1<<s);
//tempod[threadIdx.x] ^= (1<<si); //toggle bit
//tempod[threadIdx.x] ^= (1<<sj); //toggle bit
compare = (d_basisPosition[tempod[threadIdx.x]] > ii);
tempPos[threadIdx.x] = (compare) ? d_basisPosition[tempod[threadIdx.x]] : dim;
tempVal[threadIdx.x] = HOffBondXHeisenberg(site, tempi, data.J1);
count += (int)compare;
//tempcount = (T0/latticeSize);
rowTemp = (T0/latticeSize) ? ii : tempPos[threadIdx.x];
rowTemp = (compare) ? rowTemp : 2*dim;
H.vals[ ii*stride + 4*site + (T0/latticeSize) + start ] = tempVal[threadIdx.x]; // (T0/latticeSize) ? tempVal[threadIdx.x] : cuConj(tempVal[threadIdx.x]);
H.cols[ ii*stride + 4*site + (T0/latticeSize) + start ] = (T0/latticeSize) ? tempPos[threadIdx.x] : ii;
H.rows[ ii*stride + 4*site + (T0/latticeSize) + start ] = rowTemp;
H.set[ ii*stride + 4*site + (T0/latticeSize) + start ] = (int)compare;
if (data.dimension == 2)
{
(tempBond[site]).z = d_Bond[2*latticeSize + site];
//Vertical bond -----------------
s = (tempBond[site]).x;
tempod[threadIdx.x] = tempi;
tempod[threadIdx.x] ^= (1<<s);
s = (tempBond[site]).z;
tempod[threadIdx.x] ^= (1<<s);
//tempod[threadIdx.x] ^= (1<<si); //toggle bit
//tempod[threadIdx.x] ^= (1<<sj); //toggle bit
compare = (d_basisPosition[tempod[threadIdx.x]] > ii);
tempPos[threadIdx.x] = (compare) ? d_basisPosition[tempod[threadIdx.x]] : dim;
tempVal[threadIdx.x] = HOffBondYHeisenberg(site,tempi, data.J1);
count += (int)compare;
//tempcount = (T0/latticeSize);
rowTemp = (T0/latticeSize) ? ii : tempPos[threadIdx.x];
rowTemp = (compare) ? rowTemp : 2*dim;
H.vals[ ii*stride + 4*site + 2 + (T0/latticeSize) + start ] = tempVal[threadIdx.x]; // (T0/latticeSize) ? tempVal[threadIdx.x] : cuConj(tempVal[threadIdx.x]);
H.cols[ ii*stride + 4*site + 2 + (T0/latticeSize) + start ] = (T0/latticeSize) ? tempPos[threadIdx.x] : ii;
H.rows[ ii*stride + 4*site + 2 + (T0/latticeSize) + start ] = rowTemp;
H.set[ ii*stride + 4*site + 2 + (T0/latticeSize) + start ] = (int)compare;
}
}
}//end of ii
}//end of FillSparse
| 3ed79be5154fcfa96b9ecc099ab8fb454e2fe7f5.cu | #include "hamiltonian.h"
__device__ float HOffBondXHeisenberg(const int si, const int bra, const float JJ)
{
float valH;
//int S0, S1;
//int T0, T1;
valH = JJ*0.5; //contribution from the J part of the Hamiltonian
return valH;
}
__device__ float HOffBondYHeisenberg(const int si, const int bra, const float JJ)
{
float valH;
//int S0, S1;
//int T0, T1;
valH = JJ*0.5; //contribution from the J part of the Hamiltonian
return valH;
}
__device__ float HDiagPartHeisenberg1D(const int bra, int latticeSize, int3* d_Bond, const float JJ)
{
int S0b,S1b ; //spins (bra)
int T0,T1; //site
//int P0, P1, P2, P3; //sites for plaquette (Q)
//int s0p, s1p, s2p, s3p;
float valH = 0.f;
for (int Ti=0; Ti<latticeSize; Ti++)
{
//***HEISENBERG PART
T0 = (d_Bond[Ti]).x; //lower left spin
S0b = (bra>>T0)&1;
//if (T0 != Ti) cout<<"Square error 3\n";
T1 = (d_Bond[Ti]).y; //first bond
S1b = (bra>>T1)&1; //unpack bra
valH += JJ*(S0b-0.5)*(S1b-0.5);
}//T0
//cout<<bra<<" "<<valH<<endl;
return valH;
}//HdiagPart
__device__ float HDiagPartHeisenberg2D(const int bra, int latticeSize, int3* d_Bond, const float JJ)
{
int S0b,S1b ; //spins (bra)
int T0,T1; //site
//int P0, P1, P2, P3; //sites for plaquette (Q)
//int s0p, s1p, s2p, s3p;
float valH = 0.f;
for (int Ti=0; Ti<latticeSize; Ti++)
{
//***HEISENBERG PART
T0 = (d_Bond[Ti]).x; //lower left spin
S0b = (bra>>T0)&1;
//if (T0 != Ti) cout<<"Square error 3\n";
T1 = (d_Bond[Ti]).y; //first bond
S1b = (bra>>T1)&1; //unpack bra
valH += JJ*(S0b-0.5)*(S1b-0.5);
T1 = (d_Bond[Ti]).z; //second bond
S1b = (bra>>T1)&1; //unpack bra
valH += JJ*(S0b-0.5)*(S1b-0.5);
}//T0
//cout<<bra<<" "<<valH<<endl;
return valH;
}//HdiagPart
__global__ void FillDiagonalsHeisenberg(int* d_basis, f_hamiltonian H, int* d_Bond, parameters data)
{
int row = blockIdx.x*blockDim.x + threadIdx.x;
int latticeSize = data.nsite;
int site = threadIdx.x%(latticeSize);
int dim = H.sectorDim;
unsigned int tempi;
__shared__ int3 tempBond[32];
if (row < dim)
{
tempi = d_basis[row];
(tempBond[site]).x = d_Bond[site];
(tempBond[site]).y = d_Bond[latticeSize + site];
switch( data.dimension )
{
case 1 :
H.vals[row] = HDiagPartHeisenberg1D(tempi, latticeSize, tempBond, data.J1);
break;
case 2 :
(tempBond[site]).z = d_Bond[2*latticeSize + site];
H.vals[row] = HDiagPartHeisenberg2D(tempi, latticeSize, tempBond, data.J1);
break;
}
H.rows[row] = row;
H.cols[row] = row;
H.set[row] = 1;
}
else
{
H.rows[row] = 2*dim;
H.cols[row] = 2*dim;
H.set[row] = 0;
}
}
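// Fills the off-diagonal (spin-flip) part of the Heisenberg Hamiltonian in sparse coordinate
// form: each group of 2*latticeSize threads handles one basis state ii, and within the group
// each thread takes one lattice site and writes either the (ii, j) entry or its transposed
// (j, ii) copy (selected by T0/latticeSize), toggling the two spins of the horizontal bond
// and, in two dimensions, of the vertical bond as well.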
__global__ void FillSparseHeisenberg(int* d_basisPosition, int* d_basis, f_hamiltonian H, int* d_Bond, parameters data, int offset)
{
int latticeSize = data.nsite;
int dim = H.sectorDim;
int ii = (blockDim.x/(2*latticeSize))*blockIdx.x + threadIdx.x/(2*latticeSize) + offset*(blockDim.x/(2*latticeSize));
int T0 = threadIdx.x%(2*latticeSize);
#if __CUDA_ARCH__ < 200
const int arraySize = 512;
#elif __CUDA_ARCH__ >= 200
const int arraySize = 1024;
#else
#error Could not detect GPU architecture
#endif
__shared__ int3 tempBond[32];
int count;
__shared__ int tempPos[arraySize];
__shared__ float tempVal[arraySize];
//__shared__ uint tempi[arraySize];
unsigned int tempi;
__shared__ uint tempod[arraySize];
int stride = 2*data.dimension*latticeSize;
//int tempcount;
int site = T0%(latticeSize);
count = 0;
int rowTemp;
int start = (bool)(dim%arraySize) ? (dim/arraySize + 1)*arraySize : dim/arraySize;
int s;
//int si, sj;//sk,sl; //spin operators
//unsigned int tempi;// tempod; //tempj;
//cuDoubleComplex tempD;
bool compare;
if( ii < dim )
{
tempi = d_basis[ii];
if (T0 < 2*latticeSize)
{
//Putting bond info in shared memory
(tempBond[site]).x = d_Bond[site];
(tempBond[site]).y = d_Bond[latticeSize + site];
__syncthreads();
//Horizontal bond ---------------
s = (tempBond[site]).x;
tempod[threadIdx.x] = tempi;
tempod[threadIdx.x] ^= (1<<s);
s = (tempBond[site]).y;
tempod[threadIdx.x] ^= (1<<s);
//tempod[threadIdx.x] ^= (1<<si); //toggle bit
//tempod[threadIdx.x] ^= (1<<sj); //toggle bit
compare = (d_basisPosition[tempod[threadIdx.x]] > ii);
tempPos[threadIdx.x] = (compare) ? d_basisPosition[tempod[threadIdx.x]] : dim;
tempVal[threadIdx.x] = HOffBondXHeisenberg(site, tempi, data.J1);
count += (int)compare;
//tempcount = (T0/latticeSize);
rowTemp = (T0/latticeSize) ? ii : tempPos[threadIdx.x];
rowTemp = (compare) ? rowTemp : 2*dim;
H.vals[ ii*stride + 4*site + (T0/latticeSize) + start ] = tempVal[threadIdx.x]; // (T0/latticeSize) ? tempVal[threadIdx.x] : cuConj(tempVal[threadIdx.x]);
H.cols[ ii*stride + 4*site + (T0/latticeSize) + start ] = (T0/latticeSize) ? tempPos[threadIdx.x] : ii;
H.rows[ ii*stride + 4*site + (T0/latticeSize) + start ] = rowTemp;
H.set[ ii*stride + 4*site + (T0/latticeSize) + start ] = (int)compare;
if (data.dimension == 2)
{
(tempBond[site]).z = d_Bond[2*latticeSize + site];
//Vertical bond -----------------
s = (tempBond[site]).x;
tempod[threadIdx.x] = tempi;
tempod[threadIdx.x] ^= (1<<s);
s = (tempBond[site]).z;
tempod[threadIdx.x] ^= (1<<s);
//tempod[threadIdx.x] ^= (1<<si); //toggle bit
//tempod[threadIdx.x] ^= (1<<sj); //toggle bit
compare = (d_basisPosition[tempod[threadIdx.x]] > ii);
tempPos[threadIdx.x] = (compare) ? d_basisPosition[tempod[threadIdx.x]] : dim;
tempVal[threadIdx.x] = HOffBondYHeisenberg(site,tempi, data.J1);
count += (int)compare;
//tempcount = (T0/latticeSize);
rowTemp = (T0/latticeSize) ? ii : tempPos[threadIdx.x];
rowTemp = (compare) ? rowTemp : 2*dim;
H.vals[ ii*stride + 4*site + 2 + (T0/latticeSize) + start ] = tempVal[threadIdx.x]; // (T0/latticeSize) ? tempVal[threadIdx.x] : cuConj(tempVal[threadIdx.x]);
H.cols[ ii*stride + 4*site + 2 + (T0/latticeSize) + start ] = (T0/latticeSize) ? tempPos[threadIdx.x] : ii;
H.rows[ ii*stride + 4*site + 2 + (T0/latticeSize) + start ] = rowTemp;
H.set[ ii*stride + 4*site + 2 + (T0/latticeSize) + start ] = (int)compare;
}
}
}//end of ii
}//end of FillSparse
|
452c06c764a0c4597ad7f7545c5c4afe9ba84c8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 32
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define A(y,x) A[(y)*WIDTH_A+(x)]
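// Machine-generated, fully unrolled stencil kernel: each thread produces a 32-row column of C,
// staging rows of A through shared memory (256 threads plus a 16-element halo = 272 floats) and
// keeping the current 3x3 neighbourhood in the temp_* arrays before writing cal(neighbourhood)
// -- presumably the regional-maximum test, defined elsewhere -- to the matching element of C.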
__global__ void imregionmax(float * A, float * C, int width)
{
__shared__ float shared_0[272];
float temp_0[9];
float temp_1[9];
float temp_2[9];
float temp_3[9];
float temp_4[9];
float temp_5[9];
float temp_6[9];
float temp_7[9];
float temp_8[9];
float temp_9[9];
float temp_10[9];
float temp_11[9];
float temp_12[9];
float temp_13[9];
float temp_14[9];
float temp_15[9];
float temp_16[9];
float temp_17[9];
float temp_18[9];
float temp_19[9];
float temp_20[9];
float temp_21[9];
float temp_22[9];
float temp_23[9];
float temp_24[9];
float temp_25[9];
float temp_26[9];
float temp_27[9];
float temp_28[9];
float temp_29[9];
float temp_30[9];
float temp_31[9];
int t_0;
int t_1;
int t_2;
int t_3;
int t_4;
int t_5;
int t_6;
int t_7;
int t_8;
int t_9;
int t_10;
int t_11;
int t_12;
int t_13;
int t_14;
int t_15;
int t_16;
int t_17;
int t_18;
int t_19;
int t_20;
int t_21;
int t_22;
int t_23;
int t_24;
int t_25;
int t_26;
int t_27;
int t_28;
int t_29;
int t_30;
int t_31;
int it_1;
t_0=0;
t_1=0;
t_2=0;
t_3=0;
t_4=0;
t_5=0;
t_6=0;
t_7=0;
t_8=0;
t_9=0;
t_10=0;
t_11=0;
t_12=0;
t_13=0;
t_14=0;
t_15=0;
t_16=0;
t_17=0;
t_18=0;
t_19=0;
t_20=0;
t_21=0;
t_22=0;
t_23=0;
t_24=0;
t_25=0;
t_26=0;
t_27=0;
t_28=0;
t_29=0;
t_30=0;
t_31=0;
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(3-1)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(3-1)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_0[t_0]=a;
t_0=(t_0+1);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(3-2)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(3-2)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_0[t_0]=a;
temp_1[t_1]=a;
t_0=(t_0+1);
t_1=(t_1+1);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(3-3)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(3-3)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_0[t_0]=a;
temp_1[t_1]=a;
temp_2[t_2]=a;
t_0=(t_0+1);
t_1=(t_1+1);
t_2=(t_2+1);
}
{
C(((idy*32)+0), idx)=cal(temp_0);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-1)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-1)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_1[t_1]=a;
temp_2[t_2]=a;
temp_3[t_3]=a;
t_1=(t_1+1);
t_2=(t_2+1);
t_3=(t_3+1);
}
{
C(((idy*32)+1), idx)=cal(temp_1);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-2)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-2)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_2[t_2]=a;
temp_3[t_3]=a;
temp_4[t_4]=a;
t_2=(t_2+1);
t_3=(t_3+1);
t_4=(t_4+1);
}
{
C(((idy*32)+2), idx)=cal(temp_2);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-3)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-3)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_3[t_3]=a;
temp_4[t_4]=a;
temp_5[t_5]=a;
t_3=(t_3+1);
t_4=(t_4+1);
t_5=(t_5+1);
}
{
C(((idy*32)+3), idx)=cal(temp_3);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-4)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-4)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_4[t_4]=a;
temp_5[t_5]=a;
temp_6[t_6]=a;
t_4=(t_4+1);
t_5=(t_5+1);
t_6=(t_6+1);
}
{
C(((idy*32)+4), idx)=cal(temp_4);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-5)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-5)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_5[t_5]=a;
temp_6[t_6]=a;
temp_7[t_7]=a;
t_5=(t_5+1);
t_6=(t_6+1);
t_7=(t_7+1);
}
{
C(((idy*32)+5), idx)=cal(temp_5);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-6)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-6)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_6[t_6]=a;
temp_7[t_7]=a;
temp_8[t_8]=a;
t_6=(t_6+1);
t_7=(t_7+1);
t_8=(t_8+1);
}
{
C(((idy*32)+6), idx)=cal(temp_6);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-7)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-7)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_7[t_7]=a;
temp_8[t_8]=a;
temp_9[t_9]=a;
t_7=(t_7+1);
t_8=(t_8+1);
t_9=(t_9+1);
}
{
C(((idy*32)+7), idx)=cal(temp_7);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-8)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-8)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_8[t_8]=a;
temp_9[t_9]=a;
temp_10[t_10]=a;
t_8=(t_8+1);
t_9=(t_9+1);
t_10=(t_10+1);
}
{
C(((idy*32)+8), idx)=cal(temp_8);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-9)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-9)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_9[t_9]=a;
temp_10[t_10]=a;
temp_11[t_11]=a;
t_9=(t_9+1);
t_10=(t_10+1);
t_11=(t_11+1);
}
{
C(((idy*32)+9), idx)=cal(temp_9);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-10)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-10)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_10[t_10]=a;
temp_11[t_11]=a;
temp_12[t_12]=a;
t_10=(t_10+1);
t_11=(t_11+1);
t_12=(t_12+1);
}
{
C(((idy*32)+10), idx)=cal(temp_10);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-11)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-11)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_11[t_11]=a;
temp_12[t_12]=a;
temp_13[t_13]=a;
t_11=(t_11+1);
t_12=(t_12+1);
t_13=(t_13+1);
}
{
C(((idy*32)+11), idx)=cal(temp_11);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-12)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-12)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_12[t_12]=a;
temp_13[t_13]=a;
temp_14[t_14]=a;
t_12=(t_12+1);
t_13=(t_13+1);
t_14=(t_14+1);
}
{
C(((idy*32)+12), idx)=cal(temp_12);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-13)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-13)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_13[t_13]=a;
temp_14[t_14]=a;
temp_15[t_15]=a;
t_13=(t_13+1);
t_14=(t_14+1);
t_15=(t_15+1);
}
{
C(((idy*32)+13), idx)=cal(temp_13);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-14)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-14)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_14[t_14]=a;
temp_15[t_15]=a;
temp_16[t_16]=a;
t_14=(t_14+1);
t_15=(t_15+1);
t_16=(t_16+1);
}
{
C(((idy*32)+14), idx)=cal(temp_14);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-15)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-15)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_15[t_15]=a;
temp_16[t_16]=a;
temp_17[t_17]=a;
t_15=(t_15+1);
t_16=(t_16+1);
t_17=(t_17+1);
}
{
C(((idy*32)+15), idx)=cal(temp_15);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-16)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-16)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_16[t_16]=a;
temp_17[t_17]=a;
temp_18[t_18]=a;
t_16=(t_16+1);
t_17=(t_17+1);
t_18=(t_18+1);
}
{
C(((idy*32)+16), idx)=cal(temp_16);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-17)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-17)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_17[t_17]=a;
temp_18[t_18]=a;
temp_19[t_19]=a;
t_17=(t_17+1);
t_18=(t_18+1);
t_19=(t_19+1);
}
{
C(((idy*32)+17), idx)=cal(temp_17);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-18)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-18)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_18[t_18]=a;
temp_19[t_19]=a;
temp_20[t_20]=a;
t_18=(t_18+1);
t_19=(t_19+1);
t_20=(t_20+1);
}
{
C(((idy*32)+18), idx)=cal(temp_18);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-19)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-19)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_19[t_19]=a;
temp_20[t_20]=a;
temp_21[t_21]=a;
t_19=(t_19+1);
t_20=(t_20+1);
t_21=(t_21+1);
}
{
C(((idy*32)+19), idx)=cal(temp_19);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-20)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-20)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_20[t_20]=a;
temp_21[t_21]=a;
temp_22[t_22]=a;
t_20=(t_20+1);
t_21=(t_21+1);
t_22=(t_22+1);
}
{
C(((idy*32)+20), idx)=cal(temp_20);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-21)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-21)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_21[t_21]=a;
temp_22[t_22]=a;
temp_23[t_23]=a;
t_21=(t_21+1);
t_22=(t_22+1);
t_23=(t_23+1);
}
{
C(((idy*32)+21), idx)=cal(temp_21);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-22)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-22)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_22[t_22]=a;
temp_23[t_23]=a;
temp_24[t_24]=a;
t_22=(t_22+1);
t_23=(t_23+1);
t_24=(t_24+1);
}
{
C(((idy*32)+22), idx)=cal(temp_22);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-23)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-23)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_23[t_23]=a;
temp_24[t_24]=a;
temp_25[t_25]=a;
t_23=(t_23+1);
t_24=(t_24+1);
t_25=(t_25+1);
}
{
C(((idy*32)+23), idx)=cal(temp_23);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-24)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-24)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_24[t_24]=a;
temp_25[t_25]=a;
temp_26[t_26]=a;
t_24=(t_24+1);
t_25=(t_25+1);
t_26=(t_26+1);
}
{
C(((idy*32)+24), idx)=cal(temp_24);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-25)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-25)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_25[t_25]=a;
temp_26[t_26]=a;
temp_27[t_27]=a;
t_25=(t_25+1);
t_26=(t_26+1);
t_27=(t_27+1);
}
{
C(((idy*32)+25), idx)=cal(temp_25);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-26)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-26)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_26[t_26]=a;
temp_27[t_27]=a;
temp_28[t_28]=a;
t_26=(t_26+1);
t_27=(t_27+1);
t_28=(t_28+1);
}
{
C(((idy*32)+26), idx)=cal(temp_26);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-27)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-27)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_27[t_27]=a;
temp_28[t_28]=a;
temp_29[t_29]=a;
t_27=(t_27+1);
t_28=(t_28+1);
t_29=(t_29+1);
}
{
C(((idy*32)+27), idx)=cal(temp_27);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-28)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-28)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_28[t_28]=a;
temp_29[t_29]=a;
temp_30[t_30]=a;
t_28=(t_28+1);
t_29=(t_29+1);
t_30=(t_30+1);
}
{
C(((idy*32)+28), idx)=cal(temp_28);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-29)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-29)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_29[t_29]=a;
temp_30[t_30]=a;
temp_31[t_31]=a;
t_29=(t_29+1);
t_30=(t_30+1);
t_31=(t_31+1);
}
{
C(((idy*32)+29), idx)=cal(temp_29);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-30)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-30)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_30[t_30]=a;
temp_31[t_31]=a;
t_30=(t_30+1);
t_31=(t_31+1);
}
{
C(((idy*32)+30), idx)=cal(temp_30);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-31)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-31)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_31[t_31]=a;
t_31=(t_31+1);
}
{
C(((idy*32)+31), idx)=cal(temp_31);
}
__syncthreads();
}
| 452c06c764a0c4597ad7f7545c5c4afe9ba84c8b.cu | #define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 32
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define A(y,x) A[(y)*WIDTH_A+(x)]
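// Machine-generated, fully unrolled stencil kernel: each thread produces a 32-row column of C,
// staging rows of A through shared memory (256 threads plus a 16-element halo = 272 floats) and
// keeping the current 3x3 neighbourhood in the temp_* arrays before writing cal(neighbourhood)
// -- presumably the regional-maximum test, defined elsewhere -- to the matching element of C.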
__global__ void imregionmax(float * A, float * C, int width)
{
__shared__ float shared_0[272];
float temp_0[9];
float temp_1[9];
float temp_2[9];
float temp_3[9];
float temp_4[9];
float temp_5[9];
float temp_6[9];
float temp_7[9];
float temp_8[9];
float temp_9[9];
float temp_10[9];
float temp_11[9];
float temp_12[9];
float temp_13[9];
float temp_14[9];
float temp_15[9];
float temp_16[9];
float temp_17[9];
float temp_18[9];
float temp_19[9];
float temp_20[9];
float temp_21[9];
float temp_22[9];
float temp_23[9];
float temp_24[9];
float temp_25[9];
float temp_26[9];
float temp_27[9];
float temp_28[9];
float temp_29[9];
float temp_30[9];
float temp_31[9];
int t_0;
int t_1;
int t_2;
int t_3;
int t_4;
int t_5;
int t_6;
int t_7;
int t_8;
int t_9;
int t_10;
int t_11;
int t_12;
int t_13;
int t_14;
int t_15;
int t_16;
int t_17;
int t_18;
int t_19;
int t_20;
int t_21;
int t_22;
int t_23;
int t_24;
int t_25;
int t_26;
int t_27;
int t_28;
int t_29;
int t_30;
int t_31;
int it_1;
t_0=0;
t_1=0;
t_2=0;
t_3=0;
t_4=0;
t_5=0;
t_6=0;
t_7=0;
t_8=0;
t_9=0;
t_10=0;
t_11=0;
t_12=0;
t_13=0;
t_14=0;
t_15=0;
t_16=0;
t_17=0;
t_18=0;
t_19=0;
t_20=0;
t_21=0;
t_22=0;
t_23=0;
t_24=0;
t_25=0;
t_26=0;
t_27=0;
t_28=0;
t_29=0;
t_30=0;
t_31=0;
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(3-1)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(3-1)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_0[t_0]=a;
t_0=(t_0+1);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(3-2)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(3-2)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_0[t_0]=a;
temp_1[t_1]=a;
t_0=(t_0+1);
t_1=(t_1+1);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(3-3)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(3-3)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_0[t_0]=a;
temp_1[t_1]=a;
temp_2[t_2]=a;
t_0=(t_0+1);
t_1=(t_1+1);
t_2=(t_2+1);
}
{
C(((idy*32)+0), idx)=cal(temp_0);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-1)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-1)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_1[t_1]=a;
temp_2[t_2]=a;
temp_3[t_3]=a;
t_1=(t_1+1);
t_2=(t_2+1);
t_3=(t_3+1);
}
{
C(((idy*32)+1), idx)=cal(temp_1);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-2)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-2)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_2[t_2]=a;
temp_3[t_3]=a;
temp_4[t_4]=a;
t_2=(t_2+1);
t_3=(t_3+1);
t_4=(t_4+1);
}
{
C(((idy*32)+2), idx)=cal(temp_2);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-3)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-3)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_3[t_3]=a;
temp_4[t_4]=a;
temp_5[t_5]=a;
t_3=(t_3+1);
t_4=(t_4+1);
t_5=(t_5+1);
}
{
C(((idy*32)+3), idx)=cal(temp_3);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-4)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-4)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_4[t_4]=a;
temp_5[t_5]=a;
temp_6[t_6]=a;
t_4=(t_4+1);
t_5=(t_5+1);
t_6=(t_6+1);
}
{
C(((idy*32)+4), idx)=cal(temp_4);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-5)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-5)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_5[t_5]=a;
temp_6[t_6]=a;
temp_7[t_7]=a;
t_5=(t_5+1);
t_6=(t_6+1);
t_7=(t_7+1);
}
{
C(((idy*32)+5), idx)=cal(temp_5);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-6)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-6)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_6[t_6]=a;
temp_7[t_7]=a;
temp_8[t_8]=a;
t_6=(t_6+1);
t_7=(t_7+1);
t_8=(t_8+1);
}
{
C(((idy*32)+6), idx)=cal(temp_6);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-7)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-7)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_7[t_7]=a;
temp_8[t_8]=a;
temp_9[t_9]=a;
t_7=(t_7+1);
t_8=(t_8+1);
t_9=(t_9+1);
}
{
C(((idy*32)+7), idx)=cal(temp_7);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-8)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-8)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_8[t_8]=a;
temp_9[t_9]=a;
temp_10[t_10]=a;
t_8=(t_8+1);
t_9=(t_9+1);
t_10=(t_10+1);
}
{
C(((idy*32)+8), idx)=cal(temp_8);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-9)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-9)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_9[t_9]=a;
temp_10[t_10]=a;
temp_11[t_11]=a;
t_9=(t_9+1);
t_10=(t_10+1);
t_11=(t_11+1);
}
{
C(((idy*32)+9), idx)=cal(temp_9);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-10)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-10)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_10[t_10]=a;
temp_11[t_11]=a;
temp_12[t_12]=a;
t_10=(t_10+1);
t_11=(t_11+1);
t_12=(t_12+1);
}
{
C(((idy*32)+10), idx)=cal(temp_10);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-11)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-11)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_11[t_11]=a;
temp_12[t_12]=a;
temp_13[t_13]=a;
t_11=(t_11+1);
t_12=(t_12+1);
t_13=(t_13+1);
}
{
C(((idy*32)+11), idx)=cal(temp_11);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-12)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-12)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_12[t_12]=a;
temp_13[t_13]=a;
temp_14[t_14]=a;
t_12=(t_12+1);
t_13=(t_13+1);
t_14=(t_14+1);
}
{
C(((idy*32)+12), idx)=cal(temp_12);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-13)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-13)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_13[t_13]=a;
temp_14[t_14]=a;
temp_15[t_15]=a;
t_13=(t_13+1);
t_14=(t_14+1);
t_15=(t_15+1);
}
{
C(((idy*32)+13), idx)=cal(temp_13);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-14)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-14)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_14[t_14]=a;
temp_15[t_15]=a;
temp_16[t_16]=a;
t_14=(t_14+1);
t_15=(t_15+1);
t_16=(t_16+1);
}
{
C(((idy*32)+14), idx)=cal(temp_14);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-15)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-15)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_15[t_15]=a;
temp_16[t_16]=a;
temp_17[t_17]=a;
t_15=(t_15+1);
t_16=(t_16+1);
t_17=(t_17+1);
}
{
C(((idy*32)+15), idx)=cal(temp_15);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-16)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-16)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_16[t_16]=a;
temp_17[t_17]=a;
temp_18[t_18]=a;
t_16=(t_16+1);
t_17=(t_17+1);
t_18=(t_18+1);
}
{
C(((idy*32)+16), idx)=cal(temp_16);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-17)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-17)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_17[t_17]=a;
temp_18[t_18]=a;
temp_19[t_19]=a;
t_17=(t_17+1);
t_18=(t_18+1);
t_19=(t_19+1);
}
{
C(((idy*32)+17), idx)=cal(temp_17);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-18)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-18)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_18[t_18]=a;
temp_19[t_19]=a;
temp_20[t_20]=a;
t_18=(t_18+1);
t_19=(t_19+1);
t_20=(t_20+1);
}
{
C(((idy*32)+18), idx)=cal(temp_18);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-19)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-19)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_19[t_19]=a;
temp_20[t_20]=a;
temp_21[t_21]=a;
t_19=(t_19+1);
t_20=(t_20+1);
t_21=(t_21+1);
}
{
C(((idy*32)+19), idx)=cal(temp_19);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-20)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-20)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_20[t_20]=a;
temp_21[t_21]=a;
temp_22[t_22]=a;
t_20=(t_20+1);
t_21=(t_21+1);
t_22=(t_22+1);
}
{
C(((idy*32)+20), idx)=cal(temp_20);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-21)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-21)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_21[t_21]=a;
temp_22[t_22]=a;
temp_23[t_23]=a;
t_21=(t_21+1);
t_22=(t_22+1);
t_23=(t_23+1);
}
{
C(((idy*32)+21), idx)=cal(temp_21);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-22)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-22)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_22[t_22]=a;
temp_23[t_23]=a;
temp_24[t_24]=a;
t_22=(t_22+1);
t_23=(t_23+1);
t_24=(t_24+1);
}
{
C(((idy*32)+22), idx)=cal(temp_22);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-23)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-23)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_23[t_23]=a;
temp_24[t_24]=a;
temp_25[t_25]=a;
t_23=(t_23+1);
t_24=(t_24+1);
t_25=(t_25+1);
}
{
C(((idy*32)+23), idx)=cal(temp_23);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-24)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-24)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_24[t_24]=a;
temp_25[t_25]=a;
temp_26[t_26]=a;
t_24=(t_24+1);
t_25=(t_25+1);
t_26=(t_26+1);
}
{
C(((idy*32)+24), idx)=cal(temp_24);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-25)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-25)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_25[t_25]=a;
temp_26[t_26]=a;
temp_27[t_27]=a;
t_25=(t_25+1);
t_26=(t_26+1);
t_27=(t_27+1);
}
{
C(((idy*32)+25), idx)=cal(temp_25);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-26)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-26)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_26[t_26]=a;
temp_27[t_27]=a;
temp_28[t_28]=a;
t_26=(t_26+1);
t_27=(t_27+1);
t_28=(t_28+1);
}
{
C(((idy*32)+26), idx)=cal(temp_26);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-27)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-27)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_27[t_27]=a;
temp_28[t_28]=a;
temp_29[t_29]=a;
t_27=(t_27+1);
t_28=(t_28+1);
t_29=(t_29+1);
}
{
C(((idy*32)+27), idx)=cal(temp_27);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-28)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-28)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_28[t_28]=a;
temp_29[t_29]=a;
temp_30[t_30]=a;
t_28=(t_28+1);
t_29=(t_29+1);
t_30=(t_30+1);
}
{
C(((idy*32)+28), idx)=cal(temp_28);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-29)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-29)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_29[t_29]=a;
temp_30[t_30]=a;
temp_31[t_31]=a;
t_29=(t_29+1);
t_30=(t_30+1);
t_31=(t_31+1);
}
{
C(((idy*32)+29), idx)=cal(temp_29);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-30)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-30)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_30[t_30]=a;
temp_31[t_31]=a;
t_30=(t_30+1);
t_31=(t_31+1);
}
{
C(((idy*32)+30), idx)=cal(temp_30);
}
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*32)+(( - 1)*(0-31)))+16), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*32)+(( - 1)*(0-31)))+16), ((idx+(( - 1)*0))+16));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(( - 1)*it_1))+16)];
temp_31[t_31]=a;
t_31=(t_31+1);
}
{
C(((idy*32)+31), idx)=cal(temp_31);
}
__syncthreads();
}
|
8a77e5054eb496010c79792fbbf8fba2b981732b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "volrenraycastraf.cuda.h"
#include <stdio.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "cutil_math.h"
static const int maxBlockSize2D = 16;
static texture<float, hipTextureType3D, hipReadModeElementType> volTex;
static texture<float4, hipTextureType1D, hipReadModeElementType> tfTex;
static texture<float4, hipTextureType2D, hipReadModeElementType> entryTex;
static texture<float4, hipTextureType2D, hipReadModeElementType> exitTex;
static dim3 getDimBlock2D(int w, int h)
{
dim3 dimBlock;
if (w < maxBlockSize2D)
dimBlock.x = w;
else
dimBlock.x = maxBlockSize2D;
if (h < maxBlockSize2D)
dimBlock.y = h;
else
dimBlock.y = maxBlockSize2D;
return dimBlock;
}
static dim3 getDimGrid2D(int w, int h)
{
dim3 dimGrid;
if (w < maxBlockSize2D)
dimGrid.x = 1;
else
dimGrid.x = int(ceil(float(w) / maxBlockSize2D));
if (h < maxBlockSize2D)
dimGrid.y = 1;
else
dimGrid.y = int(ceil(float(h) / maxBlockSize2D));
return dimGrid;
}
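// A sketch of how the two helpers above are meant to be combined when launching the ray-casting
// kernel (the actual wrapper sits outside this excerpt; texWidth/texHeight stand in for the
// output RAF texture size):
// dim3 dimBlock = getDimBlock2D(texWidth, texHeight);
// dim3 dimGrid = getDimGrid2D(texWidth, texHeight);
// hipLaunchKernelGGL(castray, dimGrid, dimBlock, 0, 0, /* kernel arguments... */);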
__host__ __device__ static int getBinID(float* binDivs, int layers, float scalar, int begLayer = 0)
{
if (scalar == 1.f)
return layers - 1;
for (int iLayer = begLayer; iLayer < layers; ++iLayer)
{
float lower = binDivs[iLayer];
float upper = binDivs[iLayer + 1];
if (scalar >= lower && scalar < upper)
return iLayer;
}
return -1;
}
// | 0 4 8 12 | | 0 |
// | 1 5 9 13 | | 1 |
// | 2 6 10 14 | | 2 |
// | 3 7 11 15 | | 3 |
__device__ float4 mat4x4_mult_vec4(float* mat, float4 vec)
{
float4 out;
out.x = mat[0] * vec.x + mat[4] * vec.y + mat[8] * vec.z + mat[12] * vec.w;
out.y = mat[1] * vec.x + mat[5] * vec.y + mat[9] * vec.z + mat[13] * vec.w;
out.z = mat[2] * vec.x + mat[6] * vec.y + mat[10] * vec.z + mat[14] * vec.w;
out.w = mat[3] * vec.x + mat[7] * vec.y + mat[11] * vec.z + mat[15] * vec.w;
return out;
}
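// Example (assuming the column-major layout sketched above with an affine transform,
// i.e. vec.w == 1): out.x = m00*x + m01*y + m02*z + tx corresponds to
// mat[0]*vec.x + mat[4]*vec.y + mat[8]*vec.z + mat[12], so the translation occupies mat[12..14].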
__device__ float tf2sample(float tfVal)
{
return tfVal + 0.5f;
}
__device__ float sample2tf(float sample)
{
return sample - 0.5f;
}
__device__ float scalar2sample(float scalar, int tfSize)
{
return scalar * float(tfSize);
}
__device__ float sample2scalar(float sample, int tfSize)
{
return sample / float(tfSize);
}
__device__ float scalar2tf(float scalar, int tfSize)
{
return sample2tf(scalar2sample(scalar, tfSize));
}
__device__ float tf2scalar(float tfVal, int tfSize)
{
return sample2scalar(tf2sample(tfVal), tfSize);
}
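// Worked example (hypothetical tfSize = 256): scalar 0.5 -> sample 128.0 -> tf 127.5.
// The +/-0.5 shift is the half-texel offset used when sampling tfTex, and
// tf2scalar(scalar2tf(s, tfSize), tfSize) == s, so the two mappings are exact inverses.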
__global__ static void castray(int volWidth, int volHeight, int volDepth,
int tfSize, float stepSize, bool preinteg,
float scalarMin, float scalarMax,
int texWidth, int texHeight, int layers, float* binDivs,
float* rafPtr,
float* mPtr, float* mvPtr, float near, float far, float* depPtr)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= texWidth || y >= texHeight)
return;
const float baseSample = 0.01f;
float3 entry = make_float3(tex2D(entryTex, x + 0.5f, y + 0.5f));
float3 exit = make_float3(tex2D(exitTex, x + 0.5f, y + 0.5f));
float3 dir = normalize(exit - entry);
float maxLength = length(exit - entry);
float4 entryObj = mat4x4_mult_vec4(mPtr, make_float4(entry, 1.f));
float4 entryView = mat4x4_mult_vec4(mvPtr, make_float4(entry, 1.f));
float2 scalar = make_float2(0.f, 0.f);
scalar.y = tex3D(volTex, entryObj.x, entryObj.y, entryObj.z);
scalar.y = clamp(float((scalar.y - scalarMin) / (scalarMax - scalarMin)), 0.f, 1.f);
float2 depth = make_float2(0.f, 0.f);
depth.y = (-entryView.z - near) / (far - near);
depth.y = clamp(depth.y, 0.f, 1.f);
float4 acc = make_float4(0.f, 0.f, 0.f, 0.f);
for (int step = 1; step * stepSize < maxLength; ++step)
{
float3 spot = entry + dir * (step * stepSize);
float4 spotObj = mat4x4_mult_vec4(mPtr, make_float4(spot, 1.f));
float4 spotView = mat4x4_mult_vec4(mvPtr, make_float4(spot, 1.f));
depth.x = (-spotView.z - near) / (far - near);
depth.x = clamp(depth.x, 0.f, 1.f);
scalar.x = tex3D(volTex, spotObj.x, spotObj.y, spotObj.z);
scalar.x = clamp(float((scalar.x - scalarMin) / (scalarMax - scalarMin)), 0.f, 1.f);
// if preintegration is not enabled
if (!preinteg)
{
float4 spotColor = tex1D(tfTex, scalar2sample(scalar.x, tfSize));
spotColor.w = 1.f - pow(1.f - spotColor.w, stepSize / baseSample);
spotColor.x *= spotColor.w;
spotColor.y *= spotColor.w;
spotColor.z *= spotColor.w;
float4 spotAtten = spotColor * (1.f - acc.w);
int binID = getBinID(binDivs, layers, scalar.x);
int outLoc = binID * texWidth * texHeight + y * texWidth + x;
rafPtr[outLoc] += spotAtten.w;
// accumulate
acc += spotAtten;
// depth
if (depth.x < depPtr[outLoc])
depPtr[outLoc] = depth.x;
} else
{
float tfCoordBeg = scalar2tf(scalar.y, tfSize);
float tfCoordEnd = scalar2tf(scalar.x, tfSize);
// find the TF bucket
int tfBeg = int(floor(tfCoordBeg));
int tfEnd = int(floor(tfCoordEnd));
// if they are in the same TF bucket
if (tfBeg == tfEnd)
{
float4 spotColor = tex1D(tfTex, scalar2sample(scalar.x, tfSize));
spotColor.w = 1.f - pow(1.f - spotColor.w, stepSize / baseSample);
spotColor.x *= spotColor.w;
spotColor.y *= spotColor.w;
spotColor.z *= spotColor.w;
float4 spotAtten = spotColor * (1.f - acc.w);
int binID = getBinID(binDivs, layers, scalar.x);
int outLoc = binID * texWidth * texHeight + y * texWidth + x;
rafPtr[outLoc] += spotAtten.w;
// accumulate
acc += spotAtten;
// depth
if (depth.x < depPtr[outLoc])
depPtr[outLoc] = depth.x;
} else
{
float4 spotColor, spotAtten;
float miniStepsize, miniDepth;
int binID, tfBin, outLoc;
int dir = abs(tfEnd - tfBeg) / (tfEnd - tfBeg);
tfBin = tfBeg + max(dir, 0);
miniStepsize = (float(tfBin) - tfCoordBeg) / (tfCoordEnd - tfCoordBeg) * stepSize;
spotColor = tex1D(tfTex, tf2sample(float(tfBin)));
spotColor.w = 1.f - pow(1.f - spotColor.w, miniStepsize / baseSample);
spotColor.x *= spotColor.w;
spotColor.y *= spotColor.w;
spotColor.z *= spotColor.w;
spotAtten = spotColor * (1.f - acc.w);
binID = getBinID(binDivs, layers, tf2scalar(float(tfBin), tfSize));
outLoc = binID * texWidth * texHeight + y * texWidth + x;
rafPtr[outLoc] += spotAtten.w;
acc += spotAtten;
miniDepth = (float(tfBin) - tfCoordBeg) / (tfCoordEnd - tfCoordBeg) * (depth.x - depth.y) + depth.y;
if (miniDepth < depPtr[outLoc])
depPtr[outLoc] = miniDepth;
for (int tfBin = tfBeg + max(dir, 0) + dir; tfBin != tfEnd + max(dir, 0); tfBin += dir)
{
miniStepsize = float(dir) / (tfCoordEnd - tfCoordBeg) * stepSize;
spotColor = tex1D(tfTex, tf2sample(float(tfBin)));
spotColor.w = 1.f - pow(1.f - spotColor.w, miniStepsize / baseSample);
spotColor.x *= spotColor.w;
spotColor.y *= spotColor.w;
spotColor.z *= spotColor.w;
spotAtten = spotColor * (1.f - acc.w);
binID = getBinID(binDivs, layers, tf2scalar(float(tfBin), tfSize));
outLoc = binID * texWidth * texHeight + y * texWidth + x;
rafPtr[outLoc] += spotAtten.w;
acc += spotAtten;
miniDepth = (float(tfBin) - tfCoordBeg) / (tfCoordEnd - tfCoordBeg) * (depth.x - depth.y) + depth.y;
if (miniDepth < depPtr[outLoc])
depPtr[outLoc] = miniDepth;
}
miniStepsize = (tfCoordEnd - float(tfEnd + max(-dir, 0))) / (tfCoordEnd - tfCoordBeg) * stepSize;
spotColor = tex1D(tfTex, tf2sample(tfCoordEnd));
spotColor.w = 1.f - pow(1.f - spotColor.w, miniStepsize / baseSample);
spotColor.x *= spotColor.w;
spotColor.y *= spotColor.w;
spotColor.z *= spotColor.w;
spotAtten = spotColor * (1.f - acc.w);
binID = getBinID(binDivs, layers, tf2scalar(tfCoordEnd, tfSize));
outLoc = binID * texWidth * texHeight + y * texWidth + x;
rafPtr[outLoc] += spotAtten.w;
acc += spotAtten;
if (depth.x < depPtr[outLoc])
depPtr[outLoc] = depth.x;
}
}
if (acc.w > 0.999f)
break;
depth.y = depth.x;
scalar.y = scalar.x;
}
}
void rafcast(int volWidth, int volHeight, int volDepth, hipArray *volArr,
int tfSize, float stepSize, hipTextureFilterMode filter, bool preinteg, hipArray *tfArr,
float scalarMin, float scalarMax,
int texWidth, int texHeight, hipArray *entryArr, hipArray *exitArr,
int layers, float *binDivs, float *rafPtr, float *mPtr, float *mvPtr, float near, float far, float *depPtr)
{
// bind textures
hipBindTextureToArray(volTex, volArr);
volTex.filterMode = hipFilterModeLinear;
hipBindTextureToArray(tfTex, tfArr);
tfTex.filterMode = filter;
hipBindTextureToArray(entryTex, entryArr);
entryTex.filterMode = hipFilterModeLinear;
hipBindTextureToArray(exitTex, exitArr);
exitTex.filterMode = hipFilterModeLinear;
// clear raf and dep resource
hipMemset(rafPtr, 0, texWidth * texHeight * layers * sizeof(float));
thrust::device_ptr<float> depPtrDev = thrust::device_pointer_cast(depPtr);
thrust::fill(depPtrDev, depPtrDev + texWidth * texHeight * layers, 1.f);
// RAF bin divisions
// float* binDivs = new float [layers + 1];
// fillBinDivs(binDivs, layers);
float* devBinDivs;
hipMalloc(&devBinDivs, (layers + 1) * sizeof(float));
hipMemcpy(devBinDivs, binDivs, (layers + 1) * sizeof(float), hipMemcpyHostToDevice);
// cuda kernel
dim3 dimBlock = getDimBlock2D(texWidth, texHeight);
dim3 dimGrid = getDimGrid2D(texWidth, texHeight);
hipLaunchKernelGGL(( castray), dim3(dimGrid), dim3(dimBlock), 0, 0, volWidth, volHeight, volDepth,
tfSize, stepSize, preinteg,
scalarMin, scalarMax,
texWidth, texHeight, layers, devBinDivs,
rafPtr,
mPtr, mvPtr, near, far, depPtr);
// free memory
hipFree(devBinDivs);
// delete [] binDivs;
// unbind textures
hipUnbindTexture(exitTex);
hipUnbindTexture(entryTex);
hipUnbindTexture(tfTex);
hipUnbindTexture(volTex);
}
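// Layout note (implied by outLoc = binID*texWidth*texHeight + y*texWidth + x in castray):
// rafPtr and depPtr are 'layers' stacked texWidth x texHeight slices, so callers must
// provide texWidth*texHeight*layers floats for each, matching the hipMemset and
// thrust::fill extents above.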
| 8a77e5054eb496010c79792fbbf8fba2b981732b.cu | #include "volrenraycastraf.cuda.h"
#include <stdio.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "cutil_math.h"
static const int maxBlockSize2D = 16;
static texture<float, cudaTextureType3D, cudaReadModeElementType> volTex;
static texture<float4, cudaTextureType1D, cudaReadModeElementType> tfTex;
static texture<float4, cudaTextureType2D, cudaReadModeElementType> entryTex;
static texture<float4, cudaTextureType2D, cudaReadModeElementType> exitTex;
static dim3 getDimBlock2D(int w, int h)
{
dim3 dimBlock;
if (w < maxBlockSize2D)
dimBlock.x = w;
else
dimBlock.x = maxBlockSize2D;
if (h < maxBlockSize2D)
dimBlock.y = h;
else
dimBlock.y = maxBlockSize2D;
return dimBlock;
}
static dim3 getDimGrid2D(int w, int h)
{
dim3 dimGrid;
if (w < maxBlockSize2D)
dimGrid.x = 1;
else
dimGrid.x = int(ceil(float(w) / maxBlockSize2D));
if (h < maxBlockSize2D)
dimGrid.y = 1;
else
dimGrid.y = int(ceil(float(h) / maxBlockSize2D));
return dimGrid;
}
__host__ __device__ static int getBinID(float* binDivs, int layers, float scalar, int begLayer = 0)
{
if (scalar == 1.f)
return layers - 1;
for (int iLayer = begLayer; iLayer < layers; ++iLayer)
{
float lower = binDivs[iLayer];
float upper = binDivs[iLayer + 1];
if (scalar >= lower && scalar < upper)
return iLayer;
}
return -1;
}
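// Note: getBinID() is a linear scan over the bin boundaries; it returns -1 when the
// scalar falls outside [binDivs[begLayer], binDivs[layers]), and the scalar == 1.f
// special case folds the inclusive upper edge into the last bin.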
// | 0 4 8 12 | | 0 |
// | 1 5 9 13 | | 1 |
// | 2 6 10 14 | | 2 |
// | 3 7 11 15 | | 3 |
__device__ float4 mat4x4_mult_vec4(float* mat, float4 vec)
{
float4 out;
out.x = mat[0] * vec.x + mat[4] * vec.y + mat[8] * vec.z + mat[12] * vec.w;
out.y = mat[1] * vec.x + mat[5] * vec.y + mat[9] * vec.z + mat[13] * vec.w;
out.z = mat[2] * vec.x + mat[6] * vec.y + mat[10] * vec.z + mat[14] * vec.w;
out.w = mat[3] * vec.x + mat[7] * vec.y + mat[11] * vec.z + mat[15] * vec.w;
return out;
}
__device__ float tf2sample(float tfVal)
{
return tfVal + 0.5f;
}
__device__ float sample2tf(float sample)
{
return sample - 0.5f;
}
__device__ float scalar2sample(float scalar, int tfSize)
{
return scalar * float(tfSize);
}
__device__ float sample2scalar(float sample, int tfSize)
{
return sample / float(tfSize);
}
__device__ float scalar2tf(float scalar, int tfSize)
{
return sample2tf(scalar2sample(scalar, tfSize));
}
__device__ float tf2scalar(float tfVal, int tfSize)
{
return sample2scalar(tf2sample(tfVal), tfSize);
}
__global__ static void castray(int volWidth, int volHeight, int volDepth,
int tfSize, float stepSize, bool preinteg,
float scalarMin, float scalarMax,
int texWidth, int texHeight, int layers, float* binDivs,
float* rafPtr,
float* mPtr, float* mvPtr, float near, float far, float* depPtr)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= texWidth || y >= texHeight)
return;
const float baseSample = 0.01f;
float3 entry = make_float3(tex2D(entryTex, x + 0.5f, y + 0.5f));
float3 exit = make_float3(tex2D(exitTex, x + 0.5f, y + 0.5f));
float3 dir = normalize(exit - entry);
float maxLength = length(exit - entry);
float4 entryObj = mat4x4_mult_vec4(mPtr, make_float4(entry, 1.f));
float4 entryView = mat4x4_mult_vec4(mvPtr, make_float4(entry, 1.f));
float2 scalar = make_float2(0.f, 0.f);
scalar.y = tex3D(volTex, entryObj.x, entryObj.y, entryObj.z);
scalar.y = clamp(float((scalar.y - scalarMin) / (scalarMax - scalarMin)), 0.f, 1.f);
float2 depth = make_float2(0.f, 0.f);
depth.y = (-entryView.z - near) / (far - near);
depth.y = clamp(depth.y, 0.f, 1.f);
float4 acc = make_float4(0.f, 0.f, 0.f, 0.f);
for (int step = 1; step * stepSize < maxLength; ++step)
{
float3 spot = entry + dir * (step * stepSize);
float4 spotObj = mat4x4_mult_vec4(mPtr, make_float4(spot, 1.f));
float4 spotView = mat4x4_mult_vec4(mvPtr, make_float4(spot, 1.f));
depth.x = (-spotView.z - near) / (far - near);
depth.x = clamp(depth.x, 0.f, 1.f);
scalar.x = tex3D(volTex, spotObj.x, spotObj.y, spotObj.z);
scalar.x = clamp(float((scalar.x - scalarMin) / (scalarMax - scalarMin)), 0.f, 1.f);
// if preintegration is not enabled
if (!preinteg)
{
float4 spotColor = tex1D(tfTex, scalar2sample(scalar.x, tfSize));
spotColor.w = 1.f - pow(1.f - spotColor.w, stepSize / baseSample);
spotColor.x *= spotColor.w;
spotColor.y *= spotColor.w;
spotColor.z *= spotColor.w;
float4 spotAtten = spotColor * (1.f - acc.w);
int binID = getBinID(binDivs, layers, scalar.x);
int outLoc = binID * texWidth * texHeight + y * texWidth + x;
rafPtr[outLoc] += spotAtten.w;
// accumulate
acc += spotAtten;
// depth
if (depth.x < depPtr[outLoc])
depPtr[outLoc] = depth.x;
} else
{
float tfCoordBeg = scalar2tf(scalar.y, tfSize);
float tfCoordEnd = scalar2tf(scalar.x, tfSize);
// find the TF bucket
int tfBeg = int(floor(tfCoordBeg));
int tfEnd = int(floor(tfCoordEnd));
// if they are in the same TF bucket
if (tfBeg == tfEnd)
{
float4 spotColor = tex1D(tfTex, scalar2sample(scalar.x, tfSize));
spotColor.w = 1.f - pow(1.f - spotColor.w, stepSize / baseSample);
spotColor.x *= spotColor.w;
spotColor.y *= spotColor.w;
spotColor.z *= spotColor.w;
float4 spotAtten = spotColor * (1.f - acc.w);
int binID = getBinID(binDivs, layers, scalar.x);
int outLoc = binID * texWidth * texHeight + y * texWidth + x;
rafPtr[outLoc] += spotAtten.w;
// accumulate
acc += spotAtten;
// depth
if (depth.x < depPtr[outLoc])
depPtr[outLoc] = depth.x;
} else
{
float4 spotColor, spotAtten;
float miniStepsize, miniDepth;
int binID, tfBin, outLoc;
int dir = abs(tfEnd - tfBeg) / (tfEnd - tfBeg);
tfBin = tfBeg + max(dir, 0);
miniStepsize = (float(tfBin) - tfCoordBeg) / (tfCoordEnd - tfCoordBeg) * stepSize;
spotColor = tex1D(tfTex, tf2sample(float(tfBin)));
spotColor.w = 1.f - pow(1.f - spotColor.w, miniStepsize / baseSample);
spotColor.x *= spotColor.w;
spotColor.y *= spotColor.w;
spotColor.z *= spotColor.w;
spotAtten = spotColor * (1.f - acc.w);
binID = getBinID(binDivs, layers, tf2scalar(float(tfBin), tfSize));
outLoc = binID * texWidth * texHeight + y * texWidth + x;
rafPtr[outLoc] += spotAtten.w;
acc += spotAtten;
miniDepth = (float(tfBin) - tfCoordBeg) / (tfCoordEnd - tfCoordBeg) * (depth.x - depth.y) + depth.y;
if (miniDepth < depPtr[outLoc])
depPtr[outLoc] = miniDepth;
for (int tfBin = tfBeg + max(dir, 0) + dir; tfBin != tfEnd + max(dir, 0); tfBin += dir)
{
miniStepsize = float(dir) / (tfCoordEnd - tfCoordBeg) * stepSize;
spotColor = tex1D(tfTex, tf2sample(float(tfBin)));
spotColor.w = 1.f - pow(1.f - spotColor.w, miniStepsize / baseSample);
spotColor.x *= spotColor.w;
spotColor.y *= spotColor.w;
spotColor.z *= spotColor.w;
spotAtten = spotColor * (1.f - acc.w);
binID = getBinID(binDivs, layers, tf2scalar(float(tfBin), tfSize));
outLoc = binID * texWidth * texHeight + y * texWidth + x;
rafPtr[outLoc] += spotAtten.w;
acc += spotAtten;
miniDepth = (float(tfBin) - tfCoordBeg) / (tfCoordEnd - tfCoordBeg) * (depth.x - depth.y) + depth.y;
if (miniDepth < depPtr[outLoc])
depPtr[outLoc] = miniDepth;
}
miniStepsize = (tfCoordEnd - float(tfEnd + max(-dir, 0))) / (tfCoordEnd - tfCoordBeg) * stepSize;
spotColor = tex1D(tfTex, tf2sample(tfCoordEnd));
spotColor.w = 1.f - pow(1.f - spotColor.w, miniStepsize / baseSample);
spotColor.x *= spotColor.w;
spotColor.y *= spotColor.w;
spotColor.z *= spotColor.w;
spotAtten = spotColor * (1.f - acc.w);
binID = getBinID(binDivs, layers, tf2scalar(tfCoordEnd, tfSize));
outLoc = binID * texWidth * texHeight + y * texWidth + x;
rafPtr[outLoc] += spotAtten.w;
acc += spotAtten;
if (depth.x < depPtr[outLoc])
depPtr[outLoc] = depth.x;
}
}
if (acc.w > 0.999f)
break;
depth.y = depth.x;
scalar.y = scalar.x;
}
}
void rafcast(int volWidth, int volHeight, int volDepth, cudaArray *volArr,
int tfSize, float stepSize, cudaTextureFilterMode filter, bool preinteg, cudaArray *tfArr,
float scalarMin, float scalarMax,
int texWidth, int texHeight, cudaArray *entryArr, cudaArray *exitArr,
int layers, float *binDivs, float *rafPtr, float *mPtr, float *mvPtr, float near, float far, float *depPtr)
{
// bind textures
cudaBindTextureToArray(volTex, volArr);
volTex.filterMode = cudaFilterModeLinear;
cudaBindTextureToArray(tfTex, tfArr);
tfTex.filterMode = filter;
cudaBindTextureToArray(entryTex, entryArr);
entryTex.filterMode = cudaFilterModeLinear;
cudaBindTextureToArray(exitTex, exitArr);
exitTex.filterMode = cudaFilterModeLinear;
// clear raf and dep resource
cudaMemset(rafPtr, 0, texWidth * texHeight * layers * sizeof(float));
thrust::device_ptr<float> depPtrDev = thrust::device_pointer_cast(depPtr);
thrust::fill(depPtrDev, depPtrDev + texWidth * texHeight * layers, 1.f);
// RAF bin divisions
// float* binDivs = new float [layers + 1];
// fillBinDivs(binDivs, layers);
float* devBinDivs;
cudaMalloc(&devBinDivs, (layers + 1) * sizeof(float));
cudaMemcpy(devBinDivs, binDivs, (layers + 1) * sizeof(float), cudaMemcpyHostToDevice);
// cuda kernel
dim3 dimBlock = getDimBlock2D(texWidth, texHeight);
dim3 dimGrid = getDimGrid2D(texWidth, texHeight);
castray<<<dimGrid, dimBlock>>>(volWidth, volHeight, volDepth,
tfSize, stepSize, preinteg,
scalarMin, scalarMax,
texWidth, texHeight, layers, devBinDivs,
rafPtr,
mPtr, mvPtr, near, far, depPtr);
// free memory
cudaFree(devBinDivs);
// delete [] binDivs;
// unbind textures
cudaUnbindTexture(exitTex);
cudaUnbindTexture(entryTex);
cudaUnbindTexture(tfTex);
cudaUnbindTexture(volTex);
}
|
4c542c0c86517a59bd6369457a9ae82968fa4b62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// kernel_example.cu.cc
#ifdef GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "ps_roi_pooling_op.h"
#include "tensorflow/core/util/cuda_kernel_helper.h"
#include "tensorflow/core/framework/register_types.h"
using namespace tensorflow;
// Define the CUDA kernel.
template <typename T>
__global__ void PSROIPoolingGradCudaKernel(const int size, const T* in, T* out) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
out[i] = static_cast<T>(2) * ldg(in + i);
}
}
template <typename T>
void PSROIPoolingGradFunctorGPU<T>::operator()(const GPUDevice& d, typename TTypes<T>::ConstMatrix logits, typename TTypes<T>::ConstVec labels,
typename TTypes<T>::Matrix softmax, typename TTypes<T>::Vec alpha, typename TTypes<T>::ConstScalar gamma, typename TTypes<T>::ConstVec focal_loss,
typename TTypes<T>::Matrix grads) {
// Launch the cuda kernel.
//
// See core/util/cuda_kernel_helper.h for example of computing
// block count and thread_per_block count.
int block_count = 1024;
int thread_per_block = 20;
hipLaunchKernelGGL(( PSROIPoolingGradCudaKernel<T>)
, dim3(block_count), dim3(thread_per_block), 0, d.stream(), 1, nullptr, nullptr);
}
template struct PSROIPoolingGradFunctorGPU<float>;
// #define DEFINE_GPU_SPECS(T) \
// template struct PSROIPoolingGradFunctorGPU<T>;
// TF_CALL_GPU_NUMBER_TYPES(DEFINE_GPU_SPECS);
#endif // GOOGLE_CUDA
| 4c542c0c86517a59bd6369457a9ae82968fa4b62.cu | // kernel_example.cu.cc
#ifdef GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "ps_roi_pooling_op.h"
#include "tensorflow/core/util/cuda_kernel_helper.h"
#include "tensorflow/core/framework/register_types.h"
using namespace tensorflow;
// Define the CUDA kernel.
template <typename T>
__global__ void PSROIPoolingGradCudaKernel(const int size, const T* in, T* out) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
out[i] = static_cast<T>(2) * ldg(in + i);
}
}
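// The loop above is a standard grid-stride loop: each thread starts at its global index
// blockIdx.x * blockDim.x + threadIdx.x and advances by blockDim.x * gridDim.x, so any
// launch configuration covers all 'size' elements; ldg() reads through the read-only cache.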
template <typename T>
void PSROIPoolingGradFunctorGPU<T>::operator()(const GPUDevice& d, typename TTypes<T>::ConstMatrix logits, typename TTypes<T>::ConstVec labels,
typename TTypes<T>::Matrix softmax, typename TTypes<T>::Vec alpha, typename TTypes<T>::ConstScalar gamma, typename TTypes<T>::ConstVec focal_loss,
typename TTypes<T>::Matrix grads) {
// Launch the cuda kernel.
//
// See core/util/cuda_kernel_helper.h for example of computing
// block count and thread_per_block count.
int block_count = 1024;
int thread_per_block = 20;
PSROIPoolingGradCudaKernel<T>
<<<block_count, thread_per_block, 0, d.stream()>>>(1, nullptr, nullptr);
}
template struct PSROIPoolingGradFunctorGPU<float>;
// #define DEFINE_GPU_SPECS(T) \
// template struct PSROIPoolingGradFunctorGPU<T>;
// TF_CALL_GPU_NUMBER_TYPES(DEFINE_GPU_SPECS);
#endif // GOOGLE_CUDA
|
0aef1ce63481cf562c85678cea2c86e4b3ce69c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: jglaser
#include "TwoStepNPTMTKGPU.cuh"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
/*! \file TwoStepNPTMTKGPU.cu
\brief Defines GPU kernel code for NPT integration on the GPU using the Martyna-Tobias-Klein update equations. Used by TwoStepNPTMTKGPU.
*/
//! Shared memory used in reducing the sum of the squared velocities
extern __shared__ Scalar npt_mtk_sdata[];
//! Kernel to propagate the positions and velocities, first half of NPT update
__global__ void gpu_npt_mtk_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar exp_thermo_fac,
Scalar mat_exp_v_xx,
Scalar mat_exp_v_xy,
Scalar mat_exp_v_xz,
Scalar mat_exp_v_yy,
Scalar mat_exp_v_yz,
Scalar mat_exp_v_zz,
Scalar mat_exp_v_int_xx,
Scalar mat_exp_v_int_xy,
Scalar mat_exp_v_int_xz,
Scalar mat_exp_v_int_yy,
Scalar mat_exp_v_int_yz,
Scalar mat_exp_v_int_zz,
Scalar mat_exp_r_xx,
Scalar mat_exp_r_xy,
Scalar mat_exp_r_xz,
Scalar mat_exp_r_yy,
Scalar mat_exp_r_yz,
Scalar mat_exp_r_zz,
Scalar mat_exp_r_int_xx,
Scalar mat_exp_r_int_xy,
Scalar mat_exp_r_int_xz,
Scalar mat_exp_r_int_yy,
Scalar mat_exp_r_int_yz,
Scalar mat_exp_r_int_zz,
Scalar deltaT)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
// initialize eigenvectors
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// fetch particle position
Scalar4 pos = d_pos[idx];
// fetch particle velocity and acceleration
Scalar4 vel = d_vel[idx];
Scalar3 v = make_scalar3(vel.x, vel.y, vel.z);
Scalar3 accel = d_accel[idx];
Scalar3 r = make_scalar3(pos.x, pos.y, pos.z);
// apply thermostat update of velocity
v *= exp_thermo_fac;
// propagate velocity by half a time step and position by the full time step
// by multiplying with upper triangular matrix
v.x = mat_exp_v_xx * v.x + mat_exp_v_xy * v.y + mat_exp_v_xz * v.z;
v.y = mat_exp_v_yy * v.y + mat_exp_v_yz * v.z;
v.z = mat_exp_v_zz * v.z;
v.x += mat_exp_v_int_xx * accel.x + mat_exp_v_int_xy * accel.y + mat_exp_v_int_xz * accel.z;
v.y += mat_exp_v_int_yy * accel.y + mat_exp_v_int_yz * accel.z;
v.z += mat_exp_v_int_zz * accel.z;
r.x = mat_exp_r_xx * r.x + mat_exp_r_xy * r.y + mat_exp_r_xz * r.z;
r.y = mat_exp_r_yy * r.y + mat_exp_r_yz * r.z;
r.z = mat_exp_r_zz * r.z;
r.x += mat_exp_r_int_xx * v.x + mat_exp_r_int_xy * v.y + mat_exp_r_int_xz * v.z;
r.y += mat_exp_r_int_yy * v.y + mat_exp_r_int_yz * v.z;
r.z += mat_exp_r_int_zz * v.z;
// write out the results
d_pos[idx] = make_scalar4(r.x,r.y,r.z,pos.w);
d_vel[idx] = make_scalar4(v.x,v.y,v.z,vel.w);
}
}
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param exp_thermo_fac Update factor for thermostat
\param mat_exp_v Matrix exponential for velocity update
\param mat_exp_v_int Integrated matrix exp for velocity update
\param mat_exp_r Matrix exponential for position update
\param mat_exp_r_int Integrated matrix exp for position update
\param deltaT Time to advance (one full step)
This is just a kernel driver for gpu_npt_mtk_step_one_kernel(). See it for more details.
*/
hipError_t gpu_npt_mtk_step_one(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar exp_thermo_fac,
Scalar *mat_exp_v,
Scalar *mat_exp_v_int,
Scalar *mat_exp_r,
Scalar *mat_exp_r_int,
Scalar deltaT)
{
// setup the grid to run the kernel
unsigned int block_size = 256;
dim3 grid( (group_size / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_npt_mtk_step_one_kernel), dim3(grid), dim3(threads) , 0, 0, d_pos,
d_vel,
d_accel,
d_group_members,
group_size,
exp_thermo_fac,
mat_exp_v[0],
mat_exp_v[1],
mat_exp_v[2],
mat_exp_v[3],
mat_exp_v[4],
mat_exp_v[5],
mat_exp_v_int[0],
mat_exp_v_int[1],
mat_exp_v_int[2],
mat_exp_v_int[3],
mat_exp_v_int[4],
mat_exp_v_int[5],
mat_exp_r[0],
mat_exp_r[1],
mat_exp_r[2],
mat_exp_r[3],
mat_exp_r[4],
mat_exp_r[5],
mat_exp_r_int[0],
mat_exp_r_int[1],
mat_exp_r_int[2],
mat_exp_r_int[3],
mat_exp_r_int[4],
mat_exp_r_int[5],
deltaT);
return hipSuccess;
}
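// Packing convention implied by the argument order above: each mat_exp_* array stores the
// upper-triangular 3x3 update matrix as 6 Scalars ordered xx, xy, xz, yy, yz, zz.
// A hypothetical identity velocity update would pass mat_exp_v = {1,0,0,1,0,1} together
// with mat_exp_v_int = {0,0,0,0,0,0}.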
/*! \param N number of particles in the system
\param d_pos array of particle positions
\param d_image array of particle images
\param box The new box the particles where the particles now reside
Wrap particle positions for all particles in the box
*/
extern "C" __global__
void gpu_npt_mtk_wrap_kernel(const unsigned int N,
Scalar4 *d_pos,
int3 *d_image,
BoxDim box)
{
// determine which particle this thread works on
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// wrap ALL particles in the box
if (idx < N)
{
// fetch particle position
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// read in the image flags
int3 image = d_image[idx];
// fix periodic boundary conditions
box.wrap(pos, image);
// write out the results
d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[idx] = image;
}
}
/*! \param N number of particles in the system
\param d_pos array of particle positions
\param d_image array of particle images
\param box The new box the particles where the particles now reside
This is just a kernel driver for gpu_npt_mtk_wrap_kernel(). See it for more details.
*/
hipError_t gpu_npt_mtk_wrap(const unsigned int N,
Scalar4 *d_pos,
int3 *d_image,
const BoxDim& box)
{
// setup the grid to run the kernel
unsigned int block_size=256;
dim3 grid( (N / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_npt_mtk_wrap_kernel), dim3(grid), dim3(threads) , 0, 0, N, d_pos, d_image, box);
return hipSuccess;
}
//! Kernel to propagate the positions and velocities, second half of NPT update
__global__ void gpu_npt_mtk_step_two_kernel(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_net_force,
unsigned int *d_group_members,
unsigned int group_size,
Scalar mat_exp_v_xx,
Scalar mat_exp_v_xy,
Scalar mat_exp_v_xz,
Scalar mat_exp_v_yy,
Scalar mat_exp_v_yz,
Scalar mat_exp_v_zz,
Scalar mat_exp_v_int_xx,
Scalar mat_exp_v_int_xy,
Scalar mat_exp_v_int_xz,
Scalar mat_exp_v_int_yy,
Scalar mat_exp_v_int_yz,
Scalar mat_exp_v_int_zz,
Scalar deltaT)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// fetch particle velocity and acceleration
Scalar4 vel = d_vel[idx];
// compute acceleration
Scalar minv = Scalar(1.0)/vel.w;
Scalar4 net_force = d_net_force[idx];
Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z);
accel *= minv;
Scalar3 v = make_scalar3(vel.x, vel.y, vel.z);
// propagate velocity by half a time step by multiplying with an upper triangular matrix
v.x = mat_exp_v_xx * v.x + mat_exp_v_xy * v.y + mat_exp_v_xz * v.z;
v.y = mat_exp_v_yy * v.y + mat_exp_v_yz * v.z;
v.z = mat_exp_v_zz * v.z;
v.x += mat_exp_v_int_xx * accel.x + mat_exp_v_int_xy * accel.y + mat_exp_v_int_xz * accel.z;
v.y += mat_exp_v_int_yy * accel.y + mat_exp_v_int_yz * accel.z;
v.z += mat_exp_v_int_zz * accel.z;
// write out velocity
d_vel[idx] = make_scalar4(v.x, v.y, v.z, vel.w);
// since we calculate the acceleration, we need to write it for the next step
d_accel[idx] = accel;
}
}
/*! \param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param mat_exp_v Matrix exponential for velocity update
\param mat_exp_v_int Integrated matrix exp for velocity update
\param d_net_force Net force on each particle
\param deltaT Time to move forward in one whole step
This is just a kernel driver for gpu_npt_mtk_step_two_kernel(). See it for more details.
*/
hipError_t gpu_npt_mtk_step_two(Scalar4 *d_vel,
Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar* mat_exp_v,
Scalar* mat_exp_v_int,
Scalar deltaT)
{
// setup the grid to run the kernel
unsigned int block_size=256;
dim3 grid( (group_size / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_npt_mtk_step_two_kernel), dim3(grid), dim3(threads) , 0, 0, d_vel,
d_accel,
d_net_force,
d_group_members,
group_size,
mat_exp_v[0],
mat_exp_v[1],
mat_exp_v[2],
mat_exp_v[3],
mat_exp_v[4],
mat_exp_v[5],
mat_exp_v_int[0],
mat_exp_v_int[1],
mat_exp_v_int[2],
mat_exp_v_int[3],
mat_exp_v_int[4],
mat_exp_v_int[5],
deltaT);
return hipSuccess;
}
//! GPU kernel to perform partial reduction of temperature
__global__ void gpu_npt_mtk_temperature_partial(unsigned int *d_group_members,
unsigned int group_size,
Scalar *d_scratch,
Scalar4 *d_velocity)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar mv2_element; // element of scratch space read in
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
Scalar4 vel = d_velocity[idx];
Scalar mass = vel.w;
mv2_element = mass * (vel.x*vel.x + vel.y*vel.y + vel.z*vel.z);
}
else
{
// non-participating thread: contribute 0 to the sum
mv2_element = Scalar(0.0);
}
npt_mtk_sdata[threadIdx.x] = mv2_element;
__syncthreads();
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
npt_mtk_sdata[threadIdx.x] += npt_mtk_sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
// write out partial sum
if (threadIdx.x == 0)
d_scratch[blockIdx.x] = npt_mtk_sdata[0];
}
//! GPU kernel to perform final reduction of temperature
__global__ void gpu_npt_mtk_temperature_final_sum(Scalar *d_scratch,
Scalar *d_temperature,
unsigned int ndof,
unsigned int num_partial_sums)
{
Scalar final_sum(0.0);
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
{
npt_mtk_sdata[threadIdx.x] = d_scratch[start + threadIdx.x];
}
else
npt_mtk_sdata[threadIdx.x] = Scalar(0.0);
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
npt_mtk_sdata[threadIdx.x] += npt_mtk_sdata[threadIdx.x + offs];
offs >>=1;
__syncthreads();
}
if (threadIdx.x == 0)
final_sum += npt_mtk_sdata[0];
}
if (threadIdx.x == 0)
*d_temperature = final_sum/Scalar(ndof);
}
/*!\param d_temperature Device variable to store the temperature value (output)
\param d_vel Array of particle velocities and masses
\param d_scratch Temporary scratch space for reduction
\param num_blocks Number of CUDA blocks used in reduction
\param block_size Size of blocks used in reduction
\param d_group_members Members of group for which the reduction is performed
\param group_size Size of group
\param ndof Number of degrees of freedom of group
This function performs the reduction of the temperature on the GPU. It is just
a driver function that calls the appropriate GPU kernels.
*/
hipError_t gpu_npt_mtk_temperature(Scalar *d_temperature,
Scalar4 *d_vel,
Scalar *d_scratch,
unsigned int num_blocks,
unsigned int block_size,
unsigned int *d_group_members,
unsigned int group_size,
unsigned int ndof)
{
assert(d_temperature);
assert(d_vel);
assert(d_group_members);
assert(d_scratch);
dim3 grid(num_blocks,1,1);
dim3 threads(block_size,1,1);
unsigned int shared_bytes = sizeof(Scalar)*block_size;
// reduce squared velocity norm times mass, first pass
hipLaunchKernelGGL(( gpu_npt_mtk_temperature_partial), dim3(grid), dim3(threads), shared_bytes, 0,
d_group_members,
group_size,
d_scratch,
d_vel);
unsigned int final_block_size = 512;
grid = dim3(1,1,1);
threads = dim3(final_block_size, 1, 1);
shared_bytes = sizeof(Scalar)*final_block_size;
// reduction, second pass
hipLaunchKernelGGL(( gpu_npt_mtk_temperature_final_sum), dim3(grid), dim3(threads), shared_bytes, 0,
d_scratch,
d_temperature,
ndof,
num_blocks);
return hipSuccess;
}
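// Sizing sketch for callers (assumption, not part of the HOOMD API): the first pass writes
// one partial sum per block, so d_scratch needs num_blocks Scalars and block_size should be
// a power of two for the tree reduction, e.g.
// unsigned int block_size = 256;
// unsigned int num_blocks = (group_size + block_size - 1) / block_size;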
/*! \param d_vel array of particle velocities and masses
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param exp_v_fac_thermo scaling factor (per direction) for velocity update generated by thermostat
GPU kernel to thermostat velocities
*/
__global__ void gpu_npt_mtk_thermostat_kernel(Scalar4 *d_vel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar exp_v_fac_thermo)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// fetch particle velocity and acceleration
Scalar4 vel = d_vel[idx];
Scalar3 v = make_scalar3(vel.x, vel.y, vel.z);
v = v*exp_v_fac_thermo;
// write out the results
d_vel[idx] = make_scalar4(v.x,v.y,v.z,vel.w);
}
}
/*! \param d_vel array of particle velocities
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param xi Thermostat velocity
\param deltaT Time to move forward in one whole step
This is just a kernel driver for gpu_npt_mtk_thermostat_kernel(). See it for more details.
*/
hipError_t gpu_npt_mtk_thermostat(Scalar4 *d_vel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar xi,
Scalar deltaT)
{
// setup the grid to run the kernel
unsigned int block_size=256;
dim3 grid( (group_size / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
Scalar exp_v_fac_thermo = exp(-Scalar(1.0/2.0)*xi*deltaT);
// run the kernel
hipLaunchKernelGGL(( gpu_npt_mtk_thermostat_kernel), dim3(grid), dim3(threads) , 0, 0, d_vel,
d_group_members,
group_size,
exp_v_fac_thermo);
return hipSuccess;
}
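// Note: exp_v_fac_thermo = exp(-xi*deltaT/2), i.e. the thermostat variable xi rescales the
// group's velocities over half a time step each time this update is applied.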
| 0aef1ce63481cf562c85678cea2c86e4b3ce69c7.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: jglaser
#include "TwoStepNPTMTKGPU.cuh"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
/*! \file TwoStepNPTMTKGPU.cu
\brief Defines GPU kernel code for NPT integration on the GPU using the Martyna-Tobias-Klein update equations. Used by TwoStepNPTMTKGPU.
*/
//! Shared memory used in reducing the sum of the squared velocities
extern __shared__ Scalar npt_mtk_sdata[];
//! Kernel to propagate the positions and velocities, first half of NPT update
__global__ void gpu_npt_mtk_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar exp_thermo_fac,
Scalar mat_exp_v_xx,
Scalar mat_exp_v_xy,
Scalar mat_exp_v_xz,
Scalar mat_exp_v_yy,
Scalar mat_exp_v_yz,
Scalar mat_exp_v_zz,
Scalar mat_exp_v_int_xx,
Scalar mat_exp_v_int_xy,
Scalar mat_exp_v_int_xz,
Scalar mat_exp_v_int_yy,
Scalar mat_exp_v_int_yz,
Scalar mat_exp_v_int_zz,
Scalar mat_exp_r_xx,
Scalar mat_exp_r_xy,
Scalar mat_exp_r_xz,
Scalar mat_exp_r_yy,
Scalar mat_exp_r_yz,
Scalar mat_exp_r_zz,
Scalar mat_exp_r_int_xx,
Scalar mat_exp_r_int_xy,
Scalar mat_exp_r_int_xz,
Scalar mat_exp_r_int_yy,
Scalar mat_exp_r_int_yz,
Scalar mat_exp_r_int_zz,
Scalar deltaT)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
// initialize eigenvectors
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// fetch particle position
Scalar4 pos = d_pos[idx];
// fetch particle velocity and acceleration
Scalar4 vel = d_vel[idx];
Scalar3 v = make_scalar3(vel.x, vel.y, vel.z);
Scalar3 accel = d_accel[idx];
Scalar3 r = make_scalar3(pos.x, pos.y, pos.z);
// apply thermostat update of velocity
v *= exp_thermo_fac;
// propagate velocity by half a time step and position by the full time step
// by multiplying with upper triangular matrix
v.x = mat_exp_v_xx * v.x + mat_exp_v_xy * v.y + mat_exp_v_xz * v.z;
v.y = mat_exp_v_yy * v.y + mat_exp_v_yz * v.z;
v.z = mat_exp_v_zz * v.z;
v.x += mat_exp_v_int_xx * accel.x + mat_exp_v_int_xy * accel.y + mat_exp_v_int_xz * accel.z;
v.y += mat_exp_v_int_yy * accel.y + mat_exp_v_int_yz * accel.z;
v.z += mat_exp_v_int_zz * accel.z;
r.x = mat_exp_r_xx * r.x + mat_exp_r_xy * r.y + mat_exp_r_xz * r.z;
r.y = mat_exp_r_yy * r.y + mat_exp_r_yz * r.z;
r.z = mat_exp_r_zz * r.z;
r.x += mat_exp_r_int_xx * v.x + mat_exp_r_int_xy * v.y + mat_exp_r_int_xz * v.z;
r.y += mat_exp_r_int_yy * v.y + mat_exp_r_int_yz * v.z;
r.z += mat_exp_r_int_zz * v.z;
// write out the results
d_pos[idx] = make_scalar4(r.x,r.y,r.z,pos.w);
d_vel[idx] = make_scalar4(v.x,v.y,v.z,vel.w);
}
}
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param exp_thermo_fac Update factor for thermostat
\param mat_exp_v Matrix exponential for velocity update
\param mat_exp_v_int Integrated matrix exp for velocity update
\param mat_exp_r Matrix exponential for position update
\param mat_exp_r_int Integrated matrix exp for position update
\param deltaT Time to advance (one full step)
This is just a kernel driver for gpu_npt_mtk_step_one_kernel(). See it for more details.
*/
cudaError_t gpu_npt_mtk_step_one(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar exp_thermo_fac,
Scalar *mat_exp_v,
Scalar *mat_exp_v_int,
Scalar *mat_exp_r,
Scalar *mat_exp_r_int,
Scalar deltaT)
{
// setup the grid to run the kernel
unsigned int block_size = 256;
dim3 grid( (group_size / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
gpu_npt_mtk_step_one_kernel<<< grid, threads >>>(d_pos,
d_vel,
d_accel,
d_group_members,
group_size,
exp_thermo_fac,
mat_exp_v[0],
mat_exp_v[1],
mat_exp_v[2],
mat_exp_v[3],
mat_exp_v[4],
mat_exp_v[5],
mat_exp_v_int[0],
mat_exp_v_int[1],
mat_exp_v_int[2],
mat_exp_v_int[3],
mat_exp_v_int[4],
mat_exp_v_int[5],
mat_exp_r[0],
mat_exp_r[1],
mat_exp_r[2],
mat_exp_r[3],
mat_exp_r[4],
mat_exp_r[5],
mat_exp_r_int[0],
mat_exp_r_int[1],
mat_exp_r_int[2],
mat_exp_r_int[3],
mat_exp_r_int[4],
mat_exp_r_int[5],
deltaT);
return cudaSuccess;
}
/*! \param N number of particles in the system
\param d_pos array of particle positions
\param d_image array of particle images
\param box The new box the particles where the particles now reside
Wrap particle positions for all particles in the box
*/
extern "C" __global__
void gpu_npt_mtk_wrap_kernel(const unsigned int N,
Scalar4 *d_pos,
int3 *d_image,
BoxDim box)
{
// determine which particle this thread works on
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// wrap ALL particles in the box
if (idx < N)
{
// fetch particle position
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// read in the image flags
int3 image = d_image[idx];
// fix periodic boundary conditions
box.wrap(pos, image);
// write out the results
d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[idx] = image;
}
}
/*! \param N number of particles in the system
\param d_pos array of particle positions
\param d_image array of particle images
\param box The new box the particles where the particles now reside
This is just a kernel driver for gpu_npt_mtk_wrap_kernel(). See it for more details.
*/
cudaError_t gpu_npt_mtk_wrap(const unsigned int N,
Scalar4 *d_pos,
int3 *d_image,
const BoxDim& box)
{
// setup the grid to run the kernel
unsigned int block_size=256;
dim3 grid( (N / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
gpu_npt_mtk_wrap_kernel<<< grid, threads >>>(N, d_pos, d_image, box);
return cudaSuccess;
}
//! Kernel to propagate the positions and velocities, second half of NPT update
__global__ void gpu_npt_mtk_step_two_kernel(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_net_force,
unsigned int *d_group_members,
unsigned int group_size,
Scalar mat_exp_v_xx,
Scalar mat_exp_v_xy,
Scalar mat_exp_v_xz,
Scalar mat_exp_v_yy,
Scalar mat_exp_v_yz,
Scalar mat_exp_v_zz,
Scalar mat_exp_v_int_xx,
Scalar mat_exp_v_int_xy,
Scalar mat_exp_v_int_xz,
Scalar mat_exp_v_int_yy,
Scalar mat_exp_v_int_yz,
Scalar mat_exp_v_int_zz,
Scalar deltaT)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// fetch particle velocity and acceleration
Scalar4 vel = d_vel[idx];
// compute acceleration
Scalar minv = Scalar(1.0)/vel.w;
Scalar4 net_force = d_net_force[idx];
Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z);
accel *= minv;
Scalar3 v = make_scalar3(vel.x, vel.y, vel.z);
// propagate velocity by half a time step by multiplying with an upper triangular matrix
v.x = mat_exp_v_xx * v.x + mat_exp_v_xy * v.y + mat_exp_v_xz * v.z;
v.y = mat_exp_v_yy * v.y + mat_exp_v_yz * v.z;
v.z = mat_exp_v_zz * v.z;
v.x += mat_exp_v_int_xx * accel.x + mat_exp_v_int_xy * accel.y + mat_exp_v_int_xz * accel.z;
v.y += mat_exp_v_int_yy * accel.y + mat_exp_v_int_yz * accel.z;
v.z += mat_exp_v_int_zz * accel.z;
// write out velocity
d_vel[idx] = make_scalar4(v.x, v.y, v.z, vel.w);
// since we calculate the acceleration, we need to write it for the next step
d_accel[idx] = accel;
}
}
/*! \param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param mat_exp_v Matrix exponential for velocity update
\param mat_exp_v_int Integrated matrix exp for velocity update
\param d_net_force Net force on each particle
\param deltaT Time to move forward in one whole step
This is just a kernel driver for gpu_npt_mtk_step_two_kernel(). See it for more details.
*/
cudaError_t gpu_npt_mtk_step_two(Scalar4 *d_vel,
Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar* mat_exp_v,
Scalar* mat_exp_v_int,
Scalar deltaT)
{
// setup the grid to run the kernel
unsigned int block_size=256;
dim3 grid( (group_size / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
gpu_npt_mtk_step_two_kernel<<< grid, threads >>>(d_vel,
d_accel,
d_net_force,
d_group_members,
group_size,
mat_exp_v[0],
mat_exp_v[1],
mat_exp_v[2],
mat_exp_v[3],
mat_exp_v[4],
mat_exp_v[5],
mat_exp_v_int[0],
mat_exp_v_int[1],
mat_exp_v_int[2],
mat_exp_v_int[3],
mat_exp_v_int[4],
mat_exp_v_int[5],
deltaT);
return cudaSuccess;
}
//! GPU kernel to perform partial reduction of temperature
__global__ void gpu_npt_mtk_temperature_partial(unsigned int *d_group_members,
unsigned int group_size,
Scalar *d_scratch,
Scalar4 *d_velocity)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar mv2_element; // element of scratch space read in
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
Scalar4 vel = d_velocity[idx];
Scalar mass = vel.w;
mv2_element = mass * (vel.x*vel.x + vel.y*vel.y + vel.z*vel.z);
}
else
{
// non-participating thread: contribute 0 to the sum
mv2_element = Scalar(0.0);
}
npt_mtk_sdata[threadIdx.x] = mv2_element;
__syncthreads();
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
npt_mtk_sdata[threadIdx.x] += npt_mtk_sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
// write out partial sum
if (threadIdx.x == 0)
d_scratch[blockIdx.x] = npt_mtk_sdata[0];
}
//! GPU kernel to perform final reduction of temperature
__global__ void gpu_npt_mtk_temperature_final_sum(Scalar *d_scratch,
Scalar *d_temperature,
unsigned int ndof,
unsigned int num_partial_sums)
{
Scalar final_sum(0.0);
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
{
npt_mtk_sdata[threadIdx.x] = d_scratch[start + threadIdx.x];
}
else
npt_mtk_sdata[threadIdx.x] = Scalar(0.0);
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
npt_mtk_sdata[threadIdx.x] += npt_mtk_sdata[threadIdx.x + offs];
offs >>=1;
__syncthreads();
}
if (threadIdx.x == 0)
final_sum += npt_mtk_sdata[0];
}
if (threadIdx.x == 0)
*d_temperature = final_sum/Scalar(ndof);
}
/*!\param d_temperature Device variable to store the temperature value (output)
\param d_vel Array of particle velocities and masses
\param d_scratch Temporary scratch space for reduction
\param num_blocks Number of CUDA blocks used in reduction
\param block_size Size of blocks used in reduction
\param d_group_members Members of group for which the reduction is performed
\param group_size Size of group
\param ndof Number of degrees of freedom of group
This function performs the reduction of the temperature on the GPU. It is just
a driver function that calls the appropriate GPU kernels.
*/
cudaError_t gpu_npt_mtk_temperature(Scalar *d_temperature,
Scalar4 *d_vel,
Scalar *d_scratch,
unsigned int num_blocks,
unsigned int block_size,
unsigned int *d_group_members,
unsigned int group_size,
unsigned int ndof)
{
assert(d_temperature);
assert(d_vel);
assert(d_group_members);
assert(d_scratch);
dim3 grid(num_blocks,1,1);
dim3 threads(block_size,1,1);
unsigned int shared_bytes = sizeof(Scalar)*block_size;
// reduce squared velocity norm times mass, first pass
gpu_npt_mtk_temperature_partial<<<grid, threads, shared_bytes>>>(
d_group_members,
group_size,
d_scratch,
d_vel);
unsigned int final_block_size = 512;
grid = dim3(1,1,1);
threads = dim3(final_block_size, 1, 1);
shared_bytes = sizeof(Scalar)*final_block_size;
// reduction, second pass
gpu_npt_mtk_temperature_final_sum<<<grid, threads, shared_bytes>>>(
d_scratch,
d_temperature,
ndof,
num_blocks);
return cudaSuccess;
}
/*! \param d_vel array of particle velocities and masses
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param exp_v_fac_thermo scaling factor (per direction) for velocity update generated by thermostat
GPU kernel to thermostat velocities
*/
__global__ void gpu_npt_mtk_thermostat_kernel(Scalar4 *d_vel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar exp_v_fac_thermo)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// fetch particle velocity and acceleration
Scalar4 vel = d_vel[idx];
Scalar3 v = make_scalar3(vel.x, vel.y, vel.z);
v = v*exp_v_fac_thermo;
// write out the results
d_vel[idx] = make_scalar4(v.x,v.y,v.z,vel.w);
}
}
/*! \param d_vel array of particle velocities
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param xi Thermostat velocity
\param deltaT Time to move forward in one whole step
This is just a kernel driver for gpu_npt_mtk_thermostat_kernel(). See it for more details.
*/
cudaError_t gpu_npt_mtk_thermostat(Scalar4 *d_vel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar xi,
Scalar deltaT)
{
// setup the grid to run the kernel
unsigned int block_size=256;
dim3 grid( (group_size / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
Scalar exp_v_fac_thermo = exp(-Scalar(1.0/2.0)*xi*deltaT);
// run the kernel
gpu_npt_mtk_thermostat_kernel<<< grid, threads >>>(d_vel,
d_group_members,
group_size,
exp_v_fac_thermo);
return cudaSuccess;
}
|
c9af261bbba09357b562a6849fcd6b873e13bf2d.hip | // !!! This is a file automatically generated by hipify!!!
/* All or part of this file was contributed by Intel under license:
* Copyright (C) 2017-2018 Intel Corporation
* SPDX-License-Identifier: MIT
*/
#include <hip/hip_runtime.h>
#include <limits>
#include "data/types.h"
#include "tensors/tensor.h"
#include "translator/helpers.h"
namespace marian {
namespace gpu {
__global__ void gSetColumn(float* d_in,
size_t n_columns,
size_t n_rows,
size_t noColumn,
float value) {
size_t rowNumber = threadIdx.x + blockDim.x * blockIdx.x;
size_t index = noColumn + rowNumber * n_columns;
if(index < n_columns * n_rows) {
d_in[index] = value;
}
}
void SetColumn(Tensor in_, size_t col, float value) {
int nRows = in_->shape().elements() / in_->shape()[-1];
int nColumns = in_->shape()[-1];
int nBlocks = nRows / 512 + ((nRows % 512 == 0) ? 0 : 1);
int nThreads = ::min(512, nRows);
hipLaunchKernelGGL(( gSetColumn), dim3(nBlocks), dim3(nThreads), 0, 0, in_->data(), nColumns, nRows, col, value);
}
void suppressWord(Expr probs, Word id) {
SetColumn(probs->val(), id, std::numeric_limits<float>::lowest());
}
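// Usage sketch (hypothetical caller): suppressWord(probs, unkId) pins that vocabulary
// column to the lowest representable float, so a following softmax/argmax will never
// select the suppressed word.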
// force decoding
__global__ void gSetColumnId(float* d_in,
size_t n_columns,
size_t n_rows,
size_t noColumn,
float value) {
size_t rowNumber = threadIdx.x + blockDim.x * blockIdx.x;
size_t index = noColumn + rowNumber * n_columns;
if(index < n_columns * n_rows) {
d_in[index] = value;
}
}
void SetColumnId(Tensor in_, size_t col, float value) {
int nRows = in_->shape().elements() / in_->shape()[-1];
int nColumns = in_->shape()[-1];
//int nBlocks = nRows / 512 + ((nRows % 512 == 0) ? 0 : 1);
//int nThreads = ::min(512, nRows);
int nBlocks = 1;
int nThreads = ::min(512, 1);
hipLaunchKernelGGL(( gSetColumnId), dim3(nBlocks), dim3(nThreads), 0, 0, in_->data(), nColumns, nRows, col, value);
}
} // namespace gpu
} // namespace marian
| c9af261bbba09357b562a6849fcd6b873e13bf2d.cu | /* All or part of this file was contributed by Intel under license:
* Copyright (C) 2017-2018 Intel Corporation
* SPDX-License-Identifier: MIT
*/
#include <cuda.h>
#include <limits>
#include "data/types.h"
#include "tensors/tensor.h"
#include "translator/helpers.h"
namespace marian {
namespace gpu {
__global__ void gSetColumn(float* d_in,
size_t n_columns,
size_t n_rows,
size_t noColumn,
float value) {
size_t rowNumber = threadIdx.x + blockDim.x * blockIdx.x;
size_t index = noColumn + rowNumber * n_columns;
if(index < n_columns * n_rows) {
d_in[index] = value;
}
}
void SetColumn(Tensor in_, size_t col, float value) {
int nRows = in_->shape().elements() / in_->shape()[-1];
int nColumns = in_->shape()[-1];
int nBlocks = nRows / 512 + ((nRows % 512 == 0) ? 0 : 1);
int nThreads = std::min(512, nRows);
gSetColumn<<<nBlocks, nThreads>>>(in_->data(), nColumns, nRows, col, value);
}
void suppressWord(Expr probs, Word id) {
SetColumn(probs->val(), id, std::numeric_limits<float>::lowest());
}
// force decoding
__global__ void gSetColumnId(float* d_in,
size_t n_columns,
size_t n_rows,
size_t noColumn,
float value) {
size_t rowNumber = threadIdx.x + blockDim.x * blockIdx.x;
size_t index = noColumn + rowNumber * n_columns;
if(index < n_columns * n_rows) {
d_in[index] = value;
}
}
void SetColumnId(Tensor in_, size_t col, float value) {
int nRows = in_->shape().elements() / in_->shape()[-1];
int nColumns = in_->shape()[-1];
//int nBlocks = nRows / 512 + ((nRows % 512 == 0) ? 0 : 1);
//int nThreads = std::min(512, nRows);
int nBlocks = 1;
int nThreads = std::min(512, 1);
gSetColumnId<<<nBlocks, nThreads>>>(in_->data(), nColumns, nRows, col, value);
}
} // namespace gpu
} // namespace marian
|
9a456d560637a1262e387a5167fd2661835bf81d.hip | // !!! This is a file automatically generated by hipify!!!
#undef USE_DISPATCH // nvcc doesn't support libdispatch
extern "C" {
#include "ccv.h"
}
#include <ctype.h>
#define CASE_TESTS // so that we don't include public available methods
#include "../lib/cuda/cwc_convnet.cu"
#include "../lib/ccv_convnet.c"
static const int DEVICE_COUNT = 4;
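// cwc_backwards_runtime: correctness and timing check for the backward pass. It runs one
// batch split across DEVICE_COUNT GPUs, then repeats the same batch on a single GPU and
// compares the loss outputs, the propagated errors and the weight updates of the last two
// layers, printing any mismatches.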
extern "C" void cwc_backwards_runtime(ccv_convnet_t* convnet, ccv_array_t* categorizeds, ccv_convnet_train_param_t params)
{
int dual_batch = params.mini_batch;
int category_count = 1000;
int mini_batch = dual_batch / DEVICE_COUNT;
params.device_count = DEVICE_COUNT;
_cwc_convnet_alloc_reserved_both(convnet, mini_batch, DEVICE_COUNT, params.layer_params);
cwc_convnet_context_t* context = GPU(convnet)->contexts;
int i, device_id;
int conv_layers[] = {0, 3, 6, 7, 8};
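// Hand-picked x/y/z tuning parameters for the forward and backward kernels of each
// convolutional layer listed in conv_layers; layers 0 and 3 get their own backward
// settings in the loop below.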
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
for (i = 0; i < 5; i++)
{
ccv_convnet_layer_t* layer = GPU(convnet)->device[device_id].layers + conv_layers[i];
EXTRA(layer)->vary.convolutional.forward.x = 4;
EXTRA(layer)->vary.convolutional.forward.y = 8;
EXTRA(layer)->vary.convolutional.forward.z = 32;
if (conv_layers[i] == 3)
{
EXTRA(layer)->vary.convolutional.backward.gradient.x = 4;
EXTRA(layer)->vary.convolutional.backward.gradient.y = 6;
EXTRA(layer)->vary.convolutional.backward.gradient.z = 24;
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 6;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 4;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 24;
} else if (conv_layers[i] == 0) {
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 1;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 3;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 1;
} else {
EXTRA(layer)->vary.convolutional.backward.gradient.x = 8;
EXTRA(layer)->vary.convolutional.backward.gradient.y = 4;
EXTRA(layer)->vary.convolutional.backward.gradient.z = 32;
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 8;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 4;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 32;
}
}
if (params.peer_access)
_cwc_convnet_enable_peer_access(convnet, params.device_count);
// doing model parallelism
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
hipSetDevice(device_id);
cwc_convnet_batch_formation(0, categorizeds, convnet->mean_activity, 0, 0, 0, 0, 0, convnet->input, params.input.min_dim, params.input.max_dim, convnet->rows, convnet->cols, convnet->channels, category_count, 0, mini_batch, mini_batch * device_id, mini_batch, context->host[device_id].input, context->host[device_id].c);
hipMemcpyAsync(context->device[device_id].input, context->host[device_id].input, sizeof(float) * convnet->rows * convnet->cols * convnet->channels * mini_batch, hipMemcpyHostToDevice, context->device[device_id].data_stream);
assert(hipGetLastError() == hipSuccess);
hipMemcpyAsync(context->device[device_id].c, context->host[device_id].c, sizeof(int) * mini_batch, hipMemcpyHostToDevice, context->device[device_id].data_stream);
assert(hipGetLastError() == hipSuccess);
}
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
hipSetDevice(device_id);
hipDeviceSynchronize();
}
hipSetDevice(0);
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, context->device[0].data_stream);
_cwc_convnet_encode_impl(convnet, DEVICE_COUNT, mini_batch, 0, context);
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
hipSetDevice(device_id);
// do the logistic loss
_cwc_convnet_softmax_with_logistic_loss(mini_batch, category_count, GPU(convnet)->device[device_id].forwards[convnet->count - 1] + device_id * mini_batch * category_count, context->device[device_id].c, context->device[device_id].data_stream);
}
_cwc_convnet_backward_propagate_error(convnet, DEVICE_COUNT, mini_batch, context);
_cwc_convnet_reduce_data_parallelism(convnet, DEVICE_COUNT, context);
for (device_id = 1; device_id < DEVICE_COUNT; device_id++)
{
hipSetDevice(device_id);
hipEventRecord(context->device[device_id].data_joint, context->device[device_id].data_stream);
}
hipSetDevice(0);
for (device_id = 1; device_id < DEVICE_COUNT; device_id++)
hipStreamWaitEvent(context->device[0].data_stream, context->device[device_id].data_joint, 0);
hipEventRecord(stop, context->device[0].data_stream);
hipEventSynchronize(stop);
assert(hipGetLastError() == hipSuccess);
float elapsed_time = 0;
hipEventElapsedTime(&elapsed_time, start, stop);
printf("%d GPUs uses %f ms\n", DEVICE_COUNT, elapsed_time);
float *dual_out[DEVICE_COUNT] = {0};
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
hipHostMalloc(&dual_out[device_id], sizeof(float) * dual_batch * category_count);
float *back_out[DEVICE_COUNT] = {0};
ccv_convnet_layer_t* second_layer = convnet->layers + 1;
int second_count = second_layer->input.matrix.rows * second_layer->input.matrix.cols * second_layer->input.matrix.channels;
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
hipHostMalloc(&back_out[device_id], sizeof(float) * mini_batch * second_count);
ccv_convnet_layer_t* last_layer = GPU(convnet)->device[0].layers + convnet->count - 1;
float *dual_w[DEVICE_COUNT] = {0};
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
hipHostMalloc(&dual_w[device_id], sizeof(float) * last_layer->wnum);
ccv_convnet_layer_t* second_last_layer = GPU(convnet)->device[0].layers + convnet->count - 2;
float *dual_w_2[DEVICE_COUNT] = {0};
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
hipHostMalloc(&dual_w_2[device_id], sizeof(float) * second_last_layer->wnum);
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
hipSetDevice(device_id);
hipMemcpy(dual_out[device_id], GPU(convnet)->device[device_id].forwards[convnet->count - 1], sizeof(float) * dual_batch * category_count, hipMemcpyDeviceToHost);
hipMemcpy(back_out[device_id], GPU(convnet)->device[device_id].backwards[1], sizeof(float) * mini_batch * second_count, hipMemcpyDeviceToHost);
hipMemcpy(dual_w[device_id], GPU(convnet)->device[device_id].configurations[convnet->count - 1].w, sizeof(float) * last_layer->wnum, hipMemcpyDeviceToHost);
hipMemcpy(dual_w_2[device_id], GPU(convnet)->device[device_id].configurations[convnet->count - 2].w, sizeof(float) * second_last_layer->wnum, hipMemcpyDeviceToHost);
}
ccv_convnet_compact(convnet);
assert(hipGetLastError() == hipSuccess);
// do it on one device
device_id = 0;
hipSetDevice(device_id);
_cwc_convnet_alloc_reserved_both(convnet, dual_batch, 1, params.layer_params);
assert(hipGetLastError() == hipSuccess);
context = GPU(convnet)->contexts;
for (i = 0; i < 5; i++)
{
ccv_convnet_layer_t* layer = GPU(convnet)->device[device_id].layers + conv_layers[i];
EXTRA(layer)->vary.convolutional.forward.x = 4;
EXTRA(layer)->vary.convolutional.forward.y = 8;
EXTRA(layer)->vary.convolutional.forward.z = 32;
if (conv_layers[i] == 3)
{
EXTRA(layer)->vary.convolutional.backward.gradient.x = 4;
EXTRA(layer)->vary.convolutional.backward.gradient.y = 6;
EXTRA(layer)->vary.convolutional.backward.gradient.z = 24;
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 6;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 4;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 24;
} else if (conv_layers[i] == 0) {
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 1;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 3;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 1;
} else {
EXTRA(layer)->vary.convolutional.backward.gradient.x = 8;
EXTRA(layer)->vary.convolutional.backward.gradient.y = 4;
EXTRA(layer)->vary.convolutional.backward.gradient.z = 32;
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 8;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 4;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 32;
}
}
cwc_convnet_batch_formation(0, categorizeds, convnet->mean_activity, 0, 0, 0, 0, 0, convnet->input, params.input.min_dim, params.input.max_dim, convnet->rows, convnet->cols, convnet->channels, category_count, 0, dual_batch, 0, dual_batch, context->host[device_id].input, context->host[device_id].c);
hipMemcpyAsync(context->device[device_id].input, context->host[device_id].input, sizeof(float) * convnet->rows * convnet->cols * convnet->channels * dual_batch, hipMemcpyHostToDevice, context->device[device_id].data_stream);
assert(hipGetLastError() == hipSuccess);
hipMemcpyAsync(context->device[device_id].c, context->host[device_id].c, sizeof(int) * dual_batch, hipMemcpyHostToDevice, context->device[device_id].data_stream);
assert(hipGetLastError() == hipSuccess);
hipDeviceSynchronize();
hipEventRecord(start, context->device[device_id].data_stream);
_cwc_convnet_encode_impl(convnet, 1, dual_batch, 0, context);
// do the logistic loss
_cwc_convnet_softmax_with_logistic_loss(dual_batch, category_count, GPU(convnet)->device[device_id].forwards[convnet->count - 1], context->device[device_id].c, context->device[device_id].data_stream);
_cwc_convnet_backward_propagate_error(convnet, 1, dual_batch, context);
hipEventRecord(stop, context->device[device_id].data_stream);
hipEventSynchronize(stop);
elapsed_time = 0;
hipEventElapsedTime(&elapsed_time, start, stop);
printf("one GPU uses %f ms\n", elapsed_time);
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceSynchronize();
float* out = 0;
hipHostMalloc(&out, sizeof(float) * dual_batch * category_count);
hipMemcpy(out, GPU(convnet)->device[device_id].forwards[convnet->count - 1], sizeof(float) * dual_batch * category_count, hipMemcpyDeviceToHost);
float* back = 0;
hipHostMalloc(&back, sizeof(float) * dual_batch * second_count);
hipMemcpy(back, GPU(convnet)->device[device_id].backwards[1], sizeof(float) * dual_batch * second_count, hipMemcpyDeviceToHost);
float* w = 0;
int wnum = GPU(convnet)->device[device_id].configurations[convnet->count - 1].wnum;
hipHostMalloc(&w, sizeof(float) * wnum);
hipMemcpy(w, GPU(convnet)->device[device_id].configurations[convnet->count - 1].w, sizeof(float) * wnum, hipMemcpyDeviceToHost);
float* w_2 = 0;
int wnum_2 = GPU(convnet)->device[device_id].configurations[convnet->count - 2].wnum;
hipHostMalloc(&w_2, sizeof(float) * wnum_2);
hipMemcpy(w_2, GPU(convnet)->device[device_id].configurations[convnet->count - 2].w, sizeof(float) * wnum_2, hipMemcpyDeviceToHost);
ccv_convnet_free(convnet);
int j;
for (i = 0; i < category_count; i++)
{
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
for (j = 0; j < mini_batch; j++)
{
float p = out[i * dual_batch + mini_batch * device_id + j];
float q = dual_out[device_id][category_count * mini_batch * device_id + i * mini_batch + j];
float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1);
if (delta > 1e-4)
printf("softmax with logistic loss doesn't match: %d %d %d %g %g\n", device_id, i, j, out[i * dual_batch + mini_batch * device_id + j], dual_out[device_id][category_count * mini_batch * device_id + i * mini_batch + j]);
}
}
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
const int pwnum = wnum / DEVICE_COUNT;
for (i = 0; i < pwnum; i++)
{
float p = w[i + pwnum * device_id];
float q = dual_w[device_id][i];
float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1e-3);
if (delta > 1e-3)
printf("the weight update on last layer doesn't match: %d %d %g %g\n", device_id, i + pwnum * device_id, w[i + pwnum * device_id], dual_w[device_id][i]);
}
}
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
const int pwnum_2 = wnum_2 / DEVICE_COUNT;
for (i = 0; i < pwnum_2; i++)
{
float p = w_2[i + pwnum_2 * device_id];
float q = dual_w_2[device_id][i];
float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1e-3);
if (delta > 1e-3)
printf("the weight update on second to last layer doesn't match: %d %d %g %g\n", device_id, i + pwnum_2 * device_id, w_2[i + pwnum_2 * device_id], dual_w_2[device_id][i]);
}
}
for (i = 0; i < second_count; i++)
{
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
for (j = 0; j < mini_batch; j++)
{
float p = back[i * dual_batch + mini_batch * device_id + j];
float q = back_out[device_id][i * mini_batch + j];
float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1e-3);
if (delta > 1e-3)
printf("the last layer of backwards propagated error doesn't match: %d %d %d %g %g\n", device_id, i, j, back[i * dual_batch + mini_batch * device_id + j], back_out[device_id][i * mini_batch + j]);
}
}
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
hipHostFree(dual_out[device_id]);
hipHostFree(back_out[device_id]);
hipHostFree(dual_w[device_id]);
hipHostFree(dual_w_2[device_id]);
}
hipHostFree(out);
hipHostFree(back);
hipHostFree(w);
hipHostFree(w_2);
}
| 9a456d560637a1262e387a5167fd2661835bf81d.cu | #undef USE_DISPATCH // nvcc doesn't support libdispatch
extern "C" {
#include "ccv.h"
}
#include <ctype.h>
#define CASE_TESTS // so that we don't include public available methods
#include "../lib/cuda/cwc_convnet.cu"
#include "../lib/ccv_convnet.c"
static const int DEVICE_COUNT = 4;
extern "C" void cwc_backwards_runtime(ccv_convnet_t* convnet, ccv_array_t* categorizeds, ccv_convnet_train_param_t params)
{
int dual_batch = params.mini_batch;
int category_count = 1000;
int mini_batch = dual_batch / DEVICE_COUNT;
params.device_count = DEVICE_COUNT;
_cwc_convnet_alloc_reserved_both(convnet, mini_batch, DEVICE_COUNT, params.layer_params);
cwc_convnet_context_t* context = GPU(convnet)->contexts;
int i, device_id;
int conv_layers[] = {0, 3, 6, 7, 8};
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
for (i = 0; i < 5; i++)
{
ccv_convnet_layer_t* layer = GPU(convnet)->device[device_id].layers + conv_layers[i];
EXTRA(layer)->vary.convolutional.forward.x = 4;
EXTRA(layer)->vary.convolutional.forward.y = 8;
EXTRA(layer)->vary.convolutional.forward.z = 32;
if (conv_layers[i] == 3)
{
EXTRA(layer)->vary.convolutional.backward.gradient.x = 4;
EXTRA(layer)->vary.convolutional.backward.gradient.y = 6;
EXTRA(layer)->vary.convolutional.backward.gradient.z = 24;
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 6;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 4;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 24;
} else if (conv_layers[i] == 0) {
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 1;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 3;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 1;
} else {
EXTRA(layer)->vary.convolutional.backward.gradient.x = 8;
EXTRA(layer)->vary.convolutional.backward.gradient.y = 4;
EXTRA(layer)->vary.convolutional.backward.gradient.z = 32;
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 8;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 4;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 32;
}
}
if (params.peer_access)
_cwc_convnet_enable_peer_access(convnet, params.device_count);
// doing model parallelism
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
cudaSetDevice(device_id);
cwc_convnet_batch_formation(0, categorizeds, convnet->mean_activity, 0, 0, 0, 0, 0, convnet->input, params.input.min_dim, params.input.max_dim, convnet->rows, convnet->cols, convnet->channels, category_count, 0, mini_batch, mini_batch * device_id, mini_batch, context->host[device_id].input, context->host[device_id].c);
cudaMemcpyAsync(context->device[device_id].input, context->host[device_id].input, sizeof(float) * convnet->rows * convnet->cols * convnet->channels * mini_batch, cudaMemcpyHostToDevice, context->device[device_id].data_stream);
assert(cudaGetLastError() == cudaSuccess);
cudaMemcpyAsync(context->device[device_id].c, context->host[device_id].c, sizeof(int) * mini_batch, cudaMemcpyHostToDevice, context->device[device_id].data_stream);
assert(cudaGetLastError() == cudaSuccess);
}
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
cudaSetDevice(device_id);
cudaDeviceSynchronize();
}
cudaSetDevice(0);
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
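// Time the multi-GPU pass with events on device 0's data stream; before the stop event
// is recorded, device 0 waits on every other device's data_joint event so the measured
// interval covers all GPUs.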
cudaEventRecord(start, context->device[0].data_stream);
_cwc_convnet_encode_impl(convnet, DEVICE_COUNT, mini_batch, 0, context);
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
cudaSetDevice(device_id);
// do the logistic loss
_cwc_convnet_softmax_with_logistic_loss(mini_batch, category_count, GPU(convnet)->device[device_id].forwards[convnet->count - 1] + device_id * mini_batch * category_count, context->device[device_id].c, context->device[device_id].data_stream);
}
_cwc_convnet_backward_propagate_error(convnet, DEVICE_COUNT, mini_batch, context);
_cwc_convnet_reduce_data_parallelism(convnet, DEVICE_COUNT, context);
for (device_id = 1; device_id < DEVICE_COUNT; device_id++)
{
cudaSetDevice(device_id);
cudaEventRecord(context->device[device_id].data_joint, context->device[device_id].data_stream);
}
cudaSetDevice(0);
for (device_id = 1; device_id < DEVICE_COUNT; device_id++)
cudaStreamWaitEvent(context->device[0].data_stream, context->device[device_id].data_joint, 0);
cudaEventRecord(stop, context->device[0].data_stream);
cudaEventSynchronize(stop);
assert(cudaGetLastError() == cudaSuccess);
float elapsed_time = 0;
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("%d GPUs uses %f ms\n", DEVICE_COUNT, elapsed_time);
float *dual_out[DEVICE_COUNT] = {0};
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
cudaMallocHost(&dual_out[device_id], sizeof(float) * dual_batch * category_count);
float *back_out[DEVICE_COUNT] = {0};
ccv_convnet_layer_t* second_layer = convnet->layers + 1;
int second_count = second_layer->input.matrix.rows * second_layer->input.matrix.cols * second_layer->input.matrix.channels;
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
cudaMallocHost(&back_out[device_id], sizeof(float) * mini_batch * second_count);
ccv_convnet_layer_t* last_layer = GPU(convnet)->device[0].layers + convnet->count - 1;
float *dual_w[DEVICE_COUNT] = {0};
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
cudaMallocHost(&dual_w[device_id], sizeof(float) * last_layer->wnum);
ccv_convnet_layer_t* second_last_layer = GPU(convnet)->device[0].layers + convnet->count - 2;
float *dual_w_2[DEVICE_COUNT] = {0};
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
cudaMallocHost(&dual_w_2[device_id], sizeof(float) * second_last_layer->wnum);
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
cudaSetDevice(device_id);
cudaMemcpy(dual_out[device_id], GPU(convnet)->device[device_id].forwards[convnet->count - 1], sizeof(float) * dual_batch * category_count, cudaMemcpyDeviceToHost);
cudaMemcpy(back_out[device_id], GPU(convnet)->device[device_id].backwards[1], sizeof(float) * mini_batch * second_count, cudaMemcpyDeviceToHost);
cudaMemcpy(dual_w[device_id], GPU(convnet)->device[device_id].configurations[convnet->count - 1].w, sizeof(float) * last_layer->wnum, cudaMemcpyDeviceToHost);
cudaMemcpy(dual_w_2[device_id], GPU(convnet)->device[device_id].configurations[convnet->count - 2].w, sizeof(float) * second_last_layer->wnum, cudaMemcpyDeviceToHost);
}
ccv_convnet_compact(convnet);
assert(cudaGetLastError() == cudaSuccess);
// do it on one device
device_id = 0;
cudaSetDevice(device_id);
_cwc_convnet_alloc_reserved_both(convnet, dual_batch, 1, params.layer_params);
assert(cudaGetLastError() == cudaSuccess);
context = GPU(convnet)->contexts;
for (i = 0; i < 5; i++)
{
ccv_convnet_layer_t* layer = GPU(convnet)->device[device_id].layers + conv_layers[i];
EXTRA(layer)->vary.convolutional.forward.x = 4;
EXTRA(layer)->vary.convolutional.forward.y = 8;
EXTRA(layer)->vary.convolutional.forward.z = 32;
if (conv_layers[i] == 3)
{
EXTRA(layer)->vary.convolutional.backward.gradient.x = 4;
EXTRA(layer)->vary.convolutional.backward.gradient.y = 6;
EXTRA(layer)->vary.convolutional.backward.gradient.z = 24;
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 6;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 4;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 24;
} else if (conv_layers[i] == 0) {
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 1;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 3;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 1;
} else {
EXTRA(layer)->vary.convolutional.backward.gradient.x = 8;
EXTRA(layer)->vary.convolutional.backward.gradient.y = 4;
EXTRA(layer)->vary.convolutional.backward.gradient.z = 32;
EXTRA(layer)->vary.convolutional.backward.coefficient.x = 8;
EXTRA(layer)->vary.convolutional.backward.coefficient.y = 4;
EXTRA(layer)->vary.convolutional.backward.coefficient.z = 32;
}
}
cwc_convnet_batch_formation(0, categorizeds, convnet->mean_activity, 0, 0, 0, 0, 0, convnet->input, params.input.min_dim, params.input.max_dim, convnet->rows, convnet->cols, convnet->channels, category_count, 0, dual_batch, 0, dual_batch, context->host[device_id].input, context->host[device_id].c);
cudaMemcpyAsync(context->device[device_id].input, context->host[device_id].input, sizeof(float) * convnet->rows * convnet->cols * convnet->channels * dual_batch, cudaMemcpyHostToDevice, context->device[device_id].data_stream);
assert(cudaGetLastError() == cudaSuccess);
cudaMemcpyAsync(context->device[device_id].c, context->host[device_id].c, sizeof(int) * dual_batch, cudaMemcpyHostToDevice, context->device[device_id].data_stream);
assert(cudaGetLastError() == cudaSuccess);
cudaDeviceSynchronize();
cudaEventRecord(start, context->device[device_id].data_stream);
_cwc_convnet_encode_impl(convnet, 1, dual_batch, 0, context);
// do the logistic loss
_cwc_convnet_softmax_with_logistic_loss(dual_batch, category_count, GPU(convnet)->device[device_id].forwards[convnet->count - 1], context->device[device_id].c, context->device[device_id].data_stream);
_cwc_convnet_backward_propagate_error(convnet, 1, dual_batch, context);
cudaEventRecord(stop, context->device[device_id].data_stream);
cudaEventSynchronize(stop);
elapsed_time = 0;
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("one GPU uses %f ms\n", elapsed_time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceSynchronize();
float* out = 0;
cudaMallocHost(&out, sizeof(float) * dual_batch * category_count);
cudaMemcpy(out, GPU(convnet)->device[device_id].forwards[convnet->count - 1], sizeof(float) * dual_batch * category_count, cudaMemcpyDeviceToHost);
float* back = 0;
cudaMallocHost(&back, sizeof(float) * dual_batch * second_count);
cudaMemcpy(back, GPU(convnet)->device[device_id].backwards[1], sizeof(float) * dual_batch * second_count, cudaMemcpyDeviceToHost);
float* w = 0;
int wnum = GPU(convnet)->device[device_id].configurations[convnet->count - 1].wnum;
cudaMallocHost(&w, sizeof(float) * wnum);
cudaMemcpy(w, GPU(convnet)->device[device_id].configurations[convnet->count - 1].w, sizeof(float) * wnum, cudaMemcpyDeviceToHost);
float* w_2 = 0;
int wnum_2 = GPU(convnet)->device[device_id].configurations[convnet->count - 2].wnum;
cudaMallocHost(&w_2, sizeof(float) * wnum_2);
cudaMemcpy(w_2, GPU(convnet)->device[device_id].configurations[convnet->count - 2].w, sizeof(float) * wnum_2, cudaMemcpyDeviceToHost);
ccv_convnet_free(convnet);
int j;
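// Element-wise comparison of single-GPU vs multi-GPU results: relative differences above
// 1e-4 for the loss outputs, or above 1e-3 for the weight updates and the propagated
// errors, are reported below.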
for (i = 0; i < category_count; i++)
{
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
for (j = 0; j < mini_batch; j++)
{
float p = out[i * dual_batch + mini_batch * device_id + j];
float q = dual_out[device_id][category_count * mini_batch * device_id + i * mini_batch + j];
float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1);
if (delta > 1e-4)
printf("softmax with logistic loss doesn't match: %d %d %d %g %g\n", device_id, i, j, out[i * dual_batch + mini_batch * device_id + j], dual_out[device_id][category_count * mini_batch * device_id + i * mini_batch + j]);
}
}
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
const int pwnum = wnum / DEVICE_COUNT;
for (i = 0; i < pwnum; i++)
{
float p = w[i + pwnum * device_id];
float q = dual_w[device_id][i];
float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1e-3);
if (delta > 1e-3)
printf("the weight update on last layer doesn't match: %d %d %g %g\n", device_id, i + pwnum * device_id, w[i + pwnum * device_id], dual_w[device_id][i]);
}
}
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
const int pwnum_2 = wnum_2 / DEVICE_COUNT;
for (i = 0; i < pwnum_2; i++)
{
float p = w_2[i + pwnum_2 * device_id];
float q = dual_w_2[device_id][i];
float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1e-3);
if (delta > 1e-3)
printf("the weight update on second to last layer doesn't match: %d %d %g %g\n", device_id, i + pwnum_2 * device_id, w_2[i + pwnum_2 * device_id], dual_w_2[device_id][i]);
}
}
for (i = 0; i < second_count; i++)
{
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
for (j = 0; j < mini_batch; j++)
{
float p = back[i * dual_batch + mini_batch * device_id + j];
float q = back_out[device_id][i * mini_batch + j];
float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1e-3);
if (delta > 1e-3)
printf("the last layer of backwards propagated error doesn't match: %d %d %d %g %g\n", device_id, i, j, back[i * dual_batch + mini_batch * device_id + j], back_out[device_id][i * mini_batch + j]);
}
}
for (device_id = 0; device_id < DEVICE_COUNT; device_id++)
{
cudaFreeHost(dual_out[device_id]);
cudaFreeHost(back_out[device_id]);
cudaFreeHost(dual_w[device_id]);
cudaFreeHost(dual_w_2[device_id]);
}
cudaFreeHost(out);
cudaFreeHost(back);
cudaFreeHost(w);
cudaFreeHost(w_2);
}
|
57ea94d8b6d8cba1aa2249ffb4e602f5f912ceae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<math.h>
#include"womegas.h"
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
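// Phi_x_WENO: fifth-order WENO reconstruction from five consecutive one-sided
// differences beta1..beta5 (Jiang-Shu smoothness indicators s_b*, nonlinear weights
// with epsilon = 1e-6, then the weighted sum of the three third-order candidates).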
__device__ double Phi_x_WENO(
double beta1,
double beta2,
double beta3,
double beta4,
double beta5
)
{
double s_b1, s_b2, s_b3,
alpha_1, alpha_2, alpha_3,
omega_1, omega_2, omega_3, result;
s_b1 = (13.0/12.0)*(beta1 - 2.0*beta2 + beta3)*(beta1 - 2.0*beta2 + beta3)
+ (0.25)*(beta1 - 4.0*beta2 + 3.0*beta3)*(beta1 - 4.0*beta2 + 3.0*beta3);
s_b2 = (13.0/12.0)*(beta2 - 2.0*beta3 + beta4)*(beta2 - 2.0*beta3 + beta4)
+ (0.25)*(beta2 - beta4)*(beta2 - beta4);
s_b3 = (13.0/12.0)*(beta3 - 2.0*beta4 + beta5)*(beta3 - 2.0*beta4 + beta5)
+ (0.25)*(3.0*beta3 - 4.0*beta4 + beta5)*(3.0*beta3 - 4.0*beta4 + beta5);
alpha_1 = 0.1 /((s_b1 + 1.0e-6)*(s_b1 + 1.0e-6));
alpha_2 = 0.6 /((s_b2 + 1.0e-6)*(s_b2 + 1.0e-6));
alpha_3 = 0.3 /((s_b3 + 1.0e-6)*(s_b3 + 1.0e-6));
omega_1 = alpha_1 / (alpha_1 + alpha_2 + alpha_3);
omega_2 = alpha_2 / (alpha_1 + alpha_2 + alpha_3);
omega_3 = alpha_3 / (alpha_1 + alpha_2 + alpha_3);
result = ((omega_1*(2.0*beta1 - 7.0*beta2 + 11.0*beta3)
+ omega_2*(-1.0*beta2 + 5.0*beta3 + 2.0*beta4)
+ omega_3*(2.0*beta3 + 5.0*beta4 - beta5))*(1.0/6.0));
return result;
}
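// Dev1thO_Downwind: first-order backward differences of phiS along x, y and z, packed
// into three Offset-sized slabs of d_Phi. The differences are multiplied by deltaX/Y/Z,
// so these arguments presumably carry the reciprocal grid spacings.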
__global__ void Dev1thO_Downwind(
double* const d_Phi,
const double* const phiS,
const double deltaX,
const double deltaY,
const double deltaZ,
const unsigned int Nx,
const unsigned int Ny,
const unsigned int Nz
)
{
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x,
idy = blockIdx.y*blockDim.y + threadIdx.y,
idz = blockIdx.z*blockDim.z + threadIdx.z;
//Offsets sample (id_ip) EQ (i+1,j,k)
unsigned int id = Nx*Ny*idz + Nx*idy + idx,
id_im = Nx*Ny*idz + Nx*idy + idx - 1,
id_jm = Nx*Ny*idz + Nx*(idy - 1) + idx,
id_km = Nx*Ny*(idz - 1) + Nx*idy + idx;
unsigned int ix = id,
iy = id,
iz = id;
//Dealing with boundaries
if(idx==0){id_im = id; ix = Nx*Ny*idz + Nx*idy;}
if(idy==0){id_jm = id; iy = Nx*Ny*idz + idx;}
if(idz==0){id_km = id; iz = Nx*idy + idx;}
const unsigned int Offset = Nx*Ny*Nz;
d_Phi[ id] = deltaX*(phiS[ix] - phiS[id_im]);
d_Phi[1*Offset + id] = deltaY*(phiS[iy] - phiS[id_jm]);
d_Phi[2*Offset + id] = deltaZ*(phiS[iz] - phiS[id_km]);
return;
}
__global__ void PhiDevPlusParameter(
double* const phi_xyz,
const double* const d_Phi,
const double* const jbn,
unsigned const int Nx,
unsigned const int Ny,
unsigned const int Nz
)
{
unsigned const int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
unsigned int id = idx + idy*Nx + idz*Nx*Ny,
id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
id_im2 = (idx - 2) + idy*Nx + idz*Nx*Ny,
id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
id_jm2 = idx + (idy - 2)*Nx + idz*Nx*Ny,
id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
id_km2 = idx + idy*Nx + (idz - 2)*Nx*Ny,
id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny;
//Dealing with boundaries
if(idx == 0){id_im1 = id; id_im2 = id;}
if(idx == 1){id_im2 = id;}
if(idx == Nx -1){id_ip1 = id; id_ip2 = id;}
if(idx == Nx -2){id_ip2 = id;}
if(idy == 0){id_jm1 = id; id_jm2 = id;}
if(idy == 1){id_jm2 = id;}
if(idy == Ny -1){id_jp1 = id; id_jp2 = id;}
if(idy == Ny -2){id_jp2 = id;}
if(idz == 0){id_km1 = id; id_km2 = id;}
if(idz == 1){id_km2 = id;}
if(idz == Nz -1){id_kp1 = id; id_kp2 = id;}
if(idz == Nz -2){id_kp2 = id;}
double beta1, beta2, beta3, beta4, beta5;
//Axis X
beta1 = jbn[id]*d_Phi[id_im2];
beta2 = jbn[id]*d_Phi[id_im1];
beta3 = jbn[id]*d_Phi[id];
beta4 = jbn[id]*d_Phi[id_ip1];
beta5 = jbn[id]*d_Phi[id_ip2];
phi_xyz[id] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Y
beta1 = jbn[id + 4*Offset]*d_Phi[id_jm2 + 1*Offset];
beta2 = jbn[id + 4*Offset]*d_Phi[id_jm1 + 1*Offset];
beta3 = jbn[id + 4*Offset]*d_Phi[id + 1*Offset];
beta4 = jbn[id + 4*Offset]*d_Phi[id_jp1 + 1*Offset];
beta5 = jbn[id + 4*Offset]*d_Phi[id_jp2 + 1*Offset];
phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Z
beta1 = jbn[id + 8*Offset]*d_Phi[id_km2 + 2*Offset];
beta2 = jbn[id + 8*Offset]*d_Phi[id_km1 + 2*Offset];
beta3 = jbn[id + 8*Offset]*d_Phi[id + 2*Offset];
beta4 = jbn[id + 8*Offset]*d_Phi[id_kp1 + 2*Offset];
beta5 = jbn[id + 8*Offset]*d_Phi[id_kp2 + 2*Offset];
phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
return;
}
__global__ void PhiDevMinusParameter(
double* const phi_xyz,
const double* const d_Phi,
const double* const jbn,
unsigned const int Nx,
unsigned const int Ny,
unsigned const int Nz
)
{
unsigned const int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
unsigned int id = idx + idy*Nx + idz*Nx*Ny,
id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny,
id_ip3 = (idx + 3) + idy*Nx + idz*Nx*Ny,
id_jp3 = idx + (idy + 3)*Nx + idz*Nx*Ny,
id_kp3 = idx + idy*Nx + (idz + 3)*Nx*Ny;
//Dealing with boundaries
if(idx == 0){id_im1 = id;}
if(idx == Nx -1){id_ip1 = id; id_ip2 = id; id_ip3 = id; }
if(idx == Nx -2){id_ip2 = id; id_ip3 = id; }
if(idx == Nx -3){id_ip3 = id; }
if(idy == 0){id_jm1 = id;}
if(idy == Ny -1){id_jp1 = id; id_jp2 = id; id_jp3 = id; }
if(idy == Ny -2){id_jp2 = id; id_jp3 = id; }
if(idy == Ny -3){id_jp3 = id; }
if(idz == 0){id_km1 = id;}
if(idz == Nz -1){id_kp1 = id; id_kp2 = id; id_kp3 = id; }
if(idz == Nz -2){id_kp2 = id; id_kp3 = id; }
if(idz == Nz -3){id_kp3 = id; }
double beta1, beta2, beta3, beta4, beta5;
//Axis X
beta1 = jbn[id]*d_Phi[id_ip3];
beta2 = jbn[id]*d_Phi[id_ip2];
beta3 = jbn[id]*d_Phi[id_ip1];
beta4 = jbn[id]*d_Phi[id];
beta5 = jbn[id]*d_Phi[id_im1];
phi_xyz[id ] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Y
beta1 = jbn[id + 4*Offset]*d_Phi[id_jp3 + 1*Offset];
beta2 = jbn[id + 4*Offset]*d_Phi[id_jp2 + 1*Offset];
beta3 = jbn[id + 4*Offset]*d_Phi[id_jp1 + 1*Offset];
beta4 = jbn[id + 4*Offset]*d_Phi[id + 1*Offset];
beta5 = jbn[id + 4*Offset]*d_Phi[id_jm1 + 1*Offset];
phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Z
beta1 = jbn[id + 8*Offset]*d_Phi[id_kp3 + 2*Offset];
beta2 = jbn[id + 8*Offset]*d_Phi[id_kp2 + 2*Offset];
beta3 = jbn[id + 8*Offset]*d_Phi[id_kp1 + 2*Offset];
beta4 = jbn[id + 8*Offset]*d_Phi[id + 2*Offset];
beta5 = jbn[id + 8*Offset]*d_Phi[id_km1 + 2*Offset];
phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
return;
}
__global__ void reini_RS_WENO(
double* const rs,
const double* const phiS,
const double* const phiS0,
const double* const deltaXYZ,
const double* const d_phiP,
const double* const d_phiM,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz
)
{
unsigned int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
unsigned int id = idx + Nx*idy + Nx*Ny*idz;
double so, rs_x, rs_y, rs_z, ta, grad_mod;
double phiMax, phiMin;
ta = (double)(phiS[id] > 0.0) - (double)(phiS[id] < 0.0);
//Getting gradient axis X
phiMax = MAX(d_phiP[id ], 0.0)*MAX(d_phiP[id ], 0.0);
phiMin = MIN(d_phiM[id ], 0.0)*MIN(d_phiM[id ], 0.0);
rs_x = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
phiMax = MAX(d_phiM[id ], 0.0)*MAX(d_phiM[id ], 0.0);
phiMin = MIN(d_phiP[id ], 0.0)*MIN(d_phiP[id ], 0.0);
rs_x += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
//Getting gradient axis Y
phiMax = MAX(d_phiP[id + 1*Offset], 0.0)*MAX(d_phiP[id + 1*Offset], 0.0);
phiMin = MIN(d_phiM[id + 1*Offset], 0.0)*MIN(d_phiM[id + 1*Offset], 0.0);
rs_y = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
phiMax = MAX(d_phiM[id + 1*Offset], 0.0)*MAX(d_phiM[id + 1*Offset], 0.0);
phiMin = MIN(d_phiP[id + 1*Offset], 0.0)*MIN(d_phiP[id + 1*Offset], 0.0);
rs_y += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
//Getting gradient axis Z
phiMax = MAX(d_phiP[id + 2*Offset], 0.0)*MAX(d_phiP[id + 2*Offset], 0.0);
phiMin = MIN(d_phiM[id + 2*Offset], 0.0)*MIN(d_phiM[id + 2*Offset], 0.0);
rs_z = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
phiMax = MAX(d_phiM[id + 2*Offset], 0.0)*MAX(d_phiM[id + 2*Offset], 0.0);
phiMin = MIN(d_phiP[id + 2*Offset], 0.0)*MIN(d_phiP[id + 2*Offset], 0.0);
rs_z += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
grad_mod = sqrt(rs_x + rs_y + rs_z);
so = phiS[id]
/ sqrt(phiS[id]*phiS[id] + grad_mod*deltaXYZ[id]*deltaXYZ[id] );
rs[id] = 1.0*so*(grad_mod - 1.0);
return;
}
__global__ void advect_RS_WENO(
double* const rs,
const double* const velocity,
const double* const d_phiP_d,
const double* const d_phiM_d,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz
)
{
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
unsigned int id = idx + Nx*idy + Nx*Ny*idz,
Offset = Nx*Ny*Nz;
double rs_x, rs_y, rs_z;
double grad_x, grad_y, grad_z;
double rsign;
rsign = (double)(velocity[id] > 0.0)
- (double)(velocity[id] < 0.0);
rs_x = 0.5*(rsign + 1.0)*velocity[id]*d_phiP_d[id]
+ 0.5*abs(rsign - 1.0)*velocity[id]*d_phiM_d[id];
grad_x = 0.5*(rsign + 1.0)*d_phiP_d[id]
+ 0.5*abs(rsign - 1.0)*d_phiM_d[id];
rsign = (double)(velocity[id + 1*Offset] > 0.0)
- (double)(velocity[id + 1*Offset] < 0.0);
rs_y = 0.5*(rsign + 1.0)*velocity[id + 1*Offset]*d_phiP_d[id + 1*Offset]
+ 0.5*abs(rsign - 1.0)*velocity[id + 1*Offset]*d_phiM_d[id + 1*Offset];
grad_y = 0.5*(rsign + 1.0)*d_phiP_d[id + 1*Offset]
+ 0.5*abs(rsign - 1.0)*d_phiM_d[id + 1*Offset];
rsign = (double)(velocity[id + 2*Offset] > 0.0)
- (double)(velocity[id + 2*Offset] < 0.0);
rs_z = 0.5*(rsign + 1.0)*velocity[id + 2*Offset]*d_phiP_d[id + 2*Offset]
+ 0.5*abs(rsign - 1.0)*velocity[id + 2*Offset]*d_phiM_d[id + 2*Offset];
grad_z = 0.5*(rsign + 1.0)*d_phiP_d[id + 2*Offset]
+ 0.5*abs(rsign - 1.0)*d_phiM_d[id + 2*Offset];
rs[id] = rs_x + rs_y + rs_z;
return;
}
| 57ea94d8b6d8cba1aa2249ffb4e602f5f912ceae.cu | #include<cuda.h>
#include<math.h>
#include"womegas.h"
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
__device__ double Phi_x_WENO(
double beta1,
double beta2,
double beta3,
double beta4,
double beta5
)
{
double s_b1, s_b2, s_b3,
alpha_1, alpha_2, alpha_3,
omega_1, omega_2, omega_3, result;
s_b1 = (13.0/12.0)*(beta1 - 2.0*beta2 + beta3)*(beta1 - 2.0*beta2 + beta3)
+ (0.25)*(beta1 - 4.0*beta2 + 3.0*beta3)*(beta1 - 4.0*beta2 + 3.0*beta3);
s_b2 = (13.0/12.0)*(beta2 - 2.0*beta3 + beta4)*(beta2 - 2.0*beta3 + beta4)
+ (0.25)*(beta2 - beta4)*(beta2 - beta4);
s_b3 = (13.0/12.0)*(beta3 - 2.0*beta4 + beta5)*(beta3 - 2.0*beta4 + beta5)
+ (0.25)*(3.0*beta3 - 4.0*beta4 + beta5)*(3.0*beta3 - 4.0*beta4 + beta5);
alpha_1 = 0.1 /((s_b1 + 1.0e-6)*(s_b1 + 1.0e-6));
alpha_2 = 0.6 /((s_b2 + 1.0e-6)*(s_b2 + 1.0e-6));
alpha_3 = 0.3 /((s_b3 + 1.0e-6)*(s_b3 + 1.0e-6));
omega_1 = alpha_1 / (alpha_1 + alpha_2 + alpha_3);
omega_2 = alpha_2 / (alpha_1 + alpha_2 + alpha_3);
omega_3 = alpha_3 / (alpha_1 + alpha_2 + alpha_3);
result = ((omega_1*(2.0*beta1 - 7.0*beta2 + 11.0*beta3)
+ omega_2*(-1.0*beta2 + 5.0*beta3 + 2.0*beta4)
+ omega_3*(2.0*beta3 + 5.0*beta4 - beta5))*(1.0/6.0));
return result;
}
__global__ void Dev1thO_Downwind(
double* const d_Phi,
const double* const phiS,
const double deltaX,
const double deltaY,
const double deltaZ,
const unsigned int Nx,
const unsigned int Ny,
const unsigned int Nz
)
{
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x,
idy = blockIdx.y*blockDim.y + threadIdx.y,
idz = blockIdx.z*blockDim.z + threadIdx.z;
//Offsets sample (id_ip) EQ (i+1,j,k)
unsigned int id = Nx*Ny*idz + Nx*idy + idx,
id_im = Nx*Ny*idz + Nx*idy + idx - 1,
id_jm = Nx*Ny*idz + Nx*(idy - 1) + idx,
id_km = Nx*Ny*(idz - 1) + Nx*idy + idx;
unsigned int ix = id,
iy = id,
iz = id;
//Dealing with boundaries
if(idx==0){id_im = id; ix = Nx*Ny*idz + Nx*idy;}
if(idy==0){id_jm = id; iy = Nx*Ny*idz + idx;}
if(idz==0){id_km = id; iz = Nx*idy + idx;}
const unsigned int Offset = Nx*Ny*Nz;
d_Phi[ id] = deltaX*(phiS[ix] - phiS[id_im]);
d_Phi[1*Offset + id] = deltaY*(phiS[iy] - phiS[id_jm]);
d_Phi[2*Offset + id] = deltaZ*(phiS[iz] - phiS[id_km]);
return;
}
__global__ void PhiDevPlusParameter(
double* const phi_xyz,
const double* const d_Phi,
const double* const jbn,
unsigned const int Nx,
unsigned const int Ny,
unsigned const int Nz
)
{
unsigned const int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
unsigned int id = idx + idy*Nx + idz*Nx*Ny,
id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
id_im2 = (idx - 2) + idy*Nx + idz*Nx*Ny,
id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
id_jm2 = idx + (idy - 2)*Nx + idz*Nx*Ny,
id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
id_km2 = idx + idy*Nx + (idz - 2)*Nx*Ny,
id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny;
//Dealing with boundaries
if(idx == 0){id_im1 = id; id_im2 = id;}
if(idx == 1){id_im2 = id;}
if(idx == Nx -1){id_ip1 = id; id_ip2 = id;}
if(idx == Nx -2){id_ip2 = id;}
if(idy == 0){id_jm1 = id; id_jm2 = id;}
if(idy == 1){id_jm2 = id;}
if(idy == Ny -1){id_jp1 = id; id_jp2 = id;}
if(idy == Ny -2){id_jp2 = id;}
if(idz == 0){id_km1 = id; id_km2 = id;}
if(idz == 1){id_km2 = id;}
if(idz == Nz -1){id_kp1 = id; id_kp2 = id;}
if(idz == Nz -2){id_kp2 = id;}
double beta1, beta2, beta3, beta4, beta5;
//Axis X
beta1 = jbn[id]*d_Phi[id_im2];
beta2 = jbn[id]*d_Phi[id_im1];
beta3 = jbn[id]*d_Phi[id];
beta4 = jbn[id]*d_Phi[id_ip1];
beta5 = jbn[id]*d_Phi[id_ip2];
phi_xyz[id] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Y
beta1 = jbn[id + 4*Offset]*d_Phi[id_jm2 + 1*Offset];
beta2 = jbn[id + 4*Offset]*d_Phi[id_jm1 + 1*Offset];
beta3 = jbn[id + 4*Offset]*d_Phi[id + 1*Offset];
beta4 = jbn[id + 4*Offset]*d_Phi[id_jp1 + 1*Offset];
beta5 = jbn[id + 4*Offset]*d_Phi[id_jp2 + 1*Offset];
phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Z
beta1 = jbn[id + 8*Offset]*d_Phi[id_km2 + 2*Offset];
beta2 = jbn[id + 8*Offset]*d_Phi[id_km1 + 2*Offset];
beta3 = jbn[id + 8*Offset]*d_Phi[id + 2*Offset];
beta4 = jbn[id + 8*Offset]*d_Phi[id_kp1 + 2*Offset];
beta5 = jbn[id + 8*Offset]*d_Phi[id_kp2 + 2*Offset];
phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
return;
}
__global__ void PhiDevMinusParameter(
double* const phi_xyz,
const double* const d_Phi,
const double* const jbn,
unsigned const int Nx,
unsigned const int Ny,
unsigned const int Nz
)
{
unsigned const int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
unsigned int id = idx + idy*Nx + idz*Nx*Ny,
id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny,
id_ip3 = (idx + 3) + idy*Nx + idz*Nx*Ny,
id_jp3 = idx + (idy + 3)*Nx + idz*Nx*Ny,
id_kp3 = idx + idy*Nx + (idz + 3)*Nx*Ny;
//Dealing with boundaries
if(idx == 0){id_im1 = id;}
if(idx == Nx -1){id_ip1 = id; id_ip2 = id; id_ip3 = id; }
if(idx == Nx -2){id_ip2 = id; id_ip3 = id; }
if(idx == Nx -3){id_ip3 = id; }
if(idy == 0){id_jm1 = id;}
if(idy == Ny -1){id_jp1 = id; id_jp2 = id; id_jp3 = id; }
if(idy == Ny -2){id_jp2 = id; id_jp3 = id; }
if(idy == Ny -3){id_jp3 = id; }
if(idz == 0){id_km1 = id;}
if(idz == Nz -1){id_kp1 = id; id_kp2 = id; id_kp3 = id; }
if(idz == Nz -2){id_kp2 = id; id_kp3 = id; }
if(idz == Nz -3){id_kp3 = id; }
double beta1, beta2, beta3, beta4, beta5;
//Axis X
beta1 = jbn[id]*d_Phi[id_ip3];
beta2 = jbn[id]*d_Phi[id_ip2];
beta3 = jbn[id]*d_Phi[id_ip1];
beta4 = jbn[id]*d_Phi[id];
beta5 = jbn[id]*d_Phi[id_im1];
phi_xyz[id ] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Y
beta1 = jbn[id + 4*Offset]*d_Phi[id_jp3 + 1*Offset];
beta2 = jbn[id + 4*Offset]*d_Phi[id_jp2 + 1*Offset];
beta3 = jbn[id + 4*Offset]*d_Phi[id_jp1 + 1*Offset];
beta4 = jbn[id + 4*Offset]*d_Phi[id + 1*Offset];
beta5 = jbn[id + 4*Offset]*d_Phi[id_jm1 + 1*Offset];
phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Z
beta1 = jbn[id + 8*Offset]*d_Phi[id_kp3 + 2*Offset];
beta2 = jbn[id + 8*Offset]*d_Phi[id_kp2 + 2*Offset];
beta3 = jbn[id + 8*Offset]*d_Phi[id_kp1 + 2*Offset];
beta4 = jbn[id + 8*Offset]*d_Phi[id + 2*Offset];
beta5 = jbn[id + 8*Offset]*d_Phi[id_km1 + 2*Offset];
phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
return;
}
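// reini_RS_WENO: right-hand side of the level-set reinitialisation equation. Godunov
// upwinding picks between the plus/minus WENO derivatives according to sign(phi),
// grad_mod is |grad phi|, `so` is a smoothed sign function, and rs = so*(|grad phi| - 1).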
__global__ void reini_RS_WENO(
double* const rs,
const double* const phiS,
const double* const phiS0,
const double* const deltaXYZ,
const double* const d_phiP,
const double* const d_phiM,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz
)
{
unsigned int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
unsigned int id = idx + Nx*idy + Nx*Ny*idz;
double so, rs_x, rs_y, rs_z, ta, grad_mod;
double phiMax, phiMin;
ta = (double)(phiS[id] > 0.0) - (double)(phiS[id] < 0.0);
//Getting gradient axis X
phiMax = MAX(d_phiP[id ], 0.0)*MAX(d_phiP[id ], 0.0);
phiMin = MIN(d_phiM[id ], 0.0)*MIN(d_phiM[id ], 0.0);
rs_x = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
phiMax = MAX(d_phiM[id ], 0.0)*MAX(d_phiM[id ], 0.0);
phiMin = MIN(d_phiP[id ], 0.0)*MIN(d_phiP[id ], 0.0);
rs_x += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
//Getting gradient axis Y
phiMax = MAX(d_phiP[id + 1*Offset], 0.0)*MAX(d_phiP[id + 1*Offset], 0.0);
phiMin = MIN(d_phiM[id + 1*Offset], 0.0)*MIN(d_phiM[id + 1*Offset], 0.0);
rs_y = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
phiMax = MAX(d_phiM[id + 1*Offset], 0.0)*MAX(d_phiM[id + 1*Offset], 0.0);
phiMin = MIN(d_phiP[id + 1*Offset], 0.0)*MIN(d_phiP[id + 1*Offset], 0.0);
rs_y += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
//Getting gradient axis Z
phiMax = MAX(d_phiP[id + 2*Offset], 0.0)*MAX(d_phiP[id + 2*Offset], 0.0);
phiMin = MIN(d_phiM[id + 2*Offset], 0.0)*MIN(d_phiM[id + 2*Offset], 0.0);
rs_z = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
phiMax = MAX(d_phiM[id + 2*Offset], 0.0)*MAX(d_phiM[id + 2*Offset], 0.0);
phiMin = MIN(d_phiP[id + 2*Offset], 0.0)*MIN(d_phiP[id + 2*Offset], 0.0);
rs_z += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
grad_mod = sqrt(rs_x + rs_y + rs_z);
so = phiS[id]
/ sqrt(phiS[id]*phiS[id] + grad_mod*deltaXYZ[id]*deltaXYZ[id] );
rs[id] = 1.0*so*(grad_mod - 1.0);
return;
}
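// advect_RS_WENO: upwind discretisation of the advection term u.grad(phi); each axis
// picks the plus- or minus-biased derivative from the sign of the velocity component.
// grad_x/y/z are assembled the same way but are not used in the result here.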
__global__ void advect_RS_WENO(
double* const rs,
const double* const velocity,
const double* const d_phiP_d,
const double* const d_phiM_d,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz
)
{
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
unsigned int id = idx + Nx*idy + Nx*Ny*idz,
Offset = Nx*Ny*Nz;
double rs_x, rs_y, rs_z;
double grad_x, grad_y, grad_z;
double rsign;
rsign = (double)(velocity[id] > 0.0)
- (double)(velocity[id] < 0.0);
rs_x = 0.5*(rsign + 1.0)*velocity[id]*d_phiP_d[id]
+ 0.5*abs(rsign - 1.0)*velocity[id]*d_phiM_d[id];
grad_x = 0.5*(rsign + 1.0)*d_phiP_d[id]
+ 0.5*abs(rsign - 1.0)*d_phiM_d[id];
rsign = (double)(velocity[id + 1*Offset] > 0.0)
- (double)(velocity[id + 1*Offset] < 0.0);
rs_y = 0.5*(rsign + 1.0)*velocity[id + 1*Offset]*d_phiP_d[id + 1*Offset]
+ 0.5*abs(rsign - 1.0)*velocity[id + 1*Offset]*d_phiM_d[id + 1*Offset];
grad_y = 0.5*(rsign + 1.0)*d_phiP_d[id + 1*Offset]
+ 0.5*abs(rsign - 1.0)*d_phiM_d[id + 1*Offset];
rsign = (double)(velocity[id + 2*Offset] > 0.0)
- (double)(velocity[id + 2*Offset] < 0.0);
rs_z = 0.5*(rsign + 1.0)*velocity[id + 2*Offset]*d_phiP_d[id + 2*Offset]
+ 0.5*abs(rsign - 1.0)*velocity[id + 2*Offset]*d_phiM_d[id + 2*Offset];
grad_z = 0.5*(rsign + 1.0)*d_phiP_d[id + 2*Offset]
+ 0.5*abs(rsign - 1.0)*d_phiM_d[id + 2*Offset];
rs[id] = rs_x + rs_y + rs_z;
return;
}
|
fa6c1c6e6de28c7f8fcb6f3add8ebaaa740b7156.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
texture<float, 3, hipReadModeElementType> texSrc, texK;
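// conv3d_tex: direct (non-separable) 3-D convolution; both the source volume and the
// kernel are fetched through 3-D textures with +0.5 offsets to hit texel centres, so
// boundary handling follows whatever addressing mode the textures are bound with.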
__global__
void conv3d_tex(float* out, const int3 im_shape, const int3 k_radius)
{
long x = blockIdx.x * blockDim.x + threadIdx.x;
long y = blockIdx.y * blockDim.y + threadIdx.y;
long z = blockIdx.z * blockDim.z + threadIdx.z;
if ( x >= im_shape.x || y >= im_shape.y || z >= im_shape.z ) {
return;
}
float sum = 0;
long i, j, k;
long px, py, pz;
for ( k = -k_radius.z; k <= k_radius.z; k++ ) {
pz = z + k;
for ( i = -k_radius.y; i <= k_radius.y; i++ ) {
py = y + i;
for ( j = -k_radius.x; j <= k_radius.x; j++ ) {
px = x + j;
sum += tex3D(texSrc,
px + 0.5, py + 0.5, pz + 0.5) * \
tex3D(texK,
j + k_radius.x + 0.5, \
i + k_radius.y + 0.5, \
k + k_radius.z + 0.5);
}
}
}
out[z * im_shape.y * im_shape.x + y * im_shape.x + x] = sum;
}
__global__
void conv3d_axis0(const float* data, const float* kernel, float* out,
const int3 im_shape, int k_radius)
{
long x = blockIdx.x * blockDim.x + threadIdx.x;
long y = blockIdx.y * blockDim.y + threadIdx.y;
long z = blockIdx.z * blockDim.z + threadIdx.z;
long s2d = im_shape.y * im_shape.x;
int kshape = 2 * k_radius + 1;
extern __shared__ float skernel[];
long lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < kshape ) {
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x >= im_shape.x || y >= im_shape.y || z >= im_shape.z ) {
return;
}
float sum = 0;
int k;
long pz;
for ( k = -k_radius; k <= k_radius; k++ ) {
pz = z + k;
if ( pz < 0 ) pz = -pz;
if ( pz >= im_shape.z ) pz = im_shape.z - (pz - im_shape.z) - 1;
if ( pz < 0 ) continue;
sum += data[pz * s2d + y * im_shape.x + x] * skernel[k + k_radius];
}
out[z * s2d + y * im_shape.x + x] = sum;
}
__global__
void conv3d_axis1(const float* data, const float* kernel, float* out,
const int3 im_shape, int k_radius)
{
long x = blockIdx.x * blockDim.x + threadIdx.x;
long y = blockIdx.y * blockDim.y + threadIdx.y;
long z = blockIdx.z * blockDim.z + threadIdx.z;
long s2d = im_shape.y * im_shape.x;
int kshape = 2 * k_radius + 1;
extern __shared__ float skernel[];
long lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < kshape ) {
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x >= im_shape.x || y >= im_shape.y || z >= im_shape.z ) {
return;
}
float sum = 0;
int k;
long py;
for ( k = -k_radius; k <= k_radius; k++ ) {
py = y + k;
if ( py < 0 ) py = -py;
if ( py >= im_shape.y ) py = im_shape.y - (py - im_shape.y) - 1;
if ( py < 0 ) continue;
sum += data[z * s2d + py * im_shape.x + x] * skernel[k + k_radius];
}
out[z * s2d + y * im_shape.x + x] = sum;
}
__global__
void conv3d_axis2(const float* data, const float* kernel, float* out,
const int3 im_shape, int k_radius)
{
long x = blockIdx.x * blockDim.x + threadIdx.x;
long y = blockIdx.y * blockDim.y + threadIdx.y;
long z = blockIdx.z * blockDim.z + threadIdx.z;
long s2d = im_shape.y * im_shape.x;
int kshape = 2 * k_radius + 1;
extern __shared__ float skernel[];
long lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < kshape ) {
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x >= im_shape.x || y >= im_shape.y || z >= im_shape.z ) {
return;
}
float sum = 0;
int k;
long px;
for ( k = -k_radius; k <= k_radius; k++ ) {
px = x + k;
if ( px < 0 ) px = -px;
if ( px >= im_shape.x ) px = im_shape.x - (px - im_shape.x) - 1;
if ( px < 0 ) continue;
sum += data[z * s2d + y * im_shape.x + px] * skernel[k + k_radius];
}
out[z * s2d + y * im_shape.x + x] = sum;
}
| fa6c1c6e6de28c7f8fcb6f3add8ebaaa740b7156.cu |
texture<float, 3, cudaReadModeElementType> texSrc, texK;
__global__
void conv3d_tex(float* out, const int3 im_shape, const int3 k_radius)
{
long x = blockIdx.x * blockDim.x + threadIdx.x;
long y = blockIdx.y * blockDim.y + threadIdx.y;
long z = blockIdx.z * blockDim.z + threadIdx.z;
if ( x >= im_shape.x || y >= im_shape.y || z >= im_shape.z ) {
return;
}
float sum = 0;
long i, j, k;
long px, py, pz;
for ( k = -k_radius.z; k <= k_radius.z; k++ ) {
pz = z + k;
for ( i = -k_radius.y; i <= k_radius.y; i++ ) {
py = y + i;
for ( j = -k_radius.x; j <= k_radius.x; j++ ) {
px = x + j;
sum += tex3D(texSrc,
px + 0.5, py + 0.5, pz + 0.5) * \
tex3D(texK,
j + k_radius.x + 0.5, \
i + k_radius.y + 0.5, \
k + k_radius.z + 0.5);
}
}
}
out[z * im_shape.y * im_shape.x + y * im_shape.x + x] = sum;
}
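// conv3d_axis0/1/2: one pass each of a separable 1-D convolution along z, y and x.
// The 1-D kernel is staged into shared memory by the first kshape threads of a block,
// and indices that fall outside the volume are mirrored back inside.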
__global__
void conv3d_axis0(const float* data, const float* kernel, float* out,
const int3 im_shape, int k_radius)
{
long x = blockIdx.x * blockDim.x + threadIdx.x;
long y = blockIdx.y * blockDim.y + threadIdx.y;
long z = blockIdx.z * blockDim.z + threadIdx.z;
long s2d = im_shape.y * im_shape.x;
int kshape = 2 * k_radius + 1;
extern __shared__ float skernel[];
long lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < kshape ) {
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x >= im_shape.x || y >= im_shape.y || z >= im_shape.z ) {
return;
}
float sum = 0;
int k;
long pz;
for ( k = -k_radius; k <= k_radius; k++ ) {
pz = z + k;
if ( pz < 0 ) pz = -pz;
if ( pz >= im_shape.z ) pz = im_shape.z - (pz - im_shape.z) - 1;
if ( pz < 0 ) continue;
sum += data[pz * s2d + y * im_shape.x + x] * skernel[k + k_radius];
}
out[z * s2d + y * im_shape.x + x] = sum;
}
__global__
void conv3d_axis1(const float* data, const float* kernel, float* out,
const int3 im_shape, int k_radius)
{
long x = blockIdx.x * blockDim.x + threadIdx.x;
long y = blockIdx.y * blockDim.y + threadIdx.y;
long z = blockIdx.z * blockDim.z + threadIdx.z;
long s2d = im_shape.y * im_shape.x;
int kshape = 2 * k_radius + 1;
extern __shared__ float skernel[];
long lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < kshape ) {
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x >= im_shape.x || y >= im_shape.y || z >= im_shape.z ) {
return;
}
float sum = 0;
int k;
long py;
for ( k = -k_radius; k <= k_radius; k++ ) {
py = y + k;
if ( py < 0 ) py = -py;
if ( py >= im_shape.y ) py = im_shape.y - (py - im_shape.y) - 1;
if ( py < 0 ) continue;
sum += data[z * s2d + py * im_shape.x + x] * skernel[k + k_radius];
}
out[z * s2d + y * im_shape.x + x] = sum;
}
__global__
void conv3d_axis2(const float* data, const float* kernel, float* out,
const int3 im_shape, int k_radius)
{
long x = blockIdx.x * blockDim.x + threadIdx.x;
long y = blockIdx.y * blockDim.y + threadIdx.y;
long z = blockIdx.z * blockDim.z + threadIdx.z;
long s2d = im_shape.y * im_shape.x;
int kshape = 2 * k_radius + 1;
extern __shared__ float skernel[];
long lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < kshape ) {
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x >= im_shape.x || y >= im_shape.y || z >= im_shape.z ) {
return;
}
float sum = 0;
int k;
long px;
for ( k = -k_radius; k <= k_radius; k++ ) {
px = x + k;
if ( px < 0 ) px = -px;
if ( px >= im_shape.x ) px = im_shape.x - (px - im_shape.x) - 1;
if ( px < 0 ) continue;
sum += data[z * s2d + y * im_shape.x + px] * skernel[k + k_radius];
}
out[z * s2d + y * im_shape.x + x] = sum;
}
|
0e571e75a159a7eee7dafaac6abe62071e7790e0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* University of Illinois Open Source License
* Copyright 2012-2018 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Mike Hallock
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "config.h"
#include "lptf/Profile.h"
#include "MultiGPUMapper.h"
#include "ZDivMultiGPUMapper.h"
#include "core/Print.h"
using lm::Print;
#define LB_MAX_MEM_THRESHOLD 0.9f
#define LB_IMBALANCE_DAMPENER 0.40f
ZDivMultiGPUMapper::ZDivMultiGPUMapper(dim3 l, size_t m, int a, int c, int n, int *gl, bool pz, int pages)
: MultiGPUMapper(l, m, a, c, n, gl, pages), info(NULL), periodic_z(pz)
{
info=new gpu_info[num_gpus];
size_t total_mem=0;
int fair_chunks=int(round((float)l.z/TUNE_MPD_Z_BLOCK_Z_SIZE)/num_gpus);
for(int i=0; i<num_gpus; i++)
{
total_mem += device_memory[i];
size_t max_mem=(device_memory[i]*LB_MAX_MEM_THRESHOLD)/2;
info[i].lb_max_chunks=max_mem/(l.x*l.y*TUNE_MPD_Z_BLOCK_Z_SIZE*sizeof(float));
info[i].lb_chunks=(info[i].lb_max_chunks < fair_chunks ? info[i].lb_max_chunks : fair_chunks);
info[i].lb_imbalance=0;
}
/*
printf("Required Memory: %llu, available aggregte: %llu\n",
l.x*l.y*l.z*sizeof(float)*2,
total_mem);
*/
if(total_mem < l.x*l.y*l.z*sizeof(float)*2)
{
throw("Insufficient aggregate memory");
}
determine_load_balance();
initialize();
}
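// Overload taking the lattice extent as separate ints; same load-balance bookkeeping as
// above, except the aggregate-memory check also multiplies by the page count.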
ZDivMultiGPUMapper::ZDivMultiGPUMapper(int x, int y, int z, size_t cellsize, int apron, int overlap, int ngpu, int *gl, bool pz, int pages)
: MultiGPUMapper(dim3(x,y,z), cellsize, apron, overlap, ngpu, gl, pages), info(NULL), periodic_z(pz)
{
info=new gpu_info[num_gpus];
size_t total_mem=0;
int fair_chunks=int(round((float)z/TUNE_MPD_Z_BLOCK_Z_SIZE)/num_gpus);
for(int i=0; i<num_gpus; i++)
{
total_mem += device_memory[i];
size_t max_mem=(device_memory[i]*LB_MAX_MEM_THRESHOLD)/2;
info[i].lb_max_chunks=max_mem/(x*y*TUNE_MPD_Z_BLOCK_Z_SIZE*sizeof(float));
info[i].lb_chunks=(info[i].lb_max_chunks < fair_chunks ? info[i].lb_max_chunks : fair_chunks);
info[i].lb_imbalance=0;
}
/*
printf("Required Memory: %llu, available aggregte: %llu\n",
x*y*z*sizeof(float)*2*pages,
total_mem);
*/
if(total_mem < x*y*z*sizeof(float)*2*pages)
{
throw("Insufficient aggregate memory");
}
determine_load_balance();
initialize();
}
ZDivMultiGPUMapper::~ZDivMultiGPUMapper()
{
}
void ZDivMultiGPUMapper::initialize()
{
int zstart=0;
for(int i=0; i<num_gpus; i++)
{
gpu_info g=info[i];
// Determine the length of the lattice as dictated by load balancing
int zdim=g.lb_chunks*TUNE_MPD_Z_BLOCK_Z_SIZE;
assert(zdim <= lattice_dim.z);
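// The shadow region on each side is the overlap plus apron planes replicated
// from the neighboring GPU; it collapses to zero at a non-periodic boundary.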
int top_overlap = ((periodic_z || i > 0) ? overlap : 0);
int bottom_overlap = ((periodic_z || i < num_gpus-1) ? overlap : 0);
int top_apron = ((periodic_z || i > 0) ? apron : 0);
int bottom_apron = ((periodic_z || i < num_gpus-1) ? apron : 0);
int top_shadow = top_overlap+top_apron;
int bottom_shadow = bottom_overlap+bottom_apron;
// Store global position and dimension that this segment represents
g.global_pos=dim3(0,0,zstart);
g.global_dim=dim3(lattice_dim.x, lattice_dim.y, zdim);
// Store the dimensions of the local lattice
g.local_dim=dim3(lattice_dim.x, lattice_dim.y, zdim+top_shadow+bottom_shadow);
// Local authority is where in the local lattice authoritative data begins
g.local_authority=dim3(0,0, top_shadow);
// Dependent position is where the global authority begins, less the shadow region
// NOTE: can be negative in periodic cases!
g.dependent_pos.x=0;
g.dependent_pos.y=0;
g.dependent_pos.z=zstart - top_shadow;
// dimensions of the shadow regions
g.overlap_dim=dim3(lattice_dim.x, lattice_dim.y, overlap+apron);
// Where my neighbor reads from me
g.overlap_send[Z_SHADOW_TOP]=dim3(0,0,overlap+apron);
g.overlap_send[Z_SHADOW_BOTTOM]=dim3(0,0, zdim-(overlap+apron) + top_shadow);
// Where I receive
g.overlap_recv[Z_SHADOW_TOP]=dim3(0,0,0);
g.overlap_recv[Z_SHADOW_BOTTOM]=dim3(0,0, zdim + top_shadow);
g.neighbor[Z_SHADOW_TOP]=i-1;
g.neighbor[Z_SHADOW_BOTTOM]=i+1;
// If Z-Periodic, set top and bottom to be neighbors
if(periodic_z)
{
if(i==0)
{
g.neighbor[Z_SHADOW_TOP]=num_gpus-1;
}
if(i==num_gpus-1)
{
g.neighbor[Z_SHADOW_BOTTOM]=0;
}
}
info[i]=g;
build_descriptor(i, g.local_dim, g.dependent_pos,
dim3(lattice_dim.x, lattice_dim.y, (zdim+top_overlap+bottom_overlap)),
dim3(0, 0, top_apron));
zstart+=zdim;
}
}
void ZDivMultiGPUMapper::initialize_gpu(int gpu)
{
MultiGPUMapper::initialize_gpu(gpu);
// we assume peering is always commutative; so if we can peer with a
// neighbor, they can peer with us. If we can peer, then allocate memory
// here so that they can write to it during publish. Otherwise, make a
// host bounce buffer.
size_t ss=DIMSIZE(info[gpu].overlap_dim)*cellsize;
// Peer with top neighbor
int neighbor = info[gpu].neighbor[Z_SHADOW_TOP];
//printf("top neighbor of %d is %d\n", gpu, neighbor);
if(neighbor >= 0 && neighbor < num_gpus)
{
neighbor_buffer *nb=new neighbor_buffer;
nb->size=ss;
if(enable_peer_access(gpu, neighbor))
{
hipMalloc(&(nb->buffer[0]), ss*pagecount);
hipMalloc(&(nb->buffer[1]), ss*pagecount);
hipMemset(nb->buffer[0], 255, ss*pagecount);
hipMemset(nb->buffer[1], 255, ss*pagecount);
}
else
{
hipHostMalloc(&(nb->buffer[0]), ss*pagecount, hipHostMallocPortable);
hipHostMalloc(&(nb->buffer[1]), ss*pagecount, hipHostMallocPortable);
}
info[gpu].read_buffer[Z_SHADOW_TOP]=nb;
info[neighbor].write_buffer[Z_SHADOW_BOTTOM]=nb;
// make local temp buffer to collect pages before P2P
hipMalloc(&(info[gpu].tmp_buffer[0]), ss*pagecount);
}
// Peer with bottom neighbor
neighbor = info[gpu].neighbor[Z_SHADOW_BOTTOM];
//printf("bottom neighbor of %d is %d\n", gpu, neighbor);
if(neighbor >= 0 && neighbor < num_gpus)
{
neighbor_buffer *nb=new neighbor_buffer;
nb->size=ss;
if(enable_peer_access(gpu, neighbor))
{
hipMalloc(&(nb->buffer[0]), ss*pagecount);
hipMalloc(&(nb->buffer[1]), ss*pagecount);
hipMemset(nb->buffer[0], 255, ss*pagecount);
hipMemset(nb->buffer[1], 255, ss*pagecount);
}
else
{
hipHostMalloc(&(nb->buffer[0]), ss*pagecount, hipHostMallocPortable);
hipHostMalloc(&(nb->buffer[1]), ss*pagecount, hipHostMallocPortable);
}
info[gpu].read_buffer[Z_SHADOW_BOTTOM]=nb;
info[neighbor].write_buffer[Z_SHADOW_TOP]=nb;
// make local temp buffer to collect pages before P2P
hipMalloc(&(info[gpu].tmp_buffer[1]), ss*pagecount);
}
check_error();
}
dim3 ZDivMultiGPUMapper::get_global_dim(int gpu)
{
return info[gpu].global_dim;
}
dim3 ZDivMultiGPUMapper::get_local_dim(int gpu)
{
return info[gpu].local_dim;
}
int3 ZDivMultiGPUMapper::get_global_offset(int gpu)
{
return info[gpu].dependent_pos;
}
size_t ZDivMultiGPUMapper::get_local_size(int gpu)
{
return DIMSIZE(info[gpu].local_dim)*cellsize;
}
size_t ZDivMultiGPUMapper::get_authority_size(int gpu)
{
return DIMSIZE(info[gpu].global_dim)*cellsize;
}
ssize_t ZDivMultiGPUMapper::get_global_input_offset(int gpu)
{
const int3 dp=info[gpu].dependent_pos;
const dim3 gd=info[gpu].global_dim;
return dp.x + dp.y * (int)gd.x + dp.z * (int)gd.x * (int)gd.y;
}
size_t ZDivMultiGPUMapper::get_global_output_offset(int gpu)
{
return POSDIM(info[gpu].global_pos, info[gpu].global_dim);
}
size_t ZDivMultiGPUMapper::get_authority_offset(int gpu)
{
return POSDIM(info[gpu].local_authority, info[gpu].local_dim);
}
void ZDivMultiGPUMapper::stage_in(int gpu, void *dptr, void *hptr)
{
for(int p=0; p<pagecount; p++)
{
char *dpage = (char*)dptr + p*get_local_size(gpu);
char *hpage = (char*)hptr + p*get_global_size();
stage_in_real(gpu, dpage, hpage, cellsize);
}
}
void ZDivMultiGPUMapper::stage_in_sites(int gpu, void *dptr, void *hptr)
{
stage_in_real(gpu, dptr, hptr, 1);
}
void ZDivMultiGPUMapper::stage_in_real(int gpu, void *dptr, void *hptr, unsigned int element_size)
{
PROF_BEGIN(PROF_H2D);
char *src=(char*)hptr;
char *dst=(char*)dptr;
ssize_t offset=get_global_input_offset(gpu)*element_size;
size_t localsize=DIMSIZE(info[gpu].local_dim)*element_size;
size_t latsize=DIMSIZE(lattice_dim)*element_size;
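// A negative offset, or a segment running past the end of the lattice, means
// this GPU's region wraps across the periodic Z boundary; the wrapped bytes
// are copied from the opposite end of the global lattice first.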
if(offset < 0)
{
// Read adj bytes from the end of the lattice
size_t adj=abs(offset);
hipMemcpyAsync(dst, src+latsize-adj, adj, hipMemcpyDefault);
check_error();
dst+=adj;
offset=0;
localsize-=adj;
}
else if((offset+localsize) >=latsize)
{
// read adj bytes from the beginning of the lattice
size_t adj=(offset+localsize)-latsize;
hipMemcpyAsync(dst+localsize-adj, src, adj, hipMemcpyDefault);
check_error();
localsize-=adj;
}
src += offset;
hipMemcpy(dst, src, localsize, hipMemcpyDefault);
check_error();
PROF_END(PROF_H2D);
}
void ZDivMultiGPUMapper::stage_out(int gpu, void *hptr, void *dptr)
{
for(int p=0; p<pagecount; p++)
{
char *dpage = (char*)dptr + p*get_local_size(gpu);
char *hpage = (char*)hptr + p*get_global_size();
stage_out_real(gpu, hpage, dpage);
}
}
void ZDivMultiGPUMapper::stage_out_real(int gpu, void *hptr, void *dptr)
{
PROF_BEGIN(PROF_D2H);
char *src=(char*)dptr;
src += get_authority_offset(gpu) * cellsize;
char *dst=(char*)hptr;
dst += get_global_output_offset(gpu) * cellsize;
hipMemcpy(dst, src, get_authority_size(gpu), hipMemcpyDefault);
check_error();
PROF_END(PROF_D2H);
}
void ZDivMultiGPUMapper::publish_state(int gpu, void *dptr, int timestep)
{
schedule_send(gpu, dptr, timestep, Z_SHADOW_TOP, 0);
schedule_send(gpu, dptr, timestep, Z_SHADOW_BOTTOM, 0);
if(hipDeviceSynchronize() != hipSuccess)
throw("cuda error");
}
void ZDivMultiGPUMapper::schedule_recv(int gpu, void *dptr, int key, int neighbor, hipStream_t s)
{
int other=info[gpu].neighbor[neighbor];
if(other < 0 || other >= num_gpus)
return;
key=key&1;
neighbor_buffer *nb=info[gpu].read_buffer[neighbor];
char *lbuf=(char*)dptr;
size_t lofs=POSDIM(info[gpu].overlap_recv[neighbor], info[gpu].local_dim);
lbuf+=lofs*cellsize;
PROF_CUDA_BEGIN(PROF_MRBASE+neighbor, s);
char *nbuf=nb->buffer[key];
for(int p=0; p<pagecount; p++)
hipMemcpyAsync(lbuf+p*get_local_size(gpu),
nbuf+p*(nb->size),
nb->size, hipMemcpyDefault,s);
PROF_CUDA_END(PROF_MRBASE+neighbor, s);
}
void ZDivMultiGPUMapper::schedule_send(int gpu, void *dptr, int key, int neighbor, hipStream_t s)
{
int other=info[gpu].neighbor[neighbor];
if(other < 0 || other >= num_gpus)
return;
size_t ofs;
char *buf;
key=(key&1)^1;
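// The neighbor buffers appear to be double buffered on timestep parity:
// sends write the buffer of opposite parity to the one schedule_recv reads
// for the same key, so publishing the next step cannot clobber data still
// being consumed. Pages are first gathered into a contiguous tmp buffer and
// then pushed to the neighbor's buffer in a single copy.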
neighbor_buffer *nb=info[gpu].write_buffer[neighbor];
ofs=POSDIM(info[gpu].overlap_send[neighbor], info[gpu].local_dim);
buf=(char*)dptr+ofs*cellsize;
PROF_CUDA_BEGIN(PROF_MSBASE+neighbor, s);
char *tbuf=info[gpu].tmp_buffer[neighbor];
for(int p=0; p<pagecount; p++)
hipMemcpyAsync(tbuf+p*(nb->size),
buf+p*get_local_size(gpu),
nb->size, hipMemcpyDefault,s);
PROF_CUDA_END(PROF_MSBASE+neighbor, s);
char *nbuf=nb->buffer[key];
hipMemcpyAsync(nbuf,
tbuf,
nb->size*pagecount, hipMemcpyDefault,s);
}
gpu_info* ZDivMultiGPUMapper::getinfo(int gpu)
{
return &(info[gpu]);
}
unsigned int* ZDivMultiGPUMapper::gettbuf(int gpu, int key, int neighbor)
{
return (unsigned int*)info[gpu].tmp_buffer[neighbor];
}
unsigned int* ZDivMultiGPUMapper::getrbuf(int gpu, int key, int neighbor)
{
key=key&1;
neighbor_buffer *nb=info[gpu].read_buffer[neighbor];
return (unsigned int*)nb->buffer[key];
}
void ZDivMultiGPUMapper::send_tbuf(int gpu, int key, int neighbor, hipStream_t s)
{
key=(key&1)^1;
neighbor_buffer *nb=info[gpu].write_buffer[neighbor];
char *tbuf=info[gpu].tmp_buffer[neighbor];
char *nbuf=nb->buffer[key];
hipMemcpyAsync(nbuf,
tbuf,
nb->size*pagecount, hipMemcpyDefault,s);
}
/*
void ZDivMultiGPUMapper::copy_to_neighbor(int gpu, int neighbor, int key)
{
int other=info[gpu].neighbor[neighbor];
if(other < 0 || other >= num_gpus)
return;
size_t ofs;
char *buf;
neighbor_buffer *nb=info[gpu].write_buffer[neighbor];
ofs=POSDIM(info[gpu].overlap_send[neighbor], info[gpu].local_dim);
buf=(char*)info[gpu].current_state+ofs*cellsize;
PROF_BEGIN(PROF_D2D);
hipMemcpy(nb->buffer[key], buf, nb->size, hipMemcpyDefault);
check_error();
PROF_END(PROF_D2D);
}
void ZDivMultiGPUMapper::copy_from_neighbor(int gpu, int neighbor, void *dptr, int key)
{
int other=info[gpu].neighbor[neighbor];
if(other < 0 || other >= num_gpus)
return;
neighbor_buffer *nb=info[gpu].read_buffer[neighbor];
char *lbuf=(char*)dptr;
size_t lofs=POSDIM(info[gpu].overlap_recv[neighbor], info[gpu].local_dim);
lbuf+=lofs*cellsize;
//printf("gpu %d dir %d from buf %X\n",gpu,neighbor,nptr);
PROF_BEGIN(PROF_D2D);
hipMemcpy(lbuf, nb->buffer[key], nb->size, hipMemcpyDefault);
PROF_END(PROF_D2D);
}
*/
void ZDivMultiGPUMapper::refresh(int gpu, void *dptr, int timestep)
{
schedule_recv(gpu, dptr, timestep, Z_SHADOW_TOP, 0);
schedule_recv(gpu, dptr, timestep, Z_SHADOW_BOTTOM, 0);
if(hipDeviceSynchronize() != hipSuccess)
throw("cuda error");
}
int ZDivMultiGPUMapper::map_index_to_gpu(size_t index)
{
size_t plane=lattice_dim.x * lattice_dim.y;
size_t zplane = index / plane;
int i;
for(i=0; i<num_gpus; i++)
{
if(zplane < info[i].global_pos.z)
break;
}
return i-1;
}
bool ZDivMultiGPUMapper::determine_load_balance()
{
bool changed=false;
// Determine the average runtime among 'active' gpus
// That is, ones that are not capacity constrained.
unsigned int sum=0;
unsigned int active=0;
for(int i=0; i<num_gpus; i++)
{
if(info[i].lb_chunks < info[i].lb_max_chunks)
{
active++;
sum+=lb_cost[i];
}
}
float avg=(float)sum/active;
//printf("* sum = %d, active = %d, avg = %f\n", sum, active, avg);
// Determine balance for non-constrained gpus
int zblocks=lattice_dim.z/TUNE_MPD_Z_BLOCK_Z_SIZE;
float chunk_pct=1.0f/(float)zblocks;
float equalpoint=1.0f/active;
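// Each unconstrained GPU's cost relative to the average, scaled to an equal
// share and blended with dampened history, is converted into whole
// TUNE_MPD_Z_BLOCK_Z_SIZE chunks to add to or shave from that GPU.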
for(int i=0; i<num_gpus; i++)
{
// skip constrained gpus
if(info[i].lb_chunks == info[i].lb_max_chunks)
continue;
// Calculate percent difference from the average
// and scale it against the equal point
float ib_acc=((lb_cost[i]-avg)/avg)*equalpoint;
//printf("%d cost %d ca %f eq %f ib now %f ib acc %f\n",
// i, lb_cost[i], lb_cost[i]/avg, equalpoint, ib_acc, info[i].lb_imbalance);
// combine this imbalance with the cumulative
// effects of the past, dampened by a factor
float imbalance=ib_acc+info[i].lb_imbalance*LB_IMBALANCE_DAMPENER;
info[i].lb_imbalance=imbalance;
// Compute the magnitude of the number of chunks to adjust
int magnitude=(int)floor(fabs(imbalance)/chunk_pct);
if(magnitude > 0)
{
int sign=(imbalance > 0 ? -1 : 1);
int adjust=magnitude*sign;
alter_chunks(i, adjust);
changed=true;
info[i].lb_imbalance=0;
printf("[%d] %s %d blocks\n",i,(adjust>0 ? "added" : "shaved"),magnitude);
}
}
// Check to make sure everything is accounted for
int blocks_consumed=0;
for(int i=0; i<num_gpus; i++)
{
blocks_consumed+=info[i].lb_chunks;
}
if(blocks_consumed != zblocks)
{
for(int i=0; i<num_gpus; i++)
{
alter_chunks(i, 1);
blocks_consumed++;
if(blocks_consumed == zblocks) break;
}
/*
// Attempt to equally distribute any discrepancy
int correction=(int)round((zblocks-blocks_consumed)/active);
if(correction > 0)
{
blocks_consumed=0;
for(int i=0; i<num_gpus; i++)
{
if(info[i].lb_chunks == info[i].lb_max_chunks)
{
blocks_consumed+=info[i].lb_chunks;
continue;
}
blocks_consumed+=alter_chunks(i, correction);
printf("[%d] augmented %d blocks\n",i,correction);
}
}
// If still uneven, find the most imbalanced and apply difference
if(blocks_consumed != zblocks)
{
int most_off=0;
float most_off_amt=0.0f;
for(int i=0; i<num_gpus; i++)
{
if(info[i].lb_chunks == info[i].lb_max_chunks)
continue;
if(fabs(info[i].lb_imbalance) >= most_off_amt)
{
most_off_amt=fabs(info[i].lb_imbalance);
most_off=i;
}
}
alter_chunks(most_off, zblocks-blocks_consumed);
printf("[%d] forced %d blocks\n",most_off,zblocks-blocks_consumed);
info[most_off].lb_imbalance=0;
}
*/
changed=true;
}
Print::printf(Print::DEBUG, "LB State: ");
for(int i=0; i<num_gpus; i++)
Print::printf(Print::DEBUG, "%+.02f%% (n=%d) \t", info[i].lb_imbalance*100, info[i].lb_chunks);
Print::printf(Print::DEBUG, "\n");
return changed;
}
int ZDivMultiGPUMapper::alter_chunks(int gpu, int count)
{
count+=info[gpu].lb_chunks;
if(count < 1)
count=1;
//printf("[%d] req of %d, max is %d\n", gpu, count, max_chunks);
count = (info[gpu].lb_max_chunks < count ? info[gpu].lb_max_chunks : count);
info[gpu].lb_chunks=count;
return count;
}
| 0e571e75a159a7eee7dafaac6abe62071e7790e0.cu | /*
* University of Illinois Open Source License
* Copyright 2012-2018 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Mike Hallock
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "config.h"
#include "lptf/Profile.h"
#include "MultiGPUMapper.h"
#include "ZDivMultiGPUMapper.h"
#include "core/Print.h"
using lm::Print;
#define LB_MAX_MEM_THRESHOLD 0.9f
#define LB_IMBALANCE_DAMPENER 0.40f
ZDivMultiGPUMapper::ZDivMultiGPUMapper(dim3 l, size_t m, int a, int c, int n, int *gl, bool pz, int pages)
: MultiGPUMapper(l, m, a, c, n, gl, pages), info(NULL), periodic_z(pz)
{
info=new gpu_info[num_gpus];
size_t total_mem=0;
int fair_chunks=int(round((float)l.z/TUNE_MPD_Z_BLOCK_Z_SIZE)/num_gpus);
for(int i=0; i<num_gpus; i++)
{
total_mem += device_memory[i];
size_t max_mem=(device_memory[i]*LB_MAX_MEM_THRESHOLD)/2;
info[i].lb_max_chunks=max_mem/(l.x*l.y*TUNE_MPD_Z_BLOCK_Z_SIZE*sizeof(float));
info[i].lb_chunks=(info[i].lb_max_chunks < fair_chunks ? info[i].lb_max_chunks : fair_chunks);
info[i].lb_imbalance=0;
}
/*
printf("Required Memory: %llu, available aggregte: %llu\n",
l.x*l.y*l.z*sizeof(float)*2,
total_mem);
*/
if(total_mem < l.x*l.y*l.z*sizeof(float)*2)
{
throw("Insufficient aggregate memory");
}
determine_load_balance();
initialize();
}
ZDivMultiGPUMapper::ZDivMultiGPUMapper(int x, int y, int z, size_t cellsize, int apron, int overlap, int ngpu, int *gl, bool pz, int pages)
: MultiGPUMapper(dim3(x,y,z), cellsize, apron, overlap, ngpu, gl, pages), info(NULL), periodic_z(pz)
{
info=new gpu_info[num_gpus];
size_t total_mem=0;
int fair_chunks=int(round((float)z/TUNE_MPD_Z_BLOCK_Z_SIZE)/num_gpus);
for(int i=0; i<num_gpus; i++)
{
total_mem += device_memory[i];
size_t max_mem=(device_memory[i]*LB_MAX_MEM_THRESHOLD)/2;
info[i].lb_max_chunks=max_mem/(x*y*TUNE_MPD_Z_BLOCK_Z_SIZE*sizeof(float));
info[i].lb_chunks=(info[i].lb_max_chunks < fair_chunks ? info[i].lb_max_chunks : fair_chunks);
info[i].lb_imbalance=0;
}
/*
printf("Required Memory: %llu, available aggregte: %llu\n",
x*y*z*sizeof(float)*2*pages,
total_mem);
*/
if(total_mem < x*y*z*sizeof(float)*2*pages)
{
throw("Insufficient aggregate memory");
}
determine_load_balance();
initialize();
}
ZDivMultiGPUMapper::~ZDivMultiGPUMapper()
{
}
void ZDivMultiGPUMapper::initialize()
{
int zstart=0;
for(int i=0; i<num_gpus; i++)
{
gpu_info g=info[i];
// Determine the length of the lattice as dictated by load balancing
int zdim=g.lb_chunks*TUNE_MPD_Z_BLOCK_Z_SIZE;
assert(zdim <= lattice_dim.z);
int top_overlap = ((periodic_z || i > 0) ? overlap : 0);
int bottom_overlap = ((periodic_z || i < num_gpus-1) ? overlap : 0);
int top_apron = ((periodic_z || i > 0) ? apron : 0);
int bottom_apron = ((periodic_z || i < num_gpus-1) ? apron : 0);
int top_shadow = top_overlap+top_apron;
int bottom_shadow = bottom_overlap+bottom_apron;
// Store global position and dimension that this segment represents
g.global_pos=dim3(0,0,zstart);
g.global_dim=dim3(lattice_dim.x, lattice_dim.y, zdim);
// Store the dimensions of the local lattice
g.local_dim=dim3(lattice_dim.x, lattice_dim.y, zdim+top_shadow+bottom_shadow);
// Local authority is where in the local lattice authoritative data begins
g.local_authority=dim3(0,0, top_shadow);
// Dependent position is where the global authority begins, less the shadow region
// NOTE: can be negative in periodic cases!
g.dependent_pos.x=0;
g.dependent_pos.y=0;
g.dependent_pos.z=zstart - top_shadow;
// dimensions of the shadow regions
g.overlap_dim=dim3(lattice_dim.x, lattice_dim.y, overlap+apron);
// Where my neighbor reads from me
g.overlap_send[Z_SHADOW_TOP]=dim3(0,0,overlap+apron);
g.overlap_send[Z_SHADOW_BOTTOM]=dim3(0,0, zdim-(overlap+apron) + top_shadow);
// Where I receive
g.overlap_recv[Z_SHADOW_TOP]=dim3(0,0,0);
g.overlap_recv[Z_SHADOW_BOTTOM]=dim3(0,0, zdim + top_shadow);
g.neighbor[Z_SHADOW_TOP]=i-1;
g.neighbor[Z_SHADOW_BOTTOM]=i+1;
// If Z-Periodic, set top and bottom to be neighbors
if(periodic_z)
{
if(i==0)
{
g.neighbor[Z_SHADOW_TOP]=num_gpus-1;
}
if(i==num_gpus-1)
{
g.neighbor[Z_SHADOW_BOTTOM]=0;
}
}
info[i]=g;
build_descriptor(i, g.local_dim, g.dependent_pos,
dim3(lattice_dim.x, lattice_dim.y, (zdim+top_overlap+bottom_overlap)),
dim3(0, 0, top_apron));
zstart+=zdim;
}
}
void ZDivMultiGPUMapper::initialize_gpu(int gpu)
{
MultiGPUMapper::initialize_gpu(gpu);
// we assume peering is always commutative; so if we can peer with a
// neighbor, they can peer with us. If we can peer, then allocate memory
// here so that they can write to it during publish. Otherwise, make a
// host bounce buffer.
size_t ss=DIMSIZE(info[gpu].overlap_dim)*cellsize;
// Peer with top neighbor
int neighbor = info[gpu].neighbor[Z_SHADOW_TOP];
//printf("top neighbor of %d is %d\n", gpu, neighbor);
if(neighbor >= 0 && neighbor < num_gpus)
{
neighbor_buffer *nb=new neighbor_buffer;
nb->size=ss;
if(enable_peer_access(gpu, neighbor))
{
cudaMalloc(&(nb->buffer[0]), ss*pagecount);
cudaMalloc(&(nb->buffer[1]), ss*pagecount);
cudaMemset(nb->buffer[0], 255, ss*pagecount);
cudaMemset(nb->buffer[1], 255, ss*pagecount);
}
else
{
cudaHostAlloc(&(nb->buffer[0]), ss*pagecount, cudaHostAllocPortable);
cudaHostAlloc(&(nb->buffer[1]), ss*pagecount, cudaHostAllocPortable);
}
info[gpu].read_buffer[Z_SHADOW_TOP]=nb;
info[neighbor].write_buffer[Z_SHADOW_BOTTOM]=nb;
// make local temp buffer to collect pages before P2P
cudaMalloc(&(info[gpu].tmp_buffer[0]), ss*pagecount);
}
// Peer with bottom neighbor
neighbor = info[gpu].neighbor[Z_SHADOW_BOTTOM];
//printf("bottom neighbor of %d is %d\n", gpu, neighbor);
if(neighbor >= 0 && neighbor < num_gpus)
{
neighbor_buffer *nb=new neighbor_buffer;
nb->size=ss;
if(enable_peer_access(gpu, neighbor))
{
cudaMalloc(&(nb->buffer[0]), ss*pagecount);
cudaMalloc(&(nb->buffer[1]), ss*pagecount);
cudaMemset(nb->buffer[0], 255, ss*pagecount);
cudaMemset(nb->buffer[1], 255, ss*pagecount);
}
else
{
cudaHostAlloc(&(nb->buffer[0]), ss*pagecount, cudaHostAllocPortable);
cudaHostAlloc(&(nb->buffer[1]), ss*pagecount, cudaHostAllocPortable);
}
info[gpu].read_buffer[Z_SHADOW_BOTTOM]=nb;
info[neighbor].write_buffer[Z_SHADOW_TOP]=nb;
// make local temp buffer to collect pages before P2P
cudaMalloc(&(info[gpu].tmp_buffer[1]), ss*pagecount);
}
check_error();
}
dim3 ZDivMultiGPUMapper::get_global_dim(int gpu)
{
return info[gpu].global_dim;
}
dim3 ZDivMultiGPUMapper::get_local_dim(int gpu)
{
return info[gpu].local_dim;
}
int3 ZDivMultiGPUMapper::get_global_offset(int gpu)
{
return info[gpu].dependent_pos;
}
size_t ZDivMultiGPUMapper::get_local_size(int gpu)
{
return DIMSIZE(info[gpu].local_dim)*cellsize;
}
size_t ZDivMultiGPUMapper::get_authority_size(int gpu)
{
return DIMSIZE(info[gpu].global_dim)*cellsize;
}
ssize_t ZDivMultiGPUMapper::get_global_input_offset(int gpu)
{
const int3 dp=info[gpu].dependent_pos;
const dim3 gd=info[gpu].global_dim;
return dp.x + dp.y * (int)gd.x + dp.z * (int)gd.x * (int)gd.y;
}
size_t ZDivMultiGPUMapper::get_global_output_offset(int gpu)
{
return POSDIM(info[gpu].global_pos, info[gpu].global_dim);
}
size_t ZDivMultiGPUMapper::get_authority_offset(int gpu)
{
return POSDIM(info[gpu].local_authority, info[gpu].local_dim);
}
void ZDivMultiGPUMapper::stage_in(int gpu, void *dptr, void *hptr)
{
for(int p=0; p<pagecount; p++)
{
char *dpage = (char*)dptr + p*get_local_size(gpu);
char *hpage = (char*)hptr + p*get_global_size();
stage_in_real(gpu, dpage, hpage, cellsize);
}
}
void ZDivMultiGPUMapper::stage_in_sites(int gpu, void *dptr, void *hptr)
{
stage_in_real(gpu, dptr, hptr, 1);
}
void ZDivMultiGPUMapper::stage_in_real(int gpu, void *dptr, void *hptr, unsigned int element_size)
{
PROF_BEGIN(PROF_H2D);
char *src=(char*)hptr;
char *dst=(char*)dptr;
ssize_t offset=get_global_input_offset(gpu)*element_size;
size_t localsize=DIMSIZE(info[gpu].local_dim)*element_size;
size_t latsize=DIMSIZE(lattice_dim)*element_size;
if(offset < 0)
{
// Read adj bytes from the end of the lattice
size_t adj=abs(offset);
cudaMemcpyAsync(dst, src+latsize-adj, adj, cudaMemcpyDefault);
check_error();
dst+=adj;
offset=0;
localsize-=adj;
}
else if((offset+localsize) >=latsize)
{
// read adj bytes from the beginning of the lattice
size_t adj=(offset+localsize)-latsize;
cudaMemcpyAsync(dst+localsize-adj, src, adj, cudaMemcpyDefault);
check_error();
localsize-=adj;
}
src += offset;
cudaMemcpy(dst, src, localsize, cudaMemcpyDefault);
check_error();
PROF_END(PROF_H2D);
}
void ZDivMultiGPUMapper::stage_out(int gpu, void *hptr, void *dptr)
{
for(int p=0; p<pagecount; p++)
{
char *dpage = (char*)dptr + p*get_local_size(gpu);
char *hpage = (char*)hptr + p*get_global_size();
stage_out_real(gpu, hpage, dpage);
}
}
void ZDivMultiGPUMapper::stage_out_real(int gpu, void *hptr, void *dptr)
{
PROF_BEGIN(PROF_D2H);
char *src=(char*)dptr;
src += get_authority_offset(gpu) * cellsize;
char *dst=(char*)hptr;
dst += get_global_output_offset(gpu) * cellsize;
cudaMemcpy(dst, src, get_authority_size(gpu), cudaMemcpyDefault);
check_error();
PROF_END(PROF_D2H);
}
void ZDivMultiGPUMapper::publish_state(int gpu, void *dptr, int timestep)
{
schedule_send(gpu, dptr, timestep, Z_SHADOW_TOP, 0);
schedule_send(gpu, dptr, timestep, Z_SHADOW_BOTTOM, 0);
if(cudaDeviceSynchronize() != cudaSuccess)
throw("cuda error");
}
void ZDivMultiGPUMapper::schedule_recv(int gpu, void *dptr, int key, int neighbor, cudaStream_t s)
{
int other=info[gpu].neighbor[neighbor];
if(other < 0 || other >= num_gpus)
return;
key=key&1;
neighbor_buffer *nb=info[gpu].read_buffer[neighbor];
char *lbuf=(char*)dptr;
size_t lofs=POSDIM(info[gpu].overlap_recv[neighbor], info[gpu].local_dim);
lbuf+=lofs*cellsize;
PROF_CUDA_BEGIN(PROF_MRBASE+neighbor, s);
char *nbuf=nb->buffer[key];
for(int p=0; p<pagecount; p++)
cudaMemcpyAsync(lbuf+p*get_local_size(gpu),
nbuf+p*(nb->size),
nb->size, cudaMemcpyDefault,s);
PROF_CUDA_END(PROF_MRBASE+neighbor, s);
}
void ZDivMultiGPUMapper::schedule_send(int gpu, void *dptr, int key, int neighbor, cudaStream_t s)
{
int other=info[gpu].neighbor[neighbor];
if(other < 0 || other >= num_gpus)
return;
size_t ofs;
char *buf;
key=(key&1)^1;
neighbor_buffer *nb=info[gpu].write_buffer[neighbor];
ofs=POSDIM(info[gpu].overlap_send[neighbor], info[gpu].local_dim);
buf=(char*)dptr+ofs*cellsize;
PROF_CUDA_BEGIN(PROF_MSBASE+neighbor, s);
char *tbuf=info[gpu].tmp_buffer[neighbor];
for(int p=0; p<pagecount; p++)
cudaMemcpyAsync(tbuf+p*(nb->size),
buf+p*get_local_size(gpu),
nb->size, cudaMemcpyDefault,s);
PROF_CUDA_END(PROF_MSBASE+neighbor, s);
char *nbuf=nb->buffer[key];
cudaMemcpyAsync(nbuf,
tbuf,
nb->size*pagecount, cudaMemcpyDefault,s);
}
gpu_info* ZDivMultiGPUMapper::getinfo(int gpu)
{
return &(info[gpu]);
}
unsigned int* ZDivMultiGPUMapper::gettbuf(int gpu, int key, int neighbor)
{
return (unsigned int*)info[gpu].tmp_buffer[neighbor];
}
unsigned int* ZDivMultiGPUMapper::getrbuf(int gpu, int key, int neighbor)
{
key=key&1;
neighbor_buffer *nb=info[gpu].read_buffer[neighbor];
return (unsigned int*)nb->buffer[key];
}
void ZDivMultiGPUMapper::send_tbuf(int gpu, int key, int neighbor, cudaStream_t s)
{
key=(key&1)^1;
neighbor_buffer *nb=info[gpu].write_buffer[neighbor];
char *tbuf=info[gpu].tmp_buffer[neighbor];
char *nbuf=nb->buffer[key];
cudaMemcpyAsync(nbuf,
tbuf,
nb->size*pagecount, cudaMemcpyDefault,s);
}
/*
void ZDivMultiGPUMapper::copy_to_neighbor(int gpu, int neighbor, int key)
{
int other=info[gpu].neighbor[neighbor];
if(other < 0 || other >= num_gpus)
return;
size_t ofs;
char *buf;
neighbor_buffer *nb=info[gpu].write_buffer[neighbor];
ofs=POSDIM(info[gpu].overlap_send[neighbor], info[gpu].local_dim);
buf=(char*)info[gpu].current_state+ofs*cellsize;
PROF_BEGIN(PROF_D2D);
cudaMemcpy(nb->buffer[key], buf, nb->size, cudaMemcpyDefault);
check_error();
PROF_END(PROF_D2D);
}
void ZDivMultiGPUMapper::copy_from_neighbor(int gpu, int neighbor, void *dptr, int key)
{
int other=info[gpu].neighbor[neighbor];
if(other < 0 || other >= num_gpus)
return;
neighbor_buffer *nb=info[gpu].read_buffer[neighbor];
char *lbuf=(char*)dptr;
size_t lofs=POSDIM(info[gpu].overlap_recv[neighbor], info[gpu].local_dim);
lbuf+=lofs*cellsize;
//printf("gpu %d dir %d from buf %X\n",gpu,neighbor,nptr);
PROF_BEGIN(PROF_D2D);
cudaMemcpy(lbuf, nb->buffer[key], nb->size, cudaMemcpyDefault);
PROF_END(PROF_D2D);
}
*/
void ZDivMultiGPUMapper::refresh(int gpu, void *dptr, int timestep)
{
schedule_recv(gpu, dptr, timestep, Z_SHADOW_TOP, 0);
schedule_recv(gpu, dptr, timestep, Z_SHADOW_BOTTOM, 0);
if(cudaDeviceSynchronize() != cudaSuccess)
throw("cuda error");
}
int ZDivMultiGPUMapper::map_index_to_gpu(size_t index)
{
size_t plane=lattice_dim.x * lattice_dim.y;
size_t zplane = index / plane;
int i;
for(i=0; i<num_gpus; i++)
{
if(zplane < info[i].global_pos.z)
break;
}
return i-1;
}
bool ZDivMultiGPUMapper::determine_load_balance()
{
bool changed=false;
// Determine the average runtime among 'active' gpus
// That is, ones that are not capacity constrained.
unsigned int sum=0;
unsigned int active=0;
for(int i=0; i<num_gpus; i++)
{
if(info[i].lb_chunks < info[i].lb_max_chunks)
{
active++;
sum+=lb_cost[i];
}
}
float avg=(float)sum/active;
//printf("* sum = %d, active = %d, avg = %f\n", sum, active, avg);
// Determine balance for non-constrained gpus
int zblocks=lattice_dim.z/TUNE_MPD_Z_BLOCK_Z_SIZE;
float chunk_pct=1.0f/(float)zblocks;
float equalpoint=1.0f/active;
for(int i=0; i<num_gpus; i++)
{
// skip constrained gpus
if(info[i].lb_chunks == info[i].lb_max_chunks)
continue;
// Calculate percent difference from the average
// and scale it against the equal point
float ib_acc=((lb_cost[i]-avg)/avg)*equalpoint;
//printf("%d cost %d ca %f eq %f ib now %f ib acc %f\n",
// i, lb_cost[i], lb_cost[i]/avg, equalpoint, ib_acc, info[i].lb_imbalance);
// combine this imbalance with the cumulative
// effects of the past, dampened by a factor
float imbalance=ib_acc+info[i].lb_imbalance*LB_IMBALANCE_DAMPENER;
info[i].lb_imbalance=imbalance;
// Compute the magnitude of the number of chunks to adjust
int magnitude=(int)floor(fabs(imbalance)/chunk_pct);
if(magnitude > 0)
{
int sign=(imbalance > 0 ? -1 : 1);
int adjust=magnitude*sign;
alter_chunks(i, adjust);
changed=true;
info[i].lb_imbalance=0;
printf("[%d] %s %d blocks\n",i,(adjust>0 ? "added" : "shaved"),magnitude);
}
}
// Check to make sure everything is accounted for
int blocks_consumed=0;
for(int i=0; i<num_gpus; i++)
{
blocks_consumed+=info[i].lb_chunks;
}
if(blocks_consumed != zblocks)
{
for(int i=0; i<num_gpus; i++)
{
alter_chunks(i, 1);
blocks_consumed++;
if(blocks_consumed == zblocks) break;
}
/*
// Attempt to equally distribute any discrepancy
int correction=(int)round((zblocks-blocks_consumed)/active);
if(correction > 0)
{
blocks_consumed=0;
for(int i=0; i<num_gpus; i++)
{
if(info[i].lb_chunks == info[i].lb_max_chunks)
{
blocks_consumed+=info[i].lb_chunks;
continue;
}
blocks_consumed+=alter_chunks(i, correction);
printf("[%d] augmented %d blocks\n",i,correction);
}
}
// If still uneven, find the most imbalanced and apply difference
if(blocks_consumed != zblocks)
{
int most_off=0;
float most_off_amt=0.0f;
for(int i=0; i<num_gpus; i++)
{
if(info[i].lb_chunks == info[i].lb_max_chunks)
continue;
if(fabs(info[i].lb_imbalance) >= most_off_amt)
{
most_off_amt=fabs(info[i].lb_imbalance);
most_off=i;
}
}
alter_chunks(most_off, zblocks-blocks_consumed);
printf("[%d] forced %d blocks\n",most_off,zblocks-blocks_consumed);
info[most_off].lb_imbalance=0;
}
*/
changed=true;
}
Print::printf(Print::DEBUG, "LB State: ");
for(int i=0; i<num_gpus; i++)
Print::printf(Print::DEBUG, "%+.02f%% (n=%d) \t", info[i].lb_imbalance*100, info[i].lb_chunks);
Print::printf(Print::DEBUG, "\n");
return changed;
}
int ZDivMultiGPUMapper::alter_chunks(int gpu, int count)
{
count+=info[gpu].lb_chunks;
if(count < 1)
count=1;
//printf("[%d] req of %d, max is %d\n", gpu, count, max_chunks);
count = (info[gpu].lb_max_chunks < count ? info[gpu].lb_max_chunks : count);
info[gpu].lb_chunks=count;
return count;
}
|
429427d188c158f35c0c1f784d0a9fcbcbba0eef.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <hip/hip_runtime.h>
__global__ void getValue(float *outdata, float *indata) {
outdata[0] = indata[0] + 3.0f;
}
void floats() {
int memSizeBytes = 65536;
int N = 128;
hipStream_t stream;
hipStreamCreate__(&stream, 0);
char *hostMemGiant;
hipHostMalloc((void **)&hostMemGiant, memSizeBytes, HIP_MEMHOSTALLOC_PORTABLE);
hipDeviceptr_t deviceMemGiant;
cuMemAlloc(&deviceMemGiant, memSizeBytes);
int floats1_offset_bytes = 128 * 4;
int floats2_offset_bytes = 256 * 4;
float *hostFloats1 = (float *)(hostMemGiant + floats1_offset_bytes);
float *hostFloats2 = (float *)(hostMemGiant + floats2_offset_bytes);
hostFloats1[0] = 123.456f;
hipDeviceptr_t deviceFloats1 = deviceMemGiant + floats1_offset_bytes;
hipDeviceptr_t deviceFloats2 = deviceMemGiant + floats2_offset_bytes;
cuMemcpyHtoDAsync(
(hipDeviceptr_t)(((float *)deviceFloats1)),
hostFloats1,
N * sizeof(float),
stream
);
hipLaunchKernelGGL(( getValue), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, (float *)deviceFloats2, (float *)deviceFloats1);
// now copy back entire buffer
cuMemcpyDtoHAsync(hostFloats2, deviceFloats2, N * sizeof(float), stream);
hipStreamSynchronize(stream);
// and check the values...
cout << hostFloats2[0] << endl;
assert(hostFloats2[0] == 126.456f);
hipHostFree(hostMemGiant);
hipFree(deviceMemGiant);
hipStreamDestroy(stream);
}
__global__ void getValueChar(char *outdata, char *indata) {
outdata[0] = indata[0] + 3;
}
void chars() {
int memSizeBytes = 65536;
int N = 128;
hipStream_t stream;
hipStreamCreate__(&stream, 0);
char *hostMemGiant;
hipHostMalloc((void **)&hostMemGiant, memSizeBytes, HIP_MEMHOSTALLOC_PORTABLE);
hipDeviceptr_t deviceMemGiant;
cuMemAlloc(&deviceMemGiant, memSizeBytes);
int chars1_offset_bytes = 128 * 4;
int chars2_offset_bytes = 256 * 4;
char *hostChars1 = (char *)(hostMemGiant + chars1_offset_bytes);
char *hostChars2 = (char *)(hostMemGiant + chars2_offset_bytes);
hostChars1[0] = 67;
hipDeviceptr_t deviceChars1 = deviceMemGiant + chars1_offset_bytes;
hipDeviceptr_t deviceChars2 = deviceMemGiant + chars2_offset_bytes;
cuMemcpyHtoDAsync(
(hipDeviceptr_t)(((float *)deviceChars1)),
hostChars1,
N * sizeof(char),
stream
);
hipLaunchKernelGGL(( getValueChar), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, (char *)deviceChars2, (char *)deviceChars1);
// now copy back entire buffer
cuMemcpyDtoHAsync(hostChars2, deviceChars2, N * sizeof(char), stream);
hipStreamSynchronize(stream);
// and check the values...
cout << hostChars2[0] << endl;
assert(hostChars2[0] == 70);
hipHostFree(hostMemGiant);
hipFree(deviceMemGiant);
hipStreamDestroy(stream);
}
int main(int argc, char *argv[]) {
floats();
chars();
return 0;
}
| 429427d188c158f35c0c1f784d0a9fcbcbba0eef.cu |
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda.h>
__global__ void getValue(float *outdata, float *indata) {
outdata[0] = indata[0] + 3.0f;
}
void floats() {
int memSizeBytes = 65536;
int N = 128;
CUstream stream;
cuStreamCreate(&stream, 0);
char *hostMemGiant;
cuMemHostAlloc((void **)&hostMemGiant, memSizeBytes, CU_MEMHOSTALLOC_PORTABLE);
CUdeviceptr deviceMemGiant;
cuMemAlloc(&deviceMemGiant, memSizeBytes);
int floats1_offset_bytes = 128 * 4;
int floats2_offset_bytes = 256 * 4;
float *hostFloats1 = (float *)(hostMemGiant + floats1_offset_bytes);
float *hostFloats2 = (float *)(hostMemGiant + floats2_offset_bytes);
hostFloats1[0] = 123.456f;
CUdeviceptr deviceFloats1 = deviceMemGiant + floats1_offset_bytes;
CUdeviceptr deviceFloats2 = deviceMemGiant + floats2_offset_bytes;
cuMemcpyHtoDAsync(
(CUdeviceptr)(((float *)deviceFloats1)),
hostFloats1,
N * sizeof(float),
stream
);
getValue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>((float *)deviceFloats2, (float *)deviceFloats1);
// now copy back entire buffer
cuMemcpyDtoHAsync(hostFloats2, deviceFloats2, N * sizeof(float), stream);
cuStreamSynchronize(stream);
// and check the values...
cout << hostFloats2[0] << endl;
assert(hostFloats2[0] == 126.456f);
cuMemFreeHost(hostMemGiant);
cuMemFree(deviceMemGiant);
cuStreamDestroy(stream);
}
__global__ void getValueChar(char *outdata, char *indata) {
outdata[0] = indata[0] + 3;
}
void chars() {
int memSizeBytes = 65536;
int N = 128;
CUstream stream;
cuStreamCreate(&stream, 0);
char *hostMemGiant;
cuMemHostAlloc((void **)&hostMemGiant, memSizeBytes, CU_MEMHOSTALLOC_PORTABLE);
CUdeviceptr deviceMemGiant;
cuMemAlloc(&deviceMemGiant, memSizeBytes);
int chars1_offset_bytes = 128 * 4;
int chars2_offset_bytes = 256 * 4;
char *hostChars1 = (char *)(hostMemGiant + chars1_offset_bytes);
char *hostChars2 = (char *)(hostMemGiant + chars2_offset_bytes);
hostChars1[0] = 67;
CUdeviceptr deviceChars1 = deviceMemGiant + chars1_offset_bytes;
CUdeviceptr deviceChars2 = deviceMemGiant + chars2_offset_bytes;
cuMemcpyHtoDAsync(
(CUdeviceptr)(((float *)deviceChars1)),
hostChars1,
N * sizeof(char),
stream
);
getValueChar<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>((char *)deviceChars2, (char *)deviceChars1);
// now copy back entire buffer
cuMemcpyDtoHAsync(hostChars2, deviceChars2, N * sizeof(char), stream);
cuStreamSynchronize(stream);
// and check the values...
cout << hostChars2[0] << endl;
assert(hostChars2[0] == 70);
cuMemFreeHost(hostMemGiant);
cuMemFree(deviceMemGiant);
cuStreamDestroy(stream);
}
int main(int argc, char *argv[]) {
floats();
chars();
return 0;
}
|
b1fff1915f114a6927bf927b4470e444af60d22c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file bounding_box.cu
* \brief Bounding box util functions and operators
* \author Joshua Zhang
*/
#include <hipcub/hipcub.hpp>
#include "./bounding_box-inl.cuh"
#include "./bounding_box-inl.h"
#include "../elemwise_op_common.h"
namespace mxnet {
namespace op {
namespace {
using mshadow::Tensor;
using mshadow::Stream;
template <typename DType>
struct TempWorkspace {
size_t scores_temp_space;
DType* scores;
size_t scratch_space;
uint8_t* scratch;
size_t buffer_space;
DType* buffer;
size_t nms_scratch_space;
uint32_t* nms_scratch;
size_t indices_temp_spaces;
index_t* indices;
};
inline size_t ceil_div(size_t x, size_t y) {
return (x + y - 1) / y;
}
inline size_t align(size_t x, size_t alignment) {
return ceil_div(x, alignment) * alignment;
}
template <typename DType>
__global__ void FilterAndPrepareAuxDataKernel(const DType* data, DType* out, DType* scores,
index_t num_elements_per_batch,
const index_t element_width,
const index_t N,
const float threshold,
const int id_index, const int score_index,
const int background_id) {
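// One thread per scalar of the input tensor: values belonging to boxes that
// fail the score threshold (or match the background class) are overwritten
// with -1, and the first thread of every box also writes its (possibly -1)
// score into the separate scores array used later for sorting.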
index_t tid = blockIdx.x * blockDim.x + threadIdx.x;
bool first_in_element = (tid % element_width == 0);
index_t start_of_my_element = tid - (tid % element_width);
if (tid < N) {
DType my_score = data[start_of_my_element + score_index];
bool filtered_out = my_score <= threshold;
if (id_index != -1 && background_id != -1) {
DType my_id = data[start_of_my_element + id_index];
filtered_out = filtered_out || (my_id == background_id);
}
if (!filtered_out) {
out[tid] = data[tid];
} else {
out[tid] = -1;
my_score = -1;
}
if (first_in_element) {
index_t offset = tid / element_width;
scores[offset] = my_score;
}
}
}
template <typename DType>
void FilterAndPrepareAuxData(const Tensor<gpu, 3, DType>& data,
Tensor<gpu, 3, DType>* out,
const TempWorkspace<DType>& workspace,
const BoxNMSParam& param,
Stream<gpu>* s) {
const int n_threads = 512;
index_t N = data.shape_.Size();
const auto blocks = ceil_div(N, n_threads);
hipLaunchKernelGGL(( FilterAndPrepareAuxDataKernel), dim3(blocks),
dim3(n_threads),
0,
Stream<gpu>::GetStream(s),
data.dptr_, out->dptr_, workspace.scores,
data.shape_[1], data.shape_[2], N,
param.valid_thresh, param.id_index,
param.score_index, param.background_id);
}
template <bool check_topk, bool check_score, typename DType>
__global__ void CompactDataKernel(const index_t* indices, const DType* source,
DType* destination, const index_t topk,
const index_t element_width,
const index_t num_elements_per_batch,
const int score_index,
const index_t N) {
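// Gathers boxes into sorted order via the precomputed index array; entries
// beyond topk, or whose source score is already -1, are emitted as -1 so the
// following stages can ignore them.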
const index_t tid_start = blockIdx.x * blockDim.x + threadIdx.x;
for (index_t tid = tid_start; tid < N; tid += blockDim.x * gridDim.x) {
const index_t my_element = tid / element_width;
const index_t my_element_in_batch = my_element % num_elements_per_batch;
if (check_topk && my_element_in_batch >= topk) {
destination[tid] = -1;
} else {
DType ret;
const index_t source_element = indices[my_element];
DType score = 0;
if (check_score) {
score = source[source_element * element_width + score_index];
}
if (score >= 0) {
ret = source[source_element * element_width + tid % element_width];
} else {
ret = -1;
}
destination[tid] = ret;
}
}
}
template <bool check_score, typename DType>
void CompactData(const Tensor<gpu, 1, index_t>& indices,
const Tensor<gpu, 3, DType>& source,
Tensor<gpu, 3, DType>* destination,
const index_t topk,
const int score_index,
Stream<gpu>* s) {
const int n_threads = 512;
const size_t max_blocks = 320;
index_t N = source.shape_.Size();
const auto blocks = ::min(ceil_div(N, n_threads), max_blocks);
if (topk > 0) {
hipLaunchKernelGGL(( CompactDataKernel<true, check_score>), dim3(blocks), dim3(n_threads), 0,
Stream<gpu>::GetStream(s),
indices.dptr_, source.dptr_,
destination->dptr_, topk,
source.shape_[2], source.shape_[1],
score_index, N);
} else {
hipLaunchKernelGGL(( CompactDataKernel<false, check_score>), dim3(blocks), dim3(n_threads), 0,
Stream<gpu>::GetStream(s),
indices.dptr_, source.dptr_,
destination->dptr_, topk,
source.shape_[2], source.shape_[1],
score_index, N);
}
}
template <typename DType>
void WorkspaceForSort(const index_t num_elem,
const index_t topk,
const int alignment,
TempWorkspace<DType>* workspace) {
const size_t sort_scores_temp_space =
mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(num_elem, false, false);
const size_t sort_topk_scores_temp_space =
mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(topk, 1, false, false);
workspace->scratch_space = align(::max(sort_scores_temp_space, sort_topk_scores_temp_space),
alignment);
}
template <int encode, typename DType>
__global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result,
const index_t current_start,
const index_t num_elems,
const index_t num_batches,
const index_t num_blocks_per_row_batch,
const index_t num_blocks_per_row,
const index_t topk,
const index_t element_width,
const index_t num_elements_per_batch,
const int coord_index,
const int class_index,
const int score_index,
const float threshold);
template <typename DType>
__global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results,
DType * data,
const index_t score_index,
const index_t element_width,
const index_t num_batches,
const index_t num_elems,
const index_t start_index,
const index_t topk);
template <typename DType>
__global__ void ReduceNMSResultRestKernel(DType* data,
const uint32_t* nms_results,
const index_t score_index,
const index_t element_width,
const index_t num_batches,
const index_t num_elements_per_batch,
const index_t start_index,
const index_t topk,
const index_t num_blocks_per_batch);
template <typename DType>
struct NMS {
static constexpr int THRESHOLD = 512;
void operator()(Tensor<gpu, 3, DType>* data,
Tensor<gpu, 2, uint32_t>* scratch,
const index_t topk,
const BoxNMSParam& param,
Stream<gpu>* s) {
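// Greedy NMS over the topk candidates proceeds in tiles of THRESHOLD (512)
// boxes: every remaining box is first tested against the tile, suppression
// inside the tile is then resolved, and finally applied to the boxes after it.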
const int n_threads = 512;
const index_t num_batches = data->shape_[0];
const index_t num_elements_per_batch = data->shape_[1];
const index_t element_width = data->shape_[2];
for (index_t current_start = 0; current_start < topk; current_start += THRESHOLD) {
const index_t n_elems = topk - current_start;
const index_t num_blocks_per_row_batch = ceil_div(n_elems, n_threads);
const index_t num_blocks_per_row = num_blocks_per_row_batch * num_batches;
const index_t n_blocks = THRESHOLD / (sizeof(uint32_t) * 8) * num_blocks_per_row;
if (param.in_format == box_common_enum::kCorner) {
hipLaunchKernelGGL(( CalculateGreedyNMSResultsKernel<box_common_enum::kCorner>)
, dim3(n_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s),
data->dptr_, scratch->dptr_, current_start, n_elems, num_batches,
num_blocks_per_row_batch, num_blocks_per_row, topk, element_width,
num_elements_per_batch, param.coord_start,
param.force_suppress ? -1 : param.id_index,
param.score_index, param.overlap_thresh);
} else {
hipLaunchKernelGGL(( CalculateGreedyNMSResultsKernel<box_common_enum::kCenter>)
, dim3(n_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s),
data->dptr_, scratch->dptr_, current_start, n_elems, num_batches,
num_blocks_per_row_batch, num_blocks_per_row, topk, element_width,
num_elements_per_batch, param.coord_start,
param.force_suppress ? -1 : param.id_index,
param.score_index, param.overlap_thresh);
}
hipLaunchKernelGGL(( ReduceNMSResultTriangleKernel), dim3(num_batches), dim3(THRESHOLD), 0, Stream<gpu>::GetStream(s),
scratch->dptr_, data->dptr_, param.score_index,
element_width, num_batches, num_elements_per_batch,
current_start, topk);
const index_t n_rest_elems = n_elems - THRESHOLD;
const index_t num_rest_blocks_per_batch = ceil_div(n_rest_elems, n_threads);
const index_t num_rest_blocks = num_rest_blocks_per_batch * num_batches;
if (n_rest_elems > 0) {
hipLaunchKernelGGL(( ReduceNMSResultRestKernel), dim3(num_rest_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s),
data->dptr_, scratch->dptr_, param.score_index, element_width,
num_batches, num_elements_per_batch, current_start, topk,
num_rest_blocks_per_batch);
}
}
}
};
template <int encode, typename DType>
__device__ __forceinline__ DType calculate_area(const DType b0, const DType b1,
const DType b2, const DType b3) {
DType width = b2;
DType height = b3;
if (encode == box_common_enum::kCorner) {
width -= b0;
height -= b1;
}
if (width < 0 || height < 0) return 0;
return width * height;
}
template <int encode, typename DType>
__device__ __forceinline__ DType calculate_intersection(const DType a0, const DType a1,
const DType a2, const DType a3,
const DType b0, const DType b1,
const DType b2, const DType b3) {
DType wx, wy;
if (encode == box_common_enum::kCorner) {
const DType left = a0 > b0 ? a0 : b0;
const DType bottom = a1 > b1 ? a1 : b1;
const DType right = a2 < b2 ? a2 : b2;
const DType top = a3 < b3 ? a3 : b3;
wx = right - left;
wy = top - bottom;
} else {
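// Center format: corners are formed as 2*center +/- size to avoid halving;
// the single division by 4 applied to wy below compensates for wx and wy
// each being 2x too large.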
const DType al = 2 * a0 - a2;
const DType ar = 2 * a0 + a2;
const DType bl = 2 * b0 - b2;
const DType br = 2 * b0 + b2;
const DType left = bl > al ? bl : al;
const DType right = br < ar ? br : ar;
wx = right - left;
const DType ab = 2 * a1 - a3;
const DType at = 2 * a1 + a3;
const DType bb = 2 * b1 - b3;
const DType bt = 2 * b1 + b3;
const DType bottom = bb > ab ? bb : ab;
const DType top = bt < at ? bt : at;
wy = top - bottom;
wy = wy / 4; // To compensate for both wx and wy being 2x too large
}
if (wx <= 0 || wy <= 0) {
return 0;
} else {
return (wx * wy);
}
}
template <int encode, typename DType>
__launch_bounds__(512)
__global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result,
const index_t current_start,
const index_t num_elems,
const index_t num_batches,
const index_t num_blocks_per_row_batch,
const index_t num_blocks_per_row,
const index_t topk,
const index_t element_width,
const index_t num_elements_per_batch,
const int coord_index,
const int class_index,
const int score_index,
const float threshold) {
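// Each block loads one 32-box tile of "other" boxes (and their areas) into
// shared memory; every thread then tests its own box against all 32 and sets
// bit i of a mask when other box i suppresses it (IoU above threshold and,
// unless force_suppress, matching class). The complement is stored, so a
// cleared bit downstream means "suppressed by that box".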
constexpr int max_elem_width = 20;
constexpr int num_other_boxes = sizeof(uint32_t) * 8;
__shared__ DType other_boxes[max_elem_width * num_other_boxes];
__shared__ DType other_boxes_areas[num_other_boxes];
const index_t my_row = blockIdx.x / num_blocks_per_row;
const index_t my_block_offset_in_row = blockIdx.x % num_blocks_per_row;
const index_t my_block_offset_in_batch = my_block_offset_in_row % num_blocks_per_row_batch;
const index_t my_batch = (my_block_offset_in_row) / num_blocks_per_row_batch;
const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x +
current_start + threadIdx.x;
// Load other boxes
const index_t offset = (my_batch * num_elements_per_batch +
current_start + my_row * num_other_boxes) *
element_width;
for (int i = threadIdx.x; i < element_width * num_other_boxes; i += blockDim.x) {
other_boxes[i] = data[offset + i];
}
__syncthreads();
if (threadIdx.x < num_other_boxes) {
const int other_boxes_offset = element_width * threadIdx.x;
const DType their_area = calculate_area<encode>(
other_boxes[other_boxes_offset + coord_index + 0],
other_boxes[other_boxes_offset + coord_index + 1],
other_boxes[other_boxes_offset + coord_index + 2],
other_boxes[other_boxes_offset + coord_index + 3]);
other_boxes_areas[threadIdx.x] = their_area;
}
__syncthreads();
if (my_element_in_batch >= topk) return;
DType my_box[4];
DType my_class = -1;
DType my_score = -1;
const index_t my_offset = (my_batch * num_elements_per_batch + my_element_in_batch) *
element_width;
my_score = data[my_offset + score_index];
#pragma unroll
for (int i = 0; i < 4; ++i) {
my_box[i] = data[my_offset + coord_index + i];
}
if (class_index != -1) {
my_class = data[my_offset + class_index];
}
DType my_area = calculate_area<encode>(my_box[0], my_box[1], my_box[2], my_box[3]);
uint32_t ret = 0;
if (my_score != -1) {
#pragma unroll
for (int i = 0; i < num_other_boxes; ++i) {
const int other_boxes_offset = element_width * i;
if ((class_index == -1 || my_class == other_boxes[other_boxes_offset + class_index]) &&
other_boxes[other_boxes_offset + score_index] != -1) {
const DType their_area = other_boxes_areas[i];
const DType intersect = calculate_intersection<encode>(
my_box[0], my_box[1], my_box[2], my_box[3],
other_boxes[other_boxes_offset + coord_index + 0],
other_boxes[other_boxes_offset + coord_index + 1],
other_boxes[other_boxes_offset + coord_index + 2],
other_boxes[other_boxes_offset + coord_index + 3]);
if (intersect > threshold * (my_area + their_area - intersect)) {
ret = ret | (1u << i);
}
}
}
}
result[(my_row * num_batches + my_batch) * topk + my_element_in_batch] = ~ret;
}
template <typename DType>
__launch_bounds__(NMS<DType>::THRESHOLD)
__global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results,
DType * data,
const index_t score_index,
const index_t element_width,
const index_t num_batches,
const index_t num_elements_per_batch,
const index_t start_index,
const index_t topk) {
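// One block per batch resolves suppression inside a 512-box tile: warp by
// warp, a box only counts as suppressed if an earlier box that itself
// survived suppresses it. The per-warp survivor bitmaps are written back to
// nms_results and the scores of eliminated boxes are set to -1.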
constexpr int n_threads = NMS<DType>::THRESHOLD;
constexpr int warp_size = 32;
const index_t my_batch = blockIdx.x;
const index_t my_element_in_batch = threadIdx.x + start_index;
const index_t my_element = my_batch * topk + my_element_in_batch;
const int my_warp = threadIdx.x / warp_size;
const int my_lane = threadIdx.x % warp_size;
__shared__ uint32_t current_valid_boxes[n_threads / warp_size];
const uint32_t full_mask = 0xFFFFFFFF;
const uint32_t my_lane_mask = 1 << my_lane;
const uint32_t earlier_threads_mask = (1 << (my_lane + 1)) - 1;
uint32_t valid = my_lane_mask;
uint32_t valid_boxes = full_mask;
uint32_t my_next_mask = my_element_in_batch < topk ?
nms_results[my_element]:
full_mask;
#pragma unroll
for (int i = 0; i < n_threads / warp_size; ++i) {
uint32_t my_mask = my_next_mask;
my_next_mask = (((i + 1) < n_threads / warp_size) &&
(my_element_in_batch < topk)) ?
nms_results[(i + 1) * topk * num_batches + my_element]:
full_mask;
if (my_warp == i && !__all_sync(full_mask, my_mask == full_mask)) {
my_mask = my_mask | earlier_threads_mask;
// Loop over warp_size - 1 because the last
// thread does not contribute to the mask anyway
#pragma unroll
for (int j = 0; j < warp_size - 1; ++j) {
const uint32_t mask = __shfl_sync(full_mask, valid ? my_mask : full_mask, j);
valid = valid & mask;
}
valid_boxes = __ballot_sync(full_mask, valid);
}
if (my_lane == 0 && my_warp == i) {
current_valid_boxes[i] = valid_boxes;
}
__syncthreads();
if ((my_warp > i) && (((~my_mask) & current_valid_boxes[i]) != 0)) {
valid = 0;
}
}
if (my_lane == 0) {
nms_results[my_element] = valid_boxes;
}
if (valid == 0) {
data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width +
score_index] = -1;
}
}
template <typename DType>
__launch_bounds__(512)
__global__ void ReduceNMSResultRestKernel(DType* data,
const uint32_t* nms_results,
const index_t score_index,
const index_t element_width,
const index_t num_batches,
const index_t num_elements_per_batch,
const index_t start_index,
const index_t topk,
const index_t num_blocks_per_batch) {
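// Boxes after the current 512-box tile are handled here: a box's score is set
// to -1 if any survivor of the tile (per the bitmaps written by the triangle
// kernel) suppresses it.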
constexpr int num_other_boxes = sizeof(uint32_t) * 8;
constexpr int num_iterations = NMS<DType>::THRESHOLD / num_other_boxes;
constexpr int warp_size = 32;
const index_t my_block_offset_in_batch = blockIdx.x % num_blocks_per_batch;
const index_t my_batch = blockIdx.x / num_blocks_per_batch;
const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x +
start_index + NMS<DType>::THRESHOLD + threadIdx.x;
const index_t my_element = my_batch * topk + my_element_in_batch;
if (my_element_in_batch >= topk) return;
bool valid = true;
#pragma unroll
for (int i = 0; i < num_iterations; ++i) {
const uint32_t my_mask = nms_results[i * topk * num_batches + my_element];
const uint32_t valid_boxes = nms_results[my_batch * topk + i * warp_size + start_index];
const bool no_hit = (valid_boxes & (~my_mask)) == 0;
valid = valid && no_hit;
}
if (!valid) {
data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width +
score_index] = -1;
}
}
template <typename DType>
TempWorkspace<DType> GetWorkspace(const index_t num_batch,
const index_t num_elem,
const int width_elem,
const index_t topk,
const OpContext& ctx) {
TempWorkspace<DType> workspace;
Stream<gpu> *s = ctx.get_stream<gpu>();
const int alignment = 128;
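// All temporaries (score copies, sort scratch, the compacted buffer, NMS
// bitmasks and index arrays) are carved out of a single aligned workspace
// allocation requested from the op's temp space.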
// Get the workspace size
workspace.scores_temp_space = 2 * align(num_batch * num_elem * sizeof(DType), alignment);
workspace.indices_temp_spaces = 2 * align(num_batch * num_elem * sizeof(index_t), alignment);
WorkspaceForSort(num_elem, topk, alignment, &workspace);
// Place for a buffer
workspace.buffer_space = align(num_batch * num_elem * width_elem * sizeof(DType), alignment);
workspace.nms_scratch_space = align(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8) *
num_batch * topk * sizeof(uint32_t), alignment);
const size_t workspace_size = workspace.scores_temp_space +
workspace.scratch_space +
workspace.buffer_space +
workspace.nms_scratch_space +
workspace.indices_temp_spaces;
// Obtain the memory for workspace
Tensor<gpu, 1, double> scratch_memory = ctx.requested[box_nms_enum::kTempSpace]
.get_space_typed<gpu, 1, double>(mshadow::Shape1(ceil_div(workspace_size, sizeof(double))), s);
// Populate workspace pointers
workspace.scores = reinterpret_cast<DType*>(scratch_memory.dptr_);
workspace.scratch = reinterpret_cast<uint8_t*>(workspace.scores) +
workspace.scores_temp_space;
workspace.buffer = reinterpret_cast<DType*>(workspace.scratch +
workspace.scratch_space);
workspace.nms_scratch = reinterpret_cast<uint32_t*>(
reinterpret_cast<uint8_t*>(workspace.buffer) +
workspace.buffer_space);
workspace.indices = reinterpret_cast<index_t*>(
reinterpret_cast<uint8_t*>(workspace.nms_scratch) +
workspace.nms_scratch_space);
return workspace;
}
template <typename DType>
__global__ void ExtractScoresKernel(const DType* data, DType* scores,
const index_t N, const int element_width,
const int score_index) {
const index_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
scores[tid] = data[tid * element_width + score_index];
}
}
template <typename DType>
void CompactNMSResults(const Tensor<gpu, 3, DType>& data,
Tensor<gpu, 3, DType>* out,
Tensor<gpu, 1, index_t>* indices,
Tensor<gpu, 1, DType>* scores,
Tensor<gpu, 1, index_t>* sorted_indices,
Tensor<gpu, 1, DType>* sorted_scores,
Tensor<gpu, 1, char>* scratch,
const int score_index,
const index_t topk,
Stream<gpu>* s) {
using mshadow::Shape1;
constexpr int n_threads = 512;
const index_t num_elements = scores->shape_.Size();
const index_t num_elements_per_batch = data.shape_[1];
const index_t num_batches = data.shape_[0];
const int element_width = data.shape_[2];
const index_t n_blocks = ceil_div(num_elements, n_threads);
hipLaunchKernelGGL(( ExtractScoresKernel), dim3(n_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s),
data.dptr_, scores->dptr_, num_elements, element_width, score_index);
*indices = mshadow::expr::range<index_t>(0, num_elements);
for (index_t i = 0; i < num_batches; ++i) {
// Sort each batch separately
Tensor<gpu, 1, DType> scores_batch(scores->dptr_ + i * num_elements_per_batch,
Shape1(topk),
s);
Tensor<gpu, 1, index_t> indices_batch(indices->dptr_ + i * num_elements_per_batch,
Shape1(topk),
s);
Tensor<gpu, 1, DType> sorted_scores_batch(sorted_scores->dptr_ + i * num_elements_per_batch,
Shape1(topk),
s);
Tensor<gpu, 1, index_t> sorted_indices_batch(sorted_indices->dptr_ + i * num_elements_per_batch,
Shape1(topk),
s);
mxnet::op::SortByKey(scores_batch, indices_batch, false, scratch,
0, 8 * sizeof(DType), 1, &sorted_scores_batch,
&sorted_indices_batch);
}
CompactData<true>(*sorted_indices, data, out, topk, score_index, s);
}
} // namespace
void BoxNMSForwardGPU_notemp(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using mshadow::Shape1;
using mshadow::Shape2;
using mshadow::Shape3;
CHECK_NE(req[0], kAddTo) << "BoxNMS does not support kAddTo";
CHECK_NE(req[0], kWriteInplace) << "BoxNMS does not support in place computation";
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]";
const BoxNMSParam& param = nnvm::get<BoxNMSParam>(attrs.parsed);
Stream<gpu> *s = ctx.get_stream<gpu>();
mxnet::TShape in_shape = inputs[box_nms_enum::kData].shape_;
int indim = in_shape.ndim();
int num_batch = indim <= 2? 1 : in_shape.ProdShape(0, indim - 2);
int num_elem = in_shape[indim - 2];
int width_elem = in_shape[indim - 1];
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<gpu, 3, DType> data = inputs[box_nms_enum::kData]
.get_with_shape<gpu, 3, DType>(Shape3(num_batch, num_elem, width_elem), s);
Tensor<gpu, 3, DType> out = outputs[box_nms_enum::kOut]
.get_with_shape<gpu, 3, DType>(Shape3(num_batch, num_elem, width_elem), s);
// Special case for topk == 0
if (param.topk == 0) {
if (req[0] != kNullOp &&
req[0] != kWriteInplace) {
out = mshadow::expr::F<mshadow_op::identity>(data);
}
return;
}
index_t topk = param.topk > 0 ? ::min(param.topk, num_elem) : num_elem;
const auto& workspace = GetWorkspace<DType>(num_batch, num_elem,
width_elem, topk, ctx);
FilterAndPrepareAuxData(data, &out, workspace, param, s);
Tensor<gpu, 1, DType> scores(workspace.scores, Shape1(num_batch * num_elem), s);
Tensor<gpu, 1, DType> sorted_scores(workspace.scores + scores.MSize(),
Shape1(num_batch * num_elem), s);
Tensor<gpu, 1, index_t> indices(workspace.indices, Shape1(num_batch * num_elem), s);
Tensor<gpu, 1, index_t> sorted_indices(workspace.indices + indices.MSize(),
Shape1(num_batch * num_elem), s);
Tensor<gpu, 1, char> scratch(reinterpret_cast<char*>(workspace.scratch),
Shape1(workspace.scratch_space), s);
Tensor<gpu, 3, DType> buffer(workspace.buffer,
Shape3(num_batch, num_elem, width_elem), s);
Tensor<gpu, 2, uint32_t> nms_scratch(workspace.nms_scratch,
Shape2(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8),
topk * num_batch),
s);
indices = mshadow::expr::range<index_t>(0, num_batch * num_elem);
for (index_t i = 0; i < num_batch; ++i) {
// Sort each batch separately
Tensor<gpu, 1, DType> scores_batch(scores.dptr_ + i * num_elem,
Shape1(num_elem),
s);
Tensor<gpu, 1, index_t> indices_batch(indices.dptr_ + i * num_elem,
Shape1(num_elem),
s);
Tensor<gpu, 1, DType> sorted_scores_batch(sorted_scores.dptr_ + i * num_elem,
Shape1(num_elem),
s);
Tensor<gpu, 1, index_t> sorted_indices_batch(sorted_indices.dptr_ + i * num_elem,
Shape1(num_elem),
s);
mxnet::op::SortByKey(scores_batch, indices_batch, false, &scratch, 0,
8 * sizeof(DType), 1, &sorted_scores_batch,
&sorted_indices_batch);
}
CompactData<false>(sorted_indices, out, &buffer, topk, -1, s);
NMS<DType> nms;
nms(&buffer, &nms_scratch, topk, param, s);
CompactNMSResults(buffer, &out, &indices, &scores, &sorted_indices,
&sorted_scores, &scratch, param.score_index, topk, s);
// convert encoding
if (param.in_format != param.out_format) {
if (box_common_enum::kCenter == param.out_format) {
mxnet::op::mxnet_op::Kernel<corner_to_center, gpu>::Launch(s, num_batch * num_elem,
out.dptr_ + param.coord_start, width_elem);
} else {
mxnet::op::mxnet_op::Kernel<center_to_corner, gpu>::Launch(s, num_batch * num_elem,
out.dptr_ + param.coord_start, width_elem);
}
}
});
}
void BoxNMSForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]";
if (req[1] == kNullOp) {
BoxNMSForwardGPU_notemp(attrs, ctx, inputs, req, outputs);
return;
}
BoxNMSForward<gpu>(attrs, ctx, inputs, req, outputs);
}
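// Added note (not part of upstream MXNet): the dispatch above only takes the
// specialized workspace-based path when the caller does not request the second
// ("temp") output, i.e. req[1] == kNullOp means nothing has to be written
// there, so BoxNMSForwardGPU_notemp can skip materialising it; every other
// case falls back to the generic BoxNMSForward<gpu> implementation.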
NNVM_REGISTER_OP(_contrib_box_nms)
.set_attr<FCompute>("FCompute<gpu>", BoxNMSForwardGPU);
NNVM_REGISTER_OP(_backward_contrib_box_nms)
.set_attr<FCompute>("FCompute<gpu>", BoxNMSBackward<gpu>);
NNVM_REGISTER_OP(_contrib_box_iou)
.set_attr<FCompute>("FCompute<gpu>", BoxOverlapForward<gpu>);
NNVM_REGISTER_OP(_backward_contrib_box_iou)
.set_attr<FCompute>("FCompute<gpu>", BoxOverlapBackward<gpu>);
NNVM_REGISTER_OP(_contrib_bipartite_matching)
.set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingForward<gpu>);
NNVM_REGISTER_OP(_backward_contrib_bipartite_matching)
.set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingBackward<gpu>);
NNVM_REGISTER_OP(_contrib_box_encode)
.set_attr<FCompute>("FCompute<gpu>", BoxEncodeForward<gpu>);
NNVM_REGISTER_OP(_contrib_box_decode)
.set_attr<FCompute>("FCompute<gpu>", BoxDecodeForward<gpu>);
} // namespace op
} // namespace mxnet
| b1fff1915f114a6927bf927b4470e444af60d22c.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file bounding_box.cu
* \brief Bounding box util functions and operators
* \author Joshua Zhang
*/
#include <cub/cub.cuh>
#include "./bounding_box-inl.cuh"
#include "./bounding_box-inl.h"
#include "../elemwise_op_common.h"
namespace mxnet {
namespace op {
namespace {
using mshadow::Tensor;
using mshadow::Stream;
template <typename DType>
struct TempWorkspace {
size_t scores_temp_space;
DType* scores;
size_t scratch_space;
uint8_t* scratch;
size_t buffer_space;
DType* buffer;
size_t nms_scratch_space;
uint32_t* nms_scratch;
size_t indices_temp_spaces;
index_t* indices;
};
inline size_t ceil_div(size_t x, size_t y) {
return (x + y - 1) / y;
}
inline size_t align(size_t x, size_t alignment) {
return ceil_div(x, alignment) * alignment;
}
template <typename DType>
__global__ void FilterAndPrepareAuxDataKernel(const DType* data, DType* out, DType* scores,
index_t num_elements_per_batch,
const index_t element_width,
const index_t N,
const float threshold,
const int id_index, const int score_index,
const int background_id) {
index_t tid = blockIdx.x * blockDim.x + threadIdx.x;
bool first_in_element = (tid % element_width == 0);
index_t start_of_my_element = tid - (tid % element_width);
if (tid < N) {
DType my_score = data[start_of_my_element + score_index];
bool filtered_out = my_score <= threshold;
if (id_index != -1 && background_id != -1) {
DType my_id = data[start_of_my_element + id_index];
filtered_out = filtered_out || (my_id == background_id);
}
if (!filtered_out) {
out[tid] = data[tid];
} else {
out[tid] = -1;
my_score = -1;
}
if (first_in_element) {
index_t offset = tid / element_width;
scores[offset] = my_score;
}
}
}
template <typename DType>
void FilterAndPrepareAuxData(const Tensor<gpu, 3, DType>& data,
Tensor<gpu, 3, DType>* out,
const TempWorkspace<DType>& workspace,
const BoxNMSParam& param,
Stream<gpu>* s) {
const int n_threads = 512;
index_t N = data.shape_.Size();
const auto blocks = ceil_div(N, n_threads);
FilterAndPrepareAuxDataKernel<<<blocks,
n_threads,
0,
Stream<gpu>::GetStream(s)>>>(
data.dptr_, out->dptr_, workspace.scores,
data.shape_[1], data.shape_[2], N,
param.valid_thresh, param.id_index,
param.score_index, param.background_id);
}
template <bool check_topk, bool check_score, typename DType>
__global__ void CompactDataKernel(const index_t* indices, const DType* source,
DType* destination, const index_t topk,
const index_t element_width,
const index_t num_elements_per_batch,
const int score_index,
const index_t N) {
const index_t tid_start = blockIdx.x * blockDim.x + threadIdx.x;
for (index_t tid = tid_start; tid < N; tid += blockDim.x * gridDim.x) {
const index_t my_element = tid / element_width;
const index_t my_element_in_batch = my_element % num_elements_per_batch;
if (check_topk && my_element_in_batch >= topk) {
destination[tid] = -1;
} else {
DType ret;
const index_t source_element = indices[my_element];
DType score = 0;
if (check_score) {
score = source[source_element * element_width + score_index];
}
if (score >= 0) {
ret = source[source_element * element_width + tid % element_width];
} else {
ret = -1;
}
destination[tid] = ret;
}
}
}
template <bool check_score, typename DType>
void CompactData(const Tensor<gpu, 1, index_t>& indices,
const Tensor<gpu, 3, DType>& source,
Tensor<gpu, 3, DType>* destination,
const index_t topk,
const int score_index,
Stream<gpu>* s) {
const int n_threads = 512;
const size_t max_blocks = 320;
index_t N = source.shape_.Size();
const auto blocks = std::min(ceil_div(N, n_threads), max_blocks);
if (topk > 0) {
CompactDataKernel<true, check_score><<<blocks, n_threads, 0,
Stream<gpu>::GetStream(s)>>>(
indices.dptr_, source.dptr_,
destination->dptr_, topk,
source.shape_[2], source.shape_[1],
score_index, N);
} else {
CompactDataKernel<false, check_score><<<blocks, n_threads, 0,
Stream<gpu>::GetStream(s)>>>(
indices.dptr_, source.dptr_,
destination->dptr_, topk,
source.shape_[2], source.shape_[1],
score_index, N);
}
}
template <typename DType>
void WorkspaceForSort(const index_t num_elem,
const index_t topk,
const int alignment,
TempWorkspace<DType>* workspace) {
const size_t sort_scores_temp_space =
mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(num_elem, false, false);
const size_t sort_topk_scores_temp_space =
mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(topk, 1, false, false);
workspace->scratch_space = align(std::max(sort_scores_temp_space, sort_topk_scores_temp_space),
alignment);
}
template <int encode, typename DType>
__global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result,
const index_t current_start,
const index_t num_elems,
const index_t num_batches,
const index_t num_blocks_per_row_batch,
const index_t num_blocks_per_row,
const index_t topk,
const index_t element_width,
const index_t num_elements_per_batch,
const int coord_index,
const int class_index,
const int score_index,
const float threshold);
template <typename DType>
__global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results,
DType * data,
const index_t score_index,
const index_t element_width,
const index_t num_batches,
const index_t num_elems,
const index_t start_index,
const index_t topk);
template <typename DType>
__global__ void ReduceNMSResultRestKernel(DType* data,
const uint32_t* nms_results,
const index_t score_index,
const index_t element_width,
const index_t num_batches,
const index_t num_elements_per_batch,
const index_t start_index,
const index_t topk,
const index_t num_blocks_per_batch);
template <typename DType>
struct NMS {
static constexpr int THRESHOLD = 512;
void operator()(Tensor<gpu, 3, DType>* data,
Tensor<gpu, 2, uint32_t>* scratch,
const index_t topk,
const BoxNMSParam& param,
Stream<gpu>* s) {
const int n_threads = 512;
const index_t num_batches = data->shape_[0];
const index_t num_elements_per_batch = data->shape_[1];
const index_t element_width = data->shape_[2];
for (index_t current_start = 0; current_start < topk; current_start += THRESHOLD) {
const index_t n_elems = topk - current_start;
const index_t num_blocks_per_row_batch = ceil_div(n_elems, n_threads);
const index_t num_blocks_per_row = num_blocks_per_row_batch * num_batches;
const index_t n_blocks = THRESHOLD / (sizeof(uint32_t) * 8) * num_blocks_per_row;
if (param.in_format == box_common_enum::kCorner) {
CalculateGreedyNMSResultsKernel<box_common_enum::kCorner>
<<<n_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>(
data->dptr_, scratch->dptr_, current_start, n_elems, num_batches,
num_blocks_per_row_batch, num_blocks_per_row, topk, element_width,
num_elements_per_batch, param.coord_start,
param.force_suppress ? -1 : param.id_index,
param.score_index, param.overlap_thresh);
} else {
CalculateGreedyNMSResultsKernel<box_common_enum::kCenter>
<<<n_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>(
data->dptr_, scratch->dptr_, current_start, n_elems, num_batches,
num_blocks_per_row_batch, num_blocks_per_row, topk, element_width,
num_elements_per_batch, param.coord_start,
param.force_suppress ? -1 : param.id_index,
param.score_index, param.overlap_thresh);
}
ReduceNMSResultTriangleKernel<<<num_batches, THRESHOLD, 0, Stream<gpu>::GetStream(s)>>>(
scratch->dptr_, data->dptr_, param.score_index,
element_width, num_batches, num_elements_per_batch,
current_start, topk);
const index_t n_rest_elems = n_elems - THRESHOLD;
const index_t num_rest_blocks_per_batch = ceil_div(n_rest_elems, n_threads);
const index_t num_rest_blocks = num_rest_blocks_per_batch * num_batches;
if (n_rest_elems > 0) {
ReduceNMSResultRestKernel<<<num_rest_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>(
data->dptr_, scratch->dptr_, param.score_index, element_width,
num_batches, num_elements_per_batch, current_start, topk,
num_rest_blocks_per_batch);
}
}
}
};
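/* Added explanatory sketch (not part of upstream MXNet): the NMS pass above
 * works on chunks of THRESHOLD (512) already-sorted boxes at a time. For a
 * chunk starting at current_start:
 * - CalculateGreedyNMSResultsKernel fills nms_scratch with one bit per
 *   (box, reference-box) pair, 32 reference boxes per uint32_t row, where a
 *   cleared bit means the box overlaps that reference box above the
 *   threshold;
 * - ReduceNMSResultTriangleKernel resolves suppression among the 512 boxes
 *   of the chunk itself and records which of them survive;
 * - ReduceNMSResultRestKernel then uses the surviving chunk boxes to
 *   invalidate boxes later in the topk range.
 * Scores of suppressed boxes are overwritten with -1 so the later compaction
 * step drops them. */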
template <int encode, typename DType>
__device__ __forceinline__ DType calculate_area(const DType b0, const DType b1,
const DType b2, const DType b3) {
DType width = b2;
DType height = b3;
if (encode == box_common_enum::kCorner) {
width -= b0;
height -= b1;
}
if (width < 0 || height < 0) return 0;
return width * height;
}
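/* Added worked example (not part of upstream MXNet): in the kCorner encoding
 * a box is stored as (xmin, ymin, xmax, ymax), so width and height must be
 * computed as xmax - xmin and ymax - ymin; in the kCenter encoding the box is
 * stored as (x, y, w, h), so b2/b3 already are the width and height. For
 * instance the corner box (1, 1, 4, 3) and the center box (2.5, 2, 3, 2)
 * describe the same region and both yield an area of 3 * 2 = 6. */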
template <int encode, typename DType>
__device__ __forceinline__ DType calculate_intersection(const DType a0, const DType a1,
const DType a2, const DType a3,
const DType b0, const DType b1,
const DType b2, const DType b3) {
DType wx, wy;
if (encode == box_common_enum::kCorner) {
const DType left = a0 > b0 ? a0 : b0;
const DType bottom = a1 > b1 ? a1 : b1;
const DType right = a2 < b2 ? a2 : b2;
const DType top = a3 < b3 ? a3 : b3;
wx = right - left;
wy = top - bottom;
} else {
const DType al = 2 * a0 - a2;
const DType ar = 2 * a0 + a2;
const DType bl = 2 * b0 - b2;
const DType br = 2 * b0 + b2;
const DType left = bl > al ? bl : al;
const DType right = br < ar ? br : ar;
wx = right - left;
const DType ab = 2 * a1 - a3;
const DType at = 2 * a1 + a3;
const DType bb = 2 * b1 - b3;
const DType bt = 2 * b1 + b3;
const DType bottom = bb > ab ? bb : ab;
const DType top = bt < at ? bt : at;
wy = top - bottom;
wy = wy / 4; // To compensate for both wx and wy being 2x too large
}
if (wx <= 0 || wy <= 0) {
return 0;
} else {
return (wx * wy);
}
}
template <int encode, typename DType>
__launch_bounds__(512)
__global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result,
const index_t current_start,
const index_t num_elems,
const index_t num_batches,
const index_t num_blocks_per_row_batch,
const index_t num_blocks_per_row,
const index_t topk,
const index_t element_width,
const index_t num_elements_per_batch,
const int coord_index,
const int class_index,
const int score_index,
const float threshold) {
constexpr int max_elem_width = 20;
constexpr int num_other_boxes = sizeof(uint32_t) * 8;
__shared__ DType other_boxes[max_elem_width * num_other_boxes];
__shared__ DType other_boxes_areas[num_other_boxes];
const index_t my_row = blockIdx.x / num_blocks_per_row;
const index_t my_block_offset_in_row = blockIdx.x % num_blocks_per_row;
const index_t my_block_offset_in_batch = my_block_offset_in_row % num_blocks_per_row_batch;
const index_t my_batch = (my_block_offset_in_row) / num_blocks_per_row_batch;
const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x +
current_start + threadIdx.x;
// Load other boxes
const index_t offset = (my_batch * num_elements_per_batch +
current_start + my_row * num_other_boxes) *
element_width;
for (int i = threadIdx.x; i < element_width * num_other_boxes; i += blockDim.x) {
other_boxes[i] = data[offset + i];
}
__syncthreads();
if (threadIdx.x < num_other_boxes) {
const int other_boxes_offset = element_width * threadIdx.x;
const DType their_area = calculate_area<encode>(
other_boxes[other_boxes_offset + coord_index + 0],
other_boxes[other_boxes_offset + coord_index + 1],
other_boxes[other_boxes_offset + coord_index + 2],
other_boxes[other_boxes_offset + coord_index + 3]);
other_boxes_areas[threadIdx.x] = their_area;
}
__syncthreads();
if (my_element_in_batch >= topk) return;
DType my_box[4];
DType my_class = -1;
DType my_score = -1;
const index_t my_offset = (my_batch * num_elements_per_batch + my_element_in_batch) *
element_width;
my_score = data[my_offset + score_index];
#pragma unroll
for (int i = 0; i < 4; ++i) {
my_box[i] = data[my_offset + coord_index + i];
}
if (class_index != -1) {
my_class = data[my_offset + class_index];
}
DType my_area = calculate_area<encode>(my_box[0], my_box[1], my_box[2], my_box[3]);
uint32_t ret = 0;
if (my_score != -1) {
#pragma unroll
for (int i = 0; i < num_other_boxes; ++i) {
const int other_boxes_offset = element_width * i;
if ((class_index == -1 || my_class == other_boxes[other_boxes_offset + class_index]) &&
other_boxes[other_boxes_offset + score_index] != -1) {
const DType their_area = other_boxes_areas[i];
const DType intersect = calculate_intersection<encode>(
my_box[0], my_box[1], my_box[2], my_box[3],
other_boxes[other_boxes_offset + coord_index + 0],
other_boxes[other_boxes_offset + coord_index + 1],
other_boxes[other_boxes_offset + coord_index + 2],
other_boxes[other_boxes_offset + coord_index + 3]);
if (intersect > threshold * (my_area + their_area - intersect)) {
ret = ret | (1u << i);
}
}
}
}
result[(my_row * num_batches + my_batch) * topk + my_element_in_batch] = ~ret;
}
template <typename DType>
__launch_bounds__(NMS<DType>::THRESHOLD)
__global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results,
DType * data,
const index_t score_index,
const index_t element_width,
const index_t num_batches,
const index_t num_elements_per_batch,
const index_t start_index,
const index_t topk) {
constexpr int n_threads = NMS<DType>::THRESHOLD;
constexpr int warp_size = 32;
const index_t my_batch = blockIdx.x;
const index_t my_element_in_batch = threadIdx.x + start_index;
const index_t my_element = my_batch * topk + my_element_in_batch;
const int my_warp = threadIdx.x / warp_size;
const int my_lane = threadIdx.x % warp_size;
__shared__ uint32_t current_valid_boxes[n_threads / warp_size];
const uint32_t full_mask = 0xFFFFFFFF;
const uint32_t my_lane_mask = 1 << my_lane;
const uint32_t earlier_threads_mask = (1 << (my_lane + 1)) - 1;
uint32_t valid = my_lane_mask;
uint32_t valid_boxes = full_mask;
uint32_t my_next_mask = my_element_in_batch < topk ?
nms_results[my_element]:
full_mask;
#pragma unroll
for (int i = 0; i < n_threads / warp_size; ++i) {
uint32_t my_mask = my_next_mask;
my_next_mask = (((i + 1) < n_threads / warp_size) &&
(my_element_in_batch < topk)) ?
nms_results[(i + 1) * topk * num_batches + my_element]:
full_mask;
if (my_warp == i && !__all_sync(full_mask, my_mask == full_mask)) {
my_mask = my_mask | earlier_threads_mask;
// Loop over warp_size - 1 because the last
// thread does not contribute to the mask anyway
#pragma unroll
for (int j = 0; j < warp_size - 1; ++j) {
const uint32_t mask = __shfl_sync(full_mask, valid ? my_mask : full_mask, j);
valid = valid & mask;
}
valid_boxes = __ballot_sync(full_mask, valid);
}
if (my_lane == 0 && my_warp == i) {
current_valid_boxes[i] = valid_boxes;
}
__syncthreads();
if ((my_warp > i) && (((~my_mask) & current_valid_boxes[i]) != 0)) {
valid = 0;
}
}
if (my_lane == 0) {
nms_results[my_element] = valid_boxes;
}
if (valid == 0) {
data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width +
score_index] = -1;
}
}
template <typename DType>
__launch_bounds__(512)
__global__ void ReduceNMSResultRestKernel(DType* data,
const uint32_t* nms_results,
const index_t score_index,
const index_t element_width,
const index_t num_batches,
const index_t num_elements_per_batch,
const index_t start_index,
const index_t topk,
const index_t num_blocks_per_batch) {
constexpr int num_other_boxes = sizeof(uint32_t) * 8;
constexpr int num_iterations = NMS<DType>::THRESHOLD / num_other_boxes;
constexpr int warp_size = 32;
const index_t my_block_offset_in_batch = blockIdx.x % num_blocks_per_batch;
const index_t my_batch = blockIdx.x / num_blocks_per_batch;
const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x +
start_index + NMS<DType>::THRESHOLD + threadIdx.x;
const index_t my_element = my_batch * topk + my_element_in_batch;
if (my_element_in_batch >= topk) return;
bool valid = true;
#pragma unroll
for (int i = 0; i < num_iterations; ++i) {
const uint32_t my_mask = nms_results[i * topk * num_batches + my_element];
const uint32_t valid_boxes = nms_results[my_batch * topk + i * warp_size + start_index];
const bool no_hit = (valid_boxes & (~my_mask)) == 0;
valid = valid && no_hit;
}
if (!valid) {
data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width +
score_index] = -1;
}
}
template <typename DType>
TempWorkspace<DType> GetWorkspace(const index_t num_batch,
const index_t num_elem,
const int width_elem,
const index_t topk,
const OpContext& ctx) {
TempWorkspace<DType> workspace;
Stream<gpu> *s = ctx.get_stream<gpu>();
const int alignment = 128;
// Get the workspace size
workspace.scores_temp_space = 2 * align(num_batch * num_elem * sizeof(DType), alignment);
workspace.indices_temp_spaces = 2 * align(num_batch * num_elem * sizeof(index_t), alignment);
WorkspaceForSort(num_elem, topk, alignment, &workspace);
// Place for a buffer
workspace.buffer_space = align(num_batch * num_elem * width_elem * sizeof(DType), alignment);
workspace.nms_scratch_space = align(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8) *
num_batch * topk * sizeof(uint32_t), alignment);
const size_t workspace_size = workspace.scores_temp_space +
workspace.scratch_space +
workspace.buffer_space +
workspace.nms_scratch_space +
workspace.indices_temp_spaces;
// Obtain the memory for workspace
Tensor<gpu, 1, double> scratch_memory = ctx.requested[box_nms_enum::kTempSpace]
.get_space_typed<gpu, 1, double>(mshadow::Shape1(ceil_div(workspace_size, sizeof(double))), s);
// Populate workspace pointers
workspace.scores = reinterpret_cast<DType*>(scratch_memory.dptr_);
workspace.scratch = reinterpret_cast<uint8_t*>(workspace.scores) +
workspace.scores_temp_space;
workspace.buffer = reinterpret_cast<DType*>(workspace.scratch +
workspace.scratch_space);
workspace.nms_scratch = reinterpret_cast<uint32_t*>(
reinterpret_cast<uint8_t*>(workspace.buffer) +
workspace.buffer_space);
workspace.indices = reinterpret_cast<index_t*>(
reinterpret_cast<uint8_t*>(workspace.nms_scratch) +
workspace.nms_scratch_space);
return workspace;
}
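/* Added note (not part of upstream MXNet): the single scratch allocation
 * requested above is carved up in the order
 *   scores (2 copies: scores + sorted_scores) | sort scratch | buffer |
 *   nms_scratch | indices (2 copies: indices + sorted_indices),
 * with every region padded to a 128-byte boundary by align(); the pointer
 * arithmetic that populates the workspace simply walks that layout. */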
template <typename DType>
__global__ void ExtractScoresKernel(const DType* data, DType* scores,
const index_t N, const int element_width,
const int score_index) {
const index_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
scores[tid] = data[tid * element_width + score_index];
}
}
template <typename DType>
void CompactNMSResults(const Tensor<gpu, 3, DType>& data,
Tensor<gpu, 3, DType>* out,
Tensor<gpu, 1, index_t>* indices,
Tensor<gpu, 1, DType>* scores,
Tensor<gpu, 1, index_t>* sorted_indices,
Tensor<gpu, 1, DType>* sorted_scores,
Tensor<gpu, 1, char>* scratch,
const int score_index,
const index_t topk,
Stream<gpu>* s) {
using mshadow::Shape1;
constexpr int n_threads = 512;
const index_t num_elements = scores->shape_.Size();
const index_t num_elements_per_batch = data.shape_[1];
const index_t num_batches = data.shape_[0];
const int element_width = data.shape_[2];
const index_t n_blocks = ceil_div(num_elements, n_threads);
ExtractScoresKernel<<<n_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>(
data.dptr_, scores->dptr_, num_elements, element_width, score_index);
*indices = mshadow::expr::range<index_t>(0, num_elements);
for (index_t i = 0; i < num_batches; ++i) {
// Sort each batch separately
Tensor<gpu, 1, DType> scores_batch(scores->dptr_ + i * num_elements_per_batch,
Shape1(topk),
s);
Tensor<gpu, 1, index_t> indices_batch(indices->dptr_ + i * num_elements_per_batch,
Shape1(topk),
s);
Tensor<gpu, 1, DType> sorted_scores_batch(sorted_scores->dptr_ + i * num_elements_per_batch,
Shape1(topk),
s);
Tensor<gpu, 1, index_t> sorted_indices_batch(sorted_indices->dptr_ + i * num_elements_per_batch,
Shape1(topk),
s);
mxnet::op::SortByKey(scores_batch, indices_batch, false, scratch,
0, 8 * sizeof(DType), 1, &sorted_scores_batch,
&sorted_indices_batch);
}
CompactData<true>(*sorted_indices, data, out, topk, score_index, s);
}
} // namespace
void BoxNMSForwardGPU_notemp(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using mshadow::Shape1;
using mshadow::Shape2;
using mshadow::Shape3;
CHECK_NE(req[0], kAddTo) << "BoxNMS does not support kAddTo";
CHECK_NE(req[0], kWriteInplace) << "BoxNMS does not support in place computation";
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]";
const BoxNMSParam& param = nnvm::get<BoxNMSParam>(attrs.parsed);
Stream<gpu> *s = ctx.get_stream<gpu>();
mxnet::TShape in_shape = inputs[box_nms_enum::kData].shape_;
int indim = in_shape.ndim();
int num_batch = indim <= 2? 1 : in_shape.ProdShape(0, indim - 2);
int num_elem = in_shape[indim - 2];
int width_elem = in_shape[indim - 1];
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<gpu, 3, DType> data = inputs[box_nms_enum::kData]
.get_with_shape<gpu, 3, DType>(Shape3(num_batch, num_elem, width_elem), s);
Tensor<gpu, 3, DType> out = outputs[box_nms_enum::kOut]
.get_with_shape<gpu, 3, DType>(Shape3(num_batch, num_elem, width_elem), s);
// Special case for topk == 0
if (param.topk == 0) {
if (req[0] != kNullOp &&
req[0] != kWriteInplace) {
out = mshadow::expr::F<mshadow_op::identity>(data);
}
return;
}
index_t topk = param.topk > 0 ? std::min(param.topk, num_elem) : num_elem;
const auto& workspace = GetWorkspace<DType>(num_batch, num_elem,
width_elem, topk, ctx);
FilterAndPrepareAuxData(data, &out, workspace, param, s);
Tensor<gpu, 1, DType> scores(workspace.scores, Shape1(num_batch * num_elem), s);
Tensor<gpu, 1, DType> sorted_scores(workspace.scores + scores.MSize(),
Shape1(num_batch * num_elem), s);
Tensor<gpu, 1, index_t> indices(workspace.indices, Shape1(num_batch * num_elem), s);
Tensor<gpu, 1, index_t> sorted_indices(workspace.indices + indices.MSize(),
Shape1(num_batch * num_elem), s);
Tensor<gpu, 1, char> scratch(reinterpret_cast<char*>(workspace.scratch),
Shape1(workspace.scratch_space), s);
Tensor<gpu, 3, DType> buffer(workspace.buffer,
Shape3(num_batch, num_elem, width_elem), s);
Tensor<gpu, 2, uint32_t> nms_scratch(workspace.nms_scratch,
Shape2(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8),
topk * num_batch),
s);
indices = mshadow::expr::range<index_t>(0, num_batch * num_elem);
for (index_t i = 0; i < num_batch; ++i) {
// Sort each batch separately
Tensor<gpu, 1, DType> scores_batch(scores.dptr_ + i * num_elem,
Shape1(num_elem),
s);
Tensor<gpu, 1, index_t> indices_batch(indices.dptr_ + i * num_elem,
Shape1(num_elem),
s);
Tensor<gpu, 1, DType> sorted_scores_batch(sorted_scores.dptr_ + i * num_elem,
Shape1(num_elem),
s);
Tensor<gpu, 1, index_t> sorted_indices_batch(sorted_indices.dptr_ + i * num_elem,
Shape1(num_elem),
s);
mxnet::op::SortByKey(scores_batch, indices_batch, false, &scratch, 0,
8 * sizeof(DType), 1, &sorted_scores_batch,
&sorted_indices_batch);
}
CompactData<false>(sorted_indices, out, &buffer, topk, -1, s);
NMS<DType> nms;
nms(&buffer, &nms_scratch, topk, param, s);
CompactNMSResults(buffer, &out, &indices, &scores, &sorted_indices,
&sorted_scores, &scratch, param.score_index, topk, s);
// convert encoding
if (param.in_format != param.out_format) {
if (box_common_enum::kCenter == param.out_format) {
mxnet::op::mxnet_op::Kernel<corner_to_center, gpu>::Launch(s, num_batch * num_elem,
out.dptr_ + param.coord_start, width_elem);
} else {
mxnet::op::mxnet_op::Kernel<center_to_corner, gpu>::Launch(s, num_batch * num_elem,
out.dptr_ + param.coord_start, width_elem);
}
}
});
}
void BoxNMSForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]";
if (req[1] == kNullOp) {
BoxNMSForwardGPU_notemp(attrs, ctx, inputs, req, outputs);
return;
}
BoxNMSForward<gpu>(attrs, ctx, inputs, req, outputs);
}
NNVM_REGISTER_OP(_contrib_box_nms)
.set_attr<FCompute>("FCompute<gpu>", BoxNMSForwardGPU);
NNVM_REGISTER_OP(_backward_contrib_box_nms)
.set_attr<FCompute>("FCompute<gpu>", BoxNMSBackward<gpu>);
NNVM_REGISTER_OP(_contrib_box_iou)
.set_attr<FCompute>("FCompute<gpu>", BoxOverlapForward<gpu>);
NNVM_REGISTER_OP(_backward_contrib_box_iou)
.set_attr<FCompute>("FCompute<gpu>", BoxOverlapBackward<gpu>);
NNVM_REGISTER_OP(_contrib_bipartite_matching)
.set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingForward<gpu>);
NNVM_REGISTER_OP(_backward_contrib_bipartite_matching)
.set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingBackward<gpu>);
NNVM_REGISTER_OP(_contrib_box_encode)
.set_attr<FCompute>("FCompute<gpu>", BoxEncodeForward<gpu>);
NNVM_REGISTER_OP(_contrib_box_decode)
.set_attr<FCompute>("FCompute<gpu>", BoxDecodeForward<gpu>);
} // namespace op
} // namespace mxnet
|
5c7769aefdfdf5c258bba846a0fa2304ae88ee65.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ============================================================================
*
* Authors: Prashant Pandey <[email protected]>
* Rob Johnson <[email protected]>
* Hunter McCoy <[email protected]>
*
* ============================================================================
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
//timing stuff
#include <chrono>
#include <iostream>
#include <cmath>
//how fast is a thrust sort?
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/fill.h>
#include <thrust/memory.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include "hashutil.cuh"
#include "gqf.cuh"
#include "gqf_int.cuh"
#include <stdexcept>
#include <hip/hip_runtime_api.h>
/******************************************************************
* Code for managing the metadata bits and slots w/o interpreting *
* the content of the slots.
******************************************************************/
#define MAX_VALUE(nbits) ((1ULL << (nbits)) - 1)
#define BITMASK(nbits) \
((nbits) == 64 ? 0xffffffffffffffff : MAX_VALUE(nbits))
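/* Added note (not in the original source): worked example of the masking
 * macros above. BITMASK(4) expands to (1ULL << 4) - 1 == 0xF, i.e. the low
 * four bits set. The special case for nbits == 64 exists because shifting a
 * 64-bit value by 64 is undefined behavior in C/C++, so the macro returns the
 * all-ones word directly instead of computing (1ULL << 64) - 1. */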
#define NUM_SLOTS_TO_LOCK (1ULL<<13)
#define LOCK_DIST 64
#define EXP_BEFORE_FAILURE -15
#define CLUSTER_SIZE (1ULL<<14)
#define METADATA_WORD(qf,field,slot_index) \
(get_block((qf), (slot_index) / QF_SLOTS_PER_BLOCK)->field[((slot_index) % QF_SLOTS_PER_BLOCK) / 64])
#define GET_NO_LOCK(flag) (flag & QF_NO_LOCK)
#define GET_TRY_ONCE_LOCK(flag) (flag & QF_TRY_ONCE_LOCK)
#define GET_WAIT_FOR_LOCK(flag) (flag & QF_WAIT_FOR_LOCK)
#define GET_KEY_HASH(flag) (flag & QF_KEY_IS_HASH)
#define NUM_BUFFERS 10
#define MAX_BUFFER_SIZE 100
#define CYCLES_PER_SECOND 1601000000
#define MAX_DEPTH 16
#define SELECT_BOUND 32
#define DEBUG_ASSERTS 0
#define DROP_ON_RUNEND 0
#define RUNEND_CUTOFF 15
#define DROP_ON_BIG_CLUSTER 0
#define BIG_CLUSTER_DROPOFF 4096
#define DISTANCE_FROM_HOME_SLOT_CUTOFF 1000
#define BILLION 1000000000L
#define CUDA_CHECK(ans) \
gpuAssert((ans), __FILE__, __LINE__);
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
printf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
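/* Added usage sketch (not part of the original file): CUDA_CHECK is meant to
 * wrap HIP runtime calls so failures are reported with file/line context,
 * e.g. (d_buf is a hypothetical device buffer):
 *
 *   uint64_t *d_buf;
 *   CUDA_CHECK(hipMalloc((void **)&d_buf, 1024 * sizeof(uint64_t)));
 *   CUDA_CHECK(hipMemset(d_buf, 0, 1024 * sizeof(uint64_t)));
 */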
__constant__ char kmer_vals[6] = {'F', 'A', 'C', 'T', 'G', '0'};
#ifdef DEBUG
#define PRINT_DEBUG 1
#else
#define PRINT_DEBUG 0
#endif
#define DEBUG_CQF(fmt, ...) \
do { if (PRINT_DEBUG) printf( fmt, __VA_ARGS__); } while (0)
#define DEBUG_DUMP(qf) \
do { if (PRINT_DEBUG) qf_dump_metadata(qf); } while (0)
#if QF_BITS_PER_SLOT > 0
__host__ __device__ static inline qfblock* get_block(const QF* qf, uint64_t block_index)
{
return &qf->blocks[block_index];
}
#else
__host__ __device__ static inline qfblock* get_block(const QF* qf, uint64_t block_index)
{
return (qfblock*)(((char*)qf->blocks)
+ block_index * (sizeof(qfblock) + QF_SLOTS_PER_BLOCK *
qf->metadata->bits_per_slot / 8));
}
#endif
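/* Added note (not in the original source): when QF_BITS_PER_SLOT is a
 * positive compile-time constant, qf->blocks is an ordinary array of equally
 * sized qfblock structs and get_block() can index it directly; when it is 0,
 * the slot width is only known at run time from qf->metadata->bits_per_slot,
 * so the block stride has to be computed by hand, which is why the second
 * overload above does raw byte arithmetic. */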
/*
__device__ static __inline__ unsigned long long rdtsc(void)
{
unsigned hi, lo;
__asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
}
*/
/*
__host__ __device__ static void modify_metadata(pc_t *metadata, int cnt)
{
pc_add(metadata, cnt);
return;
}
*/
/* Register constraint sizes follow https://docs.nvidia.com/cuda/inline-ptx-assembly/index.html:
   the "l" constraint corresponds to a .u64 register.
*/
__host__ __device__ static inline int popcnt(uint64_t val)
{
#ifdef __CUDA_ARCH__
val = __popcll(val);
#else
#ifndef __x86_64
val = __builtin_popcount(val);
#else
asm("popcnt %[val], %[val]"
: [val] "+r" (val)
:
: "cc");
#endif
#endif
return val;
}
// __device__ static inline int64_t bitscanreverse(uint64_t val)
// {
// if (val == 0) {
// return -1;
// } else {
// asm("bsr %[val], %[val]"
// : [val] "+l" (val)
// :
// : );
// return val;
// }
// }
__host__ __device__ static inline int popcntv(const uint64_t val, int ignore)
{
if (ignore % 64)
return popcnt (val & ~BITMASK(ignore % 64));
else
return popcnt(val);
}
// Returns the number of 1s up to (and including) the pos'th bit
// Bits are numbered from 0
__host__ __device__ static inline int bitrank(uint64_t val, int pos) {
val = val & ((2ULL << pos) - 1);
#ifdef __CUDA_ARCH__
val = __popcll(val);
#else
//quick fix for summit
#ifndef __x86_64
val = __builtin_popcount(val);
#else
asm("popcnt %[val], %[val]"
: [val] "+r" (val)
:
: "cc");
#endif
#endif
return val;
}
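/* Added worked example (not in the original source): with val = 0x29
 * (binary 101001, bits 0, 3 and 5 set), bitrank(0x29, 0) == 1,
 * bitrank(0x29, 3) == 2 and bitrank(0x29, 5) == 3, i.e. the count includes
 * the bit at position pos. */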
//moved dump functions
__host__ __device__ static inline void qf_dump_block(const QF *qf, uint64_t i)
{
uint64_t j;
printf("Block %llu Runs from %llu to %llu\n",i, i*QF_SLOTS_PER_BLOCK, (i+1)*QF_SLOTS_PER_BLOCK);
printf("Offset: %-192d", get_block(qf, i)->offset);
printf("\n");
for (j = 0; j < QF_SLOTS_PER_BLOCK; j++)
printf("%02lx ", j);
printf("\n");
for (j = 0; j < QF_SLOTS_PER_BLOCK; j++)
printf(" %d ", (get_block(qf, i)->occupieds[j/64] & (1ULL << (j%64))) ? 1 : 0);
printf("\n");
for (j = 0; j < QF_SLOTS_PER_BLOCK; j++)
printf(" %d ", (get_block(qf, i)->runends[j/64] & (1ULL << (j%64))) ? 1 : 0);
printf("\n");
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32
for (j = 0; j < QF_SLOTS_PER_BLOCK; j++)
printf("%02x ", get_block(qf, i)->slots[j]);
#elif QF_BITS_PER_SLOT == 64
for (j = 0; j < QF_SLOTS_PER_BLOCK; j++)
printf("%02lx ", get_block(qf, i)->slots[j]);
#else
for (j = 0; j < QF_SLOTS_PER_BLOCK * qf->metadata->bits_per_slot / 8; j++)
printf("%02x ", get_block(qf, i)->slots[j]);
#endif
printf("\n");
printf("\n");
}
__host__ __device__ void qf_dump_metadata(const QF *qf) {
printf("Slots: %lu Occupied: %lu Elements: %lu Distinct: %lu\n",
qf->metadata->nslots,
qf->metadata->noccupied_slots,
qf->metadata->nelts,
qf->metadata->ndistinct_elts);
printf("Key_bits: %lu Value_bits: %lu Remainder_bits: %lu Bits_per_slot: %lu\n",
qf->metadata->key_bits,
qf->metadata->value_bits,
qf->metadata->key_remainder_bits,
qf->metadata->bits_per_slot);
}
__host__ __device__ void qf_dump(const QF *qf)
{
uint64_t i;
printf("%lu %lu %lu\n",
qf->metadata->nblocks,
qf->metadata->ndistinct_elts,
qf->metadata->nelts);
for (i = 0; i < qf->metadata->nblocks; i++) {
qf_dump_block(qf, i);
}
}
/**
* Returns the position of the k-th 1 in the 64-bit word x.
* k is 0-based, so k=0 returns the position of the first 1.
*
* Uses the broadword selection algorithm by Vigna [1], improved by Gog
* and Petri [2] and Vigna [3].
*
* [1] Sebastiano Vigna. Broadword Implementation of Rank/Select
* Queries. WEA, 2008
*
* [2] Simon Gog, Matthias Petri. Optimized succinct data
* structures for massive data. Softw. Pract. Exper., 2014
*
* [3] Sebastiano Vigna. MG4J 5.2.1. http://mg4j.di.unimi.it/
* The following code is taken from
* https://github.com/facebook/folly/blob/b28186247104f8b90cfbe094d289c91f9e413317/folly/experimental/Select64.h
*/
__device__ __constant__ uint8_t gpukSelectInByte[2048] = {
8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0,
1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0,
2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0,
1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0,
3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0,
1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0,
2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0,
1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0,
1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 8, 8, 8, 1,
8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2,
2, 1, 8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1,
4, 3, 3, 1, 3, 2, 2, 1, 8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4,
4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1,
3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 7, 7, 1, 7, 2,
2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3,
3, 1, 3, 2, 2, 1, 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1,
4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2,
2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 8, 8, 8, 8, 8, 8, 2,
8, 8, 8, 3, 8, 3, 3, 2, 8, 8, 8, 4, 8, 4, 4, 2, 8, 4, 4, 3, 4, 3, 3, 2, 8, 8,
8, 5, 8, 5, 5, 2, 8, 5, 5, 3, 5, 3, 3, 2, 8, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3,
4, 3, 3, 2, 8, 8, 8, 6, 8, 6, 6, 2, 8, 6, 6, 3, 6, 3, 3, 2, 8, 6, 6, 4, 6, 4,
4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 8, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2,
6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 7, 8, 7, 7, 2, 8, 7,
7, 3, 7, 3, 3, 2, 8, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 5,
7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2, 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3,
3, 2, 8, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2, 7, 6, 6, 4, 6, 4, 4, 2,
6, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5,
5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 3, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 8, 4, 4, 3, 8, 8, 8, 8, 8, 8,
8, 5, 8, 8, 8, 5, 8, 5, 5, 3, 8, 8, 8, 5, 8, 5, 5, 4, 8, 5, 5, 4, 5, 4, 4, 3,
8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 3, 8, 8, 8, 6, 8, 6, 6, 4, 8, 6,
6, 4, 6, 4, 4, 3, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 3, 8, 6, 6, 5,
6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7,
7, 3, 8, 8, 8, 7, 8, 7, 7, 4, 8, 7, 7, 4, 7, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 5,
8, 7, 7, 5, 7, 5, 5, 3, 8, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3, 8, 8,
8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 3, 8, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4,
6, 4, 4, 3, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3, 7, 6, 6, 5, 6, 5,
5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 4, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6,
6, 4, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 6, 8, 6, 6, 5,
8, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8,
8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7,
8, 7, 7, 5, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 4, 8, 8, 8, 8, 8, 8,
8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 4,
8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6,
6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6,
8, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7,
8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8,
8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6,
6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7
};
// const uint8_t hostkSelectInByte[2048] = {
// 8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0,
// 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0,
// 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0,
// 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0,
// 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0,
// 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0,
// 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0,
// 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
// 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0,
// 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 8, 8, 8, 1,
// 8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2,
// 2, 1, 8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1,
// 4, 3, 3, 1, 3, 2, 2, 1, 8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4,
// 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1,
// 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 7, 7, 1, 7, 2,
// 2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
// 7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3,
// 3, 1, 3, 2, 2, 1, 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1,
// 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2,
// 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 8, 8, 8, 8, 8, 8, 2,
// 8, 8, 8, 3, 8, 3, 3, 2, 8, 8, 8, 4, 8, 4, 4, 2, 8, 4, 4, 3, 4, 3, 3, 2, 8, 8,
// 8, 5, 8, 5, 5, 2, 8, 5, 5, 3, 5, 3, 3, 2, 8, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3,
// 4, 3, 3, 2, 8, 8, 8, 6, 8, 6, 6, 2, 8, 6, 6, 3, 6, 3, 3, 2, 8, 6, 6, 4, 6, 4,
// 4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 8, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2,
// 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 7, 8, 7, 7, 2, 8, 7,
// 7, 3, 7, 3, 3, 2, 8, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 5,
// 7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2, 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3,
// 3, 2, 8, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2, 7, 6, 6, 4, 6, 4, 4, 2,
// 6, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5,
// 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 3, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 8, 4, 4, 3, 8, 8, 8, 8, 8, 8,
// 8, 5, 8, 8, 8, 5, 8, 5, 5, 3, 8, 8, 8, 5, 8, 5, 5, 4, 8, 5, 5, 4, 5, 4, 4, 3,
// 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 3, 8, 8, 8, 6, 8, 6, 6, 4, 8, 6,
// 6, 4, 6, 4, 4, 3, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 3, 8, 6, 6, 5,
// 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7,
// 7, 3, 8, 8, 8, 7, 8, 7, 7, 4, 8, 7, 7, 4, 7, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 5,
// 8, 7, 7, 5, 7, 5, 5, 3, 8, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3, 8, 8,
// 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 3, 8, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4,
// 6, 4, 4, 3, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3, 7, 6, 6, 5, 6, 5,
// 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 4, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6,
// 6, 4, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 6, 8, 6, 6, 5,
// 8, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8,
// 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7,
// 8, 7, 7, 5, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 4, 8, 8, 8, 8, 8, 8,
// 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 4,
// 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6,
// 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6,
// 8, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7,
// 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8,
// 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6,
// 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7
// };
__host__ __device__ static inline uint64_t _select64(uint64_t x, int k)
{
if (k >= popcnt(x)) { return 64; }
const uint64_t kOnesStep4 = 0x1111111111111111ULL;
const uint64_t kOnesStep8 = 0x0101010101010101ULL;
const uint64_t kMSBsStep8 = 0x80ULL * kOnesStep8;
uint64_t s = x;
s = s - ((s & 0xA * kOnesStep4) >> 1);
s = (s & 0x3 * kOnesStep4) + ((s >> 2) & 0x3 * kOnesStep4);
s = (s + (s >> 4)) & 0xF * kOnesStep8;
uint64_t byteSums = s * kOnesStep8;
uint64_t kStep8 = k * kOnesStep8;
uint64_t geqKStep8 = (((kStep8 | kMSBsStep8) - byteSums) & kMSBsStep8);
uint64_t place = popcnt(geqKStep8) * 8;
uint64_t byteRank = k - (((byteSums << 8) >> place) & (uint64_t)(0xFF));
#ifdef __CUDA_ARCH__
return place + gpukSelectInByte[((x >> place) & 0xFF) | (byteRank << 8)];
#else
abort();
return 0;
//return place + hostkSelectInByte[((x >> place) & 0xFF) | (byteRank << 8)];
#endif // __CUDA_ARCH__
}
// Returns the position of the rank'th 1. (rank = 0 returns the 1st 1)
// Returns 64 if there are fewer than rank+1 1s.
__host__ __device__ static inline uint64_t bitselect(uint64_t val, int rank) {
#ifdef __SSE4_2_
uint64_t i = 1ULL << rank;
asm("pdep %[val], %[mask], %[val]"
: [val] "+r" (val)
: [mask] "r" (i));
asm("tzcnt %[bit], %[index]"
: [index] "=r" (i)
: [bit] "g" (val)
: "cc");
return i;
#endif
return _select64(val, rank);
}
__host__ __device__ static inline uint64_t bitselectv(const uint64_t val, int ignore, int rank)
{
return bitselect(val & ~BITMASK(ignore % 64), rank);
}
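/* Added worked example (not in the original source): for the same val = 0x29
 * (bits 0, 3 and 5 set), bitselect(0x29, 0) == 0, bitselect(0x29, 1) == 3,
 * bitselect(0x29, 2) == 5 and bitselect(0x29, 3) == 64 because only three
 * bits are set. bitselect is the inverse of bitrank above: whenever it
 * returns a position p < 64, bitrank(val, p) == rank + 1. bitselectv simply
 * clears the lowest (ignore % 64) bits first, so bitselectv(0x29, 2, 0) == 3. */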
__host__ __device__ static inline int is_runend(const QF *qf, uint64_t index)
{
return (METADATA_WORD(qf, runends, index) >> ((index % QF_SLOTS_PER_BLOCK) %
64)) & 1ULL;
}
__host__ __device__ static inline int is_occupied(const QF *qf, uint64_t index)
{
return (METADATA_WORD(qf, occupieds, index) >> ((index % QF_SLOTS_PER_BLOCK) %
64)) & 1ULL;
}
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index)
{
//ERR: Index passed in is incorrect
//printf("slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
return get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[index % QF_SLOTS_PER_BLOCK];
}
__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value)
{
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[index % QF_SLOTS_PER_BLOCK] =
value & BITMASK(qf->metadata->bits_per_slot);
}
#elif QF_BITS_PER_SLOT > 0
/* Little-endian code .... Big-endian is TODO */
__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index)
{
/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
* to generate buggy code. :/ */
//printf("Other get slot: slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
uint64_t *p = (uint64_t *)&get_block(qf, index /
QF_SLOTS_PER_BLOCK)->slots[(index %
QF_SLOTS_PER_BLOCK)
* QF_BITS_PER_SLOT / 8];
return (uint64_t)(((*p) >> (((index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT) %
8)) & BITMASK(QF_BITS_PER_SLOT));
}
__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value)
{
/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
* to generate buggy code. :/ */
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
uint64_t *p = (uint64_t *)&get_block(qf, index /
QF_SLOTS_PER_BLOCK)->slots[(index %
QF_SLOTS_PER_BLOCK)
* QF_BITS_PER_SLOT / 8];
uint64_t t = *p;
uint64_t mask = BITMASK(QF_BITS_PER_SLOT);
uint64_t v = value;
int shift = ((index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT) % 8;
mask <<= shift;
v <<= shift;
t &= ~mask;
t |= v;
*p = t;
}
#else
/* Little-endian code .... Big-endian is TODO */
__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index)
{
	//printf("Third get slot?!? slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
* to generate buggy code. :/ */
uint64_t *p = (uint64_t *)&get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[(index %QF_SLOTS_PER_BLOCK)* qf->metadata->bits_per_slot / 8];
return (uint64_t)(((*p) >> (((index % QF_SLOTS_PER_BLOCK) *qf->metadata->bits_per_slot) % 8)) & BITMASK(qf->metadata->bits_per_slot));
}
__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value)
{
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
* to generate buggy code. :/ */
uint64_t *p = (uint64_t *)&get_block(qf, index /QF_SLOTS_PER_BLOCK)->slots[(index %QF_SLOTS_PER_BLOCK)* qf->metadata->bits_per_slot / 8];
uint64_t t = *p;
uint64_t mask = BITMASK(qf->metadata->bits_per_slot);
uint64_t v = value;
int shift = ((index % QF_SLOTS_PER_BLOCK) * qf->metadata->bits_per_slot) % 8;
mask <<= shift;
v <<= shift;
t &= ~mask;
t |= v;
*p = t;
}
#endif
__host__ __device__ static inline uint64_t run_end(const QF *qf, uint64_t hash_bucket_index);
__host__ __device__ static inline uint64_t block_offset(const QF *qf, uint64_t blockidx)
{
/* If we have extended counters and a 16-bit (or larger) offset
field, then we can safely ignore the possibility of overflowing
that field. */
if (sizeof(qf->blocks[0].offset) > 1 ||
get_block(qf, blockidx)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
return get_block(qf, blockidx)->offset;
return run_end(qf, QF_SLOTS_PER_BLOCK * blockidx - 1) - QF_SLOTS_PER_BLOCK *
blockidx + 1;
}
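/* Added note (not in the original source): with the default one-byte offset
 * field, a block whose true offset does not fit stores the saturated value
 * BITMASK(8) == 255. block_offset() trusts the stored field only below that
 * saturation point (or when the field is at least 16 bits wide) and otherwise
 * recomputes the offset from run_end() of the last slot of the previous
 * block, expressed as a distance into this block. */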
__host__ __device__ static inline uint64_t run_end(const QF *qf, uint64_t hash_bucket_index)
{
uint64_t bucket_block_index = hash_bucket_index / QF_SLOTS_PER_BLOCK;
uint64_t bucket_intrablock_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
uint64_t bucket_blocks_offset = block_offset(qf, bucket_block_index);
uint64_t bucket_intrablock_rank = bitrank(get_block(qf, bucket_block_index)->occupieds[0], bucket_intrablock_offset);
if (bucket_intrablock_rank == 0) {
if (bucket_blocks_offset <= bucket_intrablock_offset)
return hash_bucket_index;
else
return QF_SLOTS_PER_BLOCK * bucket_block_index + bucket_blocks_offset - 1;
}
uint64_t runend_block_index = bucket_block_index + bucket_blocks_offset /
QF_SLOTS_PER_BLOCK;
uint64_t runend_ignore_bits = bucket_blocks_offset % QF_SLOTS_PER_BLOCK;
uint64_t runend_rank = bucket_intrablock_rank - 1;
uint64_t runend_block_offset = bitselectv(get_block(qf,
runend_block_index)->runends[0],
runend_ignore_bits, runend_rank);
if (runend_block_offset == QF_SLOTS_PER_BLOCK) {
if (bucket_blocks_offset == 0 && bucket_intrablock_rank == 0) {
/* The block begins in empty space, and this bucket is in that region of
* empty space */
return hash_bucket_index;
} else {
do {
runend_rank -= popcntv(get_block(qf,
runend_block_index)->runends[0],
runend_ignore_bits);
runend_block_index++;
runend_ignore_bits = 0;
runend_block_offset = bitselectv(get_block(qf,
runend_block_index)->runends[0],
runend_ignore_bits, runend_rank);
} while (runend_block_offset == QF_SLOTS_PER_BLOCK);
}
}
uint64_t runend_index = QF_SLOTS_PER_BLOCK * runend_block_index +
runend_block_offset;
if (runend_index < hash_bucket_index)
return hash_bucket_index;
else
return runend_index;
}
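/* Sketch of the rank/select walk in run_end() (illustrative): if the bucket's
 * occupied bit is the 3rd set bit in its block (bucket_intrablock_rank == 3),
 * the run end is the 3rd runend bit at or after the block's stored offset,
 * located with bitselectv(). When that bit is not in this block, the do/while
 * loop walks forward one block at a time, subtracting the runend bits already
 * skipped from runend_rank. If the computed run end lies before the bucket
 * itself, the bucket's run is empty and its own index is returned. */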
__host__ __device__ static inline int offset_lower_bound(const QF *qf, uint64_t slot_index)
{
const qfblock * b = get_block(qf, slot_index / QF_SLOTS_PER_BLOCK);
const uint64_t slot_offset = slot_index % QF_SLOTS_PER_BLOCK;
const uint64_t boffset = b->offset;
const uint64_t occupieds = b->occupieds[0] & BITMASK(slot_offset+1);
//printf("slot %llu, slot_offset %02lx, block offset %llu, occupieds: %d ", slot_index, slot_offset, boffset, popcnt(occupieds));
#if DEBUG_ASSERTS
assert(QF_SLOTS_PER_BLOCK == 64);
#endif
//if (boffset < slot_offset) {
if (boffset <= slot_offset) {
const uint64_t runends = (b->runends[0] & BITMASK(slot_offset)) >> boffset;
//printf(" runends %d\n", popcnt(runends));
//printf("boffset < slot_offset, runends %llu, popcnt(occupieds) %d, popcnt(runends) %d\n", runends, popcnt(occupieds), popcnt(runends));
//printf("returning %d\n", popcnt(occupieds)-popcnt(runends));
return popcnt(occupieds) - popcnt(runends);
}
//printf("\n");
//printf("boffset > slot_offset, boffset-slotoffset %llu, popcnt(occupieds) %d\n", boffset-slot_offset, popcnt(occupieds));
//printf("returning %d\n", boffset-slot_offset+popcnt(occupieds));
return boffset - slot_offset + popcnt(occupieds);
}
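/* Worked example (assumed numbers): with a block offset of 3, slot_offset = 5,
 * four occupied bits at positions <= 5 and one runend bit in positions [3, 5),
 * the function returns 4 - 1 = 3. The value is only used as a lower bound:
 * is_empty() treats 0 as "this slot is free", and find_first_empty_slot()
 * below uses it as a jump distance over provably occupied slots. */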
__host__ __device__ static inline int is_empty(const QF *qf, uint64_t slot_index)
{
return offset_lower_bound(qf, slot_index) == 0;
}
__host__ __device__ static inline int might_be_empty(const QF *qf, uint64_t slot_index)
{
return !is_occupied(qf, slot_index)
&& !is_runend(qf, slot_index);
}
// __device__ static inline int probably_is_empty(const QF *qf, uint64_t slot_index)
// {
// return get_slot(qf, slot_index) == 0
// && !is_occupied(qf, slot_index)
// && !is_runend(qf, slot_index);
// }
//static inlines were re-added
__host__ __device__ uint64_t static inline find_first_empty_slot(QF *qf, uint64_t from)
{
uint64_t start_from = from;
do {
int t = offset_lower_bound(qf, from);
//get block of from
// if (t < 0){
// //this implies a failure in the code - you are going to
// find_first_empty_slot_verbose(qf, start_from);
// }
//this assert breaks testing because the last slot cannot be queried for a next slot:
//an assertion fires here, whereas we would rather throw an out_of_range exception
//that the test harness can catch to finish the test cleanly.
#if DEBUG_ASSERTS
assert(t>=0);
#endif
//the assert is all we can do in device code, where exceptions are unavailable;
//a host-exclusive variant (host_debug_find_first_empty_slot below) throws instead.
//if (t < 0) throw std::out_of_range("next free slot is before current slot, either final slot or gqf corruption.\n");
if (t == 0)
break;
from = from + t;
} while(1);
uint64_t bucket_start_from = start_from/NUM_SLOTS_TO_LOCK;
uint64_t end_start_from = from/NUM_SLOTS_TO_LOCK;
//testing without this gate to check if we see speed improvements
// if (end_start_from>bucket_start_from+1){
// //return -1;
// printf("Find first empty ran over a bucket: %llu\n", end_start_from-bucket_start_from);
// }
return from;
}
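/* Usage sketch with made-up numbers: starting from slot 100, if
 * offset_lower_bound() reports 7 the loop jumps to 107; if it reports 2 there
 * it jumps to 109; once the bound reaches 0 the loop stops and 109 is returned
 * as the first empty slot. Each iteration therefore skips a whole stretch of
 * occupied slots instead of probing them one by one. */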
__host__ __device__ uint64_t first_empty_slot_wrapper(QF * qf, uint64_t from){
return find_first_empty_slot(qf, from);
}
//exact same function as above, but host exclusive so that a try/catch block in cluster counting can catch the exception.
__host__ uint64_t host_debug_find_first_empty_slot(QF *qf, uint64_t from)
{
uint64_t start_from = from;
do {
int t = offset_lower_bound(qf, from);
//get block of from
// if (t < 0){
// //this implies a failure in the code - you are going to
// find_first_empty_slot_verbose(qf, start_from);
// }
//the device version can only assert because exceptions are unavailable there;
//this host-only variant throws an out_of_range exception instead, which the
//caller can catch to finish the test cleanly.
//assert(t>=0);
if (t < 0) throw std::out_of_range("next free slot is before current slot, either final slot or gqf corruption.\n");
if (t == 0)
break;
from = from + t;
} while(1);
uint64_t bucket_start_from = start_from/NUM_SLOTS_TO_LOCK;
uint64_t end_start_from = from/NUM_SLOTS_TO_LOCK;
//testing without this gate to check if we see speed improvements
if (end_start_from>bucket_start_from+1){
printf("Find first empty ran over a bucket: %llu\n", end_start_from-bucket_start_from);
}
return from;
}
__host__ __device__ static inline uint64_t shift_into_b(const uint64_t a, const uint64_t b,
const int bstart, const int bend,
const int amount)
{
const uint64_t a_component = bstart == 0 ? (a >> (64 - amount)) : 0;
const uint64_t b_shifted_mask = BITMASK(bend - bstart) << bstart;
const uint64_t b_shifted = ((b_shifted_mask & b) << amount) & b_shifted_mask;
const uint64_t b_mask = ~b_shifted_mask;
return a_component | b_shifted | (b & b_mask);
}
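/* Bit-level sketch with example arguments: shift_into_b(a, b, 0, 64, 4) shifts
 * the whole word b left by 4 and pulls the top 4 bits of a into its low bits,
 * which is what the remainder/runend shifting below needs when a shift crosses
 * a 64-bit word boundary. With bstart != 0 or bend != 64, the bits of b below
 * bstart and at or above bend are passed through unchanged. */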
// __device__ void* gpu_memmove(void* dst, const void* src, size_t n)
// {
// //printf("Launching memmove\n");
// //todo: allocate space per thread for this buffer before launching the kernel
// void* temp_buffer = malloc(n);
// //maybe stack allocation?
// //void* temp_buffer = void* char[n];
// // hipMemcpyAsync(temp_buffer, src, n, hipMemcpyDeviceToDevice);
// // hipMemcpyAsync(dst, temp_buffer, n, hipMemcpyDeviceToDevice);
// // //hipFree(temp_buffer);
// // return dst;
// memcpy(temp_buffer, src, n);
// memcpy(dst, temp_buffer, n);
// free(temp_buffer);
// }
//a variant of memmove that compares the two pointers
__device__ void gpu_memmove(void* dst, const void* src, size_t n)
{
//printf("Launching memmove\n");
//todo: allocate space per thread for this buffer before launching the kernel
char * char_dst = (char *) dst;
char * char_src = (char *) src;
//copy backwards whenever the destination starts before the end of the source
//(strictly >, since the byte at src+n itself is never copied); otherwise copy forwards
if (char_src+n > char_dst){
//copy backwards
for (int i =n-1; i >= 0; i--){
char_dst[i] = char_src[i];
}
} else {
//copy regular
for (int i =0; i<n; i++){
char_dst[i] = char_src[i];
}
}
//free(temp_buffer);
}
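/* Why the direction test matters (illustrative case): shifting a run of
 * remainders one slot to the right, as shift_remainders() below does, makes
 * the destination overlap the tail of the source, so src + n > dst and the
 * backward loop runs; a forward copy there would clobber source bytes before
 * they are read. Non-overlapping copies fall through to the forward branch.
 * The cooperative variant below performs the same copy with a stride-32 loop
 * so that a full warp shares the work. */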
//a variant of memmove that compares the two pointers
__device__ void gpu_memmove_cooperative(void* dst, const void* src, size_t n, int warpID)
{
//printf("Launching memmove\n");
//todo: allocate space per thread for this buffer before launching the kernel
char * char_dst = (char *) dst;
char * char_src = (char *) src;
//copy backwards whenever the destination starts before the end of the source
//(strictly >, since the byte at src+n itself is never copied); otherwise copy forwards
if (char_src+n > char_dst){
//copy backwards
for (int i =n-1-warpID; i >= 0; i-=32){
char_dst[i] = char_src[i];
}
} else {
//copy regular
for (int i =warpID; i<n; i+=32){
char_dst[i] = char_src[i];
}
}
//free(temp_buffer);
}
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
__host__ __device__ static inline void shift_remainders(QF *qf, uint64_t start_index, uint64_t
empty_index)
{
uint64_t start_block = start_index / QF_SLOTS_PER_BLOCK;
uint64_t start_offset = start_index % QF_SLOTS_PER_BLOCK;
uint64_t empty_block = empty_index / QF_SLOTS_PER_BLOCK;
uint64_t empty_offset = empty_index % QF_SLOTS_PER_BLOCK;
#if DEBUG_ASSERTS
assert (start_index <= empty_index);
assert (empty_index < qf->metadata->xnslots);
#endif
while (start_block < empty_block) {
#ifdef __CUDA_ARCH__
gpu_memmove(&get_block(qf, empty_block)->slots[1],
&get_block(qf, empty_block)->slots[0],
empty_offset * sizeof(qf->blocks[0].slots[0]));
#else
memmove(&get_block(qf, empty_block)->slots[1],
&get_block(qf, empty_block)->slots[0],
empty_offset * sizeof(qf->blocks[0].slots[0]));
#endif
get_block(qf, empty_block)->slots[0] = get_block(qf,
empty_block-1)->slots[QF_SLOTS_PER_BLOCK-1];
empty_block--;
empty_offset = QF_SLOTS_PER_BLOCK-1;
}
#ifdef __CUDA_ARCH__
gpu_memmove(&get_block(qf, empty_block)->slots[start_offset + 1],
&get_block(qf, empty_block)->slots[start_offset],
(empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]));
#else
memmove(&get_block(qf, empty_block)->slots[start_offset+1],
&get_block(qf, empty_block)->slots[start_offset],
(empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]));
#endif
}
__device__ static inline void shift_remainders_cooperative(QF *qf, uint64_t start_index, uint64_t
empty_index, int warpID)
{
uint64_t start_block = start_index / QF_SLOTS_PER_BLOCK;
uint64_t start_offset = start_index % QF_SLOTS_PER_BLOCK;
uint64_t empty_block = empty_index / QF_SLOTS_PER_BLOCK;
uint64_t empty_offset = empty_index % QF_SLOTS_PER_BLOCK;
#if DEBUG_ASSERTS
assert (start_index <= empty_index);
assert (empty_index < qf->metadata->xnslots);
#endif
while (start_block < empty_block) {
gpu_memmove_cooperative(&get_block(qf, empty_block)->slots[1],
&get_block(qf, empty_block)->slots[0],
empty_offset * sizeof(qf->blocks[0].slots[0]), warpID);
get_block(qf, empty_block)->slots[0] = get_block(qf,
empty_block-1)->slots[QF_SLOTS_PER_BLOCK-1];
empty_block--;
empty_offset = QF_SLOTS_PER_BLOCK-1;
}
gpu_memmove_cooperative(&get_block(qf, empty_block)->slots[start_offset + 1],
&get_block(qf, empty_block)->slots[start_offset],
(empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]), warpID);
}
#else
#define REMAINDER_WORD(qf, i) ((uint64_t *)&(get_block(qf, (i)/qf->metadata->bits_per_slot)->slots[8 * ((i) % qf->metadata->bits_per_slot)]))
__host__ __device__ static inline void shift_remainders(QF *qf, const uint64_t start_index, const
uint64_t empty_index)
{
uint64_t last_word = (empty_index + 1) * qf->metadata->bits_per_slot / 64;
const uint64_t first_word = start_index * qf->metadata->bits_per_slot / 64;
int bend = ((empty_index + 1) * qf->metadata->bits_per_slot) % 64;
const int bstart = (start_index * qf->metadata->bits_per_slot) % 64;
while (last_word != first_word) {
*REMAINDER_WORD(qf, last_word) = shift_into_b(*REMAINDER_WORD(qf, last_word-1),
*REMAINDER_WORD(qf, last_word),
0, bend, qf->metadata->bits_per_slot);
last_word--;
bend = 64;
}
*REMAINDER_WORD(qf, last_word) = shift_into_b(0, *REMAINDER_WORD(qf,
last_word),
bstart, bend,
qf->metadata->bits_per_slot);
}
#endif
__host__ __device__ static inline void find_next_n_empty_slots(QF *qf, uint64_t from, uint64_t n,
uint64_t *indices)
{
while (n) {
indices[--n] = find_first_empty_slot(qf, from);
from = indices[n] + 1;
}
}
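/* Example of the fill order (assumed values): with from = 50 and n = 3, three
 * successive calls to find_first_empty_slot() might return 53, 60 and 61,
 * stored as indices[2] = 53, indices[1] = 60, indices[0] = 61. The array is
 * therefore ordered from farthest to nearest, which is the order the shifting
 * code in insert_replace_slots_and_shift_remainders_and_runends_and_offsets()
 * expects (it bounds-checks empties[0], the farthest slot). */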
__host__ __device__ static inline void shift_slots(QF *qf, int64_t first, uint64_t last, uint64_t
distance)
{
int64_t i;
if (distance == 1)
shift_remainders(qf, first, last+1);
else
for (i = last; i >= first; i--)
set_slot(qf, i + distance, get_slot(qf, i));
}
__host__ __device__ static inline void shift_runends(QF *qf, int64_t first, uint64_t last,
uint64_t distance)
{
#if DEBUG_ASSERTS
assert(last < qf->metadata->xnslots && distance < 64);
#endif
uint64_t first_word = first / 64;
uint64_t bstart = first % 64;
uint64_t last_word = (last + distance + 1) / 64;
uint64_t bend = (last + distance + 1) % 64;
if (last_word != first_word) {
METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(METADATA_WORD(qf, runends, 64*(last_word-1)),
METADATA_WORD(qf, runends, 64*last_word),
0, bend, distance);
bend = 64;
last_word--;
while (last_word != first_word) {
METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(METADATA_WORD(qf, runends, 64*(last_word-1)),
METADATA_WORD(qf, runends, 64*last_word),
0, bend, distance);
last_word--;
}
}
METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(0, METADATA_WORD(qf,
runends,
64*last_word),
bstart, bend, distance);
}
__host__ __device__ static inline bool insert_replace_slots_and_shift_remainders_and_runends_and_offsets(QF *qf,
int operation,
uint64_t bucket_index,
uint64_t overwrite_index,
const uint64_t *remainders,
uint64_t total_remainders,
uint64_t noverwrites)
{
uint64_t empties[67];
uint64_t i;
int64_t j;
int64_t ninserts = total_remainders - noverwrites;
uint64_t insert_index = overwrite_index + noverwrites;
if (ninserts > 0) {
/* First, shift things to create n empty spaces where we need them. */
find_next_n_empty_slots(qf, insert_index, ninserts, empties);
if (empties[0] >= qf->metadata->xnslots) {
return false;
}
for (j = 0; j < ninserts - 1; j++)
shift_slots(qf, empties[j+1] + 1, empties[j] - 1, j + 1);
shift_slots(qf, insert_index, empties[ninserts - 1] - 1, ninserts);
for (j = 0; j < ninserts - 1; j++)
shift_runends(qf, empties[j+1] + 1, empties[j] - 1, j + 1);
shift_runends(qf, insert_index, empties[ninserts - 1] - 1, ninserts);
for (i = noverwrites; i < total_remainders - 1; i++)
METADATA_WORD(qf, runends, overwrite_index + i) &= ~(1ULL <<
(((overwrite_index
+ i) %
QF_SLOTS_PER_BLOCK)
% 64));
switch (operation) {
case 0: /* insert into empty bucket */
#if DEBUG_ASSERTS
assert (noverwrites == 0);
#endif
METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |=
1ULL << (((overwrite_index + total_remainders - 1) %
QF_SLOTS_PER_BLOCK) % 64);
break;
case 1: /* append to bucket */
METADATA_WORD(qf, runends, overwrite_index + noverwrites - 1) &=
~(1ULL << (((overwrite_index + noverwrites - 1) % QF_SLOTS_PER_BLOCK) %
64));
METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |=
1ULL << (((overwrite_index + total_remainders - 1) %
QF_SLOTS_PER_BLOCK) % 64);
break;
case 2: /* insert into bucket */
METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) &=
~(1ULL << (((overwrite_index + total_remainders - 1) %
QF_SLOTS_PER_BLOCK) % 64));
break;
default:
printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
__threadfence(); // ensure store issued before trap
asm("trap;");
#else
abort();
#endif
}
uint64_t npreceding_empties = 0;
for (i = bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empties[0]/QF_SLOTS_PER_BLOCK; i++) {
while ((int64_t)npreceding_empties < ninserts &&
empties[ninserts - 1 - npreceding_empties] / QF_SLOTS_PER_BLOCK < i)
npreceding_empties++;
if (get_block(qf, i)->offset + ninserts - npreceding_empties < BITMASK(8*sizeof(qf->blocks[0].offset)))
get_block(qf, i)->offset += ninserts - npreceding_empties;
else
get_block(qf, i)->offset = (uint8_t) BITMASK(8*sizeof(qf->blocks[0].offset));
}
}
for (i = 0; i < total_remainders; i++)
set_slot(qf, overwrite_index + i, remainders[i]);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, ninserts);
return true;
}
__host__ __device__ static inline int remove_replace_slots_and_shift_remainders_and_runends_and_offsets(QF *qf,
int operation,
uint64_t bucket_index,
uint64_t overwrite_index,
const uint64_t *remainders,
uint64_t total_remainders,
uint64_t old_length)
{
uint64_t i;
// Update the slots
for (i = 0; i < total_remainders; i++)
set_slot(qf, overwrite_index + i, remainders[i]);
// If this is the last thing in its run, then we may need to set a new runend bit
if (is_runend(qf, overwrite_index + old_length - 1)) {
if (total_remainders > 0) {
// If we're not deleting this entry entirely, then it will still be the last entry in this run
METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |= 1ULL << ((overwrite_index + total_remainders - 1) % 64);
} else if (overwrite_index > bucket_index &&
!is_runend(qf, overwrite_index - 1)) {
// If we're deleting this entry entirely, but it is not the first entry in this run,
// then set the preceding entry to be the runend
METADATA_WORD(qf, runends, overwrite_index - 1) |= 1ULL << ((overwrite_index - 1) % 64);
}
}
// shift slots back one run at a time
uint64_t original_bucket = bucket_index;
uint64_t current_bucket = bucket_index;
uint64_t current_slot = overwrite_index + total_remainders;
uint64_t current_distance = old_length - total_remainders;
int ret_current_distance = current_distance;
while (current_distance > 0) {
if (is_runend(qf, current_slot + current_distance - 1)) {
do {
current_bucket++;
} while (current_bucket < current_slot + current_distance &&
!is_occupied(qf, current_bucket));
}
if (current_bucket <= current_slot) {
set_slot(qf, current_slot, get_slot(qf, current_slot + current_distance));
if (is_runend(qf, current_slot) !=
is_runend(qf, current_slot + current_distance))
METADATA_WORD(qf, runends, current_slot) ^= 1ULL << (current_slot % 64);
current_slot++;
} else if (current_bucket <= current_slot + current_distance) {
uint64_t i;
for (i = current_slot; i < current_slot + current_distance; i++) {
set_slot(qf, i, 0);
METADATA_WORD(qf, runends, i) &= ~(1ULL << (i % 64));
}
current_distance = current_slot + current_distance - current_bucket;
current_slot = current_bucket;
} else {
current_distance = 0;
}
}
// reset the occupied bit of the hash bucket index if the hash is the
// only item in the run and is removed completely.
if (operation && !total_remainders)
METADATA_WORD(qf, occupieds, bucket_index) &= ~(1ULL << (bucket_index % 64));
// update the offset bits.
// find the number of occupied slots in the original_bucket block.
// Then find the runend slot corresponding to the last run in the
// original_bucket block.
// Update the offset of the block to which it belongs.
uint64_t original_block = original_bucket / QF_SLOTS_PER_BLOCK;
if (old_length > total_remainders) { // we only update offsets if we shift/delete anything
while (1) {
uint64_t last_occupieds_hash_index = QF_SLOTS_PER_BLOCK * original_block + (QF_SLOTS_PER_BLOCK - 1);
uint64_t runend_index = run_end(qf, last_occupieds_hash_index);
// runend spans across the block
// update the offset of the next block
if (runend_index / QF_SLOTS_PER_BLOCK == original_block) { // if the run ends in the same block
if (get_block(qf, original_block + 1)->offset == 0)
break;
get_block(qf, original_block + 1)->offset = 0;
} else { // if the last run spans across the block
if (get_block(qf, original_block + 1)->offset == (runend_index - last_occupieds_hash_index))
break;
get_block(qf, original_block + 1)->offset = (runend_index - last_occupieds_hash_index);
}
original_block++;
}
}
//int num_slots_freed = old_length - total_remainders;
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, -num_slots_freed);
/*qf->metadata->noccupied_slots -= (old_length - total_remainders);*/
if (!total_remainders) {
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, -1);
/*qf->metadata->ndistinct_elts--;*/
}
return ret_current_distance;
}
/*****************************************************************************
* Code that uses the above to implement a QF with keys and inline counters. *
*****************************************************************************/
/*
Counter format:
0 xs: <empty string>
1 x: x
2 xs: xx
3 0s: 000
>2 xs: xbc...cx for x != 0, b < x, c != 0, x
>3 0s: 0c...c00 for c != 0
*/
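/* Worked encodings under the rules above (derived examples; they assume
   bits_per_slot is large enough that the digit base does not matter for such
   small counts):
     remainder 5, count 1  ->  5
     remainder 5, count 2  ->  5 5
     remainder 5, count 3  ->  5 0 5
     remainder 0, count 3  ->  0 0 0
     remainder 5, count 7  ->  5 0 6 5   (the interior digits encode count-3 in
                                          a base of 2^bits_per_slot - 2, skipping
                                          0 and the remainder value; the leading
                                          0 is inserted because the first digit
                                          must be smaller than the remainder)
   encode_counter() below emits these digits right to left, and decode_counter()
   reverses the process. */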
__host__ __device__ static inline uint64_t *encode_counter(QF *qf, uint64_t remainder, uint64_t
counter, uint64_t *slots)
{
uint64_t digit = remainder;
uint64_t base = (1ULL << qf->metadata->bits_per_slot) - 1;
uint64_t *p = slots;
if (counter == 0)
return p;
*--p = remainder;
if (counter == 1)
return p;
if (counter == 2) {
*--p = remainder;
return p;
}
if (counter == 3 && remainder == 0) {
*--p = remainder;
*--p = remainder;
return p;
}
if (counter == 3 && remainder > 0) {
*--p = 0;
*--p = remainder;
return p;
}
if (remainder == 0)
*--p = remainder;
else
base--;
if (remainder)
counter -= 3;
else
counter -= 4;
do {
digit = counter % base;
digit++; /* Zero not allowed */
if (remainder && digit >= remainder)
digit++; /* Cannot overflow since digit is mod 2^r-2 */
*--p = digit;
counter /= base;
} while (counter);
if (remainder && digit >= remainder)
*--p = 0;
*--p = remainder;
return p;
}
/* Returns the length of the encoding.
REQUIRES: index points to first slot of a counter. */
__host__ __device__ static inline uint64_t decode_counter(const QF *qf, uint64_t index, uint64_t *remainder, uint64_t *count)
{
uint64_t base;
uint64_t rem;
uint64_t cnt;
uint64_t digit;
uint64_t end;
*remainder = rem = get_slot(qf, index);
if (is_runend(qf, index)) { /* Entire run is "0" */
*count = 1;
return index;
}
digit = get_slot(qf, index + 1);
if (is_runend(qf, index + 1)) {
*count = digit == rem ? 2 : 1;
return index + (digit == rem ? 1 : 0);
}
if (rem > 0 && digit >= rem) {
*count = digit == rem ? 2 : 1;
return index + (digit == rem ? 1 : 0);
}
if (rem > 0 && digit == 0 && get_slot(qf, index + 2) == rem) {
*count = 3;
return index + 2;
}
if (rem == 0 && digit == 0) {
if (get_slot(qf, index + 2) == 0) {
*count = 3;
return index + 2;
} else {
*count = 2;
return index + 1;
}
}
cnt = 0;
base = (1ULL << qf->metadata->bits_per_slot) - (rem ? 2 : 1);
end = index + 1;
while (digit != rem && !is_runend(qf, end)) {
if (digit > rem)
digit--;
if (digit && rem)
digit--;
cnt = cnt * base + digit;
end++;
digit = get_slot(qf, end);
}
if (rem) {
*count = cnt + 3;
return end;
}
if (is_runend(qf, end) || get_slot(qf, end + 1) != 0) {
*count = 1;
return index;
}
*count = cnt + 4;
return end + 1;
}
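/* Round-trip sketch (hypothetical run contents): if a run holds the slots
 * [5, 0, 6, 5, 9] with the runend bit on the last slot, decode_counter() at the
 * first slot returns remainder 5 with count 7 and reports index+3 (the closing
 * 5) as the end of that counter, so callers like insert() and _remove() resume
 * their scan at index+4, where the counter for remainder 9 begins. */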
/* return the next slot which corresponds to a
* different element
* */
// __device__ static inline uint64_t next_slot(QF *qf, uint64_t current)
// {
// uint64_t rem = get_slot(qf, current);
// current++;
// while (get_slot(qf, current) == rem && current <= qf->metadata->nslots) {
// current++;
// }
// return current;
// }
//code for approx inserts
__host__ __device__ static inline qf_returns insert1_if_not_exists(QF *qf, __uint64_t hash, uint8_t * value)
{
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
uint64_t compare_remainder = hash_remainder >> qf->metadata->value_bits;
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, true, runtime_lock))
return QF_COULDNT_LOCK;
}
*/
//printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder);
//approx filter has estimate of only one insert per item
// #ifdef __CUDA_ARCH__
// atomicAdd((unsigned long long *)&qf->metadata->noccupied_slots, 1ULL);
// #else
// abort();
// #endif
if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) {
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
set_slot(qf, hash_bucket_index, hash_remainder);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
} else {
uint64_t runend_index = run_end(qf, hash_bucket_index);
int operation = 0; /* Insert into empty bucket */
uint64_t insert_index = runend_index + 1;
uint64_t new_value = hash_remainder;
/* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1;
if (is_occupied(qf, hash_bucket_index)) {
/* Find the counter for this remainder if it exists. */
uint64_t current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;
//uint64_t zero_terminator = runstart_index;
/* Skip over counters for other remainders. */
while (current_remainder < compare_remainder && runstart_index <=
runend_index) {
runstart_index++;
current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;
}
/* If this is the first time we've inserted the new remainder,
and it is larger than any remainder in the run. */
if (runstart_index > runend_index) {
operation = 1;
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* This is the first time we're inserting this remainder, but
there are larger remainders already in the run. */
} else if (current_remainder != compare_remainder) {
operation = 2; /* Inserting */
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* Cases below here: we're incrementing the (simple or
extended) counter for this remainder. */
/* If there's exactly one instance of this remainder. */
} else {
//extract the stored value bits for the caller
*value = get_slot(qf, runstart_index) & BITMASK(qf->metadata->value_bits);
return QF_ITEM_FOUND;
}
} //else {
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//}
if (operation >= 0) {
uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1);
#if DROP_ON_BIG_CLUSTER
// if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
// return QF_FULL;
// }
if (qf->metadata->qf_full){
return QF_FULL;
}
if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
qf->metadata->qf_full = true;
return QF_FULL;
}
#endif
if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){
return QF_FULL;
}
if (empty_slot_index >= qf->metadata->xnslots) {
printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
return QF_FULL;
}
shift_remainders(qf, insert_index, empty_slot_index);
set_slot(qf, insert_index, new_value);
//ret_distance = insert_index - hash_bucket_index;
shift_runends(qf, insert_index, empty_slot_index-1, 1);
switch (operation) {
case 0:
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64);
break;
case 1:
METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64));
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64);
break;
case 2:
METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64));
break;
default:
printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
__threadfence(); // ensure store issued before trap
asm("trap;");
#else
abort();
#endif
}
/*
* Increment the offset for each block between the hash bucket index
* and block of the empty slot
* */
uint64_t i;
for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <=
empty_slot_index/QF_SLOTS_PER_BLOCK; i++) {
if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
assert(get_block(qf, i)->offset != 0);
#endif
}
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
}
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
}
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, true);
}
*/
return QF_ITEM_INSERTED;
}
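/* Caller-side sketch (the call site shown here is an assumption; locking and
 * hashing are handled elsewhere in this file):
 *
 *   uint8_t stored_value;
 *   qf_returns r = insert1_if_not_exists(qf, hash, &stored_value);
 *   switch (r) {
 *     case QF_ITEM_FOUND:    key already present, stored_value holds its value bits
 *     case QF_FULL:          the target cluster overflowed, back off or drop
 *     case QF_ITEM_INSERTED: the key was admitted
 *   }
 */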
__device__ static inline qf_returns insert1_if_not_exists_cooperative(QF *qf, __uint64_t hash, uint8_t * value, int warpID)
{
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
uint64_t compare_remainder = hash_remainder >> qf->metadata->value_bits;
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, true, runtime_lock))
return QF_COULDNT_LOCK;
}
*/
//printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder);
//approx filter has estimate of only one insert per item
// #ifdef __CUDA_ARCH__
// atomicAdd((unsigned long long *)&qf->metadata->noccupied_slots, 1ULL);
// #else
// abort();
// #endif
//this step can't be improved, minimum one mem check
if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) {
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
set_slot(qf, hash_bucket_index, hash_remainder);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
} else {
//maybe improve run_end, come back later and check
uint64_t runend_index = run_end(qf, hash_bucket_index);
int operation = 0; /* Insert into empty bucket */
uint64_t insert_index = runend_index + 1;
uint64_t new_value = hash_remainder;
/* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1;
if (is_occupied(qf, hash_bucket_index)) {
/* Find the counter for this remainder if it exists. */
uint64_t current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;
//uint64_t zero_terminator = runstart_index;
/* Skip over counters for other remainders. */
//we look for runstart_index <= runend and current_remainder >= compare_remainder
uint64_t my_runstart_index = runstart_index + warpID;
uint64_t my_current_remainder = get_slot(qf, my_runstart_index) >> qf->metadata->value_bits;
while(true){
//generate ballot
bool ballot = !((my_runstart_index <= runend_index) && (my_current_remainder < compare_remainder));
int warp_to_query = __ffs(__ballot_sync(0xffffffff, ballot))-1;
if (warp_to_query != -1){
//a lane found the stopping point; broadcast its runstart_index to the warp
runstart_index = __shfl_sync(0xffffffff, my_runstart_index, warp_to_query);
//exit successfully
break;
}
//if all fail retry at the next iteration
my_runstart_index+=32;
}
// while (current_remainder < compare_remainder && runstart_index <=
// runend_index) {
// runstart_index++;
// current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;
// }
//reset current remainder to be correct
current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;
/* If this is the first time we've inserted the new remainder,
and it is larger than any remainder in the run. */
if (runstart_index > runend_index) {
operation = 1;
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* This is the first time we're inserting this remainder, but
there are larger remainders already in the run. */
} else if (current_remainder != compare_remainder) {
operation = 2; /* Inserting */
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* Cases below here: we're incrementing the (simple or
extended) counter for this remainder. */
/* If there's exactly one instance of this remainder. */
} else {
//extract the stored value bits for the caller
*value = get_slot(qf, runstart_index) & BITMASK(qf->metadata->value_bits);
return QF_ITEM_FOUND;
}
} //else {
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//}
if (operation >= 0) {
uint64_t empty_slot_index;
if (warpID == 0) empty_slot_index = find_first_empty_slot(qf, runend_index+1);
//broadcast the result to the whole warp before any lane reads it below
empty_slot_index = __shfl_sync(0xffffffff, empty_slot_index, 0);
#if DROP_ON_BIG_CLUSTER
// if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
// return QF_FULL;
// }
if (qf->metadata->qf_full){
return QF_FULL;
}
if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
qf->metadata->qf_full = true;
return QF_FULL;
}
#endif
if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){
return QF_FULL;
}
empty_slot_index = __shfl_sync(0xffffffff, empty_slot_index, 0);
if (empty_slot_index >= qf->metadata->xnslots) {
printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
return QF_FULL;
}
// if (warpID == 0){
// }
//shift remainders changes - atm, none
shift_remainders_cooperative(qf, insert_index, empty_slot_index, warpID);
//set slot changes, atm, none
if (warpID == 0){
set_slot(qf, insert_index, new_value);
//ret_distance = insert_index - hash_bucket_index;
shift_runends(qf, insert_index, empty_slot_index-1, 1);
switch (operation) {
case 0:
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64);
break;
case 1:
METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64));
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64);
break;
case 2:
METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64));
break;
default:
printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
__threadfence(); // ensure store issued before trap
asm("trap;");
#else
abort();
#endif
}
/*
* Increment the offset for each block between the hash bucket index
* and block of the empty slot
* */
uint64_t i;
for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <=
empty_slot_index/QF_SLOTS_PER_BLOCK; i++) {
if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
assert(get_block(qf, i)->offset != 0);
#endif
}
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
} // end of single threaded brace
}
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
//end of the occupied-slot (else) branch
}
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, true);
}
*/
return QF_ITEM_INSERTED;
}
__host__ __device__ static inline qf_returns insert1(QF *qf, __uint64_t hash, uint8_t runtime_lock)
{
int ret_distance = 0;
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, true, runtime_lock))
return QF_COULDNT_LOCK;
}
*/
//printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder);
if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) {
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
set_slot(qf, hash_bucket_index, hash_remainder);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
ret_distance = 0;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
} else {
uint64_t runend_index = run_end(qf, hash_bucket_index);
#if DROP_ON_RUNEND
if (runend_index - hash_bucket_index >= RUNEND_CUTOFF){
//printf("Dropping\n");
return QF_FULL;
}
#endif
int operation = 0; /* Insert into empty bucket */
uint64_t insert_index = runend_index + 1;
uint64_t new_value = hash_remainder;
/* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1;
if (is_occupied(qf, hash_bucket_index)) {
/* Find the counter for this remainder if it exists. */
uint64_t current_remainder = get_slot(qf, runstart_index);
uint64_t zero_terminator = runstart_index;
/* The counter for 0 is special. */
if (current_remainder == 0) {
uint64_t t = runstart_index + 1;
while (t < runend_index && get_slot(qf, t) != 0)
t++;
if (t < runend_index && get_slot(qf, t+1) == 0)
zero_terminator = t+1; /* Three or more 0s */
else if (runstart_index < runend_index && get_slot(qf, runstart_index
+ 1) == 0)
zero_terminator = runstart_index + 1; /* Exactly two 0s */
/* Otherwise, exactly one 0 (i.e. zero_terminator == runstart_index) */
/* May read past end of run, but that's OK because loop below
can handle that */
if (hash_remainder != 0) {
runstart_index = zero_terminator + 1;
current_remainder = get_slot(qf, runstart_index);
}
}
/* Skip over counters for other remainders. */
while (current_remainder < hash_remainder && runstart_index <=
runend_index) {
/* If this remainder has an extended counter, skip over it. */
if (runstart_index < runend_index &&
get_slot(qf, runstart_index + 1) < current_remainder) {
runstart_index = runstart_index + 2;
while (runstart_index < runend_index &&
get_slot(qf, runstart_index) != current_remainder)
runstart_index++;
runstart_index++;
/* This remainder has a simple counter. */
} else {
runstart_index++;
}
/* This may read past the end of the run, but the while loop
condition will prevent us from using the invalid result in
that case. */
current_remainder = get_slot(qf, runstart_index);
}
/* If this is the first time we've inserted the new remainder,
and it is larger than any remainder in the run. */
if (runstart_index > runend_index) {
operation = 1;
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* This is the first time we're inserting this remainder, but
there are larger remainders already in the run. */
} else if (current_remainder != hash_remainder) {
operation = 2; /* Inserting */
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* Cases below here: we're incrementing the (simple or
extended) counter for this remainder. */
/* If there's exactly one instance of this remainder. */
} else if (runstart_index == runend_index ||
(hash_remainder > 0 && get_slot(qf, runstart_index + 1) >
hash_remainder) ||
(hash_remainder == 0 && zero_terminator == runstart_index)) {
operation = 2; /* Insert */
insert_index = runstart_index;
new_value = hash_remainder;
/* If there are exactly two instances of this remainder. */
} else if ((hash_remainder > 0 && get_slot(qf, runstart_index + 1) ==
hash_remainder) ||
(hash_remainder == 0 && zero_terminator == runstart_index + 1)) {
operation = 2; /* Insert */
insert_index = runstart_index + 1;
new_value = 0;
/* Special case for three 0s */
} else if (hash_remainder == 0 && zero_terminator == runstart_index + 2) {
operation = 2; /* Insert */
insert_index = runstart_index + 1;
new_value = 1;
/* There is an extended counter for this remainder. */
} else {
/* Move to the LSD of the counter. */
insert_index = runstart_index + 1;
while (get_slot(qf, insert_index+1) != hash_remainder)
insert_index++;
/* Increment the counter. */
uint64_t digit, carry;
do {
carry = 0;
digit = get_slot(qf, insert_index);
// Convert a leading 0 (which is special) to a normal encoded digit
if (digit == 0) {
digit++;
if (digit == current_remainder)
digit++;
}
// Increment the digit
digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
// Ensure digit meets our encoding requirements
if (digit == 0) {
digit++;
carry = 1;
}
if (digit == current_remainder)
digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
if (digit == 0) {
digit++;
carry = 1;
}
set_slot(qf, insert_index, digit);
insert_index--;
} while(insert_index > runstart_index && carry);
/* If the counter needs to be expanded. */
if (insert_index == runstart_index && (carry > 0 || (current_remainder
!= 0 && digit >=
current_remainder)))
{
operation = 2; /* insert */
insert_index = runstart_index + 1;
if (!carry) /* To prepend a 0 before the counter if the MSD is greater than the rem */
new_value = 0;
else if (carry) { /* Increment the new value because we don't use 0 to encode counters */
new_value = 2;
/* If the rem is greater than or equal to the new_value then fail*/
#if DEBUG_ASSERTS
if (current_remainder > 0)
assert(new_value < current_remainder);
#endif
}
} else {
operation = -1;
}
}
} //else {
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//}
if (operation >= 0) {
uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1);
#if DROP_ON_BIG_CLUSTER
// if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
// return QF_FULL;
// }
if (qf->metadata->qf_full){
return QF_FULL;
}
if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
qf->metadata->qf_full = true;
return QF_FULL;
}
#endif
if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){
return QF_FULL;
}
if (empty_slot_index >= qf->metadata->xnslots) {
printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
return QF_FULL;
}
shift_remainders(qf, insert_index, empty_slot_index);
set_slot(qf, insert_index, new_value);
ret_distance = insert_index - hash_bucket_index;
shift_runends(qf, insert_index, empty_slot_index-1, 1);
switch (operation) {
case 0:
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64);
break;
case 1:
METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64));
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64);
break;
case 2:
METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64));
break;
default:
printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
__threadfence(); // ensure store issued before trap
asm("trap;");
#else
abort();
#endif
}
/*
* Increment the offset for each block between the hash bucket index
* and block of the empty slot
* */
uint64_t i;
for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <=
empty_slot_index/QF_SLOTS_PER_BLOCK; i++) {
if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
assert(get_block(qf, i)->offset != 0);
#endif
}
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
}
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
}
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, true);
}
*/
//return ret_distance;
return QF_ITEM_INSERTED;
}
__device__ static inline int insert1_cooperative(QF *qf, __uint64_t hash, uint8_t runtime_lock, int warpID)
{
int ret_distance = 0;
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, true, runtime_lock))
return QF_COULDNT_LOCK;
}
*/
//printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder);
//this checks whether the slot is empty, i.e. a direct insert:
//no memmove required and no warp-level cooperation worth exploiting
if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) {
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
set_slot(qf, hash_bucket_index, hash_remainder);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
ret_distance = 0;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
} else {
//slot was occupied
//this path may be optimizable, but the performance requirements are not yet pinned down
uint64_t runend_index = run_end(qf, hash_bucket_index);
int operation = 0; /* Insert into empty bucket */
uint64_t insert_index = runend_index + 1;
uint64_t new_value = hash_remainder;
/* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1;
if (is_occupied(qf, hash_bucket_index)) {
/* Find the counter for this remainder if it exists. */
uint64_t current_remainder = get_slot(qf, runstart_index);
uint64_t zero_terminator = runstart_index;
/* The counter for 0 is special. */
//this logic can't be optimized
if (current_remainder == 0) {
uint64_t t = runstart_index + 1;
while (t < runend_index && get_slot(qf, t) != 0)
t++;
if (t < runend_index && get_slot(qf, t+1) == 0)
zero_terminator = t+1; /* Three or more 0s */
else if (runstart_index < runend_index && get_slot(qf, runstart_index
+ 1) == 0)
zero_terminator = runstart_index + 1; /* Exactly two 0s */
/* Otherwise, exactly one 0 (i.e. zero_terminator == runstart_index) */
/* May read past end of run, but that's OK because loop below
can handle that */
if (hash_remainder != 0) {
runstart_index = zero_terminator + 1;
current_remainder = get_slot(qf, runstart_index);
}
}
//TODO: this scan could be parallelized across the warp, but the extended counter
//encoding makes the partitioning tricky; the cooperative probe sketched here is
//unfinished (my_runstart_index / my_current_remainder are currently unused) and
//the serial loop below does the real work.
uint64_t my_runstart_index = runstart_index+warpID;
uint64_t my_current_remainder = get_slot(qf, my_runstart_index);
//each lane would take one of 32 consecutive slots and compare its remainder:
//smaller than ours means keep scanning, equal is a hit, larger means the insertion
//point was passed, so on a well-formed run there is a single dividing line.
if (my_runstart_index <= runend_index){
}
/* Skip over counters for other remainders. */
while (current_remainder < hash_remainder && runstart_index <=
runend_index) {
/* If this remainder has an extended counter, skip over it. */
if (runstart_index < runend_index &&
get_slot(qf, runstart_index + 1) < current_remainder) {
//if the current slot < current remainder
//a
runstart_index = runstart_index + 2;
while (runstart_index < runend_index &&
get_slot(qf, runstart_index) != current_remainder)
runstart_index++;
runstart_index++;
/* This remainder has a simple counter. */
} else {
runstart_index++;
}
/* This may read past the end of the run, but the while loop
condition will prevent us from using the invalid result in
that case. */
current_remainder = get_slot(qf, runstart_index);
}
/* If this is the first time we've inserted the new remainder,
and it is larger than any remainder in the run. */
if (runstart_index > runend_index) {
operation = 1;
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* This is the first time we're inserting this remainder, but
there are larger remainders already in the run. */
} else if (current_remainder != hash_remainder) {
operation = 2; /* Inserting */
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* Cases below here: we're incrementing the (simple or
extended) counter for this remainder. */
/* If there's exactly one instance of this remainder. */
} else if (runstart_index == runend_index ||
(hash_remainder > 0 && get_slot(qf, runstart_index + 1) >
hash_remainder) ||
(hash_remainder == 0 && zero_terminator == runstart_index)) {
operation = 2; /* Insert */
insert_index = runstart_index;
new_value = hash_remainder;
/* If there are exactly two instances of this remainder. */
} else if ((hash_remainder > 0 && get_slot(qf, runstart_index + 1) ==
hash_remainder) ||
(hash_remainder == 0 && zero_terminator == runstart_index + 1)) {
operation = 2; /* Insert */
insert_index = runstart_index + 1;
new_value = 0;
/* Special case for three 0s */
} else if (hash_remainder == 0 && zero_terminator == runstart_index + 2) {
operation = 2; /* Insert */
insert_index = runstart_index + 1;
new_value = 1;
/* There is an extended counter for this remainder. */
} else {
/* Move to the LSD of the counter. */
insert_index = runstart_index + 1;
while (get_slot(qf, insert_index+1) != hash_remainder)
insert_index++;
/* Increment the counter. */
uint64_t digit, carry;
do {
carry = 0;
digit = get_slot(qf, insert_index);
// Convert a leading 0 (which is special) to a normal encoded digit
if (digit == 0) {
digit++;
if (digit == current_remainder)
digit++;
}
// Increment the digit
digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
// Ensure digit meets our encoding requirements
if (digit == 0) {
digit++;
carry = 1;
}
if (digit == current_remainder)
digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
if (digit == 0) {
digit++;
carry = 1;
}
set_slot(qf, insert_index, digit);
insert_index--;
} while(insert_index > runstart_index && carry);
/* If the counter needs to be expanded. */
if (insert_index == runstart_index && (carry > 0 || (current_remainder
!= 0 && digit >=
current_remainder)))
{
operation = 2; /* insert */
insert_index = runstart_index + 1;
if (!carry) /* To prepend a 0 before the counter if the MSD is greater than the rem */
new_value = 0;
else if (carry) { /* Increment the new value because we don't use 0 to encode counters */
new_value = 2;
/* If the rem is greater than or equal to the new_value then fail*/
#if DEBUG_ASSERTS
if (current_remainder > 0)
assert(new_value < current_remainder);
#endif
}
} else {
operation = -1;
}
}
} //else {
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//}
if (operation >= 0) {
uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1);
#if DROP_ON_BIG_CLUSTER
// if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
// return QF_FULL;
// }
if (qf->metadata->qf_full){
return QF_FULL;
}
if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
qf->metadata->qf_full = true;
return QF_FULL;
}
#endif
if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){
return QF_FULL;
}
if (empty_slot_index >= qf->metadata->xnslots) {
printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
return QF_FULL;
}
shift_remainders(qf, insert_index, empty_slot_index);
set_slot(qf, insert_index, new_value);
ret_distance = insert_index - hash_bucket_index;
shift_runends(qf, insert_index, empty_slot_index-1, 1);
switch (operation) {
case 0:
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64);
break;
case 1:
METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64));
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64);
break;
case 2:
METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64));
break;
default:
printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
__threadfence(); // ensure store issued before trap
asm("trap;");
#else
abort();
#endif
}
/*
* Increment the offset for each block between the hash bucket index
* and block of the empty slot
* */
uint64_t i;
for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <=
empty_slot_index/QF_SLOTS_PER_BLOCK; i++) {
if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
assert(get_block(qf, i)->offset != 0);
#endif
}
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
}
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
}
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, true);
}
*/
return ret_distance;
}
__host__ __device__ static inline qf_returns insert(QF *qf, __uint64_t hash, uint64_t count, uint8_t
runtime_lock)
{
int ret_distance = 0;
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
/*uint64_t hash_bucket_lock_offset = hash_bucket_index % NUM_SLOTS_TO_LOCK;*/
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, false, runtime_lock))
return QF_COULDNT_LOCK;
}
*/
uint64_t runend_index = run_end(qf, hash_bucket_index);
/* Empty slot */
if (might_be_empty(qf, hash_bucket_index) && runend_index ==
hash_bucket_index) {
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
set_slot(qf, hash_bucket_index, hash_remainder);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
//ERIC TODO: see if this metadata is needed--probably isn't compatible with GPU
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
/* This trick will, I hope, keep the fast case fast. */
if (count > 1) {
insert(qf, hash, count - 1, QF_NO_LOCK);
}
} else { /* Non-empty slot */
uint64_t new_values[67];
int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,hash_bucket_index- 1) + 1;
bool ret;
if (!is_occupied(qf, hash_bucket_index)) { /* Empty bucket, but its slot is occupied. */
uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]);
ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf, 0, hash_bucket_index, runstart_index, p, &new_values[67] - p, 0);
if (!ret)
return QF_FULL;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
ret_distance = runstart_index - hash_bucket_index;
} else { /* Non-empty bucket */
uint64_t current_remainder, current_count, current_end;
/* Find the counter for this remainder, if one exists. */
current_end = decode_counter(qf, runstart_index, ¤t_remainder,¤t_count);
while (current_remainder < hash_remainder && !is_runend(qf, current_end)) {
runstart_index = current_end + 1;
current_end = decode_counter(qf, runstart_index, ¤t_remainder,
¤t_count);
}
/* If we reached the end of the run w/o finding a counter for this remainder,
then append a counter for this remainder to the run. */
if (current_remainder < hash_remainder) {
uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]);
ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf, 1, /* Append to bucket */hash_bucket_index, current_end + 1, p, &new_values[67] - p, 0);
if (!ret)
return QF_FULL;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
ret_distance = (current_end + 1) - hash_bucket_index;
/* Found a counter for this remainder. Add in the new count. */
} else if (current_remainder == hash_remainder) {
uint64_t *p = encode_counter(qf, hash_remainder, current_count + count, &new_values[67]);
ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
is_runend(qf, current_end) ? 1 : 2,
hash_bucket_index,
runstart_index,
p,
&new_values[67] - p,
current_end - runstart_index + 1);
if (!ret)
return QF_FULL;
ret_distance = runstart_index - hash_bucket_index;
/* No counter for this remainder, but there are larger
remainders, so we're not appending to the bucket. */
} else {
uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]);
ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
2, /* Insert to bucket */
hash_bucket_index,
runstart_index,
p,
&new_values[67] - p,
0);
if (!ret)
return QF_FULL;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
ret_distance = runstart_index - hash_bucket_index;
}
}
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
//modify_metadata(&qf->runtimedata->pc_nelts, count);
}
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, false);
}
*/
//return ret_distance;
return QF_ITEM_INSERTED;
}
__host__ __device__ inline static int _remove(QF *qf, __uint64_t hash, uint64_t count, uint8_t
runtime_lock)
{
int ret_numfreedslots = 0;
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t current_remainder, current_count, current_end;
uint64_t new_values[67];
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, false, runtime_lock))
return -2;
}
*/
/* Empty bucket */
if (!is_occupied(qf, hash_bucket_index))
return -1;
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index - 1) + 1;
uint64_t original_runstart_index = runstart_index;
int only_item_in_the_run = 0;
/* Find the counter for this remainder, if one exists. */
current_end = decode_counter(qf, runstart_index, ¤t_remainder, ¤t_count);
while (current_remainder < hash_remainder && !is_runend(qf, current_end)) {
runstart_index = current_end + 1;
current_end = decode_counter(qf, runstart_index, ¤t_remainder, ¤t_count);
}
/* remainder not found in the given run */
if (current_remainder != hash_remainder)
return -1;
if (original_runstart_index == runstart_index && is_runend(qf, current_end))
only_item_in_the_run = 1;
/* encode the new counter */
uint64_t *p = encode_counter(qf, hash_remainder,
count > current_count ? 0 : current_count - count,
&new_values[67]);
ret_numfreedslots = remove_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
only_item_in_the_run,
hash_bucket_index,
runstart_index,
p,
&new_values[67] - p,
current_end - runstart_index + 1);
// update the nelements.
//modify_metadata(&qf->runtimedata->pc_nelts, -count);
/*qf->metadata->nelts -= count;*/
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, false);
}
*/
return ret_numfreedslots;
}
/***********************************************************************
* Code that uses the above to implement key-value-counter operations. *
***********************************************************************/
__host__ uint64_t qf_init(QF *qf, uint64_t nslots, uint64_t key_bits, uint64_t value_bits,
enum qf_hashmode hash, uint32_t seed, void* buffer, uint64_t
buffer_len)
{
uint64_t num_slots, xnslots, nblocks;
uint64_t key_remainder_bits, bits_per_slot;
uint64_t size;
uint64_t total_num_bytes;
assert(popcnt(nslots) == 1); /* nslots must be a power of 2 */
num_slots = nslots;
xnslots = nslots + 10*sqrt((double)nslots);
nblocks = (xnslots + QF_SLOTS_PER_BLOCK - 1) / QF_SLOTS_PER_BLOCK;
key_remainder_bits = key_bits;
while (nslots > 1 && key_remainder_bits > 0) {
key_remainder_bits--;
nslots >>= 1;
}
assert(key_remainder_bits >= 2);
bits_per_slot = key_remainder_bits + value_bits;
assert (QF_BITS_PER_SLOT == 0 || QF_BITS_PER_SLOT == bits_per_slot);
assert(bits_per_slot > 1);
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
size = nblocks * sizeof(qfblock);
#else
size = nblocks * (sizeof(qfblock) + QF_SLOTS_PER_BLOCK * bits_per_slot / 8);
#endif
total_num_bytes = sizeof(qfmetadata) + size;
if (buffer == NULL || total_num_bytes > buffer_len)
return total_num_bytes;
// memset(buffer, 0, total_num_bytes);
qf->metadata = (qfmetadata *)(buffer);
qf->blocks = (qfblock *)(qf->metadata + 1);
qf->metadata->magic_endian_number = MAGIC_NUMBER;
qf->metadata->reserved = 0;
qf->metadata->hash_mode = hash;
qf->metadata->total_size_in_bytes = size;
qf->metadata->seed = seed;
qf->metadata->nslots = num_slots;
qf->metadata->xnslots = xnslots;
qf->metadata->key_bits = key_bits;
qf->metadata->value_bits = value_bits;
qf->metadata->key_remainder_bits = key_remainder_bits;
qf->metadata->bits_per_slot = bits_per_slot;
qf->metadata->range = qf->metadata->nslots;
qf->metadata->range <<= qf->metadata->key_remainder_bits;
qf->metadata->nblocks = (qf->metadata->xnslots + QF_SLOTS_PER_BLOCK - 1) /
QF_SLOTS_PER_BLOCK;
qf->metadata->nelts = 0;
qf->metadata->ndistinct_elts = 0;
qf->metadata->noccupied_slots = 0;
qf->metadata->qf_full = false;
qf->runtimedata->num_locks = ((qf->metadata->xnslots/NUM_SLOTS_TO_LOCK)+2);
pc_init(&qf->runtimedata->pc_nelts, (int64_t*)&qf->metadata->nelts, 8, 100);
pc_init(&qf->runtimedata->pc_ndistinct_elts, (int64_t*)&qf->metadata->ndistinct_elts, 8, 100);
pc_init(&qf->runtimedata->pc_noccupied_slots, (int64_t*)&qf->metadata->noccupied_slots, 8, 100);
/* initialize container resize */
qf->runtimedata->auto_resize = 0;
qf->runtimedata->container_resize = qf_resize_malloc;
/* initialize all the locks to 0 */
qf->runtimedata->metadata_lock = 0;
//etodo: copy this to GPU
qf->runtimedata->locks = (uint16_t *)calloc(qf->runtimedata->num_locks, sizeof(uint16_t));
if (qf->runtimedata->locks == NULL) {
perror("Couldn't allocate memory for runtime locks.");
exit(EXIT_FAILURE);
}
#ifdef LOG_WAIT_TIME
qf->runtimedata->wait_times = (wait_time_data*
)calloc(qf->runtimedata->num_locks+1,
sizeof(wait_time_data));
if (qf->runtimedata->wait_times == NULL) {
perror("Couldn't allocate memory for runtime wait_times.");
exit(EXIT_FAILURE);
}
#endif
return total_num_bytes;
}
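/* Worked example for the sizing above (illustrative numbers, not from the
 * original source): with nslots = 1 << 20, key_bits = 28 and value_bits = 0,
 * the loop strips 20 bits, leaving key_remainder_bits = 8 and bits_per_slot = 8.
 * Assuming the generic block layout with QF_SLOTS_PER_BLOCK = 64, each block
 * then carries 64 bytes of remainders plus its metadata, and the function
 * returns sizeof(qfmetadata) + nblocks * (sizeof(qfblock) + 64) bytes. */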
__host__ uint64_t qf_use(QF* qf, void* buffer, uint64_t buffer_len)
{
qf->metadata = (qfmetadata *)(buffer);
if (qf->metadata->total_size_in_bytes + sizeof(qfmetadata) > buffer_len) {
return qf->metadata->total_size_in_bytes + sizeof(qfmetadata);
}
qf->blocks = (qfblock *)(qf->metadata + 1);
qf->runtimedata = (qfruntime *)calloc(sizeof(qfruntime), 1);
if (qf->runtimedata == NULL) {
perror("Couldn't allocate memory for runtime data.");
exit(EXIT_FAILURE);
}
/* initialize all the locks to 0 */
qf->runtimedata->metadata_lock = 0;
qf->runtimedata->locks = (uint16_t *)calloc(qf->runtimedata->num_locks,
sizeof(uint16_t));
if (qf->runtimedata->locks == NULL) {
perror("Couldn't allocate memory for runtime locks.");
exit(EXIT_FAILURE);
}
#ifdef LOG_WAIT_TIME
qf->runtimedata->wait_times = (wait_time_data*
)calloc(qf->runtimedata->num_locks+1,
sizeof(wait_time_data));
if (qf->runtimedata->wait_times == NULL) {
perror("Couldn't allocate memory for runtime wait_times.");
exit(EXIT_FAILURE);
}
#endif
return sizeof(qfmetadata) + qf->metadata->total_size_in_bytes;
}
__host__ void *qf_destroy(QF *qf)
{
assert(qf->runtimedata != NULL);
if (qf->runtimedata->locks != NULL)
free((void*)qf->runtimedata->locks);
if (qf->runtimedata->wait_times != NULL)
free(qf->runtimedata->wait_times);
if (qf->runtimedata->f_info.filepath != NULL)
free(qf->runtimedata->f_info.filepath);
free(qf->runtimedata);
return (void*)qf->metadata;
}
__host__ bool qf_malloc(QF *qf, uint64_t nslots, uint64_t key_bits, uint64_t
value_bits, enum qf_hashmode hash, bool on_device, uint32_t seed)
{
uint64_t total_num_bytes = qf_init(qf, nslots, key_bits, value_bits,
hash, seed, NULL, 0);
	//TODO: is a plain host malloc the right buffer allocation here?
	void* buffer = malloc(total_num_bytes);
	if (buffer == NULL) {
		perror("Couldn't allocate memory for the CQF.");
		exit(EXIT_FAILURE);
	}
	memset(buffer, 0, total_num_bytes);
	printf("QF bytes: %llu\n", (unsigned long long) total_num_bytes);
qf->runtimedata = (qfruntime*)calloc(sizeof(qfruntime), 1);
if (qf->runtimedata == NULL) {
perror("Couldn't allocate memory for runtime data.");
exit(EXIT_FAILURE);
}
uint64_t init_size = qf_init(qf, nslots, key_bits, value_bits, hash, seed,
buffer, total_num_bytes);
	if (init_size == total_num_bytes)
		return true;
	else
		return false;
}
__host__ bool qf_free(QF *qf)
{
assert(qf->metadata != NULL);
void *buffer = qf_destroy(qf);
if (buffer != NULL) {
free(buffer);
return true;
}
return false;
}
//consolidate all of the device construction into one convenient func!
__host__ void qf_malloc_device(QF** qf, int nbits, bool bulk_config){
//bring in compile #define
int rbits = 8;
int vbits = 0;
QF host_qf;
QF temp_device_qf;
QF* temp_dev_ptr;
uint64_t nslots = 1ULL << nbits;
int num_hash_bits = nbits+rbits;
qf_malloc(&host_qf, nslots, num_hash_bits, vbits, QF_HASH_INVERTIBLE, false, 0);
qf_set_auto_resize(&host_qf, false);
qfruntime* _runtime;
qfmetadata* _metadata;
qfblock* _blocks;
uint16_t * dev_locks;
uint64_t ** buffers;
uint64_t * buffer_sizes;
if (bulk_config){
uint64_t num_locks = host_qf.runtimedata->num_locks;
//allocate 1 lock so that hipFree doesn't break later
hipMalloc((void ** )&dev_locks, 1 * sizeof(uint16_t));
//are these 2x necessary?
hipMalloc((void **) & buffer_sizes, 2*num_locks*sizeof(uint64_t));
hipMalloc((void **)&buffers, 2*num_locks*sizeof(uint64_t*));
} else {
//point API, multiply locks
hipMalloc((void ** )&dev_locks, host_qf.runtimedata->num_locks*LOCK_DIST * sizeof(uint16_t));
hipMemset(dev_locks, 0, host_qf.runtimedata->num_locks*LOCK_DIST * sizeof(uint16_t));
hipMalloc((void **) & buffer_sizes, 1*sizeof(uint64_t));
hipMalloc((void **)&buffers, 1*sizeof(uint64_t*));
}
//wipe and replace
free(host_qf.runtimedata->locks);
host_qf.runtimedata->locks = dev_locks;
hipMalloc((void**)&_runtime, sizeof(qfruntime));
hipMalloc((void**)&_metadata, sizeof(qfmetadata));
hipMalloc((void**)&_blocks, qf_get_total_size_in_bytes(&host_qf));
//uint64_t num_locks = host_qf.runtimedata->num_locks;
//insert these into host_qf so dev qf has access.
//they don't need to be wiped as buffers are reset before every insert.
host_qf.runtimedata->buffers = buffers;
host_qf.runtimedata->buffer_sizes = buffer_sizes;
hipMemcpy(_runtime, host_qf.runtimedata, sizeof(qfruntime), hipMemcpyHostToDevice);
hipMemcpy(_metadata, host_qf.metadata, sizeof(qfmetadata), hipMemcpyHostToDevice);
hipMemcpy(_blocks, host_qf.blocks, qf_get_total_size_in_bytes(&host_qf), hipMemcpyHostToDevice);
temp_device_qf.runtimedata = _runtime;
temp_device_qf.metadata = _metadata;
temp_device_qf.blocks = _blocks;
//this might be buggy
//request to fill the dev ptr with a QF, then copy over, then copy that to qf
hipMalloc((void **)&temp_dev_ptr, sizeof(QF));
hipMemcpy(temp_dev_ptr, &temp_device_qf, sizeof(QF), hipMemcpyHostToDevice);
*qf = temp_dev_ptr;
}
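/* Note on the layout produced above (a descriptive sketch inferred from the code,
 * not original documentation): the pointer written to *qf lives in device memory
 * and references device-side copies of the runtime data, metadata, and blocks,
 * so it can be handed directly to kernels or to the bulk_insert / bulk_get /
 * point_bulk_* wrappers below. With bulk_config == true only the per-region
 * buffer arrays are allocated; with bulk_config == false the strided point-API
 * locks are allocated instead. See the usage sketch after the bulk_get wrappers
 * further down for an end-to-end example. */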
//TODO: make me destroy buffers modifiable
__host__ void qf_destroy_device(QF * qf){
QF * host_qf;
hipHostMalloc((void ** )&host_qf, sizeof(QF));
hipMemcpy(host_qf, qf, sizeof(QF), hipMemcpyDeviceToHost);
qfruntime* _runtime;
hipHostMalloc((void **) &_runtime, sizeof(qfruntime));
hipMemcpy(_runtime, host_qf->runtimedata, sizeof(qfruntime), hipMemcpyDeviceToHost);
	//may need to have _runtime shunted into another host object
	//I'll synchronize before this to double check
assert(_runtime != NULL);
if (_runtime->locks != NULL)
hipFree(_runtime->locks);
if (_runtime->buffers != NULL){
hipFree(_runtime->buffers);
hipFree(_runtime->buffer_sizes);
}
if (_runtime->wait_times != NULL)
hipFree(_runtime->wait_times);
//this one may break
if (_runtime->f_info.filepath != NULL)
hipFree(host_qf->runtimedata->f_info.filepath);
hipFree(host_qf->runtimedata);
hipFree(host_qf->metadata);
hipFree(host_qf->blocks);
hipHostFree(host_qf);
hipHostFree(_runtime);
}
__host__ void qf_copy(QF *dest, const QF *src)
{
DEBUG_CQF("%s\n","Source CQF");
DEBUG_DUMP(src);
memcpy(dest->runtimedata, src->runtimedata, sizeof(qfruntime));
memcpy(dest->metadata, src->metadata, sizeof(qfmetadata));
memcpy(dest->blocks, src->blocks, src->metadata->total_size_in_bytes);
DEBUG_CQF("%s\n","Destination CQF after copy.");
DEBUG_DUMP(dest);
}
__host__ void qf_reset(QF *qf)
{
qf->metadata->nelts = 0;
qf->metadata->ndistinct_elts = 0;
qf->metadata->noccupied_slots = 0;
#ifdef LOG_WAIT_TIME
memset(qf->wait_times, 0,
(qf->runtimedata->num_locks+1)*sizeof(wait_time_data));
#endif
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
memset(qf->blocks, 0, qf->metadata->nblocks* sizeof(qfblock));
#else
memset(qf->blocks, 0, qf->metadata->nblocks*(sizeof(qfblock) + QF_SLOTS_PER_BLOCK *
qf->metadata->bits_per_slot / 8));
#endif
}
__host__ int64_t qf_resize_malloc(QF *qf, uint64_t nslots)
{
QF new_qf;
if (!qf_malloc(&new_qf, nslots, qf->metadata->key_bits,
qf->metadata->value_bits, qf->metadata->hash_mode,
false, qf->metadata->seed))
return -1;
if (qf->runtimedata->auto_resize) qf_set_auto_resize(&new_qf, true);
// copy keys from qf into new_qf
QFi qfi;
qf_iterator_from_position(qf, &qfi, 0);
int64_t ret_numkeys = 0;
do {
uint64_t key, value, count;
qfi_get_hash(&qfi, &key, &value, &count);
qfi_next(&qfi);
int ret = qf_insert(&new_qf, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH);
if (ret < 0) {
printf("Failed to insert key: %ld into the new CQF.\n", key);
return ret;
}
ret_numkeys++;
} while(!qfi_end(&qfi));
qf_free(qf);
memcpy(qf, &new_qf, sizeof(QF));
return ret_numkeys;
}
uint64_t qf_resize(QF* qf, uint64_t nslots, void* buffer, uint64_t buffer_len)
{
printf("QF attempting resize - This will fail\n");
QF new_qf;
new_qf.runtimedata = (qfruntime *)calloc(sizeof(qfruntime), 1);
if (new_qf.runtimedata == NULL) {
perror("Couldn't allocate memory for runtime data.\n");
exit(EXIT_FAILURE);
}
uint64_t init_size = qf_init(&new_qf, nslots, qf->metadata->key_bits,
qf->metadata->value_bits,
qf->metadata->hash_mode, qf->metadata->seed,
buffer, buffer_len);
if (init_size > buffer_len)
return init_size;
if (qf->runtimedata->auto_resize)
qf_set_auto_resize(&new_qf, true);
// copy keys from qf into new_qf
QFi qfi;
qf_iterator_from_position(qf, &qfi, 0);
do {
uint64_t key, value, count;
qfi_get_hash(&qfi, &key, &value, &count);
qfi_next(&qfi);
int ret = qf_insert(&new_qf, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH);
if (ret < 0) {
printf("Failed to insert key: %ld into the new CQF.\n", key);
abort(); // kill kernel with error
}
} while(!qfi_end(&qfi));
qf_free(qf);
memcpy(qf, &new_qf, sizeof(QF));
return init_size;
}
__host__ void qf_set_auto_resize(QF* qf, bool enabled)
{
if (enabled)
qf->runtimedata->auto_resize = 1;
else
qf->runtimedata->auto_resize = 0;
}
__host__ __device__ qf_returns qf_insert_not_exists(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
flags, uint8_t * retvalue)
{
// We fill up the CQF up to 95% load factor.
// This is a very conservative check.
//TODO: GPU resizing
/*
if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) {
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0)
{
fprintf(stderr, "Resizing the failed.\n");
return QF_FULL;
}
} else
return QF_FULL;
}
*/
// if (count == 0)
// return 0;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
//printf("Inside insert, new hash is recorded as %llu\n", hash);
qf_returns ret;
if (count == 1)
ret = insert1_if_not_exists(qf, hash, retvalue);
//for now count is always 1
//else
//ret = insert(qf, hash, count, flags);
// check for fullness based on the distance from the home slot to the slot
// in which the key is inserted
/*
if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) {
float load_factor = qf_get_num_occupied_slots(qf) /
(float)qf->metadata->nslots;
fprintf(stdout, "Load factor: %lf\n", load_factor);
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0)
{
if (ret == QF_FULL) {
if (count == 1)
ret = insert1(qf, hash, flags);
else
ret = insert(qf, hash, count, flags);
}
fprintf(stderr, "Resize finished.\n");
} else {
fprintf(stderr, "Resize failed\n");
ret = QF_FULL;
}
} else {
fprintf(stderr, "The CQF is filling up.\n");
ret = QF_FULL;
}
}
*/
return ret;
}
__device__ qf_returns qf_insert_not_exists_cooperative(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
flags, uint8_t * retvalue, int warpID)
{
// We fill up the CQF up to 95% load factor.
// This is a very conservative check.
//TODO: GPU resizing
/*
if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) {
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0)
{
fprintf(stderr, "Resizing the failed.\n");
return QF_FULL;
}
} else
return QF_FULL;
}
*/
// if (count == 0)
// return 0;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
//printf("Inside insert, new hash is recorded as %llu\n", hash);
qf_returns ret;
if (count == 1)
ret = insert1_if_not_exists_cooperative(qf, hash, retvalue, warpID);
//for now count is always 1
//else
//ret = insert(qf, hash, count, flags);
// check for fullness based on the distance from the home slot to the slot
// in which the key is inserted
/*
if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) {
float load_factor = qf_get_num_occupied_slots(qf) /
(float)qf->metadata->nslots;
fprintf(stdout, "Load factor: %lf\n", load_factor);
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0)
{
if (ret == QF_FULL) {
if (count == 1)
ret = insert1(qf, hash, flags);
else
ret = insert(qf, hash, count, flags);
}
fprintf(stderr, "Resize finished.\n");
} else {
fprintf(stderr, "Resize failed\n");
ret = QF_FULL;
}
} else {
fprintf(stderr, "The CQF is filling up.\n");
ret = QF_FULL;
}
}
*/
return ret;
}
__host__ __device__ qf_returns qf_insert(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
flags)
{
// We fill up the CQF up to 95% load factor.
// This is a very conservative check.
//TODO: GPU resizing
/*
if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) {
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0)
{
fprintf(stderr, "Resizing the failed.\n");
return QF_FULL;
}
} else
return QF_FULL;
}
*/
if (count == 0)
return QF_ITEM_INSERTED;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
//printf("Inside insert, new hash is recorded as %llu\n", hash);
qf_returns ret;
if (count == 1){
ret = insert1(qf, hash, flags);
}
else {
ret = insert(qf, hash, count, flags);
}
// check for fullness based on the distance from the home slot to the slot
// in which the key is inserted
/*
if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) {
float load_factor = qf_get_num_occupied_slots(qf) /
(float)qf->metadata->nslots;
fprintf(stdout, "Load factor: %lf\n", load_factor);
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0)
{
if (ret == QF_FULL) {
if (count == 1)
ret = insert1(qf, hash, flags);
else
ret = insert(qf, hash, count, flags);
}
fprintf(stderr, "Resize finished.\n");
} else {
fprintf(stderr, "Resize failed\n");
ret = QF_FULL;
}
} else {
fprintf(stderr, "The CQF is filling up.\n");
ret = QF_FULL;
}
}
*/
return ret;
}
/*------------------------
GPU Modifications
--------------------------*/
//approx filter locking code
//locking implementation for the 16 bit locks
//undefined behavior if you try to unlock a lock that is not currently held
__device__ void lock_16(uint16_t * lock, uint64_t index){
uint16_t zero = 0;
uint16_t one = 1;
while (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) != zero)
;
}
__device__ void lock_16_coop(uint16_t * lock, uint64_t index, int warpID){
uint16_t zero = 0;
uint16_t one = 1;
if (warpID ==0){
while (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) != zero)
;
}
__syncwarp();
}
__device__ void unlock_16(uint16_t * lock, uint64_t index){
uint16_t zero = 0;
uint16_t one = 1;
atomicCAS((uint16_t *) &lock[index*LOCK_DIST], one, zero);
}
//non-blocking variant of lock_16, built to be used as the condition of a while loop
// this is more in line with traditional CUDA processing and may increase throughput
__device__ bool try_lock_16(uint16_t * lock, uint64_t index){
uint16_t zero = 0;
uint16_t one = 1;
if (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) == zero){
return true;
}
return false;
}
__device__ bool try_lock_16_coop(uint16_t * lock, uint64_t index, int warpID){
uint16_t zero = 0;
uint16_t one = 1;
bool ballot = 0;
if (warpID == 0){
if (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) == zero){
ballot = 1;
}
}
ballot = __shfl_sync(0xffffffff, ballot, 0);
return ballot;
}
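//Locking pattern used by the point_* functions further below (a sketch of the
//intent as inferred from the code): a thread spins with try_lock_16 on the lock
//covering its home region, then takes the next region's lock as well, since an
//insert may shift remainders across the boundary; both locks are released in
//reverse order after a __threadfence().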
__device__ __forceinline__ void exchange(uint64_t * arr, uint64_t i, uint64_t j){
uint64_t temp = arr[i];
arr[i] = arr[j];
arr[j] = temp;
	//maybe __syncthreads()?
}
__device__ __forceinline__ void compare(uint64_t * arr, uint64_t i, uint64_t j, bool dir){
if (dir == (arr[i] > arr[j])){
exchange(arr, i, j);
}
}
//return the index of the highest set bit of a uint64_t
__device__ __forceinline__ int biggest_bit(uint64_t n){
return 63 - __clzll((unsigned long long int) n);
}
__device__ __forceinline__ uint64_t biggest_pow_2(uint64_t n){
	//note: shift binds looser than subtraction, so this evaluates as
	//1UL << (biggest_bit(n) - 1); parenthesized here to make that explicit
	return 1UL << (biggest_bit(n) - 1);
}
__global__ void hash_all(QF* qf, uint64_t* vals, uint64_t* hashes, uint64_t nvals, uint8_t flags) {
uint64_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nvals){
return;
}
uint64_t key = vals[idx];
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) & (qf->metadata->range - 1);
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
//uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
hashes[idx] = key;
return;
}
//revised work pipeline
// 1) set each buffer pointer to keys + relative offset here - skips the launch call later - TODO: double check that (keys + offset) - keys == offset. -- cpp says this works
// 2) subtract adjacent buffer pointers from each other to get the per-bucket counts - the last bucket's count is num_keys minus its offset from the origin pointer
// this means that the buffer pointers here are set to point to the START of their bucket
__global__ void set_buffers_binary(QF * qf, uint64_t num_keys, uint64_t * keys, uint8_t flags){
uint64_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= qf->runtimedata->num_locks) return;
uint64_t slots_per_lock = NUM_SLOTS_TO_LOCK;
	//since we are finding all boundaries, we only need one binary search per lock region
//printf("idx %llu\n", idx);
//this sounds right? - they divide to go back so I think this is fine
uint64_t boundary = (slots_per_lock*idx); //<< qf->metadata->bits_per_slot;
//This is the code I'm stealing that assumption from
//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
//uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
//uint64_t lock_index = hash_bucket_index / slots_per_lock;
uint64_t lower = 0;
uint64_t upper = num_keys;
uint64_t index = upper-lower;
//upper is non inclusive bound
//if we exceed bounds that's our index
while (upper != lower){
index = lower + (upper - lower)/2;
if ((keys[index] >> qf->metadata->bits_per_slot) < boundary){
//false - the list before this point can be removed
lower = index+1;
//jump to a new midpoint
} else if (index==0){
//will this fix? otherwise need to patch via round up
upper = index;
} else if ((keys[index-1] >> qf->metadata->bits_per_slot) < boundary) {
			//set index! this is the first position at or past the boundary whose predecessor is still below it
//buffers[idx] = keys+index;
break;
} else {
//we are too far right, all keys to the right do not matter
upper = index;
}
}
//we either exited or have an edge condition:
//upper == lower iff 0 or max key
index = lower + (upper - lower)/2;
qf->runtimedata->buffers[idx] = keys + index;
}
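//Worked example for the binary search above (illustrative numbers only): with
//NUM_SLOTS_TO_LOCK = 16 and sorted bucket indices {3, 5, 17, 18, 40}, the thread
//for lock region 1 searches for boundary 16 and stops at index 2, the first key
//whose bucket index reaches the boundary, so buffers[1] = keys + 2; region 2
//(boundary 32) resolves to keys + 4, and region 0 always resolves to keys + 0.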
__global__ void find_clusters(QF* qf, uint64_t * cluster_lengths, uint64_t * max_clusters){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
uint64_t start_slot = 0;
uint64_t i =0;
while (start_slot < qf->metadata->nslots){
uint64_t old_start = start_slot;
start_slot = find_first_empty_slot(qf, start_slot);
if (start_slot == old_start){
start_slot++;
} else {
cluster_lengths[i] = start_slot-old_start;
i++;
}
}
max_clusters[0] = i;
}
//this can maybe be rolled into set_buffers_binary
//it performs an identical set of operations that are O(1) here
// O(log n) there, but maybe amortized
__global__ void set_buffer_lens(QF * qf, uint64_t num_keys, uint64_t * keys){
uint64_t num_buffers = qf->runtimedata->num_locks;
uint64_t idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx >= num_buffers) return;
uint64_t** buffers = qf->runtimedata->buffers;
uint64_t * buffer_sizes = qf->runtimedata->buffer_sizes;
//only 1 thread will diverge - should be fine - any cost already exists because of tail
if (idx != num_buffers-1){
//this should work? not 100% convinced but it seems ok
buffer_sizes[idx] = buffers[idx+1] - buffers[idx];
} else {
buffer_sizes[idx] = num_keys - (buffers[idx] - keys);
}
return;
}
//insert from buffers using prehashed_data
__global__ void insert_from_buffers_hashed(QF* qf, uint64_t evenness){
//uint64_t num_buffers, uint64_t** buffers, volatile uint64_t * buffer_counts;
uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;
if (idx >= qf->runtimedata->num_locks) return;
uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
uint64_t ** buffers = qf->runtimedata->buffers;
//at the start, we sort
//we are exceeding bounds by 1
//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);
//no need to sort if empty - this will cause overflow as 0-1 == max_uint
// if (buffer_counts[idx] > 0) {
// quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
// //assert(assert_sorted(buffers[idx], buffer_counts[idx]));
// }
uint64_t my_count = buffer_counts[idx];
for (uint64_t i =0; i < my_count; i++){
int ret = qf_insert(qf, buffers[idx][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH);
//internal threadfence. Bad? actually seems to be fine
//__threadfence();
}
__threadfence();
}
//insert from buffers using prehashed_data
//use warp cooperative operations
__global__ void insert_from_buffers_cooperative(QF* qf, uint64_t evenness){
//uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;
uint64_t tid = threadIdx.x+blockDim.x*blockIdx.x;
uint64_t itemID = tid / 32;
int warpID = tid % 32;
uint64_t idx = 2*itemID+evenness;
if (idx >= qf->runtimedata->num_locks) return;
uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
uint64_t ** buffers = qf->runtimedata->buffers;
//at the start, we sort
//we are exceeding bounds by 1
//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);
//no need to sort if empty - this will cause overflow as 0-1 == max_uint
// if (buffer_counts[idx] > 0) {
// quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
// //assert(assert_sorted(buffers[idx], buffer_counts[idx]));
// }
//uint64_t - uint64_t should yield offset into vals
//uint64_t absolute_offset = buffers[idx]- buffers;
uint64_t my_count = buffer_counts[idx];
for (uint64_t i =0; i < my_count; i++){
//assert(keys[absolute_offset+i] == buffers[idx][i]);
uint8_t query;
qf_returns ret_val = qf_insert_not_exists_cooperative(qf, buffers[idx][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query, warpID);
#if DEBUG_ASSERTS
assert(ret_val != QF_FULL);
#endif
//internal threadfence. Bad? actually seems to be fine
//__threadfence();
}
__threadfence();
}
__global__ void insert_from_buffers_thrust(QF* qf, uint64_t evenness, uint64_t * keys, uint64_t * vals){
uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;
if (idx >= qf->runtimedata->num_locks) return;
uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
uint64_t ** buffers = qf->runtimedata->buffers;
//at the start, we sort
//we are exceeding bounds by 1
//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);
//no need to sort if empty - this will cause overflow as 0-1 == max_uint
// if (buffer_counts[idx] > 0) {
// quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
// //assert(assert_sorted(buffers[idx], buffer_counts[idx]));
// }
//uint64_t - uint64_t should yield offset into vals
uint64_t absolute_offset = buffers[idx]- keys;
uint64_t my_count = buffer_counts[idx];
for (uint64_t i =0; i < my_count; i++){
//assert(keys[absolute_offset+i] == buffers[idx][i]);
int ret = qf_insert(qf, buffers[idx][i], 0, vals[absolute_offset+i], QF_NO_LOCK | QF_KEY_IS_HASH);
//internal threadfence. Bad? actually seems to be fine
//__threadfence();
}
__threadfence();
}
//insert from buffers using prehashed_data
__global__ void delete_from_buffers_hashed(QF* qf, uint64_t evenness){
uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;
if (idx >= qf->runtimedata->num_locks) return;
uint64_t ** buffers = qf->runtimedata->buffers;
uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
//at the start, we sort
//we are exceeding bounds by 1
//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);
//no need to sort if empty - this will cause overflow as 0-1 == max_uint
// if (buffer_counts[idx] > 0) {
// quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
// //assert(assert_sorted(buffers[idx], buffer_counts[idx]));
// }
uint64_t my_count = buffer_counts[idx];
	//iterate over the buffer in reverse (my_count down to 1); working backwards should be faster?
for (uint64_t i = my_count; i >=1; i--){
int ret = qf_remove(qf, buffers[idx][i-1], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH);
//internal threadfence. Bad? actually seems to be fine
//__threadfence();
}
__threadfence();
}
__device__ qf_returns point_insert_not_exists(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){
uint8_t query;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key % qf->metadata->range;
uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;
//encode extensions outside of the lock
while (true){
if (try_lock_16(qf->runtimedata->locks, lock_index)){
//this can also be a regular lock?
//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){
lock_16(qf->runtimedata->locks, lock_index+1);
qf_returns ret = qf_insert_not_exists(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query);
if (ret == QF_ITEM_FOUND){
returnedVal = query;
}
__threadfence();
unlock_16(qf->runtimedata->locks, lock_index+1);
unlock_16(qf->runtimedata->locks, lock_index);
return ret;
//}
unlock_16(qf->runtimedata->locks, lock_index);
}
}
}
__device__ qf_returns point_insert_not_exists_cooperative(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags, int warpID){
uint8_t query;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key % qf->metadata->range;
uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;
//encode extensions outside of the lock
while (true){
if (try_lock_16_coop(qf->runtimedata->locks, lock_index, warpID)){
//this can also be a regular lock?
//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){
lock_16_coop(qf->runtimedata->locks, lock_index+1, warpID);
qf_returns ret = qf_insert_not_exists_cooperative(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query, warpID);
if (ret == QF_ITEM_FOUND){
returnedVal = query;
}
__threadfence();
if (warpID ==0){
unlock_16(qf->runtimedata->locks, lock_index+1);
unlock_16(qf->runtimedata->locks, lock_index);
}
return ret;
//}
if (warpID ==0) unlock_16(qf->runtimedata->locks, lock_index);
}
}
}
__device__ qf_returns point_insert(QF* qf, uint64_t key, uint8_t value, uint8_t flags){
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key % qf->metadata->range;
uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;
//encode extensions outside of the lock
while (true){
if (try_lock_16(qf->runtimedata->locks, lock_index)){
//this can also be a regular lock?
//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){
lock_16(qf->runtimedata->locks, lock_index+1);
qf_returns ret = qf_insert(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH);
__threadfence();
unlock_16(qf->runtimedata->locks, lock_index+1);
unlock_16(qf->runtimedata->locks, lock_index);
return ret;
//}
unlock_16(qf->runtimedata->locks, lock_index);
}
}
}
__device__ uint64_t point_query(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key % qf->metadata->range;
uint64_t query;
uint64_t ret = qf_query(qf, hash, &query, QF_NO_LOCK | QF_KEY_IS_HASH);
returnedVal = query;
return ret;
}
__device__ uint64_t point_query_concurrent(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key % qf->metadata->range;
uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;
//encode extensions outside of the lock
while (true){
if (try_lock_16(qf->runtimedata->locks, lock_index)){
//this can also be a regular lock?
//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){
lock_16(qf->runtimedata->locks, lock_index+1);
uint64_t query;
uint64_t ret = qf_query(qf, hash, &query, QF_NO_LOCK | QF_KEY_IS_HASH);
__threadfence();
unlock_16(qf->runtimedata->locks, lock_index+1);
unlock_16(qf->runtimedata->locks, lock_index);
returnedVal = query;
return ret;
//}
unlock_16(qf->runtimedata->locks, lock_index);
}
}
}
__global__ void point_bulk_get(QF * qf, uint64_t * hashes, uint64_t nitems, uint64_t * counter){
uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >=nitems) return;
uint8_t query;
//point_query(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags)
if (point_query(qf, hashes[tid] % qf->metadata->range, 0, query, QF_NO_LOCK) ==0){
//on item not found increment
atomicAdd((unsigned long long int *) counter, (unsigned long long int) 1);
}
}
__global__ void point_bulk_get_nocount(QF * qf, uint64_t * hashes, uint64_t nitems){
uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >=nitems) return;
uint8_t query;
//point_query(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags)
point_query(qf, hashes[tid] % qf->metadata->range, 0, query, QF_NO_LOCK);
}
__global__ void bulk_get_cooperative(QF * qf, uint64_t * hashes, uint64_t nitems, uint64_t * counter){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
uint64_t itemID = tid /32;
int warpID = tid % 32;
if (itemID >= qf->runtimedata->num_locks) return;
uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
uint64_t ** buffers = qf->runtimedata->buffers;
//at the start, we sort
//we are exceeding bounds by 1
//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);
//no need to sort if empty - this will cause overflow as 0-1 == max_uint
// if (buffer_counts[idx] > 0) {
// quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
// //assert(assert_sorted(buffers[idx], buffer_counts[idx]));
// }
uint64_t my_count = buffer_counts[itemID];
for (uint64_t i =warpID; i < my_count; i+=32){
//int ret = qf_insert(qf, buffers[itemID][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH);
uint8_t query;
if (point_query(qf, buffers[itemID][i] % qf->metadata->range, 0, query, QF_NO_LOCK | QF_KEY_IS_HASH) ==0){
//atomicAdd((unsigned long long int *) counter, (unsigned long long int) 1);
}
//internal threadfence. Bad? actually seems to be fine
//__threadfence();
}
}
__host__ uint64_t cooperative_bulk_get_wrapper(QF * qf, uint64_t * hashes, uint64_t nitems){
auto start = std::chrono::high_resolution_clock::now();
uint64_t key_block_size = 32;
//start with num_locks, get counts
//This is slow, but there isn't a better way to do it
//we'll have to see how this affects performance
uint64_t * dev_num_locks;
hipMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
hipLaunchKernelGGL(( get_dev_nvals), dim3(1),dim3(1), 0, 0, qf, dev_num_locks);
hipDeviceSynchronize();
uint64_t num_locks = dev_num_locks[0];
hipFree(dev_num_locks);
uint64_t key_block = (nitems-1)/key_block_size + 1;
//keys are hashed, now need to treat them as hashed in all further functions
hipLaunchKernelGGL(( hash_all), dim3(key_block), dim3(key_block_size), 0, 0, qf, hashes, hashes, nitems, 0);
thrust::sort(thrust::device, hashes, hashes+nitems);
hipLaunchKernelGGL(( set_buffers_binary), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nitems, hashes, 0);
hipLaunchKernelGGL(( set_buffer_lens), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nitems, hashes);
uint64_t * misses;
//this is fine, should never be triggered
hipMallocManaged((void **)&misses, sizeof(uint64_t));
hipMemset(misses, 0, sizeof(uint64_t));
hipDeviceSynchronize();
auto midpoint = std::chrono::high_resolution_clock::now();
const int bulk_block_size = 1024;
hipLaunchKernelGGL(( bulk_get_cooperative), dim3((nitems*32-1)/bulk_block_size+1), dim3(bulk_block_size), 0, 0, qf, hashes, nitems, misses);
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> sort_diff = midpoint-start;
std::chrono::duration<double> diff = end-midpoint;
std::cout << "sorted " << nitems << " in " << sort_diff.count() << " seconds\n";
std::cout << "Queried " << nitems << " in " << diff.count() << " seconds\n";
uint64_t output = misses[0];
hipFree(misses);
return output;
}
__host__ uint64_t point_get_wrapper(QF * qf, uint64_t * hashes, uint64_t nitems){
// uint64_t * misses;
// //this is fine, should never be triggered
// hipMallocManaged((void **)&misses, sizeof(uint64_t));
// hipMemset(misses, 0, sizeof(uint64_t));
hipLaunchKernelGGL(( point_bulk_get_nocount), dim3((nitems-1)/512+1), dim3(512), 0, 0, qf, hashes, nitems);
hipDeviceSynchronize();
// uint64_t toReturn = *misses;
// hipFree(misses);
// return toReturn;
return 0;
}
__host__ uint64_t point_get_wrapper_fp(QF * qf, uint64_t * hashes, uint64_t nitems){
uint64_t * misses;
//this is fine, should never be triggered
hipMallocManaged((void **)&misses, sizeof(uint64_t));
hipMemset(misses, 0, sizeof(uint64_t));
hipLaunchKernelGGL(( point_bulk_get), dim3((nitems-1)/512+1), dim3(512), 0, 0, qf, hashes, nitems, misses);
hipDeviceSynchronize();
uint64_t toReturn = *misses;
hipFree(misses);
return toReturn;
//return 0;
}
__global__ void point_bulk_insert(QF * qf, uint64_t * hashes, uint64_t nitems){
uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >=nitems) return;
//#if DROP_ON_RUNEND
point_insert(qf, hashes[tid], 0, 0);
// #else
// assert(point_insert(qf, hashes[tid], 0, 0) != QF_FULL);
// #endif
}
__global__ void point_bulk_insert_cooperative(QF * qf, uint64_t * hashes, uint64_t nitems){
uint64_t itemID = threadIdx.x + blockIdx.x * blockDim.x;
uint64_t tid = itemID / 32;
int warpID = itemID % 32;
if (tid >=nitems) return;
	uint8_t retvalue;
	//keep the insert outside the assert so it still executes if NDEBUG strips asserts
	qf_returns ret = point_insert_not_exists_cooperative(qf, hashes[tid], 0, retvalue, 0, warpID);
	assert(ret != QF_FULL);
	(void) ret; //silence the unused warning when asserts are disabled
}
//set a uint64_t reference to point at device memory;
__global__ void get_dev_nvals(QF* qf, uint64_t * external_nvals){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= 1) return;
external_nvals[0] = qf->runtimedata->num_locks;
}
//modified version of buffers_provided - performs an initial bulk hash, should save work over other versions
//note: this DOES modify the given buffer - fine for all versions now
//This variant performs an initial sort that allows us to save time overall
//as we avoid the atomic count-off and any sort of cross-thread communication
__host__ void bulk_insert(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {
uint64_t key_block_size = 32;
uint64_t key_block = (nvals -1)/key_block_size + 1;
//start with num_locks, get counts
//This is slow, but there isn't a better way to do it
//we'll have to see how this affects performance
uint64_t * dev_num_locks;
hipMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
hipLaunchKernelGGL(( get_dev_nvals), dim3(1),dim3(1), 0, 0, qf, dev_num_locks);
hipDeviceSynchronize();
uint64_t num_locks = dev_num_locks[0];
hipFree(dev_num_locks);
//keys are hashed, now need to treat them as hashed in all further functions
hipLaunchKernelGGL(( hash_all), dim3(key_block), dim3(key_block_size), 0, 0, qf, keys, keys, nvals, flags);
thrust::sort(thrust::device, keys, keys+nvals);
hipLaunchKernelGGL(( set_buffers_binary), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys, flags);
hipLaunchKernelGGL(( set_buffer_lens), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys);
//insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes);
//return;
const int bulk_block_size = 32;
uint64_t evenness = 0;
hipLaunchKernelGGL(( insert_from_buffers_hashed), dim3((num_locks-1)/bulk_block_size+1), dim3(bulk_block_size), 0, 0, qf, evenness);
evenness = 1;
hipLaunchKernelGGL(( insert_from_buffers_hashed), dim3((num_locks-1)/bulk_block_size+1), dim3(bulk_block_size), 0, 0, qf, evenness);
}
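//The two launches above process even- and odd-numbered lock regions separately;
//since an insert can spill into the following region, splitting by parity keeps
//concurrently processed regions non-adjacent without taking locks.
//Usage sketch (assumed): keys must already live in device memory and are hashed
//and sorted in place, e.g. bulk_insert(dev_qf, nvals, dev_keys, 0);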
__host__ void bulk_insert_cooperative(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {
uint64_t key_block_size = 32;
uint64_t key_block = (nvals -1)/key_block_size + 1;
//start with num_locks, get counts
//This is slow, but there isn't a better way to do it
//we'll have to see how this affects performance
uint64_t * dev_num_locks;
hipMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
hipLaunchKernelGGL(( get_dev_nvals), dim3(1),dim3(1), 0, 0, qf, dev_num_locks);
hipDeviceSynchronize();
uint64_t num_locks = dev_num_locks[0];
hipFree(dev_num_locks);
//keys are hashed, now need to treat them as hashed in all further functions
hipLaunchKernelGGL(( hash_all), dim3(key_block), dim3(key_block_size), 0, 0, qf, keys, keys, nvals, flags);
thrust::sort(thrust::device, keys, keys+nvals);
hipLaunchKernelGGL(( set_buffers_binary), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys, flags);
hipLaunchKernelGGL(( set_buffer_lens), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys);
//insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes);
//return;
uint64_t evenness = 0;
hipLaunchKernelGGL(( insert_from_buffers_cooperative), dim3((32*num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness);
evenness = 1;
hipLaunchKernelGGL(( insert_from_buffers_cooperative), dim3((32*num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness);
}
//modified version of buffers_provided - performs an initial bulk hash, should save work over other versions
//note: this DOES modify the given buffer - fine for all versions now
//This variant performs an initial sort that allows us to save time overall
//as we avoid the atomic count-off and any sort of cross-thread communication
__host__ void bulk_insert_reduce(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {
uint64_t key_block_size = 32;
uint64_t key_block = (nvals -1)/key_block_size + 1;
//start with num_locks, get counts
	//This is slow, but there isn't a better way to do it
uint64_t * dev_num_locks;
hipMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
hipLaunchKernelGGL(( get_dev_nvals), dim3(1),dim3(1), 0, 0, qf, dev_num_locks);
hipDeviceSynchronize();
uint64_t num_locks = dev_num_locks[0];
hipFree(dev_num_locks);
//keys are hashed, now need to treat them as hashed in all further functions
hipLaunchKernelGGL(( hash_all), dim3(key_block), dim3(key_block_size), 0, 0, qf, keys, keys, nvals, flags);
thrust::sort(thrust::device, keys, keys+nvals);
thrust::device_ptr<uint64_t> keys_ptr(keys);
thrust::device_ptr<uint64_t> dupe_counts= thrust::device_malloc<uint64_t>(nvals);
thrust::fill(dupe_counts, dupe_counts+nvals, 1);
thrust::device_ptr<uint64_t> thrust_keys = thrust::device_malloc<uint64_t>(nvals);
thrust::device_ptr <uint64_t> thrust_vals = thrust::device_malloc<uint64_t>(nvals);
thrust::pair<thrust::device_ptr<uint64_t>,thrust::device_ptr<uint64_t>> new_end;
new_end = thrust::reduce_by_key(thrust::device, keys_ptr, keys_ptr+nvals, dupe_counts, thrust_keys, thrust_vals);
uint64_t new_nvals = new_end.first - thrust_keys;
printf("New nvals %llu\n", new_nvals);
uint64_t * new_keys = thrust::raw_pointer_cast(thrust_keys);
uint64_t * new_key_counts = thrust::raw_pointer_cast(thrust_vals);
//set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, new_nvals, slots_per_lock, new_keys, num_locks, buffers, flags);
hipLaunchKernelGGL(( set_buffers_binary), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, new_nvals, new_keys, flags);
//set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, new_nvals, new_keys, num_locks, (uint64_t *) buffer_sizes, buffers);
hipLaunchKernelGGL(( set_buffer_lens), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, new_nvals, new_keys);
//insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes);
//return;
uint64_t evenness = 0;
hipLaunchKernelGGL(( insert_from_buffers_thrust), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness, new_keys,new_key_counts);
evenness = 1;
hipLaunchKernelGGL(( insert_from_buffers_thrust), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness, new_keys, new_key_counts);
//free resources
thrust::device_free(thrust_keys);
thrust::device_free(thrust_vals);
thrust::device_free(dupe_counts);
}
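//Worked example for the reduce-by-key variant above (illustrative values only):
//sorted hashed keys {7, 7, 7, 9} reduce to thrust_keys {7, 9} with thrust_vals
//{3, 1}, so insert_from_buffers_thrust inserts each distinct key once with its
//full count instead of performing three separate single-count inserts of 7.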
__host__ void bulk_delete(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {
uint64_t key_block_size = 32;
uint64_t key_block = (nvals -1)/key_block_size + 1;
//start with num_locks, get counts
	//This is slow, but there isn't a better way to do it
uint64_t * dev_num_locks;
hipMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
hipLaunchKernelGGL(( get_dev_nvals), dim3(1),dim3(1), 0, 0, qf, dev_num_locks);
hipDeviceSynchronize();
uint64_t num_locks = dev_num_locks[0];
hipFree(dev_num_locks);
//keys are hashed, now need to treat them as hashed in all further functions
hipLaunchKernelGGL(( hash_all), dim3(key_block), dim3(key_block_size), 0, 0, qf, keys, keys, nvals, flags);
thrust::sort(thrust::device, keys, keys+nvals);
//set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, slots_per_lock, keys, num_locks, buffers, flags);
hipLaunchKernelGGL(( set_buffers_binary), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys, flags);
//set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys, num_locks, (uint64_t *) buffer_sizes, buffers);
hipLaunchKernelGGL(( set_buffer_lens), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys);
//insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes);
//return;
uint64_t evenness = 0;
hipLaunchKernelGGL(( delete_from_buffers_hashed), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness);
evenness = 1;
hipLaunchKernelGGL(( delete_from_buffers_hashed), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness);
}
__global__ void bulk_get_nocount(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t key_count, uint8_t flags){
uint64_t tid = threadIdx.x+blockDim.x*blockIdx.x;
if (tid >= nvals) return;
uint64_t count = qf_count_key_value(qf, vals[tid], 0, 0);
return;
}
__global__ void bulk_get_misses(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t key_count, uint64_t * counter, uint8_t flags){
uint64_t tid = threadIdx.x + blockDim.x * blockIdx.x;
//should never happen, but just in case
if (tid >= nvals) return;
uint64_t count = qf_count_key_value(qf, vals[tid], 0, 0);
if (count < key_count) {
atomicAdd((long long unsigned int *)counter, (long long unsigned int) 1);
}
}
__global__ void bulk_get_kernel(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t * returns, uint8_t flags){
uint64_t tid = threadIdx.x + blockDim.x * blockIdx.x;
//should never happen, but just in case
if (tid >= nvals) return;
returns[tid] = qf_count_key_value(qf, vals[tid], 0, flags);
}
__host__ void bulk_get(QF * qf, uint64_t nvals, uint64_t * vals, uint64_t * returns){
hipLaunchKernelGGL(( bulk_get_kernel), dim3((nvals-1)/512+1), dim3(512), 0, 0, qf, vals, nvals, returns, QF_NO_LOCK);
}
__host__ uint64_t bulk_get_misses_wrapper(QF * qf, uint64_t * vals, uint64_t nvals){
uint64_t * misses;
//this is fine, should never be triggered
hipMallocManaged((void **)&misses, sizeof(uint64_t));
hipMemset(misses, 0, sizeof(uint64_t));
hipLaunchKernelGGL(( bulk_get_misses), dim3((nvals-1)/512+1), dim3(512), 0, 0, qf, vals, nvals, 1, misses, QF_NO_LOCK);
hipDeviceSynchronize();
uint64_t toReturn = *misses;
hipFree(misses);
return toReturn;
//return 0;
}
//this variant issues the queries but does not count or return misses
__host__ uint64_t bulk_get_nocount_wrapper(QF * qf, uint64_t * vals, uint64_t nvals){
hipLaunchKernelGGL(( bulk_get_nocount), dim3((nvals-1)/512+1), dim3(512), 0, 0, qf, vals, nvals, 1, QF_NO_LOCK);
hipDeviceSynchronize();
return 0;
//return 0;
}
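//Illustrative end-to-end sketch (added for clarity; the helper name, the 22-bit
//sizing, and the flags value are assumptions, not part of the original API).
//It allocates a device filter, bulk inserts a device-resident key array, then
//verifies with the miss counter above. bulk_insert hashes its key buffer in
//place, so a separate copy of the unhashed keys is kept for the query pass.
__host__ void example_bulk_roundtrip(uint64_t * dev_keys, uint64_t nvals)
{
	QF * dev_qf;
	qf_malloc_device(&dev_qf, 22, /* bulk_config = */ true);

	//keep an unhashed copy for querying, since bulk_insert rewrites dev_keys
	uint64_t * dev_query_keys;
	hipMalloc((void **)&dev_query_keys, nvals * sizeof(uint64_t));
	hipMemcpy(dev_query_keys, dev_keys, nvals * sizeof(uint64_t), hipMemcpyDeviceToDevice);

	//flags = 0: keys are hashed internally by hash_all
	bulk_insert(dev_qf, nvals, dev_keys, 0);

	//count how many inserted keys the filter fails to report
	uint64_t misses = bulk_get_misses_wrapper(dev_qf, dev_query_keys, nvals);
	printf("example roundtrip: %llu misses out of %llu keys\n",
	       (unsigned long long) misses, (unsigned long long) nvals);

	hipFree(dev_query_keys);
	qf_destroy_device(dev_qf);
}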
__host__ __device__ int qf_set_count(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
flags)
{
if (count == 0)
return 0;
uint64_t cur_count = qf_count_key_value(qf, key, value, flags);
int64_t delta = count - cur_count;
int ret;
if (delta == 0)
ret = 0;
else if (delta > 0)
ret = qf_insert(qf, key, value, delta, flags);
else
ret = qf_remove(qf, key, value, labs(delta), flags);
return ret;
}
__host__ __device__ int qf_remove(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
flags)
{
if (count == 0)
return true;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value &
BITMASK(qf->metadata->value_bits));
return _remove(qf, hash, count, flags);
}
__host__ __device__ int qf_delete_key_value(QF *qf, uint64_t key, uint64_t value, uint8_t flags)
{
uint64_t count = qf_count_key_value(qf, key, value, flags);
if (count == 0)
return true;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value &
BITMASK(qf->metadata->value_bits));
return _remove(qf, hash, count, flags);
}
__host__ __device__ uint64_t qf_count_key_value(const QF *qf, uint64_t key, uint64_t value,
uint8_t flags)
{
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value &
BITMASK(qf->metadata->value_bits));
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
int64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
if (!is_occupied(qf, hash_bucket_index))
return 0;
int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,
hash_bucket_index-1)
+ 1;
if (runstart_index < hash_bucket_index)
runstart_index = hash_bucket_index;
/* printf("MC RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t current_remainder, current_count, current_end;
do {
		current_end = decode_counter(qf, runstart_index, &current_remainder,
		                             &current_count);
if (current_remainder == hash_remainder)
return current_count;
runstart_index = current_end + 1;
} while (!is_runend(qf, current_end));
return 0;
}
__host__ __device__ uint64_t qf_query(const QF *qf, uint64_t key, uint64_t *value, uint8_t flags)
{
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key;
uint64_t hash_remainder = hash & BITMASK(qf->metadata->key_remainder_bits);
int64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
if (!is_occupied(qf, hash_bucket_index))
return 0;
int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,
hash_bucket_index-1)
+ 1;
if (runstart_index < hash_bucket_index)
runstart_index = hash_bucket_index;
/* printf("MC RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t current_remainder, current_count, current_end;
do {
		current_end = decode_counter(qf, runstart_index, &current_remainder,
		                             &current_count);
*value = current_remainder & BITMASK(qf->metadata->value_bits);
current_remainder = current_remainder >> qf->metadata->value_bits;
if (current_remainder == hash_remainder) {
return current_count;
}
runstart_index = current_end + 1;
} while (!is_runend(qf, current_end));
return 0;
}
__host__ __device__ int64_t qf_get_unique_index(const QF *qf, uint64_t key, uint64_t value,
uint8_t flags)
{
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value &
BITMASK(qf->metadata->value_bits));
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
int64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
if (!is_occupied(qf, hash_bucket_index))
return QF_DOESNT_EXIST;
int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,
hash_bucket_index-1)
+ 1;
if (runstart_index < hash_bucket_index)
runstart_index = hash_bucket_index;
/* printf("MC RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t current_remainder, current_count, current_end;
do {
		current_end = decode_counter(qf, runstart_index, &current_remainder,
		                             &current_count);
if (current_remainder == hash_remainder)
return runstart_index;
runstart_index = current_end + 1;
} while (!is_runend(qf, current_end));
return QF_DOESNT_EXIST;
}
enum qf_hashmode qf_get_hashmode(const QF *qf) {
return qf->metadata->hash_mode;
}
uint64_t qf_get_hash_seed(const QF *qf) {
return qf->metadata->seed;
}
uint64_t qf_get_hash_range(const QF *qf) {
return qf->metadata->range;
}
bool qf_is_auto_resize_enabled(const QF *qf) {
if (qf->runtimedata->auto_resize == 1)
return true;
return false;
}
uint64_t qf_get_total_size_in_bytes(const QF *qf) {
return qf->metadata->total_size_in_bytes;
}
uint64_t qf_get_nslots(const QF *qf) {
return qf->metadata->nslots;
}
uint64_t qf_get_num_occupied_slots(const QF *qf) {
pc_sync(&qf->runtimedata->pc_noccupied_slots);
return qf->metadata->noccupied_slots;
}
uint64_t qf_get_num_key_bits(const QF *qf) {
return qf->metadata->key_bits;
}
uint64_t qf_get_num_value_bits(const QF *qf) {
return qf->metadata->value_bits;
}
uint64_t qf_get_num_key_remainder_bits(const QF *qf) {
return qf->metadata->key_remainder_bits;
}
uint64_t qf_get_bits_per_slot(const QF *qf) {
return qf->metadata->bits_per_slot;
}
uint64_t qf_get_sum_of_counts(const QF *qf) {
pc_sync(&qf->runtimedata->pc_nelts);
return qf->metadata->nelts;
}
uint64_t qf_get_num_distinct_key_value_pairs(const QF *qf) {
pc_sync(&qf->runtimedata->pc_ndistinct_elts);
return qf->metadata->ndistinct_elts;
}
void qf_sync_counters(const QF *qf) {
pc_sync(&qf->runtimedata->pc_ndistinct_elts);
pc_sync(&qf->runtimedata->pc_nelts);
pc_sync(&qf->runtimedata->pc_noccupied_slots);
}
/* initialize the iterator at the run corresponding
* to the position index
*/
int64_t qf_iterator_from_position(const QF *qf, QFi *qfi, uint64_t position)
{
if (position == 0xffffffffffffffff) {
qfi->current = 0xffffffffffffffff;
qfi->qf = qf;
return QFI_INVALID;
}
assert(position < qf->metadata->nslots);
if (!is_occupied(qf, position)) {
uint64_t block_index = position;
uint64_t idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
if (idx == 64) {
while(idx == 64 && block_index < qf->metadata->nblocks) {
block_index++;
idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
}
}
position = block_index * QF_SLOTS_PER_BLOCK + idx;
}
qfi->qf = qf;
qfi->num_clusters = 0;
qfi->run = position;
qfi->current = position == 0 ? 0 : run_end(qfi->qf, position-1) + 1;
if (qfi->current < position)
qfi->current = position;
#ifdef LOG_CLUSTER_LENGTH
qfi->c_info = (cluster_data* )calloc(qf->metadata->nslots/32,
sizeof(cluster_data));
if (qfi->c_info == NULL) {
perror("Couldn't allocate memory for c_info.");
exit(EXIT_FAILURE);
}
qfi->cur_start_index = position;
qfi->cur_length = 1;
#endif
if (qfi->current >= qf->metadata->nslots)
return QFI_INVALID;
return qfi->current;
}
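/* Initialize the iterator at the smallest stored hash >= hash(key, value); returns
 * the starting slot index, or QFI_INVALID if the key is out of range or the filter
 * holds nothing at or after that hash. */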
int64_t qf_iterator_from_key_value(const QF *qf, QFi *qfi, uint64_t key,
uint64_t value, uint8_t flags)
{
if (key >= qf->metadata->range) {
qfi->current = 0xffffffffffffffff;
qfi->qf = qf;
return QFI_INVALID;
}
qfi->qf = qf;
qfi->num_clusters = 0;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value &
BITMASK(qf->metadata->value_bits));
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
bool flag = false;
// If a run starts at "position", move the iterator to point to the
// smallest key in that run that is greater than or equal to "hash".
if (is_occupied(qf, hash_bucket_index)) {
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,
hash_bucket_index-1)
+ 1;
if (runstart_index < hash_bucket_index)
runstart_index = hash_bucket_index;
uint64_t current_remainder, current_count, current_end;
do {
current_end = decode_counter(qf, runstart_index, ¤t_remainder,
¤t_count);
if (current_remainder >= hash_remainder) {
flag = true;
break;
}
runstart_index = current_end + 1;
} while (!is_runend(qf, current_end));
// found "hash" or smallest key greater than "hash" in this run.
if (flag) {
qfi->run = hash_bucket_index;
qfi->current = runstart_index;
}
}
// If a run doesn't start at "position" or the largest key in the run
// starting at "position" is smaller than "hash" then find the start of the
// next run.
if (!is_occupied(qf, hash_bucket_index) || !flag) {
uint64_t position = hash_bucket_index;
assert(position < qf->metadata->nslots);
uint64_t block_index = position / QF_SLOTS_PER_BLOCK;
uint64_t idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
if (idx == 64) {
while(idx == 64 && block_index < qf->metadata->nblocks) {
block_index++;
idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
}
}
position = block_index * QF_SLOTS_PER_BLOCK + idx;
qfi->run = position;
qfi->current = position == 0 ? 0 : run_end(qfi->qf, position-1) + 1;
if (qfi->current < position)
qfi->current = position;
}
if (qfi->current >= qf->metadata->nslots)
return QFI_INVALID;
return qfi->current;
}
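/* Read the remainder and count at the iterator's current slot and reassemble the
 * full hash from the current run index and the stored remainder. */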
static int qfi_get(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t
*count)
{
if (qfi_end(qfi))
return QFI_INVALID;
uint64_t current_remainder, current_count;
decode_counter(qfi->qf, qfi->current, ¤t_remainder, ¤t_count);
*value = current_remainder & BITMASK(qfi->qf->metadata->value_bits);
current_remainder = current_remainder >> qfi->qf->metadata->value_bits;
*key = (qfi->run << qfi->qf->metadata->key_remainder_bits) | current_remainder;
*count = current_count;
return 0;
}
int qfi_get_key(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t
*count)
{
*key = *value = *count = 0;
int ret = qfi_get(qfi, key, value, count);
if (ret == 0) {
if (qfi->qf->metadata->hash_mode == QF_HASH_DEFAULT) {
*key = 0; *value = 0; *count = 0;
return QF_INVALID;
} else if (qfi->qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
*key = hash_64i(*key, BITMASK(qfi->qf->metadata->key_bits));
}
return ret;
}
int qfi_get_hash(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t
*count)
{
*key = *value = *count = 0;
return qfi_get(qfi, key, value, count);
}
int qfi_next(QFi *qfi)
{
if (qfi_end(qfi))
return QFI_INVALID;
else {
/* move to the end of the current counter*/
uint64_t current_remainder, current_count;
qfi->current = decode_counter(qfi->qf, qfi->current, ¤t_remainder,
¤t_count);
if (!is_runend(qfi->qf, qfi->current)) {
qfi->current++;
#ifdef LOG_CLUSTER_LENGTH
qfi->cur_length++;
#endif
if (qfi_end(qfi))
return QFI_INVALID;
return 0;
} else {
#ifdef LOG_CLUSTER_LENGTH
/* save to check if the new current is the new cluster. */
uint64_t old_current = qfi->current;
#endif
uint64_t block_index = qfi->run / QF_SLOTS_PER_BLOCK;
uint64_t rank = bitrank(get_block(qfi->qf, block_index)->occupieds[0],
qfi->run % QF_SLOTS_PER_BLOCK);
uint64_t next_run = bitselect(get_block(qfi->qf,
block_index)->occupieds[0],
rank);
if (next_run == 64) {
rank = 0;
while (next_run == 64 && block_index < qfi->qf->metadata->nblocks) {
block_index++;
next_run = bitselect(get_block(qfi->qf, block_index)->occupieds[0],
rank);
}
}
if (block_index == qfi->qf->metadata->nblocks) {
/* set the index values to max. */
qfi->run = qfi->current = qfi->qf->metadata->xnslots;
return QFI_INVALID;
}
qfi->run = block_index * QF_SLOTS_PER_BLOCK + next_run;
qfi->current++;
if (qfi->current < qfi->run)
qfi->current = qfi->run;
#ifdef LOG_CLUSTER_LENGTH
if (qfi->current > old_current + 1) { /* new cluster. */
if (qfi->cur_length > 10) {
qfi->c_info[qfi->num_clusters].start_index = qfi->cur_start_index;
qfi->c_info[qfi->num_clusters].length = qfi->cur_length;
qfi->num_clusters++;
}
qfi->cur_start_index = qfi->run;
qfi->cur_length = 1;
} else {
qfi->cur_length++;
}
#endif
return 0;
}
}
}
bool qfi_end(const QFi *qfi)
{
if (qfi->current >= qfi->qf->metadata->xnslots /*&& is_runend(qfi->qf, qfi->current)*/)
return true;
return false;
}
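/* A minimal host-side sketch (illustrative, not part of the original API) of how
 * the iterator above is typically driven: start at position 0, read each
 * key/value/count with qfi_get_hash(), and advance with qfi_next() until
 * qfi_end() reports the end of the filter. The function name is hypothetical. */
__host__ static inline uint64_t qfi_example_total_count(const QF *qf)
{
	QFi qfi;
	uint64_t total = 0;
	/* position the iterator at the first occupied run */
	if (qf_iterator_from_position(qf, &qfi, 0) == QFI_INVALID)
		return 0;
	do {
		uint64_t key = 0, value = 0, count = 0;
		qfi_get_hash(&qfi, &key, &value, &count);
		total += count; /* accumulate the stored counts */
	} while (!qfi_next(&qfi));
	return total;
}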
/*
* Merge qfa and qfb into qfc
*/
/*
 * Iterate over both QFs (qfa and qfb) simultaneously with cursors ia and ib:
 * at each step take m = min(key(qfa, ia), key(qfb, ib)),
 * insert m (together with its count) into qfc,
 * then advance whichever cursor held the minimum.
 */
void qf_merge(const QF *qfa, const QF *qfb, QF *qfc)
{
QFi qfia, qfib;
qf_iterator_from_position(qfa, &qfia, 0);
qf_iterator_from_position(qfb, &qfib, 0);
if (qfa->metadata->hash_mode != qfc->metadata->hash_mode &&
qfa->metadata->seed != qfc->metadata->seed &&
qfb->metadata->hash_mode != qfc->metadata->hash_mode &&
qfb->metadata->seed != qfc->metadata->seed) {
fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n");
exit(1);
}
uint64_t keya, valuea, counta, keyb, valueb, countb;
qfi_get_hash(&qfia, &keya, &valuea, &counta);
qfi_get_hash(&qfib, &keyb, &valueb, &countb);
do {
if (keya < keyb) {
qf_insert(qfc, keya, valuea, counta, QF_NO_LOCK | QF_KEY_IS_HASH);
qfi_next(&qfia);
qfi_get_hash(&qfia, &keya, &valuea, &counta);
}
else {
qf_insert(qfc, keyb, valueb, countb, QF_NO_LOCK | QF_KEY_IS_HASH);
qfi_next(&qfib);
qfi_get_hash(&qfib, &keyb, &valueb, &countb);
}
} while(!qfi_end(&qfia) && !qfi_end(&qfib));
if (!qfi_end(&qfia)) {
do {
qfi_get_hash(&qfia, &keya, &valuea, &counta);
qf_insert(qfc, keya, valuea, counta, QF_NO_LOCK | QF_KEY_IS_HASH);
} while(!qfi_next(&qfia));
}
if (!qfi_end(&qfib)) {
do {
qfi_get_hash(&qfib, &keyb, &valueb, &countb);
qf_insert(qfc, keyb, valueb, countb, QF_NO_LOCK | QF_KEY_IS_HASH);
} while(!qfi_next(&qfib));
}
}
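/* Usage sketch (illustrative): qfc must already be initialized with the same hash
 * mode and seed as qfa and qfb and with enough slots for both inputs; the inputs
 * themselves are not modified:
 *
 *   qf_merge(&qfa, &qfb, &qfc);
 */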
/*
* Merge an array of qfs into the resultant QF
*/
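/* (k-way merge: repeatedly pick the input iterator holding the smallest hash,
 * insert that element into qfr, and drop each input iterator once exhausted.) */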
void qf_multi_merge(const QF *qf_arr[], int nqf, QF *qfr)
{
int i;
QFi qfi_arr[nqf];
int smallest_idx = 0;
uint64_t smallest_key = UINT64_MAX;
for (i=0; i<nqf; i++) {
if (qf_arr[i]->metadata->hash_mode != qfr->metadata->hash_mode &&
qf_arr[i]->metadata->seed != qfr->metadata->seed) {
fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n");
exit(1);
}
qf_iterator_from_position(qf_arr[i], &qfi_arr[i], 0);
}
DEBUG_CQF("Merging %d CQFs\n", nqf);
for (i=0; i<nqf; i++) {
DEBUG_CQF("CQF %d\n", i);
DEBUG_DUMP(qf_arr[i]);
}
while (nqf > 1) {
uint64_t keys[nqf];
uint64_t values[nqf];
uint64_t counts[nqf];
for (i=0; i<nqf; i++)
qfi_get_hash(&qfi_arr[i], &keys[i], &values[i], &counts[i]);
do {
smallest_key = UINT64_MAX;
for (i=0; i<nqf; i++) {
if (keys[i] < smallest_key) {
smallest_key = keys[i]; smallest_idx = i;
}
}
qf_insert(qfr, keys[smallest_idx], values[smallest_idx],
counts[smallest_idx], QF_NO_LOCK | QF_KEY_IS_HASH);
qfi_next(&qfi_arr[smallest_idx]);
qfi_get_hash(&qfi_arr[smallest_idx], &keys[smallest_idx],
&values[smallest_idx],
&counts[smallest_idx]);
} while(!qfi_end(&qfi_arr[smallest_idx]));
/* remove the qf that is exhausted from the array */
if (smallest_idx < nqf-1)
memmove(&qfi_arr[smallest_idx], &qfi_arr[smallest_idx+1],
(nqf-smallest_idx-1)*sizeof(qfi_arr[0]));
nqf--;
}
if (!qfi_end(&qfi_arr[0])) {
uint64_t iters = 0;
do {
uint64_t key, value, count;
qfi_get_hash(&qfi_arr[0], &key, &value, &count);
qf_insert(qfr, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH);
qfi_next(&qfi_arr[0]);
iters++;
} while(!qfi_end(&qfi_arr[0]));
DEBUG_CQF("Num of iterations: %lu\n", iters);
}
DEBUG_CQF("%s", "Final CQF after merging.\n");
DEBUG_DUMP(qfr);
return;
}
/* Compute the inner product of two QFs: the sum over hashes present in both of
 * count_qfa(x) * count_qfb(x) (the numerator of a cosine similarity). */
uint64_t qf_inner_product(const QF *qfa, const QF *qfb)
{
uint64_t acc = 0;
QFi qfi;
const QF *qf_mem, *qf_disk;
if (qfa->metadata->hash_mode != qfb->metadata->hash_mode &&
qfa->metadata->seed != qfb->metadata->seed) {
fprintf(stderr, "Input QFs do not have the same hash mode or seed.\n");
exit(1);
}
// create the iterator on the larger QF.
if (qfa->metadata->total_size_in_bytes > qfb->metadata->total_size_in_bytes)
{
qf_mem = qfb;
qf_disk = qfa;
} else {
qf_mem = qfa;
qf_disk = qfb;
}
qf_iterator_from_position(qf_disk, &qfi, 0);
do {
uint64_t key = 0, value = 0, count = 0;
uint64_t count_mem;
qfi_get_hash(&qfi, &key, &value, &count);
if ((count_mem = qf_count_key_value(qf_mem, key, 0, QF_KEY_IS_HASH)) > 0) {
acc += count*count_mem;
}
} while (!qfi_next(&qfi));
return acc;
}
/* Compute the intersection of two QFs: every hash present in both inputs is
 * inserted into qfr with the count it carries in the larger input. */
void qf_intersect(const QF *qfa, const QF *qfb, QF *qfr)
{
QFi qfi;
const QF *qf_mem, *qf_disk;
if (qfa->metadata->hash_mode != qfr->metadata->hash_mode &&
qfa->metadata->seed != qfr->metadata->seed &&
qfb->metadata->hash_mode != qfr->metadata->hash_mode &&
qfb->metadata->seed != qfr->metadata->seed) {
fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n");
exit(1);
}
// create the iterator on the larger QF.
if (qfa->metadata->total_size_in_bytes > qfb->metadata->total_size_in_bytes)
{
qf_mem = qfb;
qf_disk = qfa;
} else {
qf_mem = qfa;
qf_disk = qfb;
}
qf_iterator_from_position(qf_disk, &qfi, 0);
do {
uint64_t key = 0, value = 0, count = 0;
qfi_get_hash(&qfi, &key, &value, &count);
if (qf_count_key_value(qf_mem, key, 0, QF_KEY_IS_HASH) > 0)
qf_insert(qfr, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH);
} while (!qfi_next(&qfi));
}
| 5c7769aefdfdf5c258bba846a0fa2304ae88ee65.cu | /*
* ============================================================================
*
* Authors: Prashant Pandey <[email protected]>
* Rob Johnson <[email protected]>
* Hunter McCoy <[email protected]>
*
* ============================================================================
*/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
//timing stuff
#include <chrono>
#include <iostream>
#include <cmath>
//how fast is a thrust sort?
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/fill.h>
#include <thrust/memory.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include "hashutil.cuh"
#include "gqf.cuh"
#include "gqf_int.cuh"
#include <stdexcept>
#include <cuda_profiler_api.h>
/******************************************************************
* Code for managing the metadata bits and slots w/o interpreting *
* the content of the slots.
******************************************************************/
#define MAX_VALUE(nbits) ((1ULL << (nbits)) - 1)
#define BITMASK(nbits) \
((nbits) == 64 ? 0xffffffffffffffff : MAX_VALUE(nbits))
#define NUM_SLOTS_TO_LOCK (1ULL<<13)
#define LOCK_DIST 64
#define EXP_BEFORE_FAILURE -15
#define CLUSTER_SIZE (1ULL<<14)
#define METADATA_WORD(qf,field,slot_index) \
(get_block((qf), (slot_index) / QF_SLOTS_PER_BLOCK)->field[((slot_index) % QF_SLOTS_PER_BLOCK) / 64])
#define GET_NO_LOCK(flag) (flag & QF_NO_LOCK)
#define GET_TRY_ONCE_LOCK(flag) (flag & QF_TRY_ONCE_LOCK)
#define GET_WAIT_FOR_LOCK(flag) (flag & QF_WAIT_FOR_LOCK)
#define GET_KEY_HASH(flag) (flag & QF_KEY_IS_HASH)
#define NUM_BUFFERS 10
#define MAX_BUFFER_SIZE 100
#define CYCLES_PER_SECOND 1601000000
#define MAX_DEPTH 16
#define SELECT_BOUND 32
#define DEBUG_ASSERTS 0
#define DROP_ON_RUNEND 0
#define RUNEND_CUTOFF 15
#define DROP_ON_BIG_CLUSTER 0
#define BIG_CLUSTER_DROPOFF 4096
#define DISTANCE_FROM_HOME_SLOT_CUTOFF 1000
#define BILLION 1000000000L
#define CUDA_CHECK(ans) \
gpuAssert((ans), __FILE__, __LINE__);
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
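/* Usage: wrap CUDA runtime calls, e.g. CUDA_CHECK(cudaMalloc(&buf, nbytes));
 * on failure the error string, file, and line are printed and the program exits. */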
__constant__ char kmer_vals[6] = {'F', 'A', 'C', 'T', 'G', '0'};
#ifdef DEBUG
#define PRINT_DEBUG 1
#else
#define PRINT_DEBUG 0
#endif
#define DEBUG_CQF(fmt, ...) \
do { if (PRINT_DEBUG) printf( fmt, __VA_ARGS__); } while (0)
#define DEBUG_DUMP(qf) \
do { if (PRINT_DEBUG) qf_dump_metadata(qf); } while (0)
#if QF_BITS_PER_SLOT > 0
__host__ __device__ static inline qfblock* get_block(const QF* qf, uint64_t block_index)
{
return &qf->blocks[block_index];
}
#else
__host__ __device__ static inline qfblock* get_block(const QF* qf, uint64_t block_index)
{
return (qfblock*)(((char*)qf->blocks)
+ block_index * (sizeof(qfblock) + QF_SLOTS_PER_BLOCK *
qf->metadata->bits_per_slot / 8));
}
#endif
/*
__device__ static __inline__ unsigned long long rdtsc(void)
{
unsigned hi, lo;
__asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
}
*/
/*
__host__ __device__ static void modify_metadata(pc_t *metadata, int cnt)
{
pc_add(metadata, cnt);
return;
}
*/
/* Register-size constraints for the inline PTX below follow
 * https://docs.nvidia.com/cuda/inline-ptx-assembly/index.html
 * (the "l" constraint denotes a .u64 register). */
__host__ __device__ static inline int popcnt(uint64_t val)
{
#ifdef __CUDA_ARCH__
val = __popcll(val);
#else
#ifndef __x86_64
val = __builtin_popcount(val);
#else
asm("popcnt %[val], %[val]"
: [val] "+r" (val)
:
: "cc");
#endif
#endif
return val;
}
// __device__ static inline int64_t bitscanreverse(uint64_t val)
// {
// if (val == 0) {
// return -1;
// } else {
// asm("bsr %[val], %[val]"
// : [val] "+l" (val)
// :
// : );
// return val;
// }
// }
__host__ __device__ static inline int popcntv(const uint64_t val, int ignore)
{
if (ignore % 64)
return popcnt (val & ~BITMASK(ignore % 64));
else
return popcnt(val);
}
// Returns the number of 1s up to (and including) the pos'th bit
// Bits are numbered from 0
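// e.g. bitrank(0b01011, 2) == 2: bits 0 and 1 are set, bit 2 is not.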
__host__ __device__ static inline int bitrank(uint64_t val, int pos) {
val = val & ((2ULL << pos) - 1);
#ifdef __CUDA_ARCH__
val = __popcll(val);
#else
//quick fix for summit
#ifndef __x86_64
val = __builtin_popcount(val);
#else
asm("popcnt %[val], %[val]"
: [val] "+r" (val)
:
: "cc");
#endif
#endif
return val;
}
//moved dump functions
__host__ __device__ static inline void qf_dump_block(const QF *qf, uint64_t i)
{
uint64_t j;
printf("Block %llu Runs from %llu to %llu\n",i, i*QF_SLOTS_PER_BLOCK, (i+1)*QF_SLOTS_PER_BLOCK);
printf("Offset: %-192d", get_block(qf, i)->offset);
printf("\n");
for (j = 0; j < QF_SLOTS_PER_BLOCK; j++)
printf("%02lx ", j);
printf("\n");
for (j = 0; j < QF_SLOTS_PER_BLOCK; j++)
printf(" %d ", (get_block(qf, i)->occupieds[j/64] & (1ULL << (j%64))) ? 1 : 0);
printf("\n");
for (j = 0; j < QF_SLOTS_PER_BLOCK; j++)
printf(" %d ", (get_block(qf, i)->runends[j/64] & (1ULL << (j%64))) ? 1 : 0);
printf("\n");
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32
for (j = 0; j < QF_SLOTS_PER_BLOCK; j++)
printf("%02x ", get_block(qf, i)->slots[j]);
#elif QF_BITS_PER_SLOT == 64
for (j = 0; j < QF_SLOTS_PER_BLOCK; j++)
printf("%02lx ", get_block(qf, i)->slots[j]);
#else
for (j = 0; j < QF_SLOTS_PER_BLOCK * qf->metadata->bits_per_slot / 8; j++)
printf("%02x ", get_block(qf, i)->slots[j]);
#endif
printf("\n");
printf("\n");
}
__host__ __device__ void qf_dump_metadata(const QF *qf) {
printf("Slots: %lu Occupied: %lu Elements: %lu Distinct: %lu\n",
qf->metadata->nslots,
qf->metadata->noccupied_slots,
qf->metadata->nelts,
qf->metadata->ndistinct_elts);
printf("Key_bits: %lu Value_bits: %lu Remainder_bits: %lu Bits_per_slot: %lu\n",
qf->metadata->key_bits,
qf->metadata->value_bits,
qf->metadata->key_remainder_bits,
qf->metadata->bits_per_slot);
}
__host__ __device__ void qf_dump(const QF *qf)
{
uint64_t i;
printf("%lu %lu %lu\n",
qf->metadata->nblocks,
qf->metadata->ndistinct_elts,
qf->metadata->nelts);
for (i = 0; i < qf->metadata->nblocks; i++) {
qf_dump_block(qf, i);
}
}
/**
* Returns the position of the k-th 1 in the 64-bit word x.
* k is 0-based, so k=0 returns the position of the first 1.
*
* Uses the broadword selection algorithm by Vigna [1], improved by Gog
* and Petri [2] and Vigna [3].
*
* [1] Sebastiano Vigna. Broadword Implementation of Rank/Select
* Queries. WEA, 2008
*
* [2] Simon Gog, Matthias Petri. Optimized succinct data
* structures for massive data. Softw. Pract. Exper., 2014
*
* [3] Sebastiano Vigna. MG4J 5.2.1. http://mg4j.di.unimi.it/
* The following code is taken from
* https://github.com/facebook/folly/blob/b28186247104f8b90cfbe094d289c91f9e413317/folly/experimental/Select64.h
*/
__device__ __constant__ uint8_t gpukSelectInByte[2048] = {
8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0,
1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0,
2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0,
1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0,
3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0,
1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0,
2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0,
1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0,
1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 8, 8, 8, 1,
8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2,
2, 1, 8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1,
4, 3, 3, 1, 3, 2, 2, 1, 8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4,
4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1,
3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 7, 7, 1, 7, 2,
2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3,
3, 1, 3, 2, 2, 1, 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1,
4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2,
2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 8, 8, 8, 8, 8, 8, 2,
8, 8, 8, 3, 8, 3, 3, 2, 8, 8, 8, 4, 8, 4, 4, 2, 8, 4, 4, 3, 4, 3, 3, 2, 8, 8,
8, 5, 8, 5, 5, 2, 8, 5, 5, 3, 5, 3, 3, 2, 8, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3,
4, 3, 3, 2, 8, 8, 8, 6, 8, 6, 6, 2, 8, 6, 6, 3, 6, 3, 3, 2, 8, 6, 6, 4, 6, 4,
4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 8, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2,
6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 7, 8, 7, 7, 2, 8, 7,
7, 3, 7, 3, 3, 2, 8, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 5,
7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2, 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3,
3, 2, 8, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2, 7, 6, 6, 4, 6, 4, 4, 2,
6, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5,
5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 3, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 8, 4, 4, 3, 8, 8, 8, 8, 8, 8,
8, 5, 8, 8, 8, 5, 8, 5, 5, 3, 8, 8, 8, 5, 8, 5, 5, 4, 8, 5, 5, 4, 5, 4, 4, 3,
8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 3, 8, 8, 8, 6, 8, 6, 6, 4, 8, 6,
6, 4, 6, 4, 4, 3, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 3, 8, 6, 6, 5,
6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7,
7, 3, 8, 8, 8, 7, 8, 7, 7, 4, 8, 7, 7, 4, 7, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 5,
8, 7, 7, 5, 7, 5, 5, 3, 8, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3, 8, 8,
8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 3, 8, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4,
6, 4, 4, 3, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3, 7, 6, 6, 5, 6, 5,
5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 4, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6,
6, 4, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 6, 8, 6, 6, 5,
8, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8,
8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7,
8, 7, 7, 5, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 4, 8, 8, 8, 8, 8, 8,
8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 4,
8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6,
6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6,
8, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7,
8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8,
8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6,
6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7
};
// const uint8_t hostkSelectInByte[2048] = {
// 8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0,
// 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0,
// 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0,
// 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0,
// 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0,
// 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0,
// 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0,
// 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
// 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0,
// 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 8, 8, 8, 1,
// 8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2,
// 2, 1, 8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1,
// 4, 3, 3, 1, 3, 2, 2, 1, 8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4,
// 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1,
// 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 7, 7, 1, 7, 2,
// 2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
// 7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3,
// 3, 1, 3, 2, 2, 1, 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1,
// 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2,
// 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 8, 8, 8, 8, 8, 8, 2,
// 8, 8, 8, 3, 8, 3, 3, 2, 8, 8, 8, 4, 8, 4, 4, 2, 8, 4, 4, 3, 4, 3, 3, 2, 8, 8,
// 8, 5, 8, 5, 5, 2, 8, 5, 5, 3, 5, 3, 3, 2, 8, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3,
// 4, 3, 3, 2, 8, 8, 8, 6, 8, 6, 6, 2, 8, 6, 6, 3, 6, 3, 3, 2, 8, 6, 6, 4, 6, 4,
// 4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 8, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2,
// 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 7, 8, 7, 7, 2, 8, 7,
// 7, 3, 7, 3, 3, 2, 8, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 5,
// 7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2, 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3,
// 3, 2, 8, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2, 7, 6, 6, 4, 6, 4, 4, 2,
// 6, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5,
// 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 3, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 8, 4, 4, 3, 8, 8, 8, 8, 8, 8,
// 8, 5, 8, 8, 8, 5, 8, 5, 5, 3, 8, 8, 8, 5, 8, 5, 5, 4, 8, 5, 5, 4, 5, 4, 4, 3,
// 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 3, 8, 8, 8, 6, 8, 6, 6, 4, 8, 6,
// 6, 4, 6, 4, 4, 3, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 3, 8, 6, 6, 5,
// 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7,
// 7, 3, 8, 8, 8, 7, 8, 7, 7, 4, 8, 7, 7, 4, 7, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 5,
// 8, 7, 7, 5, 7, 5, 5, 3, 8, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3, 8, 8,
// 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 3, 8, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4,
// 6, 4, 4, 3, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3, 7, 6, 6, 5, 6, 5,
// 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 4, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6,
// 6, 4, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 6, 8, 6, 6, 5,
// 8, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8,
// 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7,
// 8, 7, 7, 5, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 4, 8, 8, 8, 8, 8, 8,
// 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 4,
// 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6,
// 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6,
// 8, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7,
// 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8,
// 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6,
// 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7
// };
__host__ __device__ static inline uint64_t _select64(uint64_t x, int k)
{
if (k >= popcnt(x)) { return 64; }
const uint64_t kOnesStep4 = 0x1111111111111111ULL;
const uint64_t kOnesStep8 = 0x0101010101010101ULL;
const uint64_t kMSBsStep8 = 0x80ULL * kOnesStep8;
uint64_t s = x;
s = s - ((s & 0xA * kOnesStep4) >> 1);
s = (s & 0x3 * kOnesStep4) + ((s >> 2) & 0x3 * kOnesStep4);
s = (s + (s >> 4)) & 0xF * kOnesStep8;
uint64_t byteSums = s * kOnesStep8;
uint64_t kStep8 = k * kOnesStep8;
uint64_t geqKStep8 = (((kStep8 | kMSBsStep8) - byteSums) & kMSBsStep8);
uint64_t place = popcnt(geqKStep8) * 8;
uint64_t byteRank = k - (((byteSums << 8) >> place) & (uint64_t)(0xFF));
#ifdef __CUDA_ARCH__
return place + gpukSelectInByte[((x >> place) & 0xFF) | (byteRank << 8)];
#else
abort();
return 0;
//return place + hostkSelectInByte[((x >> place) & 0xFF) | (byteRank << 8)];
#endif // __CUDA_ARCH__
}
// Returns the position of the rank'th 1. (rank = 0 returns the 1st 1)
// Returns 64 if there are fewer than rank+1 1s.
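// e.g. bitselect(0b01011, 2) == 3: the set bits are at positions 0, 1, and 3.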
__host__ __device__ static inline uint64_t bitselect(uint64_t val, int rank) {
#ifdef __SSE4_2_
uint64_t i = 1ULL << rank;
asm("pdep %[val], %[mask], %[val]"
: [val] "+r" (val)
: [mask] "r" (i));
asm("tzcnt %[bit], %[index]"
: [index] "=r" (i)
: [bit] "g" (val)
: "cc");
return i;
#endif
return _select64(val, rank);
}
__host__ __device__ static inline uint64_t bitselectv(const uint64_t val, int ignore, int rank)
{
return bitselect(val & ~BITMASK(ignore % 64), rank);
}
__host__ __device__ static inline int is_runend(const QF *qf, uint64_t index)
{
return (METADATA_WORD(qf, runends, index) >> ((index % QF_SLOTS_PER_BLOCK) %
64)) & 1ULL;
}
__host__ __device__ static inline int is_occupied(const QF *qf, uint64_t index)
{
return (METADATA_WORD(qf, occupieds, index) >> ((index % QF_SLOTS_PER_BLOCK) %
64)) & 1ULL;
}
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index)
{
//ERR: Index passed in is incorrect
//printf("slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
return get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[index % QF_SLOTS_PER_BLOCK];
}
__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value)
{
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[index % QF_SLOTS_PER_BLOCK] =
value & BITMASK(qf->metadata->bits_per_slot);
}
#elif QF_BITS_PER_SLOT > 0
/* Little-endian code .... Big-endian is TODO */
__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index)
{
/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
* to generate buggy code. :/ */
//printf("Other get slot: slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
uint64_t *p = (uint64_t *)&get_block(qf, index /
QF_SLOTS_PER_BLOCK)->slots[(index %
QF_SLOTS_PER_BLOCK)
* QF_BITS_PER_SLOT / 8];
return (uint64_t)(((*p) >> (((index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT) %
8)) & BITMASK(QF_BITS_PER_SLOT));
}
__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value)
{
/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
* to generate buggy code. :/ */
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
uint64_t *p = (uint64_t *)&get_block(qf, index /
QF_SLOTS_PER_BLOCK)->slots[(index %
QF_SLOTS_PER_BLOCK)
* QF_BITS_PER_SLOT / 8];
uint64_t t = *p;
uint64_t mask = BITMASK(QF_BITS_PER_SLOT);
uint64_t v = value;
int shift = ((index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT) % 8;
mask <<= shift;
v <<= shift;
t &= ~mask;
t |= v;
*p = t;
}
#else
/* Little-endian code .... Big-endian is TODO */
__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index)
{
//printf("Third get slot?!? slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
* to generate buggy code. :/ */
uint64_t *p = (uint64_t *)&get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[(index %QF_SLOTS_PER_BLOCK)* qf->metadata->bits_per_slot / 8];
return (uint64_t)(((*p) >> (((index % QF_SLOTS_PER_BLOCK) *qf->metadata->bits_per_slot) % 8)) & BITMASK(qf->metadata->bits_per_slot));
}
__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value)
{
#if DEBUG_ASSERTS
assert(index < qf->metadata->xnslots);
#endif
/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
* to generate buggy code. :/ */
uint64_t *p = (uint64_t *)&get_block(qf, index /QF_SLOTS_PER_BLOCK)->slots[(index %QF_SLOTS_PER_BLOCK)* qf->metadata->bits_per_slot / 8];
uint64_t t = *p;
uint64_t mask = BITMASK(qf->metadata->bits_per_slot);
uint64_t v = value;
int shift = ((index % QF_SLOTS_PER_BLOCK) * qf->metadata->bits_per_slot) % 8;
mask <<= shift;
v <<= shift;
t &= ~mask;
t |= v;
*p = t;
}
#endif
__host__ __device__ static inline uint64_t run_end(const QF *qf, uint64_t hash_bucket_index);
__host__ __device__ static inline uint64_t block_offset(const QF *qf, uint64_t blockidx)
{
/* If we have extended counters and a 16-bit (or larger) offset
field, then we can safely ignore the possibility of overflowing
that field. */
if (sizeof(qf->blocks[0].offset) > 1 ||
get_block(qf, blockidx)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
return get_block(qf, blockidx)->offset;
return run_end(qf, QF_SLOTS_PER_BLOCK * blockidx - 1) - QF_SLOTS_PER_BLOCK *
blockidx + 1;
}
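/* Return the index of the slot holding the runend bit of the run that belongs to
 * hash_bucket_index; if that bucket has no run of its own, this is the end of the
 * last run starting before it, clamped to be at least hash_bucket_index. */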
__host__ __device__ static inline uint64_t run_end(const QF *qf, uint64_t hash_bucket_index)
{
uint64_t bucket_block_index = hash_bucket_index / QF_SLOTS_PER_BLOCK;
uint64_t bucket_intrablock_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
uint64_t bucket_blocks_offset = block_offset(qf, bucket_block_index);
uint64_t bucket_intrablock_rank = bitrank(get_block(qf, bucket_block_index)->occupieds[0], bucket_intrablock_offset);
if (bucket_intrablock_rank == 0) {
if (bucket_blocks_offset <= bucket_intrablock_offset)
return hash_bucket_index;
else
return QF_SLOTS_PER_BLOCK * bucket_block_index + bucket_blocks_offset - 1;
}
uint64_t runend_block_index = bucket_block_index + bucket_blocks_offset /
QF_SLOTS_PER_BLOCK;
uint64_t runend_ignore_bits = bucket_blocks_offset % QF_SLOTS_PER_BLOCK;
uint64_t runend_rank = bucket_intrablock_rank - 1;
uint64_t runend_block_offset = bitselectv(get_block(qf,
runend_block_index)->runends[0],
runend_ignore_bits, runend_rank);
if (runend_block_offset == QF_SLOTS_PER_BLOCK) {
if (bucket_blocks_offset == 0 && bucket_intrablock_rank == 0) {
/* The block begins in empty space, and this bucket is in that region of
* empty space */
return hash_bucket_index;
} else {
do {
runend_rank -= popcntv(get_block(qf,
runend_block_index)->runends[0],
runend_ignore_bits);
runend_block_index++;
runend_ignore_bits = 0;
runend_block_offset = bitselectv(get_block(qf,
runend_block_index)->runends[0],
runend_ignore_bits, runend_rank);
} while (runend_block_offset == QF_SLOTS_PER_BLOCK);
}
}
uint64_t runend_index = QF_SLOTS_PER_BLOCK * runend_block_index +
runend_block_offset;
if (runend_index < hash_bucket_index)
return hash_bucket_index;
else
return runend_index;
}
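/* Lower bound on the number of slots at or after slot_index that are still owed
 * to runs whose buckets lie at or before it; a return value of 0 means
 * slot_index is definitely empty. */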
__host__ __device__ static inline int offset_lower_bound(const QF *qf, uint64_t slot_index)
{
const qfblock * b = get_block(qf, slot_index / QF_SLOTS_PER_BLOCK);
const uint64_t slot_offset = slot_index % QF_SLOTS_PER_BLOCK;
const uint64_t boffset = b->offset;
const uint64_t occupieds = b->occupieds[0] & BITMASK(slot_offset+1);
//printf("slot %llu, slot_offset %02lx, block offset %llu, occupieds: %d ", slot_index, slot_offset, boffset, popcnt(occupieds));
#if DEBUG_ASSERTS
assert(QF_SLOTS_PER_BLOCK == 64);
#endif
//if (boffset < slot_offset) {
if (boffset <= slot_offset) {
const uint64_t runends = (b->runends[0] & BITMASK(slot_offset)) >> boffset;
//printf(" runends %d\n", popcnt(runends));
//printf("boffset < slot_offset, runends %llu, popcnt(occupieds) %d, popcnt(runends) %d\n", runends, popcnt(occupieds), popcnt(runends));
//printf("returning %d\n", popcnt(occupieds)-popcnt(runends));
return popcnt(occupieds) - popcnt(runends);
}
//printf("\n");
//printf("boffset > slot_offset, boffset-slotoffset %llu, popcnt(occupieds) %d\n", boffset-slot_offset, popcnt(occupieds));
//printf("returning %d\n", boffset-slot_offset+popcnt(occupieds));
return boffset - slot_offset + popcnt(occupieds);
}
__host__ __device__ static inline int is_empty(const QF *qf, uint64_t slot_index)
{
return offset_lower_bound(qf, slot_index) == 0;
}
__host__ __device__ static inline int might_be_empty(const QF *qf, uint64_t slot_index)
{
return !is_occupied(qf, slot_index)
&& !is_runend(qf, slot_index);
}
// __device__ static inline int probably_is_empty(const QF *qf, uint64_t slot_index)
// {
// return get_slot(qf, slot_index) == 0
// && !is_occupied(qf, slot_index)
// && !is_runend(qf, slot_index);
// }
//static inlines were re-added, should
__host__ __device__ uint64_t static inline find_first_empty_slot(QF *qf, uint64_t from)
{
uint64_t start_from = from;
do {
int t = offset_lower_bound(qf, from);
//get block of from
// if (t < 0){
// //this implies a failure in the code - you are going to
// find_first_empty_slot_verbose(qf, start_from);
// }
//this assert breaks testing as we can't query the last slot for the next slot
//this throws an assertion, instead we want to throw an out of range exception
//that can be captured to finalize the test instead.
#if DEBUG_ASSERTS
assert(t>=0);
#endif
//the exception-based check cannot run in device code, so the assert has to stay here;
//a host-exclusive variant (host_debug_find_first_empty_slot below) does the throwing instead.
//if (t < 0) throw std::out_of_range("next free slot is before current slot, either final slot or gqf corruption.\n");
if (t == 0)
break;
from = from + t;
} while(1);
uint64_t bucket_start_from = start_from/NUM_SLOTS_TO_LOCK;
uint64_t end_start_from = from/NUM_SLOTS_TO_LOCK;
//testing without this gate to check if we see speed improvements
// if (end_start_from>bucket_start_from+1){
// //return -1;
// printf("Find first empty ran over a bucket: %llu\n", end_start_from-bucket_start_from);
// }
return from;
}
__host__ __device__ uint64_t first_empty_slot_wrapper(QF * qf, uint64_t from){
return find_first_empty_slot(qf, from);
}
//exact same function as above, but forced to be host exclusive so that a try_catch statement in cluster counting will succeed.
__host__ uint64_t host_debug_find_first_empty_slot(QF *qf, uint64_t from)
{
uint64_t start_from = from;
do {
int t = offset_lower_bound(qf, from);
//get block of from
// if (t < 0){
// //this implies a failure in the code - you are going to
// find_first_empty_slot_verbose(qf, start_from);
// }
//this assert breaks testing as we can't query the last slot for the next slot
//this throws an assertion, instead we want to throw an out of range exception
//that can be captured to finalize the test instead.
//assert(t>=0);
//assert must happen, checks cannot happen in device code
//alternate version must exist that is host exclusive.
if (t < 0) throw std::out_of_range("next free slot is before current slot, either final slot or gqf corruption.\n");
if (t == 0)
break;
from = from + t;
} while(1);
uint64_t bucket_start_from = start_from/NUM_SLOTS_TO_LOCK;
uint64_t end_start_from = from/NUM_SLOTS_TO_LOCK;
//testing without this gate to check if we see speed improvements
if (end_start_from>bucket_start_from+1){
printf("Find first empty ran over a bucket: %llu\n", end_start_from-bucket_start_from);
}
return from;
}
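/* Shift the bits of b that lie in [bstart, bend) left by `amount`, keeping the
 * bits outside that window unchanged; when bstart == 0 the vacated low bits are
 * filled from the top `amount` bits of a (the preceding word). */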
__host__ __device__ static inline uint64_t shift_into_b(const uint64_t a, const uint64_t b,
const int bstart, const int bend,
const int amount)
{
const uint64_t a_component = bstart == 0 ? (a >> (64 - amount)) : 0;
const uint64_t b_shifted_mask = BITMASK(bend - bstart) << bstart;
const uint64_t b_shifted = ((b_shifted_mask & b) << amount) & b_shifted_mask;
const uint64_t b_mask = ~b_shifted_mask;
return a_component | b_shifted | (b & b_mask);
}
// __device__ void* gpu_memmove(void* dst, const void* src, size_t n)
// {
// //printf("Launching memmove\n");
// //todo: allocate space per thread for this buffer before launching the kernel
// void* temp_buffer = malloc(n);
// //maybe stack allocation?
// //void* temp_buffer = void* char[n];
// // cudaMemcpyAsync(temp_buffer, src, n, cudaMemcpyDeviceToDevice);
// // cudaMemcpyAsync(dst, temp_buffer, n, cudaMemcpyDeviceToDevice);
// // //cudaFree(temp_buffer);
// // return dst;
// memcpy(temp_buffer, src, n);
// memcpy(dst, temp_buffer, n);
// free(temp_buffer);
// }
//a variant of memmove that compares the two pointers
__device__ void gpu_memmove(void* dst, const void* src, size_t n)
{
//printf("Launching memmove\n");
//todo: allocate space per thread for this buffer before launching the kernel
char * char_dst = (char *) dst;
char * char_src = (char *) src;
//double check this,
//think it is just > since dst+n does not get copied
if (char_src+n > char_dst){
//copy backwards
for (int i =n-1; i >= 0; i--){
char_dst[i] = char_src[i];
}
} else {
//copy regular
for (int i =0; i<n; i++){
char_dst[i] = char_src[i];
}
}
//free(temp_buffer);
}
//warp-cooperative variant of gpu_memmove: the 32 lanes of a warp stride the copy, with warpID giving this thread's lane index
__device__ void gpu_memmove_cooperative(void* dst, const void* src, size_t n, int warpID)
{
//printf("Launching memmove\n");
//todo: allocate space per thread for this buffer before launching the kernel
char * char_dst = (char *) dst;
char * char_src = (char *) src;
//double check this,
//think it is just > since dst+n does not get copied
if (char_src+n > char_dst){
//copy backwards
for (int i =n-1-warpID; i >= 0; i-=32){
char_dst[i] = char_src[i];
}
} else {
//copy regular
for (int i =warpID; i<n; i+=32){
char_dst[i] = char_src[i];
}
}
//free(temp_buffer);
}
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
__host__ __device__ static inline void shift_remainders(QF *qf, uint64_t start_index, uint64_t
empty_index)
{
uint64_t start_block = start_index / QF_SLOTS_PER_BLOCK;
uint64_t start_offset = start_index % QF_SLOTS_PER_BLOCK;
uint64_t empty_block = empty_index / QF_SLOTS_PER_BLOCK;
uint64_t empty_offset = empty_index % QF_SLOTS_PER_BLOCK;
#if DEBUG_ASSERTS
assert (start_index <= empty_index);
assert (empty_index < qf->metadata->xnslots);
#endif
while (start_block < empty_block) {
#ifdef __CUDA_ARCH__
gpu_memmove(&get_block(qf, empty_block)->slots[1],
&get_block(qf, empty_block)->slots[0],
empty_offset * sizeof(qf->blocks[0].slots[0]));
#else
memmove(&get_block(qf, empty_block)->slots[1],
&get_block(qf, empty_block)->slots[0],
empty_offset * sizeof(qf->blocks[0].slots[0]));
#endif
get_block(qf, empty_block)->slots[0] = get_block(qf,
empty_block-1)->slots[QF_SLOTS_PER_BLOCK-1];
empty_block--;
empty_offset = QF_SLOTS_PER_BLOCK-1;
}
#ifdef __CUDA_ARCH__
gpu_memmove(&get_block(qf, empty_block)->slots[start_offset + 1],
&get_block(qf, empty_block)->slots[start_offset],
(empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]));
#else
memmove(&get_block(qf, empty_block)->slots[start_offset+1],
&get_block(qf, empty_block)->slots[start_offset],
(empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]));
#endif
}
__device__ static inline void shift_remainders_cooperative(QF *qf, uint64_t start_index, uint64_t
empty_index, int warpID)
{
uint64_t start_block = start_index / QF_SLOTS_PER_BLOCK;
uint64_t start_offset = start_index % QF_SLOTS_PER_BLOCK;
uint64_t empty_block = empty_index / QF_SLOTS_PER_BLOCK;
uint64_t empty_offset = empty_index % QF_SLOTS_PER_BLOCK;
#if DEBUG_ASSERTS
assert (start_index <= empty_index);
assert (empty_index < qf->metadata->xnslots);
#endif
while (start_block < empty_block) {
gpu_memmove_cooperative(&get_block(qf, empty_block)->slots[1],
&get_block(qf, empty_block)->slots[0],
empty_offset * sizeof(qf->blocks[0].slots[0]), warpID);
get_block(qf, empty_block)->slots[0] = get_block(qf,
empty_block-1)->slots[QF_SLOTS_PER_BLOCK-1];
empty_block--;
empty_offset = QF_SLOTS_PER_BLOCK-1;
}
gpu_memmove_cooperative(&get_block(qf, empty_block)->slots[start_offset + 1],
&get_block(qf, empty_block)->slots[start_offset],
(empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]), warpID);
}
#else
#define REMAINDER_WORD(qf, i) ((uint64_t *)&(get_block(qf, (i)/qf->metadata->bits_per_slot)->slots[8 * ((i) % qf->metadata->bits_per_slot)]))
__host__ __device__ static inline void shift_remainders(QF *qf, const uint64_t start_index, const
uint64_t empty_index)
{
uint64_t last_word = (empty_index + 1) * qf->metadata->bits_per_slot / 64;
const uint64_t first_word = start_index * qf->metadata->bits_per_slot / 64;
int bend = ((empty_index + 1) * qf->metadata->bits_per_slot) % 64;
const int bstart = (start_index * qf->metadata->bits_per_slot) % 64;
while (last_word != first_word) {
*REMAINDER_WORD(qf, last_word) = shift_into_b(*REMAINDER_WORD(qf, last_word-1),
*REMAINDER_WORD(qf, last_word),
0, bend, qf->metadata->bits_per_slot);
last_word--;
bend = 64;
}
*REMAINDER_WORD(qf, last_word) = shift_into_b(0, *REMAINDER_WORD(qf,
last_word),
bstart, bend,
qf->metadata->bits_per_slot);
}
#endif
__host__ __device__ static inline void find_next_n_empty_slots(QF *qf, uint64_t from, uint64_t n,
uint64_t *indices)
{
while (n) {
indices[--n] = find_first_empty_slot(qf, from);
from = indices[n] + 1;
}
}
__host__ __device__ static inline void shift_slots(QF *qf, int64_t first, uint64_t last, uint64_t
distance)
{
int64_t i;
if (distance == 1)
shift_remainders(qf, first, last+1);
else
for (i = last; i >= first; i--)
set_slot(qf, i + distance, get_slot(qf, i));
}
__host__ __device__ static inline void shift_runends(QF *qf, int64_t first, uint64_t last,
uint64_t distance)
{
#if DEBUG_ASSERTS
assert(last < qf->metadata->xnslots && distance < 64);
#endif
uint64_t first_word = first / 64;
uint64_t bstart = first % 64;
uint64_t last_word = (last + distance + 1) / 64;
uint64_t bend = (last + distance + 1) % 64;
if (last_word != first_word) {
METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(METADATA_WORD(qf, runends, 64*(last_word-1)),
METADATA_WORD(qf, runends, 64*last_word),
0, bend, distance);
bend = 64;
last_word--;
while (last_word != first_word) {
METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(METADATA_WORD(qf, runends, 64*(last_word-1)),
METADATA_WORD(qf, runends, 64*last_word),
0, bend, distance);
last_word--;
}
}
METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(0, METADATA_WORD(qf,
runends,
64*last_word),
bstart, bend, distance);
}
__host__ __device__ static inline bool insert_replace_slots_and_shift_remainders_and_runends_and_offsets(QF *qf,
int operation,
uint64_t bucket_index,
uint64_t overwrite_index,
const uint64_t *remainders,
uint64_t total_remainders,
uint64_t noverwrites)
{
uint64_t empties[67];
uint64_t i;
int64_t j;
int64_t ninserts = total_remainders - noverwrites;
uint64_t insert_index = overwrite_index + noverwrites;
if (ninserts > 0) {
/* First, shift things to create n empty spaces where we need them. */
find_next_n_empty_slots(qf, insert_index, ninserts, empties);
if (empties[0] >= qf->metadata->xnslots) {
return false;
}
for (j = 0; j < ninserts - 1; j++)
shift_slots(qf, empties[j+1] + 1, empties[j] - 1, j + 1);
shift_slots(qf, insert_index, empties[ninserts - 1] - 1, ninserts);
for (j = 0; j < ninserts - 1; j++)
shift_runends(qf, empties[j+1] + 1, empties[j] - 1, j + 1);
shift_runends(qf, insert_index, empties[ninserts - 1] - 1, ninserts);
for (i = noverwrites; i < total_remainders - 1; i++)
METADATA_WORD(qf, runends, overwrite_index + i) &= ~(1ULL <<
(((overwrite_index
+ i) %
QF_SLOTS_PER_BLOCK)
% 64));
switch (operation) {
case 0: /* insert into empty bucket */
#if DEBUG_ASSERTS
assert (noverwrites == 0);
#endif
METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |=
1ULL << (((overwrite_index + total_remainders - 1) %
QF_SLOTS_PER_BLOCK) % 64);
break;
case 1: /* append to bucket */
METADATA_WORD(qf, runends, overwrite_index + noverwrites - 1) &=
~(1ULL << (((overwrite_index + noverwrites - 1) % QF_SLOTS_PER_BLOCK) %
64));
METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |=
1ULL << (((overwrite_index + total_remainders - 1) %
QF_SLOTS_PER_BLOCK) % 64);
break;
case 2: /* insert into bucket */
METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) &=
~(1ULL << (((overwrite_index + total_remainders - 1) %
QF_SLOTS_PER_BLOCK) % 64));
break;
default:
printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
__threadfence(); // ensure store issued before trap
asm("trap;");
#else
abort();
#endif
}
uint64_t npreceding_empties = 0;
for (i = bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empties[0]/QF_SLOTS_PER_BLOCK; i++) {
while ((int64_t)npreceding_empties < ninserts &&
empties[ninserts - 1 - npreceding_empties] / QF_SLOTS_PER_BLOCK < i)
npreceding_empties++;
if (get_block(qf, i)->offset + ninserts - npreceding_empties < BITMASK(8*sizeof(qf->blocks[0].offset)))
get_block(qf, i)->offset += ninserts - npreceding_empties;
else
get_block(qf, i)->offset = (uint8_t) BITMASK(8*sizeof(qf->blocks[0].offset));
}
}
for (i = 0; i < total_remainders; i++)
set_slot(qf, overwrite_index + i, remainders[i]);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, ninserts);
return true;
}
__host__ __device__ static inline int remove_replace_slots_and_shift_remainders_and_runends_and_offsets(QF *qf,
int operation,
uint64_t bucket_index,
uint64_t overwrite_index,
const uint64_t *remainders,
uint64_t total_remainders,
uint64_t old_length)
{
uint64_t i;
// Update the slots
for (i = 0; i < total_remainders; i++)
set_slot(qf, overwrite_index + i, remainders[i]);
// If this is the last thing in its run, then we may need to set a new runend bit
if (is_runend(qf, overwrite_index + old_length - 1)) {
if (total_remainders > 0) {
// If we're not deleting this entry entirely, then it will still be the last entry in this run
METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |= 1ULL << ((overwrite_index + total_remainders - 1) % 64);
} else if (overwrite_index > bucket_index &&
!is_runend(qf, overwrite_index - 1)) {
// If we're deleting this entry entirely, but it is not the first entry in this run,
// then set the preceding entry to be the runend
METADATA_WORD(qf, runends, overwrite_index - 1) |= 1ULL << ((overwrite_index - 1) % 64);
}
}
// shift slots back one run at a time
uint64_t original_bucket = bucket_index;
uint64_t current_bucket = bucket_index;
uint64_t current_slot = overwrite_index + total_remainders;
uint64_t current_distance = old_length - total_remainders;
int ret_current_distance = current_distance;
while (current_distance > 0) {
if (is_runend(qf, current_slot + current_distance - 1)) {
do {
current_bucket++;
} while (current_bucket < current_slot + current_distance &&
!is_occupied(qf, current_bucket));
}
if (current_bucket <= current_slot) {
set_slot(qf, current_slot, get_slot(qf, current_slot + current_distance));
if (is_runend(qf, current_slot) !=
is_runend(qf, current_slot + current_distance))
METADATA_WORD(qf, runends, current_slot) ^= 1ULL << (current_slot % 64);
current_slot++;
} else if (current_bucket <= current_slot + current_distance) {
uint64_t i;
for (i = current_slot; i < current_slot + current_distance; i++) {
set_slot(qf, i, 0);
METADATA_WORD(qf, runends, i) &= ~(1ULL << (i % 64));
}
current_distance = current_slot + current_distance - current_bucket;
current_slot = current_bucket;
} else {
current_distance = 0;
}
}
// reset the occupied bit of the hash bucket index if the hash is the
// only item in the run and is removed completely.
if (operation && !total_remainders)
METADATA_WORD(qf, occupieds, bucket_index) &= ~(1ULL << (bucket_index % 64));
// update the offset bits.
// find the number of occupied slots in the original_bucket block.
// Then find the runend slot corresponding to the last run in the
// original_bucket block.
// Update the offset of the block to which it belongs.
uint64_t original_block = original_bucket / QF_SLOTS_PER_BLOCK;
if (old_length > total_remainders) { // we only update offsets if we shift/delete anything
while (1) {
uint64_t last_occupieds_hash_index = QF_SLOTS_PER_BLOCK * original_block + (QF_SLOTS_PER_BLOCK - 1);
uint64_t runend_index = run_end(qf, last_occupieds_hash_index);
// runend spans across the block
// update the offset of the next block
if (runend_index / QF_SLOTS_PER_BLOCK == original_block) { // if the run ends in the same block
if (get_block(qf, original_block + 1)->offset == 0)
break;
get_block(qf, original_block + 1)->offset = 0;
} else { // if the last run spans across the block
if (get_block(qf, original_block + 1)->offset == (runend_index - last_occupieds_hash_index))
break;
get_block(qf, original_block + 1)->offset = (runend_index - last_occupieds_hash_index);
}
original_block++;
}
}
//int num_slots_freed = old_length - total_remainders;
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, -num_slots_freed);
/*qf->metadata->noccupied_slots -= (old_length - total_remainders);*/
if (!total_remainders) {
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, -1);
/*qf->metadata->ndistinct_elts--;*/
}
return ret_current_distance;
}
/*****************************************************************************
* Code that uses the above to implement a QF with keys and inline counters. *
*****************************************************************************/
/*
Counter format:
0 xs: <empty string>
1 x: x
2 xs: xx
3 0s: 000
>2 xs: xbc...cx for x != 0, b < x, c != 0, x
>3 0s: 0c...c00 for c != 0
*/
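/* Worked example (tracing encode_counter / decode_counter below, 8-bit slots):
 * remainder 5 with count 7 is stored as the slots 5, 0, 6, 5. The outer 5s
 * delimit the counter, 6 encodes count-3 = 4 in base 2^8-2 with digits shifted
 * to avoid 0 and the remainder, and the 0 is inserted because that first digit
 * (6) is not smaller than the remainder. */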
__host__ __device__ static inline uint64_t *encode_counter(QF *qf, uint64_t remainder, uint64_t
counter, uint64_t *slots)
{
uint64_t digit = remainder;
uint64_t base = (1ULL << qf->metadata->bits_per_slot) - 1;
uint64_t *p = slots;
if (counter == 0)
return p;
*--p = remainder;
if (counter == 1)
return p;
if (counter == 2) {
*--p = remainder;
return p;
}
if (counter == 3 && remainder == 0) {
*--p = remainder;
*--p = remainder;
return p;
}
if (counter == 3 && remainder > 0) {
*--p = 0;
*--p = remainder;
return p;
}
if (remainder == 0)
*--p = remainder;
else
base--;
if (remainder)
counter -= 3;
else
counter -= 4;
do {
digit = counter % base;
digit++; /* Zero not allowed */
if (remainder && digit >= remainder)
digit++; /* Cannot overflow since digit is mod 2^r-2 */
*--p = digit;
counter /= base;
} while (counter);
if (remainder && digit >= remainder)
*--p = 0;
*--p = remainder;
return p;
}
/* Decodes the counter starting at index into *remainder and *count, and returns
the index of the last slot of the encoding.
REQUIRES: index points to first slot of a counter. */
__host__ __device__ static inline uint64_t decode_counter(const QF *qf, uint64_t index, uint64_t *remainder, uint64_t *count)
{
uint64_t base;
uint64_t rem;
uint64_t cnt;
uint64_t digit;
uint64_t end;
*remainder = rem = get_slot(qf, index);
if (is_runend(qf, index)) { /* Entire run is "0" */
*count = 1;
return index;
}
digit = get_slot(qf, index + 1);
if (is_runend(qf, index + 1)) {
*count = digit == rem ? 2 : 1;
return index + (digit == rem ? 1 : 0);
}
if (rem > 0 && digit >= rem) {
*count = digit == rem ? 2 : 1;
return index + (digit == rem ? 1 : 0);
}
if (rem > 0 && digit == 0 && get_slot(qf, index + 2) == rem) {
*count = 3;
return index + 2;
}
if (rem == 0 && digit == 0) {
if (get_slot(qf, index + 2) == 0) {
*count = 3;
return index + 2;
} else {
*count = 2;
return index + 1;
}
}
cnt = 0;
base = (1ULL << qf->metadata->bits_per_slot) - (rem ? 2 : 1);
end = index + 1;
while (digit != rem && !is_runend(qf, end)) {
if (digit > rem)
digit--;
if (digit && rem)
digit--;
cnt = cnt * base + digit;
end++;
digit = get_slot(qf, end);
}
if (rem) {
*count = cnt + 3;
return end;
}
if (is_runend(qf, end) || get_slot(qf, end + 1) != 0) {
*count = 1;
return index;
}
*count = cnt + 4;
return end + 1;
}
/* return the next slot which corresponds to a
* different element
* */
// __device__ static inline uint64_t next_slot(QF *qf, uint64_t current)
// {
// uint64_t rem = get_slot(qf, current);
// current++;
// while (get_slot(qf, current) == rem && current <= qf->metadata->nslots) {
// current++;
// }
// return current;
// }
//code for approx inserts
__host__ __device__ static inline qf_returns insert1_if_not_exists(QF *qf, __uint64_t hash, uint8_t * value)
{
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
uint64_t compare_remainder = hash_remainder >> qf->metadata->value_bits;
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, true, runtime_lock))
return QF_COULDNT_LOCK;
}
*/
//printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder);
//approx filter has estimate of only one insert per item
// #ifdef __CUDA_ARCH__
// atomicAdd((unsigned long long *)&qf->metadata->noccupied_slots, 1ULL);
// #else
// abort();
// #endif
if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) {
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
set_slot(qf, hash_bucket_index, hash_remainder);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
} else {
uint64_t runend_index = run_end(qf, hash_bucket_index);
int operation = 0; /* Insert into empty bucket */
uint64_t insert_index = runend_index + 1;
uint64_t new_value = hash_remainder;
/* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1;
if (is_occupied(qf, hash_bucket_index)) {
/* Find the counter for this remainder if it exists. */
uint64_t current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;
//uint64_t zero_terminator = runstart_index;
/* Skip over counters for other remainders. */
while (current_remainder < compare_remainder && runstart_index <=
runend_index) {
runstart_index++;
current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;
}
/* If this is the first time we've inserted the new remainder,
and it is larger than any remainder in the run. */
if (runstart_index > runend_index) {
operation = 1;
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* This is the first time we're inserting this remainder, but
there are larger remainders already in the run. */
} else if (current_remainder != compare_remainder) {
operation = 2; /* Inserting */
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* Cases below here: we're incrementing the (simple or
extended) counter for this remainder. */
/* If there's exactly one instance of this remainder. */
} else {
//get remainder
			*value = get_slot(qf, runstart_index) & BITMASK(qf->metadata->value_bits); //bitwise mask, not logical AND
return QF_ITEM_FOUND;
}
} //else {
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//}
if (operation >= 0) {
uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1);
#if DROP_ON_BIG_CLUSTER
// if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
// return QF_FULL;
// }
if (qf->metadata->qf_full){
return QF_FULL;
}
if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
qf->metadata->qf_full = true;
return QF_FULL;
}
#endif
if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){
return QF_FULL;
}
if (empty_slot_index >= qf->metadata->xnslots) {
printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
return QF_FULL;
}
shift_remainders(qf, insert_index, empty_slot_index);
set_slot(qf, insert_index, new_value);
//ret_distance = insert_index - hash_bucket_index;
shift_runends(qf, insert_index, empty_slot_index-1, 1);
switch (operation) {
case 0:
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64);
break;
case 1:
METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64));
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64);
break;
case 2:
METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64));
break;
default:
printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
__threadfence(); // ensure store issued before trap
asm("trap;");
#else
abort();
#endif
}
/*
* Increment the offset for each block between the hash bucket index
* and block of the empty slot
* */
uint64_t i;
for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <=
empty_slot_index/QF_SLOTS_PER_BLOCK; i++) {
if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
assert(get_block(qf, i)->offset != 0);
#endif
}
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
}
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
}
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, true);
}
*/
return QF_ITEM_INSERTED;
}
__device__ static inline qf_returns insert1_if_not_exists_cooperative(QF *qf, __uint64_t hash, uint8_t * value, int warpID)
{
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
uint64_t compare_remainder = hash_remainder >> qf->metadata->value_bits;
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, true, runtime_lock))
return QF_COULDNT_LOCK;
}
*/
//printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder);
//approx filter has estimate of only one insert per item
// #ifdef __CUDA_ARCH__
// atomicAdd((unsigned long long *)&qf->metadata->noccupied_slots, 1ULL);
// #else
// abort();
// #endif
//this step can't be improved, minimum one mem check
if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) {
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
set_slot(qf, hash_bucket_index, hash_remainder);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
} else {
//maybe improve run_end, come back later and check
uint64_t runend_index = run_end(qf, hash_bucket_index);
int operation = 0; /* Insert into empty bucket */
uint64_t insert_index = runend_index + 1;
uint64_t new_value = hash_remainder;
/* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1;
if (is_occupied(qf, hash_bucket_index)) {
/* Find the counter for this remainder if it exists. */
uint64_t current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;
//uint64_t zero_terminator = runstart_index;
/* Skip over counters for other remainders. */
//we look for runstart_index <= runend and current_remainder >= compare_remainder
uint64_t my_runstart_index = runstart_index + warpID;
uint64_t my_current_remainder = get_slot(qf, my_runstart_index) >> qf->metadata->value_bits;
while(true){
//generate ballot
bool ballot = !((my_runstart_index <= runend_index) && (my_current_remainder < compare_remainder));
int warp_to_query = __ffs(__ballot_sync(0xffffffff, ballot))-1;
if (warp_to_query != -1){
//match kinda found!
runstart_index = __shfl_sync(0xffffffff, my_runstart_index, warp_to_query);
//exit successfully
break;
}
//if all fail retry at the next iteration
my_runstart_index+=32;
}
// while (current_remainder < compare_remainder && runstart_index <=
// runend_index) {
// runstart_index++;
// current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;
// }
//reset current remainder to be correct
current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;
/* If this is the first time we've inserted the new remainder,
and it is larger than any remainder in the run. */
if (runstart_index > runend_index) {
operation = 1;
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* This is the first time we're inserting this remainder, but
there are larger remainders already in the run. */
} else if (current_remainder != compare_remainder) {
operation = 2; /* Inserting */
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* Cases below here: we're incrementing the (simple or
extended) counter for this remainder. */
/* If there's exactly one instance of this remainder. */
} else {
//get remainder
			*value = get_slot(qf, runstart_index) & BITMASK(qf->metadata->value_bits); //bitwise mask, not logical AND
return QF_ITEM_FOUND;
}
} //else {
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//}
if (operation >= 0) {
			uint64_t empty_slot_index;
			if (warpID == 0) empty_slot_index = find_first_empty_slot(qf, runend_index+1);

			//broadcast the empty slot to every lane before anyone branches on it;
			//otherwise lanes other than 0 read an uninitialized value below and can
			//diverge around the __shfl_sync.
			empty_slot_index = __shfl_sync(0xffffffff, empty_slot_index, 0);

			#if DROP_ON_BIG_CLUSTER

			// if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
			// 	return QF_FULL;
			// }

			if (qf->metadata->qf_full){
				return QF_FULL;
			}

			if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
				qf->metadata->qf_full = true;
				return QF_FULL;
			}

			#endif

			if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){
				return QF_FULL;
			}

			if (empty_slot_index >= qf->metadata->xnslots) {
				printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
				return QF_FULL;
			}
// if (warpID == 0){
// }
//shift remainders changes - atm, none
shift_remainders_cooperative(qf, insert_index, empty_slot_index, warpID);
//set slot changes, atm, none
if (warpID == 0){
set_slot(qf, insert_index, new_value);
//ret_distance = insert_index - hash_bucket_index;
shift_runends(qf, insert_index, empty_slot_index-1, 1);
switch (operation) {
case 0:
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64);
break;
case 1:
METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64));
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64);
break;
case 2:
METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64));
break;
default:
printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
__threadfence(); // ensure store issued before trap
asm("trap;");
#else
abort();
#endif
}
/*
* Increment the offset for each block between the hash bucket index
* and block of the empty slot
* */
uint64_t i;
for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <=
empty_slot_index/QF_SLOTS_PER_BLOCK; i++) {
if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
assert(get_block(qf, i)->offset != 0);
#endif
}
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
} // end of single threaded brace
}
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
	//closing brace for the non-empty-bucket else branch
}
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, true);
}
*/
return QF_ITEM_INSERTED;
}
__host__ __device__ static inline qf_returns insert1(QF *qf, __uint64_t hash, uint8_t runtime_lock)
{
int ret_distance = 0;
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, true, runtime_lock))
return QF_COULDNT_LOCK;
}
*/
//printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder);
if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) {
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
set_slot(qf, hash_bucket_index, hash_remainder);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
ret_distance = 0;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
} else {
uint64_t runend_index = run_end(qf, hash_bucket_index);
#if DROP_ON_RUNEND
if (runend_index - hash_bucket_index >= RUNEND_CUTOFF){
//printf("Dropping\n");
return QF_FULL;
}
#endif
int operation = 0; /* Insert into empty bucket */
uint64_t insert_index = runend_index + 1;
uint64_t new_value = hash_remainder;
/* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1;
if (is_occupied(qf, hash_bucket_index)) {
/* Find the counter for this remainder if it exists. */
uint64_t current_remainder = get_slot(qf, runstart_index);
uint64_t zero_terminator = runstart_index;
/* The counter for 0 is special. */
if (current_remainder == 0) {
uint64_t t = runstart_index + 1;
while (t < runend_index && get_slot(qf, t) != 0)
t++;
if (t < runend_index && get_slot(qf, t+1) == 0)
zero_terminator = t+1; /* Three or more 0s */
else if (runstart_index < runend_index && get_slot(qf, runstart_index
+ 1) == 0)
zero_terminator = runstart_index + 1; /* Exactly two 0s */
/* Otherwise, exactly one 0 (i.e. zero_terminator == runstart_index) */
/* May read past end of run, but that's OK because loop below
can handle that */
if (hash_remainder != 0) {
runstart_index = zero_terminator + 1;
current_remainder = get_slot(qf, runstart_index);
}
}
/* Skip over counters for other remainders. */
while (current_remainder < hash_remainder && runstart_index <=
runend_index) {
/* If this remainder has an extended counter, skip over it. */
if (runstart_index < runend_index &&
get_slot(qf, runstart_index + 1) < current_remainder) {
runstart_index = runstart_index + 2;
while (runstart_index < runend_index &&
get_slot(qf, runstart_index) != current_remainder)
runstart_index++;
runstart_index++;
/* This remainder has a simple counter. */
} else {
runstart_index++;
}
/* This may read past the end of the run, but the while loop
condition will prevent us from using the invalid result in
that case. */
current_remainder = get_slot(qf, runstart_index);
}
/* If this is the first time we've inserted the new remainder,
and it is larger than any remainder in the run. */
if (runstart_index > runend_index) {
operation = 1;
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* This is the first time we're inserting this remainder, but
there are larger remainders already in the run. */
} else if (current_remainder != hash_remainder) {
operation = 2; /* Inserting */
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* Cases below here: we're incrementing the (simple or
extended) counter for this remainder. */
/* If there's exactly one instance of this remainder. */
} else if (runstart_index == runend_index ||
(hash_remainder > 0 && get_slot(qf, runstart_index + 1) >
hash_remainder) ||
(hash_remainder == 0 && zero_terminator == runstart_index)) {
operation = 2; /* Insert */
insert_index = runstart_index;
new_value = hash_remainder;
/* If there are exactly two instances of this remainder. */
} else if ((hash_remainder > 0 && get_slot(qf, runstart_index + 1) ==
hash_remainder) ||
(hash_remainder == 0 && zero_terminator == runstart_index + 1)) {
operation = 2; /* Insert */
insert_index = runstart_index + 1;
new_value = 0;
/* Special case for three 0s */
} else if (hash_remainder == 0 && zero_terminator == runstart_index + 2) {
operation = 2; /* Insert */
insert_index = runstart_index + 1;
new_value = 1;
/* There is an extended counter for this remainder. */
} else {
/* Move to the LSD of the counter. */
insert_index = runstart_index + 1;
while (get_slot(qf, insert_index+1) != hash_remainder)
insert_index++;
/* Increment the counter. */
uint64_t digit, carry;
do {
carry = 0;
digit = get_slot(qf, insert_index);
// Convert a leading 0 (which is special) to a normal encoded digit
if (digit == 0) {
digit++;
if (digit == current_remainder)
digit++;
}
// Increment the digit
digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
// Ensure digit meets our encoding requirements
if (digit == 0) {
digit++;
carry = 1;
}
if (digit == current_remainder)
digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
if (digit == 0) {
digit++;
carry = 1;
}
set_slot(qf, insert_index, digit);
insert_index--;
} while(insert_index > runstart_index && carry);
/* If the counter needs to be expanded. */
if (insert_index == runstart_index && (carry > 0 || (current_remainder
!= 0 && digit >=
current_remainder)))
{
operation = 2; /* insert */
insert_index = runstart_index + 1;
if (!carry) /* To prepend a 0 before the counter if the MSD is greater than the rem */
new_value = 0;
else if (carry) { /* Increment the new value because we don't use 0 to encode counters */
new_value = 2;
/* If the rem is greater than or equal to the new_value then fail*/
#if DEBUG_ASSERTS
if (current_remainder > 0)
assert(new_value < current_remainder);
#endif
}
} else {
operation = -1;
}
}
} //else {
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//}
if (operation >= 0) {
uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1);
#if DROP_ON_BIG_CLUSTER
// if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
// return QF_FULL;
// }
if (qf->metadata->qf_full){
return QF_FULL;
}
if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
qf->metadata->qf_full = true;
return QF_FULL;
}
#endif
if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){
return QF_FULL;
}
if (empty_slot_index >= qf->metadata->xnslots) {
printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
return QF_FULL;
}
shift_remainders(qf, insert_index, empty_slot_index);
set_slot(qf, insert_index, new_value);
ret_distance = insert_index - hash_bucket_index;
shift_runends(qf, insert_index, empty_slot_index-1, 1);
switch (operation) {
case 0:
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64);
break;
case 1:
METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64));
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64);
break;
case 2:
METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64));
break;
default:
printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
__threadfence(); // ensure store issued before trap
asm("trap;");
#else
abort();
#endif
}
/*
* Increment the offset for each block between the hash bucket index
* and block of the empty slot
* */
uint64_t i;
for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <=
empty_slot_index/QF_SLOTS_PER_BLOCK; i++) {
if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
assert(get_block(qf, i)->offset != 0);
#endif
}
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
}
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
}
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, true);
}
*/
//return ret_distance;
return QF_ITEM_INSERTED;
}
__device__ static inline int insert1_cooperative(QF *qf, __uint64_t hash, uint8_t runtime_lock, int warpID)
{
int ret_distance = 0;
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, true, runtime_lock))
return QF_COULDNT_LOCK;
}
*/
//printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder);
//this is checking if the slot is empty, i.e. direct insert
//no memmove required, no warp fancyness
//no space for optimization on a warp level
if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) {
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
set_slot(qf, hash_bucket_index, hash_remainder);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
ret_distance = 0;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
} else {
//slot was occupied
//I believe this can be optimized? not super certain about the performance reqs
uint64_t runend_index = run_end(qf, hash_bucket_index);
int operation = 0; /* Insert into empty bucket */
uint64_t insert_index = runend_index + 1;
uint64_t new_value = hash_remainder;
/* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1;
if (is_occupied(qf, hash_bucket_index)) {
/* Find the counter for this remainder if it exists. */
uint64_t current_remainder = get_slot(qf, runstart_index);
uint64_t zero_terminator = runstart_index;
/* The counter for 0 is special. */
//this logic can't be optimized
if (current_remainder == 0) {
uint64_t t = runstart_index + 1;
while (t < runend_index && get_slot(qf, t) != 0)
t++;
if (t < runend_index && get_slot(qf, t+1) == 0)
zero_terminator = t+1; /* Three or more 0s */
else if (runstart_index < runend_index && get_slot(qf, runstart_index
+ 1) == 0)
zero_terminator = runstart_index + 1; /* Exactly two 0s */
/* Otherwise, exactly one 0 (i.e. zero_terminator == runstart_index) */
/* May read past end of run, but that's OK because loop below
can handle that */
if (hash_remainder != 0) {
runstart_index = zero_terminator + 1;
current_remainder = get_slot(qf, runstart_index);
}
}
//THIS CAN BE OPTIMIZED
//rewrite
//needs to be loopy boy and handle special counters
//I'm thinking if you are weird then step back once?
uint64_t my_runstart_index = runstart_index+warpID;
uint64_t my_current_remainder = get_slot(qf, my_runstart_index);
		//everyone has one of 32 partitions
		//get_slot feeds each lane its remainder, which is either
		//  < ours  - keep scanning
		//  = ours  - found it
		//  > ours  - we have gone past it
		//so on a well-formed run there is a single dividing line the warp could
		//ballot for. The cooperative scan is not implemented yet; the sequential
		//loop below is what actually runs, and this placeholder is dead code.
		if (my_runstart_index <= runend_index){
		}
/* Skip over counters for other remainders. */
while (current_remainder < hash_remainder && runstart_index <=
runend_index) {
/* If this remainder has an extended counter, skip over it. */
if (runstart_index < runend_index &&
get_slot(qf, runstart_index + 1) < current_remainder) {
//if the current slot < current remainder
//a
runstart_index = runstart_index + 2;
while (runstart_index < runend_index &&
get_slot(qf, runstart_index) != current_remainder)
runstart_index++;
runstart_index++;
/* This remainder has a simple counter. */
} else {
runstart_index++;
}
/* This may read past the end of the run, but the while loop
condition will prevent us from using the invalid result in
that case. */
current_remainder = get_slot(qf, runstart_index);
}
/* If this is the first time we've inserted the new remainder,
and it is larger than any remainder in the run. */
if (runstart_index > runend_index) {
operation = 1;
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* This is the first time we're inserting this remainder, but
there are larger remainders already in the run. */
} else if (current_remainder != hash_remainder) {
operation = 2; /* Inserting */
insert_index = runstart_index;
new_value = hash_remainder;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
/* Cases below here: we're incrementing the (simple or
extended) counter for this remainder. */
/* If there's exactly one instance of this remainder. */
} else if (runstart_index == runend_index ||
(hash_remainder > 0 && get_slot(qf, runstart_index + 1) >
hash_remainder) ||
(hash_remainder == 0 && zero_terminator == runstart_index)) {
operation = 2; /* Insert */
insert_index = runstart_index;
new_value = hash_remainder;
/* If there are exactly two instances of this remainder. */
} else if ((hash_remainder > 0 && get_slot(qf, runstart_index + 1) ==
hash_remainder) ||
(hash_remainder == 0 && zero_terminator == runstart_index + 1)) {
operation = 2; /* Insert */
insert_index = runstart_index + 1;
new_value = 0;
/* Special case for three 0s */
} else if (hash_remainder == 0 && zero_terminator == runstart_index + 2) {
operation = 2; /* Insert */
insert_index = runstart_index + 1;
new_value = 1;
/* There is an extended counter for this remainder. */
} else {
/* Move to the LSD of the counter. */
insert_index = runstart_index + 1;
while (get_slot(qf, insert_index+1) != hash_remainder)
insert_index++;
/* Increment the counter. */
uint64_t digit, carry;
do {
carry = 0;
digit = get_slot(qf, insert_index);
// Convert a leading 0 (which is special) to a normal encoded digit
if (digit == 0) {
digit++;
if (digit == current_remainder)
digit++;
}
// Increment the digit
digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
// Ensure digit meets our encoding requirements
if (digit == 0) {
digit++;
carry = 1;
}
if (digit == current_remainder)
digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
if (digit == 0) {
digit++;
carry = 1;
}
set_slot(qf, insert_index, digit);
insert_index--;
} while(insert_index > runstart_index && carry);
/* If the counter needs to be expanded. */
if (insert_index == runstart_index && (carry > 0 || (current_remainder
!= 0 && digit >=
current_remainder)))
{
operation = 2; /* insert */
insert_index = runstart_index + 1;
if (!carry) /* To prepend a 0 before the counter if the MSD is greater than the rem */
new_value = 0;
else if (carry) { /* Increment the new value because we don't use 0 to encode counters */
new_value = 2;
/* If the rem is greater than or equal to the new_value then fail*/
#if DEBUG_ASSERTS
if (current_remainder > 0)
assert(new_value < current_remainder);
#endif
}
} else {
operation = -1;
}
}
} //else {
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//}
if (operation >= 0) {
uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1);
#if DROP_ON_BIG_CLUSTER
// if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
// return QF_FULL;
// }
if (qf->metadata->qf_full){
return QF_FULL;
}
if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
qf->metadata->qf_full = true;
return QF_FULL;
}
#endif
if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){
return QF_FULL;
}
if (empty_slot_index >= qf->metadata->xnslots) {
printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
return QF_FULL;
}
shift_remainders(qf, insert_index, empty_slot_index);
set_slot(qf, insert_index, new_value);
ret_distance = insert_index - hash_bucket_index;
shift_runends(qf, insert_index, empty_slot_index-1, 1);
switch (operation) {
case 0:
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64);
break;
case 1:
METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64));
METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64);
break;
case 2:
METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64));
break;
default:
printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
__threadfence(); // ensure store issued before trap
asm("trap;");
#else
abort();
#endif
}
/*
* Increment the offset for each block between the hash bucket index
* and block of the empty slot
* */
uint64_t i;
for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <=
empty_slot_index/QF_SLOTS_PER_BLOCK; i++) {
if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
assert(get_block(qf, i)->offset != 0);
#endif
}
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
}
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
}
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, true);
}
*/
return ret_distance;
}
__host__ __device__ static inline qf_returns insert(QF *qf, __uint64_t hash, uint64_t count, uint8_t
runtime_lock)
{
int ret_distance = 0;
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
/*uint64_t hash_bucket_lock_offset = hash_bucket_index % NUM_SLOTS_TO_LOCK;*/
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, false, runtime_lock))
return QF_COULDNT_LOCK;
}
*/
uint64_t runend_index = run_end(qf, hash_bucket_index);
/* Empty slot */
if (might_be_empty(qf, hash_bucket_index) && runend_index ==
hash_bucket_index) {
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
set_slot(qf, hash_bucket_index, hash_remainder);
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL <<
(hash_bucket_block_offset % 64);
//ERIC TODO: see if this metadata is needed--probably isn't compatible with GPU
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
//modify_metadata(&qf->runtimedata->pc_nelts, 1);
/* This trick will, I hope, keep the fast case fast. */
if (count > 1) {
insert(qf, hash, count - 1, QF_NO_LOCK);
}
} else { /* Non-empty slot */
uint64_t new_values[67];
int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,hash_bucket_index- 1) + 1;
bool ret;
if (!is_occupied(qf, hash_bucket_index)) { /* Empty bucket, but its slot is occupied. */
uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]);
ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf, 0, hash_bucket_index, runstart_index, p, &new_values[67] - p, 0);
if (!ret)
return QF_FULL;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
ret_distance = runstart_index - hash_bucket_index;
} else { /* Non-empty bucket */
uint64_t current_remainder, current_count, current_end;
/* Find the counter for this remainder, if one exists. */
current_end = decode_counter(qf, runstart_index, ¤t_remainder,¤t_count);
while (current_remainder < hash_remainder && !is_runend(qf, current_end)) {
runstart_index = current_end + 1;
current_end = decode_counter(qf, runstart_index, ¤t_remainder,
¤t_count);
}
/* If we reached the end of the run w/o finding a counter for this remainder,
then append a counter for this remainder to the run. */
if (current_remainder < hash_remainder) {
uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]);
ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf, 1, /* Append to bucket */hash_bucket_index, current_end + 1, p, &new_values[67] - p, 0);
if (!ret)
return QF_FULL;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
ret_distance = (current_end + 1) - hash_bucket_index;
/* Found a counter for this remainder. Add in the new count. */
} else if (current_remainder == hash_remainder) {
uint64_t *p = encode_counter(qf, hash_remainder, current_count + count, &new_values[67]);
ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
is_runend(qf, current_end) ? 1 : 2,
hash_bucket_index,
runstart_index,
p,
&new_values[67] - p,
current_end - runstart_index + 1);
if (!ret)
return QF_FULL;
ret_distance = runstart_index - hash_bucket_index;
/* No counter for this remainder, but there are larger
remainders, so we're not appending to the bucket. */
} else {
uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]);
ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
2, /* Insert to bucket */
hash_bucket_index,
runstart_index,
p,
&new_values[67] - p,
0);
if (!ret)
return QF_FULL;
//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
ret_distance = runstart_index - hash_bucket_index;
}
}
METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
//modify_metadata(&qf->runtimedata->pc_nelts, count);
}
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, false);
}
*/
//return ret_distance;
return QF_ITEM_INSERTED;
}
__host__ __device__ inline static int _remove(QF *qf, __uint64_t hash, uint64_t count, uint8_t
runtime_lock)
{
int ret_numfreedslots = 0;
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t current_remainder, current_count, current_end;
uint64_t new_values[67];
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
if (!qf_lock(qf, hash_bucket_index, false, runtime_lock))
return -2;
}
*/
/* Empty bucket */
if (!is_occupied(qf, hash_bucket_index))
return -1;
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index - 1) + 1;
uint64_t original_runstart_index = runstart_index;
int only_item_in_the_run = 0;
/*Find the counter for this remainder, if one exists.*/
current_end = decode_counter(qf, runstart_index, ¤t_remainder, ¤t_count);
while (current_remainder < hash_remainder && !is_runend(qf, current_end)) {
runstart_index = current_end + 1;
current_end = decode_counter(qf, runstart_index, ¤t_remainder, ¤t_count);
}
/* remainder not found in the given run */
if (current_remainder != hash_remainder)
return -1;
if (original_runstart_index == runstart_index && is_runend(qf, current_end))
only_item_in_the_run = 1;
	/* encode the new counter */
uint64_t *p = encode_counter(qf, hash_remainder,
count > current_count ? 0 : current_count - count,
&new_values[67]);
ret_numfreedslots = remove_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
only_item_in_the_run,
hash_bucket_index,
runstart_index,
p,
&new_values[67] - p,
current_end - runstart_index + 1);
// update the nelements.
//modify_metadata(&qf->runtimedata->pc_nelts, -count);
/*qf->metadata->nelts -= count;*/
/*
if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
qf_unlock(qf, hash_bucket_index, false);
}
*/
return ret_numfreedslots;
}
/***********************************************************************
* Code that uses the above to implement key-value-counter operations. *
***********************************************************************/
__host__ uint64_t qf_init(QF *qf, uint64_t nslots, uint64_t key_bits, uint64_t value_bits,
enum qf_hashmode hash, uint32_t seed, void* buffer, uint64_t
buffer_len)
{
uint64_t num_slots, xnslots, nblocks;
uint64_t key_remainder_bits, bits_per_slot;
uint64_t size;
uint64_t total_num_bytes;
assert(popcnt(nslots) == 1); /* nslots must be a power of 2 */
num_slots = nslots;
xnslots = nslots + 10*sqrt((double)nslots);
nblocks = (xnslots + QF_SLOTS_PER_BLOCK - 1) / QF_SLOTS_PER_BLOCK;
key_remainder_bits = key_bits;
while (nslots > 1 && key_remainder_bits > 0) {
key_remainder_bits--;
nslots >>= 1;
}
assert(key_remainder_bits >= 2);
bits_per_slot = key_remainder_bits + value_bits;
assert (QF_BITS_PER_SLOT == 0 || QF_BITS_PER_SLOT == bits_per_slot);
assert(bits_per_slot > 1);
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
size = nblocks * sizeof(qfblock);
#else
size = nblocks * (sizeof(qfblock) + QF_SLOTS_PER_BLOCK * bits_per_slot / 8);
#endif
total_num_bytes = sizeof(qfmetadata) + size;
if (buffer == NULL || total_num_bytes > buffer_len)
return total_num_bytes;
// memset(buffer, 0, total_num_bytes);
qf->metadata = (qfmetadata *)(buffer);
qf->blocks = (qfblock *)(qf->metadata + 1);
qf->metadata->magic_endian_number = MAGIC_NUMBER;
qf->metadata->reserved = 0;
qf->metadata->hash_mode = hash;
qf->metadata->total_size_in_bytes = size;
qf->metadata->seed = seed;
qf->metadata->nslots = num_slots;
qf->metadata->xnslots = xnslots;
qf->metadata->key_bits = key_bits;
qf->metadata->value_bits = value_bits;
qf->metadata->key_remainder_bits = key_remainder_bits;
qf->metadata->bits_per_slot = bits_per_slot;
qf->metadata->range = qf->metadata->nslots;
qf->metadata->range <<= qf->metadata->key_remainder_bits;
qf->metadata->nblocks = (qf->metadata->xnslots + QF_SLOTS_PER_BLOCK - 1) /
QF_SLOTS_PER_BLOCK;
qf->metadata->nelts = 0;
qf->metadata->ndistinct_elts = 0;
qf->metadata->noccupied_slots = 0;
qf->metadata->qf_full = false;
qf->runtimedata->num_locks = ((qf->metadata->xnslots/NUM_SLOTS_TO_LOCK)+2);
pc_init(&qf->runtimedata->pc_nelts, (int64_t*)&qf->metadata->nelts, 8, 100);
pc_init(&qf->runtimedata->pc_ndistinct_elts, (int64_t*)&qf->metadata->ndistinct_elts, 8, 100);
pc_init(&qf->runtimedata->pc_noccupied_slots, (int64_t*)&qf->metadata->noccupied_slots, 8, 100);
/* initialize container resize */
qf->runtimedata->auto_resize = 0;
qf->runtimedata->container_resize = qf_resize_malloc;
/* initialize all the locks to 0 */
qf->runtimedata->metadata_lock = 0;
//etodo: copy this to GPU
qf->runtimedata->locks = (uint16_t *)calloc(qf->runtimedata->num_locks, sizeof(uint16_t));
if (qf->runtimedata->locks == NULL) {
perror("Couldn't allocate memory for runtime locks.");
exit(EXIT_FAILURE);
}
#ifdef LOG_WAIT_TIME
qf->runtimedata->wait_times = (wait_time_data*
)calloc(qf->runtimedata->num_locks+1,
sizeof(wait_time_data));
if (qf->runtimedata->wait_times == NULL) {
perror("Couldn't allocate memory for runtime wait_times.");
exit(EXIT_FAILURE);
}
#endif
return total_num_bytes;
}
__host__ uint64_t qf_use(QF* qf, void* buffer, uint64_t buffer_len)
{
qf->metadata = (qfmetadata *)(buffer);
if (qf->metadata->total_size_in_bytes + sizeof(qfmetadata) > buffer_len) {
return qf->metadata->total_size_in_bytes + sizeof(qfmetadata);
}
qf->blocks = (qfblock *)(qf->metadata + 1);
qf->runtimedata = (qfruntime *)calloc(sizeof(qfruntime), 1);
if (qf->runtimedata == NULL) {
perror("Couldn't allocate memory for runtime data.");
exit(EXIT_FAILURE);
}
/* initialize all the locks to 0 */
qf->runtimedata->metadata_lock = 0;
qf->runtimedata->locks = (uint16_t *)calloc(qf->runtimedata->num_locks,
sizeof(uint16_t));
if (qf->runtimedata->locks == NULL) {
perror("Couldn't allocate memory for runtime locks.");
exit(EXIT_FAILURE);
}
#ifdef LOG_WAIT_TIME
qf->runtimedata->wait_times = (wait_time_data*
)calloc(qf->runtimedata->num_locks+1,
sizeof(wait_time_data));
if (qf->runtimedata->wait_times == NULL) {
perror("Couldn't allocate memory for runtime wait_times.");
exit(EXIT_FAILURE);
}
#endif
return sizeof(qfmetadata) + qf->metadata->total_size_in_bytes;
}
__host__ void *qf_destroy(QF *qf)
{
assert(qf->runtimedata != NULL);
if (qf->runtimedata->locks != NULL)
free((void*)qf->runtimedata->locks);
if (qf->runtimedata->wait_times != NULL)
free(qf->runtimedata->wait_times);
if (qf->runtimedata->f_info.filepath != NULL)
free(qf->runtimedata->f_info.filepath);
free(qf->runtimedata);
return (void*)qf->metadata;
}
__host__ bool qf_malloc(QF *qf, uint64_t nslots, uint64_t key_bits, uint64_t
value_bits, enum qf_hashmode hash, bool on_device, uint32_t seed)
{
uint64_t total_num_bytes = qf_init(qf, nslots, key_bits, value_bits,
hash, seed, NULL, 0);
	//buffer malloc bad?
	void* buffer = malloc(total_num_bytes);
	if (buffer == NULL) {
		perror("Couldn't allocate memory for the CQF.");
		exit(EXIT_FAILURE);
	}
	//only touch the buffer after the NULL check
	memset(buffer, 0, total_num_bytes);
	printf("QF bytes: %lu\n", total_num_bytes);
qf->runtimedata = (qfruntime*)calloc(sizeof(qfruntime), 1);
if (qf->runtimedata == NULL) {
perror("Couldn't allocate memory for runtime data.");
exit(EXIT_FAILURE);
}
uint64_t init_size = qf_init(qf, nslots, key_bits, value_bits, hash, seed,
buffer, total_num_bytes);
	//report success as a proper bool; returning the byte count (or -1) here
	//made failure indistinguishable from success for callers that test !qf_malloc(...)
	if (init_size == total_num_bytes)
		return true;
	else
		return false;
}
__host__ bool qf_free(QF *qf)
{
assert(qf->metadata != NULL);
void *buffer = qf_destroy(qf);
if (buffer != NULL) {
free(buffer);
return true;
}
return false;
}
//consolidate all of the device construction into one convenient func!
__host__ void qf_malloc_device(QF** qf, int nbits, bool bulk_config){
//bring in compile #define
int rbits = 8;
int vbits = 0;
QF host_qf;
QF temp_device_qf;
QF* temp_dev_ptr;
uint64_t nslots = 1ULL << nbits;
int num_hash_bits = nbits+rbits;
qf_malloc(&host_qf, nslots, num_hash_bits, vbits, QF_HASH_INVERTIBLE, false, 0);
qf_set_auto_resize(&host_qf, false);
qfruntime* _runtime;
qfmetadata* _metadata;
qfblock* _blocks;
uint16_t * dev_locks;
uint64_t ** buffers;
uint64_t * buffer_sizes;
if (bulk_config){
uint64_t num_locks = host_qf.runtimedata->num_locks;
//allocate 1 lock so that cudaFree doesn't break later
cudaMalloc((void ** )&dev_locks, 1 * sizeof(uint16_t));
//are these 2x necessary?
cudaMalloc((void **) & buffer_sizes, 2*num_locks*sizeof(uint64_t));
cudaMalloc((void **)&buffers, 2*num_locks*sizeof(uint64_t*));
} else {
//point API, multiply locks
cudaMalloc((void ** )&dev_locks, host_qf.runtimedata->num_locks*LOCK_DIST * sizeof(uint16_t));
cudaMemset(dev_locks, 0, host_qf.runtimedata->num_locks*LOCK_DIST * sizeof(uint16_t));
cudaMalloc((void **) & buffer_sizes, 1*sizeof(uint64_t));
cudaMalloc((void **)&buffers, 1*sizeof(uint64_t*));
}
//wipe and replace
free(host_qf.runtimedata->locks);
host_qf.runtimedata->locks = dev_locks;
cudaMalloc((void**)&_runtime, sizeof(qfruntime));
cudaMalloc((void**)&_metadata, sizeof(qfmetadata));
cudaMalloc((void**)&_blocks, qf_get_total_size_in_bytes(&host_qf));
//uint64_t num_locks = host_qf.runtimedata->num_locks;
//insert these into host_qf so dev qf has access.
//they don't need to be wiped as buffers are reset before every insert.
host_qf.runtimedata->buffers = buffers;
host_qf.runtimedata->buffer_sizes = buffer_sizes;
cudaMemcpy(_runtime, host_qf.runtimedata, sizeof(qfruntime), cudaMemcpyHostToDevice);
cudaMemcpy(_metadata, host_qf.metadata, sizeof(qfmetadata), cudaMemcpyHostToDevice);
cudaMemcpy(_blocks, host_qf.blocks, qf_get_total_size_in_bytes(&host_qf), cudaMemcpyHostToDevice);
temp_device_qf.runtimedata = _runtime;
temp_device_qf.metadata = _metadata;
temp_device_qf.blocks = _blocks;
//this might be buggy
//request to fill the dev ptr with a QF, then copy over, then copy that to qf
cudaMalloc((void **)&temp_dev_ptr, sizeof(QF));
cudaMemcpy(temp_dev_ptr, &temp_device_qf, sizeof(QF), cudaMemcpyHostToDevice);
*qf = temp_dev_ptr;
}
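/*
   A hedged usage sketch for qf_malloc_device above and qf_destroy_device
   below. The kernel name, launch shape and nbits value are illustrative
   assumptions, not something this file defines:

     QF * dev_qf;
     qf_malloc_device(&dev_qf, 24, true);              // 2^24 slots, bulk configuration
     my_insert_kernel<<<blocks, threads>>>(dev_qf, dev_keys, nkeys);
     cudaDeviceSynchronize();
     qf_destroy_device(dev_qf);
*/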
//TODO: make whether the buffers are destroyed here configurable
__host__ void qf_destroy_device(QF * qf){
QF * host_qf;
cudaMallocHost((void ** )&host_qf, sizeof(QF));
cudaMemcpy(host_qf, qf, sizeof(QF), cudaMemcpyDeviceToHost);
qfruntime* _runtime;
cudaMallocHost((void **) &_runtime, sizeof(qfruntime));
cudaMemcpy(_runtime, host_qf->runtimedata, sizeof(qfruntime), cudaMemcpyDeviceToHost);
//may need to have _runtimedata shunted into another host object
//ill synchronize before this to double check
assert(_runtime != NULL);
if (_runtime->locks != NULL)
cudaFree(_runtime->locks);
if (_runtime->buffers != NULL){
cudaFree(_runtime->buffers);
cudaFree(_runtime->buffer_sizes);
}
if (_runtime->wait_times != NULL)
cudaFree(_runtime->wait_times);
//this one may break
if (_runtime->f_info.filepath != NULL)
		cudaFree(_runtime->f_info.filepath); //read the pointer from the host copy; host_qf->runtimedata is device memory
cudaFree(host_qf->runtimedata);
cudaFree(host_qf->metadata);
cudaFree(host_qf->blocks);
cudaFreeHost(host_qf);
cudaFreeHost(_runtime);
}
__host__ void qf_copy(QF *dest, const QF *src)
{
DEBUG_CQF("%s\n","Source CQF");
DEBUG_DUMP(src);
memcpy(dest->runtimedata, src->runtimedata, sizeof(qfruntime));
memcpy(dest->metadata, src->metadata, sizeof(qfmetadata));
memcpy(dest->blocks, src->blocks, src->metadata->total_size_in_bytes);
DEBUG_CQF("%s\n","Destination CQF after copy.");
DEBUG_DUMP(dest);
}
__host__ void qf_reset(QF *qf)
{
qf->metadata->nelts = 0;
qf->metadata->ndistinct_elts = 0;
qf->metadata->noccupied_slots = 0;
#ifdef LOG_WAIT_TIME
memset(qf->wait_times, 0,
(qf->runtimedata->num_locks+1)*sizeof(wait_time_data));
#endif
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
memset(qf->blocks, 0, qf->metadata->nblocks* sizeof(qfblock));
#else
memset(qf->blocks, 0, qf->metadata->nblocks*(sizeof(qfblock) + QF_SLOTS_PER_BLOCK *
qf->metadata->bits_per_slot / 8));
#endif
}
__host__ int64_t qf_resize_malloc(QF *qf, uint64_t nslots)
{
QF new_qf;
if (!qf_malloc(&new_qf, nslots, qf->metadata->key_bits,
qf->metadata->value_bits, qf->metadata->hash_mode,
false, qf->metadata->seed))
return -1;
if (qf->runtimedata->auto_resize) qf_set_auto_resize(&new_qf, true);
// copy keys from qf into new_qf
QFi qfi;
qf_iterator_from_position(qf, &qfi, 0);
int64_t ret_numkeys = 0;
do {
uint64_t key, value, count;
qfi_get_hash(&qfi, &key, &value, &count);
qfi_next(&qfi);
int ret = qf_insert(&new_qf, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH);
if (ret < 0) {
printf("Failed to insert key: %ld into the new CQF.\n", key);
return ret;
}
ret_numkeys++;
} while(!qfi_end(&qfi));
qf_free(qf);
memcpy(qf, &new_qf, sizeof(QF));
return ret_numkeys;
}
uint64_t qf_resize(QF* qf, uint64_t nslots, void* buffer, uint64_t buffer_len)
{
printf("QF attempting resize - This will fail\n");
QF new_qf;
new_qf.runtimedata = (qfruntime *)calloc(sizeof(qfruntime), 1);
if (new_qf.runtimedata == NULL) {
perror("Couldn't allocate memory for runtime data.\n");
exit(EXIT_FAILURE);
}
uint64_t init_size = qf_init(&new_qf, nslots, qf->metadata->key_bits,
qf->metadata->value_bits,
qf->metadata->hash_mode, qf->metadata->seed,
buffer, buffer_len);
if (init_size > buffer_len)
return init_size;
if (qf->runtimedata->auto_resize)
qf_set_auto_resize(&new_qf, true);
// copy keys from qf into new_qf
QFi qfi;
qf_iterator_from_position(qf, &qfi, 0);
do {
uint64_t key, value, count;
qfi_get_hash(&qfi, &key, &value, &count);
qfi_next(&qfi);
int ret = qf_insert(&new_qf, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH);
if (ret < 0) {
printf("Failed to insert key: %ld into the new CQF.\n", key);
abort(); // kill kernel with error
}
} while(!qfi_end(&qfi));
qf_free(qf);
memcpy(qf, &new_qf, sizeof(QF));
return init_size;
}
__host__ void qf_set_auto_resize(QF* qf, bool enabled)
{
if (enabled)
qf->runtimedata->auto_resize = 1;
else
qf->runtimedata->auto_resize = 0;
}
__host__ __device__ qf_returns qf_insert_not_exists(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
flags, uint8_t * retvalue)
{
// We fill up the CQF up to 95% load factor.
// This is a very conservative check.
//TODO: GPU resizing
/*
if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) {
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0)
{
fprintf(stderr, "Resizing the failed.\n");
return QF_FULL;
}
} else
return QF_FULL;
}
*/
// if (count == 0)
// return 0;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
//printf("Inside insert, new hash is recorded as %llu\n", hash);
qf_returns ret;
if (count == 1)
ret = insert1_if_not_exists(qf, hash, retvalue);
//for now count is always 1
//else
//ret = insert(qf, hash, count, flags);
// check for fullness based on the distance from the home slot to the slot
// in which the key is inserted
/*
if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) {
float load_factor = qf_get_num_occupied_slots(qf) /
(float)qf->metadata->nslots;
fprintf(stdout, "Load factor: %lf\n", load_factor);
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0)
{
if (ret == QF_FULL) {
if (count == 1)
ret = insert1(qf, hash, flags);
else
ret = insert(qf, hash, count, flags);
}
fprintf(stderr, "Resize finished.\n");
} else {
fprintf(stderr, "Resize failed\n");
ret = QF_FULL;
}
} else {
fprintf(stderr, "The CQF is filling up.\n");
ret = QF_FULL;
}
}
*/
return ret;
}
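/*
   Sketch of how a caller is expected to consume the return codes of
   qf_insert_not_exists above (illustrative only; any locking around the call
   is the caller's responsibility):

     uint8_t stored_value;
     qf_returns r = qf_insert_not_exists(qf, key, value, 1, QF_NO_LOCK, &stored_value);
     if (r == QF_ITEM_FOUND) {
       // key already present; stored_value holds its value bits
     } else if (r == QF_ITEM_INSERTED) {
       // newly inserted
     } else { // QF_FULL
       // drop the item or retry elsewhere
     }
*/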
__device__ qf_returns qf_insert_not_exists_cooperative(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
flags, uint8_t * retvalue, int warpID)
{
// We fill up the CQF up to 95% load factor.
// This is a very conservative check.
//TODO: GPU resizing
/*
if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) {
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0)
{
fprintf(stderr, "Resizing the failed.\n");
return QF_FULL;
}
} else
return QF_FULL;
}
*/
// if (count == 0)
// return 0;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
//printf("Inside insert, new hash is recorded as %llu\n", hash);
qf_returns ret;
if (count == 1)
ret = insert1_if_not_exists_cooperative(qf, hash, retvalue, warpID);
//for now count is always 1
//else
//ret = insert(qf, hash, count, flags);
// check for fullness based on the distance from the home slot to the slot
// in which the key is inserted
/*
if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) {
float load_factor = qf_get_num_occupied_slots(qf) /
(float)qf->metadata->nslots;
fprintf(stdout, "Load factor: %lf\n", load_factor);
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0)
{
if (ret == QF_FULL) {
if (count == 1)
ret = insert1(qf, hash, flags);
else
ret = insert(qf, hash, count, flags);
}
fprintf(stderr, "Resize finished.\n");
} else {
fprintf(stderr, "Resize failed\n");
ret = QF_FULL;
}
} else {
fprintf(stderr, "The CQF is filling up.\n");
ret = QF_FULL;
}
}
*/
return ret;
}
__host__ __device__ qf_returns qf_insert(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
flags)
{
// We fill up the CQF up to 95% load factor.
// This is a very conservative check.
//TODO: GPU resizing
/*
if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) {
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0)
{
fprintf(stderr, "Resizing the failed.\n");
return QF_FULL;
}
} else
return QF_FULL;
}
*/
if (count == 0)
return QF_ITEM_INSERTED;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
//printf("Inside insert, new hash is recorded as %llu\n", hash);
qf_returns ret;
if (count == 1){
ret = insert1(qf, hash, flags);
}
else {
ret = insert(qf, hash, count, flags);
}
// check for fullness based on the distance from the home slot to the slot
// in which the key is inserted
/*
if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) {
float load_factor = qf_get_num_occupied_slots(qf) /
(float)qf->metadata->nslots;
fprintf(stdout, "Load factor: %lf\n", load_factor);
if (qf->runtimedata->auto_resize) {
fprintf(stdout, "Resizing the CQF.\n");
if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0)
{
if (ret == QF_FULL) {
if (count == 1)
ret = insert1(qf, hash, flags);
else
ret = insert(qf, hash, count, flags);
}
fprintf(stderr, "Resize finished.\n");
} else {
fprintf(stderr, "Resize failed\n");
ret = QF_FULL;
}
} else {
fprintf(stderr, "The CQF is filling up.\n");
ret = QF_FULL;
}
}
*/
return ret;
}
/*------------------------
GPU Modifications
--------------------------*/
//approx filter locking code
//locking implementation for the 16 bit locks
//undefined behavior if you try to unlock a not locked lock
__device__ void lock_16(uint16_t * lock, uint64_t index){
uint16_t zero = 0;
uint16_t one = 1;
while (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) != zero)
;
}
__device__ void lock_16_coop(uint16_t * lock, uint64_t index, int warpID){
uint16_t zero = 0;
uint16_t one = 1;
if (warpID ==0){
while (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) != zero)
;
}
__syncwarp();
}
__device__ void unlock_16(uint16_t * lock, uint64_t index){
uint16_t zero = 0;
uint16_t one = 1;
atomicCAS((uint16_t *) &lock[index*LOCK_DIST], one, zero);
}
//lock_16 but built to be included as a piece of a while loop
// this is more in line with traditional cuda processing, may increase throughput
__device__ bool try_lock_16(uint16_t * lock, uint64_t index){
uint16_t zero = 0;
uint16_t one = 1;
if (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) == zero){
return true;
}
return false;
}
__device__ bool try_lock_16_coop(uint16_t * lock, uint64_t index, int warpID){
uint16_t zero = 0;
uint16_t one = 1;
bool ballot = 0;
if (warpID == 0){
if (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) == zero){
ballot = 1;
}
}
ballot = __shfl_sync(0xffffffff, ballot, 0);
return ballot;
}
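//A minimal illustration (not called anywhere in this file) of how the 16-bit
//locks are meant to bracket a point insert: lock the region that owns the
//bucket plus the next one, since an insert may spill into it (see the
//NUM_SLOTS_TO_LOCK check inside insert1). The helper name is an assumption
//made for this sketch, not part of the public API.
__device__ static inline qf_returns example_locked_point_insert(QF *qf, uint64_t hash)
{
	uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	//always acquire the lower-indexed lock first so two lanes can't deadlock
	lock_16(qf->runtimedata->locks, lock_index);
	lock_16(qf->runtimedata->locks, lock_index + 1);

	qf_returns ret = insert1(qf, hash, QF_NO_LOCK);

	unlock_16(qf->runtimedata->locks, lock_index + 1);
	unlock_16(qf->runtimedata->locks, lock_index);
	return ret;
}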
__device__ __forceinline__ void exchange(uint64_t * arr, uint64_t i, uint64_t j){
uint64_t temp = arr[i];
arr[i] = arr[j];
arr[j] = temp;
//maybe synchthreads?
}
__device__ __forceinline__ void compare(uint64_t * arr, uint64_t i, uint64_t j, bool dir){
if (dir == (arr[i] > arr[j])){
exchange(arr, i, j);
}
}
//return the biggest int of a uint64
__device__ __forceinline__ int biggest_bit(uint64_t n){
return 63 - __clzll((unsigned long long int) n);
}
__device__ __forceinline__ uint64_t biggest_pow_2(uint64_t n){
	return 1UL << (biggest_bit(n) - 1); //parentheses make the existing precedence explicit
}
__global__ void hash_all(QF* qf, uint64_t* vals, uint64_t* hashes, uint64_t nvals, uint8_t flags) {
uint64_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nvals){
return;
}
uint64_t key = vals[idx];
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) & (qf->metadata->range - 1);
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
//uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
hashes[idx] = key;
return;
}
//revised work pipeline
// 1) Set all offsets to keys here based on relative offset + keys - skips the launch call later - TODO: double check that (keys + offset) - keys == offset. -- cpp says this works
// 2) subtract sets of keys from each other to get the relative offsets - these will give offsets, last key needs to subtract from origin pointer
// this means that the keys here are set to point to the START of their bucket
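//one thread per lock region: binary search the sorted hash array for the first
//key that falls at or beyond this region's slot boundary and point buffers[idx] at it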
__global__ void set_buffers_binary(QF * qf, uint64_t num_keys, uint64_t * keys, uint8_t flags){
uint64_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= qf->runtimedata->num_locks) return;
uint64_t slots_per_lock = NUM_SLOTS_TO_LOCK;
//since we are finding all boundaries, we only need
//printf("idx %llu\n", idx);
//this sounds right? - they divide to go back so I think this is fine
uint64_t boundary = (slots_per_lock*idx); //<< qf->metadata->bits_per_slot;
//This is the code I'm stealing that assumption from
//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
//uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
//uint64_t lock_index = hash_bucket_index / slots_per_lock;
uint64_t lower = 0;
uint64_t upper = num_keys;
uint64_t index = upper-lower;
//upper is non inclusive bound
//if we exceed bounds that's our index
while (upper != lower){
index = lower + (upper - lower)/2;
if ((keys[index] >> qf->metadata->bits_per_slot) < boundary){
//false - the list before this point can be removed
lower = index+1;
//jump to a new midpoint
} else if (index==0){
//will this fix? otherwise need to patch via round up
upper = index;
} else if ((keys[index-1] >> qf->metadata->bits_per_slot) < boundary) {
//set index! this is the first instance where I am valid and the next isnt
//buffers[idx] = keys+index;
break;
} else {
//we are too far right, all keys to the right do not matter
upper = index;
}
}
//we either exited or have an edge condition:
//upper == lower iff 0 or max key
index = lower + (upper - lower)/2;
qf->runtimedata->buffers[idx] = keys + index;
}
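//single-threaded diagnostic kernel: walks the filter, records the length of each
//cluster in cluster_lengths, and reports the number of clusters in max_clusters[0]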
__global__ void find_clusters(QF* qf, uint64_t * cluster_lengths, uint64_t * max_clusters){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
uint64_t start_slot = 0;
uint64_t i =0;
while (start_slot < qf->metadata->nslots){
uint64_t old_start = start_slot;
start_slot = find_first_empty_slot(qf, start_slot);
if (start_slot == old_start){
start_slot++;
} else {
cluster_lengths[i] = start_slot-old_start;
i++;
}
}
max_clusters[0] = i;
}
//this can maybe be rolled into set_buffers_binary
//it performs an identical set of operations that are O(1) here
// O(log n) there, but maybe amortized
__global__ void set_buffer_lens(QF * qf, uint64_t num_keys, uint64_t * keys){
uint64_t num_buffers = qf->runtimedata->num_locks;
uint64_t idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx >= num_buffers) return;
uint64_t** buffers = qf->runtimedata->buffers;
uint64_t * buffer_sizes = qf->runtimedata->buffer_sizes;
//only 1 thread will diverge - should be fine - any cost already exists because of tail
if (idx != num_buffers-1){
//this should work? not 100% convinced but it seems ok
buffer_sizes[idx] = buffers[idx+1] - buffers[idx];
} else {
buffer_sizes[idx] = num_keys - (buffers[idx] - keys);
}
return;
}
//insert from buffers using prehashed_data
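//the evenness parameter selects either the even- or the odd-numbered lock regions,
//so two launches cover every region while neighbouring regions are never processed concurrently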
__global__ void insert_from_buffers_hashed(QF* qf, uint64_t evenness){
//uint64_t num_buffers, uint64_t** buffers, volatile uint64_t * buffer_counts;
uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;
if (idx >= qf->runtimedata->num_locks) return;
uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
uint64_t ** buffers = qf->runtimedata->buffers;
//at the start, we sort
//we are exceeding bounds by 1
//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);
//no need to sort if empty - this will cause overflow as 0-1 == max_uint
// if (buffer_counts[idx] > 0) {
// quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
// //assert(assert_sorted(buffers[idx], buffer_counts[idx]));
// }
uint64_t my_count = buffer_counts[idx];
for (uint64_t i =0; i < my_count; i++){
int ret = qf_insert(qf, buffers[idx][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH);
//internal threadfence. Bad? actually seems to be fine
//__threadfence();
}
__threadfence();
}
//insert from buffers using prehashed_data
//use warp cooperative operations
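//one warp per buffer: all 32 lanes walk the same buffer and pass their warpID to
//the cooperative insert so the per-slot work can be split across the warp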
__global__ void insert_from_buffers_cooperative(QF* qf, uint64_t evenness){
//uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;
uint64_t tid = threadIdx.x+blockDim.x*blockIdx.x;
uint64_t itemID = tid / 32;
int warpID = tid % 32;
uint64_t idx = 2*itemID+evenness;
if (idx >= qf->runtimedata->num_locks) return;
uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
uint64_t ** buffers = qf->runtimedata->buffers;
//at the start, we sort
//we are exceeding bounds by 1
//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);
//no need to sort if empty - this will cause overflow as 0-1 == max_uint
// if (buffer_counts[idx] > 0) {
// quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
// //assert(assert_sorted(buffers[idx], buffer_counts[idx]));
// }
//uint64_t - uint64_t should yield offset into vals
//uint64_t absolute_offset = buffers[idx]- buffers;
uint64_t my_count = buffer_counts[idx];
for (uint64_t i =0; i < my_count; i++){
//assert(keys[absolute_offset+i] == buffers[idx][i]);
uint8_t query;
qf_returns ret_val = qf_insert_not_exists_cooperative(qf, buffers[idx][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query, warpID);
#if DEBUG_ASSERTS
assert(ret_val != QF_FULL);
#endif
//internal threadfence. Bad? actually seems to be fine
//__threadfence();
}
__threadfence();
}
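//variant used by bulk_insert_reduce: keys holds the de-duplicated hashes and vals the
//matching counts from reduce_by_key, so each distinct hash is inserted once with its full count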
__global__ void insert_from_buffers_thrust(QF* qf, uint64_t evenness, uint64_t * keys, uint64_t * vals){
uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;
if (idx >= qf->runtimedata->num_locks) return;
uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
uint64_t ** buffers = qf->runtimedata->buffers;
//at the start, we sort
//we are exceeding bounds by 1
//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);
//no need to sort if empty - this will cause overflow as 0-1 == max_uint
// if (buffer_counts[idx] > 0) {
// quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
// //assert(assert_sorted(buffers[idx], buffer_counts[idx]));
// }
//uint64_t - uint64_t should yield offset into vals
uint64_t absolute_offset = buffers[idx]- keys;
uint64_t my_count = buffer_counts[idx];
for (uint64_t i =0; i < my_count; i++){
//assert(keys[absolute_offset+i] == buffers[idx][i]);
int ret = qf_insert(qf, buffers[idx][i], 0, vals[absolute_offset+i], QF_NO_LOCK | QF_KEY_IS_HASH);
//internal threadfence. Bad? actually seems to be fine
//__threadfence();
}
__threadfence();
}
//insert from buffers using prehashed_data
__global__ void delete_from_buffers_hashed(QF* qf, uint64_t evenness){
uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;
if (idx >= qf->runtimedata->num_locks) return;
uint64_t ** buffers = qf->runtimedata->buffers;
uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
//at the start, we sort
//we are exceeding bounds by 1
//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);
//no need to sort if empty - this will cause overflow as 0-1 == max_uint
// if (buffer_counts[idx] > 0) {
// quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
// //assert(assert_sorted(buffers[idx], buffer_counts[idx]));
// }
uint64_t my_count = buffer_counts[idx];
//0 - my count for loop, working backwards should be faster?
for (uint64_t i = my_count; i >=1; i--){
int ret = qf_remove(qf, buffers[idx][i-1], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH);
//internal threadfence. Bad? actually seems to be fine
//__threadfence();
}
__threadfence();
}
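//single-item insert that also reports whether the key was already present;
//locks the home region and the next one, since an insert may spill past the region boundary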
__device__ qf_returns point_insert_not_exists(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){
uint8_t query;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key % qf->metadata->range;
uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;
//encode extensions outside of the lock
while (true){
if (try_lock_16(qf->runtimedata->locks, lock_index)){
//this can also be a regular lock?
//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){
lock_16(qf->runtimedata->locks, lock_index+1);
qf_returns ret = qf_insert_not_exists(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query);
if (ret == QF_ITEM_FOUND){
returnedVal = query;
}
__threadfence();
unlock_16(qf->runtimedata->locks, lock_index+1);
unlock_16(qf->runtimedata->locks, lock_index);
return ret;
//}
unlock_16(qf->runtimedata->locks, lock_index);
}
}
}
__device__ qf_returns point_insert_not_exists_cooperative(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags, int warpID){
uint8_t query;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key % qf->metadata->range;
uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;
//encode extensions outside of the lock
while (true){
if (try_lock_16_coop(qf->runtimedata->locks, lock_index, warpID)){
//this can also be a regular lock?
//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){
lock_16_coop(qf->runtimedata->locks, lock_index+1, warpID);
qf_returns ret = qf_insert_not_exists_cooperative(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query, warpID);
if (ret == QF_ITEM_FOUND){
returnedVal = query;
}
__threadfence();
if (warpID ==0){
unlock_16(qf->runtimedata->locks, lock_index+1);
unlock_16(qf->runtimedata->locks, lock_index);
}
return ret;
//}
if (warpID ==0) unlock_16(qf->runtimedata->locks, lock_index);
}
}
}
__device__ qf_returns point_insert(QF* qf, uint64_t key, uint8_t value, uint8_t flags){
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key % qf->metadata->range;
uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;
//encode extensions outside of the lock
while (true){
if (try_lock_16(qf->runtimedata->locks, lock_index)){
//this can also be a regular lock?
//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){
lock_16(qf->runtimedata->locks, lock_index+1);
qf_returns ret = qf_insert(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH);
__threadfence();
unlock_16(qf->runtimedata->locks, lock_index+1);
unlock_16(qf->runtimedata->locks, lock_index);
return ret;
//}
unlock_16(qf->runtimedata->locks, lock_index);
}
}
}
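//lock-free point query: hashes the key (unless QF_KEY_IS_HASH is set) and returns
//the stored count, 0 if the key is absent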
__device__ uint64_t point_query(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key % qf->metadata->range;
uint64_t query;
uint64_t ret = qf_query(qf, hash, &query, QF_NO_LOCK | QF_KEY_IS_HASH);
returnedVal = query;
return ret;
}
__device__ uint64_t point_query_concurrent(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key % qf->metadata->range;
uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;
//encode extensions outside of the lock
while (true){
if (try_lock_16(qf->runtimedata->locks, lock_index)){
//this can also be a regular lock?
//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){
lock_16(qf->runtimedata->locks, lock_index+1);
uint64_t query;
uint64_t ret = qf_query(qf, hash, &query, QF_NO_LOCK | QF_KEY_IS_HASH);
__threadfence();
unlock_16(qf->runtimedata->locks, lock_index+1);
unlock_16(qf->runtimedata->locks, lock_index);
returnedVal = query;
return ret;
//}
unlock_16(qf->runtimedata->locks, lock_index);
}
}
}
__global__ void point_bulk_get(QF * qf, uint64_t * hashes, uint64_t nitems, uint64_t * counter){
uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >=nitems) return;
uint8_t query;
//point_query(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags)
if (point_query(qf, hashes[tid] % qf->metadata->range, 0, query, QF_NO_LOCK) ==0){
//on item not found increment
atomicAdd((unsigned long long int *) counter, (unsigned long long int) 1);
}
}
__global__ void point_bulk_get_nocount(QF * qf, uint64_t * hashes, uint64_t nitems){
uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >=nitems) return;
uint8_t query;
//point_query(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags)
point_query(qf, hashes[tid] % qf->metadata->range, 0, query, QF_NO_LOCK);
}
__global__ void bulk_get_cooperative(QF * qf, uint64_t * hashes, uint64_t nitems, uint64_t * counter){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
uint64_t itemID = tid /32;
int warpID = tid % 32;
if (itemID >= qf->runtimedata->num_locks) return;
uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
uint64_t ** buffers = qf->runtimedata->buffers;
//at the start, we sort
//we are exceeding bounds by 1
//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);
//no need to sort if empty - this will cause overflow as 0-1 == max_uint
// if (buffer_counts[idx] > 0) {
// quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
// //assert(assert_sorted(buffers[idx], buffer_counts[idx]));
// }
uint64_t my_count = buffer_counts[itemID];
for (uint64_t i =warpID; i < my_count; i+=32){
//int ret = qf_insert(qf, buffers[itemID][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH);
uint8_t query;
if (point_query(qf, buffers[itemID][i] % qf->metadata->range, 0, query, QF_NO_LOCK | QF_KEY_IS_HASH) ==0){
//atomicAdd((unsigned long long int *) counter, (unsigned long long int) 1);
}
//internal threadfence. Bad? actually seems to be fine
//__threadfence();
}
}
__host__ uint64_t cooperative_bulk_get_wrapper(QF * qf, uint64_t * hashes, uint64_t nitems){
auto start = std::chrono::high_resolution_clock::now();
uint64_t key_block_size = 32;
//start with num_locks, get counts
//This is slow, but there isn't a better way to do it
//we'll have to see how this affects performance
uint64_t * dev_num_locks;
cudaMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
get_dev_nvals<<<1,1>>>(qf, dev_num_locks);
cudaDeviceSynchronize();
uint64_t num_locks = dev_num_locks[0];
cudaFree(dev_num_locks);
uint64_t key_block = (nitems-1)/key_block_size + 1;
//keys are hashed, now need to treat them as hashed in all further functions
hash_all<<<key_block, key_block_size>>>(qf, hashes, hashes, nitems, 0);
thrust::sort(thrust::device, hashes, hashes+nitems);
set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nitems, hashes, 0);
set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nitems, hashes);
uint64_t * misses;
//this is fine, should never be triggered
cudaMallocManaged((void **)&misses, sizeof(uint64_t));
cudaMemset(misses, 0, sizeof(uint64_t));
cudaDeviceSynchronize();
auto midpoint = std::chrono::high_resolution_clock::now();
const int bulk_block_size = 1024;
bulk_get_cooperative<<<(nitems*32-1)/bulk_block_size+1, bulk_block_size>>>(qf, hashes, nitems, misses);
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> sort_diff = midpoint-start;
std::chrono::duration<double> diff = end-midpoint;
std::cout << "sorted " << nitems << " in " << sort_diff.count() << " seconds\n";
std::cout << "Queried " << nitems << " in " << diff.count() << " seconds\n";
uint64_t output = misses[0];
cudaFree(misses);
return output;
}
__host__ uint64_t point_get_wrapper(QF * qf, uint64_t * hashes, uint64_t nitems){
// uint64_t * misses;
// //this is fine, should never be triggered
// cudaMallocManaged((void **)&misses, sizeof(uint64_t));
// cudaMemset(misses, 0, sizeof(uint64_t));
point_bulk_get_nocount<<<(nitems-1)/512+1, 512>>>(qf, hashes, nitems);
cudaDeviceSynchronize();
// uint64_t toReturn = *misses;
// cudaFree(misses);
// return toReturn;
return 0;
}
__host__ uint64_t point_get_wrapper_fp(QF * qf, uint64_t * hashes, uint64_t nitems){
uint64_t * misses;
//this is fine, should never be triggered
cudaMallocManaged((void **)&misses, sizeof(uint64_t));
cudaMemset(misses, 0, sizeof(uint64_t));
point_bulk_get<<<(nitems-1)/512+1, 512>>>(qf, hashes, nitems, misses);
cudaDeviceSynchronize();
uint64_t toReturn = *misses;
cudaFree(misses);
return toReturn;
//return 0;
}
__global__ void point_bulk_insert(QF * qf, uint64_t * hashes, uint64_t nitems){
uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >=nitems) return;
//#if DROP_ON_RUNEND
point_insert(qf, hashes[tid], 0, 0);
// #else
// assert(point_insert(qf, hashes[tid], 0, 0) != QF_FULL);
// #endif
}
__global__ void point_bulk_insert_cooperative(QF * qf, uint64_t * hashes, uint64_t nitems){
uint64_t itemID = threadIdx.x + blockIdx.x * blockDim.x;
uint64_t tid = itemID / 32;
int warpID = itemID % 32;
if (tid >=nitems) return;
uint8_t retvalue;
assert(point_insert_not_exists_cooperative(qf, hashes[tid], 0, retvalue, 0, warpID) != QF_FULL);
}
//write the filter's num_locks value into the provided device-visible buffer;
__global__ void get_dev_nvals(QF* qf, uint64_t * external_nvals){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= 1) return;
external_nvals[0] = qf->runtimedata->num_locks;
}
//modified version of buffers_provided - performs an initial bulk hash, should save work over other versions
//note: this DOES modify the given buffer - fine for all versions now
//This variant performs an initial sort that allows us to save time overall
//as we avoid the atomic count-off and any sort of cross-thread communication
__host__ void bulk_insert(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {
uint64_t key_block_size = 32;
uint64_t key_block = (nvals -1)/key_block_size + 1;
//start with num_locks, get counts
//This is slow, but there isn't a better way to do it
//we'll have to see how this affects performance
uint64_t * dev_num_locks;
cudaMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
get_dev_nvals<<<1,1>>>(qf, dev_num_locks);
cudaDeviceSynchronize();
uint64_t num_locks = dev_num_locks[0];
cudaFree(dev_num_locks);
//keys are hashed, now need to treat them as hashed in all further functions
hash_all<<<key_block, key_block_size>>>(qf, keys, keys, nvals, flags);
thrust::sort(thrust::device, keys, keys+nvals);
set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys, flags);
set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys);
//insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes);
//return;
const int bulk_block_size = 32;
uint64_t evenness = 0;
insert_from_buffers_hashed<<<(num_locks-1)/bulk_block_size+1, bulk_block_size>>>(qf, evenness);
evenness = 1;
insert_from_buffers_hashed<<<(num_locks-1)/bulk_block_size+1, bulk_block_size>>>(qf, evenness);
}
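//usage sketch (illustrative only - filter construction is outside this excerpt;
//names such as d_keys are placeholders): given a device-resident QF* qf and a
//device array d_keys holding nvals raw keys,
//  bulk_insert(qf, nvals, d_keys, QF_NO_LOCK);
//  cudaDeviceSynchronize(); //the insert kernels are launched asynchronously
//note that d_keys is hashed and sorted in place, as stated above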
__host__ void bulk_insert_cooperative(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {
uint64_t key_block_size = 32;
uint64_t key_block = (nvals -1)/key_block_size + 1;
//start with num_locks, get counts
//This is slow, but there isn't a better way to do it
//we'll have to see how this affects performance
uint64_t * dev_num_locks;
cudaMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
get_dev_nvals<<<1,1>>>(qf, dev_num_locks);
cudaDeviceSynchronize();
uint64_t num_locks = dev_num_locks[0];
cudaFree(dev_num_locks);
//keys are hashed, now need to treat them as hashed in all further functions
hash_all<<<key_block, key_block_size>>>(qf, keys, keys, nvals, flags);
thrust::sort(thrust::device, keys, keys+nvals);
set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys, flags);
set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys);
//insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes);
//return;
uint64_t evenness = 0;
insert_from_buffers_cooperative<<<(32*num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness);
evenness = 1;
insert_from_buffers_cooperative<<<(32*num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness);
}
//modified version of buffers_provided - performs an initial bulk hash, should save work over other versions
//note: this DOES modify the given buffer - fine for all versions now
//This variant performs an initial sort that allows us to save time overall
//as we avoid the atomic count-off and any sort of cross-thread communication
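//it additionally collapses duplicate hashes with thrust::reduce_by_key so that each
//distinct hash is inserted exactly once, carrying its multiplicity as the count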
__host__ void bulk_insert_reduce(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {
uint64_t key_block_size = 32;
uint64_t key_block = (nvals -1)/key_block_size + 1;
//start with num_locks, get counts
//This is slow, but there isn't a better way to do it
uint64_t * dev_num_locks;
cudaMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
get_dev_nvals<<<1,1>>>(qf, dev_num_locks);
cudaDeviceSynchronize();
uint64_t num_locks = dev_num_locks[0];
cudaFree(dev_num_locks);
//keys are hashed, now need to treat them as hashed in all further functions
hash_all<<<key_block, key_block_size>>>(qf, keys, keys, nvals, flags);
thrust::sort(thrust::device, keys, keys+nvals);
thrust::device_ptr<uint64_t> keys_ptr(keys);
thrust::device_ptr<uint64_t> dupe_counts= thrust::device_malloc<uint64_t>(nvals);
thrust::fill(dupe_counts, dupe_counts+nvals, 1);
thrust::device_ptr<uint64_t> thrust_keys = thrust::device_malloc<uint64_t>(nvals);
thrust::device_ptr <uint64_t> thrust_vals = thrust::device_malloc<uint64_t>(nvals);
thrust::pair<thrust::device_ptr<uint64_t>,thrust::device_ptr<uint64_t>> new_end;
new_end = thrust::reduce_by_key(thrust::device, keys_ptr, keys_ptr+nvals, dupe_counts, thrust_keys, thrust_vals);
uint64_t new_nvals = new_end.first - thrust_keys;
printf("New nvals %llu\n", new_nvals);
uint64_t * new_keys = thrust::raw_pointer_cast(thrust_keys);
uint64_t * new_key_counts = thrust::raw_pointer_cast(thrust_vals);
//set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, new_nvals, slots_per_lock, new_keys, num_locks, buffers, flags);
set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, new_nvals, new_keys, flags);
//set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, new_nvals, new_keys, num_locks, (uint64_t *) buffer_sizes, buffers);
set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, new_nvals, new_keys);
//insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes);
//return;
uint64_t evenness = 0;
insert_from_buffers_thrust<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness, new_keys,new_key_counts);
evenness = 1;
insert_from_buffers_thrust<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness, new_keys, new_key_counts);
//free resources
thrust::device_free(thrust_keys);
thrust::device_free(thrust_vals);
thrust::device_free(dupe_counts);
}
__host__ void bulk_delete(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {
uint64_t key_block_size = 32;
uint64_t key_block = (nvals -1)/key_block_size + 1;
//start with num_locks, get counts
//This is slow, but there isn't a better way to do it
uint64_t * dev_num_locks;
cudaMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
get_dev_nvals<<<1,1>>>(qf, dev_num_locks);
cudaDeviceSynchronize();
uint64_t num_locks = dev_num_locks[0];
cudaFree(dev_num_locks);
//keys are hashed, now need to treat them as hashed in all further functions
hash_all<<<key_block, key_block_size>>>(qf, keys, keys, nvals, flags);
thrust::sort(thrust::device, keys, keys+nvals);
//set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, slots_per_lock, keys, num_locks, buffers, flags);
set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys, flags);
//set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys, num_locks, (uint64_t *) buffer_sizes, buffers);
set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys);
//insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes);
//return;
uint64_t evenness = 0;
delete_from_buffers_hashed<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness);
evenness = 1;
delete_from_buffers_hashed<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness);
}
__global__ void bulk_get_nocount(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t key_count, uint8_t flags){
uint64_t tid = threadIdx.x+blockDim.x*blockIdx.x;
if (tid >= nvals) return;
uint64_t count = qf_count_key_value(qf, vals[tid], 0, 0);
return;
}
__global__ void bulk_get_misses(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t key_count, uint64_t * counter, uint8_t flags){
uint64_t tid = threadIdx.x + blockDim.x * blockIdx.x;
//should never happen, but just in case
if (tid >= nvals) return;
uint64_t count = qf_count_key_value(qf, vals[tid], 0, 0);
if (count < key_count) {
atomicAdd((long long unsigned int *)counter, (long long unsigned int) 1);
}
}
__global__ void bulk_get_kernel(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t * returns, uint8_t flags){
uint64_t tid = threadIdx.x + blockDim.x * blockIdx.x;
//should never happen, but just in case
if (tid >= nvals) return;
returns[tid] = qf_count_key_value(qf, vals[tid], 0, flags);
}
__host__ void bulk_get(QF * qf, uint64_t nvals, uint64_t * vals, uint64_t * returns){
bulk_get_kernel<<<(nvals-1)/512+1, 512>>>(qf, vals, nvals, returns, QF_NO_LOCK);
}
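//usage sketch (illustrative only; d_vals and d_returns are placeholder names for
//device arrays of length nvals): after the call, d_returns[i] holds the stored
//count for d_vals[i], or 0 when the key is absent
//  bulk_get(qf, nvals, d_vals, d_returns);
//  cudaDeviceSynchronize();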
__host__ uint64_t bulk_get_misses_wrapper(QF * qf, uint64_t * vals, uint64_t nvals){
uint64_t * misses;
//this is fine, should never be triggered
cudaMallocManaged((void **)&misses, sizeof(uint64_t));
cudaMemset(misses, 0, sizeof(uint64_t));
bulk_get_misses<<<(nvals-1)/512+1, 512>>>(qf, vals, nvals, 1, misses, QF_NO_LOCK);
cudaDeviceSynchronize();
uint64_t toReturn = *misses;
cudaFree(misses);
return toReturn;
//return 0;
}
//this bad boy doesn't count misses - it only issues the queries
__host__ uint64_t bulk_get_nocount_wrapper(QF * qf, uint64_t * vals, uint64_t nvals){
bulk_get_nocount<<<(nvals-1)/512+1, 512>>>(qf, vals, nvals, 1, QF_NO_LOCK);
cudaDeviceSynchronize();
return 0;
//return 0;
}
__host__ __device__ int qf_set_count(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
flags)
{
if (count == 0)
return 0;
uint64_t cur_count = qf_count_key_value(qf, key, value, flags);
int64_t delta = count - cur_count;
int ret;
if (delta == 0)
ret = 0;
else if (delta > 0)
ret = qf_insert(qf, key, value, delta, flags);
else
ret = qf_remove(qf, key, value, labs(delta), flags);
return ret;
}
__host__ __device__ int qf_remove(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
flags)
{
if (count == 0)
return true;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value &
BITMASK(qf->metadata->value_bits));
return _remove(qf, hash, count, flags);
}
__host__ __device__ int qf_delete_key_value(QF *qf, uint64_t key, uint64_t value, uint8_t flags)
{
uint64_t count = qf_count_key_value(qf, key, value, flags);
if (count == 0)
return true;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value &
BITMASK(qf->metadata->value_bits));
return _remove(qf, hash, count, flags);
}
__host__ __device__ uint64_t qf_count_key_value(const QF *qf, uint64_t key, uint64_t value,
uint8_t flags)
{
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value &
BITMASK(qf->metadata->value_bits));
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
int64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
if (!is_occupied(qf, hash_bucket_index))
return 0;
int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,
hash_bucket_index-1)
+ 1;
if (runstart_index < hash_bucket_index)
runstart_index = hash_bucket_index;
/* printf("MC RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t current_remainder, current_count, current_end;
do {
current_end = decode_counter(qf, runstart_index, ¤t_remainder,
¤t_count);
if (current_remainder == hash_remainder)
return current_count;
runstart_index = current_end + 1;
} while (!is_runend(qf, current_end));
return 0;
}
__host__ __device__ uint64_t qf_query(const QF *qf, uint64_t key, uint64_t *value, uint8_t flags)
{
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = key;
uint64_t hash_remainder = hash & BITMASK(qf->metadata->key_remainder_bits);
int64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
if (!is_occupied(qf, hash_bucket_index))
return 0;
int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,
hash_bucket_index-1)
+ 1;
if (runstart_index < hash_bucket_index)
runstart_index = hash_bucket_index;
/* printf("MC RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t current_remainder, current_count, current_end;
do {
current_end = decode_counter(qf, runstart_index, ¤t_remainder,
¤t_count);
*value = current_remainder & BITMASK(qf->metadata->value_bits);
current_remainder = current_remainder >> qf->metadata->value_bits;
if (current_remainder == hash_remainder) {
return current_count;
}
runstart_index = current_end + 1;
} while (!is_runend(qf, current_end));
return 0;
}
__host__ __device__ int64_t qf_get_unique_index(const QF *qf, uint64_t key, uint64_t value,
uint8_t flags)
{
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value &
BITMASK(qf->metadata->value_bits));
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
int64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
if (!is_occupied(qf, hash_bucket_index))
return QF_DOESNT_EXIST;
int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,
hash_bucket_index-1)
+ 1;
if (runstart_index < hash_bucket_index)
runstart_index = hash_bucket_index;
/* printf("MC RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
uint64_t current_remainder, current_count, current_end;
do {
current_end = decode_counter(qf, runstart_index, ¤t_remainder,
¤t_count);
if (current_remainder == hash_remainder)
return runstart_index;
runstart_index = current_end + 1;
} while (!is_runend(qf, current_end));
return QF_DOESNT_EXIST;
}
enum qf_hashmode qf_get_hashmode(const QF *qf) {
return qf->metadata->hash_mode;
}
uint64_t qf_get_hash_seed(const QF *qf) {
return qf->metadata->seed;
}
__uint64_t qf_get_hash_range(const QF *qf) {
return qf->metadata->range;
}
bool qf_is_auto_resize_enabled(const QF *qf) {
if (qf->runtimedata->auto_resize == 1)
return true;
return false;
}
uint64_t qf_get_total_size_in_bytes(const QF *qf) {
return qf->metadata->total_size_in_bytes;
}
uint64_t qf_get_nslots(const QF *qf) {
return qf->metadata->nslots;
}
uint64_t qf_get_num_occupied_slots(const QF *qf) {
pc_sync(&qf->runtimedata->pc_noccupied_slots);
return qf->metadata->noccupied_slots;
}
uint64_t qf_get_num_key_bits(const QF *qf) {
return qf->metadata->key_bits;
}
uint64_t qf_get_num_value_bits(const QF *qf) {
return qf->metadata->value_bits;
}
uint64_t qf_get_num_key_remainder_bits(const QF *qf) {
return qf->metadata->key_remainder_bits;
}
uint64_t qf_get_bits_per_slot(const QF *qf) {
return qf->metadata->bits_per_slot;
}
uint64_t qf_get_sum_of_counts(const QF *qf) {
pc_sync(&qf->runtimedata->pc_nelts);
return qf->metadata->nelts;
}
uint64_t qf_get_num_distinct_key_value_pairs(const QF *qf) {
pc_sync(&qf->runtimedata->pc_ndistinct_elts);
return qf->metadata->ndistinct_elts;
}
void qf_sync_counters(const QF *qf) {
pc_sync(&qf->runtimedata->pc_ndistinct_elts);
pc_sync(&qf->runtimedata->pc_nelts);
pc_sync(&qf->runtimedata->pc_noccupied_slots);
}
/* initialize the iterator at the run corresponding
* to the position index
*/
int64_t qf_iterator_from_position(const QF *qf, QFi *qfi, uint64_t position)
{
if (position == 0xffffffffffffffff) {
qfi->current = 0xffffffffffffffff;
qfi->qf = qf;
return QFI_INVALID;
}
assert(position < qf->metadata->nslots);
if (!is_occupied(qf, position)) {
uint64_t block_index = position;
uint64_t idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
if (idx == 64) {
while(idx == 64 && block_index < qf->metadata->nblocks) {
block_index++;
idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
}
}
position = block_index * QF_SLOTS_PER_BLOCK + idx;
}
qfi->qf = qf;
qfi->num_clusters = 0;
qfi->run = position;
qfi->current = position == 0 ? 0 : run_end(qfi->qf, position-1) + 1;
if (qfi->current < position)
qfi->current = position;
#ifdef LOG_CLUSTER_LENGTH
qfi->c_info = (cluster_data* )calloc(qf->metadata->nslots/32,
sizeof(cluster_data));
if (qfi->c_info == NULL) {
perror("Couldn't allocate memory for c_info.");
exit(EXIT_FAILURE);
}
qfi->cur_start_index = position;
qfi->cur_length = 1;
#endif
if (qfi->current >= qf->metadata->nslots)
return QFI_INVALID;
return qfi->current;
}
int64_t qf_iterator_from_key_value(const QF *qf, QFi *qfi, uint64_t key,
uint64_t value, uint8_t flags)
{
if (key >= qf->metadata->range) {
qfi->current = 0xffffffffffffffff;
qfi->qf = qf;
return QFI_INVALID;
}
qfi->qf = qf;
qfi->num_clusters = 0;
if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
key = MurmurHash64A(((void *)&key), sizeof(key),
qf->metadata->seed) % qf->metadata->range;
else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
key = hash_64(key, BITMASK(qf->metadata->key_bits));
}
uint64_t hash = (key << qf->metadata->value_bits) | (value &
BITMASK(qf->metadata->value_bits));
uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
bool flag = false;
// If a run starts at "position" move the iterator to point it to the
// smallest key greater than or equal to "hash".
if (is_occupied(qf, hash_bucket_index)) {
uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,
hash_bucket_index-1)
+ 1;
if (runstart_index < hash_bucket_index)
runstart_index = hash_bucket_index;
uint64_t current_remainder, current_count, current_end;
do {
current_end = decode_counter(qf, runstart_index, ¤t_remainder,
¤t_count);
if (current_remainder >= hash_remainder) {
flag = true;
break;
}
runstart_index = current_end + 1;
} while (!is_runend(qf, current_end));
// found "hash" or smallest key greater than "hash" in this run.
if (flag) {
qfi->run = hash_bucket_index;
qfi->current = runstart_index;
}
}
// If a run doesn't start at "position" or the largest key in the run
// starting at "position" is smaller than "hash" then find the start of the
// next run.
if (!is_occupied(qf, hash_bucket_index) || !flag) {
uint64_t position = hash_bucket_index;
assert(position < qf->metadata->nslots);
uint64_t block_index = position / QF_SLOTS_PER_BLOCK;
uint64_t idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
if (idx == 64) {
while(idx == 64 && block_index < qf->metadata->nblocks) {
block_index++;
idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
}
}
position = block_index * QF_SLOTS_PER_BLOCK + idx;
qfi->run = position;
qfi->current = position == 0 ? 0 : run_end(qfi->qf, position-1) + 1;
if (qfi->current < position)
qfi->current = position;
}
if (qfi->current >= qf->metadata->nslots)
return QFI_INVALID;
return qfi->current;
}
static int qfi_get(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t
*count)
{
if (qfi_end(qfi))
return QFI_INVALID;
uint64_t current_remainder, current_count;
decode_counter(qfi->qf, qfi->current, ¤t_remainder, ¤t_count);
*value = current_remainder & BITMASK(qfi->qf->metadata->value_bits);
current_remainder = current_remainder >> qfi->qf->metadata->value_bits;
*key = (qfi->run << qfi->qf->metadata->key_remainder_bits) | current_remainder;
*count = current_count;
return 0;
}
int qfi_get_key(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t
*count)
{
*key = *value = *count = 0;
int ret = qfi_get(qfi, key, value, count);
if (ret == 0) {
if (qfi->qf->metadata->hash_mode == QF_HASH_DEFAULT) {
*key = 0; *value = 0; *count = 0;
return QF_INVALID;
} else if (qfi->qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
*key = hash_64i(*key, BITMASK(qfi->qf->metadata->key_bits));
}
return ret;
}
int qfi_get_hash(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t
*count)
{
*key = *value = *count = 0;
return qfi_get(qfi, key, value, count);
}
int qfi_next(QFi *qfi)
{
if (qfi_end(qfi))
return QFI_INVALID;
else {
/* move to the end of the current counter*/
uint64_t current_remainder, current_count;
qfi->current = decode_counter(qfi->qf, qfi->current, ¤t_remainder,
¤t_count);
if (!is_runend(qfi->qf, qfi->current)) {
qfi->current++;
#ifdef LOG_CLUSTER_LENGTH
qfi->cur_length++;
#endif
if (qfi_end(qfi))
return QFI_INVALID;
return 0;
} else {
#ifdef LOG_CLUSTER_LENGTH
/* save to check if the new current is the new cluster. */
uint64_t old_current = qfi->current;
#endif
uint64_t block_index = qfi->run / QF_SLOTS_PER_BLOCK;
uint64_t rank = bitrank(get_block(qfi->qf, block_index)->occupieds[0],
qfi->run % QF_SLOTS_PER_BLOCK);
uint64_t next_run = bitselect(get_block(qfi->qf,
block_index)->occupieds[0],
rank);
if (next_run == 64) {
rank = 0;
while (next_run == 64 && block_index < qfi->qf->metadata->nblocks) {
block_index++;
next_run = bitselect(get_block(qfi->qf, block_index)->occupieds[0],
rank);
}
}
if (block_index == qfi->qf->metadata->nblocks) {
/* set the index values to max. */
qfi->run = qfi->current = qfi->qf->metadata->xnslots;
return QFI_INVALID;
}
qfi->run = block_index * QF_SLOTS_PER_BLOCK + next_run;
qfi->current++;
if (qfi->current < qfi->run)
qfi->current = qfi->run;
#ifdef LOG_CLUSTER_LENGTH
if (qfi->current > old_current + 1) { /* new cluster. */
if (qfi->cur_length > 10) {
qfi->c_info[qfi->num_clusters].start_index = qfi->cur_start_index;
qfi->c_info[qfi->num_clusters].length = qfi->cur_length;
qfi->num_clusters++;
}
qfi->cur_start_index = qfi->run;
qfi->cur_length = 1;
} else {
qfi->cur_length++;
}
#endif
return 0;
}
}
}
bool qfi_end(const QFi *qfi)
{
if (qfi->current >= qfi->qf->metadata->xnslots /*&& is_runend(qfi->qf, qfi->current)*/)
return true;
return false;
}
/*
* Merge qfa and qfb into qfc
*/
/*
* iterate over both qf (qfa and qfb)
* simultaneously
* for each index i
* min(get_value(qfa, ia) < get_value(qfb, ib))
* insert(min, ic)
* increment either ia or ib, whichever is minimum.
*/
void qf_merge(const QF *qfa, const QF *qfb, QF *qfc)
{
QFi qfia, qfib;
qf_iterator_from_position(qfa, &qfia, 0);
qf_iterator_from_position(qfb, &qfib, 0);
if (qfa->metadata->hash_mode != qfc->metadata->hash_mode &&
qfa->metadata->seed != qfc->metadata->seed &&
qfb->metadata->hash_mode != qfc->metadata->hash_mode &&
qfb->metadata->seed != qfc->metadata->seed) {
fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n");
exit(1);
}
uint64_t keya, valuea, counta, keyb, valueb, countb;
qfi_get_hash(&qfia, &keya, &valuea, &counta);
qfi_get_hash(&qfib, &keyb, &valueb, &countb);
do {
if (keya < keyb) {
qf_insert(qfc, keya, valuea, counta, QF_NO_LOCK | QF_KEY_IS_HASH);
qfi_next(&qfia);
qfi_get_hash(&qfia, &keya, &valuea, &counta);
}
else {
qf_insert(qfc, keyb, valueb, countb, QF_NO_LOCK | QF_KEY_IS_HASH);
qfi_next(&qfib);
qfi_get_hash(&qfib, &keyb, &valueb, &countb);
}
} while(!qfi_end(&qfia) && !qfi_end(&qfib));
if (!qfi_end(&qfia)) {
do {
qfi_get_hash(&qfia, &keya, &valuea, &counta);
qf_insert(qfc, keya, valuea, counta, QF_NO_LOCK | QF_KEY_IS_HASH);
} while(!qfi_next(&qfia));
}
if (!qfi_end(&qfib)) {
do {
qfi_get_hash(&qfib, &keyb, &valueb, &countb);
qf_insert(qfc, keyb, valueb, countb, QF_NO_LOCK | QF_KEY_IS_HASH);
} while(!qfi_next(&qfib));
}
}
/*
* Merge an array of qfs into the resultant QF
*/
void qf_multi_merge(const QF *qf_arr[], int nqf, QF *qfr)
{
int i;
QFi qfi_arr[nqf];
int smallest_idx = 0;
uint64_t smallest_key = UINT64_MAX;
for (i=0; i<nqf; i++) {
if (qf_arr[i]->metadata->hash_mode != qfr->metadata->hash_mode &&
qf_arr[i]->metadata->seed != qfr->metadata->seed) {
fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n");
exit(1);
}
qf_iterator_from_position(qf_arr[i], &qfi_arr[i], 0);
}
DEBUG_CQF("Merging %d CQFs\n", nqf);
for (i=0; i<nqf; i++) {
DEBUG_CQF("CQF %d\n", i);
DEBUG_DUMP(qf_arr[i]);
}
while (nqf > 1) {
uint64_t keys[nqf];
uint64_t values[nqf];
uint64_t counts[nqf];
for (i=0; i<nqf; i++)
qfi_get_hash(&qfi_arr[i], &keys[i], &values[i], &counts[i]);
do {
smallest_key = UINT64_MAX;
for (i=0; i<nqf; i++) {
if (keys[i] < smallest_key) {
smallest_key = keys[i]; smallest_idx = i;
}
}
qf_insert(qfr, keys[smallest_idx], values[smallest_idx],
counts[smallest_idx], QF_NO_LOCK | QF_KEY_IS_HASH);
qfi_next(&qfi_arr[smallest_idx]);
qfi_get_hash(&qfi_arr[smallest_idx], &keys[smallest_idx],
&values[smallest_idx],
&counts[smallest_idx]);
} while(!qfi_end(&qfi_arr[smallest_idx]));
/* remove the qf that is exhausted from the array */
if (smallest_idx < nqf-1)
memmove(&qfi_arr[smallest_idx], &qfi_arr[smallest_idx+1],
(nqf-smallest_idx-1)*sizeof(qfi_arr[0]));
nqf--;
}
if (!qfi_end(&qfi_arr[0])) {
uint64_t iters = 0;
do {
uint64_t key, value, count;
qfi_get_hash(&qfi_arr[0], &key, &value, &count);
qf_insert(qfr, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH);
qfi_next(&qfi_arr[0]);
iters++;
} while(!qfi_end(&qfi_arr[0]));
DEBUG_CQF("Num of iterations: %lu\n", iters);
}
DEBUG_CQF("%s", "Final CQF after merging.\n");
DEBUG_DUMP(qfr);
return;
}
/* find the inner product of two QFs. */
uint64_t qf_inner_product(const QF *qfa, const QF *qfb)
{
uint64_t acc = 0;
QFi qfi;
const QF *qf_mem, *qf_disk;
if (qfa->metadata->hash_mode != qfb->metadata->hash_mode &&
qfa->metadata->seed != qfb->metadata->seed) {
fprintf(stderr, "Input QFs do not have the same hash mode or seed.\n");
exit(1);
}
// create the iterator on the larger QF.
if (qfa->metadata->total_size_in_bytes > qfb->metadata->total_size_in_bytes)
{
qf_mem = qfb;
qf_disk = qfa;
} else {
qf_mem = qfa;
qf_disk = qfb;
}
qf_iterator_from_position(qf_disk, &qfi, 0);
do {
uint64_t key = 0, value = 0, count = 0;
uint64_t count_mem;
qfi_get_hash(&qfi, &key, &value, &count);
if ((count_mem = qf_count_key_value(qf_mem, key, 0, QF_KEY_IS_HASH)) > 0) {
acc += count*count_mem;
}
} while (!qfi_next(&qfi));
return acc;
}
/* find the intersection of two QFs. */
void qf_intersect(const QF *qfa, const QF *qfb, QF *qfr)
{
QFi qfi;
const QF *qf_mem, *qf_disk;
if (qfa->metadata->hash_mode != qfr->metadata->hash_mode &&
qfa->metadata->seed != qfr->metadata->seed &&
qfb->metadata->hash_mode != qfr->metadata->hash_mode &&
qfb->metadata->seed != qfr->metadata->seed) {
fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n");
exit(1);
}
// create the iterator on the larger QF.
if (qfa->metadata->total_size_in_bytes > qfb->metadata->total_size_in_bytes)
{
qf_mem = qfb;
qf_disk = qfa;
} else {
qf_mem = qfa;
qf_disk = qfb;
}
qf_iterator_from_position(qf_disk, &qfi, 0);
do {
uint64_t key = 0, value = 0, count = 0;
qfi_get_hash(&qfi, &key, &value, &count);
if (qf_count_key_value(qf_mem, key, 0, QF_KEY_IS_HASH) > 0)
qf_insert(qfr, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH);
} while (!qfi_next(&qfi));
}
|
76c7425538a7d541b1b36616f1ff8a8f3f17a1dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SDSC Summer Institute 2018
// Andreas Goetz ([email protected])
// CUDA program to add two integer numbers on the GPU
//
#include<stdio.h>
//
// CUDA device function that adds two integer numbers
//
__global__ void add(int *a, int *b, int *c){
*c = *a + *b;
}
//
// main program
//
int main(void) {
int h_a, h_b, h_c; // host copies
int *d_a, *d_b, *d_c; // device copies
int size = sizeof(int);
// allocate device memory
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// setup input data
h_a = 5;
h_b = 7;
// copy input data to device
hipMemcpy(d_a, &h_a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &h_b, size, hipMemcpyHostToDevice);
// launch kernel
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
// copy results back to host
hipMemcpy(&h_c, d_c, size, hipMemcpyDeviceToHost);
// deallocate memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// print results
printf("\n Addition on CPU: %d + %d = %d\n", h_a, h_b, h_a + h_b);
printf("\n Addition on GPU: %d + %d = %d\n\n",h_a, h_b, h_c);
return 0;
}
| 76c7425538a7d541b1b36616f1ff8a8f3f17a1dd.cu | // SDSC Summer Institute 2018
// Andreas Goetz ([email protected])
// CUDA program to add two integer numbers on the GPU
//
#include<stdio.h>
//
// CUDA device function that adds two integer numbers
//
__global__ void add(int *a, int *b, int *c){
*c = *a + *b;
}
//
// main program
//
int main(void) {
int h_a, h_b, h_c; // host copies
int *d_a, *d_b, *d_c; // device copies
int size = sizeof(int);
// allocate device memory
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// setup input data
h_a = 5;
h_b = 7;
// copy input data to device
cudaMemcpy(d_a, &h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &h_b, size, cudaMemcpyHostToDevice);
// launch kernel
add<<<1,1>>>(d_a, d_b, d_c);
// copy results back to host
cudaMemcpy(&h_c, d_c, size, cudaMemcpyDeviceToHost);
// deallocate memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// print results
printf("\n Addition on CPU: %d + %d = %d\n", h_a, h_b, h_a + h_b);
printf("\n Addition on GPU: %d + %d = %d\n\n",h_a, h_b, h_c);
return 0;
}
|
43cf7cb0ca528e378fe488e99e33ec5fe90a61b7.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
* penguinV: https://github.com/ihhub/penguinV *
* Copyright (C) 2017 - 2022 *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#include <algorithm>
#include <assert.h>
#include "cuda_device.cuh"
#include "cuda_helper.cuh"
#include "../penguinv_exception.h"
namespace
{
#if (_MSC_VER && _MSC_VER >= 1400)
#ifndef thread_local
#define thread_local __declspec(thread)
#endif
#endif
thread_local int defaultDeviceId = 0;
void setDefaultDeviceId( int deviceId )
{
defaultDeviceId = deviceId;
}
int getDefaultDeviceId()
{
return defaultDeviceId;
}
// This class is a helper to remember and restore back
// previous device ID for current thread
class DeviceAutoRestorer
{
public:
DeviceAutoRestorer( int currentDeviceId )
: _currentDeviceId ( currentDeviceId )
, _previousDeviceId( getDefaultDeviceId() )
{
if( _currentDeviceId != _previousDeviceId )
multiCuda::cudaCheck( hipSetDevice( _currentDeviceId ) );
}
~DeviceAutoRestorer()
{
if( _currentDeviceId != _previousDeviceId ) {
multiCuda::cudaCheck( hipSetDevice( _previousDeviceId ) );
setDefaultDeviceId( _previousDeviceId );
}
}
private:
int _currentDeviceId;
int _previousDeviceId;
};
}
namespace multiCuda
{
namespace MemoryManager
{
MemoryAllocator & memory()
{
return CudaDeviceManager::instance().device( getDefaultDeviceId() ).allocator();
}
MemoryAllocator & memory( int deviceId )
{
return CudaDeviceManager::instance().device( deviceId ).allocator();
}
}
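// A CudaDevice wraps a single physical GPU: it caches the device properties, sizes a
// MemoryAllocator to the memory that is currently free and owns the device's pool of CUDA streams.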
CudaDevice::CudaDevice( int deviceId_ )
: _currentStreamId( 0u )
{
if( deviceId_ < 0 )
throw penguinVException( "Invalid CUDA device ID" );
_deviceId = deviceId_;
cudaCheck( hipGetDeviceProperties( &_deviceProperty, _deviceId ) );
DeviceAutoRestorer restorer( _deviceId );
size_t freeSpace = 0;
size_t totalSpace = 0;
cudaCheck( hipMemGetInfo( &freeSpace, &totalSpace ) );
assert( totalSpace == _deviceProperty.totalGlobalMem );
_allocator = new MemoryAllocator( freeSpace );
_backupDeviceProperty = _deviceProperty;
}
CudaDevice::CudaDevice( const CudaDevice & )
{
}
CudaDevice & CudaDevice::operator=( const CudaDevice & )
{
return (*this);
}
CudaDevice::~CudaDevice()
{
setActive();
delete _allocator;
for( std::vector< CudaStream * >::iterator streamId = _stream.begin(); streamId != _stream.end(); ++streamId )
delete (*streamId);
}
int CudaDevice::deviceId() const
{
return _deviceId;
}
std::string CudaDevice::name() const
{
return _deviceProperty.name;
}
size_t CudaDevice::totalMemorySize() const
{
return _deviceProperty.totalGlobalMem;
}
std::string CudaDevice::computeCapability() const
{
char capability[32];
sprintf( capability, "%d.%d", _deviceProperty.major, _deviceProperty.minor );
return capability;
}
size_t CudaDevice::sharedMemoryPerBlock() const
{
return _deviceProperty.sharedMemPerBlock;
}
uint32_t CudaDevice::threadsPerBlock() const
{
return static_cast<uint32_t>(_deviceProperty.maxThreadsPerBlock);
}
uint32_t CudaDevice::maximumThreadsPerBlock() const
{
return static_cast<uint32_t>(_backupDeviceProperty.maxThreadsPerBlock);
}
dim3 CudaDevice::blockDimension() const
{
return dim3( static_cast<uint32_t>(_deviceProperty.maxThreadsDim[0]),
static_cast<uint32_t>(_deviceProperty.maxThreadsDim[1]),
static_cast<uint32_t>(_deviceProperty.maxThreadsDim[2]) );
}
dim3 CudaDevice::dimensionSize() const
{
return dim3( static_cast<uint32_t>(_deviceProperty.maxGridSize[0]),
static_cast<uint32_t>(_deviceProperty.maxGridSize[1]),
static_cast<uint32_t>(_deviceProperty.maxGridSize[2]) );
}
int CudaDevice::dmaEngineCount() const
{
return _deviceProperty.asyncEngineCount;
}
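// The requested thread count must be a non-zero multiple of 32 and is applied only
// when it does not exceed the hardware maximum for this device.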
void CudaDevice::setThreadsPerBlock( uint32_t threadCount )
{
if( (threadCount == 0) || (threadCount % 32) != 0 )
throw penguinVException( "Invalid thread count per block" );
const int threads = static_cast<int>(threadCount);
if( threads <= _backupDeviceProperty.maxThreadsPerBlock )
_deviceProperty.maxThreadsPerBlock = threads;
}
void CudaDevice::synchronize()
{
cudaCheck( hipDeviceSynchronize() );
}
size_t CudaDevice::currentStreamId() const
{
return _currentStreamId;
}
void CudaDevice::setCurrentStreamId( size_t streamId )
{
if( _currentStreamId != streamId && streamId < _stream.size() )
_currentStreamId = streamId;
}
CudaStream & CudaDevice::stream()
{
return *(_stream[_currentStreamId]);
}
const CudaStream & CudaDevice::stream() const
{
return *(_stream[_currentStreamId]);
}
CudaStream & CudaDevice::stream( size_t streamId )
{
return *(_stream[streamId]);
}
const CudaStream & CudaDevice::stream( size_t streamId ) const
{
return *(_stream[streamId]);
}
size_t CudaDevice::streamCount() const
{
return _stream.size();
}
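// Grow or shrink the stream pool; when shrinking, the current stream index is reset
// if it would fall outside the new range and the surplus streams are destroyed.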
void CudaDevice::setStreamCount( size_t streamCount )
{
if( streamCount != _stream.size() ) {
if( streamCount > _stream.size() ) {
while( streamCount != _stream.size() )
_stream.push_back( new CudaStream() );
}
else {
if( _currentStreamId >= streamCount )
_currentStreamId = 0;
for( std::vector< CudaStream * >::iterator streamId = _stream.begin() + streamCount; streamId != _stream.end(); ++streamId )
delete (*streamId);
_stream.resize( streamCount );
}
}
}
MemoryAllocator & CudaDevice::allocator()
{
return *_allocator;
}
const MemoryAllocator & CudaDevice::allocator() const
{
return *_allocator;
}
void CudaDevice::setActive()
{
cudaCheck( hipSetDevice( _deviceId ) );
setDefaultDeviceId( _deviceId );
}
CudaDeviceManager::CudaDeviceManager()
: _supportedDeviceCount( 0 )
{
int deviceCount = 0;
if( cudaSafeCheck( hipGetDeviceCount( &deviceCount ) ) )
_supportedDeviceCount = deviceCount;
}
CudaDeviceManager::~CudaDeviceManager()
{
closeDevices();
}
CudaDeviceManager & CudaDeviceManager::instance()
{
static CudaDeviceManager manager;
return manager;
}
void CudaDeviceManager::initializeDevices()
{
for( int deviceId = 0; deviceId < _supportedDeviceCount; ++deviceId )
initializeDevice( deviceId );
}
void CudaDeviceManager::initializeDevice( int deviceId )
{
if( deviceId < 0 || deviceId >= _supportedDeviceCount )
throw penguinVException( "System does not contain a device with such ID" );
std::list<CudaDevice *>::const_iterator foundDevice = std::find_if( _device.begin(), _device.end(),
[&deviceId]( const CudaDevice * device ) { return device->deviceId() == deviceId; } );
if( foundDevice == _device.end() )
_device.push_back( new CudaDevice( deviceId ) );
}
void CudaDeviceManager::closeDevice( int deviceId )
{
if( deviceId < 0 || deviceId >= _supportedDeviceCount )
throw penguinVException( "System does not contain a device with such ID" );
std::list<CudaDevice *>::iterator foundDevice = std::find_if( _device.begin(), _device.end(),
[&deviceId]( const CudaDevice * device ) { return device->deviceId() == deviceId; } );
if( foundDevice != _device.end() ) {
delete (*foundDevice);
_device.erase( foundDevice );
}
}
void CudaDeviceManager::closeDevices()
{
for( std::list<CudaDevice *>::iterator device = _device.begin(); device != _device.end(); ++device )
delete (*device);
_device.clear();
}
int CudaDeviceManager::deviceCount() const
{
return static_cast<int>(_device.size()); // CUDA works with signed int rather than unsigned int :(
}
int CudaDeviceManager::supportedDeviceCount() const
{
return _supportedDeviceCount; // CUDA works with signed int rather than unsigned int :(
}
CudaDevice & CudaDeviceManager::device()
{
return device( getDefaultDeviceId() );
}
const CudaDevice & CudaDeviceManager::device() const
{
return device( getDefaultDeviceId() );
}
CudaDevice & CudaDeviceManager::device( int deviceId )
{
if( _device.empty() )
throw penguinVException( "Device manager does not contain any devices" );
std::list<CudaDevice *>::iterator foundDevice = std::find_if( _device.begin(), _device.end(),
[&deviceId]( const CudaDevice * cudaDevice ) { return cudaDevice->deviceId() == deviceId; } );
if( foundDevice == _device.end() )
throw penguinVException( "Device ID is invalid. Please check that you initialize devices!" );
return *(*foundDevice);
}
const CudaDevice & CudaDeviceManager::device( int deviceId ) const
{
if( _device.empty() )
throw penguinVException( "Device manager does not contain any devices" );
std::list<CudaDevice *>::const_iterator foundDevice = std::find_if( _device.begin(), _device.end(),
[&deviceId]( const CudaDevice * cudaDevice ) { return cudaDevice->deviceId() == deviceId; } );
if( foundDevice == _device.end() )
throw penguinVException( "Device ID is invalid. Please check that you initialize devices!" );
return *(*foundDevice);
}
void CudaDeviceManager::setActiveDevice( int deviceId )
{
std::list<CudaDevice *>::iterator foundDevice = std::find_if( _device.begin(), _device.end(),
[&deviceId]( const CudaDevice * cudaDevice ) { return cudaDevice->deviceId() == deviceId; } );
if( foundDevice == _device.end() )
throw penguinVException( "Device ID is invalid. Please check that you initialize devices!" );
(*foundDevice)->setActive();
}
CudaStream::CudaStream()
: _id( 0 )
{
cudaCheck( hipStreamCreateWithFlags( &_id, hipStreamNonBlocking ) );
}
CudaStream::~CudaStream()
{
cudaSafeCheck( hipStreamDestroy( _id ) );
}
void CudaStream::synchronize()
{
cudaCheck( hipStreamSynchronize( _id ) );
}
bool CudaStream::isFree()
{
hipError_t error = hipStreamQuery( _id );
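// hipStreamQuery returns hipSuccess once all work queued on the stream has completed and
// hipErrorNotReady while work is still pending; any other status is a real error and is
// reported through cudaCheck below.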
if( error == hipSuccess )
return true;
if( error == hipErrorNotReady )
return false;
cudaCheck( error );
return false;
}
hipStream_t CudaStream::id() const
{
return _id;
}
void CudaStream::setCallback( hipStreamCallback_t callbackFunction, void * data )
{
cudaCheck( hipStreamAddCallback( _id, callbackFunction, data, 0 ) );
}
}
| 43cf7cb0ca528e378fe488e99e33ec5fe90a61b7.cu | /***************************************************************************
* penguinV: https://github.com/ihhub/penguinV *
* Copyright (C) 2017 - 2022 *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#include <algorithm>
#include <assert.h>
#include "cuda_device.cuh"
#include "cuda_helper.cuh"
#include "../penguinv_exception.h"
namespace
{
#if (_MSC_VER && _MSC_VER >= 1400)
#ifndef thread_local
#define thread_local __declspec(thread)
#endif
#endif
thread_local int defaultDeviceId = 0;
void setDefaultDeviceId( int deviceId )
{
defaultDeviceId = deviceId;
}
int getDefaultDeviceId()
{
return defaultDeviceId;
}
// This class is a helper to remember and restore back
// previous device ID for current thread
class DeviceAutoRestorer
{
public:
DeviceAutoRestorer( int currentDeviceId )
: _currentDeviceId ( currentDeviceId )
, _previousDeviceId( getDefaultDeviceId() )
{
if( _currentDeviceId != _previousDeviceId )
multiCuda::cudaCheck( cudaSetDevice( _currentDeviceId ) );
}
~DeviceAutoRestorer()
{
if( _currentDeviceId != _previousDeviceId ) {
multiCuda::cudaCheck( cudaSetDevice( _previousDeviceId ) );
setDefaultDeviceId( _previousDeviceId );
}
}
private:
int _currentDeviceId;
int _previousDeviceId;
};
}
namespace multiCuda
{
namespace MemoryManager
{
MemoryAllocator & memory()
{
return CudaDeviceManager::instance().device( getDefaultDeviceId() ).allocator();
}
MemoryAllocator & memory( int deviceId )
{
return CudaDeviceManager::instance().device( deviceId ).allocator();
}
}
CudaDevice::CudaDevice( int deviceId_ )
: _currentStreamId( 0u )
{
if( deviceId_ < 0 )
penguinVException( "Invalid CUDA device ID" );
_deviceId = deviceId_;
cudaCheck( cudaGetDeviceProperties( &_deviceProperty, _deviceId ) );
DeviceAutoRestorer restorer( _deviceId );
size_t freeSpace = 0;
size_t totalSpace = 0;
cudaCheck( cudaMemGetInfo( &freeSpace, &totalSpace ) );
assert( totalSpace == _deviceProperty.totalGlobalMem );
_allocator = new MemoryAllocator( freeSpace );
_backupDeviceProperty = _deviceProperty;
}
CudaDevice::CudaDevice( const CudaDevice & )
{
}
CudaDevice & CudaDevice::operator=( const CudaDevice & )
{
return (*this);
}
CudaDevice::~CudaDevice()
{
setActive();
delete _allocator;
for( std::vector< CudaStream * >::iterator streamId = _stream.begin(); streamId != _stream.end(); ++streamId )
delete (*streamId);
}
int CudaDevice::deviceId() const
{
return _deviceId;
}
std::string CudaDevice::name() const
{
return _deviceProperty.name;
}
size_t CudaDevice::totalMemorySize() const
{
return _deviceProperty.totalGlobalMem;
}
std::string CudaDevice::computeCapability() const
{
char capability[32];
sprintf( capability, "%d.%d", _deviceProperty.major, _deviceProperty.minor );
return capability;
}
size_t CudaDevice::sharedMemoryPerBlock() const
{
return _deviceProperty.sharedMemPerBlock;
}
uint32_t CudaDevice::threadsPerBlock() const
{
return static_cast<uint32_t>(_deviceProperty.maxThreadsPerBlock);
}
uint32_t CudaDevice::maximumThreadsPerBlock() const
{
return static_cast<uint32_t>(_backupDeviceProperty.maxThreadsPerBlock);
}
dim3 CudaDevice::blockDimension() const
{
return dim3( static_cast<uint32_t>(_deviceProperty.maxThreadsDim[0]),
static_cast<uint32_t>(_deviceProperty.maxThreadsDim[1]),
static_cast<uint32_t>(_deviceProperty.maxThreadsDim[2]) );
}
dim3 CudaDevice::dimensionSize() const
{
return dim3( static_cast<uint32_t>(_deviceProperty.maxGridSize[0]),
static_cast<uint32_t>(_deviceProperty.maxGridSize[1]),
static_cast<uint32_t>(_deviceProperty.maxGridSize[2]) );
}
int CudaDevice::dmaEngineCount() const
{
return _deviceProperty.asyncEngineCount;
}
void CudaDevice::setThreadsPerBlock( uint32_t threadCount )
{
if( (threadCount == 0) || (threadCount % 32) != 0 )
throw penguinVException( "Invalid thread count per block" );
const int threads = static_cast<int>(threadCount);
if( threads <= _backupDeviceProperty.maxThreadsPerBlock )
_deviceProperty.maxThreadsPerBlock = threads;
}
void CudaDevice::synchronize()
{
cudaCheck( cudaDeviceSynchronize() );
}
size_t CudaDevice::currentStreamId() const
{
return _currentStreamId;
}
void CudaDevice::setCurrentStreamId( size_t streamId )
{
if( _currentStreamId != streamId && streamId < _stream.size() )
_currentStreamId = streamId;
}
CudaStream & CudaDevice::stream()
{
return *(_stream[_currentStreamId]);
}
const CudaStream & CudaDevice::stream() const
{
return *(_stream[_currentStreamId]);
}
CudaStream & CudaDevice::stream( size_t streamId )
{
return *(_stream[streamId]);
}
const CudaStream & CudaDevice::stream( size_t streamId ) const
{
return *(_stream[streamId]);
}
size_t CudaDevice::streamCount() const
{
return _stream.size();
}
void CudaDevice::setStreamCount( size_t streamCount )
{
if( streamCount != _stream.size() ) {
if( streamCount > _stream.size() ) {
while( streamCount != _stream.size() )
_stream.push_back( new CudaStream() );
}
else {
if( _currentStreamId >= streamCount )
_currentStreamId = 0;
for( std::vector< CudaStream * >::iterator streamId = _stream.begin() + streamCount; streamId != _stream.end(); ++streamId )
delete (*streamId);
_stream.resize( streamCount );
}
}
}
MemoryAllocator & CudaDevice::allocator()
{
return *_allocator;
}
const MemoryAllocator & CudaDevice::allocator() const
{
return *_allocator;
}
void CudaDevice::setActive()
{
cudaCheck( cudaSetDevice( _deviceId ) );
setDefaultDeviceId( _deviceId );
}
CudaDeviceManager::CudaDeviceManager()
: _supportedDeviceCount( 0 )
{
int deviceCount = 0;
if( cudaSafeCheck( cudaGetDeviceCount( &deviceCount ) ) )
_supportedDeviceCount = deviceCount;
}
CudaDeviceManager::~CudaDeviceManager()
{
closeDevices();
}
CudaDeviceManager & CudaDeviceManager::instance()
{
static CudaDeviceManager manager;
return manager;
}
void CudaDeviceManager::initializeDevices()
{
for( int deviceId = 0; deviceId < _supportedDeviceCount; ++deviceId )
initializeDevice( deviceId );
}
void CudaDeviceManager::initializeDevice( int deviceId )
{
if( deviceId < 0 || deviceId >= _supportedDeviceCount )
throw penguinVException( "System does not contain a device with such ID" );
std::list<CudaDevice *>::const_iterator foundDevice = std::find_if( _device.begin(), _device.end(),
[&deviceId]( const CudaDevice * device ) { return device->deviceId() == deviceId; } );
if( foundDevice == _device.end() )
_device.push_back( new CudaDevice( deviceId ) );
}
void CudaDeviceManager::closeDevice( int deviceId )
{
if( deviceId < 0 || deviceId >= _supportedDeviceCount )
throw penguinVException( "System does not contain a device with such ID" );
std::list<CudaDevice *>::iterator foundDevice = std::find_if( _device.begin(), _device.end(),
[&deviceId]( const CudaDevice * device ) { return device->deviceId() == deviceId; } );
if( foundDevice != _device.end() ) {
delete (*foundDevice);
_device.erase( foundDevice );
}
}
void CudaDeviceManager::closeDevices()
{
for( std::list<CudaDevice *>::iterator device = _device.begin(); device != _device.end(); ++device )
delete (*device);
_device.clear();
}
int CudaDeviceManager::deviceCount() const
{
return static_cast<int>(_device.size()); // CUDA works with signed int rather than unsigned int :(
}
int CudaDeviceManager::supportedDeviceCount() const
{
return _supportedDeviceCount; // CUDA works with signed int rather than unsigned int :(
}
CudaDevice & CudaDeviceManager::device()
{
return device( getDefaultDeviceId() );
}
const CudaDevice & CudaDeviceManager::device() const
{
return device( getDefaultDeviceId() );
}
CudaDevice & CudaDeviceManager::device( int deviceId )
{
if( _device.empty() )
throw penguinVException( "Device manager does not contain any devices" );
std::list<CudaDevice *>::iterator foundDevice = std::find_if( _device.begin(), _device.end(),
[&deviceId]( const CudaDevice * cudaDevice ) { return cudaDevice->deviceId() == deviceId; } );
if( foundDevice == _device.end() )
throw penguinVException( "Device ID is invalid. Please check that you initialize devices!" );
return *(*foundDevice);
}
const CudaDevice & CudaDeviceManager::device( int deviceId ) const
{
if( _device.empty() )
throw penguinVException( "Device manager does not contain any devices" );
std::list<CudaDevice *>::const_iterator foundDevice = std::find_if( _device.begin(), _device.end(),
[&deviceId]( const CudaDevice * cudaDevice ) { return cudaDevice->deviceId() == deviceId; } );
if( foundDevice == _device.end() )
throw penguinVException( "Device ID is invalid. Please check that you initialize devices!" );
return *(*foundDevice);
}
void CudaDeviceManager::setActiveDevice( int deviceId )
{
std::list<CudaDevice *>::iterator foundDevice = std::find_if( _device.begin(), _device.end(),
[&deviceId]( const CudaDevice * cudaDevice ) { return cudaDevice->deviceId() == deviceId; } );
if( foundDevice == _device.end() )
throw penguinVException( "Device ID is invalid. Please check that you initialize devices!" );
(*foundDevice)->setActive();
}
CudaStream::CudaStream()
: _id( 0 )
{
cudaCheck( cudaStreamCreateWithFlags( &_id, cudaStreamNonBlocking ) );
}
CudaStream::~CudaStream()
{
cudaSafeCheck( cudaStreamDestroy( _id ) );
}
void CudaStream::synchronize()
{
cudaCheck( cudaStreamSynchronize( _id ) );
}
bool CudaStream::isFree()
{
cudaError error = cudaStreamQuery( _id );
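// cudaStreamQuery returns cudaSuccess once all work queued on the stream has completed and
// cudaErrorNotReady while work is still pending; any other status is a real error and is
// reported through cudaCheck below.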
if( error == cudaSuccess )
return true;
if( error == cudaErrorNotReady )
return false;
cudaCheck( error );
return false;
}
cudaStream_t CudaStream::id() const
{
return _id;
}
void CudaStream::setCallback( cudaStreamCallback_t callbackFunction, void * data )
{
cudaCheck( cudaStreamAddCallback( _id, callbackFunction, data, 0 ) );
}
}
|
4ea0d6529edae79c7ba0dd827a6627fe8e82d225.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "readppm.c"
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
#define BLOCKSIZE 16
#define SHAREDMEMSIZE (BLOCKSIZE + 4)
__device__
inline
void
set_pixel(unsigned char *shared_image, unsigned char *image, int shared_index, int image_index)
{
shared_image[shared_index+0] = image[image_index+0];
shared_image[shared_index+1] = image[image_index+1];
shared_image[shared_index+2] = image[image_index+2];
}
__global__ void filter(unsigned char *image, unsigned char *out, int n, int m)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int sumx, sumy, sumz, k, l;
// printf is OK under --device-emulation
// printf("%d %d %d %d\n", i, j, n, m);
if (j < n && i < m)
{
out[(i*n+j)*3+0] = image[(i*n+j)*3+0];
out[(i*n+j)*3+1] = image[(i*n+j)*3+1];
out[(i*n+j)*3+2] = image[(i*n+j)*3+2];
}
if (i > 1 && i < m-2 && j > 1 && j < n-2)
{
// Filter kernel: 5x5 box blur (average of the 25 neighbouring pixels)
sumx=0;sumy=0;sumz=0;
for(k=-2;k<3;k++)
for(l=-2;l<3;l++)
{
sumx += image[((i+k)*n+(j+l))*3+0];
sumy += image[((i+k)*n+(j+l))*3+1];
sumz += image[((i+k)*n+(j+l))*3+2];
}
out[(i*n+j)*3+0] = sumx/25;
out[(i*n+j)*3+1] = sumy/25;
out[(i*n+j)*3+2] = sumz/25;
}
}
__global__ void filter_shared(unsigned char *image, unsigned char *out, int n, int m)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int sumx, sumy, sumz, k, l;
// printf is OK under --device-emulation
// printf("%d %d %d %d\n", i, j, n, m);
int local_x = threadIdx.x + 2;
int local_y = threadIdx.y + 2;
int global_index = (j*n+i) * 3;
int local_index = (local_y*SHAREDMEMSIZE+local_x) * 3;
__shared__ unsigned char shared_image[SHAREDMEMSIZE*SHAREDMEMSIZE*3];
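// Each thread copies its own pixel into the interior of the shared tile; threads on the
// block border additionally fetch the two-pixel halo that the 5x5 filter below reads
// across block boundaries (SHAREDMEMSIZE = BLOCKSIZE + 4).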
if (j < n && i < m)
{
set_pixel(shared_image, image, local_index, global_index);
set_pixel(out, shared_image, global_index, local_index);
}
// Top
if (threadIdx.y == 0 && blockIdx.y != 0)
{
set_pixel(shared_image, image, local_index-(3*SHAREDMEMSIZE), global_index-(3*n));
set_pixel(shared_image, image, local_index-(6*SHAREDMEMSIZE), global_index-(6*n));
// Upper left
if (threadIdx.x == 0 && blockIdx.x != 0)
{
set_pixel(shared_image, image, local_index-(3*SHAREDMEMSIZE)-3, global_index-(3*n)-3);
set_pixel(shared_image, image, local_index-(3*SHAREDMEMSIZE)-6, global_index-(3*n)-6);
set_pixel(shared_image, image, local_index-(6*SHAREDMEMSIZE)-3, global_index-(6*n)-3);
set_pixel(shared_image, image, local_index-(6*SHAREDMEMSIZE)-6, global_index-(6*n)-6);
}
// Upper right
else if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1)
{
set_pixel(shared_image, image, local_index-(3*SHAREDMEMSIZE)+3, global_index-(3*n)+3);
set_pixel(shared_image, image, local_index-(3*SHAREDMEMSIZE)+6, global_index-(3*n)+6);
set_pixel(shared_image, image, local_index-(6*SHAREDMEMSIZE)+3, global_index-(6*n)+3);
set_pixel(shared_image, image, local_index-(6*SHAREDMEMSIZE)+6, global_index-(6*n)+6);
}
}
else if (threadIdx.y == blockDim.y - 1 && blockIdx.y != gridDim.y - 1)
{
set_pixel(shared_image, image, local_index+(3*SHAREDMEMSIZE), global_index+(3*n));
set_pixel(shared_image, image, local_index+(6*SHAREDMEMSIZE), global_index+(6*n));
// Lower left
if (threadIdx.x == 0 && blockIdx.x != 0)
{
set_pixel(shared_image, image, local_index+(3*SHAREDMEMSIZE)-3, global_index+(3*n)-3);
set_pixel(shared_image, image, local_index+(3*SHAREDMEMSIZE)-6, global_index+(3*n)-6);
set_pixel(shared_image, image, local_index+(6*SHAREDMEMSIZE)-3, global_index+(6*n)-3);
set_pixel(shared_image, image, local_index+(6*SHAREDMEMSIZE)-6, global_index+(6*n)-6);
}
// Lower right
else if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1)
{
set_pixel(shared_image, image, local_index+(3*SHAREDMEMSIZE)+3, global_index+(3*n)+3);
set_pixel(shared_image, image, local_index+(3*SHAREDMEMSIZE)+6, global_index+(3*n)+6);
set_pixel(shared_image, image, local_index+(6*SHAREDMEMSIZE)+3, global_index+(6*n)+3);
set_pixel(shared_image, image, local_index+(6*SHAREDMEMSIZE)+6, global_index+(6*n)+6);
}
}
// Left
if (threadIdx.x == 0 && blockIdx.x != 0)
{
set_pixel(shared_image, image, local_index-3, global_index-3);
set_pixel(shared_image, image, local_index-6, global_index-6);
}
// Right
else if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1)
{
set_pixel(shared_image, image, local_index+3, global_index+3);
set_pixel(shared_image, image, local_index+6, global_index+6);
}
__syncthreads();
if (i > 1 && i < m-2 && j > 1 && j < n-2)
{
// Filter kernel
sumx=0;sumy=0;sumz=0;
for(k=-2;k<3;k++)
for(l=-2;l<3;l++)
{
int index = local_index+(3*k*SHAREDMEMSIZE)+3*l;
sumx += shared_image[index+0];
sumy += shared_image[index+1];
sumz += shared_image[index+2];
}
out[global_index+0] = sumx/25;
out[global_index+1] = sumy/25;
out[global_index+2] = sumz/25;
}
}
// Compute CUDA kernel and display image
void Draw()
{
unsigned char *image, *out;
int n, m;
unsigned char *dev_image, *dev_out;
hipEvent_t start_event;
hipEvent_t end_event;
float theTime;
image = readppm("maskros512.ppm", &n, &m);
out = (unsigned char*) malloc(n*m*3);
hipEventCreate(&start_event);
hipEventCreate(&end_event);
hipMalloc( (void**)&dev_image, n*m*3);
hipMalloc( (void**)&dev_out, n*m*3);
hipMemset(dev_out, 0, n*m*3);
hipMemcpy( dev_image, image, n*m*3, hipMemcpyHostToDevice);
dim3 dimBlock( 16, 16 );
dim3 dimGrid( 32, 32 );
dim3 ourBlock( BLOCKSIZE, BLOCKSIZE );
dim3 ourGrid( n / BLOCKSIZE, m / BLOCKSIZE );
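// Note: dimGrid is hard-coded for a 512x512 image (32x32 blocks of 16x16 threads), and
// ourGrid covers the whole image only when n and m are multiples of BLOCKSIZE.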
hipEventRecord(start_event, 0);
hipEventSynchronize(start_event);
hipLaunchKernelGGL(( filter), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_image, dev_out, n, m);
hipDeviceSynchronize();
hipMemset(dev_out, 0, n*m*3);
hipEventRecord(end_event, 0);
hipEventSynchronize(end_event);
hipEventElapsedTime(&theTime, start_event, end_event);
printf("Slow time: %f ms\n", theTime);
hipEventRecord(start_event, 0);
hipEventSynchronize(start_event);
hipLaunchKernelGGL(( filter_shared), dim3(ourGrid), dim3(ourBlock), 0, 0, dev_image, dev_out, n, m);
hipDeviceSynchronize();
hipEventRecord(end_event, 0);
hipEventSynchronize(end_event);
hipEventElapsedTime(&theTime, start_event, end_event);
printf("The time: %f ms\n", theTime);
hipMemcpy( out, dev_out, n*m*3, hipMemcpyDeviceToHost );
hipFree(dev_image);
hipFree(dev_out);
hipEventDestroy(start_event);
hipEventDestroy(end_event);
// Dump the whole picture onto the screen.
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glRasterPos2f(-1, -1);
glDrawPixels( n, m, GL_RGB, GL_UNSIGNED_BYTE, image );
glRasterPos2i(0, -1);
glDrawPixels( n, m, GL_RGB, GL_UNSIGNED_BYTE, out );
glFlush();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
glutInitWindowSize( 1024, 512 );
glutCreateWindow("CUDA on live GL");
glutDisplayFunc(Draw);
glutMainLoop();
}
| 4ea0d6529edae79c7ba0dd827a6627fe8e82d225.cu |
#include <stdio.h>
#include "readppm.c"
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
#define BLOCKSIZE 16
#define SHAREDMEMSIZE (BLOCKSIZE + 4)
__device__
inline
void
set_pixel(unsigned char *shared_image, unsigned char *image, int shared_index, int image_index)
{
shared_image[shared_index+0] = image[image_index+0];
shared_image[shared_index+1] = image[image_index+1];
shared_image[shared_index+2] = image[image_index+2];
}
__global__ void filter(unsigned char *image, unsigned char *out, int n, int m)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int sumx, sumy, sumz, k, l;
// printf is OK under --device-emulation
// printf("%d %d %d %d\n", i, j, n, m);
if (j < n && i < m)
{
out[(i*n+j)*3+0] = image[(i*n+j)*3+0];
out[(i*n+j)*3+1] = image[(i*n+j)*3+1];
out[(i*n+j)*3+2] = image[(i*n+j)*3+2];
}
if (i > 1 && i < m-2 && j > 1 && j < n-2)
{
// Filter kernel: 5x5 box blur (average of the 25 neighbouring pixels)
sumx=0;sumy=0;sumz=0;
for(k=-2;k<3;k++)
for(l=-2;l<3;l++)
{
sumx += image[((i+k)*n+(j+l))*3+0];
sumy += image[((i+k)*n+(j+l))*3+1];
sumz += image[((i+k)*n+(j+l))*3+2];
}
out[(i*n+j)*3+0] = sumx/25;
out[(i*n+j)*3+1] = sumy/25;
out[(i*n+j)*3+2] = sumz/25;
}
}
__global__ void filter_shared(unsigned char *image, unsigned char *out, int n, int m)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int sumx, sumy, sumz, k, l;
// printf is OK under --device-emulation
// printf("%d %d %d %d\n", i, j, n, m);
int local_x = threadIdx.x + 2;
int local_y = threadIdx.y + 2;
int global_index = (j*n+i) * 3;
int local_index = (local_y*SHAREDMEMSIZE+local_x) * 3;
__shared__ unsigned char shared_image[SHAREDMEMSIZE*SHAREDMEMSIZE*3];
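// Each thread copies its own pixel into the interior of the shared tile; threads on the
// block border additionally fetch the two-pixel halo that the 5x5 filter below reads
// across block boundaries (SHAREDMEMSIZE = BLOCKSIZE + 4).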
if (j < n && i < m)
{
set_pixel(shared_image, image, local_index, global_index);
set_pixel(out, shared_image, global_index, local_index);
}
// Top
if (threadIdx.y == 0 && blockIdx.y != 0)
{
set_pixel(shared_image, image, local_index-(3*SHAREDMEMSIZE), global_index-(3*n));
set_pixel(shared_image, image, local_index-(6*SHAREDMEMSIZE), global_index-(6*n));
// Upper left
if (threadIdx.x == 0 && blockIdx.x != 0)
{
set_pixel(shared_image, image, local_index-(3*SHAREDMEMSIZE)-3, global_index-(3*n)-3);
set_pixel(shared_image, image, local_index-(3*SHAREDMEMSIZE)-6, global_index-(3*n)-6);
set_pixel(shared_image, image, local_index-(6*SHAREDMEMSIZE)-3, global_index-(6*n)-3);
set_pixel(shared_image, image, local_index-(6*SHAREDMEMSIZE)-6, global_index-(6*n)-6);
}
// Upper right
else if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1)
{
set_pixel(shared_image, image, local_index-(3*SHAREDMEMSIZE)+3, global_index-(3*n)+3);
set_pixel(shared_image, image, local_index-(3*SHAREDMEMSIZE)+6, global_index-(3*n)+6);
set_pixel(shared_image, image, local_index-(6*SHAREDMEMSIZE)+3, global_index-(6*n)+3);
set_pixel(shared_image, image, local_index-(6*SHAREDMEMSIZE)+6, global_index-(6*n)+6);
}
}
else if (threadIdx.y == blockDim.y - 1 && blockIdx.y != gridDim.y - 1)
{
set_pixel(shared_image, image, local_index+(3*SHAREDMEMSIZE), global_index+(3*n));
set_pixel(shared_image, image, local_index+(6*SHAREDMEMSIZE), global_index+(6*n));
// Lower left
if (threadIdx.x == 0 && blockIdx.x != 0)
{
set_pixel(shared_image, image, local_index+(3*SHAREDMEMSIZE)-3, global_index+(3*n)-3);
set_pixel(shared_image, image, local_index+(3*SHAREDMEMSIZE)-6, global_index+(3*n)-6);
set_pixel(shared_image, image, local_index+(6*SHAREDMEMSIZE)-3, global_index+(6*n)-3);
set_pixel(shared_image, image, local_index+(6*SHAREDMEMSIZE)-6, global_index+(6*n)-6);
}
// Lower right
else if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1)
{
set_pixel(shared_image, image, local_index+(3*SHAREDMEMSIZE)+3, global_index+(3*n)+3);
set_pixel(shared_image, image, local_index+(3*SHAREDMEMSIZE)+6, global_index+(3*n)+6);
set_pixel(shared_image, image, local_index+(6*SHAREDMEMSIZE)+3, global_index+(6*n)+3);
set_pixel(shared_image, image, local_index+(6*SHAREDMEMSIZE)+6, global_index+(6*n)+6);
}
}
// Left
if (threadIdx.x == 0 && blockIdx.x != 0)
{
set_pixel(shared_image, image, local_index-3, global_index-3);
set_pixel(shared_image, image, local_index-6, global_index-6);
}
// Right
else if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1)
{
set_pixel(shared_image, image, local_index+3, global_index+3);
set_pixel(shared_image, image, local_index+6, global_index+6);
}
__syncthreads();
if (i > 1 && i < m-2 && j > 1 && j < n-2)
{
// Filter kernel
sumx=0;sumy=0;sumz=0;
for(k=-2;k<3;k++)
for(l=-2;l<3;l++)
{
int index = local_index+(3*k*SHAREDMEMSIZE)+3*l;
sumx += shared_image[index+0];
sumy += shared_image[index+1];
sumz += shared_image[index+2];
}
out[global_index+0] = sumx/25;
out[global_index+1] = sumy/25;
out[global_index+2] = sumz/25;
}
}
// Compute CUDA kernel and display image
void Draw()
{
unsigned char *image, *out;
int n, m;
unsigned char *dev_image, *dev_out;
cudaEvent_t start_event;
cudaEvent_t end_event;
float theTime;
image = readppm("maskros512.ppm", &n, &m);
out = (unsigned char*) malloc(n*m*3);
cudaEventCreate(&start_event);
cudaEventCreate(&end_event);
cudaMalloc( (void**)&dev_image, n*m*3);
cudaMalloc( (void**)&dev_out, n*m*3);
cudaMemset(dev_out, 0, n*m*3);
cudaMemcpy( dev_image, image, n*m*3, cudaMemcpyHostToDevice);
dim3 dimBlock( 16, 16 );
dim3 dimGrid( 32, 32 );
dim3 ourBlock( BLOCKSIZE, BLOCKSIZE );
dim3 ourGrid( n / BLOCKSIZE, m / BLOCKSIZE );
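// Note: dimGrid is hard-coded for a 512x512 image (32x32 blocks of 16x16 threads), and
// ourGrid covers the whole image only when n and m are multiples of BLOCKSIZE.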
cudaEventRecord(start_event, 0);
cudaEventSynchronize(start_event);
filter<<<dimGrid, dimBlock>>>(dev_image, dev_out, n, m);
cudaThreadSynchronize();
cudaMemset(dev_out, 0, n*m*3);
cudaEventRecord(end_event, 0);
cudaEventSynchronize(end_event);
cudaEventElapsedTime(&theTime, start_event, end_event);
printf("Slow time: %f ms\n", theTime);
cudaEventRecord(start_event, 0);
cudaEventSynchronize(start_event);
filter_shared<<<ourGrid, ourBlock>>>(dev_image, dev_out, n, m);
cudaThreadSynchronize();
cudaEventRecord(end_event, 0);
cudaEventSynchronize(end_event);
cudaEventElapsedTime(&theTime, start_event, end_event);
printf("The time: %f ms\n", theTime);
cudaMemcpy( out, dev_out, n*m*3, cudaMemcpyDeviceToHost );
cudaFree(dev_image);
cudaFree(dev_out);
cudaEventDestroy(start_event);
cudaEventDestroy(end_event);
// Dump the whole picture onto the screen.
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glRasterPos2f(-1, -1);
glDrawPixels( n, m, GL_RGB, GL_UNSIGNED_BYTE, image );
glRasterPos2i(0, -1);
glDrawPixels( n, m, GL_RGB, GL_UNSIGNED_BYTE, out );
glFlush();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
glutInitWindowSize( 1024, 512 );
glutCreateWindow("CUDA on live GL");
glutDisplayFunc(Draw);
glutMainLoop();
}
|
7d113ea28e3943087415363daeed0e1858e50948.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <mv_kernel.cu>
#include <rocblas.h>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest();
void randomInit(float*, int);
void printDiff(float*, float*, int, int);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
CUT_DEVICE_INIT(argc, argv);
runTest();
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest()
{
char* p[32];
float result[32];
cublasStatus status;
status = hipblasInit();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
exit (1);
}
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A;
CUDA_SAFE_CALL(hipMalloc((void**) &d_A, mem_size_A));
float* d_B;
CUDA_SAFE_CALL(hipMalloc((void**) &d_B, mem_size_B));
// copy host memory to device
CUDA_SAFE_CALL(hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice) );
CUDA_SAFE_CALL(hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice) );
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
// create and start timer
unsigned int timer = 0;
// compute reference solution
float* reference = (float*) malloc(mem_size_C);
computeGold(reference, h_A, h_B, HA, WA, WB);
CUTBoolean res;
int iterator = 16;
int pid = 0;
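// Throughput below is reported as 2*HA*WA flops per matrix-vector product (one multiply and
// one add per matrix element) over `iterator` runs; the timer is in milliseconds, hence the
// factor of 2000, and the division by 1024^3 converts the result to "Gflops".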
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C));
// setup execution parameters
dim3 threads(256, 1);
dim3 grid(WC / threads.x, HC / threads.y);
CUT_SAFE_CALL(cutCreateTimer(&timer));
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
for (int i=0; i<iterator; i++) {
// execute the kernel
hipLaunchKernelGGL(( mv_naive), dim3(grid), dim3(threads) , 0, 0, d_A, d_B, d_C, WA);
// stop and destroy timer
}
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated and error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost) );
p[pid] = "naive";
result[pid] = 2000.0*HA*WA/cutGetTimerValue(timer)*iterator/1024/1024/1024;
pid++;
printf("mv_naive Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HA*WA*iterator/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(hipFree(d_C));
}
// check result
res = cutCompareL2fe(reference, h_C, size_C, 1e-5f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C));
CUT_SAFE_CALL(cutCreateTimer(&timer));
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
for (int i=0; i<iterator; i++) {
// execute the kernel
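// d_A holds the HA x WA matrix in row-major order, while hipBLAS/cuBLAS assumes column-major
// storage; the buffer is therefore described as WA x HA (lda = WA) and the 't' op is used,
// which should compute the same y = A*x as the hand-written kernels (d_B is assumed to be
// the length-WA input vector defined in mv_kernel.cu).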
hipblasSgemv('t', WA, HA, 1.0f, d_A,
WA, d_B, 1, 0.0f, d_C, 1);
/*
hipblasSgemm('n', 'n', , 1, , 1.0f, d_A,
, d_B, , 0.0f, d_C, );*/
// stop and destroy timer
}
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated and error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost) );
result[pid] = 2000.0*HA*WA/cutGetTimerValue(timer)*iterator/1024/1024/1024;
p[pid] = "hipblasSgemm";
pid++;
printf("hipblasSgemm Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*WA*HA*iterator/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(hipFree(d_C));
}
res = cutCompareL2fe(reference, h_C, size_C, 1e-5f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C));
CUDA_SAFE_CALL(hipMemcpy(d_C, h_C, mem_size_C,
hipMemcpyHostToDevice) );
// setup execution parameters
dim3 threads(32, 1);
dim3 grid(WC / threads.x, HC / threads.y);
CUT_SAFE_CALL(cutCreateTimer(&timer));
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
for (int i=0; i<iterator; i++) {
// execute the kernel
hipLaunchKernelGGL(( mv_coalesced), dim3(grid), dim3(threads) , 0, 0, d_A, d_B, d_C, WA);
// stop and destroy timer
}
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated and error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost) );
result[pid] = 2000.0*HA*WA/cutGetTimerValue(timer)*iterator/1024/1024/1024;
p[pid] = "mv_coalesced";
pid++;
printf("mv_coalesced Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*WA*HA*iterator/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(hipFree(d_C));
}
res = cutCompareL2fe(reference, h_C, size_C, 1e-5f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C));
CUDA_SAFE_CALL(hipMemcpy(d_C, h_C, mem_size_C,
hipMemcpyHostToDevice) );
// setup execution parameters
dim3 threads(32, 1);
dim3 grid(WC / threads.x, HC / threads.y);
CUT_SAFE_CALL(cutCreateTimer(&timer));
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
for (int i=0; i<iterator; i++) {
// execute the kernel
hipLaunchKernelGGL(( mv_opt), dim3(grid), dim3(threads) , 0, 0, d_A, d_B, d_C, WA);
// stop and destroy timer
}
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated and error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost) );
result[pid] = 2000.0*HA*WA/cutGetTimerValue(timer)*iterator/1024/1024/1024;
p[pid] = "mv_opt";
pid++;
printf("mv_opt Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*WA*HA*iterator/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(hipFree(d_C));
}
res = cutCompareL2fe(reference, h_C, size_C, 1e-5f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
CUDA_SAFE_CALL(hipFree(d_A));
CUDA_SAFE_CALL(hipFree(d_B));
status = hipblasShutdown();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! shutdown error\n");
}
// CUDA_SAFE_CALL(hipFree(d_C));
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void printDiff(float *data1, float *data2, int width, int height)
{
int i,j,k;
int error_count=0;
for (j=0; j<height; j++) {
for (i=0; i<width; i++) {
k = j*width+i;
if (data1[k] != data2[k]) {
printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f n", i,j, data1[k], data2[k]);
error_count++;
}
}
}
printf(" nTotal Errors = %d n", error_count);
}
| 7d113ea28e3943087415363daeed0e1858e50948.cu |
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <mv_kernel.cu>
#include <cublas.h>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest();
void randomInit(float*, int);
void printDiff(float*, float*, int, int);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
CUT_DEVICE_INIT(argc, argv);
runTest();
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest()
{
char* p[32];
float result[32];
cublasStatus status;
status = cublasInit();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
exit (1);
}
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_A, mem_size_A));
float* d_B;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_B, mem_size_B));
// copy host memory to device
CUDA_SAFE_CALL(cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL(cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice) );
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
// create and start timer
unsigned int timer = 0;
// compute reference solution
float* reference = (float*) malloc(mem_size_C);
computeGold(reference, h_A, h_B, HA, WA, WB);
CUTBoolean res;
int iterator = 16;
int pid = 0;
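// Throughput below is reported as 2*HA*WA flops per matrix-vector product (one multiply and
// one add per matrix element) over `iterator` runs; the timer is in milliseconds, hence the
// factor of 2000, and the division by 1024^3 converts the result to "Gflops".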
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C));
// setup execution parameters
dim3 threads(256, 1);
dim3 grid(WC / threads.x, HC / threads.y);
CUT_SAFE_CALL(cutCreateTimer(&timer));
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
for (int i=0; i<iterator; i++) {
// execute the kernel
mv_naive<<< grid, threads >>>(d_A, d_B, d_C, WA);
// stop and destroy timer
}
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated and error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost) );
p[pid] = "naive";
result[pid] = 2000.0*HA*WA/cutGetTimerValue(timer)*iterator/1024/1024/1024;
pid++;
printf("mv_naive Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HA*WA*iterator/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(cudaFree(d_C));
}
// check result
res = cutCompareL2fe(reference, h_C, size_C, 1e-5f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C));
CUT_SAFE_CALL(cutCreateTimer(&timer));
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
for (int i=0; i<iterator; i++) {
// execute the kernel
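// d_A holds the HA x WA matrix in row-major order, while cuBLAS assumes column-major
// storage; the buffer is therefore described as WA x HA (lda = WA) and the 't' op is used,
// which should compute the same y = A*x as the hand-written kernels (d_B is assumed to be
// the length-WA input vector defined in mv_kernel.cu).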
cublasSgemv('t', WA, HA, 1.0f, d_A,
WA, d_B, 1, 0.0f, d_C, 1);
/*
cublasSgemm('n', 'n', , 1, , 1.0f, d_A,
, d_B, , 0.0f, d_C, );*/
// stop and destroy timer
}
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated and error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost) );
result[pid] = 2000.0*HA*WA/cutGetTimerValue(timer)*iterator/1024/1024/1024;
p[pid] = "cublasSgemm";
pid++;
printf("cublasSgemm Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*WA*HA*iterator/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(cudaFree(d_C));
}
res = cutCompareL2fe(reference, h_C, size_C, 1e-5f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C));
CUDA_SAFE_CALL(cudaMemcpy(d_C, h_C, mem_size_C,
cudaMemcpyHostToDevice) );
// setup execution parameters
dim3 threads(32, 1);
dim3 grid(WC / threads.x, HC / threads.y);
CUT_SAFE_CALL(cutCreateTimer(&timer));
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
for (int i=0; i<iterator; i++) {
// execute the kernel
mv_coalesced<<< grid, threads >>>(d_A, d_B, d_C, WA);
// stop and destroy timer
}
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated and error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost) );
result[pid] = 2000.0*HA*WA/cutGetTimerValue(timer)*iterator/1024/1024/1024;
p[pid] = "mv_coalesced";
pid++;
printf("mv_coalesced Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*WA*HA*iterator/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(cudaFree(d_C));
}
res = cutCompareL2fe(reference, h_C, size_C, 1e-5f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
{
free(h_C);
h_C = (float*) malloc(mem_size_C);
float* d_C;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C));
CUDA_SAFE_CALL(cudaMemcpy(d_C, h_C, mem_size_C,
cudaMemcpyHostToDevice) );
// setup execution parameters
dim3 threads(32, 1);
dim3 grid(WC / threads.x, HC / threads.y);
CUT_SAFE_CALL(cutCreateTimer(&timer));
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStartTimer(timer));
for (int i=0; i<iterator; i++) {
// execute the kernel
mv_opt<<< grid, threads >>>(d_A, d_B, d_C, WA);
// stop and destroy timer
}
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer));
// check if kernel execution generated and error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost) );
result[pid] = 2000.0*HA*WA/cutGetTimerValue(timer)*iterator/1024/1024/1024;
p[pid] = "mv_opt";
pid++;
printf("mv_opt Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*WA*HA*iterator/cutGetTimerValue(timer)/1024/1024/1024);
CUT_SAFE_CALL(cutDeleteTimer(timer));
CUDA_SAFE_CALL(cudaFree(d_C));
}
res = cutCompareL2fe(reference, h_C, size_C, 1e-5f);
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
CUDA_SAFE_CALL(cudaFree(d_A));
CUDA_SAFE_CALL(cudaFree(d_B));
status = cublasShutdown();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! shutdown error\n");
}
// CUDA_SAFE_CALL(cudaFree(d_C));
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void printDiff(float *data1, float *data2, int width, int height)
{
int i,j,k;
int error_count=0;
for (j=0; j<height; j++) {
for (i=0; i<width; i++) {
k = j*width+i;
if (data1[k] != data2[k]) {
printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f n", i,j, data1[k], data2[k]);
error_count++;
}
}
}
printf(" nTotal Errors = %d n", error_count);
}
|
07dd3793778b4d560a572afdceffb45b94608cb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// [[Rcpp::depends(RcppArmadillo)]]
// [[Rcpp::depends(RcppEigen)]]
#include <iostream>
#include <omp.h>
using namespace std;
//======================================
// Grids
//======================================
void gridx(const int nx, const double xmin, const double xmax, double* xgrid){
const double size = nx;
const double xstep = (xmax - xmin) /(size - 1);
double it = 0;
for(int i = 0; i < nx; i++){
xgrid[i] = xmin + it*xstep;
it++;
}
}
void gride(const int ne, const double ssigma_eps, const double llambda_eps, const double m, double* egrid){
// This grid is made with Tauchen (1986)
const double size = ne;
const double ssigma_y = sqrt(pow(ssigma_eps, 2) / (1 - pow(llambda_eps, 2)));
const double estep = 2*ssigma_y*m / (size-1);
double it = 0;
for(int i = 0; i < ne; i++){
egrid[i] = (-m*sqrt(pow(ssigma_eps, 2) / (1 - pow(llambda_eps, 2))) + it*estep);
it++;
}
}
double normCDF(const double value){
return 0.5 * erfc(-value * M_SQRT1_2);
}
void eprob(const int ne, const double ssigma_eps, const double llambda_eps, const double m, const double* egrid, double* P){
// This grid is made with Tauchen (1986)
// P is: first ne elements are transition from e_0 to e_i,
// second ne elements are from e_1 to e_i, ...
const double w = egrid[1] - egrid[0];
for(int j = 0; j < ne; j++){
for(int k = 0; k < ne; k++){
if(k == 0){
P[j*ne + k] = normCDF((egrid[k] - llambda_eps*egrid[j] + (w/2))/ssigma_eps);
} else if(k == ne-1){
P[j*ne + k] = 1 - normCDF((egrid[k] - llambda_eps*egrid[j] - (w/2))/ssigma_eps);
} else{
P[j*ne + k] = normCDF((egrid[k] - llambda_eps*egrid[j] + (w/2))/ssigma_eps) - normCDF((egrid[k] - llambda_eps*egrid[j] - (w/2))/ssigma_eps);
}
}
}
}
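// Note on the discretization above: with grid step w = egrid[1] - egrid[0], the interior
// transition probabilities follow Tauchen (1986),
// P(j,k) = Phi((e_k - llambda_eps*e_j + w/2)/ssigma_eps) - Phi((e_k - llambda_eps*e_j - w/2)/ssigma_eps),
// where Phi is the standard normal CDF (normCDF); the first and last grid points use the
// corresponding one-sided tails so that each row of P sums to one.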
//======================================
// Parameter structure
//======================================
class parameters{
public:
int nx;
double xmin;
double xmax;
int ne;
double ssigma_eps;
double llambda_eps;
double m;
double ssigma;
double eeta;
double ppsi;
double rrho;
double llambda;
double bbeta;
int T;
double r;
double w;
void load(const char*);
};
//======================================
// MAIN MAIN MAIN
//======================================
__global__ void Vmaximization(const parameters params, const double* xgrid, const double* egrid, const double* P, const int age, double* V){
// Recover the parameters
const int nx = params.nx;
const int ne = params.ne;
const double ssigma = params.ssigma;
const double bbeta = params.bbeta;
const int T = params.T;
const double r = params.r;
const double w = params.w;
// Recover state variables from indices
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int ie = threadIdx.y;
double expected;
double utility;
double cons;
double VV = pow(-10.0,5.0);
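// Each thread handles one (asset ix, shock ie) state at the given age: it searches over all
// next-period asset choices ixp, computes the expected continuation value from the transition
// row P[ie][.] (zero in the final period), and keeps the best value found in VV.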
for(int ixp = 0; ixp < nx; ixp++){
expected = 0.0;
if(age < T-1){
for(int iep = 0; iep < ne; iep++){
expected = expected + P[ie*ne + iep]*V[(age+1)*nx*ne + ixp*ne + iep];
}
}
cons = (1 + r)*xgrid[ix] + egrid[ie]*w - xgrid[ixp];
utility = pow(cons, 1-ssigma) / (1-ssigma) + bbeta*expected;
if(cons <= 0){
utility = pow(-10.0, 5.0);
}
if(utility >= VV){
VV = utility;
}
utility = 0.0;
}
V[age*nx*ne + ix*ne + ie] = VV;
}
int main()
{
// Grids
const int nx = 300;
const double xmin = 0.1;
const double xmax = 4.0;
const int ne = 15;
const double ssigma_eps = 0.02058;
const double llambda_eps = 0.99;
const double m = 1.5;
// Parameters
const double ssigma = 2;
const double eeta = 0.36;
const double ppsi = 0.89;
const double rrho = 0.5;
const double llambda = 1;
const double bbeta = 0.97;
const int T = 10;
// Prices
const double r = 0.07;
const double w = 5;
parameters params = {nx, xmin, xmax, ne, ssigma_eps, llambda_eps, m, ssigma, eeta, ppsi, rrho, llambda, bbeta, T, r, w};
// Pointers to variables in the DEVICE memory
double *V, *X, *E, *P;
size_t sizeX = nx*sizeof(double);
size_t sizeE = ne*sizeof(double);
size_t sizeP = ne*ne*sizeof(double);
size_t sizeV = T*ne*nx*sizeof(double);
hipMalloc((void**)&X, sizeX);
hipMalloc((void**)&E, sizeE);
hipMalloc((void**)&P, sizeP);
hipMalloc((void**)&V, sizeV);
// Parameters for CUDA: each block has ne columns, and each row of threads represents a value of x
// There are nx blocks
// Each layer is an age => there are 80 layers
const int block_size = 30;
dim3 dimBlock(block_size, ne);
dim3 dimGrid(nx/block_size, 1);
// Variables in the host have "h" prefix
// I create the grid for X
double hxgrid[nx];
gridx(nx, xmin, xmax, hxgrid);
// I create the grid for E and the probability matrix
double hegrid[ne];
double hP[ne*ne];
gride(ne, ssigma_eps, llambda_eps, m, hegrid);
eprob(ne, ssigma_eps, llambda_eps, m, hegrid, hP);
// Exponential of the grid e
for(int i=0; i<ne; i++){
hegrid[i] = exp(hegrid[i]);
}
double *hV;
hV = (double *)malloc(sizeV);
// Copy matrices from host (CPU) to device (GPU) memory
hipMemcpy(X, hxgrid, sizeX, hipMemcpyHostToDevice);
hipMemcpy(E, hegrid, sizeE, hipMemcpyHostToDevice);
hipMemcpy(P, hP, sizeP, hipMemcpyHostToDevice);
hipMemcpy(V, hV, sizeV, hipMemcpyHostToDevice);
std::cout << " " << std::endl;
std::cout << "Life cycle computation: " << std::endl;
std::cout << " " << std::endl;
// Time the GPU startup overhead
clock_t t;
clock_t t0;
t0 = clock();
t = t0;
for(int age=T-1; age>=0; age--){
hipLaunchKernelGGL(( Vmaximization), dim3(dimGrid),dim3(dimBlock), 0, 0, params, X, E, P, age, V);
hipDeviceSynchronize();
t = clock() - t0;
std::cout << "Age: " << age << ". Time: " << ((double)t)/CLOCKS_PER_SEC << " seconds." << std::endl;
}
std::cout << " " << std::endl;
t = clock() - t0;
std::cout << "TOTAL ELAPSED TIME: " << ((double)t)/CLOCKS_PER_SEC << " seconds. " << std::endl;
hipMemcpy(hV, V, sizeV, hipMemcpyDeviceToHost);
// Free variables in device memory
hipFree(V);
hipFree(X);
hipFree(E);
hipFree(P);
std::cout << " " << std::endl;
std::cout << " - - - - - - - - - - - - - - - - - - - - - " << std::endl;
std::cout << " " << std::endl;
std::cout << "The first entries of the value function: " << std::endl;
std::cout << " " << std::endl;
for(int i = 0; i<3; i++){
std::cout << hV[i] << std::endl;
}
std::cout << " " << std::endl;
return 0;
}
| 07dd3793778b4d560a572afdceffb45b94608cb7.cu | // [[Rcpp::depends(RcppArmadillo)]]
// [[Rcpp::depends(RcppEigen)]]
#include <iostream>
#include <omp.h>
using namespace std;
//======================================
// Grids
//======================================
void gridx(const int nx, const double xmin, const double xmax, double* xgrid){
const double size = nx;
const double xstep = (xmax - xmin) /(size - 1);
double it = 0;
for(int i = 0; i < nx; i++){
xgrid[i] = xmin + it*xstep;
it++;
}
}
void gride(const int ne, const double ssigma_eps, const double llambda_eps, const double m, double* egrid){
// This grid is made with Tauchen (1986)
const double size = ne;
const double ssigma_y = sqrt(pow(ssigma_eps, 2) / (1 - pow(llambda_eps, 2)));
const double estep = 2*ssigma_y*m / (size-1);
double it = 0;
for(int i = 0; i < ne; i++){
egrid[i] = (-m*sqrt(pow(ssigma_eps, 2) / (1 - pow(llambda_eps, 2))) + it*estep);
it++;
}
}
double normCDF(const double value){
return 0.5 * erfc(-value * M_SQRT1_2);
}
void eprob(const int ne, const double ssigma_eps, const double llambda_eps, const double m, const double* egrid, double* P){
// This grid is made with Tauchen (1986)
// P is: first ne elements are transition from e_0 to e_i,
// second ne elements are from e_1 to e_i, ...
const double w = egrid[1] - egrid[0];
for(int j = 0; j < ne; j++){
for(int k = 0; k < ne; k++){
if(k == 0){
P[j*ne + k] = normCDF((egrid[k] - llambda_eps*egrid[j] + (w/2))/ssigma_eps);
} else if(k == ne-1){
P[j*ne + k] = 1 - normCDF((egrid[k] - llambda_eps*egrid[j] - (w/2))/ssigma_eps);
} else{
P[j*ne + k] = normCDF((egrid[k] - llambda_eps*egrid[j] + (w/2))/ssigma_eps) - normCDF((egrid[k] - llambda_eps*egrid[j] - (w/2))/ssigma_eps);
}
}
}
}
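// Note on the discretization above: with grid step w = egrid[1] - egrid[0], the interior
// transition probabilities follow Tauchen (1986),
// P(j,k) = Phi((e_k - llambda_eps*e_j + w/2)/ssigma_eps) - Phi((e_k - llambda_eps*e_j - w/2)/ssigma_eps),
// where Phi is the standard normal CDF (normCDF); the first and last grid points use the
// corresponding one-sided tails so that each row of P sums to one.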
//======================================
// Parameter structure
//======================================
class parameters{
public:
int nx;
double xmin;
double xmax;
int ne;
double ssigma_eps;
double llambda_eps;
double m;
double ssigma;
double eeta;
double ppsi;
double rrho;
double llambda;
double bbeta;
int T;
double r;
double w;
void load(const char*);
};
//======================================
// MAIN MAIN MAIN
//======================================
__global__ void Vmaximization(const parameters params, const double* xgrid, const double* egrid, const double* P, const int age, double* V){
// Recover the parameters
const int nx = params.nx;
const int ne = params.ne;
const double ssigma = params.ssigma;
const double bbeta = params.bbeta;
const int T = params.T;
const double r = params.r;
const double w = params.w;
// Recover state variables from indices
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int ie = threadIdx.y;
double expected;
double utility;
double cons;
double VV = pow(-10.0,5.0);
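// Each thread handles one (asset ix, shock ie) state at the given age: it searches over all
// next-period asset choices ixp, computes the expected continuation value from the transition
// row P[ie][.] (zero in the final period), and keeps the best value found in VV.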
for(int ixp = 0; ixp < nx; ixp++){
expected = 0.0;
if(age < T-1){
for(int iep = 0; iep < ne; iep++){
expected = expected + P[ie*ne + iep]*V[(age+1)*nx*ne + ixp*ne + iep];
}
}
cons = (1 + r)*xgrid[ix] + egrid[ie]*w - xgrid[ixp];
utility = pow(cons, 1-ssigma) / (1-ssigma) + bbeta*expected;
if(cons <= 0){
utility = pow(-10.0, 5.0);
}
if(utility >= VV){
VV = utility;
}
utility = 0.0;
}
V[age*nx*ne + ix*ne + ie] = VV;
}
int main()
{
// Grids
const int nx = 300;
const double xmin = 0.1;
const double xmax = 4.0;
const int ne = 15;
const double ssigma_eps = 0.02058;
const double llambda_eps = 0.99;
const double m = 1.5;
// Parameters
const double ssigma = 2;
const double eeta = 0.36;
const double ppsi = 0.89;
const double rrho = 0.5;
const double llambda = 1;
const double bbeta = 0.97;
const int T = 10;
// Prices
const double r = 0.07;
const double w = 5;
parameters params = {nx, xmin, xmax, ne, ssigma_eps, llambda_eps, m, ssigma, eeta, ppsi, rrho, llambda, bbeta, T, r, w};
// Pointers to variables in the DEVICE memory
double *V, *X, *E, *P;
size_t sizeX = nx*sizeof(double);
size_t sizeE = ne*sizeof(double);
size_t sizeP = ne*ne*sizeof(double);
size_t sizeV = T*ne*nx*sizeof(double);
cudaMalloc((void**)&X, sizeX);
cudaMalloc((void**)&E, sizeE);
cudaMalloc((void**)&P, sizeP);
cudaMalloc((void**)&V, sizeV);
// Parameters for CUDA: each block has ne columns, and each row of threads represents a value of x
// There are nx blocks
// Each layer is an age => there are 80 layers
const int block_size = 30;
dim3 dimBlock(block_size, ne);
dim3 dimGrid(nx/block_size, 1);
// Variables in the host have "h" prefix
// I create the grid for X
double hxgrid[nx];
gridx(nx, xmin, xmax, hxgrid);
// I create the grid for E and the probability matrix
double hegrid[ne];
double hP[ne*ne];
gride(ne, ssigma_eps, llambda_eps, m, hegrid);
eprob(ne, ssigma_eps, llambda_eps, m, hegrid, hP);
// Exponential of the grid e
for(int i=0; i<ne; i++){
hegrid[i] = exp(hegrid[i]);
}
double *hV;
hV = (double *)malloc(sizeV);
// Copy matrices from host (CPU) to device (GPU) memory
cudaMemcpy(X, hxgrid, sizeX, cudaMemcpyHostToDevice);
cudaMemcpy(E, hegrid, sizeE, cudaMemcpyHostToDevice);
cudaMemcpy(P, hP, sizeP, cudaMemcpyHostToDevice);
cudaMemcpy(V, hV, sizeV, cudaMemcpyHostToDevice);
std::cout << " " << std::endl;
std::cout << "Life cycle computation: " << std::endl;
std::cout << " " << std::endl;
// Time the GPU startup overhead
clock_t t;
clock_t t0;
t0 = clock();
t = t0;
for(int age=T-1; age>=0; age--){
Vmaximization<<<dimGrid,dimBlock>>>(params, X, E, P, age, V);
cudaDeviceSynchronize();
t = clock() - t0;
std::cout << "Age: " << age << ". Time: " << ((double)t)/CLOCKS_PER_SEC << " seconds." << std::endl;
}
std::cout << " " << std::endl;
t = clock() - t0;
std::cout << "TOTAL ELAPSED TIME: " << ((double)t)/CLOCKS_PER_SEC << " seconds. " << std::endl;
cudaMemcpy(hV, V, sizeV, cudaMemcpyDeviceToHost);
// Free variables in device memory
cudaFree(V);
cudaFree(X);
cudaFree(E);
cudaFree(P);
std::cout << " " << std::endl;
std::cout << " - - - - - - - - - - - - - - - - - - - - - " << std::endl;
std::cout << " " << std::endl;
std::cout << "The first entries of the value function: " << std::endl;
std::cout << " " << std::endl;
for(int i = 0; i<3; i++){
std::cout << hV[i] << std::endl;
}
std::cout << " " << std::endl;
return 0;
}
|
68ae88b12e3b3b31a7b00350b90d4b2e0bf4c6ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************************************************************
* * Implementing Graph Cuts on CUDA using algorithm given in CVGPU '08 **
* * paper "CUDA Cuts: Fast Graph Cuts on GPUs" **
* * **
* * Copyright (c) 2008 International Institute of Information Technology. **
* * All rights reserved. **
* * **
* * Permission to use, copy, modify and distribute this software and its documentation for **
* * educational purpose is hereby granted without fee, provided that the above copyright **
* * notice and this permission notice appear in all copies of this software and that you do **
* * not sell the software. **
* * **
* * THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR **
* * OTHERWISE. **
* * **
* * Created By Vibhav Vineet. **
* ********************************************************************************************/
#ifndef _PUSHRELABEL_KERNEL_CU_
#define _PUSHRELABEL_KERNEL_CU_
#include "CudaCuts.h"
/************************************************
* Push step of push-relabel: each active node **
* pushes excess flow to the sink (height 1) or **
* to a neighbour one level lower in the height **
* map, updating capacities and excesses with **
* atomic operations. **
* *********************************************/
__global__ void
kernel_push1_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down,
int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1 )
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
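// Stage the height function for this 32x8 thread block plus a one-pixel halo into shared
// memory (tile rows are 34 ints wide); threads on the block border also fetch the
// neighbouring block's heights before the __syncthreads() below.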
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
}
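/* kernel_relabel_atomic recomputes the activity mask and the height for the
 * next sweep: nodes with no excess or no residual capacity at all are marked
 * inactive (mask 2), nodes that can still push downhill stay active (mask 1),
 * and g_height_write receives either 1 (sink edge still open) or one more
 * than the smallest neighbouring height reachable through a residual edge. */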
__global__ void
kernel_relabel_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down,
int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1 )
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int min_flow_pushed = g_left_weight[thid] ;
int flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads();
if(thid < graph_size1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0 )
{
if(g_sink_weight[thid] > 0)
{
g_height_write[thid] = 1 ;
}
else
{
int min_height = graph_size ;
(min_flow_pushed > 0 && min_height > height_fn[temp_mult - 1] ) ? min_height = height_fn[temp_mult - 1] : 0 ;
(g_right_weight[thid] > 0 && min_height > height_fn[temp_mult + 1]) ? min_height = height_fn[temp_mult + 1] : 0 ;
(g_down_weight[thid] > 0 && min_height > height_fn[temp_mult + 34] ) ? min_height = height_fn[temp_mult + 34] : 0 ;
(g_up_weight[thid] > 0 && min_height > height_fn[temp_mult - 34] ) ? min_height = height_fn[temp_mult - 34] : 0 ;
g_height_write[thid] = min_height + 1 ;
}
}
}
__global__ void
kernel_relabel_stochastic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_stochastic, int *g_block_num )
{
if(d_stochastic[blockIdx.y * (*g_block_num) + blockIdx.x] == 1 )
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int min_flow_pushed = g_left_weight[thid] ;
int flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads();
if(thid < graph_size1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0 )
{
if(g_sink_weight[thid] > 0)
{
g_height_write[thid] = 1 ;
}
else
{
int min_height = graph_size ;
(min_flow_pushed > 0 && min_height > height_fn[temp_mult - 1] ) ? min_height = height_fn[temp_mult - 1] : 0 ;
(g_right_weight[thid] > 0 && min_height > height_fn[temp_mult + 1]) ? min_height = height_fn[temp_mult + 1] : 0 ;
(g_down_weight[thid] > 0 && min_height > height_fn[temp_mult + 34] ) ? min_height = height_fn[temp_mult + 34] : 0 ;
(g_up_weight[thid] > 0 && min_height > height_fn[temp_mult - 34] ) ? min_height = height_fn[temp_mult - 34] : 0 ;
g_height_write[thid] = min_height + 1 ;
}
}
}
}
__global__ void
kernel_push2_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1)
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads() ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
}
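/* kernel_End reduces the per-block activity flags: every block flagged in
 * g_stochastic contributes one atomic increment to g_count_blocks, and
 * kernel_push_stochastic1 later compares that count against a threshold to
 * decide whether the finish flag is cleared. */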
__global__ void
kernel_End( int *g_stochastic, int *g_count_blocks, int g_counter)
{
int thid = blockIdx.x * blockDim.x + threadIdx.x ;
if( thid < (g_counter ) )
{
if( g_stochastic[thid] == 1 )
atomicAdd(g_count_blocks,1);
//(*g_count_blocks) = (*g_count_blocks) + 1 ;
}
}
__global__ void
kernel_push1_start_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_relabel, int *d_stochastic, int d_counter, bool *d_finish)
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
flow_push = flow_push - min_flow_pushed ;
}
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
( flow_push > 0 && ( ((height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && min_flow_pushed > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
if(thid < graph_size1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0 )
{
if(g_sink_weight[thid] > 0)
{
g_height_write[thid] = 1 ;
}
else
{
int min_height = graph_size ;
(min_flow_pushed > 0 && min_height > height_fn[temp_mult - 1] ) ? min_height = height_fn[temp_mult - 1] : 0 ;
(g_right_weight[thid] > 0 && min_height > height_fn[temp_mult + 1]) ? min_height = height_fn[temp_mult + 1] : 0 ;
(g_down_weight[thid] > 0 && min_height > height_fn[temp_mult + 34] ) ? min_height = height_fn[temp_mult + 34] : 0 ;
(g_up_weight[thid] > 0 && min_height > height_fn[temp_mult - 34] ) ? min_height = height_fn[temp_mult - 34] : 0 ;
g_height_write[thid] = min_height + 1 ;
}
}
}
__global__ void
kernel_push1_stochastic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_stochastic,int *g_block_num )
{
if(d_stochastic[blockIdx.y * (*g_block_num) + blockIdx.x] == 1 )
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
}
}
__global__ void
kernel_push2_stochastic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_relabel, int *d_stochastic, int *d_counter, bool *d_finish)
{
if(d_stochastic[blockIdx.y * 20 + blockIdx.x] == 1 )
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads() ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
}
}
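/* kernel_bfs_t seeds the breadth-first labelling used to extract the cut:
 * interior pixels that still hold excess get height 1, pixels with remaining
 * sink capacity get height -1, and both kinds are removed from the active
 * pixel mask so kernel_bfs does not revisit them. */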
__global__ void
kernel_bfs_t(int *g_push_reser, int *g_sink_weight, int *g_graph_height, bool *g_pixel_mask,
int vertex_num, int width, int height, int vertex_num1, int width1, int height1)
{
int thid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x ;
if(thid < vertex_num && g_pixel_mask[thid] == true )
{
int col = thid % width1 , row = thid / width1 ;
if(col > 0 && row > 0 && col < width - 1 && row < height - 1 && g_push_reser[thid] > 0 )
{
g_graph_height[thid] = 1 ;
g_pixel_mask[thid] = false ;
}
else
if(g_sink_weight[thid] > 0)
{
g_graph_height[thid] = -1 ;
g_pixel_mask[thid] = false ;
}
}
}
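/* kernel_push_stochastic1 snapshots the excess array into s_push_reser and
 * lets thread 0 clear the global finish flag while the count of flagged
 * blocks is still below 50; kernel_push_stochastic2 then compares the
 * snapshot with the excess left after the push phase and sets the block's
 * entry in d_stochastic whenever anything changed, so later sweeps can skip
 * quiescent blocks. */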
__global__ void
kernel_push_stochastic1( int *g_push_reser, int *s_push_reser, int *g_count_blocks, bool *g_finish, int g_block_num, int width1)
{
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
s_push_reser[thid] = g_push_reser[thid] ;
if( thid == 0 )
{
if((*g_count_blocks) < 50 )
(*g_finish) = false ;
}
}
__global__ void
kernel_push_stochastic2( int *g_push_reser, int *s_push_reser, int *d_stochastic, int g_block_num, int width1)
{
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
int stochastic = 0 ;
stochastic = ( s_push_reser[thid] - g_push_reser[thid]) ;
if(stochastic != 0)
{
d_stochastic[blockIdx.y * (g_block_num) + blockIdx.x] = 1 ;
}
}
__global__ void
kernel_push1_start_stochastic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_relabel, int *d_stochastic, int d_counter, bool *d_finish)
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
flow_push = flow_push - min_flow_pushed ;
}
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
( flow_push > 0 && ( ((height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && min_flow_pushed > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
if(thid < graph_size1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0 )
{
if(g_sink_weight[thid] > 0)
{
g_height_write[thid] = 1 ;
}
else
{
int min_height = graph_size ;
(min_flow_pushed > 0 && min_height > height_fn[temp_mult - 1] ) ? min_height = height_fn[temp_mult - 1] : 0 ;
(g_right_weight[thid] > 0 && min_height > height_fn[temp_mult + 1]) ? min_height = height_fn[temp_mult + 1] : 0 ;
(g_down_weight[thid] > 0 && min_height > height_fn[temp_mult + 34] ) ? min_height = height_fn[temp_mult + 34] : 0 ;
(g_up_weight[thid] > 0 && min_height > height_fn[temp_mult - 34] ) ? min_height = height_fn[temp_mult - 34] : 0 ;
g_height_write[thid] = min_height + 1 ;
}
}
}
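/* kernel_bfs grows the breadth-first front over the residual graph: a pixel
 * still present in the mask adopts height (*g_counter)+1 as soon as one of
 * its four neighbours sits at the current counter level with an open
 * residual edge towards the pixel, and g_over records that the front is
 * still growing. */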
__global__ void
kernel_bfs(int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_graph_height, bool *g_pixel_mask, int vertex_num,int width,int height,
int vertex_num1, int width1, int height1, bool *g_over, int *g_counter)
{
/*******************************
*threadId is calculated ******
*****************************/
int thid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x ;
if(thid < vertex_num && g_pixel_mask[thid] == true)
{
int col = thid % width1 , row = thid / width1 ;
if(col < width - 1 && col > 0 && row < height - 1 && row > 0 )
{
int height_l = 0, height_d = 0, height_u = 0 , height_r = 0 ;
height_r = g_graph_height[thid+1] ;
height_l = g_graph_height[thid-1] ;
height_d = g_graph_height[thid+width1] ;
height_u = g_graph_height[thid-width1] ;
if(((height_l == (*g_counter) && g_right_weight[thid-1] > 0)) ||((height_d == (*g_counter) && g_up_weight[thid+width1] > 0) || ( height_r == (*g_counter) && g_left_weight[thid+1] > 0 ) || ( height_u == (*g_counter) && g_down_weight[thid-width1] > 0 ) ))
{
g_graph_height[thid] = (*g_counter) + 1 ;
g_pixel_mask[thid] = false ;
*g_over = true ;
}
}
}
}
/************************************************************
* functions to construct the graph on the device **
* *********************************************************/
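/* add_edge accumulates capacity on the directional weight array selected by
 * type (1 left, 2 right, 3 down, 4 up) together with a reverse capacity on
 * the neighbouring node; add_tweights stores only the difference of the two
 * terminal capacities, either as excess in d_push_reser or as remaining sink
 * capacity; add_term1/add_term2 translate the unary and pairwise terms of an
 * alpha-expansion move into those capacities. */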
__device__
void add_edge(int from, int to, int cap, int rev_cap, int type, int *d_left_weight,
int *d_right_weight, int *d_down_weight, int *d_up_weight)
{
if(type==1)
{
d_left_weight[from] = d_left_weight[from]+cap;
d_right_weight[to] = d_right_weight[to]+rev_cap;
}
if(type==2)
{
d_right_weight[from] = d_right_weight[from]+cap;
d_left_weight[to] = d_left_weight[to]+rev_cap;
}
if(type==3)
{
d_down_weight[from] = d_down_weight[from]+cap;
d_up_weight[to] = d_up_weight[to]+rev_cap;
}
if(type==4)
{
d_up_weight[from] = d_up_weight[from]+cap;
d_down_weight[to] = d_down_weight[to]+cap;
}
}
__device__
void add_tweights(int i, int cap_source, int cap_sink, int *d_push_reser, int *d_sink_weight)
{
int diff = cap_source - cap_sink ;
if(diff>0)
{
d_push_reser[i] = d_push_reser[i] + diff ;
}
else
{
d_sink_weight[i] = d_sink_weight[i] - diff ;
}
}
__device__
void add_term1(int i, int A, int B, int *d_push_reser, int *d_sink_weight)
{
add_tweights(i,B,A, d_push_reser, d_sink_weight);
}
__device__
void add_t_links_Cue(int alpha_label, int thid, int *d_left_weight, int *d_right_weight,
int *d_down_weight, int *d_up_weight, int *d_push_reser, int *d_sink_weight,
int *dPixelLabel, int *dDataTerm, int width , int height, int num_labels)
{
{
if(dPixelLabel[thid]!=alpha_label) {
add_term1(thid , dDataTerm[thid*num_labels+alpha_label] , dDataTerm[thid * num_labels + dPixelLabel[thid]], d_push_reser, d_sink_weight );
}
}
}
__device__
void add_t_links(int alpha_label, int thid, int *d_left_weight, int *d_right_weight,
int *d_down_weight, int *d_up_weight, int *d_push_reser, int *d_sink_weight,
int *dPixelLabel, int *dDataTerm, int width , int height, int num_labels)
{
{
if(dPixelLabel[thid]!=alpha_label) {
add_term1(thid , dDataTerm[thid*num_labels+alpha_label] , dDataTerm[thid * num_labels + dPixelLabel[thid]], d_push_reser, d_sink_weight );
}
}
}
__device__
void add_term2(int x, int y, int A, int B, int C, int D, int type, int *d_left_weight,
int *d_right_weight, int *d_down_weight, int *d_up_weight, int *d_push_reser, int *d_sink_weight )
{
if ( A+D > C+B) {
int delta = A+D-C-B;
int subtrA = delta/3;
A = A-subtrA;
C = C+subtrA;
B = B+(delta-subtrA*2);
#ifdef COUNT_TRUNCATIONS
truncCnt++;
#endif
}
#ifdef COUNT_TRUNCATIONS
totalCnt++;
#endif
add_tweights(x, D, A, d_push_reser, d_sink_weight);
B -= A; C -= D;
if (B < 0)
{
add_tweights(x, 0, B, d_push_reser, d_sink_weight);
add_tweights(y, 0, -B, d_push_reser, d_sink_weight ) ;
add_edge(x, y, 0, B+C,type , d_left_weight, d_right_weight, d_down_weight, d_up_weight );
}
else if (C < 0)
{
add_tweights(x, 0, -C, d_push_reser, d_sink_weight);
add_tweights(y, 0, C , d_push_reser, d_sink_weight);
add_edge(x, y, B+C, 0,type , d_left_weight, d_right_weight, d_down_weight, d_up_weight);
}
else
{
add_edge(x, y, B, C,type, d_left_weight, d_right_weight , d_down_weight, d_up_weight);
}
}
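/* When A+D exceeds B+C the pairwise term is not submodular, so add_term2
 * first redistributes the difference over A, B and C (counted when
 * COUNT_TRUNCATIONS is defined) and then splits the remaining energies over
 * the terminal edges of x and y and the n-link between them.
 * set_up_expansion_energy_G_ARRAY below applies this to the right and down
 * neighbour of every pixel not already labelled alpha, with unit edge
 * weights; the _Cue variant weights the terms by the dHcue/dVcue arrays. */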
__device__
void set_up_expansion_energy_G_ARRAY(int alpha_label,int thid, int *d_left_weight,int *d_right_weight,
int *d_down_weight, int *d_up_weight, int *d_push_reser,
int *d_sink_weight, int *dPixelLabel, int *dDataTerm, int *dSmoothTerm,
int width , int height, int num_labels )
{
int x,y,nPix;
int weight;
int i = thid ;
{
if(dPixelLabel[i]!=alpha_label)
{
y = i/width;
x = i - y*width;
if ( x < width - 1 )
{
nPix = i + 1;
weight = 1 ;
if ( dPixelLabel[nPix] != alpha_label )
{
add_term2(i,nPix,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[alpha_label + dPixelLabel[nPix]*num_labels]) * weight,
( dSmoothTerm[ dPixelLabel[i] + alpha_label * num_labels] ) * weight,
( dSmoothTerm[ dPixelLabel[i] + dPixelLabel[nPix] * num_labels] ) * weight,
2, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight); // 1-left, 2-right, 3-down, 4-up
}
else add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
if ( y < height - 1 )
{
nPix = i + width;
weight = 1 ;
if ( dPixelLabel[nPix] != alpha_label )
{
add_term2(i,nPix,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[alpha_label + dPixelLabel[nPix]*num_labels]) * weight,
( dSmoothTerm[ dPixelLabel[i] + alpha_label * num_labels] ) * weight,
( dSmoothTerm[ dPixelLabel[i] + dPixelLabel[nPix] * num_labels] ) * weight,
3, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight );
}
else add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
if ( x > 0 )
{
nPix = i - 1;
weight = 1 ;
if ( dPixelLabel[nPix] == alpha_label )
add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight );
}
if ( y > 0 )
{
nPix = i - width;
weight = 1 ;
if ( dPixelLabel[nPix] == alpha_label )
{
add_term1(i,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
}
}
}
}
__device__
void set_up_expansion_energy_G_ARRAY_Cue(int alpha_label,int thid, int *d_left_weight,int *d_right_weight,
int *d_down_weight, int *d_up_weight, int *d_push_reser,
int *d_sink_weight, int *dPixelLabel, int *dDataTerm, int *dSmoothTerm,
int *dHcue, int *dVcue, int width , int height, int num_labels )
{
int x,y,nPix;
int weight;
int i = thid ;
{
if(dPixelLabel[i]!=alpha_label)
{
y = i/width;
x = i - y*width;
if ( x < width - 1 )
{
nPix = i + 1;
weight=dHcue[i];
if ( dPixelLabel[nPix] != alpha_label )
{
add_term2(i,nPix,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[alpha_label + dPixelLabel[nPix]*num_labels]) * weight,
( dSmoothTerm[ dPixelLabel[i] + alpha_label * num_labels] ) * weight,
( dSmoothTerm[ dPixelLabel[i] + dPixelLabel[nPix] * num_labels] ) * weight,
2, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight); // 1-left, 2-right, 3-down, 4-up
}
else add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
if ( y < height - 1 )
{
nPix = i + width;
weight=dVcue[i];
if ( dPixelLabel[nPix] != alpha_label )
{
add_term2(i,nPix,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[alpha_label + dPixelLabel[nPix]*num_labels]) * weight,
( dSmoothTerm[ dPixelLabel[i] + alpha_label * num_labels] ) * weight,
( dSmoothTerm[ dPixelLabel[i] + dPixelLabel[nPix] * num_labels] ) * weight,
3, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight );
}
else add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
if ( x > 0 )
{
nPix = i - 1;
weight=dHcue[nPix];
if ( dPixelLabel[nPix] == alpha_label )
add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight );
}
if ( y > 0 )
{
nPix = i - width;
weight = dVcue[nPix] ;
if ( dPixelLabel[nPix] == alpha_label )
{
add_term1(i,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
}
}
}
}
__global__
void CudaWeightCue(int alpha_label, int *d_left_weight, int *d_right_weight, int *d_down_weight,
int *d_up_weight, int *d_push_reser, int *d_sink_weight, int *dPixelLabel,
int *dDataTerm, int *dSmoothTerm, int *dHcue, int *dVcue, int width, int height, int num_labels )
{
int thid = blockIdx.x * 256 + threadIdx.x ;
add_t_links_Cue(alpha_label, thid, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm, width, height, num_labels);
set_up_expansion_energy_G_ARRAY_Cue(alpha_label, thid, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm, dSmoothTerm, dHcue, dVcue, width, height, num_labels);
}
__global__
void CudaWeight(int alpha_label, int *d_left_weight, int *d_right_weight, int *d_down_weight,
int *d_up_weight, int *d_push_reser, int *d_sink_weight, int *dPixelLabel,
int *dDataTerm, int *dSmoothTerm, int width, int height, int num_labels)
{
int thid = blockIdx.x * 256 + threadIdx.x ;
add_t_links(alpha_label, thid, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm, width, height, num_labels);
set_up_expansion_energy_G_ARRAY(alpha_label, thid, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm, dSmoothTerm, width, height, num_labels);
}
/*********************************************************
* function which adjusts the array size for efficiency **
* considerations **
* ******************************************************/
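/* adjustedgeweight copies the six per-pixel capacity arrays from the original
 * width x height layout into the padded width1 x height1 layout expected by
 * the tiled kernels, zero-filling the padding; copyedgeweight further below
 * moves the padded arrays into the working buffers and resets the pull
 * counters, the relabel mask and both height buffers. */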
__global__
void adjustedgeweight(int *d_left_weight, int *d_right_weight, int *d_down_weight, int *d_up_weight,
int *d_push_reser, int *d_sink_weight, int *temp_left_weight, int *temp_right_weight,
int *temp_down_weight, int *temp_up_weight, int *temp_push_reser, int *temp_sink_weight,
int width, int height, int graph_size, int width1, int height1, int graph_size1)
{
int thid = blockIdx.x * 256 + threadIdx.x ;
if( thid < graph_size1 )
{
int row = thid / width1 , col = thid % width1 ;
if(row < height && col < width)
{
temp_left_weight[row* width1 + col] = d_left_weight[row * width + col] ;
temp_right_weight[row * width1 + col] = d_right_weight[row * width + col] ;
temp_down_weight[row * width1 + col] = d_down_weight[row * width + col] ;
temp_up_weight[row * width1 + col] = d_up_weight[row * width + col] ;
temp_push_reser[row * width1 + col] = d_push_reser[row * width + col] ;
temp_sink_weight[row * width1 + col] = d_sink_weight[row * width + col] ;
}
else
{
temp_left_weight[row * width1 + col] = 0 ;
temp_right_weight[row * width1 + col] = 0 ;
temp_down_weight[row * width1 + col] = 0 ;
temp_up_weight[row * width1 + col] = 0 ;
temp_push_reser[row * width1 + col] = 0 ;
temp_sink_weight[row * width1 + col] = 0 ;
}
}
}
/************************************************************
* Initializes memory on the GPU **
* ********************************************************/
__global__
void copyedgeweight( int *d_left_weight, int *d_right_weight, int *d_down_weight, int *d_up_weight,
int *d_push_reser, int *d_sink_weight, int *temp_left_weight, int *temp_right_weight,
int *temp_down_weight, int *temp_up_weight, int *temp_push_reser, int *temp_sink_weight,
int *d_pull_left, int *d_pull_right, int *d_pull_down, int *d_pull_up, int *d_relabel_mask,
int *d_graph_heightr, int *d_graph_heightw, int width, int height, int graph_size, int width1, int height1, int graph_size1)
{
int thid = blockIdx.x * 256 + threadIdx.x ;
if( thid < graph_size1 )
{
d_left_weight[thid] = temp_left_weight[thid] ;
d_right_weight[thid] = temp_right_weight[thid] ;
d_down_weight[thid] = temp_down_weight[thid] ;
d_up_weight[thid] = temp_up_weight[thid] ;
d_push_reser[thid] = temp_push_reser[thid] ;
d_sink_weight[thid] = temp_sink_weight[thid] ;
d_pull_left[thid] = 0 ;
d_pull_right[thid] = 0 ;
d_pull_down[thid] = 0 ;
d_pull_up[thid] = 0 ;
d_relabel_mask[thid] = 0 ;
d_graph_heightr[thid] = 1 ;
d_graph_heightw[thid] = 1 ;
}
}
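/*********************************************************************
 * Illustrative sketch only -- not part of the original CudaCuts **
 * sources. It shows how the atomic push and relabel kernels above **
 * might be chained for one sweep from the host. The 32x8 block **
 * shape matches the shared-memory tiling inside the kernels; the **
 * grid dimensions, the buffer names and the ping-pong swap of the **
 * two height arrays are assumptions inferred from the kernel **
 * signatures, not taken from the original host code. **
 * ******************************************************************/
static inline void sketch_push_relabel_sweep( int *d_left, int *d_right, int *d_down, int *d_up,
int *d_sink, int *d_push, int *d_pull_left, int *d_pull_right, int *d_pull_down, int *d_pull_up,
int *d_relabel_mask, int **d_height_read, int **d_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1 )
{
dim3 block( 32, 8, 1 ); /* one 34x10 halo tile of heights per block */
dim3 grid( width1 / 32, rows1 / 8, 1 ); /* width1 and rows1 assumed padded to multiples of 32 and 8 */
hipLaunchKernelGGL( kernel_push1_atomic, grid, block, 0, 0,
d_left, d_right, d_down, d_up, d_sink, d_push,
d_pull_left, d_pull_right, d_pull_down, d_pull_up,
d_relabel_mask, *d_height_read, *d_height_write,
graph_size, width, rows, graph_size1, width1, rows1 );
hipLaunchKernelGGL( kernel_relabel_atomic, grid, block, 0, 0,
d_left, d_right, d_down, d_up, d_sink, d_push,
d_pull_left, d_pull_right, d_pull_down, d_pull_up,
d_relabel_mask, *d_height_read, *d_height_write,
graph_size, width, rows, graph_size1, width1, rows1 );
/* the relabel kernel wrote the new heights into *d_height_write;
swap the buffers so the next sweep reads them */
int *tmp = *d_height_read ;
*d_height_read = *d_height_write ;
*d_height_write = tmp ;
}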
#endif
| 68ae88b12e3b3b31a7b00350b90d4b2e0bf4c6ce.cu |
/***********************************************************************************************
* * Implementing Graph Cuts on CUDA using algorithm given in CVGPU '08 **
* * paper "CUDA Cuts: Fast Graph Cuts on GPUs" **
* * **
* * Copyright (c) 2008 International Institute of Information Technology. **
* * All rights reserved. **
* * **
* * Permission to use, copy, modify and distribute this software and its documentation for **
* * educational purpose is hereby granted without fee, provided that the above copyright **
* * notice and this permission notice appear in all copies of this software and that you do **
* * not sell the software. **
* * **
* * THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR **
* * OTHERWISE. **
* * **
* * Created By Vibhav Vineet. **
* ********************************************************************************************/
#ifndef _PUSHRELABEL_KERNEL_CU_
#define _PUSHRELABEL_KERNEL_CU_
#include "CudaCuts.h"
/************************************************
* Push operation is performed **
* *********************************************/
__global__ void
kernel_push1_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down,
int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1 )
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
}
__global__ void
kernel_relabel_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down,
int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1 )
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int min_flow_pushed = g_left_weight[thid] ;
int flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads();
if(thid < graph_size1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0 )
{
if(g_sink_weight[thid] > 0)
{
g_height_write[thid] = 1 ;
}
else
{
int min_height = graph_size ;
(min_flow_pushed > 0 && min_height > height_fn[temp_mult - 1] ) ? min_height = height_fn[temp_mult - 1] : 0 ;
(g_right_weight[thid] > 0 && min_height > height_fn[temp_mult + 1]) ? min_height = height_fn[temp_mult + 1] : 0 ;
(g_down_weight[thid] > 0 && min_height > height_fn[temp_mult + 34] ) ? min_height = height_fn[temp_mult + 34] : 0 ;
(g_up_weight[thid] > 0 && min_height > height_fn[temp_mult - 34] ) ? min_height = height_fn[temp_mult - 34] : 0 ;
g_height_write[thid] = min_height + 1 ;
}
}
}
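/* The stochastic variant below is identical to the relabel kernel above but
 * is gated per block: a thread block whose entry in d_stochastic is zero
 * returns immediately, so only regions whose excess changed in the previous
 * sweep are relabelled. */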
__global__ void
kernel_relabel_stochastic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_stochastic, int *g_block_num )
{
if(d_stochastic[blockIdx.y * (*g_block_num) + blockIdx.x] == 1 )
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int min_flow_pushed = g_left_weight[thid] ;
int flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads();
if(thid < graph_size1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0 )
{
if(g_sink_weight[thid] > 0)
{
g_height_write[thid] = 1 ;
}
else
{
int min_height = graph_size ;
(min_flow_pushed > 0 && min_height > height_fn[temp_mult - 1] ) ? min_height = height_fn[temp_mult - 1] : 0 ;
(g_right_weight[thid] > 0 && min_height > height_fn[temp_mult + 1]) ? min_height = height_fn[temp_mult + 1] : 0 ;
(g_down_weight[thid] > 0 && min_height > height_fn[temp_mult + 34] ) ? min_height = height_fn[temp_mult + 34] : 0 ;
(g_up_weight[thid] > 0 && min_height > height_fn[temp_mult - 34] ) ? min_height = height_fn[temp_mult - 34] : 0 ;
g_height_write[thid] = min_height + 1 ;
}
}
}
}
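/************************************************************
* kernel_push2_atomic below performs two rounds of atomic **
* pushes per launch: every active pixel (relabel_mask==1) **
* first sends excess to the sink, then to any neighbour **
* sitting exactly one level below it in the height **
* function, updating residual capacities and excesses **
* with atomics; the relabel mask is refreshed from the **
* shared copy of the heights between the two rounds. **
* *********************************************************/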
__global__ void
kernel_push2_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1)
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads() ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
}
__global__ void
kernel_End( int *g_stochastic, int *g_count_blocks, int g_counter)
{
int thid = blockIdx.x * blockDim.x + threadIdx.x ;
if( thid < (g_counter ) )
{
if( g_stochastic[thid] == 1 )
atomicAdd(g_count_blocks,1);
//(*g_count_blocks) = (*g_count_blocks) + 1 ;
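// atomicAdd is needed here: many threads can hit g_stochastic[thid] == 1 in
// the same launch, so the plain increment kept above for reference would race
// on *g_count_blocks and under-count the active blocks.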
}
}
__global__ void
kernel_push1_start_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_relabel, int *d_stochastic, int d_counter, bool *d_finish)
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
flow_push = flow_push - min_flow_pushed ;
}
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
( flow_push > 0 && ( ((height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && min_flow_pushed > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
if(thid < graph_size1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0 )
{
if(g_sink_weight[thid] > 0)
{
g_height_write[thid] = 1 ;
}
else
{
int min_height = graph_size ;
(min_flow_pushed > 0 && min_height > height_fn[temp_mult - 1] ) ? min_height = height_fn[temp_mult - 1] : 0 ;
(g_right_weight[thid] > 0 && min_height > height_fn[temp_mult + 1]) ? min_height = height_fn[temp_mult + 1] : 0 ;
(g_down_weight[thid] > 0 && min_height > height_fn[temp_mult + 34] ) ? min_height = height_fn[temp_mult + 34] : 0 ;
(g_up_weight[thid] > 0 && min_height > height_fn[temp_mult - 34] ) ? min_height = height_fn[temp_mult - 34] : 0 ;
g_height_write[thid] = min_height + 1 ;
}
}
}
__global__ void
kernel_push1_stochastic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_stochastic,int *g_block_num )
{
if(d_stochastic[blockIdx.y * (*g_block_num) + blockIdx.x] == 1 )
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
}
}
__global__ void
kernel_push2_stochastic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_relabel, int *d_stochastic, int *d_counter, bool *d_finish)
{
if(d_stochastic[blockIdx.y * 20 + blockIdx.x] == 1 )
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads() ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
}
}
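/************************************************************
* kernel_bfs_t below seeds the height labelling used after **
* the push phase: interior pixels that still hold excess **
* flow get height 1, pixels with remaining sink capacity **
* get height -1, and both are removed from g_pixel_mask. **
* kernel_bfs (further below) then grows the labelled **
* region by one level per launch through edges that still **
* have residual capacity, setting *g_over whenever it **
* labels something new. **
* *********************************************************/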
__global__ void
kernel_bfs_t(int *g_push_reser, int *g_sink_weight, int *g_graph_height, bool *g_pixel_mask,
int vertex_num, int width, int height, int vertex_num1, int width1, int height1)
{
int thid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x ;
if(thid < vertex_num && g_pixel_mask[thid] == true )
{
int col = thid % width1 , row = thid / width1 ;
if(col > 0 && row > 0 && col < width - 1 && row < height - 1 && g_push_reser[thid] > 0 )
{
g_graph_height[thid] = 1 ;
g_pixel_mask[thid] = false ;
}
else
if(g_sink_weight[thid] > 0)
{
g_graph_height[thid] = -1 ;
g_pixel_mask[thid] = false ;
}
}
}
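/************************************************************
* The next two kernels implement the block-activity test **
* of the stochastic path: kernel_push_stochastic1 takes a **
* snapshot of g_push_reser into s_push_reser (and thread 0 **
* clears the finish flag while fewer than 50 blocks are **
* counted as active), and kernel_push_stochastic2 compares **
* that snapshot against the current excesses and marks a **
* block in d_stochastic when anything inside it changed, **
* so the *_stochastic kernels above can skip idle blocks. **
* *********************************************************/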
__global__ void
kernel_push_stochastic1( int *g_push_reser, int *s_push_reser, int *g_count_blocks, bool *g_finish, int g_block_num, int width1)
{
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
s_push_reser[thid] = g_push_reser[thid] ;
if( thid == 0 )
{
if((*g_count_blocks) < 50 )
(*g_finish) = false ;
}
}
__global__ void
kernel_push_stochastic2( int *g_push_reser, int *s_push_reser, int *d_stochastic, int g_block_num, int width1)
{
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
int stochastic = 0 ;
stochastic = ( s_push_reser[thid] - g_push_reser[thid]) ;
if(stochastic != 0)
{
d_stochastic[blockIdx.y * (g_block_num) + blockIdx.x] = 1 ;
}
}
__global__ void
kernel_push1_start_stochastic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_sink_weight, int *g_push_reser,
int *g_relabel_mask, int *g_graph_height, int *g_height_write,
int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_relabel, int *d_stochastic, int d_counter, bool *d_finish)
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
flow_push = flow_push - min_flow_pushed ;
}
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
( flow_push > 0 && ( ((height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && min_flow_pushed > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
if(thid < graph_size1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0 )
{
if(g_sink_weight[thid] > 0)
{
g_height_write[thid] = 1 ;
}
else
{
int min_height = graph_size ;
(min_flow_pushed > 0 && min_height > height_fn[temp_mult - 1] ) ? min_height = height_fn[temp_mult - 1] : 0 ;
(g_right_weight[thid] > 0 && min_height > height_fn[temp_mult + 1]) ? min_height = height_fn[temp_mult + 1] : 0 ;
(g_down_weight[thid] > 0 && min_height > height_fn[temp_mult + 34] ) ? min_height = height_fn[temp_mult + 34] : 0 ;
(g_up_weight[thid] > 0 && min_height > height_fn[temp_mult - 34] ) ? min_height = height_fn[temp_mult - 34] : 0 ;
g_height_write[thid] = min_height + 1 ;
}
}
}
__global__ void
kernel_bfs(int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight,
int *g_graph_height, bool *g_pixel_mask, int vertex_num,int width,int height,
int vertex_num1, int width1, int height1, bool *g_over, int *g_counter)
{
/*******************************
*threadId is calculated ******
*****************************/
int thid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x ;
if(thid < vertex_num && g_pixel_mask[thid] == true)
{
int col = thid % width1 , row = thid / width1 ;
if(col < width - 1 && col > 0 && row < height - 1 && row > 0 )
{
int height_l = 0, height_d = 0, height_u = 0 , height_r = 0 ;
height_r = g_graph_height[thid+1] ;
height_l = g_graph_height[thid-1] ;
height_d = g_graph_height[thid+width1] ;
height_u = g_graph_height[thid-width1] ;
if(((height_l == (*g_counter) && g_right_weight[thid-1] > 0)) ||((height_d == (*g_counter) && g_up_weight[thid+width1] > 0) || ( height_r == (*g_counter) && g_left_weight[thid+1] > 0 ) || ( height_u == (*g_counter) && g_down_weight[thid-width1] > 0 ) ))
{
g_graph_height[thid] = (*g_counter) + 1 ;
g_pixel_mask[thid] = false ;
*g_over = true ;
}
}
}
}
/************************************************************
* functions to construct the graph on the device **
* *********************************************************/
__device__
void add_edge(int from, int to, int cap, int rev_cap, int type, int *d_left_weight,
int *d_right_weight, int *d_down_weight, int *d_up_weight)
{
if(type==1)
{
d_left_weight[from] = d_left_weight[from]+cap;
d_right_weight[to] = d_right_weight[to]+rev_cap;
}
if(type==2)
{
d_right_weight[from] = d_right_weight[from]+cap;
d_left_weight[to] = d_left_weight[to]+rev_cap;
}
if(type==3)
{
d_down_weight[from] = d_down_weight[from]+cap;
d_up_weight[to] = d_up_weight[to]+rev_cap;
}
if(type==4)
{
d_up_weight[from] = d_up_weight[from]+cap;
d_down_weight[to] = d_down_weight[to]+rev_cap;
}
}
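/************************************************************
* Illustrative sketch, not part of the original solver: a **
* minimal kernel showing how add_edge's "type" codes map **
* onto a 4-connected grid. Only types 2 (right) and 3 **
* (down) are exercised, mirroring the calls made further **
* below; symmetric capacities are assumed here. **
* *********************************************************/
__global__
void example_build_grid_edges(int *d_left_weight, int *d_right_weight,
int *d_down_weight, int *d_up_weight,
int width, int height, int cap)
{
int x = blockIdx.x * blockDim.x + threadIdx.x ;
int y = blockIdx.y * blockDim.y + threadIdx.y ;
if( x >= width || y >= height ) return ;
int i = y * width + x ;
// type 2 : edge i -> i+1, reverse capacity accumulated on the right neighbour
if( x < width - 1 )
add_edge(i, i + 1, cap, cap, 2, d_left_weight, d_right_weight, d_down_weight, d_up_weight);
// type 3 : edge i -> i+width, reverse capacity accumulated on the neighbour below
if( y < height - 1 )
add_edge(i, i + width, cap, cap, 3, d_left_weight, d_right_weight, d_down_weight, d_up_weight);
}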
__device__
void add_tweights(int i, int cap_source, int cap_sink, int *d_push_reser, int *d_sink_weight)
{
int diff = cap_source - cap_sink ;
if(diff>0)
{
d_push_reser[i] = d_push_reser[i] + diff ;
}
else
{
d_sink_weight[i] = d_sink_weight[i] - diff ;
}
}
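/* Worked example for add_tweights (a reading of the code above): with
cap_source = 5 and cap_sink = 2 the difference is +3, so 3 units of excess
are added to d_push_reser[i]; with cap_source = 1 and cap_sink = 4 the
difference is -3 and d_sink_weight[i] grows by 3 instead. Only the
difference of the two terminal capacities is kept, which is all the
push-relabel kernels above need. */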
__device__
void add_term1(int i, int A, int B, int *d_push_reser, int *d_sink_weight)
{
add_tweights(i,B,A, d_push_reser, d_sink_weight);
}
__device__
void add_t_links_Cue(int alpha_label, int thid, int *d_left_weight, int *d_right_weight,
int *d_down_weight, int *d_up_weight, int *d_push_reser, int *d_sink_weight,
int *dPixelLabel, int *dDataTerm, int width , int height, int num_labels)
{
{
if(dPixelLabel[thid]!=alpha_label) {
add_term1(thid , dDataTerm[thid*num_labels+alpha_label] , dDataTerm[thid * num_labels + dPixelLabel[thid]], d_push_reser, d_sink_weight );
}
}
}
__device__
void add_t_links(int alpha_label, int thid, int *d_left_weight, int *d_right_weight,
int *d_down_weight, int *d_up_weight, int *d_push_reser, int *d_sink_weight,
int *dPixelLabel, int *dDataTerm, int width , int height, int num_labels)
{
{
if(dPixelLabel[thid]!=alpha_label) {
add_term1(thid , dDataTerm[thid*num_labels+alpha_label] , dDataTerm[thid * num_labels + dPixelLabel[thid]], d_push_reser, d_sink_weight );
}
}
}
__device__
void add_term2(int x, int y, int A, int B, int C, int D, int type, int *d_left_weight,
int *d_right_weight, int *d_down_weight, int *d_up_weight, int *d_push_reser, int *d_sink_weight )
{
if ( A+D > C+B) {
int delta = A+D-C-B;
int subtrA = delta/3;
A = A-subtrA;
C = C+subtrA;
B = B+(delta-subtrA*2);
#ifdef COUNT_TRUNCATIONS
truncCnt++;
#endif
}
#ifdef COUNT_TRUNCATIONS
totalCnt++;
#endif
add_tweights(x, D, A, d_push_reser, d_sink_weight);
B -= A; C -= D;
if (B < 0)
{
add_tweights(x, 0, B, d_push_reser, d_sink_weight);
add_tweights(y, 0, -B, d_push_reser, d_sink_weight ) ;
add_edge(x, y, 0, B+C,type , d_left_weight, d_right_weight, d_down_weight, d_up_weight );
}
else if (C < 0)
{
add_tweights(x, 0, -C, d_push_reser, d_sink_weight);
add_tweights(y, 0, C , d_push_reser, d_sink_weight);
add_edge(x, y, B+C, 0,type , d_left_weight, d_right_weight, d_down_weight, d_up_weight);
}
else
{
add_edge(x, y, B, C,type, d_left_weight, d_right_weight , d_down_weight, d_up_weight);
}
}
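/* Sketch of what add_term2 encodes, read off the call sites below: A, B, C, D
are the four pairwise energies of the binary expansion subproblem in the order
(both take alpha, x takes alpha, y takes alpha, both keep their label). The
first branch truncates non-submodular terms so that B + C >= A + D holds.
Worked numbers: A=4, B=1, C=1, D=2 gives delta = 4+2-1-1 = 4, subtrA = 4/3 = 1,
so A becomes 3, C becomes 2 and B becomes 1 + (4 - 2*1) = 3, after which
A + D = 5 = B + C. The rest of the function then folds A and D into x's
terminal weights, reduces B and C accordingly, and writes what is left as a
single n-link (capacities B and C), or as extra terminal weights plus an
n-link of capacity B + C when one of them went negative. */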
__device__
void set_up_expansion_energy_G_ARRAY(int alpha_label,int thid, int *d_left_weight,int *d_right_weight,
int *d_down_weight, int *d_up_weight, int *d_push_reser,
int *d_sink_weight, int *dPixelLabel, int *dDataTerm, int *dSmoothTerm,
int width , int height, int num_labels )
{
int x,y,nPix;
int weight;
int i = thid ;
{
if(dPixelLabel[i]!=alpha_label)
{
y = i/width;
x = i - y*width;
if ( x < width - 1 )
{
nPix = i + 1;
weight = 1 ;
if ( dPixelLabel[nPix] != alpha_label )
{
add_term2(i,nPix,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[alpha_label + dPixelLabel[nPix]*num_labels]) * weight,
( dSmoothTerm[ dPixelLabel[i] + alpha_label * num_labels] ) * weight,
( dSmoothTerm[ dPixelLabel[i] + dPixelLabel[nPix] * num_labels] ) * weight,
2, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight); // 1-left, 2-right, 3-down, 4-up
}
else add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
if ( y < height - 1 )
{
nPix = i + width;
weight = 1 ;
if ( dPixelLabel[nPix] != alpha_label )
{
add_term2(i,nPix,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[alpha_label + dPixelLabel[nPix]*num_labels]) * weight,
( dSmoothTerm[ dPixelLabel[i] + alpha_label * num_labels] ) * weight,
( dSmoothTerm[ dPixelLabel[i] + dPixelLabel[nPix] * num_labels] ) * weight,
3, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight );
}
else add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
if ( x > 0 )
{
nPix = i - 1;
weight = 1 ;
if ( dPixelLabel[nPix] == alpha_label )
add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight );
}
if ( y > 0 )
{
nPix = i - width;
weight = 1 ;
if ( dPixelLabel[nPix] == alpha_label )
{
add_term1(i,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
}
}
}
}
__device__
void set_up_expansion_energy_G_ARRAY_Cue(int alpha_label,int thid, int *d_left_weight,int *d_right_weight,
int *d_down_weight, int *d_up_weight, int *d_push_reser,
int *d_sink_weight, int *dPixelLabel, int *dDataTerm, int *dSmoothTerm,
int *dHcue, int *dVcue, int width , int height, int num_labels )
{
int x,y,nPix;
int weight;
int i = thid ;
{
if(dPixelLabel[i]!=alpha_label)
{
y = i/width;
x = i - y*width;
if ( x < width - 1 )
{
nPix = i + 1;
weight=dHcue[i];
if ( dPixelLabel[nPix] != alpha_label )
{
add_term2(i,nPix,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[alpha_label + dPixelLabel[nPix]*num_labels]) * weight,
( dSmoothTerm[ dPixelLabel[i] + alpha_label * num_labels] ) * weight,
( dSmoothTerm[ dPixelLabel[i] + dPixelLabel[nPix] * num_labels] ) * weight,
2, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight); // 1-left, 2-right, 3-down, 4-up
}
else add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
if ( y < height - 1 )
{
nPix = i + width;
weight=dVcue[i];
if ( dPixelLabel[nPix] != alpha_label )
{
add_term2(i,nPix,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[alpha_label + dPixelLabel[nPix]*num_labels]) * weight,
( dSmoothTerm[ dPixelLabel[i] + alpha_label * num_labels] ) * weight,
( dSmoothTerm[ dPixelLabel[i] + dPixelLabel[nPix] * num_labels] ) * weight,
3, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight );
}
else add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
if ( x > 0 )
{
nPix = i - 1;
weight=dHcue[nPix];
if ( dPixelLabel[nPix] == alpha_label )
add_term1(i,
( dSmoothTerm[alpha_label + dPixelLabel[nPix] * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight );
}
if ( y > 0 )
{
nPix = i - width;
weight = dVcue[nPix] ;
if ( dPixelLabel[nPix] == alpha_label )
{
add_term1(i,
( dSmoothTerm[alpha_label + alpha_label * num_labels]) * weight,
( dSmoothTerm[dPixelLabel[i] + alpha_label*num_labels]) * weight,
d_push_reser, d_sink_weight);
}
}
}
}
}
__global__
void CudaWeightCue(int alpha_label, int *d_left_weight, int *d_right_weight, int *d_down_weight,
int *d_up_weight, int *d_push_reser, int *d_sink_weight, int *dPixelLabel,
int *dDataTerm, int *dSmoothTerm, int *dHcue, int *dVcue, int width, int height, int num_labels )
{
int thid = blockIdx.x * 256 + threadIdx.x ;
add_t_links_Cue(alpha_label, thid, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm, width, height, num_labels);
set_up_expansion_energy_G_ARRAY_Cue(alpha_label, thid, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm, dSmoothTerm, dHcue, dVcue, width, height, num_labels);
}
__global__
void CudaWeight(int alpha_label, int *d_left_weight, int *d_right_weight, int *d_down_weight,
int *d_up_weight, int *d_push_reser, int *d_sink_weight, int *dPixelLabel,
int *dDataTerm, int *dSmoothTerm, int width, int height, int num_labels)
{
int thid = blockIdx.x * 256 + threadIdx.x ;
add_t_links(alpha_label, thid, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm, width, height, num_labels);
set_up_expansion_energy_G_ARRAY(alpha_label, thid, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm, dSmoothTerm, width, height, num_labels);
}
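/************************************************************
* Illustrative host-side sketch, not from the original **
* source: one way CudaWeight could be launched for a **
* single alpha-expansion move. The kernel indexes with **
* blockIdx.x * 256 + threadIdx.x and has no bounds check, **
* so width * height is assumed to be a multiple of 256. **
* *********************************************************/
void example_launch_CudaWeight(int alpha_label,
int *d_left_weight, int *d_right_weight, int *d_down_weight, int *d_up_weight,
int *d_push_reser, int *d_sink_weight, int *dPixelLabel,
int *dDataTerm, int *dSmoothTerm, int width, int height, int num_labels)
{
dim3 block(256);
dim3 grid((width * height) / 256);
CudaWeight<<<grid, block>>>(alpha_label, d_left_weight, d_right_weight,
d_down_weight, d_up_weight, d_push_reser, d_sink_weight,
dPixelLabel, dDataTerm, dSmoothTerm, width, height, num_labels);
}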
/*********************************************************
* function which adjusts the array size for efficiency **
* consideration **
* ******************************************************/
__global__
void adjustedgeweight(int *d_left_weight, int *d_right_weight, int *d_down_weight, int *d_up_weight,
int *d_push_reser, int *d_sink_weight, int *temp_left_weight, int *temp_right_weight,
int *temp_down_weight, int *temp_up_weight, int *temp_push_reser, int *temp_sink_weight,
int width, int height, int graph_size, int width1, int height1, int graph_size1)
{
int thid = blockIdx.x * 256 + threadIdx.x ;
if( thid < graph_size1 )
{
int row = thid / width1 , col = thid % width1 ;
if(row < height && col < width)
{
temp_left_weight[row* width1 + col] = d_left_weight[row * width + col] ;
temp_right_weight[row * width1 + col] = d_right_weight[row * width + col] ;
temp_down_weight[row * width1 + col] = d_down_weight[row * width + col] ;
temp_up_weight[row * width1 + col] = d_up_weight[row * width + col] ;
temp_push_reser[row * width1 + col] = d_push_reser[row * width + col] ;
temp_sink_weight[row * width1 + col] = d_sink_weight[row * width + col] ;
}
else
{
temp_left_weight[row * width1 + col] = 0 ;
temp_right_weight[row * width1 + col] = 0 ;
temp_down_weight[row * width1 + col] = 0 ;
temp_up_weight[row * width1 + col] = 0 ;
temp_push_reser[row * width1 + col] = 0 ;
temp_sink_weight[row * width1 + col] = 0 ;
}
}
}
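// Note: width1/height1 are the enlarged (padded) dimensions the solver kernels
// index with; cells that fall outside the original width x height image are
// given zero capacities above so they can never carry flow.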
/************************************************************
* Initializes memory on the GPU **
* ********************************************************/
__global__
void copyedgeweight( int *d_left_weight, int *d_right_weight, int *d_down_weight, int *d_up_weight,
int *d_push_reser, int *d_sink_weight, int *temp_left_weight, int *temp_right_weight,
int *temp_down_weight, int *temp_up_weight, int *temp_push_reser, int *temp_sink_weight,
int *d_pull_left, int *d_pull_right, int *d_pull_down, int *d_pull_up, int *d_relabel_mask,
int *d_graph_heightr, int *d_graph_heightw, int width, int height, int graph_size, int width1, int height1, int graph_size1)
{
int thid = blockIdx.x * 256 + threadIdx.x ;
if( thid < graph_size1 )
{
d_left_weight[thid] = temp_left_weight[thid] ;
d_right_weight[thid] = temp_right_weight[thid] ;
d_down_weight[thid] = temp_down_weight[thid] ;
d_up_weight[thid] = temp_up_weight[thid] ;
d_push_reser[thid] = temp_push_reser[thid] ;
d_sink_weight[thid] = temp_sink_weight[thid] ;
d_pull_left[thid] = 0 ;
d_pull_right[thid] = 0 ;
d_pull_down[thid] = 0 ;
d_pull_up[thid] = 0 ;
d_relabel_mask[thid] = 0 ;
d_graph_heightr[thid] = 1 ;
d_graph_heightw[thid] = 1 ;
}
}
#endif
|
f281d4607d77e35c5a6eea6d5aaff7177c9f7b34.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/filling.hpp>
#include <cudf/types.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/repeat.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/scan.h>
#include <thrust/binary_search.h>
#include <hip/hip_runtime.h>
#include <limits>
#include <memory>
namespace {
struct count_accessor {
cudf::scalar const* p_scalar = nullptr;
template <typename T>
std::enable_if_t<std::is_integral<T>::value, cudf::size_type>
operator()(hipStream_t stream = 0) {
using ScalarType = cudf::experimental::scalar_type_t<T>;
#if 1
// TODO: temporary till cudf::scalar's value() function is marked as const
auto p_count =
const_cast<ScalarType*>(static_cast<ScalarType const*>(this->p_scalar));
#else
auto p_count = static_cast<ScalarType const*>(this->p_scalar);
#endif
auto count = p_count->value();
// static_cast is necessary due to bool8
CUDF_EXPECTS(static_cast<int64_t>(count) <=
std::numeric_limits<cudf::size_type>::max(),
"count should not exceed size_type's limit.");
return static_cast<cudf::size_type>(count);
}
template <typename T>
std::enable_if_t<not std::is_integral<T>::value, cudf::size_type>
operator()(hipStream_t stream) {
CUDF_FAIL("count value should be an integral type.");
}
};
struct compute_offsets {
cudf::column_view const* p_column = nullptr;
template <typename T>
std::enable_if_t<std::is_integral<T>::value,
rmm::device_vector<cudf::size_type>>
operator()(bool check_count, hipStream_t stream = 0) {
// static_cast is necessary due to bool8
if (check_count &&
static_cast<int64_t>(std::numeric_limits<T>::max()) >
std::numeric_limits<cudf::size_type>::max()) {
auto max = thrust::reduce(p_column->begin<T>(), p_column->end<T>(),
0, thrust::maximum<T>());
CUDF_EXPECTS(max <= std::numeric_limits<cudf::size_type>::max(),
"count should not have values larger than size_type's limit."
);
}
rmm::device_vector<cudf::size_type> offsets(p_column->size());
thrust::inclusive_scan(rmm::exec_policy(stream)->on(stream),
p_column->begin<T>(), p_column->end<T>(),
offsets.begin());
if (check_count == true) {
CUDF_EXPECTS(thrust::is_sorted(rmm::exec_policy(stream)->on(stream),
offsets.begin(), offsets.end()) == true,
"count has negative values or the resulting table has more \
rows than size_type's limit.");
}
return offsets;
}
template <typename T>
std::enable_if_t<not std::is_integral<T>::value,
rmm::device_vector<cudf::size_type>>
operator()(bool check_count, hipStream_t stream) {
CUDF_FAIL("count value should be an integral type.");
}
};
}
namespace cudf {
namespace experimental {
namespace detail {
std::unique_ptr<table> repeat(table_view const& input_table,
column_view const& count, bool check_count,
rmm::mr::device_memory_resource* mr,
hipStream_t stream) {
CUDF_EXPECTS(input_table.num_rows() == count.size(),
"in and count must have equal size");
CUDF_EXPECTS(count.has_nulls() == false, "count cannot contain nulls");
if (input_table.num_rows() == 0) {
return cudf::experimental::empty_like(input_table);
}
auto offsets =
cudf::experimental::type_dispatcher(count.type(),
compute_offsets{&count},
check_count, stream);
size_type output_size{offsets.back()};
rmm::device_vector<size_type> indices(output_size);
thrust::upper_bound(rmm::exec_policy(stream)->on(stream),
offsets.begin(), offsets.end(),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(output_size),
indices.begin());
return gather(input_table, indices.begin(), indices.end(),
false, false, false, mr, stream);
}
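// Worked example with small numbers (not from the original source): for an
// input of 3 rows and count = [1, 0, 2], the inclusive scan gives
// offsets = [1, 1, 3] and output_size = 3; upper_bound of {0, 1, 2} in those
// offsets yields the gather map [0, 2, 2], i.e. row 0 once, row 1 dropped and
// row 2 twice, which is exactly what the gather() call above materialises.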
std::unique_ptr<table> repeat(table_view const& input_table,
scalar const& count,
rmm::mr::device_memory_resource* mr,
hipStream_t stream) {
CUDF_EXPECTS(count.is_valid(), "count cannot be null");
auto stride =
cudf::experimental::type_dispatcher(
count.type(), count_accessor{&count}, stream);
CUDF_EXPECTS(stride >= 0, "count value should be non-negative");
CUDF_EXPECTS(static_cast<int64_t>(input_table.num_rows()) * stride <=
std::numeric_limits<size_type>::max(),
"The resulting table has more rows than size_type's limit.");
if ((input_table.num_rows() == 0) || (stride == 0)) {
return cudf::experimental::empty_like(input_table);
}
auto output_size = input_table.num_rows() * stride;
auto map_begin =
thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[stride] __device__ (auto i) { return i / stride; });
auto map_end = map_begin + output_size;
return gather(input_table, map_begin, map_end,
false, false, false, mr, stream);
}
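// For the scalar overload above the gather map is simply i / stride: with
// stride = 3 it reads [0, 0, 0, 1, 1, 1, ...], so every input row is emitted
// stride times, in order.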
} // namespace detail
std::unique_ptr<table> repeat(table_view const& input_table,
column_view const& count,
bool check_count,
rmm::mr::device_memory_resource* mr) {
return detail::repeat(input_table, count, check_count, mr, 0);
}
std::unique_ptr<table> repeat(table_view const& input_table,
scalar const& count,
rmm::mr::device_memory_resource* mr) {
return detail::repeat(input_table, count, mr, 0);
}
} // namespace experimental
} // namespace cudf
| f281d4607d77e35c5a6eea6d5aaff7177c9f7b34.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/filling.hpp>
#include <cudf/types.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/repeat.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/scan.h>
#include <thrust/binary_search.h>
#include <cuda_runtime.h>
#include <limits>
#include <memory>
namespace {
struct count_accessor {
cudf::scalar const* p_scalar = nullptr;
template <typename T>
std::enable_if_t<std::is_integral<T>::value, cudf::size_type>
operator()(cudaStream_t stream = 0) {
using ScalarType = cudf::experimental::scalar_type_t<T>;
#if 1
// TODO: temporary till cudf::scalar's value() function is marked as const
auto p_count =
const_cast<ScalarType*>(static_cast<ScalarType const*>(this->p_scalar));
#else
auto p_count = static_cast<ScalarType const*>(this->p_scalar);
#endif
auto count = p_count->value();
// static_cast is necessary due to bool8
CUDF_EXPECTS(static_cast<int64_t>(count) <=
std::numeric_limits<cudf::size_type>::max(),
"count should not exceed size_type's limit.");
return static_cast<cudf::size_type>(count);
}
template <typename T>
std::enable_if_t<not std::is_integral<T>::value, cudf::size_type>
operator()(cudaStream_t stream) {
CUDF_FAIL("count value should be an integral type.");
}
};
struct compute_offsets {
cudf::column_view const* p_column = nullptr;
template <typename T>
std::enable_if_t<std::is_integral<T>::value,
rmm::device_vector<cudf::size_type>>
operator()(bool check_count, cudaStream_t stream = 0) {
// static_cast is necessary due to bool8
if (check_count &&
static_cast<int64_t>(std::numeric_limits<T>::max()) >
std::numeric_limits<cudf::size_type>::max()) {
auto max = thrust::reduce(p_column->begin<T>(), p_column->end<T>(),
0, thrust::maximum<T>());
CUDF_EXPECTS(max <= std::numeric_limits<cudf::size_type>::max(),
"count should not have values larger than size_type's limit."
);
}
rmm::device_vector<cudf::size_type> offsets(p_column->size());
thrust::inclusive_scan(rmm::exec_policy(stream)->on(stream),
p_column->begin<T>(), p_column->end<T>(),
offsets.begin());
if (check_count == true) {
CUDF_EXPECTS(thrust::is_sorted(rmm::exec_policy(stream)->on(stream),
offsets.begin(), offsets.end()) == true,
"count has negative values or the resulting table has more \
rows than size_type's limit.");
}
return offsets;
}
template <typename T>
std::enable_if_t<not std::is_integral<T>::value,
rmm::device_vector<cudf::size_type>>
operator()(bool check_count, cudaStream_t stream) {
CUDF_FAIL("count value should be an integral type.");
}
};
}
namespace cudf {
namespace experimental {
namespace detail {
std::unique_ptr<table> repeat(table_view const& input_table,
column_view const& count, bool check_count,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream) {
CUDF_EXPECTS(input_table.num_rows() == count.size(),
"in and count must have equal size");
CUDF_EXPECTS(count.has_nulls() == false, "count cannot contain nulls");
if (input_table.num_rows() == 0) {
return cudf::experimental::empty_like(input_table);
}
auto offsets =
cudf::experimental::type_dispatcher(count.type(),
compute_offsets{&count},
check_count, stream);
size_type output_size{offsets.back()};
rmm::device_vector<size_type> indices(output_size);
thrust::upper_bound(rmm::exec_policy(stream)->on(stream),
offsets.begin(), offsets.end(),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(output_size),
indices.begin());
return gather(input_table, indices.begin(), indices.end(),
false, false, false, mr, stream);
}
std::unique_ptr<table> repeat(table_view const& input_table,
scalar const& count,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream) {
CUDF_EXPECTS(count.is_valid(), "count cannot be null");
auto stride =
cudf::experimental::type_dispatcher(
count.type(), count_accessor{&count}, stream);
CUDF_EXPECTS(stride >= 0, "count value should be non-negative");
CUDF_EXPECTS(static_cast<int64_t>(input_table.num_rows()) * stride <=
std::numeric_limits<size_type>::max(),
"The resulting table has more rows than size_type's limit.");
if ((input_table.num_rows() == 0) || (stride == 0)) {
return cudf::experimental::empty_like(input_table);
}
auto output_size = input_table.num_rows() * stride;
auto map_begin =
thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[stride] __device__ (auto i) { return i / stride; });
auto map_end = map_begin + output_size;
return gather(input_table, map_begin, map_end,
false, false, false, mr, stream);
}
} // namespace detail
std::unique_ptr<table> repeat(table_view const& input_table,
column_view const& count,
bool check_count,
rmm::mr::device_memory_resource* mr) {
return detail::repeat(input_table, count, check_count, mr, 0);
}
std::unique_ptr<table> repeat(table_view const& input_table,
scalar const& count,
rmm::mr::device_memory_resource* mr) {
return detail::repeat(input_table, count, mr, 0);
}
} // namespace experimental
} // namespace cudf
|
4c47fe8c0fccce64767673ff05faa7bc4f046d24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "core/self_add_bias.cuh"
#include <assert.h>
#include "core/common.cuh"
// add_QKV_bias kernel code modified from Nvidia's DeepLearningExamples
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v3.1/fastertransformer/cuda/open_attention.cu#L1342-L1395
template<typename T>
__global__
void add_QKV_bias_opt(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
T* data_ptr;
T* buf_ptr;
const T* bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x / m ;
int row_offset = (blockIdx.x % m) * n;
if(qkv_id == 0)
{
data_ptr = Q + row_offset;
buf_ptr = q_buf_;
bias_ptr = bias_Q;
}
else if(qkv_id == 1)
{
data_ptr = K + row_offset;
buf_ptr = k_buf_;
bias_ptr = bias_K;
}
else
{
data_ptr = V + row_offset;
buf_ptr = v_buf_;
bias_ptr = bias_V;
}
int batch_id = (blockIdx.x % m) / seq_len;
int head_id = (threadIdx.x + blockIdx.y * blockDim.x) / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x ) % seq_len;
T bias = __ldg(&bias_ptr[threadIdx.x + blockDim.x * blockIdx.y]);
for(int i = word_start_id; i < word_start_id + 1; ++i)
{
T tmp = data_ptr[threadIdx.x + blockDim.x * blockIdx.y] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
i * size_per_head + id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
}
template<>
__global__
void add_QKV_bias_opt<half>( half* Q, const half* bias_Q, half* K, const half* bias_K, half* V, const half* bias_V,
half* q_buf_, half* k_buf_, half* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int tid = blockIdx.x * (size_per_head * head_num) + threadIdx.x + blockDim.x * blockIdx.y;
int batch_id = tid / (head_num * seq_len * size_per_head);
int seq_id = (tid % (head_num * seq_len * size_per_head)) / (head_num * size_per_head);
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id, batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x + blockDim.x * blockIdx.y;
half2* src_ptr = (half2*)Q;
half2* dst_ptr = (half2*)q_buf_;
const half2* bias_ptr = (const half2*)bias_Q;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)K;
dst_ptr = (half2*)k_buf_;
bias_ptr = (const half2*)bias_K;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)V;
dst_ptr = (half2*)v_buf_;
bias_ptr = (const half2*)bias_V;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
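// Note: the half specialisation above operates on packed half2 pairs, so it
// assumes size_per_head is even; the launcher below passes size_per_head / 2
// and halves block.x so that each thread handles two elements at a time.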
template<typename T>
void add_QKV_bias_opt_kernel( void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const hipStream_t stream){
int qkv_types = 3;
int m = batch_size * seq_len;
int k = head_num * size_per_head;
//assert(m * qkv_types <= 65536 && "batch_size * seq_len must <= 65536");
int fold_coeff = 1;
dim3 grid;
dim3 block;
//TODO - int8
if (sizeof(T) == sizeof(float)){
if (k <= 1024){
fold_coeff = 1;
}else if( k <= 2048){
fold_coeff = 2;
}else if(k <= 4096){
fold_coeff = 4;
}else if(k <= 8192){
fold_coeff = 8;
}else if(k <= 16384){
fold_coeff = 16;
}
grid.x = m * qkv_types;
grid.y = fold_coeff;
block.x = k / fold_coeff;
hipLaunchKernelGGL(( add_QKV_bias_opt), dim3(grid), dim3(block), 0, stream, (float*)Q, (float*)bias_Q, (float*)K, (float*)bias_K, (float*)V, (float*)bias_V, (float*)q_buf_, (float*)k_buf_,
(float*)v_buf_, batch_size, seq_len, head_num, size_per_head);
}else{
if (k <= 2048){
fold_coeff = 2;
}else if( k <= 4096){
fold_coeff = 2;
}else if(k <= 8192){
fold_coeff = 4;
}else if(k <= 16384){
fold_coeff = 8;
}else if(k <= 16384 * 2){
fold_coeff = 16;
}
grid.x = m;
grid.y = fold_coeff;
block.x = k / (2 * fold_coeff);
hipLaunchKernelGGL(( add_QKV_bias_opt), dim3(grid), dim3(block), 0, stream, (half*)Q, (half*)bias_Q, (half*)K, (half*)bias_K, (half*)V, (half*)bias_V, (half*)q_buf_, (half*)k_buf_,
(half*)v_buf_, batch_size, seq_len, head_num, size_per_head / 2);
}
}
template void add_QKV_bias_opt_kernel<float>( void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const hipStream_t stream);
template void add_QKV_bias_opt_kernel<half>(void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const hipStream_t stream);
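/* Illustrative sketch, not part of the original API surface: calling the
launcher for typical BERT-base shapes. All pointers are assumed to be device
buffers sized batch_size * seq_len * head_num * size_per_head (the bias
buffers hold head_num * size_per_head elements), allocated and filled by the
caller. */
void example_add_qkv_bias(void* d_q, const void* d_bias_q,
void* d_k, const void* d_bias_k,
void* d_v, const void* d_bias_v,
void* d_q_buf, void* d_k_buf, void* d_v_buf,
hipStream_t stream)
{
const int batch_size = 8, seq_len = 128, head_num = 12, size_per_head = 64;
add_QKV_bias_opt_kernel<float>(d_q, d_bias_q, d_k, d_bias_k, d_v, d_bias_v,
d_q_buf, d_k_buf, d_v_buf,
batch_size, seq_len, head_num, size_per_head, stream);
}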
| 4c47fe8c0fccce64767673ff05faa7bc4f046d24.cu | #include "core/self_add_bias.cuh"
#include <assert.h>
#include "core/common.cuh"
// add_QKV_bias kernel code modified from Nvidia's DeepLearningExamples
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v3.1/fastertransformer/cuda/open_attention.cu#L1342-L1395
template<typename T>
__global__
void add_QKV_bias_opt(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
T* data_ptr;
T* buf_ptr;
const T* bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x / m ;
int row_offset = (blockIdx.x % m) * n;
if(qkv_id == 0)
{
data_ptr = Q + row_offset;
buf_ptr = q_buf_;
bias_ptr = bias_Q;
}
else if(qkv_id == 1)
{
data_ptr = K + row_offset;
buf_ptr = k_buf_;
bias_ptr = bias_K;
}
else
{
data_ptr = V + row_offset;
buf_ptr = v_buf_;
bias_ptr = bias_V;
}
int batch_id = (blockIdx.x % m) / seq_len;
int head_id = (threadIdx.x + blockIdx.y * blockDim.x) / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x ) % seq_len;
T bias = __ldg(&bias_ptr[threadIdx.x + blockDim.x * blockIdx.y]);
for(int i = word_start_id; i < word_start_id + 1; ++i)
{
T tmp = data_ptr[threadIdx.x + blockDim.x * blockIdx.y] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
i * size_per_head + id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
}
template<>
__global__
void add_QKV_bias_opt<half>( half* Q, const half* bias_Q, half* K, const half* bias_K, half* V, const half* bias_V,
half* q_buf_, half* k_buf_, half* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int tid = blockIdx.x * (size_per_head * head_num) + threadIdx.x + blockDim.x * blockIdx.y;
int batch_id = tid / (head_num * seq_len * size_per_head);
int seq_id = (tid % (head_num * seq_len * size_per_head)) / (head_num * size_per_head);
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id, batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x + blockDim.x * blockIdx.y;
half2* src_ptr = (half2*)Q;
half2* dst_ptr = (half2*)q_buf_;
const half2* bias_ptr = (const half2*)bias_Q;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)K;
dst_ptr = (half2*)k_buf_;
bias_ptr = (const half2*)bias_K;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)V;
dst_ptr = (half2*)v_buf_;
bias_ptr = (const half2*)bias_V;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
template<typename T>
void add_QKV_bias_opt_kernel( void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream){
int qkv_types = 3;
int m = batch_size * seq_len;
int k = head_num * size_per_head;
//assert(m * qkv_types <= 65536 && "batch_size * seq_len must <= 65536");
int fold_coeff = 1;
dim3 grid;
dim3 block;
//TODO - int8
if (sizeof(T) == sizeof(float)){
if (k <= 1024){
fold_coeff = 1;
}else if( k <= 2048){
fold_coeff = 2;
}else if(k <= 4096){
fold_coeff = 4;
}else if(k <= 8192){
fold_coeff = 8;
}else if(k <= 16384){
fold_coeff = 16;
}
grid.x = m * qkv_types;
grid.y = fold_coeff;
block.x = k / fold_coeff;
add_QKV_bias_opt<<<grid, block, 0, stream>>>((float*)Q, (float*)bias_Q, (float*)K, (float*)bias_K, (float*)V, (float*)bias_V, (float*)q_buf_, (float*)k_buf_,
(float*)v_buf_, batch_size, seq_len, head_num, size_per_head);
}else{
if (k <= 2048){
fold_coeff = 2;
}else if( k <= 4096){
fold_coeff = 2;
}else if(k <= 8192){
fold_coeff = 4;
}else if(k <= 16384){
fold_coeff = 8;
}else if(k <= 16384 * 2){
fold_coeff = 16;
}
grid.x = m;
grid.y = fold_coeff;
block.x = k / (2 * fold_coeff);
add_QKV_bias_opt<<<grid, block, 0, stream>>>((half*)Q, (half*)bias_Q, (half*)K, (half*)bias_K, (half*)V, (half*)bias_V, (half*)q_buf_, (half*)k_buf_,
(half*)v_buf_, batch_size, seq_len, head_num, size_per_head / 2);
}
}
template void add_QKV_bias_opt_kernel<float>( void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream);
template void add_QKV_bias_opt_kernel<half>(void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream);
|
8ed3d8cb161dec0d069cbdf44db0d673f919df9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
#define random() ((double(rand())/RAND_MAX) - 0.5)*2 // between -1 and 1
#define epsilon 0.97
#define debug 1
void cpu_recque(double* s, double* u, int N, double* result){
double s_sum = 1;
double u_sum = 0;
for(int i = 0;i<N;i++){
u_sum+=(s_sum * u[i]);
s_sum*=s[i];
if(debug >= 2){
printf("%.3lf %.3lf\n",u_sum,s_sum);
}
}
result[0] = u_sum/(1-s_sum);
for (int i = 1;i<N;i++){
result[i] = (result[i-1] - u[i-1])/s[i-1];
}
}
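// NOTE (added commentary, not from the upstream source): derivation of the closed form used above.
// Unrolling the cyclic recurrence V[i] = s[i]*V[i+1] + u[i] (indices mod N) once around the cycle gives
//   V[0] = (prod_{i<N} s[i]) * V[0] + sum_{i<N} (prod_{j<i} s[j]) * u[i] = s_sum * V[0] + u_sum,
// hence V[0] = u_sum / (1 - s_sum); the remaining unknowns then follow from the back-substitution
// V[i] = (V[i-1] - u[i-1]) / s[i-1] performed after the loop.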
__global__ void gpu_recque(double* s, double* u, int N ,int nThreads, double* d_s_sum, double* d_u_sum){
double s_sum = 1;
double u_sum = 0;
int tid = threadIdx.x + blockDim.x*blockIdx.x;
int begin = floor(double(N)/nThreads) * tid;
int end = floor(double(N)/nThreads) * (tid + 1);
if(tid == nThreads - 1) end = N;
for(int i = begin;i<end;i++){
u_sum+=(s_sum * u[i]);
s_sum*=s[i];
if(debug == 3){
printf("tid = %d pos = %d\n",tid,i);
printf("u = %.3lf s = %.3lf\n",u[i],s[i]);
printf("%.3lf %.3lf\n",s_sum,u_sum);
}
}
d_s_sum[tid] = s_sum;
d_u_sum[tid] = u_sum;
}
__global__ void gpu_blocks_recque(double* s, double* u, int N ,int nThreads, double* d_s_sum, double* d_u_sum){
extern __shared__ double sum[]; // s_sum u_sum s_sum ...
int tid = threadIdx.x + blockDim.x*blockIdx.x;
//printf("pid = %d\n",tid);
if (tid >= N){
sum[threadIdx.x*2] = 1;
sum[threadIdx.x*2+1] = 0;
}
else{
sum[threadIdx.x*2] = s[tid];
sum[threadIdx.x*2+1] = u[tid];
}
if(debug == 2){
printf("blockid = %d pos = %d\n",blockIdx.x,tid);
printf("u = %.3lf s = %.3lf\n",u[tid],s[tid]);
}
for (int d = 1; d <= blockDim.x>>1; d <<= 1) {
__syncthreads();
if (threadIdx.x%(2*d)==0){
sum[threadIdx.x*2+1]+=(sum[threadIdx.x*2] * sum[(threadIdx.x+d)*2+1]); // u_sum+=(s_sum * u[i])
sum[threadIdx.x*2]*=sum[(threadIdx.x+d)*2]; // s_sum*=s[i];
if(debug == 2){
printf("tid = %d merging %d and %d result: u = %.3lf s = %.3lf\n",tid,threadIdx.x,threadIdx.x+d,sum[threadIdx.x*2+1],sum[threadIdx.x*2]);
}
}
}
if (threadIdx.x==0){
d_s_sum[blockIdx.x] = sum[0];
d_u_sum[blockIdx.x] = sum[1];
}
}
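// NOTE (added commentary, not from the upstream source): the shared array holds one (S, U) pair per
// thread, encoding the affine relation V[a] = U + S * V[b] over a contiguous block of elements.
// Two adjacent blocks compose as (S1, U1) o (S2, U2) = (S1*S2, U1 + S1*U2), which is exactly the merge
// performed in the tree reduction above; because composition of affine maps is associative, this
// reduction (and the per-block combine done later on the host) produces the same (s_sum, u_sum) as the
// sequential loop in cpu_recque.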
double validation(double* s, double* u, int N, double* result){
double sqrt_root_sum = 0;
for (int i = 1;i<=N;i++){
double t = fabs(s[i-1] * result[i==N?0:i] + u[i-1] - result[i-1]);
sqrt_root_sum += (t*t);
}
return sqrt(sqrt_root_sum);
}
int main(int argc, const char **argv){
srand(0);
int N = 1000;
double* s;
double* u;
double* result;
s = (double* )malloc(N*sizeof(double));
u = (double* )malloc(N*sizeof(double));
result = (double* )malloc(N*sizeof(double));
for(int i = 0;i<N;i++){
s[i] = 0;
        while(fabs(s[i]) < epsilon) s[i] = random();
        u[i] = 0;
        while(fabs(u[i]) < epsilon) u[i] = random();
}
if(debug){
printf("The equation is: \n");
for(int i = 1;i<=3;i++){
printf("V[%d] = %.3lf*V[%d] + %.3lf \n",i-1,s[i-1],i==N?0:i,u[i-1]);
}
printf("...\n");
for(int i = N-2;i<=N;i++){
printf("V[%d] = %.3lf*V[%d] + %.3lf \n",i-1,s[i-1],i==N?0:i,u[i-1]);
}
}
// CPU version
cpu_recque(s,u,N,result);
if(debug){
printf("\nCPU result is :\n");
for(int i = 0;i<3;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
printf("...\n");
for(int i = N-3;i<N;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
}
double SRS = validation(s,u,N,result);
printf("\nCPU Validation: \nSRS = %.3lf \n",SRS);
for(int i = 0;i<N;i++){
result[i] = 0;
}
// GPU version 1
findCudaDevice(argc, argv);
int nThreads = 16;
double* d_s;
double* d_u;
double* d_s_sum;
double* h_s_sum;
double* d_u_sum;
double* h_u_sum;
h_s_sum = (double *)malloc(nThreads*sizeof(double));
h_u_sum = (double *)malloc(nThreads*sizeof(double));
hipMalloc((void **)&d_s, N*sizeof(double));
hipMalloc((void **)&d_u, N*sizeof(double));
hipMalloc((void **)&d_s_sum, nThreads*sizeof(double));
hipMalloc((void **)&d_u_sum, nThreads*sizeof(double));
hipMemcpy(d_s,s,N*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_u,u,N*sizeof(double),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( gpu_recque), dim3(1),dim3(nThreads), 0, 0, d_s,d_u,N,nThreads,d_s_sum,d_u_sum);
getLastCudaError("gpu_recque execution failed\n");
checkCudaErrors(hipMemcpy(h_u_sum,d_u_sum,nThreads*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_s_sum,d_s_sum,nThreads*sizeof(double),hipMemcpyDeviceToHost));
double u_sum = 0;
double s_sum = 1;
for(int i = 0;i<nThreads;i++){
u_sum+=(s_sum * h_u_sum[i]);
s_sum*=h_s_sum[i];
}
result[0] = u_sum/(1-s_sum);
for (int i = 1;i<N;i++){
result[i] = (result[i-1] - u[i-1])/s[i-1];
}
if(debug){
printf("\nGPU result is :\n");
for(int i = 0;i<3;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
printf("...\n");
for(int i = N-3;i<N;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
}
SRS = validation(s,u,N,result);
printf("\nGPU Validation: \nSRS = %.3lf \n",SRS);
// GPU version 2: MultiBlocks Version
for(int i = 0;i<N;i++){
result[i] = 0;
}
checkCudaErrors(hipFree(d_s_sum));
checkCudaErrors(hipFree(d_u_sum));
free(h_s_sum);
free(h_u_sum);
nThreads = 16;
int nBlocks = ceil(double(N)/nThreads);
    int shared_mem_size = nThreads*2*sizeof(double); // one (s, u) pair per thread in a block
h_s_sum = (double *)malloc(nBlocks*sizeof(double));
h_u_sum = (double *)malloc(nBlocks*sizeof(double));
hipMalloc((void **)&d_s_sum, nBlocks*sizeof(double));
hipMalloc((void **)&d_u_sum, nBlocks*sizeof(double));
hipLaunchKernelGGL(( gpu_blocks_recque), dim3(nBlocks),dim3(nThreads),shared_mem_size, 0, d_s,d_u,N,nThreads,d_s_sum,d_u_sum);
getLastCudaError("gpu_blocks_recque execution failed\n");
checkCudaErrors(hipMemcpy(h_u_sum,d_u_sum,nBlocks*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_s_sum,d_s_sum,nBlocks*sizeof(double),hipMemcpyDeviceToHost));
u_sum = 0;
s_sum = 1;
for(int i = 0;i<nBlocks;i++){
u_sum+=(s_sum * h_u_sum[i]);
s_sum*=h_s_sum[i];
}
result[0] = u_sum/(1-s_sum);
for (int i = 1;i<N;i++){
result[i] = (result[i-1] - u[i-1])/s[i-1];
}
if(debug){
printf("\nGPU(multiblocks) result is :\n");
for(int i = 0;i<3;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
printf("...\n");
for(int i = N-3;i<N;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
}
SRS = validation(s,u,N,result);
printf("\nGPU(multiblocks) Validation: \nSRS = %.3lf \n",SRS);
} | 8ed3d8cb161dec0d069cbdf44db0d673f919df9e.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
#define random() ((double(rand())/RAND_MAX) - 0.5)*2 // between -1 and 1
#define epsilon 0.97
#define debug 1
void cpu_recque(double* s, double* u, int N, double* result){
double s_sum = 1;
double u_sum = 0;
for(int i = 0;i<N;i++){
u_sum+=(s_sum * u[i]);
s_sum*=s[i];
if(debug >= 2){
printf("%.3lf %.3lf\n",u_sum,s_sum);
}
}
result[0] = u_sum/(1-s_sum);
for (int i = 1;i<N;i++){
result[i] = (result[i-1] - u[i-1])/s[i-1];
}
}
__global__ void gpu_recque(double* s, double* u, int N ,int nThreads, double* d_s_sum, double* d_u_sum){
double s_sum = 1;
double u_sum = 0;
int tid = threadIdx.x + blockDim.x*blockIdx.x;
int begin = floor(double(N)/nThreads) * tid;
int end = floor(double(N)/nThreads) * (tid + 1);
if(tid == nThreads - 1) end = N;
for(int i = begin;i<end;i++){
u_sum+=(s_sum * u[i]);
s_sum*=s[i];
if(debug == 3){
printf("tid = %d pos = %d\n",tid,i);
printf("u = %.3lf s = %.3lf\n",u[i],s[i]);
printf("%.3lf %.3lf\n",s_sum,u_sum);
}
}
d_s_sum[tid] = s_sum;
d_u_sum[tid] = u_sum;
}
__global__ void gpu_blocks_recque(double* s, double* u, int N ,int nThreads, double* d_s_sum, double* d_u_sum){
extern __shared__ double sum[]; // s_sum u_sum s_sum ...
int tid = threadIdx.x + blockDim.x*blockIdx.x;
//printf("pid = %d\n",tid);
if (tid >= N){
sum[threadIdx.x*2] = 1;
sum[threadIdx.x*2+1] = 0;
}
else{
sum[threadIdx.x*2] = s[tid];
sum[threadIdx.x*2+1] = u[tid];
}
if(debug == 2){
printf("blockid = %d pos = %d\n",blockIdx.x,tid);
printf("u = %.3lf s = %.3lf\n",u[tid],s[tid]);
}
for (int d = 1; d <= blockDim.x>>1; d <<= 1) {
__syncthreads();
if (threadIdx.x%(2*d)==0){
sum[threadIdx.x*2+1]+=(sum[threadIdx.x*2] * sum[(threadIdx.x+d)*2+1]); // u_sum+=(s_sum * u[i])
sum[threadIdx.x*2]*=sum[(threadIdx.x+d)*2]; // s_sum*=s[i];
if(debug == 2){
printf("tid = %d merging %d and %d result: u = %.3lf s = %.3lf\n",tid,threadIdx.x,threadIdx.x+d,sum[threadIdx.x*2+1],sum[threadIdx.x*2]);
}
}
}
if (threadIdx.x==0){
d_s_sum[blockIdx.x] = sum[0];
d_u_sum[blockIdx.x] = sum[1];
}
}
double validation(double* s, double* u, int N, double* result){
double sqrt_root_sum = 0;
for (int i = 1;i<=N;i++){
double t = fabs(s[i-1] * result[i==N?0:i] + u[i-1] - result[i-1]);
sqrt_root_sum += (t*t);
}
return sqrt(sqrt_root_sum);
}
int main(int argc, const char **argv){
srand(0);
int N = 1000;
double* s;
double* u;
double* result;
s = (double* )malloc(N*sizeof(double));
u = (double* )malloc(N*sizeof(double));
result = (double* )malloc(N*sizeof(double));
for(int i = 0;i<N;i++){
s[i] = 0;
        while(fabs(s[i]) < epsilon) s[i] = random();
        u[i] = 0;
        while(fabs(u[i]) < epsilon) u[i] = random();
}
if(debug){
printf("The equation is: \n");
for(int i = 1;i<=3;i++){
printf("V[%d] = %.3lf*V[%d] + %.3lf \n",i-1,s[i-1],i==N?0:i,u[i-1]);
}
printf("...\n");
for(int i = N-2;i<=N;i++){
printf("V[%d] = %.3lf*V[%d] + %.3lf \n",i-1,s[i-1],i==N?0:i,u[i-1]);
}
}
// CPU version
cpu_recque(s,u,N,result);
if(debug){
printf("\nCPU result is :\n");
for(int i = 0;i<3;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
printf("...\n");
for(int i = N-3;i<N;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
}
double SRS = validation(s,u,N,result);
printf("\nCPU Validation: \nSRS = %.3lf \n",SRS);
for(int i = 0;i<N;i++){
result[i] = 0;
}
// GPU version 1
findCudaDevice(argc, argv);
int nThreads = 16;
double* d_s;
double* d_u;
double* d_s_sum;
double* h_s_sum;
double* d_u_sum;
double* h_u_sum;
h_s_sum = (double *)malloc(nThreads*sizeof(double));
h_u_sum = (double *)malloc(nThreads*sizeof(double));
cudaMalloc((void **)&d_s, N*sizeof(double));
cudaMalloc((void **)&d_u, N*sizeof(double));
cudaMalloc((void **)&d_s_sum, nThreads*sizeof(double));
cudaMalloc((void **)&d_u_sum, nThreads*sizeof(double));
cudaMemcpy(d_s,s,N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_u,u,N*sizeof(double),cudaMemcpyHostToDevice);
gpu_recque<<<1,nThreads>>>(d_s,d_u,N,nThreads,d_s_sum,d_u_sum);
getLastCudaError("gpu_recque execution failed\n");
checkCudaErrors(cudaMemcpy(h_u_sum,d_u_sum,nThreads*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_s_sum,d_s_sum,nThreads*sizeof(double),cudaMemcpyDeviceToHost));
double u_sum = 0;
double s_sum = 1;
for(int i = 0;i<nThreads;i++){
u_sum+=(s_sum * h_u_sum[i]);
s_sum*=h_s_sum[i];
}
result[0] = u_sum/(1-s_sum);
for (int i = 1;i<N;i++){
result[i] = (result[i-1] - u[i-1])/s[i-1];
}
if(debug){
printf("\nGPU result is :\n");
for(int i = 0;i<3;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
printf("...\n");
for(int i = N-3;i<N;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
}
SRS = validation(s,u,N,result);
printf("\nGPU Validation: \nSRS = %.3lf \n",SRS);
// GPU version 2: MultiBlocks Version
for(int i = 0;i<N;i++){
result[i] = 0;
}
checkCudaErrors(cudaFree(d_s_sum));
checkCudaErrors(cudaFree(d_u_sum));
free(h_s_sum);
free(h_u_sum);
nThreads = 16;
int nBlocks = ceil(double(N)/nThreads);
int shared_mem_size = nBlocks*2*sizeof(double);
h_s_sum = (double *)malloc(nBlocks*sizeof(double));
h_u_sum = (double *)malloc(nBlocks*sizeof(double));
cudaMalloc((void **)&d_s_sum, nBlocks*sizeof(double));
cudaMalloc((void **)&d_u_sum, nBlocks*sizeof(double));
gpu_blocks_recque<<<nBlocks,nThreads,shared_mem_size>>>(d_s,d_u,N,nThreads,d_s_sum,d_u_sum);
getLastCudaError("gpu_blocks_recque execution failed\n");
checkCudaErrors(cudaMemcpy(h_u_sum,d_u_sum,nBlocks*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_s_sum,d_s_sum,nBlocks*sizeof(double),cudaMemcpyDeviceToHost));
u_sum = 0;
s_sum = 1;
for(int i = 0;i<nBlocks;i++){
u_sum+=(s_sum * h_u_sum[i]);
s_sum*=h_s_sum[i];
}
result[0] = u_sum/(1-s_sum);
for (int i = 1;i<N;i++){
result[i] = (result[i-1] - u[i-1])/s[i-1];
}
if(debug){
printf("\nGPU(multiblocks) result is :\n");
for(int i = 0;i<3;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
printf("...\n");
for(int i = N-3;i<N;i++){
printf("V[%d] = %.3f \n",i,result[i]);
}
}
SRS = validation(s,u,N,result);
printf("\nGPU(multiblocks) Validation: \nSRS = %.3lf \n",SRS);
} |
ff98829eb585dc6464b24577100e654cbd5d4003.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "layer.h"
#include "hip/hip_runtime.h"
}
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + 2*pad)/stride;
int w = (in_w + 2*pad)/stride;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad;
int h_offset = -pad;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
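// NOTE (added commentary, not from the upstream source): each thread above handles one output cell
// (i, j) of a channel; the cell takes the maximum over the size x size input window whose top-left
// corner is (i*stride - pad, j*stride - pad), out-of-range taps read as -INFINITY, and the flat index
// of the winning input element is stored in `indexes` for the backward pass. With
// out_h = (in_h + 2*pad)/stride, e.g. in_h = 13, pad = 1, stride = 2 gives out_h = 7.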
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + 2*pad)/stride;
int w = (in_w + 2*pad)/stride;
int c = in_c;
int area = (size-1)/stride;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad;
int h_offset = -pad;
float d = 0;
int l, m;
for(l = -area; l < area+1; ++l){
for(m = -area; m < area+1; ++m){
int out_w = (j-w_offset)/stride + m;
int out_h = (i-h_offset)/stride + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
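// NOTE (added commentary, not from the upstream source): here one thread owns one *input* element, so
// no atomics are needed: with area = (size-1)/stride it scans the up to (2*area + 1)^2 output cells
// whose pooling window could contain this input position and accumulates delta[out_index] only where
// indexes[out_index] == index, i.e. only where this element actually won the max in the forward pass.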
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
size_t n = h*w*c*layer.batch;
hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, state.delta, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
| ff98829eb585dc6464b24577100e654cbd5d4003.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "layer.h"
#include "cuda.h"
}
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + 2*pad)/stride;
int w = (in_w + 2*pad)/stride;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad;
int h_offset = -pad;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + 2*pad)/stride;
int w = (in_w + 2*pad)/stride;
int c = in_c;
int area = (size-1)/stride;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad;
int h_offset = -pad;
float d = 0;
int l, m;
for(l = -area; l < area+1; ++l){
for(m = -area; m < area+1; ++m){
int out_w = (j-w_offset)/stride + m;
int out_h = (i-h_offset)/stride + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, state.delta, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
|
e80cab46eda1f0b7d1c11e970ac8025dade6d201.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/solver/amsbound.hpp>
#include "./mixed_precision_training.cuh"
#include "./weight_decay.cuh"
namespace nbla {
template <typename T>
__global__ void kernel_amsbound_update(const int num, T *theta, T *m, T *v,
T *v_hat, const T *g, float alpha_t,
const float beta1, const float beta2,
const float eps, const float final_lr,
const float gamma) {
NBLA_CUDA_KERNEL_LOOP(s, num) {
// Updating running mean and var.
m[s] = beta1 * m[s] + (1 - beta1) * g[s];
v[s] = beta2 * v[s] + (1 - beta2) * g[s] * g[s];
v_hat[s] = max(v_hat[s], v[s]);
T lower_bound = final_lr * (1 - 1 / (gamma + 1));
T upper_bound = final_lr * (1 + 1 / gamma);
T denom = std::sqrt(v_hat[s]) + eps;
T eta = min(upper_bound, max(alpha_t / denom, lower_bound));
// Update parameters.
theta[s] = theta[s] - eta * m[s];
}
}
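// NOTE (added commentary, not from the upstream source): per element the kernel above computes
//   m <- beta1*m + (1-beta1)*g,  v <- beta2*v + (1-beta2)*g^2,  v_hat <- max(v_hat, v),
//   eta = clip(alpha_t / (sqrt(v_hat) + eps), final_lr*(1 - 1/(gamma+1)), final_lr*(1 + 1/gamma)),
//   theta <- theta - eta * m,
// i.e. an Adam-style step whose effective learning rate is clamped into a band around final_lr
// (the AMSBound rule); alpha_t already includes the optional bias correction applied in update_impl().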
template <typename T>
void AMSBoundCuda<T>::update_impl(const string &key, VariablePtr param) {
cuda_set_device(std::stoi(this->ctx_.device_id));
Size_t size = param->size();
auto &state = this->states_.at(key);
auto &t = state.t;
const T *g = param->get_grad_pointer<T>(this->ctx_);
shared_ptr<Variable> mean_ = state.pstate["m"]; // To prevent compile error.
shared_ptr<Variable> var_ = state.pstate["v"]; // To prevent compile error.
shared_ptr<Variable> var_hat_ =
state.pstate["v_hat"]; // To prevent compile error.
T *m = mean_->cast_data_and_get_pointer<T>(this->ctx_);
T *v = var_->cast_data_and_get_pointer<T>(this->ctx_);
T *v_hat = var_hat_->cast_data_and_get_pointer<T>(this->ctx_);
T *theta = param->cast_data_and_get_pointer<T>(this->ctx_);
t = ::min(t + 1, std::numeric_limits<uint32_t>::max() - 1);
const T bias_correction = std::sqrt(1 - ::pow(this->beta2_, t)) /
(1 - ::pow(this->beta1_, t));
T alpha_t = this->alpha_ * (this->bias_correction_ ? bias_correction : 1);
T final_lr = this->final_lr_ * (this->alpha_ / this->init_alpha_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_amsbound_update, size, theta, m, v,
v_hat, g, alpha_t, this->beta1_, this->beta2_,
this->eps_, final_lr, this->gamma_);
}
NBLA_DEF_WEIGHT_DECAY(AMSBoundCuda, weight_decay_cuda);
NBLA_DEF_CHECK_INF_GRAD(AMSBoundCuda, check_inf_grad_cuda);
NBLA_DEF_CHECK_NAN_GRAD(AMSBoundCuda, check_nan_grad_cuda);
NBLA_DEF_CHECK_INF_OR_NAN_GRAD(AMSBoundCuda, check_inf_or_nan_grad_cuda);
NBLA_DEF_SCALE_GRAD(AMSBoundCuda, scale_grad_impl_cuda);
}
| e80cab46eda1f0b7d1c11e970ac8025dade6d201.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/solver/amsbound.hpp>
#include "./mixed_precision_training.cuh"
#include "./weight_decay.cuh"
namespace nbla {
template <typename T>
__global__ void kernel_amsbound_update(const int num, T *theta, T *m, T *v,
T *v_hat, const T *g, float alpha_t,
const float beta1, const float beta2,
const float eps, const float final_lr,
const float gamma) {
NBLA_CUDA_KERNEL_LOOP(s, num) {
// Updating running mean and var.
m[s] = beta1 * m[s] + (1 - beta1) * g[s];
v[s] = beta2 * v[s] + (1 - beta2) * g[s] * g[s];
v_hat[s] = max(v_hat[s], v[s]);
T lower_bound = final_lr * (1 - 1 / (gamma + 1));
T upper_bound = final_lr * (1 + 1 / gamma);
T denom = std::sqrt(v_hat[s]) + eps;
T eta = min(upper_bound, max(alpha_t / denom, lower_bound));
// Update parameters.
theta[s] = theta[s] - eta * m[s];
}
}
template <typename T>
void AMSBoundCuda<T>::update_impl(const string &key, VariablePtr param) {
cuda_set_device(std::stoi(this->ctx_.device_id));
Size_t size = param->size();
auto &state = this->states_.at(key);
auto &t = state.t;
const T *g = param->get_grad_pointer<T>(this->ctx_);
shared_ptr<Variable> mean_ = state.pstate["m"]; // To prevent compile error.
shared_ptr<Variable> var_ = state.pstate["v"]; // To prevent compile error.
shared_ptr<Variable> var_hat_ =
state.pstate["v_hat"]; // To prevent compile error.
T *m = mean_->cast_data_and_get_pointer<T>(this->ctx_);
T *v = var_->cast_data_and_get_pointer<T>(this->ctx_);
T *v_hat = var_hat_->cast_data_and_get_pointer<T>(this->ctx_);
T *theta = param->cast_data_and_get_pointer<T>(this->ctx_);
t = std::min(t + 1, std::numeric_limits<uint32_t>::max() - 1);
const T bias_correction = std::sqrt(1 - std::pow(this->beta2_, t)) /
(1 - std::pow(this->beta1_, t));
T alpha_t = this->alpha_ * (this->bias_correction_ ? bias_correction : 1);
T final_lr = this->final_lr_ * (this->alpha_ / this->init_alpha_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_amsbound_update, size, theta, m, v,
v_hat, g, alpha_t, this->beta1_, this->beta2_,
this->eps_, final_lr, this->gamma_);
}
NBLA_DEF_WEIGHT_DECAY(AMSBoundCuda, weight_decay_cuda);
NBLA_DEF_CHECK_INF_GRAD(AMSBoundCuda, check_inf_grad_cuda);
NBLA_DEF_CHECK_NAN_GRAD(AMSBoundCuda, check_nan_grad_cuda);
NBLA_DEF_CHECK_INF_OR_NAN_GRAD(AMSBoundCuda, check_inf_or_nan_grad_cuda);
NBLA_DEF_SCALE_GRAD(AMSBoundCuda, scale_grad_impl_cuda);
}
|
d1d8719e7f6d0d0412306e2fea837454ca795bd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void histogram ( unsigned char *utime, unsigned int* histo, size_t n)
{
__shared__ unsigned int lhisto[512];
lhisto[threadIdx.x] = 0;
__syncthreads ();
int i = threadIdx.x + blockIdx.x*blockDim.x;
for (; i < n/2; i += blockDim.x*gridDim.x)
atomicAdd (lhisto+utime[i], 1);
for (; i < n; i += blockDim.x*gridDim.x)
atomicAdd ((lhisto+256)+utime[i], 1);
__syncthreads ();
// MUST run with 512 threads for this global accumulation to work
atomicAdd ( histo+threadIdx.x, lhisto[threadIdx.x]);
} | d1d8719e7f6d0d0412306e2fea837454ca795bd1.cu | #include "includes.h"
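// NOTE (added commentary, not from the upstream source): the kernel below builds a 512-entry shared
// histogram (bins 0-255 for the first half of the input, 256-511 for the second half) and flushes it
// with one atomicAdd per thread, so it must be launched with exactly 512 threads per block, e.g.
// (hypothetical sizes): histogram<<<numBlocks, 512>>>(d_utime, d_histo, n); where d_histo holds
// 512 zero-initialised unsigned ints.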
__global__ void histogram ( unsigned char *utime, unsigned int* histo, size_t n)
{
__shared__ unsigned int lhisto[512];
lhisto[threadIdx.x] = 0;
__syncthreads ();
int i = threadIdx.x + blockIdx.x*blockDim.x;
for (; i < n/2; i += blockDim.x*gridDim.x)
atomicAdd (lhisto+utime[i], 1);
for (; i < n; i += blockDim.x*gridDim.x)
atomicAdd ((lhisto+256)+utime[i], 1);
__syncthreads ();
// MUST run with 512 threads for this global accumulation to work
atomicAdd ( histo+threadIdx.x, lhisto[threadIdx.x]);
} |
e404568c97e628be94513ea2b60a7e1731a0b794.hip | // !!! This is a file automatically generated by hipify!!!
#include <memory>
#include <string.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include "../include/NVStrings.h"
//
// cd ../build
// nvcc -w -std=c++11 --expt-extended-lambda -gencode arch=compute_70,code=sm_70 ../tests/csv.cu -L. -lNVStrings -o csv --linker-options -rpath,.:
//
// csv file contents in device memory
void* d_fileContents = 0;
// return a vector of DString's we wish to process
std::pair<const char*,size_t>* setupTest(int& linesCount, int column)
{
//FILE* fp = fopen("../../data/1420-rows.csv", "rb");
//FILE* fp = fopen("../../data/7584-rows.csv", "rb");
FILE* fp = fopen("../../data/36634-rows.csv", "rb");
if( !fp )
{
printf("missing csv file\n");
return 0;
}
fseek(fp, 0, SEEK_END);
int fileSize = (int)ftell(fp);
fseek(fp, 0, SEEK_SET);
printf("File size = %d bytes\n", fileSize);
if( fileSize < 2 )
{
fclose(fp);
return 0;
}
// load file into memory
int contentsSize = fileSize+2;
char* contents = new char[contentsSize+2];
fread(contents, 1, fileSize, fp);
contents[fileSize] = '\r'; // line terminate
contents[fileSize+1] = 0; // and null-terminate
fclose(fp);
// find lines -- compute offsets vector values
thrust::host_vector<int> lineOffsets;
char* ptr = contents;
while( *ptr )
{
char ch = *ptr;
if( ch=='\r' )
{
*ptr = 0;
while(ch && (ch < ' ')) ch = *(++ptr);
lineOffsets.push_back((int)(ptr - contents));
continue;
}
++ptr;
}
linesCount = (int)lineOffsets.size();
printf("Found %d lines\n",linesCount);
// copy file contents into device memory
char* d_contents = 0;
hipMalloc(&d_contents,contentsSize);
hipMemcpy(d_contents,contents,contentsSize,hipMemcpyHostToDevice);
    delete [] contents; // done with the host data (allocated with new[])
// copy offsets vector into device memory
thrust::device_vector<int> offsets(lineOffsets);
int* d_offsets = offsets.data().get();
// build empty output vector of DString*'s
--linesCount; // removed header line
std::pair<const char*,size_t>* d_column1 = 0;
hipMalloc(&d_column1, linesCount * sizeof(std::pair<const char*,size_t>));
// create a vector of DStrings using the first column of each line
thrust::for_each_n(thrust::device,
thrust::make_counting_iterator<size_t>(0), linesCount,
[d_contents, d_offsets, column, d_column1] __device__(size_t idx){
// probably some more elegant way to do this
int lineOffset = d_offsets[idx];
int lineLength = d_offsets[idx+1] - lineOffset;
d_column1[idx].first = (const char*)0;
if( lineLength < 1 )
return;
char* line = &(d_contents[lineOffset]);
char* stringStart = line;
int columnLength = 0, col = 0;
for( int i=0; (i < lineLength); ++i )
{
if( line[i] && line[i] != ',' )
{
++columnLength;
continue;
}
if( col++ >= column )
break;
stringStart = line + i + 1;
columnLength = 0;
}
if( columnLength==0 )
return;
// add string to vector array
d_column1[idx].first = (const char*)stringStart;
d_column1[idx].second = (size_t)columnLength;
});
//
hipDeviceSynchronize();
d_fileContents = d_contents;
return d_column1;
}
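// NOTE (added commentary, not from the upstream source): the (pointer, length) pairs built above point
// straight into d_contents, so in main() NVStrings::create_from_index() assembles the strings column
// entirely from device memory in this test; d_fileContents and the pair array are freed only after the
// NVStrings instance has been created.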
int main( int argc, char** argv )
{
//NVStrings::initLibrary();
int count = 0;
std::pair<const char*,size_t>* column1 = setupTest(count,2);
if( column1==0 )
return -1;
NVStrings* dstrs = NVStrings::create_from_index( column1, count );
hipFree(d_fileContents); // csv data not needed once dstrs is created
hipFree(column1); // string index data has done its job as well
// simple strings op
int* rtn = new int[count];
dstrs->len(rtn,false);
for( int idx=0; idx < count; ++idx )
printf("%d,",rtn[idx]);
printf("\n");
    delete [] rtn;
// show column values
dstrs->print();
NVStrings::destroy(dstrs);
return 0;
} | e404568c97e628be94513ea2b60a7e1731a0b794.cu | #include <memory>
#include <string.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include "../include/NVStrings.h"
//
// cd ../build
// nvcc -w -std=c++11 --expt-extended-lambda -gencode arch=compute_70,code=sm_70 ../tests/csv.cu -L. -lNVStrings -o csv --linker-options -rpath,.:
//
// csv file contents in device memory
void* d_fileContents = 0;
// return a vector of DString's we wish to process
std::pair<const char*,size_t>* setupTest(int& linesCount, int column)
{
//FILE* fp = fopen("../../data/1420-rows.csv", "rb");
//FILE* fp = fopen("../../data/7584-rows.csv", "rb");
FILE* fp = fopen("../../data/36634-rows.csv", "rb");
if( !fp )
{
printf("missing csv file\n");
return 0;
}
fseek(fp, 0, SEEK_END);
int fileSize = (int)ftell(fp);
fseek(fp, 0, SEEK_SET);
printf("File size = %d bytes\n", fileSize);
if( fileSize < 2 )
{
fclose(fp);
return 0;
}
// load file into memory
int contentsSize = fileSize+2;
char* contents = new char[contentsSize+2];
fread(contents, 1, fileSize, fp);
contents[fileSize] = '\r'; // line terminate
contents[fileSize+1] = 0; // and null-terminate
fclose(fp);
// find lines -- compute offsets vector values
thrust::host_vector<int> lineOffsets;
char* ptr = contents;
while( *ptr )
{
char ch = *ptr;
if( ch=='\r' )
{
*ptr = 0;
while(ch && (ch < ' ')) ch = *(++ptr);
lineOffsets.push_back((int)(ptr - contents));
continue;
}
++ptr;
}
linesCount = (int)lineOffsets.size();
printf("Found %d lines\n",linesCount);
// copy file contents into device memory
char* d_contents = 0;
cudaMalloc(&d_contents,contentsSize);
cudaMemcpy(d_contents,contents,contentsSize,cudaMemcpyHostToDevice);
    delete [] contents; // done with the host data (allocated with new[])
// copy offsets vector into device memory
thrust::device_vector<int> offsets(lineOffsets);
int* d_offsets = offsets.data().get();
// build empty output vector of DString*'s
--linesCount; // removed header line
std::pair<const char*,size_t>* d_column1 = 0;
cudaMalloc(&d_column1, linesCount * sizeof(std::pair<const char*,size_t>));
// create a vector of DStrings using the first column of each line
thrust::for_each_n(thrust::device,
thrust::make_counting_iterator<size_t>(0), linesCount,
[d_contents, d_offsets, column, d_column1] __device__(size_t idx){
// probably some more elegant way to do this
int lineOffset = d_offsets[idx];
int lineLength = d_offsets[idx+1] - lineOffset;
d_column1[idx].first = (const char*)0;
if( lineLength < 1 )
return;
char* line = &(d_contents[lineOffset]);
char* stringStart = line;
int columnLength = 0, col = 0;
for( int i=0; (i < lineLength); ++i )
{
if( line[i] && line[i] != ',' )
{
++columnLength;
continue;
}
if( col++ >= column )
break;
stringStart = line + i + 1;
columnLength = 0;
}
if( columnLength==0 )
return;
// add string to vector array
d_column1[idx].first = (const char*)stringStart;
d_column1[idx].second = (size_t)columnLength;
});
//
cudaThreadSynchronize();
d_fileContents = d_contents;
return d_column1;
}
int main( int argc, char** argv )
{
//NVStrings::initLibrary();
int count = 0;
std::pair<const char*,size_t>* column1 = setupTest(count,2);
if( column1==0 )
return -1;
NVStrings* dstrs = NVStrings::create_from_index( column1, count );
cudaFree(d_fileContents); // csv data not needed once dstrs is created
cudaFree(column1); // string index data has done its job as well
// simple strings op
int* rtn = new int[count];
dstrs->len(rtn,false);
for( int idx=0; idx < count; ++idx )
printf("%d,",rtn[idx]);
printf("\n");
    delete [] rtn;
// show column values
dstrs->print();
NVStrings::destroy(dstrs);
return 0;
} |
53ef09fd2befe2c41c4c7caf9bd7ea1cd1d026b5.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_per_chan_hswish.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
| 53ef09fd2befe2c41c4c7caf9bd7ea1cd1d026b5.cu | /**
* \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_per_chan_hswish.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
7560169536067c50c21fe6b2a4ab03d80487642f.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
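// NOTE (added commentary/assumption, not part of the generated file): the three GemmShape<> aliases
// above are the M x N x K tile sizes processed per threadblock, per warp and per instruction
// respectively; GemmShape<1, 1, 4> corresponds to the dp4a-based int8 SIMT path selected by
// OpClassSimt / Sm61 in the Convolution type below.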
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 7560169536067c50c21fe6b2a4ab03d80487642f.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
f5a7c24075746c70670ecf3a14a8b3b942146ba4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************/
// CUDA implementation using constant memory + 3D texture memory + GL interop
/****************************************/
#include "cuda_utils.h"
#include "Renderer.h"
static __constant__ Raycaster raycaster;
//static __constant__ float4 transfer_fn[TF_SIZE];
hipArray *volume_array = 0;
texture<unsigned char, 3, hipReadModeNormalizedFloat> volume_texture;
hipArray *transfer_fn_array = 0;
texture<float4, 1, hipReadModeElementType> transfer_fn_texture;
hipArray *esl_array = 0;
texture<esl_type, 2, hipReadModeElementType> esl_texture;
GPURenderer4::GPURenderer4(Raycaster r) {
set_window_buffer(r.view);
set_transfer_fn(r);
set_volume(r.volume);
}
GPURenderer4::~GPURenderer4() {
cuda_safe_call(hipUnbindTexture(volume_texture));
cuda_safe_call(hipFreeArray(volume_array));
cuda_safe_call(hipUnbindTexture(transfer_fn_texture));
cuda_safe_call(hipFreeArray(transfer_fn_array));
cuda_safe_call(hipUnbindTexture(esl_texture));
cuda_safe_call(hipFreeArray(esl_array));
}
__device__ bool sample_data_esl_texture(float3 pos) { // additional function for esl texture fetching
esl_type sample = tex2D(esl_texture,
map_float_int((pos.y + 1)*0.5f, raycaster.volume.dims.y) / raycaster.esl_block_dims,
map_float_int((pos.z + 1)*0.5f, raycaster.volume.dims.z) / raycaster.esl_block_dims);
unsigned short index = map_float_int((pos.x + 1)*0.5f, raycaster.volume.dims.x) / raycaster.esl_block_dims;
return ((sample & (1 << index)) != 0);
}
__device__ void shade_texture(float4 *color, float3 pos, float sample) { // additional function for shading
float3 light_dir = vector_normalize(raycaster.view.light_pos - pos);
float sample_l = tex3D(volume_texture,
(pos.x + light_dir.x * 0.01f + 1)*0.5f,
(pos.y + light_dir.y * 0.01f + 1)*0.5f,
(pos.z + light_dir.z * 0.01f + 1)*0.5f);
float diffuse_light = (sample_l - sample) * raycaster.light_kd;
color->x += diffuse_light;
color->y += diffuse_light;
color->z += diffuse_light;
}
static __global__ void render_ray(uchar4 dev_buffer[]) {
short2 pos = {blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y};
if ((pos.x >= raycaster.view.dims.x) || (pos.y >= raycaster.view.dims.y)) // terminate empty thread, when view dimensions are not divisible by 16
return;
float3 origin, direction;
float2 k_range;
raycaster.view.get_ray(pos, &origin, &direction);
if (!raycaster.intersect(origin, direction, &k_range))
return;
float3 pt = origin + (direction * k_range.x);
while(k_range.x <= k_range.y) { // empty space leaping loop
if (raycaster.esl && sample_data_esl_texture(pt))
raycaster.leap_empty_space(pt, direction, &k_range);
else
break;
k_range.x += raycaster.ray_step;
pt = origin + (direction * k_range.x);
}
if (k_range.x > k_range.y)
return;
float4 color_acc = {0, 0, 0, 0};
while (k_range.x <= k_range.y) { // color accumulation loop
float sample = tex3D(volume_texture, (pt.x + 1)*0.5f, (pt.y + 1)*0.5f, (pt.z + 1)*0.5f);
float4 color_cur = tex1D(transfer_fn_texture, sample);
if (color_cur.w > 0.05f && raycaster.light_kd > 0.01f)
shade_texture(&color_cur, pt, sample); // shading
		color_acc = color_acc + (color_cur * (1 - color_acc.w)); // transparency formula: C_out = C_in + C * (1-alpha_in); alpha_out = alpha_in + alpha * (1-alpha_in)
if (color_acc.w > raycaster.ray_threshold) // early ray termination
break;
k_range.x += raycaster.ray_step;
pt = origin + (direction * k_range.x);
}
raycaster.write_color(color_acc, pos, dev_buffer);
}
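// NOTE (added commentary, not from the upstream source): the accumulation loop above is standard
// front-to-back alpha compositing, C_acc <- C_acc + C_sample*(1 - alpha_acc) and
// alpha_acc <- alpha_acc + alpha_sample*(1 - alpha_acc); once alpha_acc exceeds raycaster.ray_threshold
// later samples can no longer change the pixel noticeably, so the ray terminates early. The first loop
// only advances the ray (empty-space leaping) and accumulates no colour.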
void GPURenderer4::set_transfer_fn(Raycaster r) {
if (transfer_fn_array == 0) {
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float4>();
cuda_safe_malloc(hipMallocArray(&transfer_fn_array, &channel_desc, TF_SIZE, 1));
transfer_fn_texture.filterMode = hipFilterModeLinear;
transfer_fn_texture.normalized = true;
transfer_fn_texture.addressMode[0] = hipAddressModeClamp;
cuda_safe_call(hipBindTextureToArray(transfer_fn_texture, transfer_fn_array, channel_desc));
}
cuda_safe_call(hipMemcpyToArray(transfer_fn_array, 0, 0, r.transfer_fn, TF_SIZE * sizeof(float4), hipMemcpyHostToDevice));
//cuda_safe_call(hipMemcpyToSymbol(transfer_fn, r.transfer_fn, TF_SIZE * sizeof(float4)));
if (esl_array == 0) {
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<esl_type>();
cuda_safe_malloc(hipMallocArray(&esl_array, &channel_desc, ESL_VOLUME_DIMS, ESL_VOLUME_DIMS));
esl_texture.normalized = false;
esl_texture.filterMode = hipFilterModePoint;
esl_texture.addressMode[0] = hipAddressModeClamp;
esl_texture.addressMode[1] = hipAddressModeClamp;
cuda_safe_call(hipBindTextureToArray(esl_texture, esl_array, channel_desc));
}
cuda_safe_call(hipMemcpyToArray(esl_array, 0, 0, r.esl_volume, ESL_VOLUME_SIZE * sizeof(esl_type), hipMemcpyHostToDevice));
}
int GPURenderer4::set_volume(Model volume) {
if (volume_array != 0) {
cuda_safe_call(hipUnbindTexture(volume_texture));
cuda_safe_call(hipFreeArray(volume_array));
volume_array = 0;
}
if (volume.data == NULL)
return 1;
hipExtent volume_dims = {volume.dims.x, volume.dims.y, volume.dims.z};
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<unsigned char>();
cuda_safe_malloc(hipMalloc3DArray(&volume_array, &channel_desc, volume_dims));
if (hipGetLastError() == hipErrorMemoryAllocation)
return 1;
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr(volume.data, volume_dims.width*sizeof(unsigned char), volume_dims.width, volume_dims.height);
copyParams.dstArray = volume_array;
copyParams.extent = volume_dims;
copyParams.kind = hipMemcpyHostToDevice;
cuda_safe_call(hipMemcpy3D(©Params));
volume_texture.normalized = true;
volume_texture.filterMode = hipFilterModeLinear; // trilinear interpolation
volume_texture.addressMode[0] = hipAddressModeClamp;
volume_texture.addressMode[1] = hipAddressModeClamp;
volume_texture.addressMode[2] = hipAddressModeClamp;
cuda_safe_call(hipBindTextureToArray(volume_texture, volume_array, channel_desc));
return 0;
}
int GPURenderer4::render_volume(uchar4 *buffer, Raycaster r) {
if (volume_array == 0 || transfer_fn_array == 0 || esl_array == 0 || buffer == NULL)
return 1;
cuda_safe_call(hipMemset(buffer, 0, dev_buffer_size));
cuda_safe_call(hipMemcpyToSymbol(raycaster, &r, sizeof(Raycaster)));
hipLaunchKernelGGL(( render_ray), dim3(num_blocks), dim3(THREADS_PER_BLOCK), 0, 0, buffer);
cuda_safe_check();
return 0;
} | f5a7c24075746c70670ecf3a14a8b3b942146ba4.cu | /****************************************/
// CUDA implementation using constant memory + 3D texture memory + GL interop
/****************************************/
#include "cuda_utils.h"
#include "Renderer.h"
static __constant__ Raycaster raycaster;
//static __constant__ float4 transfer_fn[TF_SIZE];
cudaArray *volume_array = 0;
texture<unsigned char, 3, cudaReadModeNormalizedFloat> volume_texture;
cudaArray *transfer_fn_array = 0;
texture<float4, 1, cudaReadModeElementType> transfer_fn_texture;
cudaArray *esl_array = 0;
texture<esl_type, 2, cudaReadModeElementType> esl_texture;
GPURenderer4::GPURenderer4(Raycaster r) {
set_window_buffer(r.view);
set_transfer_fn(r);
set_volume(r.volume);
}
GPURenderer4::~GPURenderer4() {
cuda_safe_call(cudaUnbindTexture(volume_texture));
cuda_safe_call(cudaFreeArray(volume_array));
cuda_safe_call(cudaUnbindTexture(transfer_fn_texture));
cuda_safe_call(cudaFreeArray(transfer_fn_array));
cuda_safe_call(cudaUnbindTexture(esl_texture));
cuda_safe_call(cudaFreeArray(esl_array));
}
__device__ bool sample_data_esl_texture(float3 pos) { // additional function for esl texture fetching
esl_type sample = tex2D(esl_texture,
map_float_int((pos.y + 1)*0.5f, raycaster.volume.dims.y) / raycaster.esl_block_dims,
map_float_int((pos.z + 1)*0.5f, raycaster.volume.dims.z) / raycaster.esl_block_dims);
unsigned short index = map_float_int((pos.x + 1)*0.5f, raycaster.volume.dims.x) / raycaster.esl_block_dims;
return ((sample & (1 << index)) != 0);
}
__device__ void shade_texture(float4 *color, float3 pos, float sample) { // additional function for shading
float3 light_dir = vector_normalize(raycaster.view.light_pos - pos);
float sample_l = tex3D(volume_texture,
(pos.x + light_dir.x * 0.01f + 1)*0.5f,
(pos.y + light_dir.y * 0.01f + 1)*0.5f,
(pos.z + light_dir.z * 0.01f + 1)*0.5f);
float diffuse_light = (sample_l - sample) * raycaster.light_kd;
color->x += diffuse_light;
color->y += diffuse_light;
color->z += diffuse_light;
}
static __global__ void render_ray(uchar4 dev_buffer[]) {
short2 pos = {blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y};
if ((pos.x >= raycaster.view.dims.x) || (pos.y >= raycaster.view.dims.y)) // terminate empty thread, when view dimensions are not divisible by 16
return;
float3 origin, direction;
float2 k_range;
raycaster.view.get_ray(pos, &origin, &direction);
if (!raycaster.intersect(origin, direction, &k_range))
return;
float3 pt = origin + (direction * k_range.x);
while(k_range.x <= k_range.y) { // empty space leaping loop
if (raycaster.esl && sample_data_esl_texture(pt))
raycaster.leap_empty_space(pt, direction, &k_range);
else
break;
k_range.x += raycaster.ray_step;
pt = origin + (direction * k_range.x);
}
if (k_range.x > k_range.y)
return;
float4 color_acc = {0, 0, 0, 0};
while (k_range.x <= k_range.y) { // color accumulation loop
float sample = tex3D(volume_texture, (pt.x + 1)*0.5f, (pt.y + 1)*0.5f, (pt.z + 1)*0.5f);
float4 color_cur = tex1D(transfer_fn_texture, sample);
if (color_cur.w > 0.05f && raycaster.light_kd > 0.01f)
shade_texture(&color_cur, pt, sample); // shading
		color_acc = color_acc + (color_cur * (1 - color_acc.w)); // transparency formula: C_out = C_in + C * (1-alpha_in); alpha_out = alpha_in + alpha * (1-alpha_in)
if (color_acc.w > raycaster.ray_threshold) // early ray termination
break;
k_range.x += raycaster.ray_step;
pt = origin + (direction * k_range.x);
}
raycaster.write_color(color_acc, pos, dev_buffer);
}
void GPURenderer4::set_transfer_fn(Raycaster r) {
if (transfer_fn_array == 0) {
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float4>();
cuda_safe_malloc(cudaMallocArray(&transfer_fn_array, &channel_desc, TF_SIZE, 1));
transfer_fn_texture.filterMode = cudaFilterModeLinear;
transfer_fn_texture.normalized = true;
transfer_fn_texture.addressMode[0] = cudaAddressModeClamp;
cuda_safe_call(cudaBindTextureToArray(transfer_fn_texture, transfer_fn_array, channel_desc));
}
cuda_safe_call(cudaMemcpyToArray(transfer_fn_array, 0, 0, r.transfer_fn, TF_SIZE * sizeof(float4), cudaMemcpyHostToDevice));
//cuda_safe_call(cudaMemcpyToSymbol(transfer_fn, r.transfer_fn, TF_SIZE * sizeof(float4)));
if (esl_array == 0) {
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<esl_type>();
cuda_safe_malloc(cudaMallocArray(&esl_array, &channel_desc, ESL_VOLUME_DIMS, ESL_VOLUME_DIMS));
esl_texture.normalized = false;
esl_texture.filterMode = cudaFilterModePoint;
esl_texture.addressMode[0] = cudaAddressModeClamp;
esl_texture.addressMode[1] = cudaAddressModeClamp;
cuda_safe_call(cudaBindTextureToArray(esl_texture, esl_array, channel_desc));
}
cuda_safe_call(cudaMemcpyToArray(esl_array, 0, 0, r.esl_volume, ESL_VOLUME_SIZE * sizeof(esl_type), cudaMemcpyHostToDevice));
}
int GPURenderer4::set_volume(Model volume) {
if (volume_array != 0) {
cuda_safe_call(cudaUnbindTexture(volume_texture));
cuda_safe_call(cudaFreeArray(volume_array));
volume_array = 0;
}
if (volume.data == NULL)
return 1;
cudaExtent volume_dims = {volume.dims.x, volume.dims.y, volume.dims.z};
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<unsigned char>();
cuda_safe_malloc(cudaMalloc3DArray(&volume_array, &channel_desc, volume_dims));
if (cudaGetLastError() == cudaErrorMemoryAllocation)
return 1;
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr(volume.data, volume_dims.width*sizeof(unsigned char), volume_dims.width, volume_dims.height);
copyParams.dstArray = volume_array;
copyParams.extent = volume_dims;
copyParams.kind = cudaMemcpyHostToDevice;
cuda_safe_call(cudaMemcpy3D(©Params));
volume_texture.normalized = true;
volume_texture.filterMode = cudaFilterModeLinear; // trilinear interpolation
volume_texture.addressMode[0] = cudaAddressModeClamp;
volume_texture.addressMode[1] = cudaAddressModeClamp;
volume_texture.addressMode[2] = cudaAddressModeClamp;
cuda_safe_call(cudaBindTextureToArray(volume_texture, volume_array, channel_desc));
return 0;
}
int GPURenderer4::render_volume(uchar4 *buffer, Raycaster r) {
if (volume_array == 0 || transfer_fn_array == 0 || esl_array == 0 || buffer == NULL)
return 1;
cuda_safe_call(cudaMemset(buffer, 0, dev_buffer_size));
cuda_safe_call(cudaMemcpyToSymbol(raycaster, &r, sizeof(Raycaster)));
render_ray<<<num_blocks, THREADS_PER_BLOCK>>>(buffer);
cuda_safe_check();
return 0;
} |
8566c23b2b67a052ad54ad45e40d9424798f8e65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#define THREADS 1024
#define BLOCKS(N) (N + THREADS - 1) / THREADS
template <typename scalar_t> struct BasisForward {
static inline __device__ scalar_t linear(scalar_t v, int64_t k_mod) {
return 1 - v - k_mod + 2 * v * k_mod;
}
static inline __device__ scalar_t quadratic(scalar_t v, int64_t k_mod) {
if (k_mod == 0)
return 0.5 * v * v - v + 0.5;
else if (k_mod == 1)
return -v * v + v + 0.5;
else
return 0.5 * v * v;
}
static inline __device__ scalar_t cubic(scalar_t v, int64_t k_mod) {
if (k_mod == 0)
return (1 - v) * (1 - v) * (1 - v) / 6.0;
else if (k_mod == 1)
return (3 * v * v * v - 6 * v * v + 4) / 6;
else if (k_mod == 2)
return (-3 * v * v * v + 3 * v * v + 3 * v + 1) / 6;
else
return v * v * v / 6;
}
};
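// Worked example for the basis functions above, using an arbitrary v = 0.3:
//   linear:    k_mod = 0 -> 0.7,   k_mod = 1 -> 0.3                       (sum = 1)
//   quadratic: k_mod = 0 -> 0.245, k_mod = 1 -> 0.71, k_mod = 2 -> 0.045  (sum = 1)
// For any v in [0, 1) the (M + 1) values form a partition of unity, so the
// D-dimensional products enumerated over the S = (M + 1)^D entries in
// BASIS_FORWARD_KERNEL below also sum to 1.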
#define BASIS_FORWARD(M, PSEUDO, KERNEL_SIZE, IS_OPEN_SPLINE, KERNEL_NAME) \
[&]() -> std::tuple<at::Tensor, at::Tensor> { \
auto E = PSEUDO.size(0); \
auto S = (int64_t)(pow(M + 1, KERNEL_SIZE.size(0)) + 0.5); \
auto basis = at::empty({E, S}, PSEUDO.options()); \
auto weight_index = at::empty({E, S}, KERNEL_SIZE.options()); \
\
AT_DISPATCH_FLOATING_TYPES(PSEUDO.type(), "basis_forward_##M", [&] { \
hipLaunchKernelGGL(( KERNEL_NAME<scalar_t>), dim3(BLOCKS(basis.numel())), dim3(THREADS), 0, 0, \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(basis), \
at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(PSEUDO), \
KERNEL_SIZE.data<int64_t>(), IS_OPEN_SPLINE.data<uint8_t>(), \
basis.numel()); \
}); \
\
return std::make_tuple(basis, weight_index); \
}()
#define BASIS_FORWARD_KERNEL(M, BASIS, WEIGHT_INDEX, PSEUDO, KERNEL_SIZE, \
IS_OPEN_SPLINE, NUMEL, CODE) \
[&] { \
const size_t index = blockIdx.x * blockDim.x + threadIdx.x; \
const size_t stride = blockDim.x * gridDim.x; \
for (ptrdiff_t i = index; i < NUMEL; i += stride) { \
int64_t e = i / BASIS.sizes[1], s = i % BASIS.sizes[1]; \
int64_t k = s, wi = 0, wi_offset = 1; \
scalar_t b = 1; \
\
for (ptrdiff_t d = 0; d < PSEUDO.sizes[1]; d++) { \
auto k_mod = k % (M + 1); \
k /= M + 1; \
\
auto v = PSEUDO.data[e * PSEUDO.strides[0] + d * PSEUDO.strides[1]]; \
v *= KERNEL_SIZE[d] - M * IS_OPEN_SPLINE[d]; \
\
wi += (((int64_t)v + k_mod) % KERNEL_SIZE[d]) * wi_offset; \
wi_offset *= KERNEL_SIZE[d]; \
\
v -= floor(v); \
v = CODE; \
b *= v; \
} \
\
BASIS.data[i] = b; \
WEIGHT_INDEX.data[i] = wi; \
} \
}()
template <typename scalar_t>
__global__ void
linear_fw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> basis,
at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline, size_t numel) {
BASIS_FORWARD_KERNEL(1, basis, weight_index, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::linear(v, k_mod));
}
std::tuple<at::Tensor, at::Tensor> linear_fw_cuda(at::Tensor pseudo,
at::Tensor kernel_size,
at::Tensor is_open_spline) {
return BASIS_FORWARD(1, pseudo, kernel_size, is_open_spline,
linear_fw_kernel);
}
template <typename scalar_t>
__global__ void
quadratic_fw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> basis,
at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline,
size_t numel) {
BASIS_FORWARD_KERNEL(2, basis, weight_index, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::quadratic(v, k_mod));
}
std::tuple<at::Tensor, at::Tensor>
quadratic_fw_cuda(at::Tensor pseudo, at::Tensor kernel_size,
at::Tensor is_open_spline) {
return BASIS_FORWARD(2, pseudo, kernel_size, is_open_spline,
quadratic_fw_kernel);
}
template <typename scalar_t>
__global__ void
cubic_fw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> basis,
at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline, size_t numel) {
BASIS_FORWARD_KERNEL(3, basis, weight_index, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::cubic(v, k_mod));
}
std::tuple<at::Tensor, at::Tensor> cubic_fw_cuda(at::Tensor pseudo,
at::Tensor kernel_size,
at::Tensor is_open_spline) {
return BASIS_FORWARD(3, pseudo, kernel_size, is_open_spline, cubic_fw_kernel);
}
template <typename scalar_t> struct BasisBackward {
static inline __device__ scalar_t linear(scalar_t v, int64_t k_mod) {
return 2 * k_mod - 1;
}
static inline __device__ scalar_t quadratic(scalar_t v, int64_t k_mod) {
if (k_mod == 0)
return v - 1;
else if (k_mod == 1)
return -2 * v + 1;
else
return v;
}
static inline __device__ scalar_t cubic(scalar_t v, int64_t k_mod) {
if (k_mod == 0)
return (-v * v + 2 * v - 1) / 2;
else if (k_mod == 1)
return (3 * v * v - 4 * v) / 2;
else if (k_mod == 2)
return (-3 * v * v + 2 * v + 1) / 2;
else
return v * v / 2;
}
};
#define BASIS_BACKWARD(M, GRAD_BASIS, PSEUDO, KERNEL_SIZE, IS_OPEN_SPLINE, \
KERNEL_NAME) \
[&]() -> at::Tensor { \
auto E = PSEUDO.size(0); \
auto D = PSEUDO.size(1); \
auto grad_pseudo = at::empty({E, D}, PSEUDO.options()); \
\
AT_DISPATCH_FLOATING_TYPES(GRAD_BASIS.type(), "basis_backward_##M", [&] { \
hipLaunchKernelGGL(( KERNEL_NAME<scalar_t>), dim3(BLOCKS(grad_pseudo.numel())), dim3(THREADS), 0, 0, \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_pseudo), \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(GRAD_BASIS), \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(PSEUDO), \
KERNEL_SIZE.data<int64_t>(), IS_OPEN_SPLINE.data<uint8_t>(), \
grad_pseudo.numel()); \
}); \
\
return grad_pseudo; \
}()
#define BASIS_BACKWARD_KERNEL(M, GRAD_PSEUDO, GRAD_BASIS, PSEUDO, KERNEL_SIZE, \
IS_OPEN_SPLINE, NUMEL, CODE, GRAD_CODE) \
[&] { \
const size_t index = blockIdx.x * blockDim.x + threadIdx.x; \
const size_t stride = blockDim.x * gridDim.x; \
for (ptrdiff_t i = index; i < NUMEL; i += stride) { \
int64_t e = i / GRAD_PSEUDO.sizes[1], d = i % GRAD_PSEUDO.sizes[1]; \
scalar_t g = 0, tmp; \
\
for (ptrdiff_t s = 0; s < GRAD_BASIS.sizes[1]; s++) { \
auto k_mod = (s / (int64_t)(pow(M + 1, d) + 0.5)) % (M + 1); \
auto v = PSEUDO.data[e * PSEUDO.strides[0] + d * PSEUDO.strides[1]]; \
v *= KERNEL_SIZE[d] - M * IS_OPEN_SPLINE[d]; \
v -= floor(v); \
v = GRAD_CODE; \
tmp = v; \
\
for (ptrdiff_t d_it = 1; d_it < GRAD_PSEUDO.sizes[1]; d_it++) { \
auto d_new = d_it - (d >= d_it); \
k_mod = (s / (int64_t)(pow(M + 1, d_new) + 0.5)) % (M + 1); \
v = PSEUDO.data[e * pseudo.strides[0] + d_new * PSEUDO.strides[1]]; \
v *= KERNEL_SIZE[d_new] - M * IS_OPEN_SPLINE[d_new]; \
v -= floor(v); \
v = CODE; \
tmp *= v; \
} \
g += tmp * \
GRAD_BASIS \
.data[e * GRAD_BASIS.strides[0] + s * GRAD_BASIS.strides[1]]; \
} \
g *= KERNEL_SIZE[d] - M * IS_OPEN_SPLINE[d]; \
GRAD_PSEUDO.data[i] = g; \
} \
}()
template <typename scalar_t>
__global__ void
linear_bw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_pseudo,
at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_basis,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline, size_t numel) {
BASIS_BACKWARD_KERNEL(1, grad_pseudo, grad_basis, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::linear(v, k_mod),
BasisBackward<scalar_t>::linear(v, k_mod));
}
at::Tensor linear_bw_cuda(at::Tensor grad_basis, at::Tensor pseudo,
at::Tensor kernel_size, at::Tensor is_open_spline) {
return BASIS_BACKWARD(1, grad_basis, pseudo, kernel_size, is_open_spline,
linear_bw_kernel);
}
template <typename scalar_t>
__global__ void
quadratic_bw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_pseudo,
at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_basis,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline,
size_t numel) {
BASIS_BACKWARD_KERNEL(2, grad_pseudo, grad_basis, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::quadratic(v, k_mod),
BasisBackward<scalar_t>::quadratic(v, k_mod));
}
at::Tensor quadratic_bw_cuda(at::Tensor grad_basis, at::Tensor pseudo,
at::Tensor kernel_size,
at::Tensor is_open_spline) {
return BASIS_BACKWARD(2, grad_basis, pseudo, kernel_size, is_open_spline,
quadratic_bw_kernel);
}
template <typename scalar_t>
__global__ void
cubic_bw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_pseudo,
at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_basis,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline, size_t numel) {
BASIS_BACKWARD_KERNEL(3, grad_pseudo, grad_basis, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::cubic(v, k_mod),
BasisBackward<scalar_t>::cubic(v, k_mod));
}
at::Tensor cubic_bw_cuda(at::Tensor grad_basis, at::Tensor pseudo,
at::Tensor kernel_size, at::Tensor is_open_spline) {
return BASIS_BACKWARD(3, grad_basis, pseudo, kernel_size, is_open_spline,
cubic_bw_kernel);
}
| 8566c23b2b67a052ad54ad45e40d9424798f8e65.cu | #include <ATen/ATen.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#define THREADS 1024
#define BLOCKS(N) (N + THREADS - 1) / THREADS
template <typename scalar_t> struct BasisForward {
static inline __device__ scalar_t linear(scalar_t v, int64_t k_mod) {
return 1 - v - k_mod + 2 * v * k_mod;
}
static inline __device__ scalar_t quadratic(scalar_t v, int64_t k_mod) {
if (k_mod == 0)
return 0.5 * v * v - v + 0.5;
else if (k_mod == 1)
return -v * v + v + 0.5;
else
return 0.5 * v * v;
}
static inline __device__ scalar_t cubic(scalar_t v, int64_t k_mod) {
if (k_mod == 0)
return (1 - v) * (1 - v) * (1 - v) / 6.0;
else if (k_mod == 1)
return (3 * v * v * v - 6 * v * v + 4) / 6;
else if (k_mod == 2)
return (-3 * v * v * v + 3 * v * v + 3 * v + 1) / 6;
else
return v * v * v / 6;
}
};
#define BASIS_FORWARD(M, PSEUDO, KERNEL_SIZE, IS_OPEN_SPLINE, KERNEL_NAME) \
[&]() -> std::tuple<at::Tensor, at::Tensor> { \
auto E = PSEUDO.size(0); \
auto S = (int64_t)(pow(M + 1, KERNEL_SIZE.size(0)) + 0.5); \
auto basis = at::empty({E, S}, PSEUDO.options()); \
auto weight_index = at::empty({E, S}, KERNEL_SIZE.options()); \
\
AT_DISPATCH_FLOATING_TYPES(PSEUDO.type(), "basis_forward_##M", [&] { \
KERNEL_NAME<scalar_t><<<BLOCKS(basis.numel()), THREADS>>>( \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(basis), \
at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(PSEUDO), \
KERNEL_SIZE.data<int64_t>(), IS_OPEN_SPLINE.data<uint8_t>(), \
basis.numel()); \
}); \
\
return std::make_tuple(basis, weight_index); \
}()
#define BASIS_FORWARD_KERNEL(M, BASIS, WEIGHT_INDEX, PSEUDO, KERNEL_SIZE, \
IS_OPEN_SPLINE, NUMEL, CODE) \
[&] { \
const size_t index = blockIdx.x * blockDim.x + threadIdx.x; \
const size_t stride = blockDim.x * gridDim.x; \
for (ptrdiff_t i = index; i < NUMEL; i += stride) { \
int64_t e = i / BASIS.sizes[1], s = i % BASIS.sizes[1]; \
int64_t k = s, wi = 0, wi_offset = 1; \
scalar_t b = 1; \
\
for (ptrdiff_t d = 0; d < PSEUDO.sizes[1]; d++) { \
auto k_mod = k % (M + 1); \
k /= M + 1; \
\
auto v = PSEUDO.data[e * PSEUDO.strides[0] + d * PSEUDO.strides[1]]; \
v *= KERNEL_SIZE[d] - M * IS_OPEN_SPLINE[d]; \
\
wi += (((int64_t)v + k_mod) % KERNEL_SIZE[d]) * wi_offset; \
wi_offset *= KERNEL_SIZE[d]; \
\
v -= floor(v); \
v = CODE; \
b *= v; \
} \
\
BASIS.data[i] = b; \
WEIGHT_INDEX.data[i] = wi; \
} \
}()
template <typename scalar_t>
__global__ void
linear_fw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> basis,
at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline, size_t numel) {
BASIS_FORWARD_KERNEL(1, basis, weight_index, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::linear(v, k_mod));
}
std::tuple<at::Tensor, at::Tensor> linear_fw_cuda(at::Tensor pseudo,
at::Tensor kernel_size,
at::Tensor is_open_spline) {
return BASIS_FORWARD(1, pseudo, kernel_size, is_open_spline,
linear_fw_kernel);
}
template <typename scalar_t>
__global__ void
quadratic_fw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> basis,
at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline,
size_t numel) {
BASIS_FORWARD_KERNEL(2, basis, weight_index, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::quadratic(v, k_mod));
}
std::tuple<at::Tensor, at::Tensor>
quadratic_fw_cuda(at::Tensor pseudo, at::Tensor kernel_size,
at::Tensor is_open_spline) {
return BASIS_FORWARD(2, pseudo, kernel_size, is_open_spline,
quadratic_fw_kernel);
}
template <typename scalar_t>
__global__ void
cubic_fw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> basis,
at::cuda::detail::TensorInfo<int64_t, int64_t> weight_index,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline, size_t numel) {
BASIS_FORWARD_KERNEL(3, basis, weight_index, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::cubic(v, k_mod));
}
std::tuple<at::Tensor, at::Tensor> cubic_fw_cuda(at::Tensor pseudo,
at::Tensor kernel_size,
at::Tensor is_open_spline) {
return BASIS_FORWARD(3, pseudo, kernel_size, is_open_spline, cubic_fw_kernel);
}
template <typename scalar_t> struct BasisBackward {
static inline __device__ scalar_t linear(scalar_t v, int64_t k_mod) {
return 2 * k_mod - 1;
}
static inline __device__ scalar_t quadratic(scalar_t v, int64_t k_mod) {
if (k_mod == 0)
return v - 1;
else if (k_mod == 1)
return -2 * v + 1;
else
return v;
}
static inline __device__ scalar_t cubic(scalar_t v, int64_t k_mod) {
if (k_mod == 0)
return (-v * v + 2 * v - 1) / 2;
else if (k_mod == 1)
return (3 * v * v - 4 * v) / 2;
else if (k_mod == 2)
return (-3 * v * v + 2 * v + 1) / 2;
else
return v * v / 2;
}
};
#define BASIS_BACKWARD(M, GRAD_BASIS, PSEUDO, KERNEL_SIZE, IS_OPEN_SPLINE, \
KERNEL_NAME) \
[&]() -> at::Tensor { \
auto E = PSEUDO.size(0); \
auto D = PSEUDO.size(1); \
auto grad_pseudo = at::empty({E, D}, PSEUDO.options()); \
\
AT_DISPATCH_FLOATING_TYPES(GRAD_BASIS.type(), "basis_backward_##M", [&] { \
KERNEL_NAME<scalar_t><<<BLOCKS(grad_pseudo.numel()), THREADS>>>( \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_pseudo), \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(GRAD_BASIS), \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(PSEUDO), \
KERNEL_SIZE.data<int64_t>(), IS_OPEN_SPLINE.data<uint8_t>(), \
grad_pseudo.numel()); \
}); \
\
return grad_pseudo; \
}()
#define BASIS_BACKWARD_KERNEL(M, GRAD_PSEUDO, GRAD_BASIS, PSEUDO, KERNEL_SIZE, \
IS_OPEN_SPLINE, NUMEL, CODE, GRAD_CODE) \
[&] { \
const size_t index = blockIdx.x * blockDim.x + threadIdx.x; \
const size_t stride = blockDim.x * gridDim.x; \
for (ptrdiff_t i = index; i < NUMEL; i += stride) { \
int64_t e = i / GRAD_PSEUDO.sizes[1], d = i % GRAD_PSEUDO.sizes[1]; \
scalar_t g = 0, tmp; \
\
for (ptrdiff_t s = 0; s < GRAD_BASIS.sizes[1]; s++) { \
auto k_mod = (s / (int64_t)(pow(M + 1, d) + 0.5)) % (M + 1); \
auto v = PSEUDO.data[e * PSEUDO.strides[0] + d * PSEUDO.strides[1]]; \
v *= KERNEL_SIZE[d] - M * IS_OPEN_SPLINE[d]; \
v -= floor(v); \
v = GRAD_CODE; \
tmp = v; \
\
for (ptrdiff_t d_it = 1; d_it < GRAD_PSEUDO.sizes[1]; d_it++) { \
auto d_new = d_it - (d >= d_it); \
k_mod = (s / (int64_t)(pow(M + 1, d_new) + 0.5)) % (M + 1); \
v = PSEUDO.data[e * pseudo.strides[0] + d_new * PSEUDO.strides[1]]; \
v *= KERNEL_SIZE[d_new] - M * IS_OPEN_SPLINE[d_new]; \
v -= floor(v); \
v = CODE; \
tmp *= v; \
} \
g += tmp * \
GRAD_BASIS \
.data[e * GRAD_BASIS.strides[0] + s * GRAD_BASIS.strides[1]]; \
} \
g *= KERNEL_SIZE[d] - M * IS_OPEN_SPLINE[d]; \
GRAD_PSEUDO.data[i] = g; \
} \
}()
template <typename scalar_t>
__global__ void
linear_bw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_pseudo,
at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_basis,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline, size_t numel) {
BASIS_BACKWARD_KERNEL(1, grad_pseudo, grad_basis, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::linear(v, k_mod),
BasisBackward<scalar_t>::linear(v, k_mod));
}
at::Tensor linear_bw_cuda(at::Tensor grad_basis, at::Tensor pseudo,
at::Tensor kernel_size, at::Tensor is_open_spline) {
return BASIS_BACKWARD(1, grad_basis, pseudo, kernel_size, is_open_spline,
linear_bw_kernel);
}
template <typename scalar_t>
__global__ void
quadratic_bw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_pseudo,
at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_basis,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline,
size_t numel) {
BASIS_BACKWARD_KERNEL(2, grad_pseudo, grad_basis, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::quadratic(v, k_mod),
BasisBackward<scalar_t>::quadratic(v, k_mod));
}
at::Tensor quadratic_bw_cuda(at::Tensor grad_basis, at::Tensor pseudo,
at::Tensor kernel_size,
at::Tensor is_open_spline) {
return BASIS_BACKWARD(2, grad_basis, pseudo, kernel_size, is_open_spline,
quadratic_bw_kernel);
}
template <typename scalar_t>
__global__ void
cubic_bw_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_pseudo,
at::cuda::detail::TensorInfo<scalar_t, int64_t> grad_basis,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pseudo,
int64_t *kernel_size, uint8_t *is_open_spline, size_t numel) {
BASIS_BACKWARD_KERNEL(3, grad_pseudo, grad_basis, pseudo, kernel_size,
is_open_spline, numel,
BasisForward<scalar_t>::cubic(v, k_mod),
BasisBackward<scalar_t>::cubic(v, k_mod));
}
at::Tensor cubic_bw_cuda(at::Tensor grad_basis, at::Tensor pseudo,
at::Tensor kernel_size, at::Tensor is_open_spline) {
return BASIS_BACKWARD(3, grad_basis, pseudo, kernel_size, is_open_spline,
cubic_bw_kernel);
}
|
3a21b638ea7e544bc34ffb1d83ae64db392db80b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
sparse_matrix.cu:
    CUDA implementation of Sparse Matrix-Vector Multiplication (SpMV)
compile & run:
nvcc sparse_matrix.cu -o sparse_matrix.sh -lm && ./sparse_matrix.sh 32768 256 256 1
input:
        NNZ: Number of non-zero values
ROWS: The number of Rows (max 1024)
COLS: The number of Columns (max 1024)
DEBUG: 1 to debug, 0 to no-debug
output:
Time in MS
Throughput in GFLOPS
author: Ivan Reyes-Amezcua
date: June, 2020
*/
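/*
    Example of the CSR inputs this program expects (a small 3x4 matrix with
    0-based column indices; the numbers are illustrative only):

        | 10  0  0 20 |        values    = {10, 20, 30, 40, 50}
        |  0 30  0  0 |  ==>   col_index = { 0,  3,  1,  0,  2}
        | 40  0 50  0 |        row_ptrs  = { 0,  2,  3,  5}

    row_ptrs has ROWS + 1 entries; row i owns the half-open range
    [row_ptrs[i], row_ptrs[i+1]) of values/col_index, which is how the spmv
    kernel below computes NNZ_in_row.
*/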
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <stdlib.h>
using namespace std;
__global__ void spmv(int num_rows, int num_cols, int *row_ptrs,
int *col_index, double *values, double *x, double *y
) {
extern __shared__ double s_sum[]; // sum of the values per row per block
int tid = threadIdx.x; // Local: Thread ID
int g_tid = threadIdx.x + row_ptrs[blockIdx.x]; // Global: Thread ID + offset in row
int NNZ_in_row = row_ptrs[blockIdx.x + 1] - row_ptrs[blockIdx.x]; // Non-zero values in current row-block
s_sum[tid] = 0.0;
__syncthreads();
// TODO: check col_index vector, possible memory issue
if (tid < NNZ_in_row)
s_sum[tid] = values[g_tid] * x[col_index[g_tid]]; // Map: value[n] * X[index[n]]
__syncthreads();
// Inclusive Scan
double temp;
for (int j = 1; j < blockDim.x; j *= 2 ){
if ( (tid - j) >= 0)
temp = s_sum[tid - j];
__syncthreads();
if ( (tid - j) >= 0)
s_sum[tid] += temp;
__syncthreads();
}
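	// Illustration of the inclusive (Hillis-Steele) scan above on a 4-thread
	// block, with hypothetical per-thread products a, b, c, d:
	//   start : [a,  b,    c,      d      ]
	//   j = 1 : [a,  a+b,  b+c,    c+d    ]
	//   j = 2 : [a,  a+b,  a+b+c,  a+b+c+d]
	// so thread blockDim.x - 1 ends up holding the full row sum written below.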
// Save the result of Row-Block on global memory
if(tid == blockDim.x - 1)
y[blockIdx.x] = s_sum[tid];
}
int main(int argc, char *argv[]) {
// Get and validate arguments
if(argc != 5){
printf("Usage %s NNZ ROWS COLS DEBUG\n",argv[0]);
exit(0);
}
int NNZ = atoi ( argv[1] ); // Non Zero Values
int num_rows = atoi ( argv[2] ); // rows
int num_cols = atoi ( argv[3] ); // columns
int debug = atoi ( argv[4] ); // 1 for debug, 0 for NO-debug
double values[NNZ]; // CSR format
int col_index[NNZ]; // CSR format
int row_ptrs[num_rows + 1]; // CSR format
double x[num_cols]; // the vector to multiply
double y[num_rows]; // the output
double true_y[num_rows]; // the true Y results of operation
// Declare GPU memory pointers
double *d_values;
double *d_x;
double *d_y;
int *d_col_index;
int *d_row_ptrs;
// Allocate GPU memory
int r1 = hipMalloc((void **) &d_values, NNZ*sizeof( double ));
int r2 = hipMalloc((void **) &d_x, num_cols*sizeof( double ));
int r3 = hipMalloc((void **) &d_y, num_rows*sizeof( double ));
int r4 = hipMalloc((void **) &d_col_index, NNZ*sizeof( int ));
int r5 = hipMalloc((void **) &d_row_ptrs, (num_rows + 1)*sizeof( int ));
if( r1 || r2 || r3 || r4 || r5 ) {
printf( "Error allocating memory in GPU\n" );
exit( 0 );
}
// Read the Values and Index:
std::ifstream values_file("./data/values.txt");
std::ifstream col_ind_file("./data/col_ind.txt");
for (int i = 0; i < NNZ; i++) {
values_file >> values[i];
double aux;
col_ind_file >> aux;
col_index[i] = (int) aux;
}
// Read the row_ptr and the True Ys:
std::ifstream row_ptr_file("./data/row_ptr.txt");
std::ifstream true_y_file("./data/y.txt");
    for (int i = 0; i < (num_rows + 1); i++) {
        double aux;
        row_ptr_file >> aux;
        row_ptrs[i] = (int) aux;
        if (i < num_rows) {              // true_y has only num_rows entries
            double aux2;
            true_y_file >> aux2;
            true_y[i] = aux2;            // keep full precision, stay in bounds
        }
    }
// Read the X values:
std::ifstream x_file("./data/x.txt");
for (int i = 0; i < num_cols; i++)
x_file >> x[i];
// Transfer the arrays to the GPU:
hipMemcpy(d_values, values, NNZ*sizeof( double ), hipMemcpyHostToDevice);
hipMemcpy(d_x, x, num_cols*sizeof( double ), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, num_rows*sizeof( double ), hipMemcpyHostToDevice);
hipMemcpy(d_col_index, col_index, NNZ*sizeof( int ), hipMemcpyHostToDevice);
hipMemcpy(d_row_ptrs, row_ptrs, (num_rows + 1)*sizeof( int ), hipMemcpyHostToDevice);
// Start Time:
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// Call to kernel:
    size_t size_sharedmem = num_cols * sizeof(double); // Size of shared memory in bytes (one double per thread)
hipLaunchKernelGGL(( spmv), dim3(num_rows), dim3(num_cols), size_sharedmem, 0, num_rows, num_cols, d_row_ptrs, d_col_index, d_values, d_x, d_y);
hipDeviceSynchronize();
// Stop Time:
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
// Transfer the values to the CPU:
hipMemcpy(y, d_y, num_rows * sizeof(double), hipMemcpyDeviceToHost);
// Get the error:
int errors = 0; // count of errors
float e = 500.0; // tolerance to error
for (int i = 0; i < num_rows; i++) {
if (abs(true_y[i] - y[i]) > e) {
errors++;
if(debug == 1)
printf("Error in Y%d, True: %f, Calc: %f\n", i, true_y[i], y[i]);
} else if ( i < 10) {
printf("Y%d, True: %f, Calc: %f\n", i, true_y[i], y[i]);
}
}
float error_rate = ((double)errors/(double)num_rows) * 100.0;
float density = ((float)NNZ/((float)num_cols*(float)num_rows))*100.0;
printf("\nM. Density: %0.2f%%, #Ys: %d, Errors: %d, Error Rate: %0.2f%%\n", density, num_rows, errors, error_rate);
// Free Memory
hipFree( d_values );
hipFree( d_x );
hipFree( d_y );
hipFree( d_col_index );
hipFree( d_row_ptrs );
// Calculate Throughput:
float bw;
bw = (float )num_rows*(float )num_cols*log2((float) num_cols);
bw /= milliseconds * 1000000.0;
printf( "\nSpmV GPU execution time: %7.3f ms, Throughput: %6.2f GFLOPS\n\n", milliseconds, bw );
// Store Runtime
FILE *pFile = fopen("GPU_results.txt","a");
fprintf(pFile, "%d, %0.2f, %0.2f, %d, %d, %7.3f, %6.2f\n", NNZ, density, error_rate, num_cols, num_rows, milliseconds, bw);
fclose(pFile);
return 0;
} | 3a21b638ea7e544bc34ffb1d83ae64db392db80b.cu | /*
sparse_matrix.cu:
    CUDA implementation of Sparse Matrix-Vector Multiplication (SpMV)
compile & run:
nvcc sparse_matrix.cu -o sparse_matrix.sh -lm && ./sparse_matrix.sh 32768 256 256 1
input:
        NNZ: Number of non-zero values
ROWS: The number of Rows (max 1024)
COLS: The number of Columns (max 1024)
DEBUG: 1 to debug, 0 to no-debug
output:
Time in MS
Throughput in GFLOPS
author: Ivan Reyes-Amezcua
date: June, 2020
*/
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <stdlib.h>
using namespace std;
__global__ void spmv(int num_rows, int num_cols, int *row_ptrs,
int *col_index, double *values, double *x, double *y
) {
extern __shared__ double s_sum[]; // sum of the values per row per block
int tid = threadIdx.x; // Local: Thread ID
int g_tid = threadIdx.x + row_ptrs[blockIdx.x]; // Global: Thread ID + offset in row
int NNZ_in_row = row_ptrs[blockIdx.x + 1] - row_ptrs[blockIdx.x]; // Non-zero values in current row-block
s_sum[tid] = 0.0;
__syncthreads();
// TODO: check col_index vector, possible memory issue
if (tid < NNZ_in_row)
s_sum[tid] = values[g_tid] * x[col_index[g_tid]]; // Map: value[n] * X[index[n]]
__syncthreads();
// Inclusive Scan
double temp;
for (int j = 1; j < blockDim.x; j *= 2 ){
if ( (tid - j) >= 0)
temp = s_sum[tid - j];
__syncthreads();
if ( (tid - j) >= 0)
s_sum[tid] += temp;
__syncthreads();
}
// Save the result of Row-Block on global memory
if(tid == blockDim.x - 1)
y[blockIdx.x] = s_sum[tid];
}
int main(int argc, char *argv[]) {
// Get and validate arguments
if(argc != 5){
printf("Usage %s NNZ ROWS COLS DEBUG\n",argv[0]);
exit(0);
}
int NNZ = atoi ( argv[1] ); // Non Zero Values
int num_rows = atoi ( argv[2] ); // rows
int num_cols = atoi ( argv[3] ); // columns
int debug = atoi ( argv[4] ); // 1 for debug, 0 for NO-debug
double values[NNZ]; // CSR format
int col_index[NNZ]; // CSR format
int row_ptrs[num_rows + 1]; // CSR format
double x[num_cols]; // the vector to multiply
double y[num_rows]; // the output
double true_y[num_rows]; // the true Y results of operation
// Declare GPU memory pointers
double *d_values;
double *d_x;
double *d_y;
int *d_col_index;
int *d_row_ptrs;
// Allocate GPU memory
int r1 = cudaMalloc((void **) &d_values, NNZ*sizeof( double ));
int r2 = cudaMalloc((void **) &d_x, num_cols*sizeof( double ));
int r3 = cudaMalloc((void **) &d_y, num_rows*sizeof( double ));
int r4 = cudaMalloc((void **) &d_col_index, NNZ*sizeof( int ));
int r5 = cudaMalloc((void **) &d_row_ptrs, (num_rows + 1)*sizeof( int ));
if( r1 || r2 || r3 || r4 || r5 ) {
printf( "Error allocating memory in GPU\n" );
exit( 0 );
}
// Read the Values and Index:
std::ifstream values_file("./data/values.txt");
std::ifstream col_ind_file("./data/col_ind.txt");
for (int i = 0; i < NNZ; i++) {
values_file >> values[i];
double aux;
col_ind_file >> aux;
col_index[i] = (int) aux;
}
// Read the row_ptr and the True Ys:
std::ifstream row_ptr_file("./data/row_ptr.txt");
std::ifstream true_y_file("./data/y.txt");
    for (int i = 0; i < (num_rows + 1); i++) {
        double aux;
        row_ptr_file >> aux;
        row_ptrs[i] = (int) aux;
        if (i < num_rows) {              // true_y has only num_rows entries
            double aux2;
            true_y_file >> aux2;
            true_y[i] = aux2;            // keep full precision, stay in bounds
        }
    }
// Read the X values:
std::ifstream x_file("./data/x.txt");
for (int i = 0; i < num_cols; i++)
x_file >> x[i];
// Transfer the arrays to the GPU:
cudaMemcpy(d_values, values, NNZ*sizeof( double ), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, num_cols*sizeof( double ), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, num_rows*sizeof( double ), cudaMemcpyHostToDevice);
cudaMemcpy(d_col_index, col_index, NNZ*sizeof( int ), cudaMemcpyHostToDevice);
cudaMemcpy(d_row_ptrs, row_ptrs, (num_rows + 1)*sizeof( int ), cudaMemcpyHostToDevice);
// Start Time:
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// Call to kernel:
    size_t size_sharedmem = num_cols * sizeof(double); // Size of shared memory in bytes (one double per thread)
spmv<<<num_rows, num_cols, size_sharedmem>>>(num_rows, num_cols, d_row_ptrs, d_col_index, d_values, d_x, d_y);
cudaDeviceSynchronize();
// Stop Time:
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// Transfer the values to the CPU:
cudaMemcpy(y, d_y, num_rows * sizeof(double), cudaMemcpyDeviceToHost);
// Get the error:
int errors = 0; // count of errors
float e = 500.0; // tolerance to error
for (int i = 0; i < num_rows; i++) {
if (abs(true_y[i] - y[i]) > e) {
errors++;
if(debug == 1)
printf("Error in Y%d, True: %f, Calc: %f\n", i, true_y[i], y[i]);
} else if ( i < 10) {
printf("Y%d, True: %f, Calc: %f\n", i, true_y[i], y[i]);
}
}
float error_rate = ((double)errors/(double)num_rows) * 100.0;
float density = ((float)NNZ/((float)num_cols*(float)num_rows))*100.0;
printf("\nM. Density: %0.2f%%, #Ys: %d, Errors: %d, Error Rate: %0.2f%%\n", density, num_rows, errors, error_rate);
// Free Memory
cudaFree( d_values );
cudaFree( d_x );
cudaFree( d_y );
cudaFree( d_col_index );
cudaFree( d_row_ptrs );
// Calculate Throughput:
float bw;
bw = (float )num_rows*(float )num_cols*log2((float) num_cols);
bw /= milliseconds * 1000000.0;
printf( "\nSpmV GPU execution time: %7.3f ms, Throughput: %6.2f GFLOPS\n\n", milliseconds, bw );
// Store Runtime
FILE *pFile = fopen("GPU_results.txt","a");
fprintf(pFile, "%d, %0.2f, %0.2f, %d, %d, %7.3f, %6.2f\n", NNZ, density, error_rate, num_cols, num_rows, milliseconds, bw);
fclose(pFile);
return 0;
} |
040f0ab66889f1860311c9210cc51efe9b8fad5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ReplicaExchange.cu
*
* Created on: Aug 10, 2011
* Author: serxa
*/
#include <iostream>
#include <algorithm>
#include <fstream>
#include "../Util/Log.h"
#include <map>
#include "../Core/global.h"
#include "ReplicaExchange.cuh"
#include "../Core/parameters.h"
#include "../Util/ran2.h"
#include "../Updaters/EnergyOutputManager.cuh"
#ifdef USE_MPI
#include <mpi.h>
#endif
namespace replica_exchange {
class Log: public ILog {
virtual void Write(const char* message) const {
std::cout << makeTimePrefix() << "<replica_exchange> " << message << std::endl;
}
} log;
#define LOG LogStream(log)
int numberOfTrajectories() {
#ifdef USE_MPI
static const int result = parameters.Ntr * MPI::COMM_WORLD.Get_size();
#else
static const int result = parameters.Ntr;
#endif
return result;
}
int mpiRank() {
#ifdef USE_MPI
static const int result = MPI::COMM_WORLD.Get_rank();
#else
static const int result = 0;
#endif
return result;
}
int trajId(int traj, int rank) {
return traj + rank * parameters.Ntr;
}
void create() {
LOG << "create";
debug = (bool)getYesNoParameter(PARAMETER_REMD_DEBUG, 0);
if (!getYesNoParameter(PARAMETER_REMD_ENABLED, 0))
return;
// Initialize globals
isExchangesDisabled = (bool)getYesNoParameter(PARAMETER_REMD_DISABLE_EXCHANGES, 0);
heatUpdatesLimit = getIntegerParameter(PARAMETER_REMD_HEATSTEPS, 0) / getIntegerParameter(PARAMETER_REMD_FREQ);
blockCount = (int)ceil((float)gsystem.Ntot/BLOCK_SIZE);
blockSize = BLOCK_SIZE;
hybrid_taus::initRand(getLongIntegerParameter(PARAMETER_RSEED)*parameters.firstrun*(mpiRank()+1), gsystem.Ntot);
rseed = getIntegerParameter(PARAMETER_RSEED);
// Initialize replica-related stuff
float Tmin = getFloatParameter(PARAMETER_REMD_MIN_TEMPERATURE);
float Tmax = getFloatParameter(PARAMETER_REMD_MAX_TEMPERATURE);
float dT = (Tmax - Tmin) / numberOfTrajectories();
replica.resize(numberOfTrajectories());
LOG << "REMD Temperatures:";
for (int traj = 0; traj < numberOfTrajectories(); traj++) {
replica[traj].id = traj;
replica[traj].Tmax = Tmin + dT * traj;
LOG << replica[traj].id << ": " << replica[traj].Tmax;
if (isHeatModeEnabled())
replica[traj].T = 0;
else
replica[traj].T = replica[traj].Tmax;
}
localReplica = &replica[mpiRank() * parameters.Ntr];
refreshVars();
gamma = getFloatParameter(PARAMETER_DAMPING, DEFAULT_DAMPING);
// Initialize potential
potential.compute = compute;
potential.destroy = destroy;
sprintf(potential.name, "Replica exchange");
potentials[potentialsCount++] = &potential;
// Initialize updater
updater.update = update;
updater.destroy = destroy;
updater.frequency = getIntegerParameter(PARAMETER_REMD_FREQ);
sprintf(updater.name, "Replica exchange");
updaters[updatersCount++] = &updater;
// Initialize restarter
addRestarter("ReplicaExchange", save, load);
LOG << "create done";
}
__global__ void compute_kernel(float* var, float gamma) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < c_gsystem.Ntot) {
float4 f = c_gsystem.d_forces[i];
int at = c_gsystem.d_atomTypes[i];
f.w = c_gsystem.d_m[at]; // Mass is now here.
		// TODO: This should be optimized with use of constant/texture memory. Artem, what do you think will be best here?
		// The best would be constant memory, since most of the time all threads in a warp will access the same 'var'.
// However, I'm not sure that it is possible to allocate constant memory dynamically - we don't know in advance how many trajectories we will have
// Texture memory is cached spatially, so some cache will be wasted if texture is used here.
// I think texture is the best choice here though.
/*float mult = var[i / c_gsystem.N] * sqrtf(f.w);
float4 rf = hybrid_taus::rforce(i);
f.x += mult * rf.x;
f.y += mult * rf.y;
f.z += mult * rf.z;
c_gsystem.d_forces[i] = f;*/
float4 v = c_gsystem.d_vel[i];
float mgamma = f.w*gamma;
float mult = var[i / c_gsystem.N] * sqrtf(f.w);
float4 rf = hybrid_taus::rforce(i);
f.x += mult*rf.x - mgamma*v.x;
f.y += mult*rf.y - mgamma*v.y;
f.z += mult*rf.z - mgamma*v.z;
c_gsystem.d_forces[i] = f;
}
}
void inline compute() {
hipLaunchKernelGGL(( compute_kernel), dim3(blockCount), dim3(blockSize), 0, 0, d_var, gamma);
}
void destroy() {}
float computeReplicaEnergy(int traj) {
// NOTE: this works only if called after energyOutputManager.update()
return energyOutputData.E[traj];
}
double exchangeProbability(const Replica& a, const Replica& b) {
return ::min(1.0, exp((a.E - b.E) * (1.0/(Kb_MD * a.T) - 1.0/(Kb_MD * b.T))));
}
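// Standard parallel-tempering Metropolis criterion:
//   p = min(1, exp[(E_a - E_b) * (1/(k_B T_a) - 1/(k_B T_b))])
// The swap is always accepted when the colder replica currently has the higher
// energy (non-negative exponent); otherwise it is accepted with the
// Boltzmann-weighted probability above.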
void refreshVars() {
// Allocate memory for the first time
if (h_var.empty()) {
h_var.resize(parameters.Ntr);
hipMalloc((void**)&d_var, sizeof(float) * parameters.Ntr);
}
// Compute coefficients
static float h = getFloatParameter(PARAMETER_TIMESTEP);
static float gamma = getFloatParameter(PARAMETER_DAMPING, DEFAULT_DAMPING);
for (int traj = 0; traj < parameters.Ntr; traj++)
h_var[traj] = sqrtf(2.0f*gamma*Kb_MD*localReplica[traj].T/h);
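	// h_var is the temperature-dependent part of the Langevin random-force
	// amplitude sigma = sqrt(2 * gamma * m * k_B * T / dt); the sqrt(m) factor is
	// applied per atom in compute_kernel (mult = var * sqrtf(f.w)), assuming
	// hybrid_taus::rforce returns unit-variance components.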
// Write coefficients to device
hipMemcpy(d_var, &h_var[0], sizeof(float) * parameters.Ntr, hipMemcpyHostToDevice);
}
__global__ void normalizeVelocities_kernel(int base, int size, float coef) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < size)
c_gsystem.d_vel[base + i] *= coef;
}
void logExchangeReplicas(int i, int j) {
static bool first = true;
static std::string fileName;
if (first) {
first = false;
// Get and save filename for exchanges
fileName = getMaskedParameterAs<std::string>(PARAMETER_REMD_EXCHANGESFILE);
// Clear file (we are going to append text to the end of it)
if (!fileName.empty())
std::ofstream(fileName.c_str());
}
if (!fileName.empty()) {
std::ofstream ofs(fileName.c_str(), std::ios::app);
ofs << step << '\t' << replica[i].id << '\t' << replica[j].id << '\n';
}
}
int localTrajId(int traj) {
return traj - mpiRank() * parameters.Ntr;
}
int rankForTraj(int traj) {
return traj / parameters.Ntr;
}
void normalizeVelocities(int traj, float coef) {
if (mpiRank() == rankForTraj(traj)) {
unsigned gridSize = (unsigned)ceil((float)gsystem.N/blockSize);
hipLaunchKernelGGL(( normalizeVelocities_kernel), dim3(gridSize), dim3(blockSize), 0, 0, gsystem.N * localTrajId(traj), gsystem.N, coef);
}
}
void exchangeReplicas(int i, int j) {
// Actually exchange replicas/trajectories on CPU and GPU
float coef = sqrt(replica[i].T/replica[j].T);
normalizeVelocities(i, 1.0f/coef);
normalizeVelocities(j, coef);
std::swap(replica[i], replica[j]);
}
void broadcastExchangeInfo(int i, int j) {
#ifdef USE_MPI
int size = MPI::COMM_WORLD.Get_size();
for (int dst_rank = 1; dst_rank < size; dst_rank++) {
if (MPI_Send(&i, 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
DIE("Unable to send traj num to exchange: traj=%d dst_rank=%d", i, dst_rank);
if (MPI_Send(&j, 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
DIE("Unable to send traj num to exchange: traj=%d dst_rank=%d", j, dst_rank);
}
#endif
}
void broadcastEndOfExchangesMarker() {
#ifdef USE_MPI
int size = MPI::COMM_WORLD.Get_size();
int marker = -1;
for (int dst_rank = 1; dst_rank < size; dst_rank++)
if (MPI_Send(&marker, 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
DIE("Unable to send marker: dst_rank=%d", dst_rank);
#endif
}
void update() {
updatesCount++;
if (isHeatMode())
heatMode();
else
exchangeMode();
}
void heatMode() {
LOG << "update (heat mode)";
// Update temperatures of all replicas
for (int i = 0; i < numberOfTrajectories(); i++)
replica[i].T = double(updatesCount) / double(heatUpdatesLimit) * replica[i].Tmax;
// Update environment
refreshVars();
}
struct ReplicaTemperatureComp {
bool operator()(int l, int r) const {
return replica[l].T < replica[r].T;
}
};
void sendReceiveEnergy() {
#ifdef USE_MPI
int rank = MPI::COMM_WORLD.Get_rank();
int size = MPI::COMM_WORLD.Get_size();
MPI_Status status;
if (rank == 0) {
// Receive energies
for (int src_rank = 1; src_rank < size; src_rank++)
for (int traj = 0; traj < parameters.Ntr; traj++)
if (MPI_Recv(&replica[trajId(traj, src_rank)].E, 1, MPI_FLOAT, src_rank, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
DIE("Unable to receive energy: src_rank=%d traj=%d error_code=%d", src_rank, traj, status.MPI_ERROR);
} else {
// Send energies
for (int traj = 0; traj < parameters.Ntr; traj++)
if (MPI_Send(&localReplica[traj].E, 1, MPI_FLOAT, 0, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
DIE("Unable to send energy: traj=%d", traj);
}
#endif
}
void exchangeMode() {
LOG << "update (exchange mode)";
static bool checked = false;
if (!checked) {
checked = true;
if (updater.frequency % energy_output::updater.frequency != 0) {
DIE("REMD updater frequency (%d) is not a multiple of energy output manager frequency (%d)", updater.frequency, energy_output::updater.frequency);
}
}
// Compute local replicas energies
for (int i = 0; i < parameters.Ntr; i++)
localReplica[i].energySum += (localReplica[i].E = computeReplicaEnergy(i));
// Send/receive replicas energies
sendReceiveEnergy();
if (mpiRank() == 0) {
// Create sorted by replica temperature list of replica indices
std::vector<int> idx;
for (int i = 0, e = numberOfTrajectories(); i < e; i++)
idx.push_back(i);
std::sort(idx.begin(), idx.end(), ReplicaTemperatureComp());
// Attempt replica exchanges
int totalExchanges = 0, successfulExchanges = 0;
std::stringstream sxchg;
for (int n = int(ran2::ran2(&rseed) * 2); n < numberOfTrajectories() - 1; n += 2) {
int i = idx[n];
int j = idx[n + 1];
replica[i].exchangeAttempts++;
replica[j].exchangeAttempts++;
totalExchanges++;
double p = exchangeProbability(replica[i], replica[j]);
int success = (ran2::ran2(&rseed) < p);
if (success && !isExchangesDisabled) {
replica[i].successfulExchanges++;
replica[j].successfulExchanges++;
logExchangeReplicas(i, j);
broadcastExchangeInfo(i, j);
exchangeReplicas(i, j);
successfulExchanges++;
}
sxchg << " (" << i << ", " << j << ", " << p << ", " << success << ")";
}
broadcastEndOfExchangesMarker();
// Create replica_id -> replica map
std::map<int, Replica*> replicaIdx;
std::stringstream smap;
for (int i = 0; i < numberOfTrajectories(); i++) {
smap << " " << replica[i].id;
replicaIdx[replica[i].id] = &replica[i];
}
std::stringstream srates;
std::stringstream senergy;
for (int i = 0; i < numberOfTrajectories(); i++) {
srates << " " << float(replicaIdx[i]->successfulExchanges) / replicaIdx[i]->exchangeAttempts;
senergy << " " << (replicaIdx[i]->energySum / updatesCount) * KCALL_PER_KJ;
}
LOG << "Xchg:" << sxchg.str();
LOG << successfulExchanges << "/" << totalExchanges << " exchanges have been done";
LOG << "Map:" << smap.str();
LOG << "Rates:" << srates.str();
LOG << "AvgEnergy:" << senergy.str();
} else {
#ifdef USE_MPI
		// Receive list of traj numbers to exchange
MPI_Status status;
while (true) {
int i, j;
if (MPI_Recv(&i, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
DIE("Unable to receive the first traj num to exchange");
if (i == -1) // End-Of-Exchanges-List marker
break;
if (MPI_Recv(&j, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
DIE("Unable to receive the second traj num to exchange");
exchangeReplicas(i, j);
}
#endif
}
// Update environment
refreshVars();
}
void save(FILE* f) {
LOG << "restart:save";
fprintf(f, "%lu %d\n", updatesCount, rseed);
for (size_t i = 0; i < (size_t) numberOfTrajectories(); ++i) {
Replica& r = replica[i];
fprintf(f, "%.10e %.10e %.10e %d %d %d %.10e\n", r.Tmax, r.T, r.E, r.id, r.exchangeAttempts, r.successfulExchanges, r.energySum);
}
}
void load(FILE* f) {
LOG << "restart:load";
if (fscanf(f, " %lu %d ", &updatesCount, &rseed) != 2)
DIE("Loading restart from file: unable to get updatesCount and rseed");
printf("%lu %d\n", updatesCount, rseed);
for (size_t i = 0; i < (size_t) numberOfTrajectories(); ++i) {
Replica& r = replica[i];
int ret;
ret = fscanf(f, "%e %e %e %d %d %d %le ", &r.Tmax, &r.T, &r.E, &r.id, &r.exchangeAttempts, &r.successfulExchanges, &r.energySum);
if (ret != 7)
DIE("Loading restart from file: unable to load data for traj #%d", i);
printf("%.10e %.10e %.10e %d %d %d %.10e\n", r.Tmax, r.T, r.E, r.id, r.exchangeAttempts, r.successfulExchanges, r.energySum);
}
refreshVars();
}
#undef LOG
} // namespace replica_exchange
| 040f0ab66889f1860311c9210cc51efe9b8fad5b.cu | /*
* ReplicaExchange.cu
*
* Created on: Aug 10, 2011
* Author: serxa
*/
#include <iostream>
#include <algorithm>
#include <fstream>
#include "../Util/Log.h"
#include <map>
#include "../Core/global.h"
#include "ReplicaExchange.cuh"
#include "../Core/parameters.h"
#include "../Util/ran2.h"
#include "../Updaters/EnergyOutputManager.cuh"
#ifdef USE_MPI
#include <mpi.h>
#endif
namespace replica_exchange {
class Log: public ILog {
virtual void Write(const char* message) const {
std::cout << makeTimePrefix() << "<replica_exchange> " << message << std::endl;
}
} log;
#define LOG LogStream(log)
int numberOfTrajectories() {
#ifdef USE_MPI
static const int result = parameters.Ntr * MPI::COMM_WORLD.Get_size();
#else
static const int result = parameters.Ntr;
#endif
return result;
}
int mpiRank() {
#ifdef USE_MPI
static const int result = MPI::COMM_WORLD.Get_rank();
#else
static const int result = 0;
#endif
return result;
}
int trajId(int traj, int rank) {
return traj + rank * parameters.Ntr;
}
void create() {
LOG << "create";
debug = (bool)getYesNoParameter(PARAMETER_REMD_DEBUG, 0);
if (!getYesNoParameter(PARAMETER_REMD_ENABLED, 0))
return;
// Initialize globals
isExchangesDisabled = (bool)getYesNoParameter(PARAMETER_REMD_DISABLE_EXCHANGES, 0);
heatUpdatesLimit = getIntegerParameter(PARAMETER_REMD_HEATSTEPS, 0) / getIntegerParameter(PARAMETER_REMD_FREQ);
blockCount = (int)ceil((float)gsystem.Ntot/BLOCK_SIZE);
blockSize = BLOCK_SIZE;
hybrid_taus::initRand(getLongIntegerParameter(PARAMETER_RSEED)*parameters.firstrun*(mpiRank()+1), gsystem.Ntot);
rseed = getIntegerParameter(PARAMETER_RSEED);
// Initialize replica-related stuff
float Tmin = getFloatParameter(PARAMETER_REMD_MIN_TEMPERATURE);
float Tmax = getFloatParameter(PARAMETER_REMD_MAX_TEMPERATURE);
float dT = (Tmax - Tmin) / numberOfTrajectories();
replica.resize(numberOfTrajectories());
LOG << "REMD Temperatures:";
for (int traj = 0; traj < numberOfTrajectories(); traj++) {
replica[traj].id = traj;
replica[traj].Tmax = Tmin + dT * traj;
LOG << replica[traj].id << ": " << replica[traj].Tmax;
if (isHeatModeEnabled())
replica[traj].T = 0;
else
replica[traj].T = replica[traj].Tmax;
}
localReplica = &replica[mpiRank() * parameters.Ntr];
refreshVars();
gamma = getFloatParameter(PARAMETER_DAMPING, DEFAULT_DAMPING);
// Initialize potential
potential.compute = compute;
potential.destroy = destroy;
sprintf(potential.name, "Replica exchange");
potentials[potentialsCount++] = &potential;
// Initialize updater
updater.update = update;
updater.destroy = destroy;
updater.frequency = getIntegerParameter(PARAMETER_REMD_FREQ);
sprintf(updater.name, "Replica exchange");
updaters[updatersCount++] = &updater;
// Initialize restarter
addRestarter("ReplicaExchange", save, load);
LOG << "create done";
}
__global__ void compute_kernel(float* var, float gamma) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < c_gsystem.Ntot) {
float4 f = c_gsystem.d_forces[i];
int at = c_gsystem.d_atomTypes[i];
f.w = c_gsystem.d_m[at]; // Mass is now here.
		// TODO: This should be optimized with use of constant/texture memory. Artem, what do you think will be best here?
		// The best would be constant memory, since most of the time all threads in a warp will access the same 'var'.
// However, I'm not sure that it is possible to allocate constant memory dynamically - we don't know in advance how many trajectories we will have
// Texture memory is cached spatially, so some cache will be wasted if texture is used here.
// I think texture is the best choice here though.
/*float mult = var[i / c_gsystem.N] * sqrtf(f.w);
float4 rf = hybrid_taus::rforce(i);
f.x += mult * rf.x;
f.y += mult * rf.y;
f.z += mult * rf.z;
c_gsystem.d_forces[i] = f;*/
float4 v = c_gsystem.d_vel[i];
float mgamma = f.w*gamma;
float mult = var[i / c_gsystem.N] * sqrtf(f.w);
float4 rf = hybrid_taus::rforce(i);
f.x += mult*rf.x - mgamma*v.x;
f.y += mult*rf.y - mgamma*v.y;
f.z += mult*rf.z - mgamma*v.z;
c_gsystem.d_forces[i] = f;
}
}
void inline compute() {
compute_kernel<<<blockCount, blockSize>>>(d_var, gamma);
}
void destroy() {}
float computeReplicaEnergy(int traj) {
// NOTE: this works only if called after energyOutputManager.update()
return energyOutputData.E[traj];
}
double exchangeProbability(const Replica& a, const Replica& b) {
return std::min(1.0, exp((a.E - b.E) * (1.0/(Kb_MD * a.T) - 1.0/(Kb_MD * b.T))));
}
void refreshVars() {
// Allocate memory for the first time
if (h_var.empty()) {
h_var.resize(parameters.Ntr);
cudaMalloc((void**)&d_var, sizeof(float) * parameters.Ntr);
}
// Compute coefficients
static float h = getFloatParameter(PARAMETER_TIMESTEP);
static float gamma = getFloatParameter(PARAMETER_DAMPING, DEFAULT_DAMPING);
for (int traj = 0; traj < parameters.Ntr; traj++)
h_var[traj] = sqrtf(2.0f*gamma*Kb_MD*localReplica[traj].T/h);
// Write coefficients to device
cudaMemcpy(d_var, &h_var[0], sizeof(float) * parameters.Ntr, cudaMemcpyHostToDevice);
}
__global__ void normalizeVelocities_kernel(int base, int size, float coef) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < size)
c_gsystem.d_vel[base + i] *= coef;
}
void logExchangeReplicas(int i, int j) {
static bool first = true;
static std::string fileName;
if (first) {
first = false;
// Get and save filename for exchanges
fileName = getMaskedParameterAs<std::string>(PARAMETER_REMD_EXCHANGESFILE);
// Clear file (we are going to append text to the end of it)
if (!fileName.empty())
std::ofstream(fileName.c_str());
}
if (!fileName.empty()) {
std::ofstream ofs(fileName.c_str(), std::ios::app);
ofs << step << '\t' << replica[i].id << '\t' << replica[j].id << '\n';
}
}
int localTrajId(int traj) {
return traj - mpiRank() * parameters.Ntr;
}
int rankForTraj(int traj) {
return traj / parameters.Ntr;
}
void normalizeVelocities(int traj, float coef) {
if (mpiRank() == rankForTraj(traj)) {
unsigned gridSize = (unsigned)ceil((float)gsystem.N/blockSize);
normalizeVelocities_kernel<<<gridSize, blockSize>>>(gsystem.N * localTrajId(traj), gsystem.N, coef);
}
}
void exchangeReplicas(int i, int j) {
// Actually exchange replicas/trajectories on CPU and GPU
float coef = sqrt(replica[i].T/replica[j].T);
normalizeVelocities(i, 1.0f/coef);
normalizeVelocities(j, coef);
std::swap(replica[i], replica[j]);
}
void broadcastExchangeInfo(int i, int j) {
#ifdef USE_MPI
int size = MPI::COMM_WORLD.Get_size();
for (int dst_rank = 1; dst_rank < size; dst_rank++) {
if (MPI_Send(&i, 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
DIE("Unable to send traj num to exchange: traj=%d dst_rank=%d", i, dst_rank);
if (MPI_Send(&j, 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
DIE("Unable to send traj num to exchange: traj=%d dst_rank=%d", j, dst_rank);
}
#endif
}
void broadcastEndOfExchangesMarker() {
#ifdef USE_MPI
int size = MPI::COMM_WORLD.Get_size();
int marker = -1;
for (int dst_rank = 1; dst_rank < size; dst_rank++)
if (MPI_Send(&marker, 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
DIE("Unable to send marker: dst_rank=%d", dst_rank);
#endif
}
void update() {
updatesCount++;
if (isHeatMode())
heatMode();
else
exchangeMode();
}
void heatMode() {
LOG << "update (heat mode)";
// Update temperatures of all replicas
for (int i = 0; i < numberOfTrajectories(); i++)
replica[i].T = double(updatesCount) / double(heatUpdatesLimit) * replica[i].Tmax;
// Update environment
refreshVars();
}
struct ReplicaTemperatureComp {
bool operator()(int l, int r) const {
return replica[l].T < replica[r].T;
}
};
void sendReceiveEnergy() {
#ifdef USE_MPI
int rank = MPI::COMM_WORLD.Get_rank();
int size = MPI::COMM_WORLD.Get_size();
MPI_Status status;
if (rank == 0) {
// Receive energies
for (int src_rank = 1; src_rank < size; src_rank++)
for (int traj = 0; traj < parameters.Ntr; traj++)
if (MPI_Recv(&replica[trajId(traj, src_rank)].E, 1, MPI_FLOAT, src_rank, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
DIE("Unable to receive energy: src_rank=%d traj=%d error_code=%d", src_rank, traj, status.MPI_ERROR);
} else {
// Send energies
for (int traj = 0; traj < parameters.Ntr; traj++)
if (MPI_Send(&localReplica[traj].E, 1, MPI_FLOAT, 0, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
DIE("Unable to send energy: traj=%d", traj);
}
#endif
}
void exchangeMode() {
LOG << "update (exchange mode)";
static bool checked = false;
if (!checked) {
checked = true;
if (updater.frequency % energy_output::updater.frequency != 0) {
DIE("REMD updater frequency (%d) is not a multiple of energy output manager frequency (%d)", updater.frequency, energy_output::updater.frequency);
}
}
// Compute local replicas energies
for (int i = 0; i < parameters.Ntr; i++)
localReplica[i].energySum += (localReplica[i].E = computeReplicaEnergy(i));
// Send/receive replicas energies
sendReceiveEnergy();
if (mpiRank() == 0) {
// Create sorted by replica temperature list of replica indices
std::vector<int> idx;
for (int i = 0, e = numberOfTrajectories(); i < e; i++)
idx.push_back(i);
std::sort(idx.begin(), idx.end(), ReplicaTemperatureComp());
// Attempt replica exchanges
int totalExchanges = 0, successfulExchanges = 0;
std::stringstream sxchg;
for (int n = int(ran2::ran2(&rseed) * 2); n < numberOfTrajectories() - 1; n += 2) {
int i = idx[n];
int j = idx[n + 1];
replica[i].exchangeAttempts++;
replica[j].exchangeAttempts++;
totalExchanges++;
double p = exchangeProbability(replica[i], replica[j]);
int success = (ran2::ran2(&rseed) < p);
if (success && !isExchangesDisabled) {
replica[i].successfulExchanges++;
replica[j].successfulExchanges++;
logExchangeReplicas(i, j);
broadcastExchangeInfo(i, j);
exchangeReplicas(i, j);
successfulExchanges++;
}
sxchg << " (" << i << ", " << j << ", " << p << ", " << success << ")";
}
broadcastEndOfExchangesMarker();
// Create replica_id -> replica map
std::map<int, Replica*> replicaIdx;
std::stringstream smap;
for (int i = 0; i < numberOfTrajectories(); i++) {
smap << " " << replica[i].id;
replicaIdx[replica[i].id] = &replica[i];
}
std::stringstream srates;
std::stringstream senergy;
for (int i = 0; i < numberOfTrajectories(); i++) {
srates << " " << float(replicaIdx[i]->successfulExchanges) / replicaIdx[i]->exchangeAttempts;
senergy << " " << (replicaIdx[i]->energySum / updatesCount) * KCALL_PER_KJ;
}
LOG << "Xchg:" << sxchg.str();
LOG << successfulExchanges << "/" << totalExchanges << " exchanges have been done";
LOG << "Map:" << smap.str();
LOG << "Rates:" << srates.str();
LOG << "AvgEnergy:" << senergy.str();
} else {
#ifdef USE_MPI
		// Receive list of traj numbers to exchange
MPI_Status status;
while (true) {
int i, j;
if (MPI_Recv(&i, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
DIE("Unable to receive the first traj num to exchange");
if (i == -1) // End-Of-Exchanges-List marker
break;
if (MPI_Recv(&j, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
DIE("Unable to receive the second traj num to exchange");
exchangeReplicas(i, j);
}
#endif
}
// Update environment
refreshVars();
}
void save(FILE* f) {
LOG << "restart:save";
fprintf(f, "%lu %d\n", updatesCount, rseed);
for (size_t i = 0; i < (size_t) numberOfTrajectories(); ++i) {
Replica& r = replica[i];
fprintf(f, "%.10e %.10e %.10e %d %d %d %.10e\n", r.Tmax, r.T, r.E, r.id, r.exchangeAttempts, r.successfulExchanges, r.energySum);
}
}
void load(FILE* f) {
LOG << "restart:load";
if (fscanf(f, " %lu %d ", &updatesCount, &rseed) != 2)
DIE("Loading restart from file: unable to get updatesCount and rseed");
printf("%lu %d\n", updatesCount, rseed);
for (size_t i = 0; i < (size_t) numberOfTrajectories(); ++i) {
Replica& r = replica[i];
int ret;
ret = fscanf(f, "%e %e %e %d %d %d %le ", &r.Tmax, &r.T, &r.E, &r.id, &r.exchangeAttempts, &r.successfulExchanges, &r.energySum);
if (ret != 7)
DIE("Loading restart from file: unable to load data for traj #%d", i);
printf("%.10e %.10e %.10e %d %d %d %.10e\n", r.Tmax, r.T, r.E, r.id, r.exchangeAttempts, r.successfulExchanges, r.energySum);
}
refreshVars();
}
#undef LOG
} // namespace replica_exchange
|
efd3090fd62f8ab27cb79bb811b93318fbf4b938.hip | // !!! This is a file automatically generated by hipify!!!
/**
* 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size */
#define NI 3
#define NJ 1024
#define NK 1024
#ifndef THREADS
#define THREADS 1024
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void conv3D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK -1; ++k) // 2
{
//printf("i:%d\nj:%d\nk:%d\n", i, j, k);
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
}
}
void init(DATA_TYPE* A)
{
int i, j, k;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
for (k = 0; k < NK; ++k)
{
A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, k, fail;
fail = 0;
// Compare result from cpu and gpu...
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK - 1; ++k) // 2
{
if (percentDiff(B[i*(NK * NJ) + j*NK + k], B_outputFromGpu[i*(NK * NJ) + j*NK + k]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void convolution3D_kernel(DATA_TYPE *A, DATA_TYPE *B, int i)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int k = tid % NK;
int j = tid / NK;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
if ((i < (NI-1)) && (j < (NJ-1)) && (k < (NK-1)) && (i > 0) && (j > 0) && (k > 0))
{
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
void convolution3DCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyHostToDevice);
hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyHostToDevice);
dim3 block(THREADS);
dim3 grid((size_t)(ceil( ((float)NK*(float)NJ) / ((float)block.x) )));
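	// One thread per (j, k) element of a single i-slice: with the default
	// THREADS = 1024 and NK = 1024, grid.x = NJ and each block handles one row j
	// (k = threadIdx.x in the kernel's tid -> (j, k) decomposition).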
t_start = rtclock();
int i;
for (i = 1; i < NI - 1; ++i) // 0
{
hipLaunchKernelGGL(( convolution3D_kernel), dim3(grid), dim3(block) , 0, 0, A_gpu, B_gpu, i);
}
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
hipMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(B_gpu);
}
int main(int argc, char *argv[])
{
//double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
init(A);
//GPU_argv_init();
convolution3DCuda(A, B, B_outputFromGpu);
//t_start = rtclock();
//conv3D(A, B);
//t_end = rtclock();
//fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
//compareResults(B, B_outputFromGpu);
free(A);
free(B);
free(B_outputFromGpu);
return 0;
}
efd3090fd62f8ab27cb79bb811b93318fbf4b938.cu
/**
* 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include "polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size */
#define NI 3
#define NJ 1024
#define NK 1024
#ifndef THREADS
#define THREADS 1024
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void conv3D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK -1; ++k) // 2
{
//printf("i:%d\nj:%d\nk:%d\n", i, j, k);
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
}
}
void init(DATA_TYPE* A)
{
int i, j, k;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
for (k = 0; k < NK; ++k)
{
A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, k, fail;
fail = 0;
// Compare result from cpu and gpu...
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK - 1; ++k) // 2
{
if (percentDiff(B[i*(NK * NJ) + j*NK + k], B_outputFromGpu[i*(NK * NJ) + j*NK + k]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void convolution3D_kernel(DATA_TYPE *A, DATA_TYPE *B, int i)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int k = tid % NK;
int j = tid / NK;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
if ((i < (NI-1)) && (j < (NJ-1)) && (k < (NK-1)) && (i > 0) && (j > 0) && (k > 0))
{
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
void convolution3DCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);
cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);
dim3 block(THREADS);
dim3 grid((size_t)(ceil( ((float)NK*(float)NJ) / ((float)block.x) )));
t_start = rtclock();
int i;
for (i = 1; i < NI - 1; ++i) // 0
{
convolution3D_kernel<<< grid, block >>>(A_gpu, B_gpu, i);
}
cudaThreadSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
cudaMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(B_gpu);
}
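// Note on convolution3DCuda above: cudaThreadSynchronize() is deprecated in current CUDA releases;
// cudaDeviceSynchronize() is the equivalent call, and the hipify-generated translation of this file
// (above) uses hipDeviceSynchronize() accordingly.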
int main(int argc, char *argv[])
{
//double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
init(A);
//GPU_argv_init();
convolution3DCuda(A, B, B_outputFromGpu);
//t_start = rtclock();
//conv3D(A, B);
//t_end = rtclock();
//fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
//compareResults(B, B_outputFromGpu);
free(A);
free(B);
free(B_outputFromGpu);
return 0;
}
|
2366b762600c1d101e2fdf86eb38983c71da699e.hip
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO ORC writer class implementation
*/
#include "writer_impl.hpp"
#include <io/utilities/column_utils.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <algorithm>
#include <cstring>
#include <numeric>
#include <utility>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
using namespace cudf::io::orc;
using namespace cudf::io;
struct row_group_index_info {
int32_t pos = -1; // Position
int32_t blk_pos = -1; // Block Position
int32_t comp_pos = -1; // Compressed Position
int32_t comp_size = -1; // Compressed size
};
namespace {
/**
* @brief Helper for pinned host memory
*/
template <typename T>
using pinned_buffer = std::unique_ptr<T, decltype(&hipHostFree)>;
/**
* @brief Function that translates GDF compression to ORC compression
*/
orc::CompressionKind to_orc_compression(compression_type compression)
{
switch (compression) {
case compression_type::AUTO:
case compression_type::SNAPPY: return orc::CompressionKind::SNAPPY;
case compression_type::NONE: return orc::CompressionKind::NONE;
default: CUDF_EXPECTS(false, "Unsupported compression type"); return orc::CompressionKind::NONE;
}
}
/**
* @brief Function that translates GDF dtype to ORC datatype
*/
constexpr orc::TypeKind to_orc_type(cudf::type_id id)
{
switch (id) {
case cudf::type_id::INT8: return TypeKind::BYTE;
case cudf::type_id::INT16: return TypeKind::SHORT;
case cudf::type_id::INT32: return TypeKind::INT;
case cudf::type_id::INT64: return TypeKind::LONG;
case cudf::type_id::FLOAT32: return TypeKind::FLOAT;
case cudf::type_id::FLOAT64: return TypeKind::DOUBLE;
case cudf::type_id::BOOL8: return TypeKind::BOOLEAN;
case cudf::type_id::TIMESTAMP_DAYS: return TypeKind::DATE;
case cudf::type_id::TIMESTAMP_SECONDS:
case cudf::type_id::TIMESTAMP_MICROSECONDS:
case cudf::type_id::TIMESTAMP_MILLISECONDS:
case cudf::type_id::TIMESTAMP_NANOSECONDS: return TypeKind::TIMESTAMP;
case cudf::type_id::STRING: return TypeKind::STRING;
default: return TypeKind::INVALID_TYPE_KIND;
}
}
/**
* @brief Function that translates time unit to nanoscale multiple
*/
template <typename T>
constexpr T to_clockscale(cudf::type_id timestamp_id)
{
switch (timestamp_id) {
case cudf::type_id::TIMESTAMP_SECONDS: return 9;
case cudf::type_id::TIMESTAMP_MILLISECONDS: return 6;
case cudf::type_id::TIMESTAMP_MICROSECONDS: return 3;
case cudf::type_id::TIMESTAMP_NANOSECONDS:
default: return 0;
}
}
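// Note on to_clockscale above: per its doc comment, the returned value is the decimal exponent that
// scales the column's time unit up to nanoseconds (seconds -> 9, milliseconds -> 6,
// microseconds -> 3, nanoseconds -> 0). It is stored per column as "clockscale" and later drives
// the timestamp handling (see the ts_scale mapping in gather_statistic_blobs).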
} // namespace
/**
* @brief Helper class that adds ORC-specific column info
*/
class orc_column_view {
public:
/**
* @brief Constructor that extracts out the string position + length pairs
* for building dictionaries for string columns
*/
explicit orc_column_view(size_t id,
size_t str_id,
column_view const &col,
const table_metadata *metadata,
rmm::cuda_stream_view stream)
: _id(id),
_str_id(str_id),
_is_string_type(col.type().id() == type_id::STRING),
_type_width(_is_string_type ? 0 : cudf::size_of(col.type())),
_data_count(col.size()),
_null_count(col.null_count()),
_nulls(col.null_mask()),
_clockscale(to_clockscale<uint8_t>(col.type().id())),
_type_kind(to_orc_type(col.type().id()))
{
// Generating default name if name isn't present in metadata
if (metadata && _id < metadata->column_names.size()) {
_name = metadata->column_names[_id];
} else {
_name = "_col" + std::to_string(_id);
}
}
auto is_string() const noexcept { return _is_string_type; }
void set_dict_stride(size_t stride) noexcept { dict_stride = stride; }
auto get_dict_stride() const noexcept { return dict_stride; }
/**
* @brief Function that associates an existing dictionary chunk allocation
*/
void attach_dict_chunk(gpu::DictionaryChunk *host_dict, gpu::DictionaryChunk *dev_dict)
{
dict = host_dict;
d_dict = dev_dict;
}
auto host_dict_chunk(size_t rowgroup) const
{
assert(_is_string_type);
return &dict[rowgroup * dict_stride + _str_id];
}
auto device_dict_chunk() const { return d_dict; }
/**
* @brief Function that associates an existing stripe dictionary allocation
*/
void attach_stripe_dict(gpu::StripeDictionary *host_stripe_dict,
gpu::StripeDictionary *dev_stripe_dict)
{
stripe_dict = host_stripe_dict;
d_stripe_dict = dev_stripe_dict;
}
auto host_stripe_dict(size_t stripe) const
{
assert(_is_string_type);
return &stripe_dict[stripe * dict_stride + _str_id];
}
auto device_stripe_dict() const { return d_stripe_dict; }
auto id() const noexcept { return _id; }
size_t type_width() const noexcept { return _type_width; }
size_t data_count() const noexcept { return _data_count; }
size_t null_count() const noexcept { return _null_count; }
bool nullable() const noexcept { return (_nulls != nullptr); }
uint32_t const *nulls() const noexcept { return _nulls; }
uint8_t clockscale() const noexcept { return _clockscale; }
void set_orc_encoding(ColumnEncodingKind e) { _encoding_kind = e; }
auto orc_kind() const noexcept { return _type_kind; }
auto orc_encoding() const noexcept { return _encoding_kind; }
auto orc_name() const noexcept { return _name; }
private:
// Identifier within set of columns and string columns, respectively
size_t _id = 0;
size_t _str_id = 0;
bool _is_string_type = false;
size_t _type_width = 0;
size_t _data_count = 0;
size_t _null_count = 0;
uint32_t const *_nulls = nullptr;
uint8_t _clockscale = 0;
// ORC-related members
std::string _name{};
TypeKind _type_kind;
ColumnEncodingKind _encoding_kind;
// String dictionary-related members
size_t dict_stride = 0;
gpu::DictionaryChunk const *dict = nullptr;
gpu::StripeDictionary const *stripe_dict = nullptr;
gpu::DictionaryChunk *d_dict = nullptr;
gpu::StripeDictionary *d_stripe_dict = nullptr;
};
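// Note on orc_column_view above: it caches the per-column information the writer needs repeatedly --
// column/string-column ids, ORC TypeKind and encoding, element width, row and null counts, the null
// mask, the timestamp clockscale, and (for string columns) pointers into the shared per-rowgroup
// DictionaryChunk and per-stripe StripeDictionary arrays plus the stride used to index them.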
std::vector<stripe_rowgroups> writer::impl::gather_stripe_info(
host_span<orc_column_view const> columns, size_t num_rowgroups)
{
auto const is_any_column_string =
std::any_of(columns.begin(), columns.end(), [](auto const &col) { return col.is_string(); });
// Apply rows per stripe limit to limit string dictionaries
size_t const max_stripe_rows = is_any_column_string ? 1000000 : 5000000;
std::vector<stripe_rowgroups> infos;
for (size_t rowgroup = 0, stripe_start = 0, stripe_size = 0; rowgroup < num_rowgroups;
++rowgroup) {
auto const rowgroup_size =
std::accumulate(columns.begin(), columns.end(), 0ul, [&](size_t total_size, auto const &col) {
if (col.is_string()) {
const auto dt = col.host_dict_chunk(rowgroup);
return total_size + row_index_stride_ + dt->string_char_count;
} else {
return total_size + col.type_width() * row_index_stride_;
}
});
if ((rowgroup > stripe_start) &&
(stripe_size + rowgroup_size > max_stripe_size_ ||
(rowgroup + 1 - stripe_start) * row_index_stride_ > max_stripe_rows)) {
infos.emplace_back(infos.size(), stripe_start, rowgroup - stripe_start);
stripe_start = rowgroup;
stripe_size = 0;
}
stripe_size += rowgroup_size;
if (rowgroup + 1 == num_rowgroups) {
infos.emplace_back(infos.size(), stripe_start, num_rowgroups - stripe_start);
}
}
return infos;
}
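// Note on gather_stripe_info above: stripe boundaries are chosen from estimated uncompressed sizes.
// Each rowgroup contributes type_width() * row_index_stride_ bytes for fixed-width columns and
// row_index_stride_ + the rowgroup's character count for string columns; a new stripe starts once
// the running total exceeds max_stripe_size_ or the stripe would exceed 1,000,000 rows (5,000,000
// rows when no string column is present).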
void writer::impl::init_dictionaries(const table_device_view &view,
orc_column_view *columns,
std::vector<int> const &str_col_ids,
device_span<size_type> d_str_col_ids,
uint32_t *dict_data,
uint32_t *dict_index,
hostdevice_vector<gpu::DictionaryChunk> *dict)
{
const size_t num_rowgroups = dict->size() / str_col_ids.size();
// Setup per-rowgroup dictionary indexes for each dictionary-aware column
for (size_t i = 0; i < str_col_ids.size(); ++i) {
auto &str_column = columns[str_col_ids[i]];
str_column.set_dict_stride(str_col_ids.size());
str_column.attach_dict_chunk(dict->host_ptr(), dict->device_ptr());
}
gpu::InitDictionaryIndices(view,
dict->device_ptr(),
dict_data,
dict_index,
row_index_stride_,
d_str_col_ids.data(),
d_str_col_ids.size(),
num_rowgroups,
stream);
dict->device_to_host(stream, true);
}
void writer::impl::build_dictionaries(orc_column_view *columns,
std::vector<int> const &str_col_ids,
host_span<stripe_rowgroups const> stripe_bounds,
hostdevice_vector<gpu::DictionaryChunk> const &dict,
uint32_t *dict_index,
hostdevice_vector<gpu::StripeDictionary> &stripe_dict)
{
const auto num_rowgroups = dict.size() / str_col_ids.size();
for (size_t col_idx = 0; col_idx < str_col_ids.size(); ++col_idx) {
auto &str_column = columns[str_col_ids[col_idx]];
str_column.attach_stripe_dict(stripe_dict.host_ptr(), stripe_dict.device_ptr());
for (auto const &stripe : stripe_bounds) {
auto &sd = stripe_dict[stripe.id * str_col_ids.size() + col_idx];
sd.dict_data = str_column.host_dict_chunk(stripe.first)->dict_data;
sd.dict_index = dict_index + col_idx * str_column.data_count(); // Indexed by abs row
sd.column_id = str_col_ids[col_idx];
sd.start_chunk = stripe.first;
sd.num_chunks = stripe.size;
sd.dict_char_count = 0;
sd.num_strings =
std::accumulate(stripe.cbegin(), stripe.cend(), 0, [&](auto dt_str_cnt, auto rg_idx) {
const auto &dt = dict[rg_idx * str_col_ids.size() + col_idx];
return dt_str_cnt + dt.num_dict_strings;
});
sd.leaf_column = dict[col_idx].leaf_column;
}
if (enable_dictionary_) {
struct string_column_cost {
size_t direct = 0;
size_t dictionary = 0;
};
auto const col_cost =
std::accumulate(stripe_bounds.front().cbegin(),
stripe_bounds.back().cend(),
string_column_cost{},
[&](auto cost, auto rg_idx) -> string_column_cost {
const auto &dt = dict[rg_idx * str_col_ids.size() + col_idx];
return {cost.direct + dt.string_char_count,
cost.dictionary + dt.dict_char_count + dt.num_dict_strings};
});
// Disable dictionary if it does not reduce the output size
if (col_cost.dictionary >= col_cost.direct) {
for (auto const &stripe : stripe_bounds) {
stripe_dict[stripe.id * str_col_ids.size() + col_idx].dict_data = nullptr;
}
}
}
}
stripe_dict.host_to_device(stream);
gpu::BuildStripeDictionaries(stripe_dict.device_ptr(),
stripe_dict.host_ptr(),
dict.device_ptr(),
stripe_bounds.size(),
num_rowgroups,
str_col_ids.size(),
stream);
stripe_dict.device_to_host(stream, true);
}
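// Note on build_dictionaries above: for every (stripe, string column) pair the rowgroup-level
// DictionaryChunks are aggregated into a StripeDictionary. When dictionaries are enabled, the total
// dictionary cost (distinct characters + one length entry per distinct string, summed over all
// rowgroups) is compared against the direct cost (all characters); if the dictionary would not
// shrink the output, dict_data is cleared for every stripe so the column is written with direct
// encoding. The stripe dictionaries themselves are then built on the GPU via
// gpu::BuildStripeDictionaries.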
orc_streams writer::impl::create_streams(host_span<orc_column_view> columns,
host_span<stripe_rowgroups const> stripe_bounds)
{
// First n + 1 streams are row index streams, including 'column 0'
std::vector<Stream> streams{{ROW_INDEX, 0, 0}}; // TODO: Separate index and data streams?
streams.resize(columns.size() + 1);
std::vector<int32_t> ids(columns.size() * gpu::CI_NUM_STREAMS, -1);
for (auto &column : columns) {
TypeKind kind = column.orc_kind();
StreamKind data_kind = DATA;
StreamKind data2_kind = LENGTH;
ColumnEncodingKind encoding_kind = DIRECT;
int64_t present_stream_size = 0;
int64_t data_stream_size = 0;
int64_t data2_stream_size = 0;
int64_t dict_stream_size = 0;
auto const is_nullable = [&]() {
if (single_write_mode) {
return column.nullable();
} else {
return (column.id() < user_metadata_with_nullability.column_nullable.size())
? user_metadata_with_nullability.column_nullable[column.id()]
: true;
}
}();
if (is_nullable) {
present_stream_size = ((row_index_stride_ + 7) >> 3);
present_stream_size += (present_stream_size + 0x7f) >> 7;
}
switch (kind) {
case TypeKind::BOOLEAN:
data_stream_size = div_rowgroups_by<int64_t>(1024) * (128 + 1);
encoding_kind = DIRECT;
break;
case TypeKind::BYTE:
data_stream_size = div_rowgroups_by<int64_t>(128) * (128 + 1);
encoding_kind = DIRECT;
break;
case TypeKind::SHORT:
data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 2 + 2);
encoding_kind = DIRECT_V2;
break;
case TypeKind::FLOAT:
// Pass through if no nulls (no RLE encoding for floating point)
data_stream_size =
(column.null_count() != 0) ? div_rowgroups_by<int64_t>(512) * (512 * 4 + 2) : INT64_C(-1);
encoding_kind = DIRECT;
break;
case TypeKind::INT:
case TypeKind::DATE:
data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2);
encoding_kind = DIRECT_V2;
break;
case TypeKind::DOUBLE:
// Pass through if no nulls (no RLE encoding for floating point)
data_stream_size =
(column.null_count() != 0) ? div_rowgroups_by<int64_t>(512) * (512 * 8 + 2) : INT64_C(-1);
encoding_kind = DIRECT;
break;
case TypeKind::LONG:
data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 8 + 2);
encoding_kind = DIRECT_V2;
break;
case TypeKind::STRING: {
bool enable_dict = enable_dictionary_;
size_t dict_data_size = 0;
size_t dict_strings = 0;
size_t dict_lengths_div512 = 0;
for (auto const &stripe : stripe_bounds) {
const auto sd = column.host_stripe_dict(stripe.id);
enable_dict = (enable_dict && sd->dict_data != nullptr);
if (enable_dict) {
dict_strings += sd->num_strings;
dict_lengths_div512 += (sd->num_strings + 0x1ff) >> 9;
dict_data_size += sd->dict_char_count;
}
}
auto const direct_data_size =
std::accumulate(stripe_bounds.front().cbegin(),
stripe_bounds.back().cend(),
size_t{0},
[&](auto data_size, auto rg_idx) {
return data_size + column.host_dict_chunk(rg_idx)->string_char_count;
});
if (enable_dict) {
uint32_t dict_bits = 0;
for (dict_bits = 1; dict_bits < 32; dict_bits <<= 1) {
if (dict_strings <= (1ull << dict_bits)) break;
}
const auto valid_count = column.data_count() - column.null_count();
dict_data_size += (dict_bits * valid_count + 7) >> 3;
}
// Decide between direct or dictionary encoding
if (enable_dict && dict_data_size < direct_data_size) {
data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2);
data2_stream_size = dict_lengths_div512 * (512 * 4 + 2);
dict_stream_size = std::max<size_t>(dict_data_size, 1);
encoding_kind = DICTIONARY_V2;
} else {
data_stream_size = std::max<size_t>(direct_data_size, 1);
data2_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2);
encoding_kind = DIRECT_V2;
}
break;
}
case TypeKind::TIMESTAMP:
data_stream_size = ((row_index_stride_ + 0x1ff) >> 9) * (512 * 4 + 2);
data2_stream_size = data_stream_size;
data2_kind = SECONDARY;
encoding_kind = DIRECT_V2;
break;
default: CUDF_FAIL("Unsupported ORC type kind");
}
// Initialize the column's metadata (this is the only reason columns is in/out param)
column.set_orc_encoding(encoding_kind);
// Initialize the column's index stream
const auto id = static_cast<uint32_t>(1 + column.id());
streams[id].column = id;
streams[id].kind = ROW_INDEX;
streams[id].length = 0;
// Initialize the column's data stream(s)
const auto base = column.id() * gpu::CI_NUM_STREAMS;
if (present_stream_size != 0) {
auto len = static_cast<uint64_t>(present_stream_size);
ids[base + gpu::CI_PRESENT] = streams.size();
streams.push_back(orc::Stream{PRESENT, id, len});
}
if (data_stream_size != 0) {
auto len = static_cast<uint64_t>(std::max<int64_t>(data_stream_size, 0));
ids[base + gpu::CI_DATA] = streams.size();
streams.push_back(orc::Stream{data_kind, id, len});
}
if (data2_stream_size != 0) {
auto len = static_cast<uint64_t>(std::max<int64_t>(data2_stream_size, 0));
ids[base + gpu::CI_DATA2] = streams.size();
streams.push_back(orc::Stream{data2_kind, id, len});
}
if (dict_stream_size != 0) {
auto len = static_cast<uint64_t>(dict_stream_size);
ids[base + gpu::CI_DICTIONARY] = streams.size();
streams.push_back(orc::Stream{DICTIONARY_DATA, id, len});
}
}
return {std::move(streams), std::move(ids)};
}
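// Note on create_streams above: entries 0..num_columns of the returned stream list are ROW_INDEX
// streams (index 0 being the synthetic struct root); present/data/secondary/dictionary streams are
// appended after them with per-rowgroup size estimates. The companion ids vector is indexed as
// column_id * gpu::CI_NUM_STREAMS + stream_type and holds that stream's position in the list, or -1
// when absent. Illustrative lookup (as used later in encode_columns):
//   auto present_strm = streams.id(col * gpu::CI_NUM_STREAMS + gpu::CI_PRESENT);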
orc_streams::orc_stream_offsets orc_streams::compute_offsets(
host_span<orc_column_view const> columns, size_t num_rowgroups) const
{
std::vector<size_t> strm_offsets(streams.size());
size_t str_data_size = 0;
size_t rle_data_size = 0;
for (size_t i = 0; i < streams.size(); ++i) {
const auto &stream = streams[i];
const auto &column = columns[stream.column - 1];
if (((stream.kind == DICTIONARY_DATA || stream.kind == LENGTH) &&
(column.orc_encoding() == DICTIONARY_V2)) ||
((stream.kind == DATA) &&
(column.orc_kind() == TypeKind::STRING && column.orc_encoding() == DIRECT_V2))) {
strm_offsets[i] = str_data_size;
str_data_size += stream.length;
} else {
strm_offsets[i] = rle_data_size;
rle_data_size += (stream.length * num_rowgroups + 7) & ~7;
}
}
str_data_size = (str_data_size + 7) & ~7;
return {std::move(strm_offsets), str_data_size, rle_data_size};
}
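// Note on compute_offsets above: character data (dictionary contents, DICTIONARY_V2 lengths, and
// DATA streams of DIRECT_V2 string columns) is laid out once at the front of the encode buffer,
// while every other stream reserves stream.length bytes per rowgroup, rounded up to an 8-byte
// multiple; the returned struct carries the per-stream offsets plus both running totals.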
struct segmented_valid_cnt_input {
bitmask_type const *mask;
std::vector<size_type> indices;
};
encoded_data writer::impl::encode_columns(const table_device_view &view,
host_span<orc_column_view const> columns,
std::vector<int> const &str_col_ids,
host_span<stripe_rowgroups const> stripe_bounds,
orc_streams const &streams)
{
auto const num_columns = columns.size();
auto const num_rowgroups = stripes_size(stripe_bounds);
hostdevice_2dvector<gpu::EncChunk> chunks(num_columns, num_rowgroups, stream);
hostdevice_2dvector<gpu::encoder_chunk_streams> chunk_streams(num_columns, num_rowgroups, stream);
auto const stream_offsets = streams.compute_offsets(columns, num_rowgroups);
rmm::device_uvector<uint8_t> encoded_data(stream_offsets.data_size(), stream);
// Initialize column chunks' descriptions
std::map<size_type, segmented_valid_cnt_input> validity_check_inputs;
for (auto const &column : columns) {
for (auto const &stripe : stripe_bounds) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) {
auto const rg_idx = *rg_idx_it;
auto &ck = chunks[column.id()][rg_idx];
ck.start_row = (rg_idx * row_index_stride_);
ck.num_rows = std::min<uint32_t>(row_index_stride_, column.data_count() - ck.start_row);
ck.encoding_kind = column.orc_encoding();
ck.type_kind = column.orc_kind();
if (ck.type_kind == TypeKind::STRING) {
ck.dict_index = (ck.encoding_kind == DICTIONARY_V2)
? column.host_stripe_dict(stripe.id)->dict_index
: nullptr;
ck.dtype_len = 1;
} else {
ck.dtype_len = column.type_width();
}
ck.scale = column.clockscale();
// Only need to check row groups that end within the stripe
}
}
}
auto validity_check_indices = [&](size_t col_idx) {
std::vector<size_type> indices;
for (auto const &stripe : stripe_bounds) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend() - 1; ++rg_idx_it) {
auto const &chunk = chunks[col_idx][*rg_idx_it];
indices.push_back(chunk.start_row);
indices.push_back(chunk.start_row + chunk.num_rows);
}
}
return indices;
};
for (auto const &column : columns) {
if (column.orc_kind() == TypeKind::BOOLEAN && column.nullable()) {
validity_check_inputs[column.id()] = {column.nulls(), validity_check_indices(column.id())};
}
}
for (auto &cnt_in : validity_check_inputs) {
auto const valid_counts = segmented_count_set_bits(cnt_in.second.mask, cnt_in.second.indices);
CUDF_EXPECTS(std::none_of(valid_counts.cbegin(),
valid_counts.cend(),
[](auto valid_count) { return valid_count % 8; }),
"There's currently a bug in encoding boolean columns. Suggested workaround "
"is to convert "
"to "
"int8 type. Please see https://github.com/rapidsai/cudf/issues/6763 for "
"more information.");
}
for (size_t col_idx = 0; col_idx < num_columns; col_idx++) {
auto const &column = columns[col_idx];
auto col_streams = chunk_streams[col_idx];
for (auto const &stripe : stripe_bounds) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) {
auto const rg_idx = *rg_idx_it;
auto const &ck = chunks[col_idx][rg_idx];
auto &strm = col_streams[rg_idx];
for (int strm_type = 0; strm_type < gpu::CI_NUM_STREAMS; ++strm_type) {
auto const strm_id = streams.id(col_idx * gpu::CI_NUM_STREAMS + strm_type);
strm.ids[strm_type] = strm_id;
if (strm_id >= 0) {
if ((strm_type == gpu::CI_DICTIONARY) ||
(strm_type == gpu::CI_DATA2 && ck.encoding_kind == DICTIONARY_V2)) {
if (rg_idx_it == stripe.cbegin()) {
const int32_t dict_stride = column.get_dict_stride();
const auto stripe_dict = column.host_stripe_dict(stripe.id);
strm.lengths[strm_type] =
(strm_type == gpu::CI_DICTIONARY)
? stripe_dict->dict_char_count
: (((stripe_dict->num_strings + 0x1ff) >> 9) * (512 * 4 + 2));
if (stripe.id == 0) {
strm.data_ptrs[strm_type] = encoded_data.data() + stream_offsets.offsets[strm_id];
} else {
auto const &strm_up = col_streams[stripe_dict[-dict_stride].start_chunk];
strm.data_ptrs[strm_type] =
strm_up.data_ptrs[strm_type] + strm_up.lengths[strm_type];
}
} else {
strm.lengths[strm_type] = 0;
strm.data_ptrs[strm_type] = col_streams[rg_idx - 1].data_ptrs[strm_type];
}
} else if (strm_type == gpu::CI_DATA && ck.type_kind == TypeKind::STRING &&
ck.encoding_kind == DIRECT_V2) {
strm.lengths[strm_type] = column.host_dict_chunk(rg_idx)->string_char_count;
auto const &prev_strm = col_streams[rg_idx - 1];
strm.data_ptrs[strm_type] =
(rg_idx == 0) ? encoded_data.data() + stream_offsets.offsets[strm_id]
: (prev_strm.data_ptrs[strm_type] + prev_strm.lengths[strm_type]);
} else if (strm_type == gpu::CI_DATA && streams[strm_id].length == 0 &&
(ck.type_kind == DOUBLE || ck.type_kind == FLOAT)) {
// Pass-through
strm.lengths[strm_type] = ck.num_rows * ck.dtype_len;
strm.data_ptrs[strm_type] = nullptr;
} else {
strm.lengths[strm_type] = streams[strm_id].length;
strm.data_ptrs[strm_type] = encoded_data.data() + stream_offsets.str_data_size +
stream_offsets.offsets[strm_id] +
streams[strm_id].length * rg_idx;
}
} else {
strm.lengths[strm_type] = 0;
strm.data_ptrs[strm_type] = nullptr;
}
}
}
}
}
chunks.host_to_device(stream);
chunk_streams.host_to_device(stream);
gpu::set_chunk_columns(view, chunks, stream);
if (!str_col_ids.empty()) {
auto d_stripe_dict = columns[str_col_ids[0]].device_stripe_dict();
gpu::EncodeStripeDictionaries(
d_stripe_dict, chunks, str_col_ids.size(), stripe_bounds.size(), chunk_streams, stream);
}
gpu::EncodeOrcColumnData(chunks, chunk_streams, stream);
stream.synchronize();
return {std::move(encoded_data), std::move(chunk_streams)};
}
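// Note on encode_columns above: it fills one gpu::EncChunk per (column, rowgroup), wires each
// chunk's stream pointers into the single encoded_data buffer (string/dictionary streams are
// chained across rowgroups, fixed-size RLE streams use offset + length * rowgroup), rejects
// nullable BOOLEAN columns whose per-rowgroup valid counts are not byte-aligned (see issue 6763),
// and then runs gpu::EncodeStripeDictionaries and gpu::EncodeOrcColumnData to produce the streams.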
std::vector<StripeInformation> writer::impl::gather_stripes(
size_t num_rows,
size_t num_index_streams,
host_span<stripe_rowgroups const> stripe_bounds,
hostdevice_2dvector<gpu::encoder_chunk_streams> *enc_streams,
hostdevice_2dvector<gpu::StripeStream> *strm_desc)
{
std::vector<StripeInformation> stripes(stripe_bounds.size());
for (auto const &stripe : stripe_bounds) {
for (size_t col_idx = 0; col_idx < enc_streams->size().first; col_idx++) {
const auto &strm = (*enc_streams)[col_idx][stripe.first];
// Assign stream data of column data stream(s)
for (int k = 0; k < gpu::CI_INDEX; k++) {
const auto stream_id = strm.ids[k];
if (stream_id != -1) {
auto *ss = &(*strm_desc)[stripe.id][stream_id - num_index_streams];
ss->stream_size = 0;
ss->first_chunk_id = stripe.first;
ss->num_chunks = stripe.size;
ss->column_id = col_idx;
ss->stream_type = k;
}
}
}
auto const stripe_group_end = *stripe.cend();
auto const stripe_end = std::min(stripe_group_end * row_index_stride_, num_rows);
stripes[stripe.id].numberOfRows = stripe_end - stripe.first * row_index_stride_;
}
strm_desc->host_to_device(stream);
gpu::CompactOrcDataStreams(*strm_desc, *enc_streams, stream);
strm_desc->device_to_host(stream);
enc_streams->device_to_host(stream, true);
return stripes;
}
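// Note on gather_stripes above: it fills one gpu::StripeStream descriptor per (stripe, data stream)
// recording the first rowgroup chunk, chunk count, owning column and stream type, derives each
// stripe's numberOfRows from its rowgroup range, and then calls gpu::CompactOrcDataStreams to pack
// the per-rowgroup pieces into contiguous per-stripe streams.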
std::vector<std::vector<uint8_t>> writer::impl::gather_statistic_blobs(
const table_device_view &table,
host_span<orc_column_view const> columns,
host_span<stripe_rowgroups const> stripe_bounds)
{
auto const num_rowgroups = stripes_size(stripe_bounds);
size_t num_stat_blobs = (1 + stripe_bounds.size()) * columns.size();
size_t num_chunks = num_rowgroups * columns.size();
std::vector<std::vector<uint8_t>> stat_blobs(num_stat_blobs);
hostdevice_vector<stats_column_desc> stat_desc(columns.size(), stream);
hostdevice_vector<statistics_merge_group> stat_merge(num_stat_blobs, stream);
rmm::device_uvector<statistics_chunk> stat_chunks(num_chunks + num_stat_blobs, stream);
rmm::device_uvector<statistics_group> stat_groups(num_chunks, stream);
for (auto const &column : columns) {
stats_column_desc *desc = &stat_desc[column.id()];
switch (column.orc_kind()) {
case TypeKind::BYTE: desc->stats_dtype = dtype_int8; break;
case TypeKind::SHORT: desc->stats_dtype = dtype_int16; break;
case TypeKind::INT: desc->stats_dtype = dtype_int32; break;
case TypeKind::LONG: desc->stats_dtype = dtype_int64; break;
case TypeKind::FLOAT: desc->stats_dtype = dtype_float32; break;
case TypeKind::DOUBLE: desc->stats_dtype = dtype_float64; break;
case TypeKind::BOOLEAN: desc->stats_dtype = dtype_bool; break;
case TypeKind::DATE: desc->stats_dtype = dtype_int32; break;
case TypeKind::TIMESTAMP: desc->stats_dtype = dtype_timestamp64; break;
case TypeKind::STRING: desc->stats_dtype = dtype_string; break;
default: desc->stats_dtype = dtype_none; break;
}
desc->num_rows = column.data_count();
desc->num_values = column.data_count();
if (desc->stats_dtype == dtype_timestamp64) {
// Timestamp statistics are in milliseconds
switch (column.clockscale()) {
case 9: desc->ts_scale = 1000; break;
case 6: desc->ts_scale = 0; break;
case 3: desc->ts_scale = -1000; break;
case 0: desc->ts_scale = -1000000; break;
default: desc->ts_scale = 0; break;
}
} else {
desc->ts_scale = 0;
}
for (auto const &stripe : stripe_bounds) {
auto grp = &stat_merge[column.id() * stripe_bounds.size() + stripe.id];
grp->col = stat_desc.device_ptr(column.id());
grp->start_chunk = static_cast<uint32_t>(column.id() * num_rowgroups + stripe.first);
grp->num_chunks = stripe.size;
}
statistics_merge_group *col_stats =
&stat_merge[stripe_bounds.size() * columns.size() + column.id()];
col_stats->col = stat_desc.device_ptr(column.id());
col_stats->start_chunk = static_cast<uint32_t>(column.id() * stripe_bounds.size());
col_stats->num_chunks = static_cast<uint32_t>(stripe_bounds.size());
}
stat_desc.host_to_device(stream);
stat_merge.host_to_device(stream);
rmm::device_uvector<column_device_view> leaf_column_views =
create_leaf_column_device_views<stats_column_desc>(stat_desc, table, stream);
gpu::orc_init_statistics_groups(stat_groups.data(),
stat_desc.device_ptr(),
columns.size(),
num_rowgroups,
row_index_stride_,
stream);
GatherColumnStatistics(stat_chunks.data(), stat_groups.data(), num_chunks, stream);
MergeColumnStatistics(stat_chunks.data() + num_chunks,
stat_chunks.data(),
stat_merge.device_ptr(),
stripe_bounds.size() * columns.size(),
stream);
MergeColumnStatistics(stat_chunks.data() + num_chunks + stripe_bounds.size() * columns.size(),
stat_chunks.data() + num_chunks,
stat_merge.device_ptr(stripe_bounds.size() * columns.size()),
columns.size(),
stream);
gpu::orc_init_statistics_buffersize(
stat_merge.device_ptr(), stat_chunks.data() + num_chunks, num_stat_blobs, stream);
stat_merge.device_to_host(stream, true);
hostdevice_vector<uint8_t> blobs(
stat_merge[num_stat_blobs - 1].start_chunk + stat_merge[num_stat_blobs - 1].num_chunks, stream);
gpu::orc_encode_statistics(blobs.device_ptr(),
stat_merge.device_ptr(),
stat_chunks.data() + num_chunks,
num_stat_blobs,
stream);
stat_merge.device_to_host(stream);
blobs.device_to_host(stream, true);
for (size_t i = 0; i < num_stat_blobs; i++) {
const uint8_t *stat_begin = blobs.host_ptr(stat_merge[i].start_chunk);
const uint8_t *stat_end = stat_begin + stat_merge[i].num_chunks;
stat_blobs[i].assign(stat_begin, stat_end);
}
return stat_blobs;
}
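// Note on gather_statistic_blobs above: statistics are built in three levels -- one chunk per
// (column, rowgroup), merged per (column, stripe), and merged again per column for the file footer.
// For timestamps, ts_scale converts the column's resolution to the milliseconds used by the
// statistics. The merged groups are serialized into protobuf blobs on the GPU and returned as byte
// vectors: stripe-level blobs (grouped by column) followed by one file-level blob per column.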
void writer::impl::write_index_stream(int32_t stripe_id,
int32_t stream_id,
host_span<orc_column_view const> columns,
stripe_rowgroups const &rowgroups_range,
host_2dspan<gpu::encoder_chunk_streams const> enc_streams,
host_2dspan<gpu::StripeStream const> strm_desc,
host_span<gpu_inflate_status_s const> comp_out,
StripeInformation *stripe,
orc_streams *streams,
ProtobufWriter *pbw)
{
row_group_index_info present;
row_group_index_info data;
row_group_index_info data2;
auto kind = TypeKind::STRUCT;
auto const column_id = stream_id - 1;
auto find_record = [=, &strm_desc](gpu::encoder_chunk_streams const &stream,
gpu::StreamIndexType type) {
row_group_index_info record;
if (stream.ids[type] > 0) {
record.pos = 0;
if (compression_kind_ != NONE) {
auto const &ss = strm_desc[stripe_id][stream.ids[type] - (columns.size() + 1)];
record.blk_pos = ss.first_block;
record.comp_pos = 0;
record.comp_size = ss.stream_size;
}
}
return record;
};
auto scan_record = [=, &comp_out](gpu::encoder_chunk_streams const &stream,
gpu::StreamIndexType type,
row_group_index_info &record) {
if (record.pos >= 0) {
record.pos += stream.lengths[type];
while ((record.pos >= 0) && (record.blk_pos >= 0) &&
(static_cast<size_t>(record.pos) >= compression_blocksize_) &&
(record.comp_pos + 3 + comp_out[record.blk_pos].bytes_written <
static_cast<size_t>(record.comp_size))) {
record.pos -= compression_blocksize_;
record.comp_pos += 3 + comp_out[record.blk_pos].bytes_written;
record.blk_pos += 1;
}
}
};
// TBD: Not sure we need an empty index stream for column 0
if (stream_id != 0) {
const auto &strm = enc_streams[column_id][0];
present = find_record(strm, gpu::CI_PRESENT);
data = find_record(strm, gpu::CI_DATA);
data2 = find_record(strm, gpu::CI_DATA2);
// Change string dictionary to int from index point of view
kind = columns[column_id].orc_kind();
if (kind == TypeKind::STRING && columns[column_id].orc_encoding() == DICTIONARY_V2) {
kind = TypeKind::INT;
}
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
// Add row index entries
std::for_each(rowgroups_range.cbegin(), rowgroups_range.cend(), [&](auto rowgroup) {
pbw->put_row_index_entry(
present.comp_pos, present.pos, data.comp_pos, data.pos, data2.comp_pos, data2.pos, kind);
if (stream_id != 0) {
const auto &strm = enc_streams[column_id][rowgroup];
scan_record(strm, gpu::CI_PRESENT, present);
scan_record(strm, gpu::CI_DATA, data);
scan_record(strm, gpu::CI_DATA2, data2);
}
});
(*streams)[stream_id].length = buffer_.size();
if (compression_kind_ != NONE) {
uint32_t uncomp_ix_len = (uint32_t)((*streams)[stream_id].length - 3) * 2 + 1;
buffer_[0] = static_cast<uint8_t>(uncomp_ix_len >> 0);
buffer_[1] = static_cast<uint8_t>(uncomp_ix_len >> 8);
buffer_[2] = static_cast<uint8_t>(uncomp_ix_len >> 16);
}
out_sink_->host_write(buffer_.data(), buffer_.size());
stripe->indexLength += buffer_.size();
}
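// Note on write_index_stream above: one ROW_INDEX stream is written per column (plus the empty
// stream 0 for the struct root). For every rowgroup it records the current positions inside the
// PRESENT, DATA and secondary streams; with compression enabled it additionally tracks the
// compressed-block index and the offset inside that block, advancing past each block's 3-byte
// header using the sizes reported in comp_out.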
void writer::impl::write_data_stream(gpu::StripeStream const &strm_desc,
gpu::encoder_chunk_streams const &enc_stream,
uint8_t const *compressed_data,
uint8_t *stream_out,
StripeInformation *stripe,
orc_streams *streams)
{
const auto length = strm_desc.stream_size;
(*streams)[enc_stream.ids[strm_desc.stream_type]].length = length;
if (length != 0) {
const auto *stream_in = (compression_kind_ == NONE)
? enc_stream.data_ptrs[strm_desc.stream_type]
: (compressed_data + strm_desc.bfr_offset);
CUDA_TRY(
hipMemcpyAsync(stream_out, stream_in, length, hipMemcpyDeviceToHost, stream.value()));
stream.synchronize();
out_sink_->host_write(stream_out, length);
}
stripe->dataLength += length;
}
void writer::impl::add_uncompressed_block_headers(std::vector<uint8_t> &v)
{
if (compression_kind_ != NONE) {
size_t uncomp_len = v.size() - 3, pos = 0, block_len;
while (uncomp_len > compression_blocksize_) {
block_len = compression_blocksize_ * 2 + 1;
v[pos + 0] = static_cast<uint8_t>(block_len >> 0);
v[pos + 1] = static_cast<uint8_t>(block_len >> 8);
v[pos + 2] = static_cast<uint8_t>(block_len >> 16);
pos += 3 + compression_blocksize_;
v.insert(v.begin() + pos, 3, 0);
uncomp_len -= compression_blocksize_;
}
block_len = uncomp_len * 2 + 1;
v[pos + 0] = static_cast<uint8_t>(block_len >> 0);
v[pos + 1] = static_cast<uint8_t>(block_len >> 8);
v[pos + 2] = static_cast<uint8_t>(block_len >> 16);
}
}
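// Note on add_uncompressed_block_headers above: when compression is enabled, every buffered
// protobuf section is split into compression_blocksize_ chunks, each prefixed with a 3-byte ORC
// block header; block_len = length * 2 + 1 stores the length shifted left by one with the low bit
// set, which marks the block as stored uncompressed ("original") in the ORC format.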
writer::impl::impl(std::unique_ptr<data_sink> sink,
orc_writer_options const &options,
SingleWriteMode mode,
rmm::mr::device_memory_resource *mr,
rmm::cuda_stream_view stream)
: compression_kind_(to_orc_compression(options.get_compression())),
enable_statistics_(options.enable_statistics()),
out_sink_(std::move(sink)),
single_write_mode(mode == SingleWriteMode::YES),
user_metadata(options.get_metadata()),
stream(stream),
_mr(mr)
{
init_state();
}
writer::impl::impl(std::unique_ptr<data_sink> sink,
chunked_orc_writer_options const &options,
SingleWriteMode mode,
rmm::mr::device_memory_resource *mr,
rmm::cuda_stream_view stream)
: compression_kind_(to_orc_compression(options.get_compression())),
enable_statistics_(options.enable_statistics()),
out_sink_(std::move(sink)),
single_write_mode(mode == SingleWriteMode::YES),
stream(stream),
_mr(mr)
{
if (options.get_metadata() != nullptr) {
user_metadata_with_nullability = *options.get_metadata();
user_metadata = &user_metadata_with_nullability;
}
init_state();
}
writer::impl::~impl() { close(); }
void writer::impl::init_state()
{
// Write file header
out_sink_->host_write(MAGIC, std::strlen(MAGIC));
}
rmm::device_uvector<size_type> get_string_column_ids(const table_device_view &view,
rmm::cuda_stream_view stream)
{
rmm::device_uvector<size_type> string_column_ids(view.num_columns(), stream);
auto iter = thrust::make_counting_iterator<size_type>(0);
auto end_iter = thrust::copy_if(rmm::exec_policy(stream),
iter,
iter + view.num_columns(),
string_column_ids.begin(),
[view] __device__(size_type index) {
return (view.column(index).type().id() == type_id::STRING);
});
string_column_ids.resize(end_iter - string_column_ids.begin(), stream);
return string_column_ids;
}
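// Note on get_string_column_ids above: a device-side thrust::copy_if selects the indices of all
// STRING columns from the table view and shrinks the vector to the number of matches; the result
// feeds the dictionary-building kernels.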
void writer::impl::write(table_view const &table)
{
CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed");
auto const num_columns = table.num_columns();
auto const num_rows = table.num_rows();
if (user_metadata_with_nullability.column_nullable.size() > 0) {
CUDF_EXPECTS(
user_metadata_with_nullability.column_nullable.size() == static_cast<size_t>(num_columns),
"When passing values in user_metadata_with_nullability, data for all columns must "
"be specified");
}
auto device_columns = table_device_view::create(table, stream);
auto string_column_ids = get_string_column_ids(*device_columns, stream);
// Wrapper around cudf columns to attach ORC-specific type info
std::vector<orc_column_view> orc_columns;
orc_columns.reserve(num_columns);
// Mapping of string columns for quick look-up
std::vector<int> str_col_ids;
for (auto const &column : table) {
auto const current_id = orc_columns.size();
auto const current_str_id = str_col_ids.size();
orc_columns.emplace_back(current_id, current_str_id, column, user_metadata, stream);
if (orc_columns.back().is_string()) { str_col_ids.push_back(current_id); }
}
rmm::device_uvector<uint32_t> dict_index(str_col_ids.size() * num_rows, stream);
rmm::device_uvector<uint32_t> dict_data(str_col_ids.size() * num_rows, stream);
// Build per-column dictionary indices
const auto num_rowgroups = div_by_rowgroups<size_t>(num_rows);
const auto num_dict_chunks = num_rowgroups * str_col_ids.size();
hostdevice_vector<gpu::DictionaryChunk> dict(num_dict_chunks, stream);
if (!str_col_ids.empty()) {
init_dictionaries(*device_columns,
orc_columns.data(),
str_col_ids,
string_column_ids,
dict_data.data(),
dict_index.data(),
&dict);
}
// Decide stripe boundaries early on, based on uncompressed size
auto const stripe_bounds = gather_stripe_info(orc_columns, num_rowgroups);
// Build stripe-level dictionaries
const auto num_stripe_dict = stripe_bounds.size() * str_col_ids.size();
hostdevice_vector<gpu::StripeDictionary> stripe_dict(num_stripe_dict, stream);
if (!str_col_ids.empty()) {
build_dictionaries(
orc_columns.data(), str_col_ids, stripe_bounds, dict, dict_index.data(), stripe_dict);
}
auto streams = create_streams(orc_columns, stripe_bounds);
auto enc_data = encode_columns(*device_columns, orc_columns, str_col_ids, stripe_bounds, streams);
// Assemble individual disparate column chunks into contiguous data streams
const auto num_index_streams = (num_columns + 1);
const auto num_data_streams = streams.size() - num_index_streams;
hostdevice_2dvector<gpu::StripeStream> strm_descs(stripe_bounds.size(), num_data_streams, stream);
auto stripes =
gather_stripes(num_rows, num_index_streams, stripe_bounds, &enc_data.streams, &strm_descs);
// Gather column statistics
std::vector<std::vector<uint8_t>> column_stats;
if (enable_statistics_ && num_columns > 0 && num_rows > 0) {
column_stats = gather_statistic_blobs(*device_columns, orc_columns, stripe_bounds);
}
// Allocate intermediate output stream buffer
size_t compressed_bfr_size = 0;
size_t num_compressed_blocks = 0;
auto stream_output = [&]() {
size_t max_stream_size = 0;
for (size_t stripe_id = 0; stripe_id < stripe_bounds.size(); stripe_id++) {
for (size_t i = 0; i < num_data_streams; i++) { // TODO range for (at least)
gpu::StripeStream *ss = &strm_descs[stripe_id][i];
size_t stream_size = ss->stream_size;
if (compression_kind_ != NONE) {
ss->first_block = num_compressed_blocks;
ss->bfr_offset = compressed_bfr_size;
auto num_blocks = std::max<uint32_t>(
(stream_size + compression_blocksize_ - 1) / compression_blocksize_, 1);
stream_size += num_blocks * 3;
num_compressed_blocks += num_blocks;
compressed_bfr_size += stream_size;
}
max_stream_size = std::max(max_stream_size, stream_size);
}
}
return pinned_buffer<uint8_t>{[](size_t size) {
uint8_t *ptr = nullptr;
CUDA_TRY(hipHostMalloc(&ptr, size));
return ptr;
}(max_stream_size),
hipHostFree};
}();
// Compress the data streams
rmm::device_buffer compressed_data(compressed_bfr_size, stream);
hostdevice_vector<gpu_inflate_status_s> comp_out(num_compressed_blocks, stream);
hostdevice_vector<gpu_inflate_input_s> comp_in(num_compressed_blocks, stream);
if (compression_kind_ != NONE) {
strm_descs.host_to_device(stream);
gpu::CompressOrcDataStreams(static_cast<uint8_t *>(compressed_data.data()),
num_compressed_blocks,
compression_kind_,
compression_blocksize_,
strm_descs,
enc_data.streams,
comp_in.device_ptr(),
comp_out.device_ptr(),
stream);
strm_descs.device_to_host(stream);
comp_out.device_to_host(stream, true);
}
ProtobufWriter pbw_(&buffer_);
// Write stripes
for (size_t stripe_id = 0; stripe_id < stripes.size(); ++stripe_id) {
auto const &rowgroup_range = stripe_bounds[stripe_id];
auto &stripe = stripes[stripe_id];
stripe.offset = out_sink_->bytes_written();
// Column (skippable) index streams appear at the start of the stripe
for (size_type stream_id = 0; stream_id <= num_columns; ++stream_id) {
write_index_stream(stripe_id,
stream_id,
orc_columns,
rowgroup_range,
enc_data.streams,
strm_descs,
comp_out,
&stripe,
&streams,
&pbw_);
}
// Column data consisting one or more separate streams
for (auto const &strm_desc : strm_descs[stripe_id]) {
write_data_stream(strm_desc,
enc_data.streams[strm_desc.column_id][rowgroup_range.first],
static_cast<uint8_t *>(compressed_data.data()),
stream_output.get(),
&stripe,
&streams);
}
// Write stripefooter consisting of stream information
StripeFooter sf;
sf.streams = streams;
sf.columns.resize(num_columns + 1);
sf.columns[0].kind = DIRECT;
sf.columns[0].dictionarySize = 0;
for (size_t i = 1; i < sf.columns.size(); ++i) {
sf.columns[i].kind = orc_columns[i - 1].orc_encoding();
sf.columns[i].dictionarySize = (sf.columns[i].kind == DICTIONARY_V2)
? orc_columns[i - 1].host_stripe_dict(stripe_id)->num_strings
: 0;
if (orc_columns[i - 1].orc_kind() == TIMESTAMP) { sf.writerTimezone = "UTC"; }
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(sf);
stripe.footerLength = buffer_.size();
if (compression_kind_ != NONE) {
uint32_t uncomp_sf_len = (stripe.footerLength - 3) * 2 + 1;
buffer_[0] = static_cast<uint8_t>(uncomp_sf_len >> 0);
buffer_[1] = static_cast<uint8_t>(uncomp_sf_len >> 8);
buffer_[2] = static_cast<uint8_t>(uncomp_sf_len >> 16);
}
out_sink_->host_write(buffer_.data(), buffer_.size());
}
if (column_stats.size() != 0) {
// File-level statistics
// NOTE: Excluded from chunked write mode to avoid the need for merging stats across calls
if (single_write_mode) {
ff.statistics.resize(1 + num_columns);
// First entry contains total number of rows
buffer_.resize(0);
pbw_.putb(1 * 8 + PB_TYPE_VARINT);
pbw_.put_uint(num_rows);
ff.statistics[0] = std::move(buffer_);
for (int col_idx = 0; col_idx < num_columns; col_idx++) {
size_t idx = stripes.size() * num_columns + col_idx;
if (idx < column_stats.size()) {
ff.statistics[1 + col_idx] = std::move(column_stats[idx]);
}
}
}
// Stripe-level statistics
size_t first_stripe = md.stripeStats.size();
md.stripeStats.resize(first_stripe + stripes.size());
for (size_t stripe_id = 0; stripe_id < stripes.size(); stripe_id++) {
md.stripeStats[first_stripe + stripe_id].colStats.resize(1 + num_columns);
buffer_.resize(0);
pbw_.putb(1 * 8 + PB_TYPE_VARINT);
pbw_.put_uint(stripes[stripe_id].numberOfRows);
md.stripeStats[first_stripe + stripe_id].colStats[0] = std::move(buffer_);
for (int col_idx = 0; col_idx < num_columns; col_idx++) {
size_t idx = stripes.size() * col_idx + stripe_id;
if (idx < column_stats.size()) {
md.stripeStats[first_stripe + stripe_id].colStats[1 + col_idx] =
std::move(column_stats[idx]);
}
}
}
}
if (ff.headerLength == 0) {
// First call
ff.headerLength = std::strlen(MAGIC);
ff.rowIndexStride = row_index_stride_;
ff.types.resize(1 + num_columns);
ff.types[0].kind = STRUCT;
ff.types[0].subtypes.resize(num_columns);
ff.types[0].fieldNames.resize(num_columns);
for (auto const &column : orc_columns) {
ff.types[1 + column.id()].kind = column.orc_kind();
ff.types[0].subtypes[column.id()] = 1 + column.id();
ff.types[0].fieldNames[column.id()] = column.orc_name();
}
} else {
// verify the user isn't passing mismatched tables
CUDF_EXPECTS(ff.types.size() == 1 + orc_columns.size(),
"Mismatch in table structure between multiple calls to write");
CUDF_EXPECTS(
std::all_of(orc_columns.cbegin(),
orc_columns.cend(),
[&](auto const &col) { return ff.types[1 + col.id()].kind == col.orc_kind(); }),
"Mismatch in column types between multiple calls to write");
}
ff.stripes.insert(ff.stripes.end(),
std::make_move_iterator(stripes.begin()),
std::make_move_iterator(stripes.end()));
ff.numberOfRows += num_rows;
}
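// Note on write() above: per table chunk the writer (1) wraps the columns in orc_column_view,
// (2) builds rowgroup and stripe dictionaries for string columns, (3) picks stripe boundaries,
// (4) creates the stream layout and encodes all streams on the GPU, (5) optionally gathers column
// statistics, (6) compresses the data streams, and (7) writes each stripe as index streams, data
// streams and a StripeFooter while accumulating FileFooter/Metadata state. On repeated calls
// (chunked writes) the schema is created only once and subsequent tables must match it.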
void writer::impl::close()
{
if (closed) { return; }
closed = true;
ProtobufWriter pbw_(&buffer_);
PostScript ps;
ff.contentLength = out_sink_->bytes_written();
if (user_metadata) {
for (auto it = user_metadata->user_data.begin(); it != user_metadata->user_data.end(); it++) {
ff.metadata.push_back({it->first, it->second});
}
}
// Write statistics metadata
if (md.stripeStats.size() != 0) {
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(md);
add_uncompressed_block_headers(buffer_);
ps.metadataLength = buffer_.size();
out_sink_->host_write(buffer_.data(), buffer_.size());
} else {
ps.metadataLength = 0;
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(ff);
add_uncompressed_block_headers(buffer_);
// Write postscript metadata
ps.footerLength = buffer_.size();
ps.compression = compression_kind_;
ps.compressionBlockSize = compression_blocksize_;
ps.version = {0, 12};
ps.magic = MAGIC;
const auto ps_length = static_cast<uint8_t>(pbw_.write(ps));
buffer_.push_back(ps_length);
out_sink_->host_write(buffer_.data(), buffer_.size());
out_sink_->flush();
}
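// Note on close() above: the file is finalized by writing the Metadata section (stripe statistics,
// if any), the FileFooter, and the PostScript; the very last byte written is the PostScript length,
// as required by the ORC file layout.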
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
orc_writer_options const &options,
SingleWriteMode mode,
rmm::mr::device_memory_resource *mr,
rmm::cuda_stream_view stream)
: _impl(std::make_unique<impl>(std::move(sink), options, mode, mr, stream))
{
}
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
chunked_orc_writer_options const &options,
SingleWriteMode mode,
rmm::mr::device_memory_resource *mr,
rmm::cuda_stream_view stream)
: _impl(std::make_unique<impl>(std::move(sink), options, mode, mr, stream))
{
}
// Destructor within this translation unit
writer::~writer() = default;
// Forward to implementation
void writer::write(table_view const &table) { _impl->write(table); }
// Forward to implementation
void writer::close() { _impl->close(); }
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
2366b762600c1d101e2fdf86eb38983c71da699e.cu
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO ORC writer class implementation
*/
#include "writer_impl.hpp"
#include <io/utilities/column_utils.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <algorithm>
#include <cstring>
#include <numeric>
#include <utility>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
using namespace cudf::io::orc;
using namespace cudf::io;
struct row_group_index_info {
int32_t pos = -1; // Position
int32_t blk_pos = -1; // Block Position
int32_t comp_pos = -1; // Compressed Position
int32_t comp_size = -1; // Compressed size
};
namespace {
/**
* @brief Helper for pinned host memory
*/
template <typename T>
using pinned_buffer = std::unique_ptr<T, decltype(&cudaFreeHost)>;
/**
* @brief Function that translates GDF compression to ORC compression
*/
orc::CompressionKind to_orc_compression(compression_type compression)
{
switch (compression) {
case compression_type::AUTO:
case compression_type::SNAPPY: return orc::CompressionKind::SNAPPY;
case compression_type::NONE: return orc::CompressionKind::NONE;
default: CUDF_EXPECTS(false, "Unsupported compression type"); return orc::CompressionKind::NONE;
}
}
/**
* @brief Function that translates GDF dtype to ORC datatype
*/
constexpr orc::TypeKind to_orc_type(cudf::type_id id)
{
switch (id) {
case cudf::type_id::INT8: return TypeKind::BYTE;
case cudf::type_id::INT16: return TypeKind::SHORT;
case cudf::type_id::INT32: return TypeKind::INT;
case cudf::type_id::INT64: return TypeKind::LONG;
case cudf::type_id::FLOAT32: return TypeKind::FLOAT;
case cudf::type_id::FLOAT64: return TypeKind::DOUBLE;
case cudf::type_id::BOOL8: return TypeKind::BOOLEAN;
case cudf::type_id::TIMESTAMP_DAYS: return TypeKind::DATE;
case cudf::type_id::TIMESTAMP_SECONDS:
case cudf::type_id::TIMESTAMP_MICROSECONDS:
case cudf::type_id::TIMESTAMP_MILLISECONDS:
case cudf::type_id::TIMESTAMP_NANOSECONDS: return TypeKind::TIMESTAMP;
case cudf::type_id::STRING: return TypeKind::STRING;
default: return TypeKind::INVALID_TYPE_KIND;
}
}
/**
* @brief Function that translates time unit to nanoscale multiple
*/
template <typename T>
constexpr T to_clockscale(cudf::type_id timestamp_id)
{
switch (timestamp_id) {
case cudf::type_id::TIMESTAMP_SECONDS: return 9;
case cudf::type_id::TIMESTAMP_MILLISECONDS: return 6;
case cudf::type_id::TIMESTAMP_MICROSECONDS: return 3;
case cudf::type_id::TIMESTAMP_NANOSECONDS:
default: return 0;
}
}
} // namespace
/**
* @brief Helper class that adds ORC-specific column info
*/
class orc_column_view {
public:
/**
* @brief Constructor that extracts out the string position + length pairs
* for building dictionaries for string columns
*/
explicit orc_column_view(size_t id,
size_t str_id,
column_view const &col,
const table_metadata *metadata,
rmm::cuda_stream_view stream)
: _id(id),
_str_id(str_id),
_is_string_type(col.type().id() == type_id::STRING),
_type_width(_is_string_type ? 0 : cudf::size_of(col.type())),
_data_count(col.size()),
_null_count(col.null_count()),
_nulls(col.null_mask()),
_clockscale(to_clockscale<uint8_t>(col.type().id())),
_type_kind(to_orc_type(col.type().id()))
{
// Generating default name if name isn't present in metadata
if (metadata && _id < metadata->column_names.size()) {
_name = metadata->column_names[_id];
} else {
_name = "_col" + std::to_string(_id);
}
}
auto is_string() const noexcept { return _is_string_type; }
void set_dict_stride(size_t stride) noexcept { dict_stride = stride; }
auto get_dict_stride() const noexcept { return dict_stride; }
/**
* @brief Function that associates an existing dictionary chunk allocation
*/
void attach_dict_chunk(gpu::DictionaryChunk *host_dict, gpu::DictionaryChunk *dev_dict)
{
dict = host_dict;
d_dict = dev_dict;
}
auto host_dict_chunk(size_t rowgroup) const
{
assert(_is_string_type);
return &dict[rowgroup * dict_stride + _str_id];
}
auto device_dict_chunk() const { return d_dict; }
/**
* @brief Function that associates an existing stripe dictionary allocation
*/
void attach_stripe_dict(gpu::StripeDictionary *host_stripe_dict,
gpu::StripeDictionary *dev_stripe_dict)
{
stripe_dict = host_stripe_dict;
d_stripe_dict = dev_stripe_dict;
}
auto host_stripe_dict(size_t stripe) const
{
assert(_is_string_type);
return &stripe_dict[stripe * dict_stride + _str_id];
}
auto device_stripe_dict() const { return d_stripe_dict; }
auto id() const noexcept { return _id; }
size_t type_width() const noexcept { return _type_width; }
size_t data_count() const noexcept { return _data_count; }
size_t null_count() const noexcept { return _null_count; }
bool nullable() const noexcept { return (_nulls != nullptr); }
uint32_t const *nulls() const noexcept { return _nulls; }
uint8_t clockscale() const noexcept { return _clockscale; }
void set_orc_encoding(ColumnEncodingKind e) { _encoding_kind = e; }
auto orc_kind() const noexcept { return _type_kind; }
auto orc_encoding() const noexcept { return _encoding_kind; }
auto orc_name() const noexcept { return _name; }
private:
// Identifier within set of columns and string columns, respectively
size_t _id = 0;
size_t _str_id = 0;
bool _is_string_type = false;
size_t _type_width = 0;
size_t _data_count = 0;
size_t _null_count = 0;
uint32_t const *_nulls = nullptr;
uint8_t _clockscale = 0;
// ORC-related members
std::string _name{};
TypeKind _type_kind;
ColumnEncodingKind _encoding_kind;
// String dictionary-related members
size_t dict_stride = 0;
gpu::DictionaryChunk const *dict = nullptr;
gpu::StripeDictionary const *stripe_dict = nullptr;
gpu::DictionaryChunk *d_dict = nullptr;
gpu::StripeDictionary *d_stripe_dict = nullptr;
};
std::vector<stripe_rowgroups> writer::impl::gather_stripe_info(
host_span<orc_column_view const> columns, size_t num_rowgroups)
{
auto const is_any_column_string =
std::any_of(columns.begin(), columns.end(), [](auto const &col) { return col.is_string(); });
// Apply rows per stripe limit to limit string dictionaries
size_t const max_stripe_rows = is_any_column_string ? 1000000 : 5000000;
std::vector<stripe_rowgroups> infos;
for (size_t rowgroup = 0, stripe_start = 0, stripe_size = 0; rowgroup < num_rowgroups;
++rowgroup) {
auto const rowgroup_size =
std::accumulate(columns.begin(), columns.end(), 0ul, [&](size_t total_size, auto const &col) {
if (col.is_string()) {
const auto dt = col.host_dict_chunk(rowgroup);
return total_size + row_index_stride_ + dt->string_char_count;
} else {
return total_size + col.type_width() * row_index_stride_;
}
});
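    // Cut a new stripe when adding this rowgroup would exceed either the stripe byte limit or the
    // row limit, provided the stripe already contains at least one rowgroup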
if ((rowgroup > stripe_start) &&
(stripe_size + rowgroup_size > max_stripe_size_ ||
(rowgroup + 1 - stripe_start) * row_index_stride_ > max_stripe_rows)) {
infos.emplace_back(infos.size(), stripe_start, rowgroup - stripe_start);
stripe_start = rowgroup;
stripe_size = 0;
}
stripe_size += rowgroup_size;
if (rowgroup + 1 == num_rowgroups) {
infos.emplace_back(infos.size(), stripe_start, num_rowgroups - stripe_start);
}
}
return infos;
}
void writer::impl::init_dictionaries(const table_device_view &view,
orc_column_view *columns,
std::vector<int> const &str_col_ids,
device_span<size_type> d_str_col_ids,
uint32_t *dict_data,
uint32_t *dict_index,
hostdevice_vector<gpu::DictionaryChunk> *dict)
{
const size_t num_rowgroups = dict->size() / str_col_ids.size();
// Setup per-rowgroup dictionary indexes for each dictionary-aware column
for (size_t i = 0; i < str_col_ids.size(); ++i) {
auto &str_column = columns[str_col_ids[i]];
str_column.set_dict_stride(str_col_ids.size());
str_column.attach_dict_chunk(dict->host_ptr(), dict->device_ptr());
}
gpu::InitDictionaryIndices(view,
dict->device_ptr(),
dict_data,
dict_index,
row_index_stride_,
d_str_col_ids.data(),
d_str_col_ids.size(),
num_rowgroups,
stream);
dict->device_to_host(stream, true);
}
void writer::impl::build_dictionaries(orc_column_view *columns,
std::vector<int> const &str_col_ids,
host_span<stripe_rowgroups const> stripe_bounds,
hostdevice_vector<gpu::DictionaryChunk> const &dict,
uint32_t *dict_index,
hostdevice_vector<gpu::StripeDictionary> &stripe_dict)
{
const auto num_rowgroups = dict.size() / str_col_ids.size();
for (size_t col_idx = 0; col_idx < str_col_ids.size(); ++col_idx) {
auto &str_column = columns[str_col_ids[col_idx]];
str_column.attach_stripe_dict(stripe_dict.host_ptr(), stripe_dict.device_ptr());
for (auto const &stripe : stripe_bounds) {
auto &sd = stripe_dict[stripe.id * str_col_ids.size() + col_idx];
sd.dict_data = str_column.host_dict_chunk(stripe.first)->dict_data;
sd.dict_index = dict_index + col_idx * str_column.data_count(); // Indexed by abs row
sd.column_id = str_col_ids[col_idx];
sd.start_chunk = stripe.first;
sd.num_chunks = stripe.size;
sd.dict_char_count = 0;
sd.num_strings =
std::accumulate(stripe.cbegin(), stripe.cend(), 0, [&](auto dt_str_cnt, auto rg_idx) {
const auto &dt = dict[rg_idx * str_col_ids.size() + col_idx];
return dt_str_cnt + dt.num_dict_strings;
});
sd.leaf_column = dict[col_idx].leaf_column;
}
if (enable_dictionary_) {
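      // Compare the estimated output size of direct vs. dictionary encoding for this column,
      // accumulated over all rowgroups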
struct string_column_cost {
size_t direct = 0;
size_t dictionary = 0;
};
auto const col_cost =
std::accumulate(stripe_bounds.front().cbegin(),
stripe_bounds.back().cend(),
string_column_cost{},
[&](auto cost, auto rg_idx) -> string_column_cost {
const auto &dt = dict[rg_idx * str_col_ids.size() + col_idx];
return {cost.direct + dt.string_char_count,
cost.dictionary + dt.dict_char_count + dt.num_dict_strings};
});
// Disable dictionary if it does not reduce the output size
if (col_cost.dictionary >= col_cost.direct) {
for (auto const &stripe : stripe_bounds) {
stripe_dict[stripe.id * str_col_ids.size() + col_idx].dict_data = nullptr;
}
}
}
}
stripe_dict.host_to_device(stream);
gpu::BuildStripeDictionaries(stripe_dict.device_ptr(),
stripe_dict.host_ptr(),
dict.device_ptr(),
stripe_bounds.size(),
num_rowgroups,
str_col_ids.size(),
stream);
stripe_dict.device_to_host(stream, true);
}
orc_streams writer::impl::create_streams(host_span<orc_column_view> columns,
host_span<stripe_rowgroups const> stripe_bounds)
{
// First n + 1 streams are row index streams, including 'column 0'
std::vector<Stream> streams{{ROW_INDEX, 0, 0}}; // TODO: Separate index and data streams?
streams.resize(columns.size() + 1);
std::vector<int32_t> ids(columns.size() * gpu::CI_NUM_STREAMS, -1);
for (auto &column : columns) {
TypeKind kind = column.orc_kind();
StreamKind data_kind = DATA;
StreamKind data2_kind = LENGTH;
ColumnEncodingKind encoding_kind = DIRECT;
int64_t present_stream_size = 0;
int64_t data_stream_size = 0;
int64_t data2_stream_size = 0;
int64_t dict_stream_size = 0;
auto const is_nullable = [&]() {
if (single_write_mode) {
return column.nullable();
} else {
return (column.id() < user_metadata_with_nullability.column_nullable.size())
? user_metadata_with_nullability.column_nullable[column.id()]
: true;
}
}();
if (is_nullable) {
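      // PRESENT stream: one bit per row, plus one byte of byte-RLE header overhead per 128 bytes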
present_stream_size = ((row_index_stride_ + 7) >> 3);
present_stream_size += (present_stream_size + 0x7f) >> 7;
}
switch (kind) {
case TypeKind::BOOLEAN:
data_stream_size = div_rowgroups_by<int64_t>(1024) * (128 + 1);
encoding_kind = DIRECT;
break;
case TypeKind::BYTE:
data_stream_size = div_rowgroups_by<int64_t>(128) * (128 + 1);
encoding_kind = DIRECT;
break;
case TypeKind::SHORT:
data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 2 + 2);
encoding_kind = DIRECT_V2;
break;
case TypeKind::FLOAT:
// Pass through if no nulls (no RLE encoding for floating point)
data_stream_size =
(column.null_count() != 0) ? div_rowgroups_by<int64_t>(512) * (512 * 4 + 2) : INT64_C(-1);
encoding_kind = DIRECT;
break;
case TypeKind::INT:
case TypeKind::DATE:
data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2);
encoding_kind = DIRECT_V2;
break;
case TypeKind::DOUBLE:
// Pass through if no nulls (no RLE encoding for floating point)
data_stream_size =
(column.null_count() != 0) ? div_rowgroups_by<int64_t>(512) * (512 * 8 + 2) : INT64_C(-1);
encoding_kind = DIRECT;
break;
case TypeKind::LONG:
data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 8 + 2);
encoding_kind = DIRECT_V2;
break;
case TypeKind::STRING: {
bool enable_dict = enable_dictionary_;
size_t dict_data_size = 0;
size_t dict_strings = 0;
size_t dict_lengths_div512 = 0;
for (auto const &stripe : stripe_bounds) {
const auto sd = column.host_stripe_dict(stripe.id);
enable_dict = (enable_dict && sd->dict_data != nullptr);
if (enable_dict) {
dict_strings += sd->num_strings;
dict_lengths_div512 += (sd->num_strings + 0x1ff) >> 9;
dict_data_size += sd->dict_char_count;
}
}
auto const direct_data_size =
std::accumulate(stripe_bounds.front().cbegin(),
stripe_bounds.back().cend(),
size_t{0},
[&](auto data_size, auto rg_idx) {
return data_size + column.host_dict_chunk(rg_idx)->string_char_count;
});
if (enable_dict) {
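        // Find the smallest power-of-two bit width that can index every dictionary entry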
uint32_t dict_bits = 0;
for (dict_bits = 1; dict_bits < 32; dict_bits <<= 1) {
if (dict_strings <= (1ull << dict_bits)) break;
}
const auto valid_count = column.data_count() - column.null_count();
dict_data_size += (dict_bits * valid_count + 7) >> 3;
}
// Decide between direct or dictionary encoding
if (enable_dict && dict_data_size < direct_data_size) {
data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2);
data2_stream_size = dict_lengths_div512 * (512 * 4 + 2);
dict_stream_size = std::max<size_t>(dict_data_size, 1);
encoding_kind = DICTIONARY_V2;
} else {
data_stream_size = std::max<size_t>(direct_data_size, 1);
data2_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2);
encoding_kind = DIRECT_V2;
}
break;
}
case TypeKind::TIMESTAMP:
data_stream_size = ((row_index_stride_ + 0x1ff) >> 9) * (512 * 4 + 2);
data2_stream_size = data_stream_size;
data2_kind = SECONDARY;
encoding_kind = DIRECT_V2;
break;
default: CUDF_FAIL("Unsupported ORC type kind");
}
    // Initialize the column's metadata (this is the only reason columns is an in/out parameter)
column.set_orc_encoding(encoding_kind);
// Initialize the column's index stream
const auto id = static_cast<uint32_t>(1 + column.id());
streams[id].column = id;
streams[id].kind = ROW_INDEX;
streams[id].length = 0;
// Initialize the column's data stream(s)
const auto base = column.id() * gpu::CI_NUM_STREAMS;
if (present_stream_size != 0) {
auto len = static_cast<uint64_t>(present_stream_size);
ids[base + gpu::CI_PRESENT] = streams.size();
streams.push_back(orc::Stream{PRESENT, id, len});
}
if (data_stream_size != 0) {
auto len = static_cast<uint64_t>(std::max<int64_t>(data_stream_size, 0));
ids[base + gpu::CI_DATA] = streams.size();
streams.push_back(orc::Stream{data_kind, id, len});
}
if (data2_stream_size != 0) {
auto len = static_cast<uint64_t>(std::max<int64_t>(data2_stream_size, 0));
ids[base + gpu::CI_DATA2] = streams.size();
streams.push_back(orc::Stream{data2_kind, id, len});
}
if (dict_stream_size != 0) {
auto len = static_cast<uint64_t>(dict_stream_size);
ids[base + gpu::CI_DICTIONARY] = streams.size();
streams.push_back(orc::Stream{DICTIONARY_DATA, id, len});
}
}
return {std::move(streams), std::move(ids)};
}
orc_streams::orc_stream_offsets orc_streams::compute_offsets(
host_span<orc_column_view const> columns, size_t num_rowgroups) const
{
std::vector<size_t> strm_offsets(streams.size());
size_t str_data_size = 0;
size_t rle_data_size = 0;
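  // Character data of string streams is packed at the front of the encoded buffer; every other
  // stream gets a fixed per-rowgroup slice in the RLE region that follows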
for (size_t i = 0; i < streams.size(); ++i) {
const auto &stream = streams[i];
const auto &column = columns[stream.column - 1];
if (((stream.kind == DICTIONARY_DATA || stream.kind == LENGTH) &&
(column.orc_encoding() == DICTIONARY_V2)) ||
((stream.kind == DATA) &&
(column.orc_kind() == TypeKind::STRING && column.orc_encoding() == DIRECT_V2))) {
strm_offsets[i] = str_data_size;
str_data_size += stream.length;
} else {
strm_offsets[i] = rle_data_size;
rle_data_size += (stream.length * num_rowgroups + 7) & ~7;
}
}
str_data_size = (str_data_size + 7) & ~7;
return {std::move(strm_offsets), str_data_size, rle_data_size};
}
struct segmented_valid_cnt_input {
bitmask_type const *mask;
std::vector<size_type> indices;
};
encoded_data writer::impl::encode_columns(const table_device_view &view,
host_span<orc_column_view const> columns,
std::vector<int> const &str_col_ids,
host_span<stripe_rowgroups const> stripe_bounds,
orc_streams const &streams)
{
auto const num_columns = columns.size();
auto const num_rowgroups = stripes_size(stripe_bounds);
hostdevice_2dvector<gpu::EncChunk> chunks(num_columns, num_rowgroups, stream);
hostdevice_2dvector<gpu::encoder_chunk_streams> chunk_streams(num_columns, num_rowgroups, stream);
auto const stream_offsets = streams.compute_offsets(columns, num_rowgroups);
rmm::device_uvector<uint8_t> encoded_data(stream_offsets.data_size(), stream);
// Initialize column chunks' descriptions
std::map<size_type, segmented_valid_cnt_input> validity_check_inputs;
for (auto const &column : columns) {
for (auto const &stripe : stripe_bounds) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) {
auto const rg_idx = *rg_idx_it;
auto &ck = chunks[column.id()][rg_idx];
ck.start_row = (rg_idx * row_index_stride_);
ck.num_rows = std::min<uint32_t>(row_index_stride_, column.data_count() - ck.start_row);
ck.encoding_kind = column.orc_encoding();
ck.type_kind = column.orc_kind();
if (ck.type_kind == TypeKind::STRING) {
ck.dict_index = (ck.encoding_kind == DICTIONARY_V2)
? column.host_stripe_dict(stripe.id)->dict_index
: nullptr;
ck.dtype_len = 1;
} else {
ck.dtype_len = column.type_width();
}
ck.scale = column.clockscale();
// Only need to check row groups that end within the stripe
}
}
}
auto validity_check_indices = [&](size_t col_idx) {
std::vector<size_type> indices;
for (auto const &stripe : stripe_bounds) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend() - 1; ++rg_idx_it) {
auto const &chunk = chunks[col_idx][*rg_idx_it];
indices.push_back(chunk.start_row);
indices.push_back(chunk.start_row + chunk.num_rows);
}
}
return indices;
};
for (auto const &column : columns) {
if (column.orc_kind() == TypeKind::BOOLEAN && column.nullable()) {
validity_check_inputs[column.id()] = {column.nulls(), validity_check_indices(column.id())};
}
}
for (auto &cnt_in : validity_check_inputs) {
auto const valid_counts = segmented_count_set_bits(cnt_in.second.mask, cnt_in.second.indices);
CUDF_EXPECTS(std::none_of(valid_counts.cbegin(),
valid_counts.cend(),
[](auto valid_count) { return valid_count % 8; }),
"There's currently a bug in encoding boolean columns. Suggested workaround "
"is to convert "
"to "
"int8 type. Please see https://github.com/rapidsai/cudf/issues/6763 for "
"more information.");
}
for (size_t col_idx = 0; col_idx < num_columns; col_idx++) {
auto const &column = columns[col_idx];
auto col_streams = chunk_streams[col_idx];
for (auto const &stripe : stripe_bounds) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) {
auto const rg_idx = *rg_idx_it;
auto const &ck = chunks[col_idx][rg_idx];
auto &strm = col_streams[rg_idx];
for (int strm_type = 0; strm_type < gpu::CI_NUM_STREAMS; ++strm_type) {
auto const strm_id = streams.id(col_idx * gpu::CI_NUM_STREAMS + strm_type);
strm.ids[strm_type] = strm_id;
if (strm_id >= 0) {
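            // Dictionary data and DICTIONARY_V2 length streams are stripe-level: only the first
            // rowgroup of each stripe gets a length; later rowgroups alias the same pointer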
if ((strm_type == gpu::CI_DICTIONARY) ||
(strm_type == gpu::CI_DATA2 && ck.encoding_kind == DICTIONARY_V2)) {
if (rg_idx_it == stripe.cbegin()) {
const int32_t dict_stride = column.get_dict_stride();
const auto stripe_dict = column.host_stripe_dict(stripe.id);
strm.lengths[strm_type] =
(strm_type == gpu::CI_DICTIONARY)
? stripe_dict->dict_char_count
: (((stripe_dict->num_strings + 0x1ff) >> 9) * (512 * 4 + 2));
if (stripe.id == 0) {
strm.data_ptrs[strm_type] = encoded_data.data() + stream_offsets.offsets[strm_id];
} else {
auto const &strm_up = col_streams[stripe_dict[-dict_stride].start_chunk];
strm.data_ptrs[strm_type] =
strm_up.data_ptrs[strm_type] + strm_up.lengths[strm_type];
}
} else {
strm.lengths[strm_type] = 0;
strm.data_ptrs[strm_type] = col_streams[rg_idx - 1].data_ptrs[strm_type];
}
} else if (strm_type == gpu::CI_DATA && ck.type_kind == TypeKind::STRING &&
ck.encoding_kind == DIRECT_V2) {
strm.lengths[strm_type] = column.host_dict_chunk(rg_idx)->string_char_count;
auto const &prev_strm = col_streams[rg_idx - 1];
strm.data_ptrs[strm_type] =
(rg_idx == 0) ? encoded_data.data() + stream_offsets.offsets[strm_id]
: (prev_strm.data_ptrs[strm_type] + prev_strm.lengths[strm_type]);
} else if (strm_type == gpu::CI_DATA && streams[strm_id].length == 0 &&
(ck.type_kind == DOUBLE || ck.type_kind == FLOAT)) {
// Pass-through
strm.lengths[strm_type] = ck.num_rows * ck.dtype_len;
strm.data_ptrs[strm_type] = nullptr;
} else {
strm.lengths[strm_type] = streams[strm_id].length;
strm.data_ptrs[strm_type] = encoded_data.data() + stream_offsets.str_data_size +
stream_offsets.offsets[strm_id] +
streams[strm_id].length * rg_idx;
}
} else {
strm.lengths[strm_type] = 0;
strm.data_ptrs[strm_type] = nullptr;
}
}
}
}
}
chunks.host_to_device(stream);
chunk_streams.host_to_device(stream);
gpu::set_chunk_columns(view, chunks, stream);
if (!str_col_ids.empty()) {
auto d_stripe_dict = columns[str_col_ids[0]].device_stripe_dict();
gpu::EncodeStripeDictionaries(
d_stripe_dict, chunks, str_col_ids.size(), stripe_bounds.size(), chunk_streams, stream);
}
gpu::EncodeOrcColumnData(chunks, chunk_streams, stream);
stream.synchronize();
return {std::move(encoded_data), std::move(chunk_streams)};
}
std::vector<StripeInformation> writer::impl::gather_stripes(
size_t num_rows,
size_t num_index_streams,
host_span<stripe_rowgroups const> stripe_bounds,
hostdevice_2dvector<gpu::encoder_chunk_streams> *enc_streams,
hostdevice_2dvector<gpu::StripeStream> *strm_desc)
{
std::vector<StripeInformation> stripes(stripe_bounds.size());
for (auto const &stripe : stripe_bounds) {
for (size_t col_idx = 0; col_idx < enc_streams->size().first; col_idx++) {
const auto &strm = (*enc_streams)[col_idx][stripe.first];
// Assign stream data of column data stream(s)
for (int k = 0; k < gpu::CI_INDEX; k++) {
const auto stream_id = strm.ids[k];
if (stream_id != -1) {
auto *ss = &(*strm_desc)[stripe.id][stream_id - num_index_streams];
ss->stream_size = 0;
ss->first_chunk_id = stripe.first;
ss->num_chunks = stripe.size;
ss->column_id = col_idx;
ss->stream_type = k;
}
}
}
auto const stripe_group_end = *stripe.cend();
auto const stripe_end = std::min(stripe_group_end * row_index_stride_, num_rows);
stripes[stripe.id].numberOfRows = stripe_end - stripe.first * row_index_stride_;
}
strm_desc->host_to_device(stream);
gpu::CompactOrcDataStreams(*strm_desc, *enc_streams, stream);
strm_desc->device_to_host(stream);
enc_streams->device_to_host(stream, true);
return stripes;
}
std::vector<std::vector<uint8_t>> writer::impl::gather_statistic_blobs(
const table_device_view &table,
host_span<orc_column_view const> columns,
host_span<stripe_rowgroups const> stripe_bounds)
{
auto const num_rowgroups = stripes_size(stripe_bounds);
size_t num_stat_blobs = (1 + stripe_bounds.size()) * columns.size();
size_t num_chunks = num_rowgroups * columns.size();
std::vector<std::vector<uint8_t>> stat_blobs(num_stat_blobs);
hostdevice_vector<stats_column_desc> stat_desc(columns.size(), stream);
hostdevice_vector<statistics_merge_group> stat_merge(num_stat_blobs, stream);
rmm::device_uvector<statistics_chunk> stat_chunks(num_chunks + num_stat_blobs, stream);
rmm::device_uvector<statistics_group> stat_groups(num_chunks, stream);
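  // stat_chunks layout: per-rowgroup chunks, then per-stripe merged stats, then per-column stats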
for (auto const &column : columns) {
stats_column_desc *desc = &stat_desc[column.id()];
switch (column.orc_kind()) {
case TypeKind::BYTE: desc->stats_dtype = dtype_int8; break;
case TypeKind::SHORT: desc->stats_dtype = dtype_int16; break;
case TypeKind::INT: desc->stats_dtype = dtype_int32; break;
case TypeKind::LONG: desc->stats_dtype = dtype_int64; break;
case TypeKind::FLOAT: desc->stats_dtype = dtype_float32; break;
case TypeKind::DOUBLE: desc->stats_dtype = dtype_float64; break;
case TypeKind::BOOLEAN: desc->stats_dtype = dtype_bool; break;
case TypeKind::DATE: desc->stats_dtype = dtype_int32; break;
case TypeKind::TIMESTAMP: desc->stats_dtype = dtype_timestamp64; break;
case TypeKind::STRING: desc->stats_dtype = dtype_string; break;
default: desc->stats_dtype = dtype_none; break;
}
desc->num_rows = column.data_count();
desc->num_values = column.data_count();
if (desc->stats_dtype == dtype_timestamp64) {
// Timestamp statistics are in milliseconds
switch (column.clockscale()) {
case 9: desc->ts_scale = 1000; break;
case 6: desc->ts_scale = 0; break;
case 3: desc->ts_scale = -1000; break;
case 0: desc->ts_scale = -1000000; break;
default: desc->ts_scale = 0; break;
}
} else {
desc->ts_scale = 0;
}
for (auto const &stripe : stripe_bounds) {
auto grp = &stat_merge[column.id() * stripe_bounds.size() + stripe.id];
grp->col = stat_desc.device_ptr(column.id());
grp->start_chunk = static_cast<uint32_t>(column.id() * num_rowgroups + stripe.first);
grp->num_chunks = stripe.size;
}
statistics_merge_group *col_stats =
&stat_merge[stripe_bounds.size() * columns.size() + column.id()];
col_stats->col = stat_desc.device_ptr(column.id());
col_stats->start_chunk = static_cast<uint32_t>(column.id() * stripe_bounds.size());
col_stats->num_chunks = static_cast<uint32_t>(stripe_bounds.size());
}
stat_desc.host_to_device(stream);
stat_merge.host_to_device(stream);
rmm::device_uvector<column_device_view> leaf_column_views =
create_leaf_column_device_views<stats_column_desc>(stat_desc, table, stream);
gpu::orc_init_statistics_groups(stat_groups.data(),
stat_desc.device_ptr(),
columns.size(),
num_rowgroups,
row_index_stride_,
stream);
GatherColumnStatistics(stat_chunks.data(), stat_groups.data(), num_chunks, stream);
MergeColumnStatistics(stat_chunks.data() + num_chunks,
stat_chunks.data(),
stat_merge.device_ptr(),
stripe_bounds.size() * columns.size(),
stream);
MergeColumnStatistics(stat_chunks.data() + num_chunks + stripe_bounds.size() * columns.size(),
stat_chunks.data() + num_chunks,
stat_merge.device_ptr(stripe_bounds.size() * columns.size()),
columns.size(),
stream);
gpu::orc_init_statistics_buffersize(
stat_merge.device_ptr(), stat_chunks.data() + num_chunks, num_stat_blobs, stream);
stat_merge.device_to_host(stream, true);
hostdevice_vector<uint8_t> blobs(
stat_merge[num_stat_blobs - 1].start_chunk + stat_merge[num_stat_blobs - 1].num_chunks, stream);
gpu::orc_encode_statistics(blobs.device_ptr(),
stat_merge.device_ptr(),
stat_chunks.data() + num_chunks,
num_stat_blobs,
stream);
stat_merge.device_to_host(stream);
blobs.device_to_host(stream, true);
for (size_t i = 0; i < num_stat_blobs; i++) {
const uint8_t *stat_begin = blobs.host_ptr(stat_merge[i].start_chunk);
const uint8_t *stat_end = stat_begin + stat_merge[i].num_chunks;
stat_blobs[i].assign(stat_begin, stat_end);
}
return stat_blobs;
}
void writer::impl::write_index_stream(int32_t stripe_id,
int32_t stream_id,
host_span<orc_column_view const> columns,
stripe_rowgroups const &rowgroups_range,
host_2dspan<gpu::encoder_chunk_streams const> enc_streams,
host_2dspan<gpu::StripeStream const> strm_desc,
host_span<gpu_inflate_status_s const> comp_out,
StripeInformation *stripe,
orc_streams *streams,
ProtobufWriter *pbw)
{
row_group_index_info present;
row_group_index_info data;
row_group_index_info data2;
auto kind = TypeKind::STRUCT;
auto const column_id = stream_id - 1;
auto find_record = [=, &strm_desc](gpu::encoder_chunk_streams const &stream,
gpu::StreamIndexType type) {
row_group_index_info record;
if (stream.ids[type] > 0) {
record.pos = 0;
if (compression_kind_ != NONE) {
auto const &ss = strm_desc[stripe_id][stream.ids[type] - (columns.size() + 1)];
record.blk_pos = ss.first_block;
record.comp_pos = 0;
record.comp_size = ss.stream_size;
}
}
return record;
};
auto scan_record = [=, &comp_out](gpu::encoder_chunk_streams const &stream,
gpu::StreamIndexType type,
row_group_index_info &record) {
if (record.pos >= 0) {
record.pos += stream.lengths[type];
while ((record.pos >= 0) && (record.blk_pos >= 0) &&
(static_cast<size_t>(record.pos) >= compression_blocksize_) &&
(record.comp_pos + 3 + comp_out[record.blk_pos].bytes_written <
static_cast<size_t>(record.comp_size))) {
record.pos -= compression_blocksize_;
record.comp_pos += 3 + comp_out[record.blk_pos].bytes_written;
record.blk_pos += 1;
}
}
};
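  // Each row index entry stores, per stream, the compressed offset and the uncompressed position
  // within the current block; scan_record advances both as rowgroups are consumed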
// TBD: Not sure we need an empty index stream for column 0
if (stream_id != 0) {
const auto &strm = enc_streams[column_id][0];
present = find_record(strm, gpu::CI_PRESENT);
data = find_record(strm, gpu::CI_DATA);
data2 = find_record(strm, gpu::CI_DATA2);
// Change string dictionary to int from index point of view
kind = columns[column_id].orc_kind();
if (kind == TypeKind::STRING && columns[column_id].orc_encoding() == DICTIONARY_V2) {
kind = TypeKind::INT;
}
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
// Add row index entries
std::for_each(rowgroups_range.cbegin(), rowgroups_range.cend(), [&](auto rowgroup) {
pbw->put_row_index_entry(
present.comp_pos, present.pos, data.comp_pos, data.pos, data2.comp_pos, data2.pos, kind);
if (stream_id != 0) {
const auto &strm = enc_streams[column_id][rowgroup];
scan_record(strm, gpu::CI_PRESENT, present);
scan_record(strm, gpu::CI_DATA, data);
scan_record(strm, gpu::CI_DATA2, data2);
}
});
(*streams)[stream_id].length = buffer_.size();
if (compression_kind_ != NONE) {
uint32_t uncomp_ix_len = (uint32_t)((*streams)[stream_id].length - 3) * 2 + 1;
buffer_[0] = static_cast<uint8_t>(uncomp_ix_len >> 0);
buffer_[1] = static_cast<uint8_t>(uncomp_ix_len >> 8);
buffer_[2] = static_cast<uint8_t>(uncomp_ix_len >> 16);
}
out_sink_->host_write(buffer_.data(), buffer_.size());
stripe->indexLength += buffer_.size();
}
void writer::impl::write_data_stream(gpu::StripeStream const &strm_desc,
gpu::encoder_chunk_streams const &enc_stream,
uint8_t const *compressed_data,
uint8_t *stream_out,
StripeInformation *stripe,
orc_streams *streams)
{
const auto length = strm_desc.stream_size;
(*streams)[enc_stream.ids[strm_desc.stream_type]].length = length;
if (length != 0) {
const auto *stream_in = (compression_kind_ == NONE)
? enc_stream.data_ptrs[strm_desc.stream_type]
: (compressed_data + strm_desc.bfr_offset);
CUDA_TRY(
cudaMemcpyAsync(stream_out, stream_in, length, cudaMemcpyDeviceToHost, stream.value()));
stream.synchronize();
out_sink_->host_write(stream_out, length);
}
stripe->dataLength += length;
}
void writer::impl::add_uncompressed_block_headers(std::vector<uint8_t> &v)
{
if (compression_kind_ != NONE) {
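    // ORC block header: 3 little-endian bytes holding (length << 1) | 1, where the low bit marks
    // the block as stored uncompressed ("original")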
size_t uncomp_len = v.size() - 3, pos = 0, block_len;
while (uncomp_len > compression_blocksize_) {
block_len = compression_blocksize_ * 2 + 1;
v[pos + 0] = static_cast<uint8_t>(block_len >> 0);
v[pos + 1] = static_cast<uint8_t>(block_len >> 8);
v[pos + 2] = static_cast<uint8_t>(block_len >> 16);
pos += 3 + compression_blocksize_;
v.insert(v.begin() + pos, 3, 0);
uncomp_len -= compression_blocksize_;
}
block_len = uncomp_len * 2 + 1;
v[pos + 0] = static_cast<uint8_t>(block_len >> 0);
v[pos + 1] = static_cast<uint8_t>(block_len >> 8);
v[pos + 2] = static_cast<uint8_t>(block_len >> 16);
}
}
writer::impl::impl(std::unique_ptr<data_sink> sink,
orc_writer_options const &options,
SingleWriteMode mode,
rmm::mr::device_memory_resource *mr,
rmm::cuda_stream_view stream)
: compression_kind_(to_orc_compression(options.get_compression())),
enable_statistics_(options.enable_statistics()),
out_sink_(std::move(sink)),
single_write_mode(mode == SingleWriteMode::YES),
user_metadata(options.get_metadata()),
stream(stream),
_mr(mr)
{
init_state();
}
writer::impl::impl(std::unique_ptr<data_sink> sink,
chunked_orc_writer_options const &options,
SingleWriteMode mode,
rmm::mr::device_memory_resource *mr,
rmm::cuda_stream_view stream)
: compression_kind_(to_orc_compression(options.get_compression())),
enable_statistics_(options.enable_statistics()),
out_sink_(std::move(sink)),
single_write_mode(mode == SingleWriteMode::YES),
stream(stream),
_mr(mr)
{
if (options.get_metadata() != nullptr) {
user_metadata_with_nullability = *options.get_metadata();
user_metadata = &user_metadata_with_nullability;
}
init_state();
}
writer::impl::~impl() { close(); }
void writer::impl::init_state()
{
// Write file header
out_sink_->host_write(MAGIC, std::strlen(MAGIC));
}
rmm::device_uvector<size_type> get_string_column_ids(const table_device_view &view,
rmm::cuda_stream_view stream)
{
rmm::device_uvector<size_type> string_column_ids(view.num_columns(), stream);
auto iter = thrust::make_counting_iterator<size_type>(0);
auto end_iter = thrust::copy_if(rmm::exec_policy(stream),
iter,
iter + view.num_columns(),
string_column_ids.begin(),
[view] __device__(size_type index) {
return (view.column(index).type().id() == type_id::STRING);
});
string_column_ids.resize(end_iter - string_column_ids.begin(), stream);
return string_column_ids;
}
void writer::impl::write(table_view const &table)
{
CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed");
auto const num_columns = table.num_columns();
auto const num_rows = table.num_rows();
if (user_metadata_with_nullability.column_nullable.size() > 0) {
CUDF_EXPECTS(
user_metadata_with_nullability.column_nullable.size() == static_cast<size_t>(num_columns),
"When passing values in user_metadata_with_nullability, data for all columns must "
"be specified");
}
auto device_columns = table_device_view::create(table, stream);
auto string_column_ids = get_string_column_ids(*device_columns, stream);
// Wrapper around cudf columns to attach ORC-specific type info
std::vector<orc_column_view> orc_columns;
orc_columns.reserve(num_columns);
// Mapping of string columns for quick look-up
std::vector<int> str_col_ids;
for (auto const &column : table) {
auto const current_id = orc_columns.size();
auto const current_str_id = str_col_ids.size();
orc_columns.emplace_back(current_id, current_str_id, column, user_metadata, stream);
if (orc_columns.back().is_string()) { str_col_ids.push_back(current_id); }
}
rmm::device_uvector<uint32_t> dict_index(str_col_ids.size() * num_rows, stream);
rmm::device_uvector<uint32_t> dict_data(str_col_ids.size() * num_rows, stream);
// Build per-column dictionary indices
const auto num_rowgroups = div_by_rowgroups<size_t>(num_rows);
const auto num_dict_chunks = num_rowgroups * str_col_ids.size();
hostdevice_vector<gpu::DictionaryChunk> dict(num_dict_chunks, stream);
if (!str_col_ids.empty()) {
init_dictionaries(*device_columns,
orc_columns.data(),
str_col_ids,
string_column_ids,
dict_data.data(),
dict_index.data(),
&dict);
}
// Decide stripe boundaries early on, based on uncompressed size
auto const stripe_bounds = gather_stripe_info(orc_columns, num_rowgroups);
// Build stripe-level dictionaries
const auto num_stripe_dict = stripe_bounds.size() * str_col_ids.size();
hostdevice_vector<gpu::StripeDictionary> stripe_dict(num_stripe_dict, stream);
if (!str_col_ids.empty()) {
build_dictionaries(
orc_columns.data(), str_col_ids, stripe_bounds, dict, dict_index.data(), stripe_dict);
}
auto streams = create_streams(orc_columns, stripe_bounds);
auto enc_data = encode_columns(*device_columns, orc_columns, str_col_ids, stripe_bounds, streams);
// Assemble individual disparate column chunks into contiguous data streams
const auto num_index_streams = (num_columns + 1);
const auto num_data_streams = streams.size() - num_index_streams;
hostdevice_2dvector<gpu::StripeStream> strm_descs(stripe_bounds.size(), num_data_streams, stream);
auto stripes =
gather_stripes(num_rows, num_index_streams, stripe_bounds, &enc_data.streams, &strm_descs);
// Gather column statistics
std::vector<std::vector<uint8_t>> column_stats;
if (enable_statistics_ && num_columns > 0 && num_rows > 0) {
column_stats = gather_statistic_blobs(*device_columns, orc_columns, stripe_bounds);
}
// Allocate intermediate output stream buffer
size_t compressed_bfr_size = 0;
size_t num_compressed_blocks = 0;
auto stream_output = [&]() {
size_t max_stream_size = 0;
for (size_t stripe_id = 0; stripe_id < stripe_bounds.size(); stripe_id++) {
for (size_t i = 0; i < num_data_streams; i++) { // TODO range for (at least)
gpu::StripeStream *ss = &strm_descs[stripe_id][i];
size_t stream_size = ss->stream_size;
if (compression_kind_ != NONE) {
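          // Reserve 3 bytes per compressed block for the ORC block header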
ss->first_block = num_compressed_blocks;
ss->bfr_offset = compressed_bfr_size;
auto num_blocks = std::max<uint32_t>(
(stream_size + compression_blocksize_ - 1) / compression_blocksize_, 1);
stream_size += num_blocks * 3;
num_compressed_blocks += num_blocks;
compressed_bfr_size += stream_size;
}
max_stream_size = std::max(max_stream_size, stream_size);
}
}
return pinned_buffer<uint8_t>{[](size_t size) {
uint8_t *ptr = nullptr;
CUDA_TRY(cudaMallocHost(&ptr, size));
return ptr;
}(max_stream_size),
cudaFreeHost};
}();
// Compress the data streams
rmm::device_buffer compressed_data(compressed_bfr_size, stream);
hostdevice_vector<gpu_inflate_status_s> comp_out(num_compressed_blocks, stream);
hostdevice_vector<gpu_inflate_input_s> comp_in(num_compressed_blocks, stream);
if (compression_kind_ != NONE) {
strm_descs.host_to_device(stream);
gpu::CompressOrcDataStreams(static_cast<uint8_t *>(compressed_data.data()),
num_compressed_blocks,
compression_kind_,
compression_blocksize_,
strm_descs,
enc_data.streams,
comp_in.device_ptr(),
comp_out.device_ptr(),
stream);
strm_descs.device_to_host(stream);
comp_out.device_to_host(stream, true);
}
ProtobufWriter pbw_(&buffer_);
// Write stripes
for (size_t stripe_id = 0; stripe_id < stripes.size(); ++stripe_id) {
auto const &rowgroup_range = stripe_bounds[stripe_id];
auto &stripe = stripes[stripe_id];
stripe.offset = out_sink_->bytes_written();
// Column (skippable) index streams appear at the start of the stripe
for (size_type stream_id = 0; stream_id <= num_columns; ++stream_id) {
write_index_stream(stripe_id,
stream_id,
orc_columns,
rowgroup_range,
enc_data.streams,
strm_descs,
comp_out,
&stripe,
&streams,
&pbw_);
}
// Column data consisting one or more separate streams
for (auto const &strm_desc : strm_descs[stripe_id]) {
write_data_stream(strm_desc,
enc_data.streams[strm_desc.column_id][rowgroup_range.first],
static_cast<uint8_t *>(compressed_data.data()),
stream_output.get(),
&stripe,
&streams);
}
// Write stripefooter consisting of stream information
StripeFooter sf;
sf.streams = streams;
sf.columns.resize(num_columns + 1);
sf.columns[0].kind = DIRECT;
sf.columns[0].dictionarySize = 0;
for (size_t i = 1; i < sf.columns.size(); ++i) {
sf.columns[i].kind = orc_columns[i - 1].orc_encoding();
sf.columns[i].dictionarySize = (sf.columns[i].kind == DICTIONARY_V2)
? orc_columns[i - 1].host_stripe_dict(stripe_id)->num_strings
: 0;
if (orc_columns[i - 1].orc_kind() == TIMESTAMP) { sf.writerTimezone = "UTC"; }
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(sf);
stripe.footerLength = buffer_.size();
if (compression_kind_ != NONE) {
uint32_t uncomp_sf_len = (stripe.footerLength - 3) * 2 + 1;
buffer_[0] = static_cast<uint8_t>(uncomp_sf_len >> 0);
buffer_[1] = static_cast<uint8_t>(uncomp_sf_len >> 8);
buffer_[2] = static_cast<uint8_t>(uncomp_sf_len >> 16);
}
out_sink_->host_write(buffer_.data(), buffer_.size());
}
if (column_stats.size() != 0) {
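    // column_stats layout: stripe-level blobs grouped by column (stripes.size() entries per
    // column), followed by one file-level blob per column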
// File-level statistics
// NOTE: Excluded from chunked write mode to avoid the need for merging stats across calls
if (single_write_mode) {
ff.statistics.resize(1 + num_columns);
// First entry contains total number of rows
buffer_.resize(0);
pbw_.putb(1 * 8 + PB_TYPE_VARINT);
pbw_.put_uint(num_rows);
ff.statistics[0] = std::move(buffer_);
for (int col_idx = 0; col_idx < num_columns; col_idx++) {
size_t idx = stripes.size() * num_columns + col_idx;
if (idx < column_stats.size()) {
ff.statistics[1 + col_idx] = std::move(column_stats[idx]);
}
}
}
// Stripe-level statistics
size_t first_stripe = md.stripeStats.size();
md.stripeStats.resize(first_stripe + stripes.size());
for (size_t stripe_id = 0; stripe_id < stripes.size(); stripe_id++) {
md.stripeStats[first_stripe + stripe_id].colStats.resize(1 + num_columns);
buffer_.resize(0);
pbw_.putb(1 * 8 + PB_TYPE_VARINT);
pbw_.put_uint(stripes[stripe_id].numberOfRows);
md.stripeStats[first_stripe + stripe_id].colStats[0] = std::move(buffer_);
for (int col_idx = 0; col_idx < num_columns; col_idx++) {
size_t idx = stripes.size() * col_idx + stripe_id;
if (idx < column_stats.size()) {
md.stripeStats[first_stripe + stripe_id].colStats[1 + col_idx] =
std::move(column_stats[idx]);
}
}
}
}
if (ff.headerLength == 0) {
// First call
ff.headerLength = std::strlen(MAGIC);
ff.rowIndexStride = row_index_stride_;
ff.types.resize(1 + num_columns);
ff.types[0].kind = STRUCT;
ff.types[0].subtypes.resize(num_columns);
ff.types[0].fieldNames.resize(num_columns);
for (auto const &column : orc_columns) {
ff.types[1 + column.id()].kind = column.orc_kind();
ff.types[0].subtypes[column.id()] = 1 + column.id();
ff.types[0].fieldNames[column.id()] = column.orc_name();
}
} else {
// verify the user isn't passing mismatched tables
CUDF_EXPECTS(ff.types.size() == 1 + orc_columns.size(),
"Mismatch in table structure between multiple calls to write");
CUDF_EXPECTS(
std::all_of(orc_columns.cbegin(),
orc_columns.cend(),
[&](auto const &col) { return ff.types[1 + col.id()].kind == col.orc_kind(); }),
"Mismatch in column types between multiple calls to write");
}
ff.stripes.insert(ff.stripes.end(),
std::make_move_iterator(stripes.begin()),
std::make_move_iterator(stripes.end()));
ff.numberOfRows += num_rows;
}
void writer::impl::close()
{
if (closed) { return; }
closed = true;
ProtobufWriter pbw_(&buffer_);
PostScript ps;
ff.contentLength = out_sink_->bytes_written();
if (user_metadata) {
for (auto it = user_metadata->user_data.begin(); it != user_metadata->user_data.end(); it++) {
ff.metadata.push_back({it->first, it->second});
}
}
// Write statistics metadata
if (md.stripeStats.size() != 0) {
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(md);
add_uncompressed_block_headers(buffer_);
ps.metadataLength = buffer_.size();
out_sink_->host_write(buffer_.data(), buffer_.size());
} else {
ps.metadataLength = 0;
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(ff);
add_uncompressed_block_headers(buffer_);
// Write postscript metadata
ps.footerLength = buffer_.size();
ps.compression = compression_kind_;
ps.compressionBlockSize = compression_blocksize_;
ps.version = {0, 12};
ps.magic = MAGIC;
const auto ps_length = static_cast<uint8_t>(pbw_.write(ps));
buffer_.push_back(ps_length);
out_sink_->host_write(buffer_.data(), buffer_.size());
out_sink_->flush();
}
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
orc_writer_options const &options,
SingleWriteMode mode,
rmm::mr::device_memory_resource *mr,
rmm::cuda_stream_view stream)
: _impl(std::make_unique<impl>(std::move(sink), options, mode, mr, stream))
{
}
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
chunked_orc_writer_options const &options,
SingleWriteMode mode,
rmm::mr::device_memory_resource *mr,
rmm::cuda_stream_view stream)
: _impl(std::make_unique<impl>(std::move(sink), options, mode, mr, stream))
{
}
// Destructor within this translation unit
writer::~writer() = default;
// Forward to implementation
void writer::write(table_view const &table) { _impl->write(table); }
// Forward to implementation
void writer::close() { _impl->close(); }
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf