Dataset columns (all values are strings; lengths range from min to max):
  hip_filename   — 5 to 84 characters
  hip_content    — 79 to 9.69M characters
  cuda_filename  — 4 to 83 characters
  cuda_content   — 19 to 9.69M characters
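Each row that follows pairs a hipify-generated HIP translation (hip_filename / hip_content) with the original CUDA source (cuda_filename / cuda_content). As a minimal sketch of the mapping those pairs exhibit — the kernel and variable names below are invented for illustration and do not come from the dataset:

// CUDA original (input to hipify): triple-chevron launch plus cuda* runtime calls, e.g.
//   scaleKernel<<<blocks, threads, 0, stream>>>(d_out, d_in, s, n);
//   cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
//
// hipify output: the launch becomes hipLaunchKernelGGL and cuda* APIs become hip* APIs.
#include <hip/hip_runtime.h>

__global__ void scaleKernel(float *out, const float *in, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = s * in[i];
}

void launchScale(float *d_out, const float *d_in, float *h_out, float s, int n, hipStream_t stream) {
  int threads = 256;
  int blocks = (n + threads - 1) / threads;
  // Translated launch: (kernel, gridDim, blockDim, sharedMemBytes, stream, kernel args...)
  hipLaunchKernelGGL(scaleKernel, dim3(blocks), dim3(threads), 0, stream, d_out, d_in, s, n);
  // Runtime API calls are renamed one-for-one.
  hipMemcpy(h_out, d_out, n * sizeof(float), hipMemcpyDeviceToHost);
}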
3e2e7b32fbbedd9d0b8003b0a55d540d3aa2ab24.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/eltwise.cuh> #include <raft/random/rng.cuh> #include "../test_utils.h" namespace raft { namespace linalg { //// Testing unary ops template <typename Type> __global__ void naiveScaleKernel(Type *out, const Type *in, Type scalar, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = scalar * in[idx]; } } template <typename Type> void naiveScale(Type *out, const Type *in, Type scalar, int len, hipStream_t stream) { static const int TPB = 64; int nblks = raft::ceildiv(len, TPB); hipLaunchKernelGGL(( naiveScaleKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, out, in, scalar, len); CUDA_CHECK(hipPeekAtLastError()); } template <typename T> struct ScalarMultiplyInputs { T tolerance; int len; T scalar; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const ScalarMultiplyInputs<T> &dims) { return os; } template <typename T> class ScalarMultiplyTest : public ::testing::TestWithParam<ScalarMultiplyInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<ScalarMultiplyInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.len; T scalar = params.scalar; hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); allocate(in, len); allocate(out_ref, len); allocate(out, len); r.uniform(in, len, T(-1.0), T(1.0), stream); naiveScale(out_ref, in, scalar, len, stream); scalarMultiply(out, in, scalar, len, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(in)); CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); } protected: ScalarMultiplyInputs<T> params; T *in, *out_ref, *out; }; const std::vector<ScalarMultiplyInputs<float>> inputsf1 = { {0.000001f, 1024 * 1024, 2.f, 1234ULL}}; const std::vector<ScalarMultiplyInputs<double>> inputsd1 = { {0.00000001, 1024 * 1024, 2.0, 1234ULL}}; typedef ScalarMultiplyTest<float> ScalarMultiplyTestF; TEST_P(ScalarMultiplyTestF, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } typedef ScalarMultiplyTest<double> ScalarMultiplyTestD; TEST_P(ScalarMultiplyTestD, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(ScalarMultiplyTests, ScalarMultiplyTestF, ::testing::ValuesIn(inputsf1)); INSTANTIATE_TEST_SUITE_P(ScalarMultiplyTests, ScalarMultiplyTestD, ::testing::ValuesIn(inputsd1)); //// Testing binary ops template <typename Type> __global__ void naiveAddKernel(Type *out, const Type *in1, const Type *in2, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = in1[idx] + in2[idx]; } } template <typename Type> void naiveAdd(Type *out, const Type *in1, const Type *in2, int len, hipStream_t stream) 
{ static const int TPB = 64; int nblks = raft::ceildiv(len, TPB); hipLaunchKernelGGL(( naiveAddKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, out, in1, in2, len); CUDA_CHECK(hipPeekAtLastError()); } template <typename T> struct EltwiseAddInputs { T tolerance; int len; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const EltwiseAddInputs<T> &dims) { return os; } template <typename T> class EltwiseAddTest : public ::testing::TestWithParam<EltwiseAddInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<EltwiseAddInputs<T>>::GetParam(); raft::random::Rng r(params.seed); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); int len = params.len; allocate(in1, len); allocate(in2, len); allocate(out_ref, len); allocate(out, len); r.uniform(in1, len, T(-1.0), T(1.0), stream); r.uniform(in2, len, T(-1.0), T(1.0), stream); naiveAdd(out_ref, in1, in2, len, stream); eltwiseAdd(out, in1, in2, len, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(in1)); CUDA_CHECK(hipFree(in2)); CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); } protected: EltwiseAddInputs<T> params; T *in1, *in2, *out_ref, *out; }; const std::vector<EltwiseAddInputs<float>> inputsf2 = { {0.000001f, 1024 * 1024, 1234ULL}}; const std::vector<EltwiseAddInputs<double>> inputsd2 = { {0.00000001, 1024 * 1024, 1234ULL}}; typedef EltwiseAddTest<float> EltwiseAddTestF; TEST_P(EltwiseAddTestF, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } typedef EltwiseAddTest<double> EltwiseAddTestD; TEST_P(EltwiseAddTestD, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(EltwiseAddTests, EltwiseAddTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_SUITE_P(EltwiseAddTests, EltwiseAddTestD, ::testing::ValuesIn(inputsd2)); } // end namespace linalg } // end namespace raft
3e2e7b32fbbedd9d0b8003b0a55d540d3aa2ab24.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/eltwise.cuh> #include <raft/random/rng.cuh> #include "../test_utils.h" namespace raft { namespace linalg { //// Testing unary ops template <typename Type> __global__ void naiveScaleKernel(Type *out, const Type *in, Type scalar, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = scalar * in[idx]; } } template <typename Type> void naiveScale(Type *out, const Type *in, Type scalar, int len, cudaStream_t stream) { static const int TPB = 64; int nblks = raft::ceildiv(len, TPB); naiveScaleKernel<Type><<<nblks, TPB, 0, stream>>>(out, in, scalar, len); CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> struct ScalarMultiplyInputs { T tolerance; int len; T scalar; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const ScalarMultiplyInputs<T> &dims) { return os; } template <typename T> class ScalarMultiplyTest : public ::testing::TestWithParam<ScalarMultiplyInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<ScalarMultiplyInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.len; T scalar = params.scalar; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); allocate(in, len); allocate(out_ref, len); allocate(out, len); r.uniform(in, len, T(-1.0), T(1.0), stream); naiveScale(out_ref, in, scalar, len, stream); scalarMultiply(out, in, scalar, len, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(in)); CUDA_CHECK(cudaFree(out_ref)); CUDA_CHECK(cudaFree(out)); } protected: ScalarMultiplyInputs<T> params; T *in, *out_ref, *out; }; const std::vector<ScalarMultiplyInputs<float>> inputsf1 = { {0.000001f, 1024 * 1024, 2.f, 1234ULL}}; const std::vector<ScalarMultiplyInputs<double>> inputsd1 = { {0.00000001, 1024 * 1024, 2.0, 1234ULL}}; typedef ScalarMultiplyTest<float> ScalarMultiplyTestF; TEST_P(ScalarMultiplyTestF, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } typedef ScalarMultiplyTest<double> ScalarMultiplyTestD; TEST_P(ScalarMultiplyTestD, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(ScalarMultiplyTests, ScalarMultiplyTestF, ::testing::ValuesIn(inputsf1)); INSTANTIATE_TEST_SUITE_P(ScalarMultiplyTests, ScalarMultiplyTestD, ::testing::ValuesIn(inputsd1)); //// Testing binary ops template <typename Type> __global__ void naiveAddKernel(Type *out, const Type *in1, const Type *in2, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = in1[idx] + in2[idx]; } } template <typename Type> void naiveAdd(Type *out, const Type *in1, const Type *in2, int len, cudaStream_t stream) { static const int TPB = 64; int nblks = raft::ceildiv(len, TPB); naiveAddKernel<Type><<<nblks, TPB, 0, 
stream>>>(out, in1, in2, len); CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> struct EltwiseAddInputs { T tolerance; int len; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const EltwiseAddInputs<T> &dims) { return os; } template <typename T> class EltwiseAddTest : public ::testing::TestWithParam<EltwiseAddInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<EltwiseAddInputs<T>>::GetParam(); raft::random::Rng r(params.seed); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); int len = params.len; allocate(in1, len); allocate(in2, len); allocate(out_ref, len); allocate(out, len); r.uniform(in1, len, T(-1.0), T(1.0), stream); r.uniform(in2, len, T(-1.0), T(1.0), stream); naiveAdd(out_ref, in1, in2, len, stream); eltwiseAdd(out, in1, in2, len, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(in1)); CUDA_CHECK(cudaFree(in2)); CUDA_CHECK(cudaFree(out_ref)); CUDA_CHECK(cudaFree(out)); } protected: EltwiseAddInputs<T> params; T *in1, *in2, *out_ref, *out; }; const std::vector<EltwiseAddInputs<float>> inputsf2 = { {0.000001f, 1024 * 1024, 1234ULL}}; const std::vector<EltwiseAddInputs<double>> inputsd2 = { {0.00000001, 1024 * 1024, 1234ULL}}; typedef EltwiseAddTest<float> EltwiseAddTestF; TEST_P(EltwiseAddTestF, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } typedef EltwiseAddTest<double> EltwiseAddTestD; TEST_P(EltwiseAddTestD, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(EltwiseAddTests, EltwiseAddTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_SUITE_P(EltwiseAddTests, EltwiseAddTestD, ::testing::ValuesIn(inputsd2)); } // end namespace linalg } // end namespace raft
9f9cf6096e871ae3cab2c2b01426ed96bcf61bf7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* STREAM benchmark implementation in CUDA. COPY: a(i) = b(i) SCALE: a(i) = q*b(i) SUM: a(i) = b(i) + c(i) TRIAD: a(i) = b(i) + q*c(i) It measures the memory system on the device. The implementation is in single precision. Code based on the code developed by John D. McCalpin http://www.cs.virginia.edu/stream/FTP/Code/stream.c Written by: Massimiliano Fatica, NVIDIA Corporation Further modifications by: Ben Cumming, CSCS */ #define N (2<<26) #define NTIMES 2 #include <stdio.h> #include <float.h> #include <limits.h> #include <sys/time.h> #include "../papi_wrap.h" # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif typedef double real; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(real) * N, 2 * sizeof(real) * N, 3 * sizeof(real) * N, 3 * sizeof(real) * N }; /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */ double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } template <typename T> __global__ void set_array(T *a, T value, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) a[idx] = value; } template <typename T> __global__ void STREAM_Copy(T *a, T *b, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) b[idx] = a[idx]; } template <typename T> __global__ void STREAM_Scale(T *a, T *b, T scale, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) b[idx] = scale* a[idx]; } template <typename T> __global__ void STREAM_Add( T *a, T *b, T *c, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) c[idx] = a[idx]+b[idx]; } template <typename T> __global__ void STREAM_Triad( T *a, T *b, T *c, T scalar, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) c[idx] = a[idx]+scalar*b[idx]; } int main() { real *d_a, *d_b, *d_c; int j,k; double times[4][NTIMES]; real scalar; int handle_add; printf(" STREAM Benchmark implementation in CUDA\n"); printf(" Array size (%s precision) =%7.2f MB\n", sizeof(double)==sizeof(real)?"double":"single", double(N)*double(sizeof(real))/1.e6); /* Allocate memory on device */ hipMalloc((void**)&d_a, sizeof(real)*N); hipMalloc((void**)&d_b, sizeof(real)*N); hipMalloc((void**)&d_c, sizeof(real)*N); /* Compute execution configuration */ dim3 dimBlock(192); dim3 dimGrid(N/dimBlock.x ); if( N % dimBlock.x != 0 ) dimGrid.x+=1; printf(" using %d threads per block, %d blocks\n",dimBlock.x,dimGrid.x); /* Initialize memory on the device */ hipLaunchKernelGGL(( set_array<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, 2.f, N); hipLaunchKernelGGL(( set_array<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, .5f, N); hipLaunchKernelGGL(( set_array<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_c, .5f, N); handle_add = pw_new_collector("dummy"); pw_start_collector(handle_add); pw_stop_collector(handle_add); handle_add = pw_new_collector("add"); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar=3.0f; for (k=0; k<NTIMES; k++) { times[0][k]= mysecond(); hipLaunchKernelGGL(( STREAM_Copy<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_c, N); hipDeviceSynchronize(); times[0][k]= mysecond() - times[0][k]; times[1][k]= 
mysecond(); hipLaunchKernelGGL(( STREAM_Scale<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, d_c, scalar, N); hipDeviceSynchronize(); times[1][k]= mysecond() - times[1][k]; if(k==1) pw_start_collector(handle_add); times[2][k]= mysecond(); hipLaunchKernelGGL(( STREAM_Add<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_b, d_c, N); hipDeviceSynchronize(); times[2][k]= mysecond() - times[2][k]; if(k==1) pw_stop_collector(handle_add); times[3][k]= mysecond(); hipLaunchKernelGGL(( STREAM_Triad<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, d_c, d_a, scalar, N); hipDeviceSynchronize(); times[3][k]= mysecond() - times[3][k]; } pw_print(); /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Rate (GB/s) Avg time(s) Min time(s) Max time(s)\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%11.4f %11.8f %11.8f %11.8f\n", label[j], 1.0E-09 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } /* Free memory on device */ hipFree(d_a); hipFree(d_b); hipFree(d_c); }
9f9cf6096e871ae3cab2c2b01426ed96bcf61bf7.cu
/* STREAM benchmark implementation in CUDA. COPY: a(i) = b(i) SCALE: a(i) = q*b(i) SUM: a(i) = b(i) + c(i) TRIAD: a(i) = b(i) + q*c(i) It measures the memory system on the device. The implementation is in single precision. Code based on the code developed by John D. McCalpin http://www.cs.virginia.edu/stream/FTP/Code/stream.c Written by: Massimiliano Fatica, NVIDIA Corporation Further modifications by: Ben Cumming, CSCS */ #define N (2<<26) #define NTIMES 2 #include <stdio.h> #include <float.h> #include <limits.h> #include <sys/time.h> #include "../papi_wrap.h" # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif typedef double real; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(real) * N, 2 * sizeof(real) * N, 3 * sizeof(real) * N, 3 * sizeof(real) * N }; /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */ double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } template <typename T> __global__ void set_array(T *a, T value, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) a[idx] = value; } template <typename T> __global__ void STREAM_Copy(T *a, T *b, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) b[idx] = a[idx]; } template <typename T> __global__ void STREAM_Scale(T *a, T *b, T scale, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) b[idx] = scale* a[idx]; } template <typename T> __global__ void STREAM_Add( T *a, T *b, T *c, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) c[idx] = a[idx]+b[idx]; } template <typename T> __global__ void STREAM_Triad( T *a, T *b, T *c, T scalar, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) c[idx] = a[idx]+scalar*b[idx]; } int main() { real *d_a, *d_b, *d_c; int j,k; double times[4][NTIMES]; real scalar; int handle_add; printf(" STREAM Benchmark implementation in CUDA\n"); printf(" Array size (%s precision) =%7.2f MB\n", sizeof(double)==sizeof(real)?"double":"single", double(N)*double(sizeof(real))/1.e6); /* Allocate memory on device */ cudaMalloc((void**)&d_a, sizeof(real)*N); cudaMalloc((void**)&d_b, sizeof(real)*N); cudaMalloc((void**)&d_c, sizeof(real)*N); /* Compute execution configuration */ dim3 dimBlock(192); dim3 dimGrid(N/dimBlock.x ); if( N % dimBlock.x != 0 ) dimGrid.x+=1; printf(" using %d threads per block, %d blocks\n",dimBlock.x,dimGrid.x); /* Initialize memory on the device */ set_array<real><<<dimGrid,dimBlock>>>(d_a, 2.f, N); set_array<real><<<dimGrid,dimBlock>>>(d_b, .5f, N); set_array<real><<<dimGrid,dimBlock>>>(d_c, .5f, N); handle_add = pw_new_collector("dummy"); pw_start_collector(handle_add); pw_stop_collector(handle_add); handle_add = pw_new_collector("add"); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar=3.0f; for (k=0; k<NTIMES; k++) { times[0][k]= mysecond(); STREAM_Copy<real><<<dimGrid,dimBlock>>>(d_a, d_c, N); cudaThreadSynchronize(); times[0][k]= mysecond() - times[0][k]; times[1][k]= mysecond(); STREAM_Scale<real><<<dimGrid,dimBlock>>>(d_b, d_c, scalar, N); cudaThreadSynchronize(); times[1][k]= mysecond() - times[1][k]; if(k==1) pw_start_collector(handle_add); times[2][k]= mysecond(); 
STREAM_Add<real><<<dimGrid,dimBlock>>>(d_a, d_b, d_c, N); cudaThreadSynchronize(); times[2][k]= mysecond() - times[2][k]; if(k==1) pw_stop_collector(handle_add); times[3][k]= mysecond(); STREAM_Triad<real><<<dimGrid,dimBlock>>>(d_b, d_c, d_a, scalar, N); cudaThreadSynchronize(); times[3][k]= mysecond() - times[3][k]; } pw_print(); /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Rate (GB/s) Avg time(s) Min time(s) Max time(s)\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%11.4f %11.8f %11.8f %11.8f\n", label[j], 1.0E-09 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } /* Free memory on device */ cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); }
c0937917505b3339b6d7769223f3ea2914ff39a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "threadFenceReduction_kernel.cuh" struct doubleSum { TASKCFG double init() {return 0.0;} TASKCFG double reduce(double x, double acc){return x + acc;} TASKCFG double f(unsigned int, hipDoubleComplex c){return c.x;} }; template <unsigned int blockSize, bool nIsPow2> TASKCFG void reduceSinglePass_dev(const hipDoubleComplex *g_idata, double *g_odata, unsigned int n){ reduceSinglePass_devGen<blockSize, nIsPow2, double, hipDoubleComplex, doubleSum>(g_idata, g_odata, n); } extern "C" __host__ void reduce_init() { resetRetirementCount(); } extern "C" __global__ void reduce_512_e2(const hipDoubleComplex *g_idata, double *g_odata, unsigned int n) { reduceSinglePass_dev<512, true>(g_idata, g_odata, n); } __device__ static __inline__ hipDoubleComplex cuMulComplexByDouble(hipDoubleComplex v, double y){ return make_cuDoubleComplex ( v.x * y , v.y * y ); } __device__ __inline__ void scale_complex_by_dbl( double norm , hipDoubleComplex * v , int len ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < len) v[x] = cuMulComplexByDouble(v[x], norm); } extern "C" __global__ void normalize( double * normp , hipDoubleComplex * v , int len ){ scale_complex_by_dbl(1.0 / (*normp), v, len); } // Helper for calling hipConfigureCall from Haskell-land extern "C" hipError_t __cudaConfigureCall ( int gridX, int gridY, int gridZ, int blockX, int blockY, int blockZ, size_t sharedMem, hipStream_t stream ) { dim3 gridDim(gridX, gridY, gridZ); dim3 blockDim(blockX,blockY,blockZ); return hipConfigureCall(gridDim, blockDim, sharedMem, stream); }
c0937917505b3339b6d7769223f3ea2914ff39a0.cu
#include "threadFenceReduction_kernel.cuh" struct doubleSum { TASKCFG double init() {return 0.0;} TASKCFG double reduce(double x, double acc){return x + acc;} TASKCFG double f(unsigned int, cuDoubleComplex c){return c.x;} }; template <unsigned int blockSize, bool nIsPow2> TASKCFG void reduceSinglePass_dev(const cuDoubleComplex *g_idata, double *g_odata, unsigned int n){ reduceSinglePass_devGen<blockSize, nIsPow2, double, cuDoubleComplex, doubleSum>(g_idata, g_odata, n); } extern "C" __host__ void reduce_init() { resetRetirementCount(); } extern "C" __global__ void reduce_512_e2(const cuDoubleComplex *g_idata, double *g_odata, unsigned int n) { reduceSinglePass_dev<512, true>(g_idata, g_odata, n); } __device__ static __inline__ cuDoubleComplex cuMulComplexByDouble(cuDoubleComplex v, double y){ return make_cuDoubleComplex ( v.x * y , v.y * y ); } __device__ __inline__ void scale_complex_by_dbl( double norm , cuDoubleComplex * v , int len ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < len) v[x] = cuMulComplexByDouble(v[x], norm); } extern "C" __global__ void normalize( double * normp , cuDoubleComplex * v , int len ){ scale_complex_by_dbl(1.0 / (*normp), v, len); } // Helper for calling cudaConfigureCall from Haskell-land extern "C" cudaError_t __cudaConfigureCall ( int gridX, int gridY, int gridZ, int blockX, int blockY, int blockZ, size_t sharedMem, cudaStream_t stream ) { dim3 gridDim(gridX, gridY, gridZ); dim3 blockDim(blockX,blockY,blockZ); return cudaConfigureCall(gridDim, blockDim, sharedMem, stream); }
4575c78a3e355e164f7ca5b1548c06ba75957e0d.hip
// !!! This is a file automatically generated by hipify!!! #include "../THCTensorMathPointwise.cuh" #include "THHTensor.hpp" #include "THHStream.h" #include "../generic/THCTensorMathPointwise.cu" #include "../THCGenerateDoubleType.h"
4575c78a3e355e164f7ca5b1548c06ba75957e0d.cu
#include "../THCTensorMathPointwise.cuh" #include "THCTensor.hpp" #include "THCStream.h" #include "../generic/THCTensorMathPointwise.cu" #include "../THCGenerateDoubleType.h"
d2a6e786ff928ad2ed4273b4dc0c86f8256566a7.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "../common.h"

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    CHECK(hipSetDevice(dev));

    // memory size
    unsigned int isize = 1 << 22;
    unsigned int nbytes = isize * sizeof(float);

    // get device information.
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));

    if (!deviceProp.canMapHostMemory)
    {
        printf("Device %d does not support mapping CPU host memory!\n", dev);
        CHECK(hipDeviceReset());
        exit(EXIT_SUCCESS);
    }

    printf("%s starting at ", argv[0]);
    printf("device %d: %s memory size %d nbyte %5.2fMB canMap %d\n", dev,
           deviceProp.name, isize, nbytes / (1024.0f * 1024.0f),
           deviceProp.canMapHostMemory);

    // allocate pinned host memory.
    float *h_a;
    CHECK(hipHostMalloc((float **) &h_a, nbytes));

    // allocate device memory.
    float *d_a;
    CHECK(hipMalloc((float **) &d_a, nbytes));

    // initialize host memory.
    memset(h_a, 0, nbytes);

    // transfer data from the host to the device.
    CHECK(hipMemcpy(d_a, h_a, nbytes, hipMemcpyHostToDevice));

    // transfer data from the device to host.
    CHECK(hipMemcpy(h_a, d_a, nbytes, hipMemcpyDeviceToHost));

    // free memory.
    CHECK(hipFree(d_a));
    CHECK(hipHostFree(h_a));

    // reset device.
    CHECK(hipDeviceReset());
    return EXIT_SUCCESS;
}
d2a6e786ff928ad2ed4273b4dc0c86f8256566a7.cu
#include <cuda_runtime.h>
#include <stdio.h>
#include "../common.h"

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    CHECK(cudaSetDevice(dev));

    // memory size
    unsigned int isize = 1 << 22;
    unsigned int nbytes = isize * sizeof(float);

    // get device information.
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));

    if (!deviceProp.canMapHostMemory)
    {
        printf("Device %d does not support mapping CPU host memory!\n", dev);
        CHECK(cudaDeviceReset());
        exit(EXIT_SUCCESS);
    }

    printf("%s starting at ", argv[0]);
    printf("device %d: %s memory size %d nbyte %5.2fMB canMap %d\n", dev,
           deviceProp.name, isize, nbytes / (1024.0f * 1024.0f),
           deviceProp.canMapHostMemory);

    // allocate pinned host memory.
    float *h_a;
    CHECK(cudaMallocHost((float **) &h_a, nbytes));

    // allocate device memory.
    float *d_a;
    CHECK(cudaMalloc((float **) &d_a, nbytes));

    // initialize host memory.
    memset(h_a, 0, nbytes);

    // transfer data from the host to the device.
    CHECK(cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice));

    // transfer data from the device to host.
    CHECK(cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost));

    // free memory.
    CHECK(cudaFree(d_a));
    CHECK(cudaFreeHost(h_a));

    // reset device.
    CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}
a0d8b49a297a0b696059d7d7f08b62229c548c30.hip
// !!! This is a file automatically generated by hipify!!! // ######################################################################## // Practical Course: GPU Programming in Computer Vision // Technical University of Munich, Computer Vision Group // ######################################################################## #include "diffusion.cuh" #include <iostream> #include <hip/hip_runtime.h> #include "helper.cuh" #define EPS2 0.00000001 __global__ void updateDiffusivityKernel(float *u, const float *d_div, int w, int h, int nc, float dt) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int z = threadIdx.z + blockDim.z * blockIdx.z; // TODO (9.5) update diffusivity if (x >= w || y >= h || z >= nc) return; u[z*h*w + y*w + x] += dt * d_div[z*h*w + y*w + x]; } __global__ void multDiffusivityKernel(float *v1, float *v2, int w, int h, int nc, float epsilon, int mode) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; // TODO (9.3) multiply diffusivity if (x >= w || y >= h) return; float g = 0; for (int z = 0; z < nc; z++) { g += v1[z*h*w + y*w + x]*v1[z*h*w + y*w + x]; g += v2[z*h*w + y*w + x]*v2[z*h*w + y*w + x]; } g = funcDiffusivity(sqrtf(g), epsilon, mode); for (int z = 0; z < nc; z++) { v1[z*h*w + y*w + x] *= g; v2[z*h*w + y*w + x] *= g; } } __global__ void multDiffusivityAnisotropicKernel(float *v1, float *v2, float *g11, float *g12, float *g22, int w, int h, int nc) { // TODO (10.2) multiply diffusivity (anisotropic) int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; int idx = y*w + x; float temp_v1 = v1[idx]; float temp_v2 = v2[idx]; v1[idx] = g11[idx]*temp_v1 + g12[idx]*temp_v2; v2[idx] = g12[idx]*temp_v1 + g22[idx]*temp_v2; } __global__ void computeDiffusivityKernel(float *diffusivity, const float *v1, const float *v2, int w, int h, int nc, float epsilon) { // TODO (11.2) compute diffusivity int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; float g = 0; for (int z = 0; z < nc; z++) { int idx = z*h*w + y*w + x; g += v1[idx]*v1[idx]; g += v2[idx]*v2[idx]; } diffusivity[y*w + x] = 1/max(epsilon,sqrtf(g)); } __device__ void computeEigenValuesAndVectorsOfMatrix2x2(float& lambda1, float& lambda2, float& v11, float& v12, float& v21, float& v22, const float& a, const float& b, const float& c, const float& d) { float determinant = a*d - b*c; float trace = a + d; lambda1 = trace/2 + powf(trace*trace/4 - determinant, 0.5f); lambda2 = trace/2 - powf(trace*trace/4 - determinant, 0.5f); if (fabsf(c) > EPS2) { v11 = lambda1 - d; v12 = c; v21 = lambda2 - d; v22 = c; float s1 = sqrtf(v11*v11 + v12*v12); float s2 = sqrtf(v21*v21 + v22*v22); v11 /= s1; v12 /= s1; v21 /= s2; v22 /= s2; } // else if (fabsf(b) > EPS2) // { // v11 = b; v12 = lambda1 - a; // v21 = b; v22 = lambda2 - a; // } else { v11 = 1; v12 = 0; v21 = 0; v22 = 1; } } __global__ void computeDiffusionTensorKernel(float *d_difftensor11, float *d_difftensor12, float *d_difftensor22, float *d_tensor11, float *d_tensor12, float *d_tensor22, float alpha, float C, int w, int h, int nc) { // TODO (10.1) compute diffusion tensor int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; float lambda1,lambda2,v11,v12,v21,v22; int idx = y*w + x; computeEigenValuesAndVectorsOfMatrix2x2(lambda1, lambda2, v11, v12, v21, v22, d_tensor11[idx], 
d_tensor12[idx], d_tensor12[idx], d_tensor22[idx]); float mu1 = alpha; float mu2 = (abs(lambda1 - lambda2) < EPS2) ? alpha : alpha + (1-alpha)*expf(-C/((lambda1-lambda2)*(lambda1-lambda2))); d_difftensor11[idx] = mu1*v11*v11 + mu2*v21*v21; d_difftensor12[idx] = mu1*v11*v12 + mu2*v21*v22; d_difftensor22[idx] = mu1*v12*v12 + mu2*v22*v22; } void updateDiffusivityCuda(float *u, const float *d_div, int w, int h, int nc, float dt) { // calculate block and grid size dim3 block(32, 8, nc); // TODO (9.5) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (9.5) execute kernel for updating diffusivity hipLaunchKernelGGL(( updateDiffusivityKernel) , dim3(grid), dim3(block), 0, 0, u, d_div, w, h, nc, dt); // check for errors // TODO (9.5) CUDA_CHECK; } void multDiffusivityCuda(float *v1, float *v2, int w, int h, int nc, float epsilon, int mode) { // calculate block and grid size dim3 block(32, 8, 1); // TODO (9.3) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (9.3) execute kernel for multiplying diffusivity hipLaunchKernelGGL(( multDiffusivityKernel) , dim3(grid), dim3(block), 0, 0, v1, v2, w, h, nc, epsilon, mode); // check for errors // TODO (9.3) CUDA_CHECK; } void multDiffusivityAnisotropicCuda(float *v1, float *v2, float *g11, float *g12, float *g22, int w, int h, int nc) { // calculate block and grid size dim3 block(32, 8, 1); // TODO (10.2) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (10.2) execute kernel for multiplying diffusivity (anisotropic) hipLaunchKernelGGL(( multDiffusivityAnisotropicKernel) , dim3(grid), dim3(block), 0, 0, v1, v2, g11, g12, g22, w, h, nc); // check for errors // TODO (10.2) CUDA_CHECK; } void computeDiffusivityCuda(float *diffusivity, const float *v1, const float *v2, int w, int h, int nc, float epsilon) { // calculate block and grid size dim3 block(32, 8, 1); // TODO (11.2) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (11.2) execute kernel for computing diffusivity hipLaunchKernelGGL(( computeDiffusivityKernel) , dim3(grid), dim3(block), 0, 0, diffusivity, v1, v2, w, h, nc, epsilon); // check for errors // TODO (11.2) CUDA_CHECK; } void computeDiffusionTensorCuda(float *d_difftensor11, float *d_difftensor12, float *d_difftensor22, float *d_tensor11, float *d_tensor12, float *d_tensor22, float alpha, float C, int w, int h, int nc) { // calculate block and grid size dim3 block(32, 8, 1); // TODO (10.1) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (10.1) execute kernel for computing diffusion tensor hipLaunchKernelGGL(( computeDiffusionTensorKernel) , dim3(grid), dim3(block), 0, 0, d_difftensor11, d_difftensor12, d_difftensor22, d_tensor11, d_tensor12, d_tensor22, alpha, C, w, h, nc); // check for errors // TODO (10.1) CUDA_CHECK; }
a0d8b49a297a0b696059d7d7f08b62229c548c30.cu
// ######################################################################## // Practical Course: GPU Programming in Computer Vision // Technical University of Munich, Computer Vision Group // ######################################################################## #include "diffusion.cuh" #include <iostream> #include <cuda_runtime.h> #include "helper.cuh" #define EPS2 0.00000001 __global__ void updateDiffusivityKernel(float *u, const float *d_div, int w, int h, int nc, float dt) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int z = threadIdx.z + blockDim.z * blockIdx.z; // TODO (9.5) update diffusivity if (x >= w || y >= h || z >= nc) return; u[z*h*w + y*w + x] += dt * d_div[z*h*w + y*w + x]; } __global__ void multDiffusivityKernel(float *v1, float *v2, int w, int h, int nc, float epsilon, int mode) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; // TODO (9.3) multiply diffusivity if (x >= w || y >= h) return; float g = 0; for (int z = 0; z < nc; z++) { g += v1[z*h*w + y*w + x]*v1[z*h*w + y*w + x]; g += v2[z*h*w + y*w + x]*v2[z*h*w + y*w + x]; } g = funcDiffusivity(sqrtf(g), epsilon, mode); for (int z = 0; z < nc; z++) { v1[z*h*w + y*w + x] *= g; v2[z*h*w + y*w + x] *= g; } } __global__ void multDiffusivityAnisotropicKernel(float *v1, float *v2, float *g11, float *g12, float *g22, int w, int h, int nc) { // TODO (10.2) multiply diffusivity (anisotropic) int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; int idx = y*w + x; float temp_v1 = v1[idx]; float temp_v2 = v2[idx]; v1[idx] = g11[idx]*temp_v1 + g12[idx]*temp_v2; v2[idx] = g12[idx]*temp_v1 + g22[idx]*temp_v2; } __global__ void computeDiffusivityKernel(float *diffusivity, const float *v1, const float *v2, int w, int h, int nc, float epsilon) { // TODO (11.2) compute diffusivity int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; float g = 0; for (int z = 0; z < nc; z++) { int idx = z*h*w + y*w + x; g += v1[idx]*v1[idx]; g += v2[idx]*v2[idx]; } diffusivity[y*w + x] = 1/max(epsilon,sqrtf(g)); } __device__ void computeEigenValuesAndVectorsOfMatrix2x2(float& lambda1, float& lambda2, float& v11, float& v12, float& v21, float& v22, const float& a, const float& b, const float& c, const float& d) { float determinant = a*d - b*c; float trace = a + d; lambda1 = trace/2 + powf(trace*trace/4 - determinant, 0.5f); lambda2 = trace/2 - powf(trace*trace/4 - determinant, 0.5f); if (fabsf(c) > EPS2) { v11 = lambda1 - d; v12 = c; v21 = lambda2 - d; v22 = c; float s1 = sqrtf(v11*v11 + v12*v12); float s2 = sqrtf(v21*v21 + v22*v22); v11 /= s1; v12 /= s1; v21 /= s2; v22 /= s2; } // else if (fabsf(b) > EPS2) // { // v11 = b; v12 = lambda1 - a; // v21 = b; v22 = lambda2 - a; // } else { v11 = 1; v12 = 0; v21 = 0; v22 = 1; } } __global__ void computeDiffusionTensorKernel(float *d_difftensor11, float *d_difftensor12, float *d_difftensor22, float *d_tensor11, float *d_tensor12, float *d_tensor22, float alpha, float C, int w, int h, int nc) { // TODO (10.1) compute diffusion tensor int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; float lambda1,lambda2,v11,v12,v21,v22; int idx = y*w + x; computeEigenValuesAndVectorsOfMatrix2x2(lambda1, lambda2, v11, v12, v21, v22, d_tensor11[idx], d_tensor12[idx], d_tensor12[idx], d_tensor22[idx]); float mu1 = alpha; 
float mu2 = (abs(lambda1 - lambda2) < EPS2) ? alpha : alpha + (1-alpha)*expf(-C/((lambda1-lambda2)*(lambda1-lambda2))); d_difftensor11[idx] = mu1*v11*v11 + mu2*v21*v21; d_difftensor12[idx] = mu1*v11*v12 + mu2*v21*v22; d_difftensor22[idx] = mu1*v12*v12 + mu2*v22*v22; } void updateDiffusivityCuda(float *u, const float *d_div, int w, int h, int nc, float dt) { // calculate block and grid size dim3 block(32, 8, nc); // TODO (9.5) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (9.5) execute kernel for updating diffusivity updateDiffusivityKernel <<<grid, block>>> (u, d_div, w, h, nc, dt); // check for errors // TODO (9.5) CUDA_CHECK; } void multDiffusivityCuda(float *v1, float *v2, int w, int h, int nc, float epsilon, int mode) { // calculate block and grid size dim3 block(32, 8, 1); // TODO (9.3) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (9.3) execute kernel for multiplying diffusivity multDiffusivityKernel <<<grid, block>>> (v1, v2, w, h, nc, epsilon, mode); // check for errors // TODO (9.3) CUDA_CHECK; } void multDiffusivityAnisotropicCuda(float *v1, float *v2, float *g11, float *g12, float *g22, int w, int h, int nc) { // calculate block and grid size dim3 block(32, 8, 1); // TODO (10.2) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (10.2) execute kernel for multiplying diffusivity (anisotropic) multDiffusivityAnisotropicKernel <<<grid, block>>> (v1, v2, g11, g12, g22, w, h, nc); // check for errors // TODO (10.2) CUDA_CHECK; } void computeDiffusivityCuda(float *diffusivity, const float *v1, const float *v2, int w, int h, int nc, float epsilon) { // calculate block and grid size dim3 block(32, 8, 1); // TODO (11.2) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (11.2) execute kernel for computing diffusivity computeDiffusivityKernel <<<grid, block>>> (diffusivity, v1, v2, w, h, nc, epsilon); // check for errors // TODO (11.2) CUDA_CHECK; } void computeDiffusionTensorCuda(float *d_difftensor11, float *d_difftensor12, float *d_difftensor22, float *d_tensor11, float *d_tensor12, float *d_tensor22, float alpha, float C, int w, int h, int nc) { // calculate block and grid size dim3 block(32, 8, 1); // TODO (10.1) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (10.1) execute kernel for computing diffusion tensor computeDiffusionTensorKernel <<<grid, block>>> (d_difftensor11, d_difftensor12, d_difftensor22, d_tensor11, d_tensor12, d_tensor22, alpha, C, w, h, nc); // check for errors // TODO (10.1) CUDA_CHECK; }
e5d2158a340ccfe4703d75c96034543a4d6d8a55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2014, September 8 - October 10 // ### // ### // ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff // ### // ### // ### // ### // ### Dennis Mack, [email protected], p060 // ### Adrian Haarbach, [email protected], p077 // ### Markus Schlaffer, [email protected], p070 #include <iostream> #include <math.h> //#include <stdio.h> #include <helper.h> using namespace std; // uncomment to use the camera //#define CAMERA void gammaCPU(float *imgIn, float *imgOut, size_t n, float gamma){ for(size_t i=0;i<n;i++){ imgOut[i]=powf(imgIn[i],gamma); } } __global__ void gammaGPU(float *imgIn, float *imgOut, size_t n, float gamma){ size_t ind = threadIdx.x + blockDim.x * blockIdx.x; if (ind<n) imgOut[ind]=powf(imgIn[ind],gamma); } float GetAverage(float dArray[], int iSize) { float dSum = dArray[0]; for (int i = 1; i < iSize; ++i) { dSum += dArray[i]; } return dSum/iSize; } int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate hipDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // ### Define your own parameters here as needed float gamma=0.5f; getParam("gamma", gamma, argc, argv); cout << "gamma: " << gamma << endl; // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? 
CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // ### Define your own output images here as needed // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array size_t n = (size_t)w*h*nc; float *imgIn = new float[(size_t)w*h*nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOut = new float[(size_t)w*h*mOut.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); float *tc, *tg, *tg2; tc=(float*)malloc(repeats*sizeof(float)); tg=(float*)malloc(repeats*sizeof(float)); tg2=(float*)malloc(repeats*sizeof(float)); for(int i=0;i<repeats;i++){ //CPU: Timer timercpu, timergpu, timergpu2; timercpu.start(); gammaCPU(imgIn,imgOut,n,gamma); timercpu.end(); tc[i] = timercpu.get(); // elapsed time in seconds //GPU: timergpu.start(); float *d_imgIn, *d_imgOut; hipMalloc(&d_imgIn, n * sizeof(float) ); CUDA_CHECK; hipMemcpy(d_imgIn, imgIn, n * sizeof(float), hipMemcpyHostToDevice); CUDA_CHECK; hipMalloc(&d_imgOut, n * sizeof(float) ); CUDA_CHECK; timergpu2.start(); //min @ 128,1,1: //./main -i ~/cuda_ss14/images/bird.png -repeats 1000 //avg time cpu: 9.95011 ms //avg time gpu: 3.63 ms //avg time gpu allocfree: 0.38 ms dim3 block = dim3(128,1,1); dim3 grid = dim3((n + block.x - 1 ) / block.x, 1, 1); hipLaunchKernelGGL(( gammaGPU) , dim3(grid),dim3(block), 0, 0, d_imgIn, d_imgOut, n, gamma); CUDA_CHECK; hipDeviceSynchronize(); timergpu2.end(); tg2[i] = timergpu2.get(); CUDA_CHECK; hipMemcpy(imgOut, d_imgOut, n * sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK; hipFree(d_imgOut); CUDA_CHECK; hipFree(d_imgIn); CUDA_CHECK; timergpu.end(); tg[i] = timergpu.get(); // elapsed time in seconds } cout << "avg time cpu: " << GetAverage(tc, repeats)*1000 << " ms" << endl; cout << "avg time gpu: " << GetAverage(tg, repeats)*1000 << " ms" << endl; cout << "avg time gpu allocfree: " << GetAverage(tg2, repeats)*1000 << " ms" << endl; // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array convert_layered_to_mat(mOut, imgOut); showImage("Output", mOut, 100+w+40, 100); // ### Display your own output images here as needed #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; delete[] imgOut; // close all opencv windows cvDestroyAllWindows(); return 0; }
e5d2158a340ccfe4703d75c96034543a4d6d8a55.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2014, September 8 - October 10 // ### // ### // ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff // ### // ### // ### // ### // ### Dennis Mack, [email protected], p060 // ### Adrian Haarbach, [email protected], p077 // ### Markus Schlaffer, [email protected], p070 #include <iostream> #include <math.h> //#include <stdio.h> #include <helper.h> using namespace std; // uncomment to use the camera //#define CAMERA void gammaCPU(float *imgIn, float *imgOut, size_t n, float gamma){ for(size_t i=0;i<n;i++){ imgOut[i]=powf(imgIn[i],gamma); } } __global__ void gammaGPU(float *imgIn, float *imgOut, size_t n, float gamma){ size_t ind = threadIdx.x + blockDim.x * blockIdx.x; if (ind<n) imgOut[ind]=powf(imgIn[ind],gamma); } float GetAverage(float dArray[], int iSize) { float dSum = dArray[0]; for (int i = 1; i < iSize; ++i) { dSum += dArray[i]; } return dSum/iSize; } int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate cudaDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // ### Define your own parameters here as needed float gamma=0.5f; getParam("gamma", gamma, argc, argv); cout << "gamma: " << gamma << endl; // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? 
CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // ### Define your own output images here as needed // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array size_t n = (size_t)w*h*nc; float *imgIn = new float[(size_t)w*h*nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOut = new float[(size_t)w*h*mOut.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); float *tc, *tg, *tg2; tc=(float*)malloc(repeats*sizeof(float)); tg=(float*)malloc(repeats*sizeof(float)); tg2=(float*)malloc(repeats*sizeof(float)); for(int i=0;i<repeats;i++){ //CPU: Timer timercpu, timergpu, timergpu2; timercpu.start(); gammaCPU(imgIn,imgOut,n,gamma); timercpu.end(); tc[i] = timercpu.get(); // elapsed time in seconds //GPU: timergpu.start(); float *d_imgIn, *d_imgOut; cudaMalloc(&d_imgIn, n * sizeof(float) ); CUDA_CHECK; cudaMemcpy(d_imgIn, imgIn, n * sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK; cudaMalloc(&d_imgOut, n * sizeof(float) ); CUDA_CHECK; timergpu2.start(); //min @ 128,1,1: //./main -i ~/cuda_ss14/images/bird.png -repeats 1000 //avg time cpu: 9.95011 ms //avg time gpu: 3.63 ms //avg time gpu allocfree: 0.38 ms dim3 block = dim3(128,1,1); dim3 grid = dim3((n + block.x - 1 ) / block.x, 1, 1); gammaGPU <<<grid,block>>> (d_imgIn, d_imgOut, n, gamma); CUDA_CHECK; cudaDeviceSynchronize(); timergpu2.end(); tg2[i] = timergpu2.get(); CUDA_CHECK; cudaMemcpy(imgOut, d_imgOut, n * sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK; cudaFree(d_imgOut); CUDA_CHECK; cudaFree(d_imgIn); CUDA_CHECK; timergpu.end(); tg[i] = timergpu.get(); // elapsed time in seconds } cout << "avg time cpu: " << GetAverage(tc, repeats)*1000 << " ms" << endl; cout << "avg time gpu: " << GetAverage(tg, repeats)*1000 << " ms" << endl; cout << "avg time gpu allocfree: " << GetAverage(tg2, repeats)*1000 << " ms" << endl; // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array convert_layered_to_mat(mOut, imgOut); showImage("Output", mOut, 100+w+40, 100); // ### Display your own output images here as needed #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; delete[] imgOut; // close all opencv windows cvDestroyAllWindows(); return 0; }
dda6bfdc2b9c861e0752fc3e67479729e6e3d825.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #define tx threadIdx.x #define ty threadIdx.y #define bx blockIdx.x #define by blockIdx.y #define bdx blockDim.x #define bdy blockDim.y #define BORDER_SIZE 5 #define MAX_KSIZE_HALF 100 namespace cv { namespace cuda { namespace device { namespace optflow_farneback { __constant__ float c_g[8]; __constant__ float c_xg[8]; __constant__ float c_xxg[8]; __constant__ float c_ig11, c_ig03, c_ig33, c_ig55; template <int polyN> __global__ void polynomialExpansion( const int height, const int width, const PtrStepf src, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * (bdx - 2*polyN) + tx - polyN; if (y < height) { extern __shared__ float smem[]; volatile float *row = smem + tx; int xWarped = ::min(::max(x, 0), width - 1); row[0] = src(y, xWarped) * c_g[0]; row[bdx] = 0.f; row[2*bdx] = 0.f; for (int k = 1; k <= polyN; ++k) { float t0 = src(::max(y - k, 0), xWarped); float t1 = src(::min(y + k, height - 1), xWarped); row[0] += c_g[k] * (t0 + t1); row[bdx] += c_xg[k] * (t1 - t0); row[2*bdx] += c_xxg[k] * (t0 + t1); } __syncthreads(); if (tx >= polyN && tx + polyN < bdx && x < width) { float b1 = c_g[0] * row[0]; float b3 = c_g[0] * row[bdx]; float b5 = c_g[0] * row[2*bdx]; float b2 = 0, b4 = 0, b6 = 0; for (int k = 1; k <= polyN; ++k) { b1 += (row[k] + row[-k]) * c_g[k]; b4 += (row[k] + row[-k]) * c_xxg[k]; b2 += (row[k] - row[-k]) * c_xg[k]; b3 += (row[k + bdx] + row[-k + bdx]) * c_g[k]; b6 += (row[k + bdx] - row[-k + bdx]) * c_xg[k]; b5 += (row[k + 2*bdx] + row[-k + 2*bdx]) * c_g[k]; } dst(y, xWarped) = b3*c_ig11; dst(height + y, xWarped) = b2*c_ig11; dst(2*height + y, xWarped) = b1*c_ig03 + b5*c_ig33; dst(3*height + y, xWarped) = b1*c_ig03 + b4*c_ig33; dst(4*height + y, xWarped) = b6*c_ig55; } } } void setPolynomialExpansionConsts( int polyN, const float *g, const float *xg, const float *xxg, float ig11, float ig03, float ig33, float ig55) { cudaSafeCall(hipMemcpyToSymbol(c_g, g, (polyN + 1) * sizeof(*g))); cudaSafeCall(hipMemcpyToSymbol(c_xg, xg, (polyN + 1) * sizeof(*xg))); cudaSafeCall(hipMemcpyToSymbol(c_xxg, xxg, (polyN + 1) * sizeof(*xxg))); cudaSafeCall(hipMemcpyToSymbol(c_ig11, &ig11, sizeof(ig11))); cudaSafeCall(hipMemcpyToSymbol(c_ig03, &ig03, sizeof(ig03))); cudaSafeCall(hipMemcpyToSymbol(c_ig33, &ig33, sizeof(ig33))); cudaSafeCall(hipMemcpyToSymbol(c_ig55, &ig55, sizeof(ig55))); } void polynomialExpansionGpu(const PtrStepSzf &src, int polyN, PtrStepSzf dst, hipStream_t stream) { dim3 block(256); dim3 grid(divUp(src.cols, block.x - 2*polyN), src.rows); int smem = 3 * block.x * sizeof(float); if (polyN == 5) hipLaunchKernelGGL(( polynomialExpansion<5>), dim3(grid), dim3(block), smem, stream, src.rows, src.cols, src, dst); else if (polyN == 7) hipLaunchKernelGGL(( polynomialExpansion<7>), dim3(grid), dim3(block), smem, stream, src.rows, src.cols, src, dst); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } __constant__ float c_border[BORDER_SIZE + 1]; __global__ void updateMatrices( const int height, const int width, const PtrStepf flowx, const PtrStepf flowy, const PtrStepf R0, const PtrStepf R1, PtrStepf M) { const int y = by * bdy + ty; const int x = bx * bdx + tx; if (y < height && x < width) { float dx = flowx(y, x); float dy = flowy(y, x); float fx = x + dx; float fy = y + dy; int x1 = floorf(fx); int y1 = floorf(fy); fx -= x1; fy -= y1; float r2, r3, r4, r5, r6; if (x1 >= 0 && y1 >= 0 && x1 < width - 1 && 
y1 < height - 1) { float a00 = (1.f - fx) * (1.f - fy); float a01 = fx * (1.f - fy); float a10 = (1.f - fx) * fy; float a11 = fx * fy; r2 = a00 * R1(y1, x1) + a01 * R1(y1, x1 + 1) + a10 * R1(y1 + 1, x1) + a11 * R1(y1 + 1, x1 + 1); r3 = a00 * R1(height + y1, x1) + a01 * R1(height + y1, x1 + 1) + a10 * R1(height + y1 + 1, x1) + a11 * R1(height + y1 + 1, x1 + 1); r4 = a00 * R1(2*height + y1, x1) + a01 * R1(2*height + y1, x1 + 1) + a10 * R1(2*height + y1 + 1, x1) + a11 * R1(2*height + y1 + 1, x1 + 1); r5 = a00 * R1(3*height + y1, x1) + a01 * R1(3*height + y1, x1 + 1) + a10 * R1(3*height + y1 + 1, x1) + a11 * R1(3*height + y1 + 1, x1 + 1); r6 = a00 * R1(4*height + y1, x1) + a01 * R1(4*height + y1, x1 + 1) + a10 * R1(4*height + y1 + 1, x1) + a11 * R1(4*height + y1 + 1, x1 + 1); r4 = (R0(2*height + y, x) + r4) * 0.5f; r5 = (R0(3*height + y, x) + r5) * 0.5f; r6 = (R0(4*height + y, x) + r6) * 0.25f; } else { r2 = r3 = 0.f; r4 = R0(2*height + y, x); r5 = R0(3*height + y, x); r6 = R0(4*height + y, x) * 0.5f; } r2 = (R0(y, x) - r2) * 0.5f; r3 = (R0(height + y, x) - r3) * 0.5f; r2 += r4*dy + r6*dx; r3 += r6*dy + r5*dx; float scale = c_border[::min(x, BORDER_SIZE)] * c_border[::min(y, BORDER_SIZE)] * c_border[::min(width - x - 1, BORDER_SIZE)] * c_border[::min(height - y - 1, BORDER_SIZE)]; r2 *= scale; r3 *= scale; r4 *= scale; r5 *= scale; r6 *= scale; M(y, x) = r4*r4 + r6*r6; M(height + y, x) = (r4 + r5)*r6; M(2*height + y, x) = r5*r5 + r6*r6; M(3*height + y, x) = r4*r2 + r6*r3; M(4*height + y, x) = r6*r2 + r5*r3; } } void setUpdateMatricesConsts() { static const float border[BORDER_SIZE + 1] = {0.14f, 0.14f, 0.4472f, 0.4472f, 0.4472f, 1.f}; cudaSafeCall(hipMemcpyToSymbol(c_border, border, (BORDER_SIZE + 1) * sizeof(*border))); } void updateMatricesGpu( const PtrStepSzf flowx, const PtrStepSzf flowy, const PtrStepSzf R0, const PtrStepSzf R1, PtrStepSzf M, hipStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(flowx.cols, block.x), divUp(flowx.rows, block.y)); hipLaunchKernelGGL(( updateMatrices), dim3(grid), dim3(block), 0, stream, flowx.rows, flowx.cols, flowx, flowy, R0, R1, M); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } __global__ void updateFlow( const int height, const int width, const PtrStepf M, PtrStepf flowx, PtrStepf flowy) { const int y = by * bdy + ty; const int x = bx * bdx + tx; if (y < height && x < width) { float g11 = M(y, x); float g12 = M(height + y, x); float g22 = M(2*height + y, x); float h1 = M(3*height + y, x); float h2 = M(4*height + y, x); float detInv = 1.f / (g11*g22 - g12*g12 + 1e-3f); flowx(y, x) = (g11*h2 - g12*h1) * detInv; flowy(y, x) = (g22*h1 - g12*h2) * detInv; } } void updateFlowGpu(const PtrStepSzf M, PtrStepSzf flowx, PtrStepSzf flowy, hipStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(flowx.cols, block.x), divUp(flowx.rows, block.y)); hipLaunchKernelGGL(( updateFlow), dim3(grid), dim3(block), 0, stream, flowx.rows, flowx.cols, M, flowx, flowy); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } /*__global__ void boxFilter( const int height, const int width, const PtrStepf src, const int ksizeHalf, const float boxAreaInv, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; volatile float *row = smem + ty * (bdx + 2*ksizeHalf); if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = ::min(::max(xExt, 0), width - 1); row[i] = src(y, xExt); 
for (int j = 1; j <= ksizeHalf; ++j) row[i] += src(::max(y - j, 0), xExt) + src(::min(y + j, height - 1), xExt); } if (x < width) { __syncthreads(); // Horizontal passs row += tx + ksizeHalf; float res = row[0]; for (int i = 1; i <= ksizeHalf; ++i) res += row[-i] + row[i]; dst(y, x) = res * boxAreaInv; } } } void boxFilterGpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, hipStream_t stream) { dim3 block(256); dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); int smem = (block.x + 2*ksizeHalf) * block.y * sizeof(float); float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf)); boxFilter<<<grid, block, smem, stream>>>(src.rows, src.cols, src, ksizeHalf, boxAreaInv, dst); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); }*/ __global__ void boxFilter5( const int height, const int width, const PtrStepf src, const int ksizeHalf, const float boxAreaInv, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; const int smw = bdx + 2*ksizeHalf; // shared memory "width" volatile float *row = smem + 5 * ty * smw; if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = ::min(::max(xExt, 0), width - 1); #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] = src(k*height + y, xExt); for (int j = 1; j <= ksizeHalf; ++j) #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] += src(k*height + ::max(y - j, 0), xExt) + src(k*height + ::min(y + j, height - 1), xExt); } if (x < width) { __syncthreads(); // Horizontal passs row += tx + ksizeHalf; float res[5]; #pragma unroll for (int k = 0; k < 5; ++k) res[k] = row[k*smw]; for (int i = 1; i <= ksizeHalf; ++i) #pragma unroll for (int k = 0; k < 5; ++k) res[k] += row[k*smw - i] + row[k*smw + i]; #pragma unroll for (int k = 0; k < 5; ++k) dst(k*height + y, x) = res[k] * boxAreaInv; } } } void boxFilter5Gpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, hipStream_t stream) { int height = src.rows / 5; int width = src.cols; dim3 block(256); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float); float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf)); hipLaunchKernelGGL(( boxFilter5), dim3(grid), dim3(block), smem, stream, height, width, src, ksizeHalf, boxAreaInv, dst); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } void boxFilter5Gpu_CC11(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, hipStream_t stream) { int height = src.rows / 5; int width = src.cols; dim3 block(128); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float); float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf)); hipLaunchKernelGGL(( boxFilter5), dim3(grid), dim3(block), smem, stream, height, width, src, ksizeHalf, boxAreaInv, dst); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } __constant__ float c_gKer[MAX_KSIZE_HALF + 1]; template <typename Border> __global__ void gaussianBlur( const int height, const int width, const PtrStepf src, const int ksizeHalf, const Border b, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; volatile float *row = smem + ty * (bdx + 2*ksizeHalf); if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - 
ksizeHalf; xExt = b.idx_col(xExt); row[i] = src(y, xExt) * c_gKer[0]; for (int j = 1; j <= ksizeHalf; ++j) row[i] += (src(b.idx_row_low(y - j), xExt) + src(b.idx_row_high(y + j), xExt)) * c_gKer[j]; } if (x < width) { __syncthreads(); // Horizontal pass row += tx + ksizeHalf; float res = row[0] * c_gKer[0]; for (int i = 1; i <= ksizeHalf; ++i) res += (row[-i] + row[i]) * c_gKer[i]; dst(y, x) = res; } } } void setGaussianBlurKernel(const float *gKer, int ksizeHalf) { cudaSafeCall(hipMemcpyToSymbol(c_gKer, gKer, (ksizeHalf + 1) * sizeof(*gKer))); } template <typename Border> void gaussianBlurCaller(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, hipStream_t stream) { int height = src.rows; int width = src.cols; dim3 block(256); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * block.y * sizeof(float); Border b(height, width); hipLaunchKernelGGL(( gaussianBlur), dim3(grid), dim3(block), smem, stream, height, width, src, ksizeHalf, b, dst); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } void gaussianBlurGpu( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderMode, hipStream_t stream) { typedef void (*caller_t)(const PtrStepSzf, int, PtrStepSzf, hipStream_t); static const caller_t callers[] = { 0 /*gaussianBlurCaller<BrdConstant<float> >*/, gaussianBlurCaller<BrdReplicate<float> >, 0 /*gaussianBlurCaller<BrdReflect<float> >*/, 0 /*gaussianBlurCaller<BrdWrap<float> >*/, gaussianBlurCaller<BrdReflect101<float> > }; callers[borderMode](src, ksizeHalf, dst, stream); } template <typename Border> __global__ void gaussianBlur5( const int height, const int width, const PtrStepf src, const int ksizeHalf, const Border b, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; const int smw = bdx + 2*ksizeHalf; // shared memory "width" volatile float *row = smem + 5 * ty * smw; if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = b.idx_col(xExt); #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] = src(k*height + y, xExt) * c_gKer[0]; for (int j = 1; j <= ksizeHalf; ++j) #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] += (src(k*height + b.idx_row_low(y - j), xExt) + src(k*height + b.idx_row_high(y + j), xExt)) * c_gKer[j]; } if (x < width) { __syncthreads(); // Horizontal pass row += tx + ksizeHalf; float res[5]; #pragma unroll for (int k = 0; k < 5; ++k) res[k] = row[k*smw] * c_gKer[0]; for (int i = 1; i <= ksizeHalf; ++i) #pragma unroll for (int k = 0; k < 5; ++k) res[k] += (row[k*smw - i] + row[k*smw + i]) * c_gKer[i]; #pragma unroll for (int k = 0; k < 5; ++k) dst(k*height + y, x) = res[k]; } } } template <typename Border, int blockDimX> void gaussianBlur5Caller( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, hipStream_t stream) { int height = src.rows / 5; int width = src.cols; dim3 block(blockDimX); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float); Border b(height, width); hipLaunchKernelGGL(( gaussianBlur5), dim3(grid), dim3(block), smem, stream, height, width, src, ksizeHalf, b, dst); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } void gaussianBlur5Gpu( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderMode, hipStream_t stream) { typedef void (*caller_t)(const PtrStepSzf, int, PtrStepSzf, hipStream_t); static 
const caller_t callers[] = { 0 /*gaussianBlur5Caller<BrdConstant<float>,256>*/, gaussianBlur5Caller<BrdReplicate<float>,256>, 0 /*gaussianBlur5Caller<BrdReflect<float>,256>*/, 0 /*gaussianBlur5Caller<BrdWrap<float>,256>*/, gaussianBlur5Caller<BrdReflect101<float>,256> }; callers[borderMode](src, ksizeHalf, dst, stream); } void gaussianBlur5Gpu_CC11( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderMode, hipStream_t stream) { typedef void (*caller_t)(const PtrStepSzf, int, PtrStepSzf, hipStream_t); static const caller_t callers[] = { 0 /*gaussianBlur5Caller<BrdConstant<float>,128>*/, gaussianBlur5Caller<BrdReplicate<float>,128>, 0 /*gaussianBlur5Caller<BrdReflect<float>,128>*/, 0 /*gaussianBlur5Caller<BrdWrap<float>,128>*/, gaussianBlur5Caller<BrdReflect101<float>,128> }; callers[borderMode](src, ksizeHalf, dst, stream); } }}}} // namespace cv { namespace cuda { namespace cudev { namespace optflow_farneback #endif /* CUDA_DISABLER */
dda6bfdc2b9c861e0752fc3e67479729e6e3d825.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #define tx threadIdx.x #define ty threadIdx.y #define bx blockIdx.x #define by blockIdx.y #define bdx blockDim.x #define bdy blockDim.y #define BORDER_SIZE 5 #define MAX_KSIZE_HALF 100 namespace cv { namespace cuda { namespace device { namespace optflow_farneback { __constant__ float c_g[8]; __constant__ float c_xg[8]; __constant__ float c_xxg[8]; __constant__ float c_ig11, c_ig03, c_ig33, c_ig55; template <int polyN> __global__ void polynomialExpansion( const int height, const int width, const PtrStepf src, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * (bdx - 2*polyN) + tx - polyN; if (y < height) { extern __shared__ float smem[]; volatile float *row = smem + tx; int xWarped = ::min(::max(x, 0), width - 1); row[0] = src(y, xWarped) * c_g[0]; row[bdx] = 0.f; row[2*bdx] = 0.f; for (int k = 1; k <= polyN; ++k) { float t0 = src(::max(y - k, 0), xWarped); float t1 = src(::min(y + k, height - 1), xWarped); row[0] += c_g[k] * (t0 + t1); row[bdx] += c_xg[k] * (t1 - t0); row[2*bdx] += c_xxg[k] * (t0 + t1); } __syncthreads(); if (tx >= polyN && tx + polyN < bdx && x < width) { float b1 = c_g[0] * row[0]; float b3 = c_g[0] * row[bdx]; float b5 = c_g[0] * row[2*bdx]; float b2 = 0, b4 = 0, b6 = 0; for (int k = 1; k <= polyN; ++k) { b1 += (row[k] + row[-k]) * c_g[k]; b4 += (row[k] + row[-k]) * c_xxg[k]; b2 += (row[k] - row[-k]) * c_xg[k]; b3 += (row[k + bdx] + row[-k + bdx]) * c_g[k]; b6 += (row[k + bdx] - row[-k + bdx]) * c_xg[k]; b5 += (row[k + 2*bdx] + row[-k + 2*bdx]) * c_g[k]; } dst(y, xWarped) = b3*c_ig11; dst(height + y, xWarped) = b2*c_ig11; dst(2*height + y, xWarped) = b1*c_ig03 + b5*c_ig33; dst(3*height + y, xWarped) = b1*c_ig03 + b4*c_ig33; dst(4*height + y, xWarped) = b6*c_ig55; } } } void setPolynomialExpansionConsts( int polyN, const float *g, const float *xg, const float *xxg, float ig11, float ig03, float ig33, float ig55) { cudaSafeCall(cudaMemcpyToSymbol(c_g, g, (polyN + 1) * sizeof(*g))); cudaSafeCall(cudaMemcpyToSymbol(c_xg, xg, (polyN + 1) * sizeof(*xg))); cudaSafeCall(cudaMemcpyToSymbol(c_xxg, xxg, (polyN + 1) * sizeof(*xxg))); cudaSafeCall(cudaMemcpyToSymbol(c_ig11, &ig11, sizeof(ig11))); cudaSafeCall(cudaMemcpyToSymbol(c_ig03, &ig03, sizeof(ig03))); cudaSafeCall(cudaMemcpyToSymbol(c_ig33, &ig33, sizeof(ig33))); cudaSafeCall(cudaMemcpyToSymbol(c_ig55, &ig55, sizeof(ig55))); } void polynomialExpansionGpu(const PtrStepSzf &src, int polyN, PtrStepSzf dst, cudaStream_t stream) { dim3 block(256); dim3 grid(divUp(src.cols, block.x - 2*polyN), src.rows); int smem = 3 * block.x * sizeof(float); if (polyN == 5) polynomialExpansion<5><<<grid, block, smem, stream>>>(src.rows, src.cols, src, dst); else if (polyN == 7) polynomialExpansion<7><<<grid, block, smem, stream>>>(src.rows, src.cols, src, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } __constant__ float c_border[BORDER_SIZE + 1]; __global__ void updateMatrices( const int height, const int width, const PtrStepf flowx, const PtrStepf flowy, const PtrStepf R0, const PtrStepf R1, PtrStepf M) { const int y = by * bdy + ty; const int x = bx * bdx + tx; if (y < height && x < width) { float dx = flowx(y, x); float dy = flowy(y, x); float fx = x + dx; float fy = y + dy; int x1 = floorf(fx); int y1 = floorf(fy); fx -= x1; fy -= y1; float r2, r3, r4, r5, r6; if (x1 >= 0 && y1 >= 0 && x1 < width - 1 && y1 < height - 1) { float a00 = (1.f - fx) * (1.f - 
fy); float a01 = fx * (1.f - fy); float a10 = (1.f - fx) * fy; float a11 = fx * fy; r2 = a00 * R1(y1, x1) + a01 * R1(y1, x1 + 1) + a10 * R1(y1 + 1, x1) + a11 * R1(y1 + 1, x1 + 1); r3 = a00 * R1(height + y1, x1) + a01 * R1(height + y1, x1 + 1) + a10 * R1(height + y1 + 1, x1) + a11 * R1(height + y1 + 1, x1 + 1); r4 = a00 * R1(2*height + y1, x1) + a01 * R1(2*height + y1, x1 + 1) + a10 * R1(2*height + y1 + 1, x1) + a11 * R1(2*height + y1 + 1, x1 + 1); r5 = a00 * R1(3*height + y1, x1) + a01 * R1(3*height + y1, x1 + 1) + a10 * R1(3*height + y1 + 1, x1) + a11 * R1(3*height + y1 + 1, x1 + 1); r6 = a00 * R1(4*height + y1, x1) + a01 * R1(4*height + y1, x1 + 1) + a10 * R1(4*height + y1 + 1, x1) + a11 * R1(4*height + y1 + 1, x1 + 1); r4 = (R0(2*height + y, x) + r4) * 0.5f; r5 = (R0(3*height + y, x) + r5) * 0.5f; r6 = (R0(4*height + y, x) + r6) * 0.25f; } else { r2 = r3 = 0.f; r4 = R0(2*height + y, x); r5 = R0(3*height + y, x); r6 = R0(4*height + y, x) * 0.5f; } r2 = (R0(y, x) - r2) * 0.5f; r3 = (R0(height + y, x) - r3) * 0.5f; r2 += r4*dy + r6*dx; r3 += r6*dy + r5*dx; float scale = c_border[::min(x, BORDER_SIZE)] * c_border[::min(y, BORDER_SIZE)] * c_border[::min(width - x - 1, BORDER_SIZE)] * c_border[::min(height - y - 1, BORDER_SIZE)]; r2 *= scale; r3 *= scale; r4 *= scale; r5 *= scale; r6 *= scale; M(y, x) = r4*r4 + r6*r6; M(height + y, x) = (r4 + r5)*r6; M(2*height + y, x) = r5*r5 + r6*r6; M(3*height + y, x) = r4*r2 + r6*r3; M(4*height + y, x) = r6*r2 + r5*r3; } } void setUpdateMatricesConsts() { static const float border[BORDER_SIZE + 1] = {0.14f, 0.14f, 0.4472f, 0.4472f, 0.4472f, 1.f}; cudaSafeCall(cudaMemcpyToSymbol(c_border, border, (BORDER_SIZE + 1) * sizeof(*border))); } void updateMatricesGpu( const PtrStepSzf flowx, const PtrStepSzf flowy, const PtrStepSzf R0, const PtrStepSzf R1, PtrStepSzf M, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(flowx.cols, block.x), divUp(flowx.rows, block.y)); updateMatrices<<<grid, block, 0, stream>>>(flowx.rows, flowx.cols, flowx, flowy, R0, R1, M); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } __global__ void updateFlow( const int height, const int width, const PtrStepf M, PtrStepf flowx, PtrStepf flowy) { const int y = by * bdy + ty; const int x = bx * bdx + tx; if (y < height && x < width) { float g11 = M(y, x); float g12 = M(height + y, x); float g22 = M(2*height + y, x); float h1 = M(3*height + y, x); float h2 = M(4*height + y, x); float detInv = 1.f / (g11*g22 - g12*g12 + 1e-3f); flowx(y, x) = (g11*h2 - g12*h1) * detInv; flowy(y, x) = (g22*h1 - g12*h2) * detInv; } } void updateFlowGpu(const PtrStepSzf M, PtrStepSzf flowx, PtrStepSzf flowy, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(flowx.cols, block.x), divUp(flowx.rows, block.y)); updateFlow<<<grid, block, 0, stream>>>(flowx.rows, flowx.cols, M, flowx, flowy); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } /*__global__ void boxFilter( const int height, const int width, const PtrStepf src, const int ksizeHalf, const float boxAreaInv, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; volatile float *row = smem + ty * (bdx + 2*ksizeHalf); if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = ::min(::max(xExt, 0), width - 1); row[i] = src(y, xExt); for (int j = 1; j <= ksizeHalf; ++j) row[i] += src(::max(y - j, 0), xExt) + src(::min(y + j, height - 1), 
xExt); } if (x < width) { __syncthreads(); // Horizontal passs row += tx + ksizeHalf; float res = row[0]; for (int i = 1; i <= ksizeHalf; ++i) res += row[-i] + row[i]; dst(y, x) = res * boxAreaInv; } } } void boxFilterGpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream) { dim3 block(256); dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); int smem = (block.x + 2*ksizeHalf) * block.y * sizeof(float); float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf)); boxFilter<<<grid, block, smem, stream>>>(src.rows, src.cols, src, ksizeHalf, boxAreaInv, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); }*/ __global__ void boxFilter5( const int height, const int width, const PtrStepf src, const int ksizeHalf, const float boxAreaInv, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; const int smw = bdx + 2*ksizeHalf; // shared memory "width" volatile float *row = smem + 5 * ty * smw; if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = ::min(::max(xExt, 0), width - 1); #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] = src(k*height + y, xExt); for (int j = 1; j <= ksizeHalf; ++j) #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] += src(k*height + ::max(y - j, 0), xExt) + src(k*height + ::min(y + j, height - 1), xExt); } if (x < width) { __syncthreads(); // Horizontal passs row += tx + ksizeHalf; float res[5]; #pragma unroll for (int k = 0; k < 5; ++k) res[k] = row[k*smw]; for (int i = 1; i <= ksizeHalf; ++i) #pragma unroll for (int k = 0; k < 5; ++k) res[k] += row[k*smw - i] + row[k*smw + i]; #pragma unroll for (int k = 0; k < 5; ++k) dst(k*height + y, x) = res[k] * boxAreaInv; } } } void boxFilter5Gpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream) { int height = src.rows / 5; int width = src.cols; dim3 block(256); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float); float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf)); boxFilter5<<<grid, block, smem, stream>>>(height, width, src, ksizeHalf, boxAreaInv, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } void boxFilter5Gpu_CC11(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream) { int height = src.rows / 5; int width = src.cols; dim3 block(128); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float); float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf)); boxFilter5<<<grid, block, smem, stream>>>(height, width, src, ksizeHalf, boxAreaInv, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } __constant__ float c_gKer[MAX_KSIZE_HALF + 1]; template <typename Border> __global__ void gaussianBlur( const int height, const int width, const PtrStepf src, const int ksizeHalf, const Border b, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; volatile float *row = smem + ty * (bdx + 2*ksizeHalf); if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = b.idx_col(xExt); row[i] = src(y, xExt) * c_gKer[0]; for (int j = 1; j <= ksizeHalf; ++j) row[i] += (src(b.idx_row_low(y - j), xExt) + 
src(b.idx_row_high(y + j), xExt)) * c_gKer[j]; } if (x < width) { __syncthreads(); // Horizontal pass row += tx + ksizeHalf; float res = row[0] * c_gKer[0]; for (int i = 1; i <= ksizeHalf; ++i) res += (row[-i] + row[i]) * c_gKer[i]; dst(y, x) = res; } } } void setGaussianBlurKernel(const float *gKer, int ksizeHalf) { cudaSafeCall(cudaMemcpyToSymbol(c_gKer, gKer, (ksizeHalf + 1) * sizeof(*gKer))); } template <typename Border> void gaussianBlurCaller(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream) { int height = src.rows; int width = src.cols; dim3 block(256); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * block.y * sizeof(float); Border b(height, width); gaussianBlur<<<grid, block, smem, stream>>>(height, width, src, ksizeHalf, b, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } void gaussianBlurGpu( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderMode, cudaStream_t stream) { typedef void (*caller_t)(const PtrStepSzf, int, PtrStepSzf, cudaStream_t); static const caller_t callers[] = { 0 /*gaussianBlurCaller<BrdConstant<float> >*/, gaussianBlurCaller<BrdReplicate<float> >, 0 /*gaussianBlurCaller<BrdReflect<float> >*/, 0 /*gaussianBlurCaller<BrdWrap<float> >*/, gaussianBlurCaller<BrdReflect101<float> > }; callers[borderMode](src, ksizeHalf, dst, stream); } template <typename Border> __global__ void gaussianBlur5( const int height, const int width, const PtrStepf src, const int ksizeHalf, const Border b, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; const int smw = bdx + 2*ksizeHalf; // shared memory "width" volatile float *row = smem + 5 * ty * smw; if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = b.idx_col(xExt); #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] = src(k*height + y, xExt) * c_gKer[0]; for (int j = 1; j <= ksizeHalf; ++j) #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] += (src(k*height + b.idx_row_low(y - j), xExt) + src(k*height + b.idx_row_high(y + j), xExt)) * c_gKer[j]; } if (x < width) { __syncthreads(); // Horizontal pass row += tx + ksizeHalf; float res[5]; #pragma unroll for (int k = 0; k < 5; ++k) res[k] = row[k*smw] * c_gKer[0]; for (int i = 1; i <= ksizeHalf; ++i) #pragma unroll for (int k = 0; k < 5; ++k) res[k] += (row[k*smw - i] + row[k*smw + i]) * c_gKer[i]; #pragma unroll for (int k = 0; k < 5; ++k) dst(k*height + y, x) = res[k]; } } } template <typename Border, int blockDimX> void gaussianBlur5Caller( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream) { int height = src.rows / 5; int width = src.cols; dim3 block(blockDimX); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float); Border b(height, width); gaussianBlur5<<<grid, block, smem, stream>>>(height, width, src, ksizeHalf, b, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } void gaussianBlur5Gpu( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderMode, cudaStream_t stream) { typedef void (*caller_t)(const PtrStepSzf, int, PtrStepSzf, cudaStream_t); static const caller_t callers[] = { 0 /*gaussianBlur5Caller<BrdConstant<float>,256>*/, gaussianBlur5Caller<BrdReplicate<float>,256>, 0 /*gaussianBlur5Caller<BrdReflect<float>,256>*/, 0 
/*gaussianBlur5Caller<BrdWrap<float>,256>*/, gaussianBlur5Caller<BrdReflect101<float>,256> }; callers[borderMode](src, ksizeHalf, dst, stream); } void gaussianBlur5Gpu_CC11( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderMode, cudaStream_t stream) { typedef void (*caller_t)(const PtrStepSzf, int, PtrStepSzf, cudaStream_t); static const caller_t callers[] = { 0 /*gaussianBlur5Caller<BrdConstant<float>,128>*/, gaussianBlur5Caller<BrdReplicate<float>,128>, 0 /*gaussianBlur5Caller<BrdReflect<float>,128>*/, 0 /*gaussianBlur5Caller<BrdWrap<float>,128>*/, gaussianBlur5Caller<BrdReflect101<float>,128> }; callers[borderMode](src, ksizeHalf, dst, stream); } }}}} // namespace cv { namespace cuda { namespace cudev { namespace optflow_farneback #endif /* CUDA_DISABLER */
c47f4fd387198a1ad600986adfa7f8ab2c3422b2.hip
// !!! This is a file automatically generated by hipify!!!
#include "Camera.cuh"

__device__ Vector3 random_in_unit_disk(hiprandState_t *random){
    Vector3 p;
    do{
        p = 2.0*Vector3(hiprand_uniform(random), hiprand_uniform(random), 0) - Vector3(1,1,0);
    }while(dot(p,p) >= 1.0);
    return p;
}

__host__ __device__ Camera::Camera(Vector3 lookfrom, Vector3 lookat, Vector3 vup, float vfov, float aspect,
                                   float aperture, float focus_dist, float time0, float time1){
    this->lookfrom = lookfrom;
    this->lookat = lookat;
    this->vup = vup;
    this->vfov = vfov;
    this->focus_dist = (lookfrom - lookat).length() * 2.f;
    this->aperture = aperture;
    this->time0 = time0;
    this->time1 = time1;

    lens_radius = aperture/2;
    float theta = vfov*M_PI/180.0;
    float half_height = tan(theta/2);
    float half_width = aspect * half_height;
    origin = lookfrom;
    w = normalize(lookfrom - lookat);
    u = normalize(cross(vup, w));
    v = cross(w, u);
    lower_left_corner = origin - half_width*focus_dist*u - half_height*focus_dist*v - focus_dist*w;
    horizontal = 2*half_width*focus_dist*u;
    vertical = 2*half_height*focus_dist*v;
}

__device__ Ray Camera::get_ray(float s, float t, hiprandState_t *random){
    Vector3 rd = lens_radius*random_in_unit_disk(random);
    Vector3 offset = u*rd.x() + v*rd.y();
    float time = time0 + hiprand_uniform(random) * (time1-time0);
    return Ray(origin + offset, lower_left_corner + s*horizontal + t*vertical - origin - offset, time);
}
c47f4fd387198a1ad600986adfa7f8ab2c3422b2.cu
#include "Camera.cuh" __device__ Vector3 random_in_unit_disk(curandState *random){ Vector3 p; do{ p = 2.0*Vector3(curand_uniform(random), curand_uniform(random), 0) - Vector3(1,1,0); }while(dot(p,p) >= 1.0); return p; } __host__ __device__ Camera::Camera(Vector3 lookfrom, Vector3 lookat, Vector3 vup, float vfov, float aspect, float aperture, float focus_dist, float time0, float time1){ this->lookfrom = lookfrom; this->lookat = lookat; this->vup = vup; this->vfov = vfov; this->focus_dist = (lookfrom - lookat).length() * 2.f; this->aperture = aperture; this->time0 = time0; this->time1 = time1; lens_radius = aperture/2; float theta = vfov*M_PI/180.0; float half_height = tan(theta/2); float half_width = aspect * half_height; origin = lookfrom; w = normalize(lookfrom - lookat); u = normalize(cross(vup, w)); v = cross(w, u); lower_left_corner = origin - half_width*focus_dist*u - half_height*focus_dist*v - focus_dist*w; horizontal = 2*half_width*focus_dist*u; vertical = 2*half_height*focus_dist*v; } __device__ Ray Camera::get_ray(float s, float t, curandState *random){ Vector3 rd = lens_radius*random_in_unit_disk(random); Vector3 offset = u*rd.x() + v*rd.y(); float time = time0 + curand_uniform(random) * (time1-time0); return Ray(origin + offset, lower_left_corner + s*horizontal + t*vertical - origin - offset, time); }
35303972e9a17a50ae59ee31ba818d9fc2909278.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2015 NVIDIA Corporation. All rights reserved * * Sample app to demonstrate use of CUPTI library to obtain metric values * using callbacks for CUDA runtime APIs * */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> // #include <cupti.h> #include <math_constants.h> #include "lcutil.h" #include <hip/hip_runtime_api.h> // #include <gpuCUPTISampler.cuh> #define METRIC_NAME_TESLA "branch_efficiency" #define METRIC_NAME_FERMI "ipc" #define ALIGN_SIZE (8) #define ALIGN_BUFFER(buffer, align) \ (((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer)) #define COMP_ITERATIONS (16384) //k40c // #define COMP_ITERATIONS (32768) // titanx // #define THREADS (1024) // #define BLOCKS (1024) #define THREADS (1024) #define BLOCKS (32760) #define REGBLOCK_SIZE (4) #define UNROLL_ITERATIONS (32) #define deviceNum (0) template <class T> __global__ void benchmark (){ __shared__ T shared[THREADS]; T r0; #pragma unroll 16384 for(int j=0; j<COMP_ITERATIONS; j+=UNROLL_ITERATIONS){ r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; } } double median(int n, double x[][4],int col) { double temp; int i, j; // the following two loops sort the array x in ascending order for(i=0; i<n-1; i++) { for(j=i+1; j<n; j++) { if(x[j][col] < x[i][col]) { // swap elements temp = x[i][col]; x[i][col] = x[j][col]; x[j][col] = temp; } } } if(n%2==0) { // if there is 
an even number of elements, return mean of the two elements in the middle return((x[n/2][col] + x[n/2 - 1][col]) / 2.0); } else { // else return the element in the middle return x[n/2][col]; } } void initializeEvents(hipEvent_t *start, hipEvent_t *stop){ CUDA_SAFE_CALL( hipEventCreate(start) ); CUDA_SAFE_CALL( hipEventCreate(stop) ); CUDA_SAFE_CALL( hipEventRecord(*start, 0) ); } float finalizeEvents(hipEvent_t start, hipEvent_t stop){ CUDA_SAFE_CALL( hipGetLastError() ); CUDA_SAFE_CALL( hipEventRecord(stop, 0) ); CUDA_SAFE_CALL( hipEventSynchronize(stop) ); float kernel_time; CUDA_SAFE_CALL( hipEventElapsedTime(&kernel_time, start, stop) ); CUDA_SAFE_CALL( hipEventDestroy(start) ); CUDA_SAFE_CALL( hipEventDestroy(stop) ); return kernel_time; } void runbench(int type, double* kernel_time, double* bandw){ const long long shared_access = 2*(long long)(COMP_ITERATIONS)*THREADS*BLOCKS; dim3 dimBlock(THREADS, 1, 1); dim3 dimGrid(BLOCKS, 1, 1); hipEvent_t start, stop; initializeEvents(&start, &stop); hipLaunchKernelGGL(( benchmark<float>), dim3(dimGrid), dim3(dimBlock) , 0, 0, ); hipDeviceSynchronize(); double time = finalizeEvents(start, stop); double result; if (type==0) result = ((double)shared_access)*4/(double)time*1000./(double)(1000*1000*1000); else result = ((double)shared_access)*8/(double)time*1000./(double)(1000*1000*1000); *kernel_time = time; *bandw=result; } int main(int argc, char *argv[]){ // CUpti_SubscriberHandle subscriber; hipDevice_t device = 0; int deviceCount; char deviceName[32]; hipDeviceProp_t deviceProp; printf("Usage: %s [device_num] [metric_name]\n", argv[0]); int ntries; if (argc>1){ ntries = atoi(argv[1]); }else{ ntries = 1; } hipSetDevice(deviceNum); double time[ntries][2],value[ntries][4]; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { printf("There is no device supporting CUDA.\n"); return -2; } printf("CUDA Device Number: %d\n", deviceNum); hipDeviceGet(&device, deviceNum); CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, device)); hipDeviceGetName(deviceName, 32, device); // DRIVER_API_CALL(hipCtxCreate(&context, 0, device)); int i; class type; int dodouble=0; for (i=0;i<ntries;i++){ runbench(dodouble,&time[0][0],&value[0][0]); printf("Registered time: %f ms\n",time[0][0]); } CUDA_SAFE_CALL( hipDeviceReset()); return 0; }
35303972e9a17a50ae59ee31ba818d9fc2909278.cu
/* * Copyright 2011-2015 NVIDIA Corporation. All rights reserved * * Sample app to demonstrate use of CUPTI library to obtain metric values * using callbacks for CUDA runtime APIs * */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> // #include <cupti.h> #include <math_constants.h> #include "lcutil.h" #include <cuda_profiler_api.h> // #include <gpuCUPTISampler.cuh> #define METRIC_NAME_TESLA "branch_efficiency" #define METRIC_NAME_FERMI "ipc" #define ALIGN_SIZE (8) #define ALIGN_BUFFER(buffer, align) \ (((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer)) #define COMP_ITERATIONS (16384) //k40c // #define COMP_ITERATIONS (32768) // titanx // #define THREADS (1024) // #define BLOCKS (1024) #define THREADS (1024) #define BLOCKS (32760) #define REGBLOCK_SIZE (4) #define UNROLL_ITERATIONS (32) #define deviceNum (0) template <class T> __global__ void benchmark (){ __shared__ T shared[THREADS]; T r0; #pragma unroll 16384 for(int j=0; j<COMP_ITERATIONS; j+=UNROLL_ITERATIONS){ r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; r0 = shared[threadIdx.x]; shared[THREADS - threadIdx.x - 1] = r0; } } double median(int n, double x[][4],int col) { double temp; int i, j; // the following two loops sort the array x in ascending order for(i=0; i<n-1; i++) { for(j=i+1; j<n; j++) { if(x[j][col] < x[i][col]) { // swap elements temp = x[i][col]; x[i][col] = x[j][col]; x[j][col] = temp; } } } if(n%2==0) { // if there is an even number of elements, return mean of the two elements in the middle 
return((x[n/2][col] + x[n/2 - 1][col]) / 2.0); } else { // else return the element in the middle return x[n/2][col]; } } void initializeEvents(cudaEvent_t *start, cudaEvent_t *stop){ CUDA_SAFE_CALL( cudaEventCreate(start) ); CUDA_SAFE_CALL( cudaEventCreate(stop) ); CUDA_SAFE_CALL( cudaEventRecord(*start, 0) ); } float finalizeEvents(cudaEvent_t start, cudaEvent_t stop){ CUDA_SAFE_CALL( cudaGetLastError() ); CUDA_SAFE_CALL( cudaEventRecord(stop, 0) ); CUDA_SAFE_CALL( cudaEventSynchronize(stop) ); float kernel_time; CUDA_SAFE_CALL( cudaEventElapsedTime(&kernel_time, start, stop) ); CUDA_SAFE_CALL( cudaEventDestroy(start) ); CUDA_SAFE_CALL( cudaEventDestroy(stop) ); return kernel_time; } void runbench(int type, double* kernel_time, double* bandw){ const long long shared_access = 2*(long long)(COMP_ITERATIONS)*THREADS*BLOCKS; dim3 dimBlock(THREADS, 1, 1); dim3 dimGrid(BLOCKS, 1, 1); cudaEvent_t start, stop; initializeEvents(&start, &stop); benchmark<float><<< dimGrid, dimBlock >>>(); cudaDeviceSynchronize(); double time = finalizeEvents(start, stop); double result; if (type==0) result = ((double)shared_access)*4/(double)time*1000./(double)(1000*1000*1000); else result = ((double)shared_access)*8/(double)time*1000./(double)(1000*1000*1000); *kernel_time = time; *bandw=result; } int main(int argc, char *argv[]){ // CUpti_SubscriberHandle subscriber; CUdevice device = 0; int deviceCount; char deviceName[32]; cudaDeviceProp deviceProp; printf("Usage: %s [device_num] [metric_name]\n", argv[0]); int ntries; if (argc>1){ ntries = atoi(argv[1]); }else{ ntries = 1; } cudaSetDevice(deviceNum); double time[ntries][2],value[ntries][4]; cuDeviceGetCount(&deviceCount); if (deviceCount == 0) { printf("There is no device supporting CUDA.\n"); return -2; } printf("CUDA Device Number: %d\n", deviceNum); cuDeviceGet(&device, deviceNum); CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, device)); cuDeviceGetName(deviceName, 32, device); // DRIVER_API_CALL(cuCtxCreate(&context, 0, device)); int i; class type; int dodouble=0; for (i=0;i<ntries;i++){ runbench(dodouble,&time[0][0],&value[0][0]); printf("Registered time: %f ms\n",time[0][0]); } CUDA_SAFE_CALL( cudaDeviceReset()); return 0; }
b152da761a02cf13f31134db37748c4a0421aa6e.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/core/Array.h> #include <ATen/ExpandUtils.h> namespace at { namespace native { template <typename func_t> void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) { int num_indices = index_size.size(); AT_ASSERT(num_indices == index_stride.size()); AT_ASSERT(num_indices == iter.ntensors() - 2); if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { gpu_index_kernel(sub_iter, index_size, index_stride, f); } return; } auto sizes = at::detail::Array<int64_t, 25>(0); auto strides = at::detail::Array<int64_t, 25>(0); auto index_ptrs = at::detail::Array<char*, 25>(nullptr); for (int i = 0; i < num_indices; i++) { sizes[i] = index_size[i]; strides[i] = index_stride[i]; index_ptrs[i] = (char*)iter.data_ptr(i + 2); } char* out_ptr = (char*)iter.data_ptr(0); char* in_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<3>(iter); legacy::launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) { auto offsets = offset_calc.get(idx); char* out_data = out_ptr + offsets[0]; char* in_data = in_ptr + offsets[1]; int64_t offset = 0; #pragma unroll for (int i = 0; i < num_indices; i++) { int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]); CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds"); if (index < 0) { index += sizes[i]; } offset += index * strides[i]; } f(out_data, in_data, offset); }); } // The kernels are templated on an opaque, self-aligned type of the correct // size to avoid redundant kernels for different types of the same size. 
template <int N> struct alignas(N) OpaqueType { char data[N]; }; template <typename scalar_t> void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)out_data = *(scalar_t*)(in_data + offset); }); } template <typename scalar_t> void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)(out_data + offset) = *(scalar_t*)in_data; }); } static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_kernel_impl<dtype>(iter, index_size, index_stride); }); } static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) { AT_ASSERTM(!accumulate, "index_put does not support accumulate=true"); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_put", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_put_kernel_impl<dtype>(iter, index_size, index_stride); }); } static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) { NoNamesGuard guard; TORCH_CHECK(self.scalar_type() != ScalarType::BFloat16, "masked_select: bfloat16 not supported for CUDA implementation"); TORCH_CHECK(mask.scalar_type() == ScalarType::Byte || mask.scalar_type() == ScalarType::Bool, "masked_select: expected BoolTensor or ByteTensor for mask"); TORCH_CHECK(self.scalar_type() == result.scalar_type(), "masked_select(): self and result must have the same scalar type"); Tensor _mask = (mask.dim() == 0) ? mask.unsqueeze(0) : mask; Tensor _self = (self.dim() == 0) ? self.unsqueeze(0) : self; std::tie(_mask, _self) = expand_outplace(_mask, _self); at::native::index_out(result, _self, _mask); return result; } Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) { namedinference::compute_broadcast_outnames(self, mask); Tensor result = at::empty({0}, self.options()); return masked_select_out_cuda_impl(result, self, mask); } Tensor & masked_select_out_cuda(Tensor & result, const Tensor & self, const Tensor & mask) { namedinference::compute_broadcast_outnames(self, mask); return masked_select_out_cuda_impl(result, self, mask); } REGISTER_DISPATCH(index_stub, &index_kernel); REGISTER_DISPATCH(index_put_stub, &index_put_kernel); }} // namespace at::native
b152da761a02cf13f31134db37748c4a0421aa6e.cu
#include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/core/Array.h> #include <ATen/ExpandUtils.h> namespace at { namespace native { template <typename func_t> void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) { int num_indices = index_size.size(); AT_ASSERT(num_indices == index_stride.size()); AT_ASSERT(num_indices == iter.ntensors() - 2); if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { gpu_index_kernel(sub_iter, index_size, index_stride, f); } return; } auto sizes = at::detail::Array<int64_t, 25>(0); auto strides = at::detail::Array<int64_t, 25>(0); auto index_ptrs = at::detail::Array<char*, 25>(nullptr); for (int i = 0; i < num_indices; i++) { sizes[i] = index_size[i]; strides[i] = index_stride[i]; index_ptrs[i] = (char*)iter.data_ptr(i + 2); } char* out_ptr = (char*)iter.data_ptr(0); char* in_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<3>(iter); legacy::launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) { auto offsets = offset_calc.get(idx); char* out_data = out_ptr + offsets[0]; char* in_data = in_ptr + offsets[1]; int64_t offset = 0; #pragma unroll for (int i = 0; i < num_indices; i++) { int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]); CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds"); if (index < 0) { index += sizes[i]; } offset += index * strides[i]; } f(out_data, in_data, offset); }); } // The kernels are templated on an opaque, self-aligned type of the correct // size to avoid redundant kernels for different types of the same size. 
template <int N> struct alignas(N) OpaqueType { char data[N]; }; template <typename scalar_t> void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)out_data = *(scalar_t*)(in_data + offset); }); } template <typename scalar_t> void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)(out_data + offset) = *(scalar_t*)in_data; }); } static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_kernel_impl<dtype>(iter, index_size, index_stride); }); } static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) { AT_ASSERTM(!accumulate, "index_put does not support accumulate=true"); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_put", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_put_kernel_impl<dtype>(iter, index_size, index_stride); }); } static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) { NoNamesGuard guard; TORCH_CHECK(self.scalar_type() != ScalarType::BFloat16, "masked_select: bfloat16 not supported for CUDA implementation"); TORCH_CHECK(mask.scalar_type() == ScalarType::Byte || mask.scalar_type() == ScalarType::Bool, "masked_select: expected BoolTensor or ByteTensor for mask"); TORCH_CHECK(self.scalar_type() == result.scalar_type(), "masked_select(): self and result must have the same scalar type"); Tensor _mask = (mask.dim() == 0) ? mask.unsqueeze(0) : mask; Tensor _self = (self.dim() == 0) ? self.unsqueeze(0) : self; std::tie(_mask, _self) = expand_outplace(_mask, _self); at::native::index_out(result, _self, _mask); return result; } Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) { namedinference::compute_broadcast_outnames(self, mask); Tensor result = at::empty({0}, self.options()); return masked_select_out_cuda_impl(result, self, mask); } Tensor & masked_select_out_cuda(Tensor & result, const Tensor & self, const Tensor & mask) { namedinference::compute_broadcast_outnames(self, mask); return masked_select_out_cuda_impl(result, self, mask); } REGISTER_DISPATCH(index_stub, &index_kernel); REGISTER_DISPATCH(index_put_stub, &index_put_kernel); }} // namespace at::native
40b4625f5e913884b76383be12a1fc7f4118bd5e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2014 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "hip/hip_runtime.h" #include "rocblas.h" #include <stdio.h> /* macro for index calculations */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* matrix size and thread dimensions */ #define SIZE 1024 #define THREAD_X 16 #define THREAD_Y 16 /* naive GPU kernel where each element of C is computed by a single thread */ __global__ void GPU_naive( const int m, double const * const a, double const * const b, double * const c ) { /* insert code to determine my threads's row and col indices in the global C matrix */ const int myrow = FIXME const int mycol = FIXME /* if my row and col are in the C matrix, then calculate that value of C */ if( myrow < m && mycol < m ) { register double temp = 0.0; for( int k = 0; k < m; k++ ) { /* insert correct index code here */ temp += a[INDX( FIXME, FIXME, m )] * b[INDX( FIXME, FIXME, m )]; } /* end for */ /* insert index code to write the output to the C matrix */ c[INDX( FIXME, FIXME, m )] = temp; } /* end if */ return; } /* end GPU_naive */ int main( int argc, char *argv[] ) { const int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); double *h_a, *h_b, *h_c, *h_c1; double *d_a, *d_b, *d_c; size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double ); h_a = (double *) malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_b = (double *) malloc( numbytes ); if( h_b == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c = (double *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c1 = (double *) malloc( numbytes ); if( h_c1 == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } /* zero out the host memory for C matrices */ memset( h_c, 0, numbytes ); memset( h_c1, 0, numbytes ); fprintf( stdout, "Total memory required is %lf MB\n", 3.0 * (double) numbytes / 1000000.0 ); /* initialize the A and B matrices */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); } /* allocate a, b, c in gpu memory */ hipMalloc( (void **)&d_a, numbytes ); hipMalloc( (void **)&d_b, numbytes ); hipMalloc( (void **)&d_c, numbytes ); /* copy a and b to device */ hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ); hipMemcpy( d_b, h_b, numbytes, hipMemcpyHostToDevice ); hipblasHandle_t handle; hipblasStatus_t stat = hipblasCreate( &handle ); double alpha = 1.0; double beta = 0.0; /* start timers */ hipEvent_t start, stop; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start, 0 ); /* call CUBLAS dgemm */ hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N, size, size, size, &alpha, d_a, size, d_b, size, &beta, d_c, size ); /* stop timers */ hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); float elapsedTime; hipEventElapsedTime( &elapsedTime, start, stop ); /* print GPU CUBLAS 
timing information */ fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C from device to host for error checking */ hipMemcpy( h_c, d_c, numbytes, hipMemcpyDeviceToHost ); /* reset C on device to zero */ hipMemset( d_c, 0, numbytes ); /* setup grid and block sizes */ dim3 threads( THREAD_X, THREAD_Y, 1 ); dim3 blocks( size / THREAD_X + 1, size / THREAD_Y + 1, 1 ); /* start timers */ hipEventRecord( start, 0 ); /* call GPU_naive */ hipLaunchKernelGGL(( GPU_naive), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_b, d_c ); /* stop timers */ hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &elapsedTime, start, stop ); /* print data for GPU naive */ fprintf(stdout, "Total time GPU NAIVE is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C back to host */ hipMemcpy( h_c1, d_c, numbytes, hipMemcpyDeviceToHost ); hipblasDestroy( handle ); hipEventDestroy( start ); hipEventDestroy( stop ); /* check CUBLAS versus GPU NAIVE numerical results */ double temp = 0.0; for( int i = 0; i < size * size; i++ ) { temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] ); } /* end for */ printf("error is %f\n",temp); if( temp > 10 ) printf("FAIL\n"); else printf("PASS\n"); /* cleanup */ hipFree( d_a ); hipFree( d_b ); hipFree( d_c ); free( h_a ); free( h_b ); free( h_c ); free( h_c1 ); hipError_t cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; }
40b4625f5e913884b76383be12a1fc7f4118bd5e.cu
/* * Copyright 2014 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cuda_runtime.h" #include "cublas_v2.h" #include <stdio.h> /* macro for index calculations */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* matrix size and thread dimensions */ #define SIZE 1024 #define THREAD_X 16 #define THREAD_Y 16 /* naive GPU kernel where each element of C is computed by a single thread */ __global__ void GPU_naive( const int m, double const * const a, double const * const b, double * const c ) { /* insert code to determine my threads's row and col indices in the global C matrix */ const int myrow = FIXME const int mycol = FIXME /* if my row and col are in the C matrix, then calculate that value of C */ if( myrow < m && mycol < m ) { register double temp = 0.0; for( int k = 0; k < m; k++ ) { /* insert correct index code here */ temp += a[INDX( FIXME, FIXME, m )] * b[INDX( FIXME, FIXME, m )]; } /* end for */ /* insert index code to write the output to the C matrix */ c[INDX( FIXME, FIXME, m )] = temp; } /* end if */ return; } /* end GPU_naive */ int main( int argc, char *argv[] ) { const int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); double *h_a, *h_b, *h_c, *h_c1; double *d_a, *d_b, *d_c; size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double ); h_a = (double *) malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_b = (double *) malloc( numbytes ); if( h_b == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c = (double *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c1 = (double *) malloc( numbytes ); if( h_c1 == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } /* zero out the host memory for C matrices */ memset( h_c, 0, numbytes ); memset( h_c1, 0, numbytes ); fprintf( stdout, "Total memory required is %lf MB\n", 3.0 * (double) numbytes / 1000000.0 ); /* initialize the A and B matrices */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); } /* allocate a, b, c in gpu memory */ cudaMalloc( (void **)&d_a, numbytes ); cudaMalloc( (void **)&d_b, numbytes ); cudaMalloc( (void **)&d_c, numbytes ); /* copy a and b to device */ cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ); cudaMemcpy( d_b, h_b, numbytes, cudaMemcpyHostToDevice ); cublasHandle_t handle; cublasStatus_t stat = cublasCreate( &handle ); double alpha = 1.0; double beta = 0.0; /* start timers */ cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); /* call CUBLAS dgemm */ cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N, size, size, size, &alpha, d_a, size, d_b, size, &beta, d_c, size ); /* stop timers */ cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); float elapsedTime; cudaEventElapsedTime( &elapsedTime, start, stop ); /* print GPU CUBLAS timing information */ fprintf(stdout, "Total time 
GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C from device to host for error checking */ cudaMemcpy( h_c, d_c, numbytes, cudaMemcpyDeviceToHost ); /* reset C on device to zero */ cudaMemset( d_c, 0, numbytes ); /* setup grid and block sizes */ dim3 threads( THREAD_X, THREAD_Y, 1 ); dim3 blocks( size / THREAD_X + 1, size / THREAD_Y + 1, 1 ); /* start timers */ cudaEventRecord( start, 0 ); /* call GPU_naive */ GPU_naive<<< blocks, threads >>> ( size, d_a, d_b, d_c ); /* stop timers */ cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &elapsedTime, start, stop ); /* print data for GPU naive */ fprintf(stdout, "Total time GPU NAIVE is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C back to host */ cudaMemcpy( h_c1, d_c, numbytes, cudaMemcpyDeviceToHost ); cublasDestroy( handle ); cudaEventDestroy( start ); cudaEventDestroy( stop ); /* check CUBLAS versus GPU NAIVE numerical results */ double temp = 0.0; for( int i = 0; i < size * size; i++ ) { temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] ); } /* end for */ printf("error is %f\n",temp); if( temp > 10 ) printf("FAIL\n"); else printf("PASS\n"); /* cleanup */ cudaFree( d_a ); cudaFree( d_b ); cudaFree( d_c ); free( h_a ); free( h_b ); free( h_c ); free( h_c1 ); cudaError_t cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; }
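/* The GPU_naive kernel in the pair above is an exercise skeleton: its FIXME
   placeholders will not compile as written. A minimal completion, assuming
   threadIdx.x maps to rows and threadIdx.y to columns (consistent with the
   column-major INDX macro and the cuBLAS reference result), is sketched below;
   it is one valid choice, not the only one. */
__global__ void GPU_naive_completed( const int m,
                                     double const * const a,
                                     double const * const b,
                                     double * const c )
{
  /* row and column of C handled by this thread */
  const int myrow = blockIdx.x * blockDim.x + threadIdx.x;
  const int mycol = blockIdx.y * blockDim.y + threadIdx.y;

  if( myrow < m && mycol < m )
  {
    double temp = 0.0;
    for( int k = 0; k < m; k++ )
    {
      /* C(row,col) += A(row,k) * B(k,col), all column-major via INDX */
      temp += a[INDX( myrow, k, m )] * b[INDX( k, mycol, m )];
    }
    c[INDX( myrow, mycol, m )] = temp;
  }
}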
169253be49df576d8f2d9044646e66aec3a4e126.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "create_scene.h"

__global__ void create_scene(Scene **p_scene, CameraParams *camera_params,
                             LightsParams *lights_params, GroupParams *base_group_params,
                             MaterialsParams *material_params, Vector3f background_color,
                             Vector3f environment_color) {
    *p_scene = new Scene(camera_params, lights_params, base_group_params, material_params,
                         background_color, environment_color);
}
169253be49df576d8f2d9044646e66aec3a4e126.cu
#include "create_scene.h" __global__ void create_scene(Scene **p_scene, CameraParams *camera_params, LightsParams *lights_params, GroupParams *base_group_params, MaterialsParams *material_params, Vector3f background_color, Vector3f environment_color) { *p_scene = new Scene(camera_params, lights_params, base_group_params, material_params, background_color, environment_color); }
62986b81a8924c9138e7bb6352e07e68b973b6f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hw3crt_wrapper.h" #include <iostream> #include <cstdlib> #include <cstdio> #include <cmath> #include "time_.h" #include "cuda_helmholtz_solver.h" #define PI 3.14159265358979323846 //#define DEBUG using namespace std; int main(int argc, char **argv) { if (argc < 5) { printf("usage: ./t N xbc ybc zbc\n"); exit(-1); } int N = atoi(argv[1]); int xbc = atoi(argv[2]); int ybc = atoi(argv[3]); int zbc = atoi(argv[4]); int nx=N; int ny=N; int nz=N; int i, j, k; double dx = 1.0/nx; double dy = 1.0/ny; double dz = 1.0/nz; int sz = (nx+1)*(ny+1)*(nz+1); double *f = new double[sz]; double *bcl=NULL; double *bcr=NULL; double *bcb=NULL; double *bct=NULL; double *bcf=NULL; double *bce=NULL; if ( xbc == 3 || xbc == 4 ) bcl = (double *) malloc(sizeof(double)*(ny+1)*(nz+1)); if ( xbc == 3 || xbc == 2 ) bcr = (double *) malloc(sizeof(double)*(ny+1)*(nz+1)); if ( ybc == 3 || ybc == 4 ) bcb = (double *) malloc(sizeof(double)*(nx+1)*(nz+1)); if ( ybc == 3 || ybc == 2 ) bct = (double *) malloc(sizeof(double)*(nx+1)*(nz+1)); if ( zbc == 3 || zbc == 4 ) bcf = (double *) malloc(sizeof(double)*(nx+1)*(ny+1)); if ( zbc == 3 || zbc == 2 ) bce = (double *) malloc(sizeof(double)*(nx+1)*(ny+1)); //D for (k=0; k<nz+1; k++) for (j=0; j<ny+1; j++) for (i=0; i<nx+1; i++) f[k*(nx+1)*(ny+1)+j*(nx+1)+i] = (-1./3-4*PI*PI)*sin(2*PI*i*dx)*sin(2*PI*j*dy)*sin(2*PI*k*dz); // (-1./3-4*PI*PI)*cos(2*PI*i*dx)*cos(2*PI*j*dy)*cos(2*PI*k*dz); // - (i*dx)*(i*dx) - (j*dy)*(j*dy); //f[100]+=0.1; for (k=0; k<nz+1; k++) for (j=0; j<ny+1; j++) { double temp = 2. * PI / 3. * sin(2*PI*j*dy) * sin(2*PI*k*dz); //double temp = 0.; if ( xbc == 3 || xbc == 4 ) bcl[k*(ny+1)+j] = temp;// k*dz*k*dz + j*dy*j*dy +0; if ( xbc == 3 || xbc == 2 ) bcr[k*(ny+1)+j] = temp;// k*dz*k*dz + j*dy*j*dy +1; } //// f[k*(nx+1)*(ny+1)+j*(nx+1)+0] //// = k*dz*k*dz + j*dy*j*dy +0; //// f[k*(nx+1)*(ny+1)+j*(nx+1)+nx] //// = k*dz*k*dz + j*dy*j*dy +1; // } for (k=0; k<nz+1; k++) for (i=0; i<nx+1; i++) { double temp = 2. * PI / 3. * sin(2*PI*i*dx) * sin(2*PI*k*dz); //double temp = 0.; if ( ybc == 3 || ybc == 4 ) bcb[k*(nx+1)+i] = temp;// k*dz*k*dz + j*dy*j*dy +0; if ( ybc == 3 || ybc == 2 ) bct[k*(nx+1)+i] = temp;// k*dz*k*dz + j*dy*j*dy +1; // f[k*(nx+1)*(ny+1)+0*(nx+1)+i] = 0; // = k*dz*k*dz + i*dx*i*dx +0; // f[k*(nx+1)*(ny+1)+ny*(nx+1)+i] = 0; // = k*dz*k*dz + i*dx*i*dx +1; } for (j=0; j<ny+1; j++) for (i=0; i<nx+1; i++) { double temp = 2. * PI / 3. 
* sin(2*PI*i*dx) * sin(2*PI*j*dy); //double temp = 0.; if ( zbc == 3 || zbc == 4 ) bcf[j*(nx+1)+i] = temp;// k*dz*k*dz + j*dy*j*dy +0; if ( zbc == 3 || zbc == 2 ) bce[j*(nx+1)+i] = temp;// k*dz*k*dz + j*dy*j*dy +1; } #ifdef DEBUG1 for (k=0; k<=nz; k++) { for (j=0; j<=ny; j++) { for (i=0; i<=nx; i++) { printf("%.14f\t", f[k*(nx+1)*(ny+1)+j*(nx+1)+i]); //printf("%.14f\t", (f[i+1]-f[i-1])/2./dx); //if (i%5 == 4) // printf("\n"); } printf("\n"); } printf("\n"); } #endif double *f_d; double *bcl_d=NULL; double *bcr_d=NULL; double *bcb_d=NULL; double *bct_d=NULL; double *bcf_d=NULL; double *bce_d=NULL; hipMalloc(&f_d, sizeof(double)*sz); if ( xbc == 3 || xbc == 4 ) hipMalloc(&bcl_d, sizeof(double)*(ny+1)*(nz+1)); if ( xbc == 3 || xbc == 2 ) hipMalloc(&bcr_d, sizeof(double)*(ny+1)*(nz+1)); if ( ybc == 3 || ybc == 4 ) hipMalloc(&bcb_d, sizeof(double)*(nx+1)*(nz+1)); if ( ybc == 3 || ybc == 2 ) hipMalloc(&bct_d, sizeof(double)*(nx+1)*(nz+1)); if ( zbc == 3 || zbc == 4 ) hipMalloc(&bcf_d, sizeof(double)*(nx+1)*(ny+1)); if ( zbc == 3 || zbc == 2 ) hipMalloc(&bce_d, sizeof(double)*(nx+1)*(ny+1)); hipMemcpy(f_d, f, sizeof(double)*sz, hipMemcpyHostToDevice); if ( xbc == 3 || xbc == 4 ) hipMemcpy(bcl_d, bcl, sizeof(double)*(ny+1)*(nz+1), hipMemcpyHostToDevice); if ( xbc == 3 || xbc == 2 ) hipMemcpy(bcr_d, bcr, sizeof(double)*(ny+1)*(nz+1), hipMemcpyHostToDevice); if ( ybc == 3 || ybc == 4 ) hipMemcpy(bcb_d, bcb, sizeof(double)*(nx+1)*(nz+1), hipMemcpyHostToDevice); if ( ybc == 3 || ybc == 2 ) hipMemcpy(bct_d, bct, sizeof(double)*(nx+1)*(nz+1), hipMemcpyHostToDevice); if ( zbc == 3 || zbc == 4 ) hipMemcpy(bcf_d, bcf, sizeof(double)*(nx+1)*(ny+1), hipMemcpyHostToDevice); if ( zbc == 3 || zbc == 2 ) hipMemcpy(bce_d, bce, sizeof(double)*(nx+1)*(ny+1), hipMemcpyHostToDevice); //warm up cuda_helmholtz_solver(0, 1, nx, xbc, bcl_d, bcr_d, 0, 1, ny, ybc, bcb_d, bct_d, 0, 1, nz, zbc, bcf_d, bce_d, -1, f_d); hipMemcpy(f_d, f, sizeof(double)*sz, hipMemcpyHostToDevice); time_( cuda_helmholtz_solver(0, 1, nx, xbc, bcl_d, bcr_d, 0, 1, ny, ybc, bcb_d, bct_d, 0, 1, nz, zbc, bcf_d, bce_d, -1, f_d); ) hipMemcpy(f, f_d, sizeof(double)*sz, hipMemcpyDeviceToHost); hipFree(f_d); if ( xbc == 3 || xbc == 4 ) hipFree(bcl_d); if ( xbc == 3 || xbc == 2 ) hipFree(bcr_d); if ( ybc == 3 || ybc == 4 ) hipFree(bcb_d); if ( ybc == 3 || ybc == 2 ) hipFree(bct_d); if ( zbc == 3 || zbc == 4 ) hipFree(bcf_d); if ( zbc == 3 || zbc == 2 ) hipFree(bce_d); // printf("***********************************\n"); double maxim = 0; for (k=0; k<=nz; k++) { for (j=0; j<=ny; j++) { for (i=0; i<=nx; i++) { //double dif = f[k*(nx+1)*(ny+1)+j*(nx+1)+i] - 1./3*cos(2*PI*i*dx) *cos(2*PI*j*dy)*cos(2*PI*k*dz); double dif = f[k*(nx+1)*(ny+1)+j*(nx+1)+i] - 1./3*sin(2*PI*i*dx) *sin(2*PI*j*dy)*sin(2*PI*k*dz); //- (k*dz*k*dz + j*dy*j*dy + i*dx*i*dx); #ifdef DEBUG printf("%12.8f", dif); #endif dif = fabs(dif); if (maxim < dif) maxim = dif; // printf("%.14f\t", f[k*(nx+1)*(ny+1)+j*(nx+1)+i]);// -sin(2*PI*i*dx)*sin(2*PI*j*dy)*sin(2*PI*k*dz)); //- (k*dz*k*dz + j*dy*j*dy + i*dx*i*dx)); //printf("%.14f\t", (f[i+1]-f[i-1])/2./dx); #ifdef DEBUG if (i%9 == 8) printf("\n"); #endif } //printf("\n"); } #ifdef DEBUG printf("\n"); #endif } printf("max diff : %.14f\n", maxim); delete [] f; if ( xbc == 3 || xbc == 4 ) free(bcl); if ( xbc == 3 || xbc == 2 ) free(bcr); if ( ybc == 3 || ybc == 4 ) free(bcb); if ( ybc == 3 || ybc == 2 ) free(bct); if ( zbc == 3 || zbc == 4 ) free(bcf); if ( zbc == 3 || zbc == 2 ) free(bce); return 0; }
62986b81a8924c9138e7bb6352e07e68b973b6f3.cu
#include "hw3crt_wrapper.h" #include <iostream> #include <cstdlib> #include <cstdio> #include <cmath> #include "time_.h" #include "cuda_helmholtz_solver.h" #define PI 3.14159265358979323846 //#define DEBUG using namespace std; int main(int argc, char **argv) { if (argc < 5) { printf("usage: ./t N xbc ybc zbc\n"); exit(-1); } int N = atoi(argv[1]); int xbc = atoi(argv[2]); int ybc = atoi(argv[3]); int zbc = atoi(argv[4]); int nx=N; int ny=N; int nz=N; int i, j, k; double dx = 1.0/nx; double dy = 1.0/ny; double dz = 1.0/nz; int sz = (nx+1)*(ny+1)*(nz+1); double *f = new double[sz]; double *bcl=NULL; double *bcr=NULL; double *bcb=NULL; double *bct=NULL; double *bcf=NULL; double *bce=NULL; if ( xbc == 3 || xbc == 4 ) bcl = (double *) malloc(sizeof(double)*(ny+1)*(nz+1)); if ( xbc == 3 || xbc == 2 ) bcr = (double *) malloc(sizeof(double)*(ny+1)*(nz+1)); if ( ybc == 3 || ybc == 4 ) bcb = (double *) malloc(sizeof(double)*(nx+1)*(nz+1)); if ( ybc == 3 || ybc == 2 ) bct = (double *) malloc(sizeof(double)*(nx+1)*(nz+1)); if ( zbc == 3 || zbc == 4 ) bcf = (double *) malloc(sizeof(double)*(nx+1)*(ny+1)); if ( zbc == 3 || zbc == 2 ) bce = (double *) malloc(sizeof(double)*(nx+1)*(ny+1)); //D for (k=0; k<nz+1; k++) for (j=0; j<ny+1; j++) for (i=0; i<nx+1; i++) f[k*(nx+1)*(ny+1)+j*(nx+1)+i] = (-1./3-4*PI*PI)*sin(2*PI*i*dx)*sin(2*PI*j*dy)*sin(2*PI*k*dz); // (-1./3-4*PI*PI)*cos(2*PI*i*dx)*cos(2*PI*j*dy)*cos(2*PI*k*dz); // - (i*dx)*(i*dx) - (j*dy)*(j*dy); //f[100]+=0.1; for (k=0; k<nz+1; k++) for (j=0; j<ny+1; j++) { double temp = 2. * PI / 3. * sin(2*PI*j*dy) * sin(2*PI*k*dz); //double temp = 0.; if ( xbc == 3 || xbc == 4 ) bcl[k*(ny+1)+j] = temp;// k*dz*k*dz + j*dy*j*dy +0; if ( xbc == 3 || xbc == 2 ) bcr[k*(ny+1)+j] = temp;// k*dz*k*dz + j*dy*j*dy +1; } //// f[k*(nx+1)*(ny+1)+j*(nx+1)+0] //// = k*dz*k*dz + j*dy*j*dy +0; //// f[k*(nx+1)*(ny+1)+j*(nx+1)+nx] //// = k*dz*k*dz + j*dy*j*dy +1; // } for (k=0; k<nz+1; k++) for (i=0; i<nx+1; i++) { double temp = 2. * PI / 3. * sin(2*PI*i*dx) * sin(2*PI*k*dz); //double temp = 0.; if ( ybc == 3 || ybc == 4 ) bcb[k*(nx+1)+i] = temp;// k*dz*k*dz + j*dy*j*dy +0; if ( ybc == 3 || ybc == 2 ) bct[k*(nx+1)+i] = temp;// k*dz*k*dz + j*dy*j*dy +1; // f[k*(nx+1)*(ny+1)+0*(nx+1)+i] = 0; // = k*dz*k*dz + i*dx*i*dx +0; // f[k*(nx+1)*(ny+1)+ny*(nx+1)+i] = 0; // = k*dz*k*dz + i*dx*i*dx +1; } for (j=0; j<ny+1; j++) for (i=0; i<nx+1; i++) { double temp = 2. * PI / 3. 
* sin(2*PI*i*dx) * sin(2*PI*j*dy); //double temp = 0.; if ( zbc == 3 || zbc == 4 ) bcf[j*(nx+1)+i] = temp;// k*dz*k*dz + j*dy*j*dy +0; if ( zbc == 3 || zbc == 2 ) bce[j*(nx+1)+i] = temp;// k*dz*k*dz + j*dy*j*dy +1; } #ifdef DEBUG1 for (k=0; k<=nz; k++) { for (j=0; j<=ny; j++) { for (i=0; i<=nx; i++) { printf("%.14f\t", f[k*(nx+1)*(ny+1)+j*(nx+1)+i]); //printf("%.14f\t", (f[i+1]-f[i-1])/2./dx); //if (i%5 == 4) // printf("\n"); } printf("\n"); } printf("\n"); } #endif double *f_d; double *bcl_d=NULL; double *bcr_d=NULL; double *bcb_d=NULL; double *bct_d=NULL; double *bcf_d=NULL; double *bce_d=NULL; cudaMalloc(&f_d, sizeof(double)*sz); if ( xbc == 3 || xbc == 4 ) cudaMalloc(&bcl_d, sizeof(double)*(ny+1)*(nz+1)); if ( xbc == 3 || xbc == 2 ) cudaMalloc(&bcr_d, sizeof(double)*(ny+1)*(nz+1)); if ( ybc == 3 || ybc == 4 ) cudaMalloc(&bcb_d, sizeof(double)*(nx+1)*(nz+1)); if ( ybc == 3 || ybc == 2 ) cudaMalloc(&bct_d, sizeof(double)*(nx+1)*(nz+1)); if ( zbc == 3 || zbc == 4 ) cudaMalloc(&bcf_d, sizeof(double)*(nx+1)*(ny+1)); if ( zbc == 3 || zbc == 2 ) cudaMalloc(&bce_d, sizeof(double)*(nx+1)*(ny+1)); cudaMemcpy(f_d, f, sizeof(double)*sz, cudaMemcpyHostToDevice); if ( xbc == 3 || xbc == 4 ) cudaMemcpy(bcl_d, bcl, sizeof(double)*(ny+1)*(nz+1), cudaMemcpyHostToDevice); if ( xbc == 3 || xbc == 2 ) cudaMemcpy(bcr_d, bcr, sizeof(double)*(ny+1)*(nz+1), cudaMemcpyHostToDevice); if ( ybc == 3 || ybc == 4 ) cudaMemcpy(bcb_d, bcb, sizeof(double)*(nx+1)*(nz+1), cudaMemcpyHostToDevice); if ( ybc == 3 || ybc == 2 ) cudaMemcpy(bct_d, bct, sizeof(double)*(nx+1)*(nz+1), cudaMemcpyHostToDevice); if ( zbc == 3 || zbc == 4 ) cudaMemcpy(bcf_d, bcf, sizeof(double)*(nx+1)*(ny+1), cudaMemcpyHostToDevice); if ( zbc == 3 || zbc == 2 ) cudaMemcpy(bce_d, bce, sizeof(double)*(nx+1)*(ny+1), cudaMemcpyHostToDevice); //warm up cuda_helmholtz_solver(0, 1, nx, xbc, bcl_d, bcr_d, 0, 1, ny, ybc, bcb_d, bct_d, 0, 1, nz, zbc, bcf_d, bce_d, -1, f_d); cudaMemcpy(f_d, f, sizeof(double)*sz, cudaMemcpyHostToDevice); time_( cuda_helmholtz_solver(0, 1, nx, xbc, bcl_d, bcr_d, 0, 1, ny, ybc, bcb_d, bct_d, 0, 1, nz, zbc, bcf_d, bce_d, -1, f_d); ) cudaMemcpy(f, f_d, sizeof(double)*sz, cudaMemcpyDeviceToHost); cudaFree(f_d); if ( xbc == 3 || xbc == 4 ) cudaFree(bcl_d); if ( xbc == 3 || xbc == 2 ) cudaFree(bcr_d); if ( ybc == 3 || ybc == 4 ) cudaFree(bcb_d); if ( ybc == 3 || ybc == 2 ) cudaFree(bct_d); if ( zbc == 3 || zbc == 4 ) cudaFree(bcf_d); if ( zbc == 3 || zbc == 2 ) cudaFree(bce_d); // printf("***********************************\n"); double maxim = 0; for (k=0; k<=nz; k++) { for (j=0; j<=ny; j++) { for (i=0; i<=nx; i++) { //double dif = f[k*(nx+1)*(ny+1)+j*(nx+1)+i] - 1./3*cos(2*PI*i*dx) *cos(2*PI*j*dy)*cos(2*PI*k*dz); double dif = f[k*(nx+1)*(ny+1)+j*(nx+1)+i] - 1./3*sin(2*PI*i*dx) *sin(2*PI*j*dy)*sin(2*PI*k*dz); //- (k*dz*k*dz + j*dy*j*dy + i*dx*i*dx); #ifdef DEBUG printf("%12.8f", dif); #endif dif = fabs(dif); if (maxim < dif) maxim = dif; // printf("%.14f\t", f[k*(nx+1)*(ny+1)+j*(nx+1)+i]);// -sin(2*PI*i*dx)*sin(2*PI*j*dy)*sin(2*PI*k*dz)); //- (k*dz*k*dz + j*dy*j*dy + i*dx*i*dx)); //printf("%.14f\t", (f[i+1]-f[i-1])/2./dx); #ifdef DEBUG if (i%9 == 8) printf("\n"); #endif } //printf("\n"); } #ifdef DEBUG printf("\n"); #endif } printf("max diff : %.14f\n", maxim); delete [] f; if ( xbc == 3 || xbc == 4 ) free(bcl); if ( xbc == 3 || xbc == 2 ) free(bcr); if ( ybc == 3 || ybc == 4 ) free(bcb); if ( ybc == 3 || ybc == 2 ) free(bct); if ( zbc == 3 || zbc == 4 ) free(bcf); if ( zbc == 3 || zbc == 2 ) free(bce); return 0; }
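/* Both drivers above check the solver against the manufactured solution
   u(x,y,z) = (1/3)*sin(2*PI*x)*sin(2*PI*y)*sin(2*PI*z). Assuming the trailing -1 passed
   to cuda_helmholtz_solver is the Helmholtz constant lambda in laplacian(u) + lambda*u = f
   (the usual hw3crt convention), the setup is consistent: laplacian(u) reduces to
   -4*PI*PI*sin(2*PI*x)*sin(2*PI*y)*sin(2*PI*z), lambda*u contributes the -1/3 term, and
   the boundary arrays hold the normal derivative du/dx = (2*PI/3)*cos(2*PI*x)*sin(2*PI*y)*sin(2*PI*z),
   which at x = 0 or 1 is exactly the temp value written to bcl/bcr. A small sketch of that
   relationship (relies on the PI macro and <cmath> already used by the drivers above): */
static inline double exact_u(double x, double y, double z)
{
    return (1.0 / 3.0) * sin(2 * PI * x) * sin(2 * PI * y) * sin(2 * PI * z);
}

static inline double rhs_f(double x, double y, double z)
{
    /* laplacian(exact_u) + (-1)*exact_u, matching the initialisation of f above */
    return (-1.0 / 3.0 - 4 * PI * PI) * sin(2 * PI * x) * sin(2 * PI * y) * sin(2 * PI * z);
}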
dbcf4ac55f9fd7854ba5c2e475d12c3225f05533.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2000-2019, Heiko Bauke
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above
//   copyright notice, this list of conditions and the following
//   disclaimer in the documentation and/or other materials provided
//   with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
//   contributors may be used to endorse or promote products derived
//   from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

#include <cstdlib>
#include <iostream>
#include <trng/yarn5s.hpp>
#include <trng/uniform01_dist.hpp>

__global__ void parallel_pi(long samples, long *in, trng::yarn5s r) {
  long rank=threadIdx.x;
  long size=blockDim.x;
  r.jump(2*(rank*samples/size));  // jump ahead
  trng::uniform01_dist<float> u;  // random number distribution
  in[rank]=0;                     // local number of points in circle
  for (long i=rank*samples/size; i<(rank+1)*samples/size; ++i) {
    float x=u(r), y=u(r);         // choose random x- and y-coordinates
    if (x*x+y*y<=1)               // is point in circle?
      ++in[rank];                 // increase thread-local counter
  }
}

int main(int argc, char *argv[]) {
  const long samples=1000000l;    // total number of points in square
  const int size=128;             // number of threads
  long *in_device;
  hipMalloc(&in_device, size*sizeof(*in_device));
  trng::yarn5s r;
  // start parallel Monte Carlo
  hipLaunchKernelGGL(( parallel_pi), dim3(1), dim3(size), 0, 0, samples, in_device, r);
  // gather results
  long *in=new long[size];
  hipMemcpy(in, in_device, size*sizeof(*in), hipMemcpyDeviceToHost);
  long sum=0;
  for (int rank=0; rank<size; ++rank)
    sum+=in[rank];
  // print result
  std::cout << "pi = " << 4.0*sum/samples << std::endl;
  return EXIT_SUCCESS;
}
dbcf4ac55f9fd7854ba5c2e475d12c3225f05533.cu
// Copyright (c) 2000-2019, Heiko Bauke
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above
//   copyright notice, this list of conditions and the following
//   disclaimer in the documentation and/or other materials provided
//   with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
//   contributors may be used to endorse or promote products derived
//   from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

#include <cstdlib>
#include <iostream>
#include <trng/yarn5s.hpp>
#include <trng/uniform01_dist.hpp>

__global__ void parallel_pi(long samples, long *in, trng::yarn5s r) {
  long rank=threadIdx.x;
  long size=blockDim.x;
  r.jump(2*(rank*samples/size));  // jump ahead
  trng::uniform01_dist<float> u;  // random number distribution
  in[rank]=0;                     // local number of points in circle
  for (long i=rank*samples/size; i<(rank+1)*samples/size; ++i) {
    float x=u(r), y=u(r);         // choose random x- and y-coordinates
    if (x*x+y*y<=1)               // is point in circle?
      ++in[rank];                 // increase thread-local counter
  }
}

int main(int argc, char *argv[]) {
  const long samples=1000000l;    // total number of points in square
  const int size=128;             // number of threads
  long *in_device;
  cudaMalloc(&in_device, size*sizeof(*in_device));
  trng::yarn5s r;
  // start parallel Monte Carlo
  parallel_pi<<<1, size>>>(samples, in_device, r);
  // gather results
  long *in=new long[size];
  cudaMemcpy(in, in_device, size*sizeof(*in), cudaMemcpyDeviceToHost);
  long sum=0;
  for (int rank=0; rank<size; ++rank)
    sum+=in[rank];
  // print result
  std::cout << "pi = " << 4.0*sum/samples << std::endl;
  return EXIT_SUCCESS;
}
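/* In parallel_pi above, thread `rank` jumps its YARN generator ahead by
   2*(rank*samples/size) draws (two uniforms per sample), so the threads consume
   disjoint, consecutive pieces of a single random stream. A sequential host-only
   reference of the same estimator, handy for cross-checking the device result,
   might look like this sketch: */
#include <cstdlib>
#include <iostream>
#include <trng/yarn5s.hpp>
#include <trng/uniform01_dist.hpp>

int main() {
  const long samples = 1000000l;           // same sample budget as the device version
  trng::yarn5s r;                          // one generator, one stream, no jumping
  trng::uniform01_dist<float> u;
  long in = 0;
  for (long i = 0; i < samples; ++i) {
    float x = u(r), y = u(r);              // one point in the unit square
    if (x * x + y * y <= 1)                // inside the quarter circle?
      ++in;
  }
  std::cout << "pi = " << 4.0 * in / samples << std::endl;
  return EXIT_SUCCESS;
}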
3ff6bb6bbb8b090f49ae24f5836441d0dcecf81a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "coordConvACPlugin.h" #include <hip/hip_fp16.h> template <typename T_DATA> __global__ void kernelCopy( int N, T_DATA* inputs, T_DATA* outputs ) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N){ outputs[index] = inputs[index]; } __syncthreads(); } template <typename T_DATA> __global__ void kernelAC( int N, int iH, int iW, float stepACh, float stepACw, T_DATA* outputs ) { int index = blockIdx.x * blockDim.x + threadIdx.x; int channelLength = N/2; if (index < channelLength){ outputs[index] = -1.0 + (float)(index / iW) * stepACw; outputs[index + channelLength] = -1.0 + (float)((index + channelLength) % iH) * stepACh; } __syncthreads(); } template <typename T> int inferenceAC( int batchSize, int iC, int iH, int iW, int oC, int oH, int oW, T* inputs, T* outputs, hipStream_t stream){ // NCHW const float coordsRange = 2.0; const int nThreads = 512; int lenCopy = iC * iH * iW; int lenAC = (oC * oH * oW) - lenCopy; int nBlocksCopy = (int)((float)lenCopy / nThreads) + 1; int nBlocksAC = (int)((float)lenAC / nThreads) + 1; float stepACh = coordsRange / (float)(iH - 1); float stepACw = coordsRange / (float)(iW - 1); for(int i=0; i<batchSize; ++i){ // NOTE: kernelCopy kernel can be replaced with hipMemcpy function hipLaunchKernelGGL(( kernelCopy), dim3(nBlocksCopy), dim3(nThreads), 0, stream, lenCopy, inputs, outputs); outputs += lenCopy; hipLaunchKernelGGL(( kernelAC), dim3(nBlocksAC), dim3(nThreads), 0, stream, lenAC, iH, iW, stepACh, stepACw, outputs); outputs += lenAC; inputs += lenCopy; } hipError_t err = hipGetLastError(); if ( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", __FILE__, __LINE__, hipGetErrorString( err ) ); return 1; } return 0; } int CoordConvACPlugin::enqueue( int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream) { switch(iType){ case DataType::kFLOAT: return inferenceAC(batchSize, iC, iH, iW, oC, oH, oW, (float*)inputs[0], (float*)outputs[0], stream); case DataType::kHALF: return inferenceAC(batchSize, iC, iH, iW, oC, oH, oW, (__half*)inputs[0], (__half*)outputs[0], stream); } return 1; }
3ff6bb6bbb8b090f49ae24f5836441d0dcecf81a.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "coordConvACPlugin.h" #include <cuda_fp16.h> template <typename T_DATA> __global__ void kernelCopy( int N, T_DATA* inputs, T_DATA* outputs ) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N){ outputs[index] = inputs[index]; } __syncthreads(); } template <typename T_DATA> __global__ void kernelAC( int N, int iH, int iW, float stepACh, float stepACw, T_DATA* outputs ) { int index = blockIdx.x * blockDim.x + threadIdx.x; int channelLength = N/2; if (index < channelLength){ outputs[index] = -1.0 + (float)(index / iW) * stepACw; outputs[index + channelLength] = -1.0 + (float)((index + channelLength) % iH) * stepACh; } __syncthreads(); } template <typename T> int inferenceAC( int batchSize, int iC, int iH, int iW, int oC, int oH, int oW, T* inputs, T* outputs, cudaStream_t stream){ // NCHW const float coordsRange = 2.0; const int nThreads = 512; int lenCopy = iC * iH * iW; int lenAC = (oC * oH * oW) - lenCopy; int nBlocksCopy = (int)((float)lenCopy / nThreads) + 1; int nBlocksAC = (int)((float)lenAC / nThreads) + 1; float stepACh = coordsRange / (float)(iH - 1); float stepACw = coordsRange / (float)(iW - 1); for(int i=0; i<batchSize; ++i){ // NOTE: kernelCopy kernel can be replaced with cudaMemcpy function kernelCopy<<<nBlocksCopy, nThreads, 0, stream>>>(lenCopy, inputs, outputs); outputs += lenCopy; kernelAC<<<nBlocksAC, nThreads, 0, stream>>>(lenAC, iH, iW, stepACh, stepACw, outputs); outputs += lenAC; inputs += lenCopy; } cudaError_t err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", __FILE__, __LINE__, cudaGetErrorString( err ) ); return 1; } return 0; } int CoordConvACPlugin::enqueue( int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) { switch(iType){ case DataType::kFLOAT: return inferenceAC(batchSize, iC, iH, iW, oC, oH, oW, (float*)inputs[0], (float*)outputs[0], stream); case DataType::kHALF: return inferenceAC(batchSize, iC, iH, iW, oC, oH, oW, (__half*)inputs[0], (__half*)outputs[0], stream); } return 1; }
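/* inferenceAC above sizes its grids as (int)((float)len / nThreads) + 1, which relies on
   float rounding and always launches one extra, fully masked block when len is an exact
   multiple of nThreads. The integer ceiling-division idiom below is a common alternative
   with the same intent; it is a suggestion, not part of the plugin. */
static inline int blocksFor(int len, int threadsPerBlock)
{
    return (len + threadsPerBlock - 1) / threadsPerBlock;   // ceil(len / threadsPerBlock)
}
/* e.g. int nBlocksCopy = blocksFor(lenCopy, nThreads);
        int nBlocksAC   = blocksFor(lenAC,   nThreads);     */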
b9d2d2ee439d2546477b2358ec49b0886f91ffc2.hip
// !!! This is a file automatically generated by hipify!!! /** * @file _reg_comon_gpu.cu * @author Marc Modat * @date 25/03/2009 * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_COMMON_GPU_CU #define _REG_COMMON_GPU_CU #include "_reg_common_cuda.h" #include "_reg_tools.h" /* ******************************** */ void cudaCommon_computeGridConfiguration(dim3 &r_blocks, dim3 &r_grid, const int targetVoxelNumber) { unsigned int maxThreads = 256; unsigned int maxBlocks = 65365; unsigned int blocks = (targetVoxelNumber % maxThreads) ? (targetVoxelNumber / maxThreads) + 1 : targetVoxelNumber / maxThreads; blocks = (std::min)(blocks, maxBlocks); r_grid = dim3(blocks, 1, 1); r_blocks = dim3(maxThreads, 1, 1); } /* ******************************** */ /* ******************************** */ template <class NIFTI_TYPE> int cudaCommon_transferNiftiToNiftiOnDevice1(nifti_image **image_d, nifti_image *img) { const unsigned int memSize = img->dim[1] * img->dim[2] * img->dim[3] * sizeof(NIFTI_TYPE); int *g_dim; float* g_pixdim; NIFTI_TYPE* g_data; NR_CUDA_SAFE_CALL(hipMalloc((void**)&g_dim, 8 * sizeof(int))); NR_CUDA_SAFE_CALL(hipMalloc((void**)&g_pixdim, 8 * sizeof(float))); NR_CUDA_SAFE_CALL(hipMalloc((void**)&g_data, memSize)); NIFTI_TYPE *array_h = static_cast<NIFTI_TYPE *>( img->data ); NR_CUDA_SAFE_CALL(hipMemcpy(( *image_d ), img, sizeof(nifti_image), hipMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(hipMemcpy((*image_d)->data, array_h, memSize, hipMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(hipMemcpy(( *image_d )->dim, img->dim, 8 * sizeof(int), hipMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(hipMemcpy(( *image_d )->pixdim, img->pixdim, 8 * sizeof(float), hipMemcpyHostToDevice)); return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToNiftiOnDevice1<float>(nifti_image **image_d, nifti_image *img); template int cudaCommon_transferNiftiToNiftiOnDevice1<double>(nifti_image **image_d, nifti_image *img); /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(DTYPE **array_d, const nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ const unsigned int memSize = img->nvox*sizeof(DTYPE); const NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NR_CUDA_SAFE_CALL(hipMemcpy(*array_d, array_h, memSize, hipMemcpyHostToDevice)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(DTYPE **array_d, const nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } const float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); const int voxelNumber = img->nx*img->ny*img->nz; for(int i=0; i<voxelNumber; i++) array_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) array_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) array_h[i].z= 
*niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) array_h[i].w= *niftiImgValues++; } NR_CUDA_SAFE_CALL(hipMemcpy(*array_d, array_h, img->nx*img->ny*img->nz*sizeof(float4), hipMemcpyHostToDevice)); free(array_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(array_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<double>(double **, const nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float>(float **, const nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<int>(int **, const nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(float4 **, const nifti_image *); /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(DTYPE **array_d, DTYPE **array2_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ const unsigned int memSize = img->dim[1] * img->dim[2] * img->dim[3] * sizeof(DTYPE); NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h=&array_h[img->dim[1] * img->dim[2] * img->dim[3]]; NR_CUDA_SAFE_CALL(hipMemcpy(*array_d, array_h, memSize, hipMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(hipMemcpy(*array2_d, array2_h, memSize, hipMemcpyHostToDevice)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(DTYPE **array_d, DTYPE **array2_d, nifti_image *img) { if(sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); float4 *array2_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); const int voxelNumber = img->nx*img->ny*img->nz; for(int i=0; i<voxelNumber; i++) array_h[i].x= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) array_h[i].y= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) array_h[i].z= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].z= *niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) array_h[i].w= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].w= *niftiImgValues++; } NR_CUDA_SAFE_CALL(hipMemcpy(*array_d, array_h, img->nx*img->ny*img->nz*sizeof(float4), hipMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(hipMemcpy(*array2_d, array2_h, img->nx*img->ny*img->nz*sizeof(float4), hipMemcpyHostToDevice)); free(array_h); free(array2_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return 
cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(array_d, array2_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(float **,float **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<double>(double **,double **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(float4 **,float4 **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(hipArray **cuArray_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); hipMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_hipExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.srcPtr = make_hipPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; copyParams.kind = hipMemcpyHostToDevice; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(hipArray **cuArray_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1) ){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].x= *niftiImgValues++; if(img->dim[5]>=2) { for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3) { for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].z= *niftiImgValues++; } if(img->dim[5]==3) { for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].w= *niftiImgValues++; } hipMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_hipExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.srcPtr = make_hipPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; copyParams.kind = hipMemcpyHostToDevice; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)) free(array_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(cuArray_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(hipArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<double>(hipArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<int>(hipArray **, nifti_image *); 
template int cudaCommon_transferNiftiToArrayOnDevice<float4>(hipArray **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(hipArray **cuArray_d, hipArray **cuArray2_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ NIFTI_TYPE *array_h = static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h = &array_h[img->dim[1]*img->dim[2]*img->dim[3]]; hipMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_hipExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.kind = hipMemcpyHostToDevice; // First timepoint copyParams.srcPtr = make_hipPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); // Second timepoint copyParams.srcPtr = make_hipPitchedPtr((void *) array2_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray2_d; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(hipArray **cuArray_d, hipArray **cuArray2_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1) ) { reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); float4 *array2_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].x= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].y= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].z= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].z= *niftiImgValues++; } if(img->dim[5]==3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].w= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].w= *niftiImgValues++; } hipMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_hipExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.kind = hipMemcpyHostToDevice; // First timepoint copyParams.srcPtr = make_hipPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); free(array_h); // Second timepoint copyParams.srcPtr = make_hipPitchedPtr((void *) array2_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray2_d; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); free(array2_h); } else{ // All these else could be removed but the nvcc compiler would warn 
for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(cuArray_d, cuArray2_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(hipArray **, hipArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<double>(hipArray **, hipArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(hipArray **, hipArray **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(hipArray **cuArray_d, int *dim) { const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); hipChannelFormatDesc texDesc = hipCreateChannelDesc<DTYPE>(); NR_CUDA_SAFE_CALL(hipMalloc3DArray(cuArray_d, &texDesc, volumeSize)); return EXIT_SUCCESS; }template int cudaCommon_allocateArrayToDevice<float>(hipArray **, int *); template int cudaCommon_allocateArrayToDevice<double>(hipArray **, int *); template int cudaCommon_allocateArrayToDevice<float4>(hipArray **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(hipArray **cuArray_d, hipArray **cuArray2_d, int *dim) { const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); hipChannelFormatDesc texDesc = hipCreateChannelDesc<DTYPE>(); NR_CUDA_SAFE_CALL(hipMalloc3DArray(cuArray_d, &texDesc, volumeSize)); NR_CUDA_SAFE_CALL(hipMalloc3DArray(cuArray2_d, &texDesc, volumeSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(hipArray **,hipArray **, int *); template int cudaCommon_allocateArrayToDevice<double>(hipArray **,hipArray **, int *); template int cudaCommon_allocateArrayToDevice<float4>(hipArray **,hipArray **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, int *dim) { const unsigned int memSize = dim[1] * dim[2] * dim[3] * sizeof(DTYPE); NR_CUDA_SAFE_CALL(hipMalloc(array_d, memSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(float **, int *); template int cudaCommon_allocateArrayToDevice<double>(double **, int *); template int cudaCommon_allocateArrayToDevice<int>(int **, int *); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, int *); // for deformation field /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, int vox) { const unsigned int memSize = vox * sizeof(DTYPE); NR_CUDA_SAFE_CALL(hipMalloc(array_d, memSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(float **, int); template int cudaCommon_allocateArrayToDevice<double>(double **, int); template int cudaCommon_allocateArrayToDevice<int>(int **, int); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, int); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, DTYPE **array2_d, int *dim) { const unsigned int memSize = dim[1] * dim[2] * dim[3] * sizeof(DTYPE); 
NR_CUDA_SAFE_CALL(hipMalloc(array_d, memSize)); NR_CUDA_SAFE_CALL(hipMalloc(array2_d, memSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(float **, float **, int *); template int cudaCommon_allocateArrayToDevice<double>(double **, double **, int *); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, float4 **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToCpu(DTYPE *cpuPtr, DTYPE **cuPtr, const unsigned int nElements) { NR_CUDA_SAFE_CALL(hipMemcpy((void *)cpuPtr, (void *)*cuPtr, nElements*sizeof(DTYPE), hipMemcpyDeviceToHost)); //NR_CUDA_SAFE_CALL(hipDeviceSynchronize()); return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToCpu<float>(float *cpuPtr, float **cuPtr, const unsigned int nElements); template int cudaCommon_transferFromDeviceToCpu<double>(double *cpuPtr, double **cuPtr, const unsigned int nElements); /* ******************************** */ /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferFromDeviceToNifti1(nifti_image *img, DTYPE **array_d) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else { NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NR_CUDA_SAFE_CALL(hipMemcpy((void *)array_h, (void *)*array_d, img->nvox*sizeof(DTYPE), hipMemcpyDeviceToHost)); } return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToNifti1<float, float>(nifti_image *img, float **array_d); template int cudaCommon_transferFromDeviceToNifti1<double, double>(nifti_image *img, double **array_d); /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNifti(nifti_image *img, DTYPE **array_d) { if(sizeof(DTYPE)==sizeof(float4)){ // A nifti 5D volume is expected if(img->dim[0]<5 || img->dim[4]>1 || img->dim[5]<2 || img->datatype!=NIFTI_TYPE_FLOAT32){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The nifti image is not a 5D volume"); return EXIT_FAILURE; } const int voxelNumber = img->nx*img->ny*img->nz; float4 *array_h; NR_CUDA_SAFE_CALL(hipHostMalloc(&array_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(hipMemcpy((void *)array_h, (const void *)*array_d, voxelNumber*sizeof(float4), hipMemcpyDeviceToHost)); float *niftiImgValues = static_cast<float *>(img->data); for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].x; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].y; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].z; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].w; } NR_CUDA_SAFE_CALL(hipHostFree(array_h)); return EXIT_SUCCESS; } else{ switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferFromDeviceToNifti1<DTYPE,float>(img, array_d); default: reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } } template int cudaCommon_transferFromDeviceToNifti<float>(nifti_image *, float **); template int cudaCommon_transferFromDeviceToNifti<double>(nifti_image *, double **); template int cudaCommon_transferFromDeviceToNifti<float4>(nifti_image *, float4 **); // for 
deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferFromDeviceToNifti1(nifti_image *img, DTYPE **array_d, DTYPE **array2_d) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ unsigned int voxelNumber=img->nx*img->ny*img->nz; NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h=&array_h[voxelNumber]; NR_CUDA_SAFE_CALL(hipMemcpy((void *)array_h, (void *)*array_d, voxelNumber*sizeof(DTYPE), hipMemcpyDeviceToHost)); NR_CUDA_SAFE_CALL(hipMemcpy((void *)array2_h, (void *)*array2_d, voxelNumber*sizeof(DTYPE), hipMemcpyDeviceToHost)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNifti(nifti_image *img, DTYPE **array_d, DTYPE **array2_d) { if(sizeof(DTYPE)==sizeof(float4)){ // A nifti 5D volume is expected if(img->dim[0]<5 || img->dim[4]>1 || img->dim[5]<2 || img->datatype!=NIFTI_TYPE_FLOAT32){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The nifti image is not a 5D volume"); return EXIT_FAILURE; } const int voxelNumber = img->nx*img->ny*img->nz; float4 *array_h=NULL; float4 *array2_h=NULL; NR_CUDA_SAFE_CALL(hipHostMalloc(&array_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(hipHostMalloc(&array2_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(hipMemcpy((void *)array_h, (const void *)*array_d, voxelNumber*sizeof(float4), hipMemcpyDeviceToHost)); NR_CUDA_SAFE_CALL(hipMemcpy((void *)array2_h, (const void *)*array2_d, voxelNumber*sizeof(float4), hipMemcpyDeviceToHost)); float *niftiImgValues = static_cast<float *>(img->data); for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].x; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].x; } if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].y; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].y; } } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].z; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].z; } } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].w; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].w; } } NR_CUDA_SAFE_CALL(hipHostFree(array_h)); NR_CUDA_SAFE_CALL(hipHostFree(array2_h)); return EXIT_SUCCESS; } else{ switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferFromDeviceToNifti1<DTYPE,float>(img, array_d, array2_d); default: reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } } template int cudaCommon_transferFromDeviceToNifti<float>(nifti_image *, float **, float **); template int cudaCommon_transferFromDeviceToNifti<double>(nifti_image *, double **, double **); template int cudaCommon_transferFromDeviceToNifti<float4>(nifti_image *, float4 **, float4 **); // for deformation field /* ******************************** */ /* ******************************** */ void cudaCommon_free(hipArray **cuArray_d) { NR_CUDA_SAFE_CALL(hipFreeArray(*cuArray_d)); return; } /* ******************************** */ /* ******************************** */ template <class DTYPE> void cudaCommon_free(DTYPE **array_d) { NR_CUDA_SAFE_CALL(hipFree(*array_d)); return; 
} template void cudaCommon_free<int>(int **); template void cudaCommon_free<float>(float **); template void cudaCommon_free<double>(double **); template void cudaCommon_free<float4>(float4 **); /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNiftiSimple(DTYPE **array_d, nifti_image *img) { NR_CUDA_SAFE_CALL(hipMemcpy(*array_d, img->data, img->nvox * sizeof(DTYPE), hipMemcpyHostToDevice)); return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToNiftiSimple<int>(int **array_d, nifti_image *img); template int cudaCommon_transferFromDeviceToNiftiSimple<float>(float **array_d, nifti_image *img); template int cudaCommon_transferFromDeviceToNiftiSimple<double>(double **array_d, nifti_image *img); /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNiftiSimple1(DTYPE **array_d, DTYPE *img, const unsigned int nvox) { NR_CUDA_SAFE_CALL(hipMemcpy(*array_d, img, nvox * sizeof(DTYPE), hipMemcpyHostToDevice)); return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToNiftiSimple1<int>(int **array_d, int *img, const unsigned); template int cudaCommon_transferFromDeviceToNiftiSimple1<float>(float **array_d, float *img, const unsigned); template int cudaCommon_transferFromDeviceToNiftiSimple1<double>(double **array_d, double *img, const unsigned); /* ******************************** */ /* ******************************** */ /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferArrayFromCpuToDevice(DTYPE *array_d, const DTYPE *array_cpu, const unsigned int nElements) { const unsigned int memSize = nElements * sizeof(DTYPE); //copyData NR_CUDA_SAFE_CALL(hipMemcpy(array_d, array_cpu, memSize, hipMemcpyHostToDevice)); // return EXIT_SUCCESS; } template int cudaCommon_transferArrayFromCpuToDevice<int>(int *array_d, const int *array_cpu, const unsigned int nElements); template int cudaCommon_transferArrayFromCpuToDevice<float>(float *array_d, const float *array_cpu, const unsigned int nElements); template int cudaCommon_transferArrayFromCpuToDevice<double>(double *array_d, const double *array_cpu, const unsigned int nElements); /* ******************************** */ /* ******************************** */ /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferArrayFromDeviceToCpu(DTYPE *array_cpu, DTYPE *array_d, const unsigned int nElements) { const unsigned int memSize = nElements * sizeof(DTYPE); //copyData NR_CUDA_SAFE_CALL(hipMemcpy(array_cpu, array_d, memSize, hipMemcpyDeviceToHost)); // return EXIT_SUCCESS; } template int cudaCommon_transferArrayFromDeviceToCpu<int>(int *array_cpu, int *array_d, const unsigned int nElements); template int cudaCommon_transferArrayFromDeviceToCpu<float>(float *array_cpu, float *array_d, const unsigned int nElements); template int cudaCommon_transferArrayFromDeviceToCpu<double>(double *array_cpu, double *array_d, const unsigned int nElements); #endif /* ******************************** */ /* ******************************** */
b9d2d2ee439d2546477b2358ec49b0886f91ffc2.cu
/** * @file _reg_comon_gpu.cu * @author Marc Modat * @date 25/03/2009 * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_COMMON_GPU_CU #define _REG_COMMON_GPU_CU #include "_reg_common_cuda.h" #include "_reg_tools.h" /* ******************************** */ void cudaCommon_computeGridConfiguration(dim3 &r_blocks, dim3 &r_grid, const int targetVoxelNumber) { unsigned int maxThreads = 256; unsigned int maxBlocks = 65365; unsigned int blocks = (targetVoxelNumber % maxThreads) ? (targetVoxelNumber / maxThreads) + 1 : targetVoxelNumber / maxThreads; blocks = (std::min)(blocks, maxBlocks); r_grid = dim3(blocks, 1, 1); r_blocks = dim3(maxThreads, 1, 1); } /* ******************************** */ /* ******************************** */ template <class NIFTI_TYPE> int cudaCommon_transferNiftiToNiftiOnDevice1(nifti_image **image_d, nifti_image *img) { const unsigned int memSize = img->dim[1] * img->dim[2] * img->dim[3] * sizeof(NIFTI_TYPE); int *g_dim; float* g_pixdim; NIFTI_TYPE* g_data; NR_CUDA_SAFE_CALL(cudaMalloc((void**)&g_dim, 8 * sizeof(int))); NR_CUDA_SAFE_CALL(cudaMalloc((void**)&g_pixdim, 8 * sizeof(float))); NR_CUDA_SAFE_CALL(cudaMalloc((void**)&g_data, memSize)); NIFTI_TYPE *array_h = static_cast<NIFTI_TYPE *>( img->data ); NR_CUDA_SAFE_CALL(cudaMemcpy(( *image_d ), img, sizeof(nifti_image), cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy((*image_d)->data, array_h, memSize, cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy(( *image_d )->dim, img->dim, 8 * sizeof(int), cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy(( *image_d )->pixdim, img->pixdim, 8 * sizeof(float), cudaMemcpyHostToDevice)); return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToNiftiOnDevice1<float>(nifti_image **image_d, nifti_image *img); template int cudaCommon_transferNiftiToNiftiOnDevice1<double>(nifti_image **image_d, nifti_image *img); /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(DTYPE **array_d, const nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ const unsigned int memSize = img->nvox*sizeof(DTYPE); const NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, memSize, cudaMemcpyHostToDevice)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(DTYPE **array_d, const nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } const float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); const int voxelNumber = img->nx*img->ny*img->nz; for(int i=0; i<voxelNumber; i++) array_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) array_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) array_h[i].z= *niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; 
i<voxelNumber; i++) array_h[i].w= *niftiImgValues++; } NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, img->nx*img->ny*img->nz*sizeof(float4), cudaMemcpyHostToDevice)); free(array_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(array_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<double>(double **, const nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float>(float **, const nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<int>(int **, const nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(float4 **, const nifti_image *); /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(DTYPE **array_d, DTYPE **array2_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ const unsigned int memSize = img->dim[1] * img->dim[2] * img->dim[3] * sizeof(DTYPE); NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h=&array_h[img->dim[1] * img->dim[2] * img->dim[3]]; NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, memSize, cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy(*array2_d, array2_h, memSize, cudaMemcpyHostToDevice)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(DTYPE **array_d, DTYPE **array2_d, nifti_image *img) { if(sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); float4 *array2_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); const int voxelNumber = img->nx*img->ny*img->nz; for(int i=0; i<voxelNumber; i++) array_h[i].x= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) array_h[i].y= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) array_h[i].z= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].z= *niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) array_h[i].w= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].w= *niftiImgValues++; } NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, img->nx*img->ny*img->nz*sizeof(float4), cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy(*array2_d, array2_h, img->nx*img->ny*img->nz*sizeof(float4), cudaMemcpyHostToDevice)); free(array_h); free(array2_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(array_d, 
array2_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(float **,float **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<double>(double **,double **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(float4 **,float4 **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(cudaArray **cuArray_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); cudaMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; copyParams.kind = cudaMemcpyHostToDevice; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(cudaArray **cuArray_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1) ){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].x= *niftiImgValues++; if(img->dim[5]>=2) { for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3) { for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].z= *niftiImgValues++; } if(img->dim[5]==3) { for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].w= *niftiImgValues++; } cudaMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; copyParams.kind = cudaMemcpyHostToDevice; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)) free(array_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(cuArray_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<double>(cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<int>(cudaArray **, nifti_image *); template int 
cudaCommon_transferNiftiToArrayOnDevice<float4>(cudaArray **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(cudaArray **cuArray_d, cudaArray **cuArray2_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ NIFTI_TYPE *array_h = static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h = &array_h[img->dim[1]*img->dim[2]*img->dim[3]]; cudaMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.kind = cudaMemcpyHostToDevice; // First timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); // Second timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array2_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray2_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(cudaArray **cuArray_d, cudaArray **cuArray2_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1) ) { reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); float4 *array2_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].x= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].y= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].z= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].z= *niftiImgValues++; } if(img->dim[5]==3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].w= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].w= *niftiImgValues++; } cudaMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.kind = cudaMemcpyHostToDevice; // First timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); free(array_h); // Second timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array2_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray2_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); free(array2_h); } else{ // All these else could be removed but the nvcc compiler would 
warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(cuArray_d, cuArray2_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(cudaArray **, cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<double>(cudaArray **, cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(cudaArray **, cudaArray **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(cudaArray **cuArray_d, int *dim) { const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); cudaChannelFormatDesc texDesc = cudaCreateChannelDesc<DTYPE>(); NR_CUDA_SAFE_CALL(cudaMalloc3DArray(cuArray_d, &texDesc, volumeSize)); return EXIT_SUCCESS; }template int cudaCommon_allocateArrayToDevice<float>(cudaArray **, int *); template int cudaCommon_allocateArrayToDevice<double>(cudaArray **, int *); template int cudaCommon_allocateArrayToDevice<float4>(cudaArray **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(cudaArray **cuArray_d, cudaArray **cuArray2_d, int *dim) { const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); cudaChannelFormatDesc texDesc = cudaCreateChannelDesc<DTYPE>(); NR_CUDA_SAFE_CALL(cudaMalloc3DArray(cuArray_d, &texDesc, volumeSize)); NR_CUDA_SAFE_CALL(cudaMalloc3DArray(cuArray2_d, &texDesc, volumeSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(cudaArray **,cudaArray **, int *); template int cudaCommon_allocateArrayToDevice<double>(cudaArray **,cudaArray **, int *); template int cudaCommon_allocateArrayToDevice<float4>(cudaArray **,cudaArray **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, int *dim) { const unsigned int memSize = dim[1] * dim[2] * dim[3] * sizeof(DTYPE); NR_CUDA_SAFE_CALL(cudaMalloc(array_d, memSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(float **, int *); template int cudaCommon_allocateArrayToDevice<double>(double **, int *); template int cudaCommon_allocateArrayToDevice<int>(int **, int *); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, int *); // for deformation field /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, int vox) { const unsigned int memSize = vox * sizeof(DTYPE); NR_CUDA_SAFE_CALL(cudaMalloc(array_d, memSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(float **, int); template int cudaCommon_allocateArrayToDevice<double>(double **, int); template int cudaCommon_allocateArrayToDevice<int>(int **, int); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, int); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, DTYPE **array2_d, int *dim) { const unsigned int memSize = dim[1] * dim[2] * dim[3] * 
sizeof(DTYPE); NR_CUDA_SAFE_CALL(cudaMalloc(array_d, memSize)); NR_CUDA_SAFE_CALL(cudaMalloc(array2_d, memSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(float **, float **, int *); template int cudaCommon_allocateArrayToDevice<double>(double **, double **, int *); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, float4 **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToCpu(DTYPE *cpuPtr, DTYPE **cuPtr, const unsigned int nElements) { NR_CUDA_SAFE_CALL(cudaMemcpy((void *)cpuPtr, (void *)*cuPtr, nElements*sizeof(DTYPE), cudaMemcpyDeviceToHost)); //NR_CUDA_SAFE_CALL(cudaThreadSynchronize()); return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToCpu<float>(float *cpuPtr, float **cuPtr, const unsigned int nElements); template int cudaCommon_transferFromDeviceToCpu<double>(double *cpuPtr, double **cuPtr, const unsigned int nElements); /* ******************************** */ /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferFromDeviceToNifti1(nifti_image *img, DTYPE **array_d) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else { NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (void *)*array_d, img->nvox*sizeof(DTYPE), cudaMemcpyDeviceToHost)); } return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToNifti1<float, float>(nifti_image *img, float **array_d); template int cudaCommon_transferFromDeviceToNifti1<double, double>(nifti_image *img, double **array_d); /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNifti(nifti_image *img, DTYPE **array_d) { if(sizeof(DTYPE)==sizeof(float4)){ // A nifti 5D volume is expected if(img->dim[0]<5 || img->dim[4]>1 || img->dim[5]<2 || img->datatype!=NIFTI_TYPE_FLOAT32){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The nifti image is not a 5D volume"); return EXIT_FAILURE; } const int voxelNumber = img->nx*img->ny*img->nz; float4 *array_h; NR_CUDA_SAFE_CALL(cudaMallocHost(&array_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (const void *)*array_d, voxelNumber*sizeof(float4), cudaMemcpyDeviceToHost)); float *niftiImgValues = static_cast<float *>(img->data); for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].x; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].y; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].z; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].w; } NR_CUDA_SAFE_CALL(cudaFreeHost(array_h)); return EXIT_SUCCESS; } else{ switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferFromDeviceToNifti1<DTYPE,float>(img, array_d); default: reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } } template int cudaCommon_transferFromDeviceToNifti<float>(nifti_image *, float **); template int cudaCommon_transferFromDeviceToNifti<double>(nifti_image *, double **); template int 
cudaCommon_transferFromDeviceToNifti<float4>(nifti_image *, float4 **); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferFromDeviceToNifti1(nifti_image *img, DTYPE **array_d, DTYPE **array2_d) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ unsigned int voxelNumber=img->nx*img->ny*img->nz; NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h=&array_h[voxelNumber]; NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (void *)*array_d, voxelNumber*sizeof(DTYPE), cudaMemcpyDeviceToHost)); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array2_h, (void *)*array2_d, voxelNumber*sizeof(DTYPE), cudaMemcpyDeviceToHost)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNifti(nifti_image *img, DTYPE **array_d, DTYPE **array2_d) { if(sizeof(DTYPE)==sizeof(float4)){ // A nifti 5D volume is expected if(img->dim[0]<5 || img->dim[4]>1 || img->dim[5]<2 || img->datatype!=NIFTI_TYPE_FLOAT32){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The nifti image is not a 5D volume"); return EXIT_FAILURE; } const int voxelNumber = img->nx*img->ny*img->nz; float4 *array_h=NULL; float4 *array2_h=NULL; NR_CUDA_SAFE_CALL(cudaMallocHost(&array_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(cudaMallocHost(&array2_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (const void *)*array_d, voxelNumber*sizeof(float4), cudaMemcpyDeviceToHost)); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array2_h, (const void *)*array2_d, voxelNumber*sizeof(float4), cudaMemcpyDeviceToHost)); float *niftiImgValues = static_cast<float *>(img->data); for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].x; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].x; } if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].y; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].y; } } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].z; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].z; } } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].w; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].w; } } NR_CUDA_SAFE_CALL(cudaFreeHost(array_h)); NR_CUDA_SAFE_CALL(cudaFreeHost(array2_h)); return EXIT_SUCCESS; } else{ switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferFromDeviceToNifti1<DTYPE,float>(img, array_d, array2_d); default: reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } } template int cudaCommon_transferFromDeviceToNifti<float>(nifti_image *, float **, float **); template int cudaCommon_transferFromDeviceToNifti<double>(nifti_image *, double **, double **); template int cudaCommon_transferFromDeviceToNifti<float4>(nifti_image *, float4 **, float4 **); // for deformation field /* ******************************** */ /* ******************************** */ void cudaCommon_free(cudaArray **cuArray_d) { NR_CUDA_SAFE_CALL(cudaFreeArray(*cuArray_d)); return; } /* ******************************** */ /* ******************************** */ template <class 
DTYPE> void cudaCommon_free(DTYPE **array_d) { NR_CUDA_SAFE_CALL(cudaFree(*array_d)); return; } template void cudaCommon_free<int>(int **); template void cudaCommon_free<float>(float **); template void cudaCommon_free<double>(double **); template void cudaCommon_free<float4>(float4 **); /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNiftiSimple(DTYPE **array_d, nifti_image *img) { NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, img->data, img->nvox * sizeof(DTYPE), cudaMemcpyHostToDevice)); return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToNiftiSimple<int>(int **array_d, nifti_image *img); template int cudaCommon_transferFromDeviceToNiftiSimple<float>(float **array_d, nifti_image *img); template int cudaCommon_transferFromDeviceToNiftiSimple<double>(double **array_d, nifti_image *img); /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNiftiSimple1(DTYPE **array_d, DTYPE *img, const unsigned int nvox) { NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, img, nvox * sizeof(DTYPE), cudaMemcpyHostToDevice)); return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToNiftiSimple1<int>(int **array_d, int *img, const unsigned); template int cudaCommon_transferFromDeviceToNiftiSimple1<float>(float **array_d, float *img, const unsigned); template int cudaCommon_transferFromDeviceToNiftiSimple1<double>(double **array_d, double *img, const unsigned); /* ******************************** */ /* ******************************** */ /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferArrayFromCpuToDevice(DTYPE *array_d, const DTYPE *array_cpu, const unsigned int nElements) { const unsigned int memSize = nElements * sizeof(DTYPE); //copyData NR_CUDA_SAFE_CALL(cudaMemcpy(array_d, array_cpu, memSize, cudaMemcpyHostToDevice)); // return EXIT_SUCCESS; } template int cudaCommon_transferArrayFromCpuToDevice<int>(int *array_d, const int *array_cpu, const unsigned int nElements); template int cudaCommon_transferArrayFromCpuToDevice<float>(float *array_d, const float *array_cpu, const unsigned int nElements); template int cudaCommon_transferArrayFromCpuToDevice<double>(double *array_d, const double *array_cpu, const unsigned int nElements); /* ******************************** */ /* ******************************** */ /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferArrayFromDeviceToCpu(DTYPE *array_cpu, DTYPE *array_d, const unsigned int nElements) { const unsigned int memSize = nElements * sizeof(DTYPE); //copyData NR_CUDA_SAFE_CALL(cudaMemcpy(array_cpu, array_d, memSize, cudaMemcpyDeviceToHost)); // return EXIT_SUCCESS; } template int cudaCommon_transferArrayFromDeviceToCpu<int>(int *array_cpu, int *array_d, const unsigned int nElements); template int cudaCommon_transferArrayFromDeviceToCpu<float>(float *array_cpu, float *array_d, const unsigned int nElements); template int cudaCommon_transferArrayFromDeviceToCpu<double>(double *array_cpu, double *array_d, const unsigned int nElements); #endif /* ******************************** */ /* ******************************** */
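The pair of listings above only defines the NiftyReg allocation and transfer helpers; no call site is included. The fragment below is a minimal round-trip sketch, not taken from NiftyReg: it assumes a strictly 3D single-precision (NIFTI_TYPE_FLOAT32) nifti_image* named img and that the helpers are declared in _reg_common_cuda.h as above.

#include "_reg_common_cuda.h"

// Hypothetical round trip: host image -> device buffer -> host image.
static int roundTripOnDevice(nifti_image *img) {
    float *img_d = NULL;
    if (cudaCommon_allocateArrayToDevice<float>(&img_d, img->dim) != EXIT_SUCCESS)
        return EXIT_FAILURE;
    if (cudaCommon_transferNiftiToArrayOnDevice<float>(&img_d, img) != EXIT_SUCCESS)
        return EXIT_FAILURE;
    // ... kernels operating on img_d would run here ...
    if (cudaCommon_transferFromDeviceToNifti<float>(img, &img_d) != EXIT_SUCCESS)
        return EXIT_FAILURE;
    cudaCommon_free<float>(&img_d);
    return EXIT_SUCCESS;
}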
bdd03dbb1a1d517b8378f853ea9ea8a18f8d7684.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2021, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/factorization/par_ilu_kernels.hpp" #include <ginkgo/core/matrix/coo.hpp> #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The parallel ilu factorization namespace. * * @ingroup factor */ namespace par_ilu_factorization { constexpr int default_block_size{512}; #include "common/cuda_hip/factorization/par_ilu_kernels.hpp.inc" template <typename ValueType, typename IndexType> void compute_l_u_factors(std::shared_ptr<const CudaExecutor> exec, size_type iterations, const matrix::Coo<ValueType, IndexType>* system_matrix, matrix::Csr<ValueType, IndexType>* l_factor, matrix::Csr<ValueType, IndexType>* u_factor) { iterations = (iterations == 0) ? 10 : iterations; const auto num_elements = system_matrix->get_num_stored_elements(); const dim3 block_size{default_block_size, 1, 1}; const dim3 grid_dim{ static_cast<uint32>( ceildiv(num_elements, static_cast<size_type>(block_size.x))), 1, 1}; for (size_type i = 0; i < iterations; ++i) { hipLaunchKernelGGL(( kernel::compute_l_u_factors), dim3(grid_dim), dim3(block_size), 0, 0, num_elements, system_matrix->get_const_row_idxs(), system_matrix->get_const_col_idxs(), as_cuda_type(system_matrix->get_const_values()), l_factor->get_const_row_ptrs(), l_factor->get_const_col_idxs(), as_cuda_type(l_factor->get_values()), u_factor->get_const_row_ptrs(), u_factor->get_const_col_idxs(), as_cuda_type(u_factor->get_values())); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_PAR_ILU_COMPUTE_L_U_FACTORS_KERNEL); } // namespace par_ilu_factorization } // namespace cuda } // namespace kernels } // namespace gko
bdd03dbb1a1d517b8378f853ea9ea8a18f8d7684.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2021, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/factorization/par_ilu_kernels.hpp" #include <ginkgo/core/matrix/coo.hpp> #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The parallel ilu factorization namespace. * * @ingroup factor */ namespace par_ilu_factorization { constexpr int default_block_size{512}; #include "common/cuda_hip/factorization/par_ilu_kernels.hpp.inc" template <typename ValueType, typename IndexType> void compute_l_u_factors(std::shared_ptr<const CudaExecutor> exec, size_type iterations, const matrix::Coo<ValueType, IndexType>* system_matrix, matrix::Csr<ValueType, IndexType>* l_factor, matrix::Csr<ValueType, IndexType>* u_factor) { iterations = (iterations == 0) ? 10 : iterations; const auto num_elements = system_matrix->get_num_stored_elements(); const dim3 block_size{default_block_size, 1, 1}; const dim3 grid_dim{ static_cast<uint32>( ceildiv(num_elements, static_cast<size_type>(block_size.x))), 1, 1}; for (size_type i = 0; i < iterations; ++i) { kernel::compute_l_u_factors<<<grid_dim, block_size, 0, 0>>>( num_elements, system_matrix->get_const_row_idxs(), system_matrix->get_const_col_idxs(), as_cuda_type(system_matrix->get_const_values()), l_factor->get_const_row_ptrs(), l_factor->get_const_col_idxs(), as_cuda_type(l_factor->get_values()), u_factor->get_const_row_ptrs(), u_factor->get_const_col_idxs(), as_cuda_type(u_factor->get_values())); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_PAR_ILU_COMPUTE_L_U_FACTORS_KERNEL); } // namespace par_ilu_factorization } // namespace cuda } // namespace kernels } // namespace gko
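The .hip and .cu listings above differ only in the two generated header lines and in the kernel launch: hipify rewrites the CUDA triple-chevron launch into the hipLaunchKernelGGL macro with the same grid, block, shared-memory and stream arguments. A minimal illustration with a hypothetical kernel (not part of Ginkgo):

__global__ void scale(double *x, double alpha, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= alpha;
}

// CUDA form, as in the .cu file above:
//   scale<<<grid_dim, block_size, 0, 0>>>(x_d, alpha, n);
// hipify-generated form, as in the .hip file above:
//   hipLaunchKernelGGL((scale), dim3(grid_dim), dim3(block_size), 0, 0, x_d, alpha, n);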
0c235976e5b9a60ec25c75e2b0c9583fbe63ce16.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <time.h> #include <float.h> #include <hiprand/hiprand_kernel.h> #include "vec3.h" #include "ray.h" #include "sphere.h" #include "hitable_list.h" #include "camera.h" #include "material.h" // limited version of checkCudaErrors from helper_cuda.h in CUDA examples #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting hipDeviceReset(); exit(99); } } // Matching the C++ code would recurse enough into color() calls that // it was blowing up the stack, so we have to turn this into a // limited-depth loop instead. Later code in the book limits to a max // depth of 50, so we adapt this a few chapters early on the GPU. __device__ vec3 color(const ray& r, hitable **world, hiprandState_t *local_rand_state) { ray cur_ray = r; vec3 cur_attenuation = vec3(1.0, 1.0, 1.0); for (int i = 0; i < 50; i++) { hit_record rec; if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) { ray scattered; vec3 attenuation; if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) { cur_attenuation *= attenuation; cur_ray = scattered; } else { return vec3(0.0, 0.0, 0.0); } } else { vec3 unit_direction = unit_vector(cur_ray.direction()); float t = 0.5f*(unit_direction.y() + 1.0f); vec3 c = (1.0f - t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0); return cur_attenuation * c; } } return vec3(0.0, 0.0, 0.0); // exceeded recursion } __global__ void rand_init(hiprandState_t *rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { hiprand_init(1984, 0, 0, rand_state); } } __global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j*max_x + i; //Each thread gets same seed, a different sequence number, no offset hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]); } __global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, hiprandState_t *rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j*max_x + i; hiprandState_t local_rand_state = rand_state[pixel_index]; vec3 col(0, 0, 0); for (int s = 0; s < ns; s++) { float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x); float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y); ray r = (*cam)->get_ray(u, v, &local_rand_state); col += color(r, world, &local_rand_state); } rand_state[pixel_index] = local_rand_state; col /= float(ns); col[0] = sqrt(col[0]); col[1] = sqrt(col[1]); col[2] = sqrt(col[2]); fb[pixel_index] = col; } #define RND (hiprand_uniform(&local_rand_state)) __global__ void create_world(hitable **d_list, hitable **d_world, camera **d_camera, int nx, int ny, hiprandState_t *rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { hiprandState_t local_rand_state = *rand_state; d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000, new lambertian(vec3(0.5, 0.5, 0.5))); int i = 1; for (int a = -11; a < 11; a++) { for (int b = -11; b < 11; b++) { float choose_mat = RND; vec3 center(a 
+ RND, 0.2, b + RND); if (choose_mat < 0.8f) { d_list[i++] = new sphere(center, 0.2, new lambertian(vec3(RND*RND, RND*RND, RND*RND))); } else if (choose_mat < 0.95f) { d_list[i++] = new sphere(center, 0.2, new metal(vec3(0.5f*(1.0f + RND), 0.5f*(1.0f + RND), 0.5f*(1.0f + RND)), 0.5f*RND)); } else { d_list[i++] = new sphere(center, 0.2, new dielectric(1.5)); } } } d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5)); d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1))); d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0)); *rand_state = local_rand_state; *d_world = new hitable_list(d_list, 22 * 22 + 1 + 3); vec3 lookfrom(13, 2, 3); vec3 lookat(0, 0, 0); float dist_to_focus = 10.0; (lookfrom - lookat).length(); float aperture = 0.1; *d_camera = new camera(lookfrom, lookat, vec3(0, 1, 0), 30.0, float(nx) / float(ny), aperture, dist_to_focus); } } __global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) { for (int i = 0; i < 22 * 22 + 1 + 3; i++) { delete ((sphere *)d_list[i])->mat_ptr; delete d_list[i]; } delete *d_world; delete *d_camera; } int main() { int nx = 512; int ny = 512; int ns = 10; int tx = 16; int ty = 16; std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel "; std::cerr << "in " << tx << "x" << ty << " blocks.\n"; int num_pixels = nx*ny; size_t fb_size = num_pixels * sizeof(vec3); // allocate FB vec3 *fb; checkCudaErrors(hipMallocManaged((void **)&fb, fb_size)); // allocate random state hiprandState_t *d_rand_state; checkCudaErrors(hipMalloc((void **)&d_rand_state, num_pixels * sizeof(hiprandState_t))); hiprandState_t *d_rand_state2; checkCudaErrors(hipMalloc((void **)&d_rand_state2, 1 * sizeof(hiprandState_t))); // we need that 2nd random state to be initialized for the world creation rand_init << <1, 1 >> >(d_rand_state2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); // make our world of hitables & the camera hitable **d_list; int num_hitables = 22 * 22 + 1 + 3; checkCudaErrors(hipMalloc((void **)&d_list, num_hitables * sizeof(hitable *))); hitable **d_world; checkCudaErrors(hipMalloc((void **)&d_world, sizeof(hitable *))); camera **d_camera; checkCudaErrors(hipMalloc((void **)&d_camera, sizeof(camera *))); create_world << <1, 1 >> >(d_list, d_world, d_camera, nx, ny, d_rand_state2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); clock_t start, stop; start = clock(); // Render our buffer dim3 blocks(nx / tx + 1, ny / ty + 1); dim3 threads(tx, ty); //render_init << <blocks, threads >> >(nx, ny, d_rand_state); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); render << <blocks, threads >> >(fb, nx, ny, ns, d_camera, d_world, d_rand_state); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cerr << "took " << timer_seconds << " seconds.\n"; // Output FB as Image std::cout << "P3\n" << nx << " " << ny << "\n255\n"; for (int j = ny - 1; j >= 0; j--) { for (int i = 0; i < nx; i++) { size_t pixel_index = j*nx + i; int ir = int(255.99*fb[pixel_index].r()); int ig = int(255.99*fb[pixel_index].g()); int ib = int(255.99*fb[pixel_index].b()); std::cout << ir << " " << ig << " " << ib << "\n"; } } // clean up checkCudaErrors(hipDeviceSynchronize()); free_world << <1, 1 >> >(d_list, d_world, d_camera); checkCudaErrors(hipGetLastError()); 
checkCudaErrors(hipFree(d_camera)); checkCudaErrors(hipFree(d_world)); checkCudaErrors(hipFree(d_list)); checkCudaErrors(hipFree(d_rand_state)); checkCudaErrors(hipFree(fb)); hipDeviceReset(); }
0c235976e5b9a60ec25c75e2b0c9583fbe63ce16.cu
#include <iostream> #include <time.h> #include <float.h> #include <curand_kernel.h> #include "vec3.h" #include "ray.h" #include "sphere.h" #include "hitable_list.h" #include "camera.h" #include "material.h" // limited version of checkCudaErrors from helper_cuda.h in CUDA examples #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting cudaDeviceReset(); exit(99); } } // Matching the C++ code would recurse enough into color() calls that // it was blowing up the stack, so we have to turn this into a // limited-depth loop instead. Later code in the book limits to a max // depth of 50, so we adapt this a few chapters early on the GPU. __device__ vec3 color(const ray& r, hitable **world, curandState *local_rand_state) { ray cur_ray = r; vec3 cur_attenuation = vec3(1.0, 1.0, 1.0); for (int i = 0; i < 50; i++) { hit_record rec; if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) { ray scattered; vec3 attenuation; if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) { cur_attenuation *= attenuation; cur_ray = scattered; } else { return vec3(0.0, 0.0, 0.0); } } else { vec3 unit_direction = unit_vector(cur_ray.direction()); float t = 0.5f*(unit_direction.y() + 1.0f); vec3 c = (1.0f - t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0); return cur_attenuation * c; } } return vec3(0.0, 0.0, 0.0); // exceeded recursion } __global__ void rand_init(curandState *rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { curand_init(1984, 0, 0, rand_state); } } __global__ void render_init(int max_x, int max_y, curandState *rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j*max_x + i; //Each thread gets same seed, a different sequence number, no offset curand_init(1984, pixel_index, 0, &rand_state[pixel_index]); } __global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, curandState *rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j*max_x + i; curandState local_rand_state = rand_state[pixel_index]; vec3 col(0, 0, 0); for (int s = 0; s < ns; s++) { float u = float(i + curand_uniform(&local_rand_state)) / float(max_x); float v = float(j + curand_uniform(&local_rand_state)) / float(max_y); ray r = (*cam)->get_ray(u, v, &local_rand_state); col += color(r, world, &local_rand_state); } rand_state[pixel_index] = local_rand_state; col /= float(ns); col[0] = sqrt(col[0]); col[1] = sqrt(col[1]); col[2] = sqrt(col[2]); fb[pixel_index] = col; } #define RND (curand_uniform(&local_rand_state)) __global__ void create_world(hitable **d_list, hitable **d_world, camera **d_camera, int nx, int ny, curandState *rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { curandState local_rand_state = *rand_state; d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000, new lambertian(vec3(0.5, 0.5, 0.5))); int i = 1; for (int a = -11; a < 11; a++) { for (int b = -11; b < 11; b++) { float choose_mat = RND; vec3 center(a + RND, 0.2, b + RND); if (choose_mat < 0.8f) { d_list[i++] = new sphere(center, 0.2, new lambertian(vec3(RND*RND, 
RND*RND, RND*RND))); } else if (choose_mat < 0.95f) { d_list[i++] = new sphere(center, 0.2, new metal(vec3(0.5f*(1.0f + RND), 0.5f*(1.0f + RND), 0.5f*(1.0f + RND)), 0.5f*RND)); } else { d_list[i++] = new sphere(center, 0.2, new dielectric(1.5)); } } } d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5)); d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1))); d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0)); *rand_state = local_rand_state; *d_world = new hitable_list(d_list, 22 * 22 + 1 + 3); vec3 lookfrom(13, 2, 3); vec3 lookat(0, 0, 0); float dist_to_focus = 10.0; (lookfrom - lookat).length(); float aperture = 0.1; *d_camera = new camera(lookfrom, lookat, vec3(0, 1, 0), 30.0, float(nx) / float(ny), aperture, dist_to_focus); } } __global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) { for (int i = 0; i < 22 * 22 + 1 + 3; i++) { delete ((sphere *)d_list[i])->mat_ptr; delete d_list[i]; } delete *d_world; delete *d_camera; } int main() { int nx = 512; int ny = 512; int ns = 10; int tx = 16; int ty = 16; std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel "; std::cerr << "in " << tx << "x" << ty << " blocks.\n"; int num_pixels = nx*ny; size_t fb_size = num_pixels * sizeof(vec3); // allocate FB vec3 *fb; checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size)); // allocate random state curandState *d_rand_state; checkCudaErrors(cudaMalloc((void **)&d_rand_state, num_pixels * sizeof(curandState))); curandState *d_rand_state2; checkCudaErrors(cudaMalloc((void **)&d_rand_state2, 1 * sizeof(curandState))); // we need that 2nd random state to be initialized for the world creation rand_init << <1, 1 >> >(d_rand_state2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); // make our world of hitables & the camera hitable **d_list; int num_hitables = 22 * 22 + 1 + 3; checkCudaErrors(cudaMalloc((void **)&d_list, num_hitables * sizeof(hitable *))); hitable **d_world; checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(hitable *))); camera **d_camera; checkCudaErrors(cudaMalloc((void **)&d_camera, sizeof(camera *))); create_world << <1, 1 >> >(d_list, d_world, d_camera, nx, ny, d_rand_state2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); clock_t start, stop; start = clock(); // Render our buffer dim3 blocks(nx / tx + 1, ny / ty + 1); dim3 threads(tx, ty); //render_init << <blocks, threads >> >(nx, ny, d_rand_state); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); render << <blocks, threads >> >(fb, nx, ny, ns, d_camera, d_world, d_rand_state); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cerr << "took " << timer_seconds << " seconds.\n"; // Output FB as Image std::cout << "P3\n" << nx << " " << ny << "\n255\n"; for (int j = ny - 1; j >= 0; j--) { for (int i = 0; i < nx; i++) { size_t pixel_index = j*nx + i; int ir = int(255.99*fb[pixel_index].r()); int ig = int(255.99*fb[pixel_index].g()); int ib = int(255.99*fb[pixel_index].b()); std::cout << ir << " " << ig << " " << ib << "\n"; } } // clean up checkCudaErrors(cudaDeviceSynchronize()); free_world << <1, 1 >> >(d_list, d_world, d_camera); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(d_camera)); checkCudaErrors(cudaFree(d_world)); checkCudaErrors(cudaFree(d_list)); 
checkCudaErrors(cudaFree(d_rand_state)); checkCudaErrors(cudaFree(fb)); cudaDeviceReset(); }
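In both versions of the path tracer above, the render_init launch in main() is commented out, so the per-pixel random-state buffer d_rand_state is read by render() without ever being seeded. If seeded sampling is wanted, the kernel that already exists in the file can be launched just before render(), reusing the variables defined in main() (a sketch, not a change to the original listing):

render_init<<<blocks, threads>>>(nx, ny, d_rand_state);   // seeds one curandState per pixel
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());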
3eb0da79d066db22b3f806c6bae3613e2855b9c2.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include </home/nvidia/users/mini/CudaFunc.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <chrono>

int root = 3;
int framefactor = 2;
int square = 9;
int height = 4;
int width = 2;

// Each block averages "numpartitions" stacked height*width frames element-wise;
// each thread writes two rows of the block's output frame.
__global__ void multi_add(char* d_i, char* d_o, int width, int height, int numpartitions) {
    int with = width;
    int hith = height;
    int local = 0;
    for (int i = 0; i < 2*with; i++) {
        local = 0;
        for (int k = 0; k < numpartitions; k++) {
            local += (int)d_i[blockIdx.x*hith*with*numpartitions + k*hith*with + 2*with*threadIdx.x + i];
            //printf("%d ",(int)d_i[blockIdx.x*hith*with*numpartitions+k*hith*with+2*with*threadIdx.x+i]) ;
        }
        local /= numpartitions;
        d_o[(blockIdx.x*hith*with) + 2*with*threadIdx.x + i] = (char)local;
    }
    __syncthreads();
}

void AvgCalGpu(int aheight, int awidth, int aroot, int aframefactor, char *converted_arr[], char sum_arr[]) {
    root = aroot;
    square = root*root;
    framefactor = aframefactor;
    height = aheight;
    width = awidth;
    char temp_converted_arr[framefactor*root*height*width];
    char *d_i;
    char *d_o;
    char *d_s;
    hipMalloc((void **)&d_i, height*width*sizeof(char)*square);
    hipMalloc((void **)&d_o, height*width*sizeof(char)*root);
    // Stage 1: for every group of root*root input frames, produce root partial averages.
    for (int r = 0; r < framefactor; r++) {
        char* lp = converted_arr[r];
        hipMemcpy(d_i, lp, height*width*sizeof(char)*square, hipMemcpyHostToDevice);
        hipLaunchKernelGGL((multi_add), dim3(root), dim3(height/2), 0, 0, d_i, d_o, width, height, root);
        hipMemcpy(temp_converted_arr + root*height*width*r, d_o, height*width*sizeof(char)*root, hipMemcpyDeviceToHost);
    }
    hipFree(d_i);
    hipFree(d_o);
    hipMalloc((void **)&d_i, height*width*sizeof(char)*root*framefactor);
    hipMalloc((void **)&d_o, height*width*sizeof(char)*1*framefactor);
    hipMemcpy(d_i, temp_converted_arr, height*width*sizeof(char)*root*framefactor, hipMemcpyHostToDevice);
    // Stage 2: reduce each group's root partial averages to a single frame per group.
    hipLaunchKernelGGL((multi_add), dim3(framefactor), dim3(height/2), 0, 0, d_i, d_o, width, height, root);
    hipFree(d_i);
    hipMalloc((void **)&d_s, height*width*sizeof(char)*1);
    // Stage 3: average the framefactor per-group frames into the final output frame.
    hipLaunchKernelGGL((multi_add), dim3(1), dim3(height/2), 0, 0, d_o, d_s, width, height, framefactor);
    hipMemcpy(sum_arr, d_s, height*width*sizeof(char)*1, hipMemcpyDeviceToHost);
    hipFree(d_s);
    hipFree(d_o);
}
3eb0da79d066db22b3f806c6bae3613e2855b9c2.cu
#include <stdio.h>
#include </home/nvidia/users/mini/CudaFunc.h>
#include <cuda_runtime.h>
#include <iostream>
#include <chrono>

int root = 3;
int framefactor = 2;
int square = 9;
int height = 4;
int width = 2;

// Each block averages "numpartitions" stacked height*width frames element-wise;
// each thread writes two rows of the block's output frame.
__global__ void multi_add(char* d_i, char* d_o, int width, int height, int numpartitions) {
    int with = width;
    int hith = height;
    int local = 0;
    for (int i = 0; i < 2*with; i++) {
        local = 0;
        for (int k = 0; k < numpartitions; k++) {
            local += (int)d_i[blockIdx.x*hith*with*numpartitions + k*hith*with + 2*with*threadIdx.x + i];
            //printf("%d ",(int)d_i[blockIdx.x*hith*with*numpartitions+k*hith*with+2*with*threadIdx.x+i]) ;
        }
        local /= numpartitions;
        d_o[(blockIdx.x*hith*with) + 2*with*threadIdx.x + i] = (char)local;
    }
    __syncthreads();
}

void AvgCalGpu(int aheight, int awidth, int aroot, int aframefactor, char *converted_arr[], char sum_arr[]) {
    root = aroot;
    square = root*root;
    framefactor = aframefactor;
    height = aheight;
    width = awidth;
    char temp_converted_arr[framefactor*root*height*width];
    char *d_i;
    char *d_o;
    char *d_s;
    cudaMalloc((void **)&d_i, height*width*sizeof(char)*square);
    cudaMalloc((void **)&d_o, height*width*sizeof(char)*root);
    // Stage 1: for every group of root*root input frames, produce root partial averages.
    for (int r = 0; r < framefactor; r++) {
        char* lp = converted_arr[r];
        cudaMemcpy(d_i, lp, height*width*sizeof(char)*square, cudaMemcpyHostToDevice);
        multi_add<<<root, height/2>>>(d_i, d_o, width, height, root);
        cudaMemcpy(temp_converted_arr + root*height*width*r, d_o, height*width*sizeof(char)*root, cudaMemcpyDeviceToHost);
    }
    cudaFree(d_i);
    cudaFree(d_o);
    cudaMalloc((void **)&d_i, height*width*sizeof(char)*root*framefactor);
    cudaMalloc((void **)&d_o, height*width*sizeof(char)*1*framefactor);
    cudaMemcpy(d_i, temp_converted_arr, height*width*sizeof(char)*root*framefactor, cudaMemcpyHostToDevice);
    // Stage 2: reduce each group's root partial averages to a single frame per group.
    multi_add<<<framefactor, height/2>>>(d_i, d_o, width, height, root);
    cudaFree(d_i);
    cudaMalloc((void **)&d_s, height*width*sizeof(char)*1);
    // Stage 3: average the framefactor per-group frames into the final output frame.
    multi_add<<<1, height/2>>>(d_o, d_s, width, height, framefactor);
    cudaMemcpy(sum_arr, d_s, height*width*sizeof(char)*1, cudaMemcpyDeviceToHost);
    cudaFree(d_s);
    cudaFree(d_o);
}
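Neither listing of the AvgCalGpu pair above shows a caller. The following hypothetical driver (not part of the original file) illustrates the expected layout: each frames[r] holds root*root stacked height*width frames, height is assumed even because the kernel launches height/2 threads, and AvgCalGpu is assumed to be declared in the project's CudaFunc.h header.

#include <cstdlib>

int main() {
    const int height = 4, width = 2, root = 3, framefactor = 2;
    const int square = root * root;

    char *frames[framefactor];                     // framefactor groups of root*root frames
    for (int r = 0; r < framefactor; ++r)
        frames[r] = (char *)calloc((size_t)height * width * square, sizeof(char));

    char averaged[height * width];                 // receives the final averaged frame
    AvgCalGpu(height, width, root, framefactor, frames, averaged);

    for (int r = 0; r < framefactor; ++r)
        free(frames[r]);
    return 0;
}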
5198e64a80c04acd5d9e8c7173315d9d9ccf0aa8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void mykernel(void) {}

int main(void) {
    hipLaunchKernelGGL((mykernel), dim3(1), dim3(1), 0, 0, );
    printf("Hello World\n");
    return 0;
}
5198e64a80c04acd5d9e8c7173315d9d9ccf0aa8.cu
#include <stdio.h>

__global__ void mykernel(void) {}

int main(void) {
    mykernel<<<1, 1>>>();
    printf("Hello World\n");
    return 0;
}
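The minimal pair above launches the kernel without any error checking and returns without synchronizing. A hedged sketch of the usual additions, using only standard CUDA runtime calls (not part of the original file):

mykernel<<<1, 1>>>();
cudaError_t err = cudaGetLastError();            // reports launch-configuration errors
if (err != cudaSuccess) {
    printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    return 1;
}
cudaDeviceSynchronize();                         // waits for the (empty) kernel to complete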
a7e900af8614085896b025ebdb4eaee22a03dbc0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void ComputeEuklidianDistancesKernel( float *inputImg, int imgWidth, int imgHeight, float *centroidCoordinates, float *distanceMatrix, int centroids, int inputSize )
{
    int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
                 + blockDim.x*blockIdx.x           //blocks preceeding current block
                 + threadIdx.x;

    if(threadId < inputSize)
    {
        int pointX = threadId % imgWidth;
        int pointY = threadId / imgWidth;

        float X = (float)pointX;
        float Y = (float)pointY;

        float dist;
        float centroidX;
        float centroidY;

        for(int c = 0; c < centroids; c++)
        {
            centroidX = centroidCoordinates[c * 2];
            centroidY = centroidCoordinates[c * 2 + 1];
            dist = sqrtf( (centroidX - X) * (centroidX - X) + (centroidY - Y) * (centroidY - Y) );
            distanceMatrix[c * inputSize + threadId] = dist;
        }
    }
}
a7e900af8614085896b025ebdb4eaee22a03dbc0.cu
#include "includes.h" __global__ void ComputeEuklidianDistancesKernel( float *inputImg, int imgWidth, int imgHeight, float *centroidCoordinates, float *distanceMatrix, int centroids, int inputSize ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < inputSize) { int pointX = threadId % imgWidth; int pointY = threadId / imgWidth; float X = (float)pointX; float Y = (float)pointY; float dist; float centroidX; float centroidY; for(int c = 0; c < centroids; c++) { centroidX = centroidCoordinates[c * 2]; centroidY = centroidCoordinates[c * 2 + 1]; dist = sqrtf( (centroidX - X) * (centroidX - X) + (centroidY - Y) * (centroidY - Y) ); distanceMatrix[c * inputSize + threadId] = dist; } } }
23d3a312f9d7e643872777317e75c96eb6f37a0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // SPM Max Pooling CUDA // Author: Vic Chan // Date: 2018/5/21 //#ifdef __cplusplus //extern "C" { //#endif #include <stdio.h> #include "spmmax_pooling_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __global__ void spmmax_pooling_forward(const int num_threads, const int batch_size, const int num_grids, const int feature_size, const int num_rois, const float* x_data, const float* shapes_data, const float* rois_data, float* output_data, int* max_ids_data){ float spm[32] = {0, 1, 0, 1, 0, 0.5, 0, 0.5, 0, 0.5, 0.5, 1, 0.5, 1, 0, 0.5, 0.5, 1, 0.5, 1, 0, 1, 0, 0.33, 0, 1, 0.33, 0.67, 0, 1, 0.67, 1}; CUDA_1D_KERNEL_LOOP(thread_idx, num_threads) { // int thread_idx = threadIdx.x + blockIdx.x*blockDim.x; int roi_id = thread_idx/(num_grids * feature_size); int grid_id = (thread_idx - roi_id * num_grids * feature_size)/feature_size; int feature_id = thread_idx - roi_id * num_grids * feature_size - grid_id * feature_size; int batch_id = (int)rois_data[roi_id*5]; float center_x = (rois_data[roi_id*5+1] + rois_data[roi_id*5+3])/(2*shapes_data[batch_id*2+0]); float center_y = (rois_data[roi_id*5+2] + rois_data[roi_id*5+4])/(2*shapes_data[batch_id*2+1]); if (center_x >= spm[grid_id*4+0] && center_x < spm[grid_id*4+1] && center_y >= spm[grid_id*4+2] && center_y < spm[grid_id*4+3]) { int idx = batch_id*num_grids*feature_size + grid_id * feature_size + feature_id; if (max_ids_data[idx] == -1 || x_data[roi_id*feature_size + feature_id] > output_data[idx]) { atomicExch(max_ids_data+idx, roi_id); atomicExch(output_data+idx, x_data[roi_id*feature_size + feature_id]); } if (max_ids_data[idx] == -1 || x_data[roi_id*feature_size + feature_id] > output_data[idx]) { atomicExch(max_ids_data+idx, roi_id); atomicExch(output_data+idx, x_data[roi_id*feature_size + feature_id]); } } } } __global__ void spmmax_pooling_backward(const int num_threads, const int batch_size, const int num_grids, const int feature_size, const int num_rois, const float* grad_input_data, float* grad_output_data, const int* max_ids_data) { CUDA_1D_KERNEL_LOOP(thread_idx, num_threads) { // int thread_idx = threadIdx.x + blockIdx.x * blockDim.x; int batch_id = thread_idx / (num_grids * feature_size); int grid_id = (thread_idx - (num_grids * feature_size * batch_id)) / feature_size; int feature_id = thread_idx - num_grids * feature_size * batch_id - feature_size * grid_id; int idx = batch_id * num_grids * feature_size + grid_id * feature_size + feature_id; if (max_ids_data[idx] != -1) { atomicAdd(grad_output_data + max_ids_data[idx] * feature_size + feature_id, grad_input_data[idx]); } } } int spmmax_pooling_forward_kernel(const int batch_size, const int num_grids, const int feature_size, const int num_rois, const float* x_data,const float* shapes_data, const float* rois_data, float* output_data, int* max_ids_data, hipStream_t stream) { int output_size = num_rois * num_grids * feature_size; hipError_t err; const int kThreadsPerBlock = 1024; dim3 threads(kThreadsPerBlock); int block = (output_size + kThreadsPerBlock - 1)/kThreadsPerBlock; dim3 blocks(block); hipLaunchKernelGGL(( spmmax_pooling_forward), dim3(blocks), dim3(threads), 0, stream, output_size, batch_size, num_grids, feature_size, num_rois, x_data, shapes_data,rois_data, output_data, max_ids_data); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( 
err ) ); exit( -1 ); } return 1; } int spmmax_pooling_backward_kernel(const int batch_size, const int num_grids, const int feature_size, const int num_rois, const float* grad_input_data, float* grad_output_data, int* max_ids_data, hipStream_t stream) { const int kThreadsPerBlock = 1024; int output_size = batch_size * num_grids * feature_size; hipError_t err; dim3 threads(kThreadsPerBlock); int block = (output_size + kThreadsPerBlock - 1)/kThreadsPerBlock; dim3 blocks(block); hipLaunchKernelGGL(( spmmax_pooling_backward), dim3(blocks), dim3(threads), 0, stream, output_size, batch_size, num_grids, feature_size, num_rois, grad_input_data, grad_output_data, max_ids_data); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit(-1); } return 1; } //#ifdef __cplusplus //} //#endif
23d3a312f9d7e643872777317e75c96eb6f37a0a.cu
// SPM Max Pooling CUDA // Author: Vic Chan // Date: 2018/5/21 //#ifdef __cplusplus //extern "C" { //#endif #include <stdio.h> #include "spmmax_pooling_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __global__ void spmmax_pooling_forward(const int num_threads, const int batch_size, const int num_grids, const int feature_size, const int num_rois, const float* x_data, const float* shapes_data, const float* rois_data, float* output_data, int* max_ids_data){ float spm[32] = {0, 1, 0, 1, 0, 0.5, 0, 0.5, 0, 0.5, 0.5, 1, 0.5, 1, 0, 0.5, 0.5, 1, 0.5, 1, 0, 1, 0, 0.33, 0, 1, 0.33, 0.67, 0, 1, 0.67, 1}; CUDA_1D_KERNEL_LOOP(thread_idx, num_threads) { // int thread_idx = threadIdx.x + blockIdx.x*blockDim.x; int roi_id = thread_idx/(num_grids * feature_size); int grid_id = (thread_idx - roi_id * num_grids * feature_size)/feature_size; int feature_id = thread_idx - roi_id * num_grids * feature_size - grid_id * feature_size; int batch_id = (int)rois_data[roi_id*5]; float center_x = (rois_data[roi_id*5+1] + rois_data[roi_id*5+3])/(2*shapes_data[batch_id*2+0]); float center_y = (rois_data[roi_id*5+2] + rois_data[roi_id*5+4])/(2*shapes_data[batch_id*2+1]); if (center_x >= spm[grid_id*4+0] && center_x < spm[grid_id*4+1] && center_y >= spm[grid_id*4+2] && center_y < spm[grid_id*4+3]) { int idx = batch_id*num_grids*feature_size + grid_id * feature_size + feature_id; if (max_ids_data[idx] == -1 || x_data[roi_id*feature_size + feature_id] > output_data[idx]) { atomicExch(max_ids_data+idx, roi_id); atomicExch(output_data+idx, x_data[roi_id*feature_size + feature_id]); } if (max_ids_data[idx] == -1 || x_data[roi_id*feature_size + feature_id] > output_data[idx]) { atomicExch(max_ids_data+idx, roi_id); atomicExch(output_data+idx, x_data[roi_id*feature_size + feature_id]); } } } } __global__ void spmmax_pooling_backward(const int num_threads, const int batch_size, const int num_grids, const int feature_size, const int num_rois, const float* grad_input_data, float* grad_output_data, const int* max_ids_data) { CUDA_1D_KERNEL_LOOP(thread_idx, num_threads) { // int thread_idx = threadIdx.x + blockIdx.x * blockDim.x; int batch_id = thread_idx / (num_grids * feature_size); int grid_id = (thread_idx - (num_grids * feature_size * batch_id)) / feature_size; int feature_id = thread_idx - num_grids * feature_size * batch_id - feature_size * grid_id; int idx = batch_id * num_grids * feature_size + grid_id * feature_size + feature_id; if (max_ids_data[idx] != -1) { atomicAdd(grad_output_data + max_ids_data[idx] * feature_size + feature_id, grad_input_data[idx]); } } } int spmmax_pooling_forward_kernel(const int batch_size, const int num_grids, const int feature_size, const int num_rois, const float* x_data,const float* shapes_data, const float* rois_data, float* output_data, int* max_ids_data, cudaStream_t stream) { int output_size = num_rois * num_grids * feature_size; cudaError_t err; const int kThreadsPerBlock = 1024; dim3 threads(kThreadsPerBlock); int block = (output_size + kThreadsPerBlock - 1)/kThreadsPerBlock; dim3 blocks(block); spmmax_pooling_forward<<<blocks, threads, 0, stream>>>(output_size, batch_size, num_grids, feature_size, num_rois, x_data, shapes_data,rois_data, output_data, max_ids_data); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int spmmax_pooling_backward_kernel(const int batch_size, const int num_grids, 
const int feature_size, const int num_rois, const float* grad_input_data, float* grad_output_data, int* max_ids_data, cudaStream_t stream) { const int kThreadsPerBlock = 1024; int output_size = batch_size * num_grids * feature_size; cudaError_t err; dim3 threads(kThreadsPerBlock); int block = (output_size + kThreadsPerBlock - 1)/kThreadsPerBlock; dim3 blocks(block); spmmax_pooling_backward<<<blocks, threads, 0, stream>>>(output_size, batch_size, num_grids, feature_size, num_rois, grad_input_data, grad_output_data, max_ids_data); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit(-1); } return 1; } //#ifdef __cplusplus //} //#endif
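The forward path above assumes that max_ids_data is pre-initialised to -1 (the kernel uses -1 as the "no ROI assigned yet" marker) and that output_data starts at zero. A minimal, hypothetical host-side driver under those assumptions follows; the helper function, buffer names, and sizes are illustrative and not part of the original sources, though num_grids = 8 matches the eight [x_lo, x_hi, y_lo, y_hi] cells hard-coded in spm[32].

// Hypothetical usage sketch (not from the original project).
#include <cuda_runtime.h>
#include "spmmax_pooling_kernel.h"

void run_forward_example(const float* d_x, const float* d_shapes, const float* d_rois,
                         cudaStream_t stream)
{
    const int batch_size = 2, num_grids = 8, feature_size = 256, num_rois = 32;
    const int cells = batch_size * num_grids * feature_size;
    float* d_out;  int* d_max_ids;
    cudaMalloc(&d_out, sizeof(float) * cells);
    cudaMalloc(&d_max_ids, sizeof(int) * cells);
    cudaMemset(d_out, 0, sizeof(float) * cells);       // pooled maxima accumulate from 0
    cudaMemset(d_max_ids, 0xFF, sizeof(int) * cells);  // all-0xFF bytes == -1 per int, the "empty cell" marker
    spmmax_pooling_forward_kernel(batch_size, num_grids, feature_size, num_rois,
                                  d_x, d_shapes, d_rois, d_out, d_max_ids, stream);
    cudaFree(d_out);
    cudaFree(d_max_ids);
}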
c2a19cffb5caf819e07780e3a89bca1469aa6a52.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void zgeadd_kernel( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { // dA and dB iterate across row i int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const magmaDoubleComplex *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /* ===================================================================== */ extern "C" void magmablas_zgeadd( magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex *dA, magma_int_t ldda, magmaDoubleComplex *dB, magma_int_t lddb ) { /* Purpose ======= ZGEADD adds two matrices, dB = alpha*dA + dB. Arguments ========= M (input) INTEGER The number of rows of the matrix dA. M >= 0. N (input) INTEGER The number of columns of the matrix dA. N >= 0. ALPHA (input) COMPLEX DOUBLE PRECISION The scalar alpha. dA (input) COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrix dA. LDDA (input) INTEGER The leading dimension of the array dA. LDDA >= max(1,M). dB (input/output) COMPLEX DOUBLE PRECISION array, dimension (LDDB,N) The m by n matrix dB. LDDB (input) INTEGER The leading dimension of the array dB. LDDB >= max(1,M). ===================================================================== */ magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB ); hipLaunchKernelGGL(( zgeadd_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, dA, ldda, dB, lddb ); }
c2a19cffb5caf819e07780e3a89bca1469aa6a52.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void zgeadd_kernel( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { // dA and dB iterate across row i int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const magmaDoubleComplex *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /* ===================================================================== */ extern "C" void magmablas_zgeadd( magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex *dA, magma_int_t ldda, magmaDoubleComplex *dB, magma_int_t lddb ) { /* Purpose ======= ZGEADD adds two matrices, dB = alpha*dA + dB. Arguments ========= M (input) INTEGER The number of rows of the matrix dA. M >= 0. N (input) INTEGER The number of columns of the matrix dA. N >= 0. ALPHA (input) COMPLEX DOUBLE PRECISION The scalar alpha. dA (input) COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrix dA. LDDA (input) INTEGER The leading dimension of the array dA. LDDA >= max(1,M). dB (input/output) COMPLEX DOUBLE PRECISION array, dimension (LDDB,N) The m by n matrix dB. LDDB (input) INTEGER The leading dimension of the array dB. LDDB >= max(1,M). ===================================================================== */ magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB ); zgeadd_kernel<<< grid, threads, 0, magma_stream >>>( m, n, alpha, dA, ldda, dB, lddb ); }
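As the header comment above explains, the wrapper splits the m rows into blocks of NB = 64, assigns one CUDA block per row block and one thread per row, and each thread strides across all n columns. A hypothetical call sketch, not taken from MAGMA's own tests (MAGMA_Z_MAKE and the caller-owned device buffers dA/dB are assumptions here):

// Hypothetical call sketch: forms dB := 2*dA + dB for m-by-n column-major device matrices.
#include "common_magma.h"

void geadd_example(magmaDoubleComplex *dA, magmaDoubleComplex *dB, magma_int_t m, magma_int_t n)
{
    magmaDoubleComplex alpha = MAGMA_Z_MAKE(2.0, 0.0);   // scalar multiplier for dA
    magmablas_zgeadd(m, n, alpha, dA, /*ldda=*/m, dB, /*lddb=*/m);
    // Launch shape chosen inside the wrapper: ceil(m/NB) blocks of NB = 64 threads;
    // thread i owns row i and walks the n columns with strides ldda (dA) and lddb (dB).
}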
9934773e959bed3df21790ee384aeebf72bf2e52.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/BinaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void sigmoid_backward_kernel_cuda(TensorIterator& iter) { if(isComplexType(iter.dtype())) { AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "sigmoid_backward_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * std::conj((scalar_t{1.} - b) * b); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "sigmoid_backward_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * (scalar_t(1.) - b) * b; }); }); } } void logit_backward_kernel_cuda(TensorIterator& iter, const Scalar& eps_scalar) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "logit_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC eps = eps_scalar.to<T_ACC>(); if (eps < T_ACC(0)) { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { const T_ACC dy_acc = static_cast<T_ACC>(dy); const T_ACC x_acc = static_cast<T_ACC>(x); return (x_acc < T_ACC(0) || x_acc > T_ACC(1)) ? std::numeric_limits<T_ACC>::quiet_NaN() : dy_acc / (x_acc * (T_ACC(1) - x_acc)); }); } else { const T_ACC lo = eps; const T_ACC hi = T_ACC(1) - eps; gpu_kernel( iter, [lo, hi] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { const T_ACC dy_acc = static_cast<T_ACC>(dy); const T_ACC x_acc = static_cast<T_ACC>(x); return (x_acc < lo || x_acc > hi) ? T_ACC(0) : dy_acc / (x_acc * (T_ACC(1) - x_acc)); }); } }); } void tanh_backward_kernel_cuda(TensorIterator& iter) { if(isComplexType(iter.dtype())) { AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "tanh_backward_complex_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * std::conj(scalar_t{1.} - b * b); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "tanh_backward_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * (scalar_t{1.} - b * b); }); }); } } REGISTER_DISPATCH(sigmoid_backward_stub, &sigmoid_backward_kernel_cuda); REGISTER_DISPATCH(logit_backward_stub, &logit_backward_kernel_cuda); REGISTER_DISPATCH(tanh_backward_stub, &tanh_backward_kernel_cuda); } // namespace native } // namespace at
9934773e959bed3df21790ee384aeebf72bf2e52.cu
#include <ATen/native/BinaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void sigmoid_backward_kernel_cuda(TensorIterator& iter) { if(isComplexType(iter.dtype())) { AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "sigmoid_backward_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * std::conj((scalar_t{1.} - b) * b); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "sigmoid_backward_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * (scalar_t(1.) - b) * b; }); }); } } void logit_backward_kernel_cuda(TensorIterator& iter, const Scalar& eps_scalar) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "logit_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC eps = eps_scalar.to<T_ACC>(); if (eps < T_ACC(0)) { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { const T_ACC dy_acc = static_cast<T_ACC>(dy); const T_ACC x_acc = static_cast<T_ACC>(x); return (x_acc < T_ACC(0) || x_acc > T_ACC(1)) ? std::numeric_limits<T_ACC>::quiet_NaN() : dy_acc / (x_acc * (T_ACC(1) - x_acc)); }); } else { const T_ACC lo = eps; const T_ACC hi = T_ACC(1) - eps; gpu_kernel( iter, [lo, hi] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { const T_ACC dy_acc = static_cast<T_ACC>(dy); const T_ACC x_acc = static_cast<T_ACC>(x); return (x_acc < lo || x_acc > hi) ? T_ACC(0) : dy_acc / (x_acc * (T_ACC(1) - x_acc)); }); } }); } void tanh_backward_kernel_cuda(TensorIterator& iter) { if(isComplexType(iter.dtype())) { AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "tanh_backward_complex_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * std::conj(scalar_t{1.} - b * b); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "tanh_backward_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * (scalar_t{1.} - b * b); }); }); } } REGISTER_DISPATCH(sigmoid_backward_stub, &sigmoid_backward_kernel_cuda); REGISTER_DISPATCH(logit_backward_stub, &logit_backward_kernel_cuda); REGISTER_DISPATCH(tanh_backward_stub, &tanh_backward_kernel_cuda); } // namespace native } // namespace at
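The three dispatchers above apply the element-wise chain-rule factors for sigmoid, tanh, and logit (with a conjugate in the complex paths). For reference, the real-valued formulas written out as plain scalar functions; these are purely illustrative and not part of the PyTorch API:

// Scalar reference of the per-element formulas the GPU lambdas apply (real case);
// y denotes the forward output, x the forward input.
#include <cmath>

double sigmoid_backward_ref(double grad, double y) { return grad * (1.0 - y) * y; }   // d/dx sigmoid = y*(1-y)
double tanh_backward_ref(double grad, double y)    { return grad * (1.0 - y * y); }   // d/dx tanh    = 1 - y^2
double logit_backward_ref(double grad, double x)   { return grad / (x * (1.0 - x)); } // eps < 0 path, 0 < x < 1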
16dcb0860deae39ae86871eaa6b684c9a4ff76c2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void get_average(unsigned char * img, int * nz, int * average, int scale) {
    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;
    //int h = width /2;
    for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) {
        int iw = x;
        int ih = y + j;
        if (img[3*(ih*width + iw)] + img[3*(ih*width + iw)+1] + img[3*(ih*width + iw)+2] > 0) {
            //nz[ih/3 * width + iw/3] += 1;
            //average[3*(ih/3*width + iw/3)] += (int)img[3*(ih*width + iw)];
            //average[3*(ih/3*width + iw/3)+1] += (int)img[3*(ih*width + iw)+1];
            //average[3*(ih/3*width + iw/3)+2] += (int)img[3*(ih*width + iw)+2];
            atomicAdd(&(nz[ih/scale * width + iw/scale]), 1);
            atomicAdd(&(average[3*(ih/scale*width + iw/scale)]), (int)img[3*(ih*width + iw)]);
            atomicAdd(&(average[3*(ih/scale*width + iw/scale)+1]), (int)img[3*(ih*width + iw)+1]);
            atomicAdd(&(average[3*(ih/scale*width + iw/scale)+2]), (int)img[3*(ih*width + iw)+2]);
        }
    }
}
16dcb0860deae39ae86871eaa6b684c9a4ff76c2.cu
#include "includes.h"

__global__ void get_average(unsigned char * img, int * nz, int * average, int scale) {
    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;
    //int h = width /2;
    for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) {
        int iw = x;
        int ih = y + j;
        if (img[3*(ih*width + iw)] + img[3*(ih*width + iw)+1] + img[3*(ih*width + iw)+2] > 0) {
            //nz[ih/3 * width + iw/3] += 1;
            //average[3*(ih/3*width + iw/3)] += (int)img[3*(ih*width + iw)];
            //average[3*(ih/3*width + iw/3)+1] += (int)img[3*(ih*width + iw)+1];
            //average[3*(ih/3*width + iw/3)+2] += (int)img[3*(ih*width + iw)+2];
            atomicAdd(&(nz[ih/scale * width + iw/scale]), 1);
            atomicAdd(&(average[3*(ih/scale*width + iw/scale)]), (int)img[3*(ih*width + iw)]);
            atomicAdd(&(average[3*(ih/scale*width + iw/scale)+1]), (int)img[3*(ih*width + iw)+1]);
            atomicAdd(&(average[3*(ih/scale*width + iw/scale)+2]), (int)img[3*(ih*width + iw)+2]);
        }
    }
}
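The kernel above only accumulates per-cell colour sums (average) and hit counts (nz) with atomicAdd; producing the actual mean is presumably left to the caller, which is not shown in this listing. A hypothetical host-side finishing step under that assumption, with finish_average, out, width, and height as illustrative names:

// Hypothetical post-processing (not in the original file): convert the accumulated
// sums and counts produced by get_average into per-cell mean RGB values.
void finish_average(const int* average, const int* nz, unsigned char* out, int width, int height)
{
    for (int cell = 0; cell < width * height; ++cell) {
        if (nz[cell] > 0) {  // only cells that received at least one non-black pixel
            out[3 * cell + 0] = (unsigned char)(average[3 * cell + 0] / nz[cell]);
            out[3 * cell + 1] = (unsigned char)(average[3 * cell + 1] / nz[cell]);
            out[3 * cell + 2] = (unsigned char)(average[3 * cell + 2] / nz[cell]);
        }
    }
}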
643837340065b2feb0cd6ed397adb307120c9f2e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zlarfgx-v2.cu normal z -> s, Fri Jul 18 17:34:12 2014 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define PRECISION_s __global__ void magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv, float *c, float *dwork, float *tau); __global__ void magma_strmv_kernel(const float *T, int ldt, float *v); __global__ void magma_strmv_kernel2(const float *T, int ldt, float *v, float *y, float *tau); //============================================================================== __global__ void magma_slarfgx_gpu_kernel( int n, float* dx0, float* dx, float *dtau, float *dxnorm, float *dA, int it) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ float scale; __shared__ float xnorm; float dxi; if ( j < n-1 ) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; if ( xnorm == 0 || n == 1) { *dtau = MAGMA_S_ZERO; *dA = *dx0; } else { #if (defined(PRECISION_s) || defined(PRECISION_d)) float alpha = *dx0; // no need to compute the norm as it is passed as input float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j==0){ *dtau = (beta - alpha) / beta; //*dx0 = 1.; *dA = beta; } scale = 1. / (alpha - beta); #else float alpha = *dx0; float alphar = MAGMA_S_REAL(alpha), alphai = MAGMA_S_IMAG(alpha); // no need to compute the norm as it is passed as input float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j==0){ *dtau = MAGMA_S_MAKE((beta - alphar)/beta, -alphai/beta); //*dx0 = MAGMA_S_MAKE( 1., 0.); *dA = MAGMA_S_MAKE(beta, 0.); } alpha = MAGMA_S_MAKE( MAGMA_S_REAL(alpha) - beta, MAGMA_S_IMAG(alpha)); scale = MAGMA_S_DIV( MAGMA_S_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_S_MUL(dxi, scale); if (j<it){ *( dA-it+j) = *(dx0-it+j); *(dx0-it+j) = MAGMA_S_MAKE(0., 0.); } } //============================================================================== /* Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = norm( [dx0, dx] ) = dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's slarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). */ extern "C" void magma_slarfgx_gpu(magma_int_t n, float *dx0, float *dx, float *dtau, float *dxnorm, float *dA, magma_int_t it) { dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE); dim3 threads( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_slarfgx_gpu_kernel), dim3(blocks), dim3(threads), 0, magma_stream , n, dx0, dx, dtau, dxnorm, dA, it); } //============================================================================== /* Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = norm( [dx0, dx] ) = dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. 
The difference with LAPACK's slarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). */ extern "C" void magma_slarfgtx_gpu(magma_int_t n, float *dx0, float *dx, float *dtau, float *dxnorm, float *dA, magma_int_t i, float *V, magma_int_t ldv, float *T, magma_int_t ldt, float *work) { /* Generate the elementary reflector H(i) */ magma_slarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, i); if (i==0) { float tt = MAGMA_S_ONE; magmablas_slacpy(MagmaUpperLower, 1, 1, dtau, 1, T+i+i*ldt, 1); magma_ssetmatrix(1,1, &tt,1, dx0,1); } else { /* Compute the i-th column of T */ hipLaunchKernelGGL(( magma_sgemv_kernel3), dim3(i), dim3(BLOCK_SIZE), 0, magma_stream , n, V, ldv, dx0, work, dtau); hipLaunchKernelGGL(( magma_strmv_kernel2), dim3(i), dim3(i), 0, magma_stream , T, ldt, work, T+i*ldt, dtau); } } //==============================================================================
643837340065b2feb0cd6ed397adb307120c9f2e.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zlarfgx-v2.cu normal z -> s, Fri Jul 18 17:34:12 2014 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define PRECISION_s __global__ void magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv, float *c, float *dwork, float *tau); __global__ void magma_strmv_kernel(const float *T, int ldt, float *v); __global__ void magma_strmv_kernel2(const float *T, int ldt, float *v, float *y, float *tau); //============================================================================== __global__ void magma_slarfgx_gpu_kernel( int n, float* dx0, float* dx, float *dtau, float *dxnorm, float *dA, int it) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ float scale; __shared__ float xnorm; float dxi; if ( j < n-1 ) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; if ( xnorm == 0 || n == 1) { *dtau = MAGMA_S_ZERO; *dA = *dx0; } else { #if (defined(PRECISION_s) || defined(PRECISION_d)) float alpha = *dx0; // no need to compute the norm as it is passed as input float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j==0){ *dtau = (beta - alpha) / beta; //*dx0 = 1.; *dA = beta; } scale = 1. / (alpha - beta); #else float alpha = *dx0; float alphar = MAGMA_S_REAL(alpha), alphai = MAGMA_S_IMAG(alpha); // no need to compute the norm as it is passed as input float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j==0){ *dtau = MAGMA_S_MAKE((beta - alphar)/beta, -alphai/beta); //*dx0 = MAGMA_S_MAKE( 1., 0.); *dA = MAGMA_S_MAKE(beta, 0.); } alpha = MAGMA_S_MAKE( MAGMA_S_REAL(alpha) - beta, MAGMA_S_IMAG(alpha)); scale = MAGMA_S_DIV( MAGMA_S_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_S_MUL(dxi, scale); if (j<it){ *( dA-it+j) = *(dx0-it+j); *(dx0-it+j) = MAGMA_S_MAKE(0., 0.); } } //============================================================================== /* Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = ±norm( [dx0, dx] ) = ±dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's slarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). */ extern "C" void magma_slarfgx_gpu(magma_int_t n, float *dx0, float *dx, float *dtau, float *dxnorm, float *dA, magma_int_t it) { dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE); dim3 threads( BLOCK_SIZE ); magma_slarfgx_gpu_kernel<<< blocks, threads, 0, magma_stream >>>( n, dx0, dx, dtau, dxnorm, dA, it); } //============================================================================== /* Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = ±norm( [dx0, dx] ) = ±dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's slarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). 
*/ extern "C" void magma_slarfgtx_gpu(magma_int_t n, float *dx0, float *dx, float *dtau, float *dxnorm, float *dA, magma_int_t i, float *V, magma_int_t ldv, float *T, magma_int_t ldt, float *work) { /* Generate the elementary reflector H(i) */ magma_slarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, i); if (i==0) { float tt = MAGMA_S_ONE; magmablas_slacpy(MagmaUpperLower, 1, 1, dtau, 1, T+i+i*ldt, 1); magma_ssetmatrix(1,1, &tt,1, dx0,1); } else { /* Compute the i-th column of T */ magma_sgemv_kernel3<<< i, BLOCK_SIZE, 0, magma_stream >>>(n, V, ldv, dx0, work, dtau); magma_strmv_kernel2<<< i, i, 0, magma_stream >>>( T, ldt, work, T+i*ldt, dtau); } } //==============================================================================
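The real-precision branch of magma_slarfgx_gpu_kernel derives the Householder quantities directly from the precomputed norm. Restated as plain scalar arithmetic for the j == 0 case (a sketch only; like the original, it assumes xnorm = ||[dx0, dx]|| is supplied and does not handle badly scaled vectors):

// Scalar sketch of the real-precision j == 0 branch of magma_slarfgx_gpu_kernel.
#include <math.h>

void larfg_scalars(float alpha, float xnorm, float *beta, float *tau, float *scale)
{
    *beta  = -copysignf(xnorm, alpha);   // beta takes the sign opposite to alpha
    *tau   = (*beta - alpha) / *beta;    // value written to *dtau
    *scale = 1.0f / (alpha - *beta);     // remaining entries of dx are multiplied by this
}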
f90faa02af4d6325175a0c61a677fa3b872171e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "lab3_cuda.h" #include <cmath> #include <malloc.h> #include <math.h> #include <algorithm> using namespace std; /////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////Helper Functions/////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////// double compare_matrices(double* A, double* B, int M, int N){ double diff = 0; int p, q; for(int i = 0; i < M; i++) for(int j = 0; j < N; j++){ if(fabs(fabs(A[i*N+j]) - fabs(B[i*N+j])) > diff){ diff = fabs(fabs(A[i*N+j]) - fabs(B[i*N+j])); p = i; q = j; } } return diff; } void reverse_array(double* a, int N){ double* temp = new double[N]; for(int i = 0; i < N; i++) temp[i] = a[i]; for(int i = 0; i < N; i++) a[i] = temp[N-i-1]; } void copy_matrix(double* to, double* from, int n, int m){ for(int i = 0; i < m; i++){ for(int j = 0; j < n; j++) to[i*n+j] = from[i*n+j]; } } double* null_matrix(int m, int n){ double* A; hipMallocManaged((void**)&A, sizeof(double)*m*n); return A; } double* empty_matrix(int m, int n){ double* A; hipMallocManaged((void**)&A, sizeof(double)*m*n); for(int i = 0; i < m; i++){ for(int j = 0; j < n; j++) A[i*n+j] = 0; } return A; } double** empty_matrix_2d(int m, int n){ double** A = new double*[m]; for(int i = 0; i < m; i++){ A[i] = new double[n]; for(int j = 0; j < n; j++) A[i][j] = 0; } return A; } double** null_matrix_2d(int m, int n){ double** A = new double*[m]; for(int i = 0; i < m; i++){ A[i] = new double[n]; } return A; } void copy_matrix_to2d(double** to, double* from, int m, int n){ for(int i = 0; i < m; i++){ for(int j = 0; j < n; j++) to[i][j] = from[i*n+j]; } } void copy_matrix_from2d(double* to, double** from, int m, int n){ for(int i = 0; i < m; i++){ for(int j = 0; j < n; j++) to[i*n+j] = from[i][j]; } } double* diagonal_matrix(int n){ double* A; hipMallocManaged((void**)&A, sizeof(double)*n*n); for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++) A[i*n+j] = (i == j) ? 
1 : 0; } return A; } void matrix_multiply(double* res, double* A, double* B, int N, int M, int N1){ // Matrices shapes: A = NxM, B = MxN1, res = NxN1 for(int i = 0; i < N; i++){ for(int j = 0; j < N1; j++){ res[i*N1+j] = 0; for(int k = 0; k < M; k++) res[i*N1+j] += A[i*M+k] * B[k*N1+j]; } } } void print_matrix(double* A, int M, int N, char* name){ printf("\nMatrix %s\n", name); for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ printf("%f ", A[i*N+j]); } printf("\n"); } } void print_matrix_2d(double** A, int M, int N, char* name){ printf("\nMatrix %s\n", name); for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ printf("%f ", A[i][j]); } printf("\n"); } } /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////Jacobi//////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////// #define TOLERANCE 0.001 #define JACOBI_UPDATE_TOLERANCE 0.001 double **S; //Symmetric matrix (input) double *e; //eigenvalues double **E; //eigenvectors int *ind; bool *changed; int state; int N; double** mat1; double** mat2; double** mat3; double ek_prev; int m; void mat_mul(double** C, double** A, int Am, int An, double** B, int Bm, int Bn){ for (int i=0; i<Am; i++){ for (int j=0; j<Bn; j++){ C[i][j] = 0; for (int k=0; k<An; k++){ C[i][j] += A[i][k] * B[k][j]; } } } } int maxind(int k) { m = k+1; for (int i = k+2; i < N; i++){ if (fabs(S[k][i]) > fabs(S[k][m])){ m = i; } } return m; } void update(int k, double t) { ek_prev = e[k]; e[k] = ek_prev + t; if (e[k] < 0) e[k] = 0; if (changed[k] && fabs(ek_prev - e[k]) < JACOBI_UPDATE_TOLERANCE) { changed[k] = false; state = state - 1; } else if ((! changed[k]) && fabs(ek_prev - e[k]) > JACOBI_UPDATE_TOLERANCE) { changed[k] = true; state = state + 1; } } void rotate(int k, int l, int i, int j, double c, double s, bool eigenvectors){ mat1[0][0] = c; mat1[0][1] = -s; mat1[1][0] = s; mat1[1][1] = c; if (eigenvectors){ mat2[0][0] = E[i][k]; mat2[1][0] = E[i][l]; } else { mat2[0][0] = S[k][l]; mat2[1][0] = S[i][j]; } mat_mul(mat3, mat1, 2, 2, mat2, 2, 1); if (eigenvectors){ E[i][k] = mat3[0][0]; E[i][l] = mat3[1][0]; } else{ S[k][l] = mat3[0][0]; S[i][j] = mat3[1][0]; } } void init_jacobi() { E = (double**)malloc(__SIZEOF_POINTER__*N); for (int i=0; i<N; i++){ E[i] = (double*)malloc(__SIZEOF_DOUBLE__*N); for (int j=0; j<N; j++){ E[i][j] = 0; } E[i][i] = 1; } state = N; mat1 = (double**)malloc(__SIZEOF_POINTER__*2); mat1[0] = (double*)malloc(__SIZEOF_DOUBLE__*2); mat1[1] = (double*)malloc(__SIZEOF_DOUBLE__*2); mat2 = (double**)malloc(__SIZEOF_POINTER__*2); mat2[0] = (double*)malloc(__SIZEOF_DOUBLE__*1); mat2[1] = (double*)malloc(__SIZEOF_DOUBLE__*1); mat3 = (double**)malloc(__SIZEOF_POINTER__*2); mat3[0] = (double*)malloc(__SIZEOF_DOUBLE__*1); mat3[1] = (double*)malloc(__SIZEOF_DOUBLE__*1); e = (double*)malloc(__SIZEOF_DOUBLE__*N); ind = (int*)malloc(__SIZEOF_INT__*N); changed = (bool*)malloc(sizeof(bool)*N); for (int k=0; k<N; k++){ ind[k] = maxind(k); e[k] = S[k][k]; changed[k] = true; } } void Jacobi(double **input_matrix, int n, double **eigenvalues, double ***eigenvectors) { N = n; S = input_matrix; init_jacobi(); int k, l, i, m; double p, y, d, r, c, s, t; while(state != 0){ m = 0; for (k=1; k<N-1; k++){ if (fabs(S[k][ind[k]]) > fabs(S[m][ind[m]])){ m = k; } } k = m; l = ind[m]; p = S[k][l]; y = (e[l] - e[k]) / 2.0; d = fabs(y) + sqrt(p*p + y*y); r = sqrt(p*p + d*d); c = d / r; s = p / r; t = (p*p) / d; if (y < 
0.0) { s = -s; t = -t; } S[k][l] = 0.0; update(k, -t); update(l, t); for (i=0; i<k; i++) { rotate(i, k, i, l, c, s, false); } for (i=k+1; i<l; i++){ rotate(k, i, i, l, c, s, false); } for (i=l+1; i<N; i++) { rotate(k, i, l, i, c, s, false); } for (i=0; i<N; i++){ rotate(k, l, i, i, c, s, true); } ind[k] = maxind(k); ind[l] = maxind(l); } *eigenvalues = e; *eigenvectors = E; } /////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////CUDA///////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////// __global__ void mult_cuda(double* res, double* a, double* b, int N, int M, int N1) { int i = blockIdx.x; int j = threadIdx.x; res[i*N1+j] = 0; for(int k = 0; k < M; k++) res[i*N1+j] += a[i*M+k] * b[k*N1+j]; } void matrix_multiply_cuda(double* res, double* a, double* b, int N, int M, int N1){ // Matrices shapes: A = NxM, B = MxN1, res = NxN1 hipLaunchKernelGGL(( mult_cuda), dim3(N), dim3(N1), 0, 0, res, a, b, N, M, N1); hipDeviceSynchronize(); } /////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////SVD////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////// void SVD_and_PCA ( int M, int N, double* D, double** U, double** SIGMA, double** V_T, int *SIGMAm, int *SIGMAn, double** D_HAT, int *K, int retention) { *SIGMAm = N; *SIGMAn = M; // printf("Starting SVD\n"); // Dt is D transpose = NxM double* Dt = empty_matrix(N, M); // Dc is copy of D = MxN double* Dc = empty_matrix(M, N); // DtD is Dt.D = NxN, so are Q and R double* DtD = empty_matrix(N, N); // Compute Dt and Dc for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ Dt[j*M+i] = D[i*N + j]; Dc[i*N+j] = D[i*N + j]; } } // Multiply Dt.D = NxM . MxN = NxN matrix_multiply_cuda(DtD, Dt, Dc, N, M, N); // print_matrix(DtD, N, N, "DtD\0"); // Get Eigenvalues of DtD i.e. Q and R double* Ei = null_matrix(N, N); double* Ei_temp = null_matrix(N, N); double* eigenvalues; double** eigenvectors; // Convert DtD to 2d matrix for Jacobi double** DtDJ = null_matrix_2d(N, N); copy_matrix_to2d(DtDJ, DtD, N, N); // print_matrix_2d(DtDJ, N, N, "DtDJ\0"); // printf("Starting jacobi\n"); Jacobi(DtDJ, N, &eigenvalues, &eigenvectors); // printf("End jacobi\n"); // Convert Eigenvectors from 2d to 1d copy_matrix_from2d(Ei, eigenvectors, N, N); // Sorting and reordering eigenvectors double* eigenvalues1 = new double[N]; for(int i = 0; i < N; i++){ eigenvalues1[i] = eigenvalues[i]; } std::sort(eigenvalues, eigenvalues + N); reverse_array(eigenvalues, N); // for(int i = 0; i < N; i++){ // printf("Eigenvals = %f, \t\t %f\n", eigenvalues[i], eigenvalues1[i]); // } // Update Ei for(int j = 0; j < N; j++){ int p = 0; // Find p i.e. 
index of jth max eigenvalue for(p = 0; p < N; p++){ if(eigenvalues1[p] == eigenvalues[j]) break; } // printf("p=%d, j=%d\n",p,j); for(int i = 0; i < N; i++){ Ei_temp[i*N+j] = Ei[i*N+p]; } } // print_matrix(Ei, N, N, "Ei\0"); // print_matrix(Ei_temp, N, N, "Ei_temp\0"); copy_matrix(Ei, Ei_temp, N, N); // Compute Sigma double* sigma = empty_matrix(M, N); double* sigma_inv = empty_matrix(N, M); double* sigma_vals = new double[N]; for(int i = 0; i < N; i++){ sigma_vals[i] = sqrt(eigenvalues[i]); sigma[i*N+i] = sqrt(eigenvalues[i]); sigma_inv[i*M+i] = (1.0 / sqrt(eigenvalues[i])); } *SIGMA = sigma_vals; double* V_temp = null_matrix(M, M); double* U_temp = null_matrix(N, N); // Compute U for(int i = 0; i < N; i++){ for(int j = 0; j < N; j++){ U_temp[i*N+j] = Ei[i*N+j]; } } *U = U_temp; double* temp = null_matrix(M, N); double* temp2 = null_matrix(M, M); matrix_multiply_cuda(temp, Dc, Ei, M, N, N); matrix_multiply_cuda(temp2, temp, sigma_inv, M, N, M); // Compute V_T for(int i = 0; i < M; i++) for(int j = 0; j < M; j++){ V_temp[j*M+i] = temp2[i*M+j]; } *V_T = V_temp; // Test U = M * V * Sigma-1 // matrix_multiply_cuda(temp, U_temp, sigma, N, N, M); // matrix_multiply_cuda(temp2, temp, V_temp, N, M, M); // printf("Comparison result diff = %f\n", compare_matrices(temp2, Dt, N, M)); /////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////PCA////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////// double ret = double(retention)/100; double sumeigen = 0; for(int i = 0; i < N; i++){ sumeigen += sigma[i*N+i] * sigma[i*N+i]; // printf("Sigma %d is %f\n", i, *(*SIGMA + i)); } double sumret = 0; int k = 0; for(k = 0; k < N; k++){ sumret += (sigma[k*N+k] * sigma[k*N+k]/ sumeigen); if(sumret >= ret) break; } *K = k+1; // printf("K = %d\n", *K); double* W = empty_matrix(N, k+1); for(int i = 0; i < N; i++){ for(int j = 0; j <= k; j++) W[i*(k+1)+j] = U_temp[i*N+j]; } // Print W // print_matrix(W, N, *K, "W\0"); // printf("D-Hat:\n"); double* DHatTemp = null_matrix(M, k+1); matrix_multiply_cuda(DHatTemp, Dc, W, M, N, (k+1)); // for(int i = 0; i < M; i++){ // for(int j = 0; j <= k; j++){ // printf("%f ", DHatTemp[i*(k+1) + j]); // } // printf("\n"); // } *D_HAT = DHatTemp; }
f90faa02af4d6325175a0c61a677fa3b872171e3.cu
#include "lab3_cuda.h" #include <cmath> #include <malloc.h> #include <math.h> #include <algorithm> using namespace std; /////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////Helper Functions/////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////// double compare_matrices(double* A, double* B, int M, int N){ double diff = 0; int p, q; for(int i = 0; i < M; i++) for(int j = 0; j < N; j++){ if(fabs(fabs(A[i*N+j]) - fabs(B[i*N+j])) > diff){ diff = fabs(fabs(A[i*N+j]) - fabs(B[i*N+j])); p = i; q = j; } } return diff; } void reverse_array(double* a, int N){ double* temp = new double[N]; for(int i = 0; i < N; i++) temp[i] = a[i]; for(int i = 0; i < N; i++) a[i] = temp[N-i-1]; } void copy_matrix(double* to, double* from, int n, int m){ for(int i = 0; i < m; i++){ for(int j = 0; j < n; j++) to[i*n+j] = from[i*n+j]; } } double* null_matrix(int m, int n){ double* A; cudaMallocManaged((void**)&A, sizeof(double)*m*n); return A; } double* empty_matrix(int m, int n){ double* A; cudaMallocManaged((void**)&A, sizeof(double)*m*n); for(int i = 0; i < m; i++){ for(int j = 0; j < n; j++) A[i*n+j] = 0; } return A; } double** empty_matrix_2d(int m, int n){ double** A = new double*[m]; for(int i = 0; i < m; i++){ A[i] = new double[n]; for(int j = 0; j < n; j++) A[i][j] = 0; } return A; } double** null_matrix_2d(int m, int n){ double** A = new double*[m]; for(int i = 0; i < m; i++){ A[i] = new double[n]; } return A; } void copy_matrix_to2d(double** to, double* from, int m, int n){ for(int i = 0; i < m; i++){ for(int j = 0; j < n; j++) to[i][j] = from[i*n+j]; } } void copy_matrix_from2d(double* to, double** from, int m, int n){ for(int i = 0; i < m; i++){ for(int j = 0; j < n; j++) to[i*n+j] = from[i][j]; } } double* diagonal_matrix(int n){ double* A; cudaMallocManaged((void**)&A, sizeof(double)*n*n); for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++) A[i*n+j] = (i == j) ? 
1 : 0; } return A; } void matrix_multiply(double* res, double* A, double* B, int N, int M, int N1){ // Matrices shapes: A = NxM, B = MxN1, res = NxN1 for(int i = 0; i < N; i++){ for(int j = 0; j < N1; j++){ res[i*N1+j] = 0; for(int k = 0; k < M; k++) res[i*N1+j] += A[i*M+k] * B[k*N1+j]; } } } void print_matrix(double* A, int M, int N, char* name){ printf("\nMatrix %s\n", name); for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ printf("%f ", A[i*N+j]); } printf("\n"); } } void print_matrix_2d(double** A, int M, int N, char* name){ printf("\nMatrix %s\n", name); for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ printf("%f ", A[i][j]); } printf("\n"); } } /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////Jacobi//////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////// #define TOLERANCE 0.001 #define JACOBI_UPDATE_TOLERANCE 0.001 double **S; //Symmetric matrix (input) double *e; //eigenvalues double **E; //eigenvectors int *ind; bool *changed; int state; int N; double** mat1; double** mat2; double** mat3; double ek_prev; int m; void mat_mul(double** C, double** A, int Am, int An, double** B, int Bm, int Bn){ for (int i=0; i<Am; i++){ for (int j=0; j<Bn; j++){ C[i][j] = 0; for (int k=0; k<An; k++){ C[i][j] += A[i][k] * B[k][j]; } } } } int maxind(int k) { m = k+1; for (int i = k+2; i < N; i++){ if (fabs(S[k][i]) > fabs(S[k][m])){ m = i; } } return m; } void update(int k, double t) { ek_prev = e[k]; e[k] = ek_prev + t; if (e[k] < 0) e[k] = 0; if (changed[k] && fabs(ek_prev - e[k]) < JACOBI_UPDATE_TOLERANCE) { changed[k] = false; state = state - 1; } else if ((! changed[k]) && fabs(ek_prev - e[k]) > JACOBI_UPDATE_TOLERANCE) { changed[k] = true; state = state + 1; } } void rotate(int k, int l, int i, int j, double c, double s, bool eigenvectors){ mat1[0][0] = c; mat1[0][1] = -s; mat1[1][0] = s; mat1[1][1] = c; if (eigenvectors){ mat2[0][0] = E[i][k]; mat2[1][0] = E[i][l]; } else { mat2[0][0] = S[k][l]; mat2[1][0] = S[i][j]; } mat_mul(mat3, mat1, 2, 2, mat2, 2, 1); if (eigenvectors){ E[i][k] = mat3[0][0]; E[i][l] = mat3[1][0]; } else{ S[k][l] = mat3[0][0]; S[i][j] = mat3[1][0]; } } void init_jacobi() { E = (double**)malloc(__SIZEOF_POINTER__*N); for (int i=0; i<N; i++){ E[i] = (double*)malloc(__SIZEOF_DOUBLE__*N); for (int j=0; j<N; j++){ E[i][j] = 0; } E[i][i] = 1; } state = N; mat1 = (double**)malloc(__SIZEOF_POINTER__*2); mat1[0] = (double*)malloc(__SIZEOF_DOUBLE__*2); mat1[1] = (double*)malloc(__SIZEOF_DOUBLE__*2); mat2 = (double**)malloc(__SIZEOF_POINTER__*2); mat2[0] = (double*)malloc(__SIZEOF_DOUBLE__*1); mat2[1] = (double*)malloc(__SIZEOF_DOUBLE__*1); mat3 = (double**)malloc(__SIZEOF_POINTER__*2); mat3[0] = (double*)malloc(__SIZEOF_DOUBLE__*1); mat3[1] = (double*)malloc(__SIZEOF_DOUBLE__*1); e = (double*)malloc(__SIZEOF_DOUBLE__*N); ind = (int*)malloc(__SIZEOF_INT__*N); changed = (bool*)malloc(sizeof(bool)*N); for (int k=0; k<N; k++){ ind[k] = maxind(k); e[k] = S[k][k]; changed[k] = true; } } void Jacobi(double **input_matrix, int n, double **eigenvalues, double ***eigenvectors) { N = n; S = input_matrix; init_jacobi(); int k, l, i, m; double p, y, d, r, c, s, t; while(state != 0){ m = 0; for (k=1; k<N-1; k++){ if (fabs(S[k][ind[k]]) > fabs(S[m][ind[m]])){ m = k; } } k = m; l = ind[m]; p = S[k][l]; y = (e[l] - e[k]) / 2.0; d = fabs(y) + sqrt(p*p + y*y); r = sqrt(p*p + d*d); c = d / r; s = p / r; t = (p*p) / d; if (y < 
0.0) { s = -s; t = -t; } S[k][l] = 0.0; update(k, -t); update(l, t); for (i=0; i<k; i++) { rotate(i, k, i, l, c, s, false); } for (i=k+1; i<l; i++){ rotate(k, i, i, l, c, s, false); } for (i=l+1; i<N; i++) { rotate(k, i, l, i, c, s, false); } for (i=0; i<N; i++){ rotate(k, l, i, i, c, s, true); } ind[k] = maxind(k); ind[l] = maxind(l); } *eigenvalues = e; *eigenvectors = E; } /////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////CUDA///////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////// __global__ void mult_cuda(double* res, double* a, double* b, int N, int M, int N1) { int i = blockIdx.x; int j = threadIdx.x; res[i*N1+j] = 0; for(int k = 0; k < M; k++) res[i*N1+j] += a[i*M+k] * b[k*N1+j]; } void matrix_multiply_cuda(double* res, double* a, double* b, int N, int M, int N1){ // Matrices shapes: A = NxM, B = MxN1, res = NxN1 mult_cuda<<<N, N1>>>(res, a, b, N, M, N1); cudaDeviceSynchronize(); } /////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////SVD////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////// void SVD_and_PCA ( int M, int N, double* D, double** U, double** SIGMA, double** V_T, int *SIGMAm, int *SIGMAn, double** D_HAT, int *K, int retention) { *SIGMAm = N; *SIGMAn = M; // printf("Starting SVD\n"); // Dt is D transpose = NxM double* Dt = empty_matrix(N, M); // Dc is copy of D = MxN double* Dc = empty_matrix(M, N); // DtD is Dt.D = NxN, so are Q and R double* DtD = empty_matrix(N, N); // Compute Dt and Dc for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ Dt[j*M+i] = D[i*N + j]; Dc[i*N+j] = D[i*N + j]; } } // Multiply Dt.D = NxM . MxN = NxN matrix_multiply_cuda(DtD, Dt, Dc, N, M, N); // print_matrix(DtD, N, N, "DtD\0"); // Get Eigenvalues of DtD i.e. Q and R double* Ei = null_matrix(N, N); double* Ei_temp = null_matrix(N, N); double* eigenvalues; double** eigenvectors; // Convert DtD to 2d matrix for Jacobi double** DtDJ = null_matrix_2d(N, N); copy_matrix_to2d(DtDJ, DtD, N, N); // print_matrix_2d(DtDJ, N, N, "DtDJ\0"); // printf("Starting jacobi\n"); Jacobi(DtDJ, N, &eigenvalues, &eigenvectors); // printf("End jacobi\n"); // Convert Eigenvectors from 2d to 1d copy_matrix_from2d(Ei, eigenvectors, N, N); // Sorting and reordering eigenvectors double* eigenvalues1 = new double[N]; for(int i = 0; i < N; i++){ eigenvalues1[i] = eigenvalues[i]; } std::sort(eigenvalues, eigenvalues + N); reverse_array(eigenvalues, N); // for(int i = 0; i < N; i++){ // printf("Eigenvals = %f, \t\t %f\n", eigenvalues[i], eigenvalues1[i]); // } // Update Ei for(int j = 0; j < N; j++){ int p = 0; // Find p i.e. 
index of jth max eigenvalue for(p = 0; p < N; p++){ if(eigenvalues1[p] == eigenvalues[j]) break; } // printf("p=%d, j=%d\n",p,j); for(int i = 0; i < N; i++){ Ei_temp[i*N+j] = Ei[i*N+p]; } } // print_matrix(Ei, N, N, "Ei\0"); // print_matrix(Ei_temp, N, N, "Ei_temp\0"); copy_matrix(Ei, Ei_temp, N, N); // Compute Sigma double* sigma = empty_matrix(M, N); double* sigma_inv = empty_matrix(N, M); double* sigma_vals = new double[N]; for(int i = 0; i < N; i++){ sigma_vals[i] = sqrt(eigenvalues[i]); sigma[i*N+i] = sqrt(eigenvalues[i]); sigma_inv[i*M+i] = (1.0 / sqrt(eigenvalues[i])); } *SIGMA = sigma_vals; double* V_temp = null_matrix(M, M); double* U_temp = null_matrix(N, N); // Compute U for(int i = 0; i < N; i++){ for(int j = 0; j < N; j++){ U_temp[i*N+j] = Ei[i*N+j]; } } *U = U_temp; double* temp = null_matrix(M, N); double* temp2 = null_matrix(M, M); matrix_multiply_cuda(temp, Dc, Ei, M, N, N); matrix_multiply_cuda(temp2, temp, sigma_inv, M, N, M); // Compute V_T for(int i = 0; i < M; i++) for(int j = 0; j < M; j++){ V_temp[j*M+i] = temp2[i*M+j]; } *V_T = V_temp; // Test U = M * V * Sigma-1 // matrix_multiply_cuda(temp, U_temp, sigma, N, N, M); // matrix_multiply_cuda(temp2, temp, V_temp, N, M, M); // printf("Comparison result diff = %f\n", compare_matrices(temp2, Dt, N, M)); /////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////PCA////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////// double ret = double(retention)/100; double sumeigen = 0; for(int i = 0; i < N; i++){ sumeigen += sigma[i*N+i] * sigma[i*N+i]; // printf("Sigma %d is %f\n", i, *(*SIGMA + i)); } double sumret = 0; int k = 0; for(k = 0; k < N; k++){ sumret += (sigma[k*N+k] * sigma[k*N+k]/ sumeigen); if(sumret >= ret) break; } *K = k+1; // printf("K = %d\n", *K); double* W = empty_matrix(N, k+1); for(int i = 0; i < N; i++){ for(int j = 0; j <= k; j++) W[i*(k+1)+j] = U_temp[i*N+j]; } // Print W // print_matrix(W, N, *K, "W\0"); // printf("D-Hat:\n"); double* DHatTemp = null_matrix(M, k+1); matrix_multiply_cuda(DHatTemp, Dc, W, M, N, (k+1)); // for(int i = 0; i < M; i++){ // for(int j = 0; j <= k; j++){ // printf("%f ", DHatTemp[i*(k+1) + j]); // } // printf("\n"); // } *D_HAT = DHatTemp; }
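To make the output contract of SVD_and_PCA concrete, here is a minimal hypothetical driver; it is not part of the assignment code, and it assumes D is an M x N row-major matrix already loaded by the caller. All output buffers are allocated inside the routine, and retention is an integer percentage.

// Hypothetical driver sketch for SVD_and_PCA.
void pca_example(int M, int N, double* D)   // D: M x N row-major, already populated
{
    double *U = nullptr, *SIGMA = nullptr, *V_T = nullptr, *D_HAT = nullptr;
    int SIGMAm = 0, SIGMAn = 0, K = 0;
    SVD_and_PCA(M, N, D, &U, &SIGMA, &V_T, &SIGMAm, &SIGMAn, &D_HAT, &K, /*retention=*/90);
    // On return: SIGMA holds the N singular values in descending order, U the N x N
    // eigenvector matrix of D^T D, V_T the M x M right factor, K the number of components
    // needed to retain ~90% of the squared-singular-value mass, and D_HAT the M x K
    // projection of D onto the top-K eigenvectors.
}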
5ced3032f852028252904f42beed61d4183e920d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <string> #include <vector> #include "MemAllocator.h" #include "threadpool.h" using namespace std; #pragma comment(lib, "pthreadVC2.lib") threadpool_t *threadPool; const int threadCount = 1; const int queueSize = 51200000; const int blockSize = 64; MemAllocator *memAllocator; int *gTop; struct KernelParameter { KernelParameter(StackElement _elem, int _r1, int _r2) { elem = _elem; r1 = _r1; r2 = _r2; } ~KernelParameter() { } StackElement elem; int r1; int r2; }; struct ThreadParameter { ThreadParameter(StackElement _elem, Object *_d_obj=NULL, int *_winL=NULL, int *_winR=NULL) { elem = _elem; d_obj = _d_obj; winL = _winL; winR = _winR; } ~ThreadParameter() { } StackElement elem; Object *d_obj; int *winL; int *winR; }; #ifdef VECTOR const string DBPath = "I:\\colors_112_112682.ascii"; const int DBSize = 112682; __host__ Object *Read(istream& in) { string cmdLine; double *x = new double[dimension]; for (int i=0; i<dimension; ++i) { in>>cmdLine; x[i] = atof(cmdLine.c_str()); } Object *obj = new Object(x); delete[] x; return obj; } #endif #ifdef STRING const string DBPath = "I:\\English.dic"; const int DBSize = 69069; __host__ Object *Read(istream& in) { string cmdLine; getline(in, cmdLine); Object *obj = new Object(cmdLine.c_str(), (int)cmdLine.size()); return obj; } #endif __host__ void QuickJoinWinLaunch(void *parameter) { int bitIndex = memAllocator->AllocateBitmap(); if (bitIndex == -1) { cout<<"Resource allocation failed!"<<endl; } vector<hipStream_t> streams(1); checkCudaErrors(hipStreamCreateWithFlags(&streams[0], hipStreamNonBlocking)); Stack *d_stack, *h_stack; memAllocator->AllocateStack(bitIndex, &d_stack, &h_stack); checkCudaErrors(hipMemcpyAsync(d_stack, gTop, sizeof(int), hipMemcpyHostToDevice, streams[0])); Offset *d_offsets; memAllocator->AllocateOffset(bitIndex, &d_offsets); checkCudaErrors(hipMemsetAsync(d_offsets, 0, sizeof(Offset)*2, streams[0])); int size1 = (((ThreadParameter *)parameter)->elem).endL + 1; int size2 = (((ThreadParameter *)parameter)->elem).endR + 1; int size = size1 + size2; int r1 = rand() % size; int r2 = rand() % size; while (r1 == r2) { r2 = rand() % size; } int *outL, *outR, *posL, *posR; memAllocator->AllocateInOut(bitIndex, &posL, &outL, &posR, &outR); checkCudaErrors(hipMemcpyAsync(posL, ((ThreadParameter *)parameter)->winL, sizeof(int)*size1, hipMemcpyHostToDevice, streams[0])); checkCudaErrors(hipMemcpyAsync(posR, ((ThreadParameter *)parameter)->winR, sizeof(int)*size2, hipMemcpyHostToDevice, streams[0])); delete[] ((ThreadParameter *)parameter)->winL; delete[] ((ThreadParameter *)parameter)->winR; vector<KernelParameter *> paras; paras.push_back(new KernelParameter(StackElement(((ThreadParameter *)parameter)->elem), r1, r2)); Object *d_objs = ((ThreadParameter *)parameter)->d_obj; delete parameter; vector<SizePair> sizePairVec; sizePairVec.push_back(SizePair(0, max(size1, size2))); int **winLs1 = new int*[1]; int **winRs1 = new int*[1]; int **winLs2 = new int*[1]; int **winRs2 = new int*[1]; memAllocator->AllocateWin(bitIndex, sizePairVec, winLs1, winRs1, winLs2, winRs2); int gridSize = (int)ceil(size / ((float)blockSize)); hipLaunchKernelGGL(( QuickJoinWin), dim3(gridSize), dim3(blockSize), 0, streams[0], d_objs, d_stack, posL, posR, outL, outR, d_offsets, d_offsets+1, 0, (paras[0]->elem).endL, 0, (paras[0]->elem).endR, r1, r2, winLs1[0], winRs1[0], winLs2[0], winRs2[0]); sizePairVec.resize(0); delete[] winLs1; 
delete[] winRs1; delete[] winLs2; delete[] winRs2; int turn = 0; int *in1 = posL; int *in2 = posR; int *out1 = outL; int *out2 = outR; while (1) { int stackSize = (min(streams.size()*4, MaxStackSize) * 6 + 1) * sizeof(int); for (unsigned int i=0; i<streams.size(); ++i) { checkCudaErrors(hipStreamSynchronize(streams[i])); checkCudaErrors(hipStreamDestroy(streams[i])); delete paras[i]; } streams.resize(0); paras.resize(0); hipStream_t s; streams.push_back(s); checkCudaErrors(hipStreamCreateWithFlags(&streams[0], hipStreamNonBlocking)); checkCudaErrors(hipMemcpyAsync(h_stack, d_stack, stackSize, hipMemcpyDeviceToHost, streams[0])); checkCudaErrors(hipMemcpyAsync(d_stack, gTop, sizeof(int), hipMemcpyHostToDevice, streams[0])); hipLaunchKernelGGL(( Synchronize), dim3(0), dim3(0), 0, streams[0], ); checkCudaErrors(hipStreamSynchronize(streams[0])); checkCudaErrors(hipStreamDestroy(streams[0])); streams.resize(0); StackElement elem; if (!h_stack->Top(elem)) { break; } vector<hipStream_t> winStreams; while (h_stack->Pop(elem)) { if (elem.posL==0 && elem.posR==0) { hipStream_t s; streams.push_back(s); checkCudaErrors(hipStreamCreateWithFlags(&streams[streams.size()-1], hipStreamNonBlocking)); long long size1 = elem.endL - elem.startL + 1; long long size2 = elem.endR - elem.startR + 1; if (size1*size2 <= constSmallNumber) { r1 = 0; r2 = 0; } else { int size = size1 + size2; r1 = rand() % size; r2 = rand() % size; while (r1 == r2) { r2 = rand() % size; } } paras.push_back(new KernelParameter(elem, r1, r2)); sizePairVec.push_back(SizePair(streams.size()-1, max(size1, size2))); } else { long long size1 = elem.endL + 1; long long size2 = elem.endR + 1; long long mulSize = size1 * size2; if (mulSize <= constSmallNumber) { hipStream_t s; winStreams.push_back(s); checkCudaErrors(hipStreamCreateWithFlags(&winStreams[winStreams.size()-1], hipStreamNonBlocking)); gridSize = (int)ceil(mulSize / (float)blockSize); hipLaunchKernelGGL(( NestedLoopWin), dim3(gridSize), dim3(blockSize), 0, winStreams[winStreams.size()-1], d_objs, (int *)elem.posL, 0, elem.endL, (int *)elem.posR, 0, elem.endR); } else { int *winL = new int[size1]; int *winR = new int[size2]; hipStream_t s; checkCudaErrors(hipStreamCreateWithFlags(&s, hipStreamNonBlocking)); checkCudaErrors(hipMemcpyAsync(winL, (int *)elem.posL, sizeof(int)*size1, hipMemcpyDeviceToHost, s)); checkCudaErrors(hipMemcpyAsync(winR, (int *)elem.posR, sizeof(int)*size2, hipMemcpyDeviceToHost, s)); hipLaunchKernelGGL(( Synchronize), dim3(0), dim3(0), 0, s, ); checkCudaErrors(hipStreamSynchronize(s)); checkCudaErrors(hipStreamDestroy(s)); if (threadpool_add(threadPool, &QuickJoinWinLaunch, new ThreadParameter(elem, d_objs, winL, winR))) { delete[] winL; delete[] winR; cout<<"Failed in thread creation!"<<endl; return; } } } } if (streams.size() > 0) { checkCudaErrors(hipMemsetAsync(d_offsets, 0, sizeof(Offset)*streams.size()*2, streams[0])); } for (unsigned int i=0; i<winStreams.size(); ++i) { checkCudaErrors(hipStreamSynchronize(winStreams[i])); checkCudaErrors(hipStreamDestroy(winStreams[i])); } winLs1 = new int*[streams.size()]; winRs1 = new int*[streams.size()]; winLs2 = new int*[streams.size()]; winRs2 = new int*[streams.size()]; memAllocator->AllocateWin(bitIndex, sizePairVec, winLs1, winRs1, winLs2, winRs2); ++turn; in1 = turn%2 ? outL : posL; in2 = turn%2 ? outR : posR; out1 = turn%2 ? posL : outL; out2 = turn%2 ? 
posR : outR; for (unsigned int i=0; i<streams.size(); ++i) { long long size1 = (paras[i]->elem).endL - (paras[i]->elem).startL + 1; long long size2 = (paras[i]->elem).endR - (paras[i]->elem).startR + 1; long long mulSize = size1 * size2; if (mulSize <= constSmallNumber) { gridSize = (int)ceil(mulSize / (float)blockSize); hipLaunchKernelGGL(( NestedLoopWin), dim3(gridSize), dim3(blockSize), 0, streams[i], d_objs, in1, (paras[i]->elem).startL, (paras[i]->elem).endL, in2, (paras[i]->elem).startR, (paras[i]->elem).endR); } else { gridSize = (int)ceil((size1 + size2) / (float)blockSize); hipLaunchKernelGGL(( QuickJoinWin), dim3(gridSize), dim3(blockSize), 0, streams[i], d_objs, d_stack, in1, in2, out1, out2, d_offsets+i*2, d_offsets+1+i*2, (paras[i]->elem).startL, (paras[i]->elem).endL, (paras[i]->elem).startR, (paras[i]->elem).endR, paras[i]->r1, paras[i]->r2, winLs1[i], winRs1[i], winLs2[i], winRs2[i]); } } sizePairVec.resize(0); delete[] winLs1; delete[] winRs1; delete[] winLs2; delete[] winRs2; } memAllocator->DeallocateBitmap(bitIndex); } __host__ void QuickJoinLaunch(Object *h_objs, Object *d_objs, int amount) { threadPool = threadpool_create(threadCount, queueSize); memAllocator = new MemAllocator; int bitIndex = memAllocator->AllocateBitmap(); if (bitIndex == -1) { cout<<"Resource allocation failed!"<<endl; } StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkStartTimer(&timer); vector<hipStream_t> streams(1); checkCudaErrors(hipStreamCreateWithFlags(&streams[0], hipStreamNonBlocking)); int *i_idx, *o_idx; memAllocator->AllocateInOut(bitIndex, &i_idx, &o_idx); int *h_idx = new int[amount]; for (int i=0; i<amount; ++i) { h_idx[i] = i; } checkCudaErrors(hipMemcpyAsync(i_idx, h_idx, sizeof(int)*amount, hipMemcpyHostToDevice, streams[0])); delete[] h_idx; Stack *d_stack, *h_stack; memAllocator->AllocateStack(bitIndex, &d_stack, &h_stack); checkCudaErrors(hipMemcpyAsync(d_stack, gTop, sizeof(int), hipMemcpyHostToDevice, streams[0])); Offset *d_offsets; memAllocator->AllocateOffset(bitIndex, &d_offsets); checkCudaErrors(hipMemsetAsync(d_offsets, 0, sizeof(Offset), streams[0])); int r1 = rand() % amount; int r2 = rand() % amount; while (r1 == r2) { r2 = rand() % amount; } vector<KernelParameter *> paras; paras.push_back(new KernelParameter(StackElement(0, amount-1), r1, r2)); vector<SizePair> sizePairVec; sizePairVec.push_back(SizePair(0, amount)); int **winLs = new int*[1]; int **winRs = new int*[1]; memAllocator->AllocateWin(bitIndex, sizePairVec, winLs, winRs); int gridSize = (int)ceil(((float)amount)/blockSize); hipLaunchKernelGGL(( QuickJoin), dim3(gridSize), dim3(blockSize), 0, streams[0], d_objs, d_stack, i_idx, o_idx, d_offsets, 0, amount-1, r1, r2, winLs[0], winRs[0]); sizePairVec.resize(0); delete[] winLs; delete[] winRs; int turn = 0; int *in = i_idx; int *out = o_idx; while (1) { int stackSize = (min(streams.size()*3, MaxStackSize) * 6 + 1) * sizeof(int); for (unsigned int i=0; i<streams.size(); ++i) { checkCudaErrors(hipStreamSynchronize(streams[i])); checkCudaErrors(hipStreamDestroy(streams[i])); delete paras[i]; } streams.resize(0); paras.resize(0); hipStream_t s; streams.push_back(s); checkCudaErrors(hipStreamCreateWithFlags(&streams[0], hipStreamNonBlocking)); checkCudaErrors(hipMemcpyAsync(h_stack, d_stack, stackSize, hipMemcpyDeviceToHost, streams[0])); checkCudaErrors(hipMemcpyAsync(d_stack, gTop, sizeof(int), hipMemcpyHostToDevice, streams[0])); hipLaunchKernelGGL(( Synchronize), dim3(0), dim3(0), 0, streams[0], ); checkCudaErrors(hipStreamSynchronize(streams[0])); 
checkCudaErrors(hipStreamDestroy(streams[0])); streams.resize(0); StackElement elem; if (!h_stack->Top(elem)) { break; } vector<hipStream_t> winStreams; while (h_stack->Pop(elem)) { if (elem.startL==0 && elem.endL!=-1 && elem.startR==0 && elem.endR!=-1) { long long size1 = elem.endL + 1; long long size2 = elem.endR + 1; long long mulSize = size1 * size2; if (mulSize <= constSmallNumber) { hipStream_t s; winStreams.push_back(s); checkCudaErrors(hipStreamCreateWithFlags(&winStreams[winStreams.size()-1], hipStreamNonBlocking)); gridSize = (int)ceil(mulSize / ((float)blockSize)); hipLaunchKernelGGL(( NestedLoopWin), dim3(gridSize), dim3(blockSize), 0, winStreams[winStreams.size()-1], d_objs, (int *)elem.posL, 0, elem.endL, (int *)elem.posR, 0, elem.endR); } else { int *winL = new int[size1]; int *winR = new int[size2]; hipStream_t s; checkCudaErrors(hipStreamCreateWithFlags(&s, hipStreamNonBlocking)); checkCudaErrors(hipMemcpyAsync(winL, (int *)elem.posL, sizeof(int)*size1, hipMemcpyDeviceToHost, s)); checkCudaErrors(hipMemcpyAsync(winR, (int *)elem.posR, sizeof(int)*size2, hipMemcpyDeviceToHost, s)); hipLaunchKernelGGL(( Synchronize), dim3(0), dim3(0), 0, s, ); checkCudaErrors(hipStreamSynchronize(s)); checkCudaErrors(hipStreamDestroy(s)); if (threadpool_add(threadPool, &QuickJoinWinLaunch, new ThreadParameter(elem, d_objs, winL, winR))) { delete[] winL; delete[] winR; cout<<"Failed in thread creation!"<<endl; return; } } } else { hipStream_t s; streams.push_back(s); checkCudaErrors(hipStreamCreateWithFlags(&streams[streams.size()-1], hipStreamNonBlocking)); int size = elem.posR - elem.posL + 1; r1 = rand() % size; r2 = rand() % size; while (r1 == r2) { r2 = rand() % size; } paras.push_back(new KernelParameter(elem, r1, r2)); sizePairVec.push_back(SizePair(streams.size()-1, size)); } } if (streams.size() > 0) { checkCudaErrors(hipMemsetAsync(d_offsets, 0, sizeof(Offset)*streams.size(), streams[0])); } for (unsigned int i=0; i<winStreams.size(); ++i) { checkCudaErrors(hipStreamSynchronize(winStreams[i])); checkCudaErrors(hipStreamDestroy(winStreams[i])); } winLs = new int*[streams.size()]; winRs = new int*[streams.size()]; memAllocator->AllocateWin(bitIndex, sizePairVec, winLs, winRs); ++turn; in = turn%2 ? o_idx : i_idx; out = turn%2 ? 
i_idx : o_idx; for (unsigned int i=0; i<streams.size(); ++i) { long long size = (paras[i]->elem).posR - (paras[i]->elem).posL + 1; long long mulSize = size * size; if ((mulSize-size)>>1 <= constSmallNumber) { gridSize = (int)ceil(mulSize / (float)blockSize); hipLaunchKernelGGL(( NestedLoop), dim3(gridSize), dim3(blockSize), 0, streams[i], d_objs, in, (paras[i]->elem).posL, (paras[i]->elem).posR); } else { gridSize = (int)ceil(size / (float)blockSize); hipLaunchKernelGGL(( QuickJoin), dim3(gridSize), dim3(blockSize), 0, streams[i], d_objs, d_stack, in, out, d_offsets+i, (paras[i]->elem).posL, (paras[i]->elem).posR, paras[i]->r1, paras[i]->r2, winLs[i], winRs[i]); } } sizePairVec.resize(0); delete[] winLs; delete[] winRs; } memAllocator->DeallocateBitmap(bitIndex); while (!threadpool_destroy_ready(threadPool)) { } sdkStopTimer(&timer); cout<<"Processing time: "<<sdkGetTimerValue(&timer)<<" (ms)"<<endl; sdkDeleteTimer(&timer); delete memAllocator; threadpool_destroy(threadPool); } __host__ int main(int argc, char** argv) { int amount = DBSize; Object *h_objs = new Object[amount]; ifstream fin(DBPath.c_str()); for (int i=0; i<amount; ++i) { Object *obj = Read(fin); h_objs[i] = *obj; delete obj; } fin.close(); findCudaDevice(argc, (const char **)argv); checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferL1)); checkCudaErrors(hipHostMalloc(&gTop, sizeof(int))); *gTop = -1; Object *d_objs; checkCudaErrors(hipMalloc(&d_objs, sizeof(Object)*amount)); for (int i=0; i<amount; ++i) { #ifdef VECTOR double *tmp; checkCudaErrors(hipMalloc(&tmp, sizeof(double)*dimension)); checkCudaErrors(hipMemcpy(tmp, h_objs[i].x, sizeof(double)*dimension, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(&(d_objs[i].x), &tmp, sizeof(double *), hipMemcpyHostToDevice)); #endif #ifdef STRING char *tmp; checkCudaErrors(hipMalloc(&tmp, sizeof(char)*h_objs[i].length)); checkCudaErrors(hipMemcpy(tmp, h_objs[i].x, sizeof(char)*h_objs[i].length, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(&(d_objs[i].x), &tmp, sizeof(char *), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(&(d_objs[i].length), &(h_objs[i].length), sizeof(int), hipMemcpyHostToDevice)); #endif } QuickJoinLaunch(h_objs, d_objs, amount); delete[] h_objs; checkCudaErrors(hipFree(d_objs)); checkCudaErrors(hipHostFree(gTop)); hipDeviceReset(); exit(0); }
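Editor's note: the hipified QuickJoin driver above resets a device-side stack top before every pass by asynchronously copying a pinned host sentinel (gTop, allocated with hipHostMalloc and initialised to -1) onto the device over a non-blocking stream, launching the pass, and synchronising before the host reads the stack back. Below is a minimal CUDA sketch of that pinned-reset-then-launch pattern; the PushOne kernel, the variable names, and the three-pass loop are illustrative stand-ins, not code from the file.

#include <cuda_runtime.h>
#include <cstdio>

// Toy stand-in for a kernel that pushes one item onto a device-side stack counter.
__global__ void PushOne(int *top) { if (threadIdx.x == 0) atomicAdd(top, 1); }

int main() {
    int *hostTop = nullptr, *devTop = nullptr;
    cudaMallocHost((void **)&hostTop, sizeof(int));  // pinned, so the async copy is truly asynchronous
    *hostTop = -1;                                   // same "empty stack" sentinel the driver keeps in gTop
    cudaMalloc((void **)&devTop, sizeof(int));

    cudaStream_t s;
    cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
    for (int pass = 0; pass < 3; ++pass) {
        cudaMemcpyAsync(devTop, hostTop, sizeof(int), cudaMemcpyHostToDevice, s);  // reset before the pass
        PushOne<<<1, 32, 0, s>>>(devTop);
        cudaStreamSynchronize(s);                    // host inspects results between passes
    }
    int result = 0;
    cudaMemcpy(&result, devTop, sizeof(int), cudaMemcpyDeviceToHost);
    printf("top after last pass: %d\n", result);     // -1 reset plus one push: prints 0
    cudaStreamDestroy(s);
    cudaFree(devTop);
    cudaFreeHost(hostTop);
    return 0;
}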
5ced3032f852028252904f42beed61d4183e920d.cu
#include <iostream> #include <fstream> #include <string> #include <vector> #include "MemAllocator.h" #include "threadpool.h" using namespace std; #pragma comment(lib, "pthreadVC2.lib") threadpool_t *threadPool; const int threadCount = 1; const int queueSize = 51200000; const int blockSize = 64; MemAllocator *memAllocator; int *gTop; struct KernelParameter { KernelParameter(StackElement _elem, int _r1, int _r2) { elem = _elem; r1 = _r1; r2 = _r2; } ~KernelParameter() { } StackElement elem; int r1; int r2; }; struct ThreadParameter { ThreadParameter(StackElement _elem, Object *_d_obj=NULL, int *_winL=NULL, int *_winR=NULL) { elem = _elem; d_obj = _d_obj; winL = _winL; winR = _winR; } ~ThreadParameter() { } StackElement elem; Object *d_obj; int *winL; int *winR; }; #ifdef VECTOR const string DBPath = "I:\\colors_112_112682.ascii"; const int DBSize = 112682; __host__ Object *Read(istream& in) { string cmdLine; double *x = new double[dimension]; for (int i=0; i<dimension; ++i) { in>>cmdLine; x[i] = atof(cmdLine.c_str()); } Object *obj = new Object(x); delete[] x; return obj; } #endif #ifdef STRING const string DBPath = "I:\\English.dic"; const int DBSize = 69069; __host__ Object *Read(istream& in) { string cmdLine; getline(in, cmdLine); Object *obj = new Object(cmdLine.c_str(), (int)cmdLine.size()); return obj; } #endif __host__ void QuickJoinWinLaunch(void *parameter) { int bitIndex = memAllocator->AllocateBitmap(); if (bitIndex == -1) { cout<<"Resource allocation failed!"<<endl; } vector<cudaStream_t> streams(1); checkCudaErrors(cudaStreamCreateWithFlags(&streams[0], cudaStreamNonBlocking)); Stack *d_stack, *h_stack; memAllocator->AllocateStack(bitIndex, &d_stack, &h_stack); checkCudaErrors(cudaMemcpyAsync(d_stack, gTop, sizeof(int), cudaMemcpyHostToDevice, streams[0])); Offset *d_offsets; memAllocator->AllocateOffset(bitIndex, &d_offsets); checkCudaErrors(cudaMemsetAsync(d_offsets, 0, sizeof(Offset)*2, streams[0])); int size1 = (((ThreadParameter *)parameter)->elem).endL + 1; int size2 = (((ThreadParameter *)parameter)->elem).endR + 1; int size = size1 + size2; int r1 = rand() % size; int r2 = rand() % size; while (r1 == r2) { r2 = rand() % size; } int *outL, *outR, *posL, *posR; memAllocator->AllocateInOut(bitIndex, &posL, &outL, &posR, &outR); checkCudaErrors(cudaMemcpyAsync(posL, ((ThreadParameter *)parameter)->winL, sizeof(int)*size1, cudaMemcpyHostToDevice, streams[0])); checkCudaErrors(cudaMemcpyAsync(posR, ((ThreadParameter *)parameter)->winR, sizeof(int)*size2, cudaMemcpyHostToDevice, streams[0])); delete[] ((ThreadParameter *)parameter)->winL; delete[] ((ThreadParameter *)parameter)->winR; vector<KernelParameter *> paras; paras.push_back(new KernelParameter(StackElement(((ThreadParameter *)parameter)->elem), r1, r2)); Object *d_objs = ((ThreadParameter *)parameter)->d_obj; delete parameter; vector<SizePair> sizePairVec; sizePairVec.push_back(SizePair(0, max(size1, size2))); int **winLs1 = new int*[1]; int **winRs1 = new int*[1]; int **winLs2 = new int*[1]; int **winRs2 = new int*[1]; memAllocator->AllocateWin(bitIndex, sizePairVec, winLs1, winRs1, winLs2, winRs2); int gridSize = (int)ceil(size / ((float)blockSize)); QuickJoinWin<<<gridSize, blockSize, 0, streams[0]>>>(d_objs, d_stack, posL, posR, outL, outR, d_offsets, d_offsets+1, 0, (paras[0]->elem).endL, 0, (paras[0]->elem).endR, r1, r2, winLs1[0], winRs1[0], winLs2[0], winRs2[0]); sizePairVec.resize(0); delete[] winLs1; delete[] winRs1; delete[] winLs2; delete[] winRs2; int turn = 0; int *in1 = posL; int *in2 = posR; int *out1 = 
outL; int *out2 = outR; while (1) { int stackSize = (min(streams.size()*4, MaxStackSize) * 6 + 1) * sizeof(int); for (unsigned int i=0; i<streams.size(); ++i) { checkCudaErrors(cudaStreamSynchronize(streams[i])); checkCudaErrors(cudaStreamDestroy(streams[i])); delete paras[i]; } streams.resize(0); paras.resize(0); cudaStream_t s; streams.push_back(s); checkCudaErrors(cudaStreamCreateWithFlags(&streams[0], cudaStreamNonBlocking)); checkCudaErrors(cudaMemcpyAsync(h_stack, d_stack, stackSize, cudaMemcpyDeviceToHost, streams[0])); checkCudaErrors(cudaMemcpyAsync(d_stack, gTop, sizeof(int), cudaMemcpyHostToDevice, streams[0])); Synchronize<<<0, 0, 0, streams[0]>>>(); checkCudaErrors(cudaStreamSynchronize(streams[0])); checkCudaErrors(cudaStreamDestroy(streams[0])); streams.resize(0); StackElement elem; if (!h_stack->Top(elem)) { break; } vector<cudaStream_t> winStreams; while (h_stack->Pop(elem)) { if (elem.posL==0 && elem.posR==0) { cudaStream_t s; streams.push_back(s); checkCudaErrors(cudaStreamCreateWithFlags(&streams[streams.size()-1], cudaStreamNonBlocking)); long long size1 = elem.endL - elem.startL + 1; long long size2 = elem.endR - elem.startR + 1; if (size1*size2 <= constSmallNumber) { r1 = 0; r2 = 0; } else { int size = size1 + size2; r1 = rand() % size; r2 = rand() % size; while (r1 == r2) { r2 = rand() % size; } } paras.push_back(new KernelParameter(elem, r1, r2)); sizePairVec.push_back(SizePair(streams.size()-1, max(size1, size2))); } else { long long size1 = elem.endL + 1; long long size2 = elem.endR + 1; long long mulSize = size1 * size2; if (mulSize <= constSmallNumber) { cudaStream_t s; winStreams.push_back(s); checkCudaErrors(cudaStreamCreateWithFlags(&winStreams[winStreams.size()-1], cudaStreamNonBlocking)); gridSize = (int)ceil(mulSize / (float)blockSize); NestedLoopWin<<<gridSize, blockSize, 0, winStreams[winStreams.size()-1]>>>(d_objs, (int *)elem.posL, 0, elem.endL, (int *)elem.posR, 0, elem.endR); } else { int *winL = new int[size1]; int *winR = new int[size2]; cudaStream_t s; checkCudaErrors(cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking)); checkCudaErrors(cudaMemcpyAsync(winL, (int *)elem.posL, sizeof(int)*size1, cudaMemcpyDeviceToHost, s)); checkCudaErrors(cudaMemcpyAsync(winR, (int *)elem.posR, sizeof(int)*size2, cudaMemcpyDeviceToHost, s)); Synchronize<<<0, 0, 0, s>>>(); checkCudaErrors(cudaStreamSynchronize(s)); checkCudaErrors(cudaStreamDestroy(s)); if (threadpool_add(threadPool, &QuickJoinWinLaunch, new ThreadParameter(elem, d_objs, winL, winR))) { delete[] winL; delete[] winR; cout<<"Failed in thread creation!"<<endl; return; } } } } if (streams.size() > 0) { checkCudaErrors(cudaMemsetAsync(d_offsets, 0, sizeof(Offset)*streams.size()*2, streams[0])); } for (unsigned int i=0; i<winStreams.size(); ++i) { checkCudaErrors(cudaStreamSynchronize(winStreams[i])); checkCudaErrors(cudaStreamDestroy(winStreams[i])); } winLs1 = new int*[streams.size()]; winRs1 = new int*[streams.size()]; winLs2 = new int*[streams.size()]; winRs2 = new int*[streams.size()]; memAllocator->AllocateWin(bitIndex, sizePairVec, winLs1, winRs1, winLs2, winRs2); ++turn; in1 = turn%2 ? outL : posL; in2 = turn%2 ? outR : posR; out1 = turn%2 ? posL : outL; out2 = turn%2 ? 
posR : outR; for (unsigned int i=0; i<streams.size(); ++i) { long long size1 = (paras[i]->elem).endL - (paras[i]->elem).startL + 1; long long size2 = (paras[i]->elem).endR - (paras[i]->elem).startR + 1; long long mulSize = size1 * size2; if (mulSize <= constSmallNumber) { gridSize = (int)ceil(mulSize / (float)blockSize); NestedLoopWin<<<gridSize, blockSize, 0, streams[i]>>>(d_objs, in1, (paras[i]->elem).startL, (paras[i]->elem).endL, in2, (paras[i]->elem).startR, (paras[i]->elem).endR); } else { gridSize = (int)ceil((size1 + size2) / (float)blockSize); QuickJoinWin<<<gridSize, blockSize, 0, streams[i]>>>(d_objs, d_stack, in1, in2, out1, out2, d_offsets+i*2, d_offsets+1+i*2, (paras[i]->elem).startL, (paras[i]->elem).endL, (paras[i]->elem).startR, (paras[i]->elem).endR, paras[i]->r1, paras[i]->r2, winLs1[i], winRs1[i], winLs2[i], winRs2[i]); } } sizePairVec.resize(0); delete[] winLs1; delete[] winRs1; delete[] winLs2; delete[] winRs2; } memAllocator->DeallocateBitmap(bitIndex); } __host__ void QuickJoinLaunch(Object *h_objs, Object *d_objs, int amount) { threadPool = threadpool_create(threadCount, queueSize); memAllocator = new MemAllocator; int bitIndex = memAllocator->AllocateBitmap(); if (bitIndex == -1) { cout<<"Resource allocation failed!"<<endl; } StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkStartTimer(&timer); vector<cudaStream_t> streams(1); checkCudaErrors(cudaStreamCreateWithFlags(&streams[0], cudaStreamNonBlocking)); int *i_idx, *o_idx; memAllocator->AllocateInOut(bitIndex, &i_idx, &o_idx); int *h_idx = new int[amount]; for (int i=0; i<amount; ++i) { h_idx[i] = i; } checkCudaErrors(cudaMemcpyAsync(i_idx, h_idx, sizeof(int)*amount, cudaMemcpyHostToDevice, streams[0])); delete[] h_idx; Stack *d_stack, *h_stack; memAllocator->AllocateStack(bitIndex, &d_stack, &h_stack); checkCudaErrors(cudaMemcpyAsync(d_stack, gTop, sizeof(int), cudaMemcpyHostToDevice, streams[0])); Offset *d_offsets; memAllocator->AllocateOffset(bitIndex, &d_offsets); checkCudaErrors(cudaMemsetAsync(d_offsets, 0, sizeof(Offset), streams[0])); int r1 = rand() % amount; int r2 = rand() % amount; while (r1 == r2) { r2 = rand() % amount; } vector<KernelParameter *> paras; paras.push_back(new KernelParameter(StackElement(0, amount-1), r1, r2)); vector<SizePair> sizePairVec; sizePairVec.push_back(SizePair(0, amount)); int **winLs = new int*[1]; int **winRs = new int*[1]; memAllocator->AllocateWin(bitIndex, sizePairVec, winLs, winRs); int gridSize = (int)ceil(((float)amount)/blockSize); QuickJoin<<<gridSize, blockSize, 0, streams[0]>>>(d_objs, d_stack, i_idx, o_idx, d_offsets, 0, amount-1, r1, r2, winLs[0], winRs[0]); sizePairVec.resize(0); delete[] winLs; delete[] winRs; int turn = 0; int *in = i_idx; int *out = o_idx; while (1) { int stackSize = (min(streams.size()*3, MaxStackSize) * 6 + 1) * sizeof(int); for (unsigned int i=0; i<streams.size(); ++i) { checkCudaErrors(cudaStreamSynchronize(streams[i])); checkCudaErrors(cudaStreamDestroy(streams[i])); delete paras[i]; } streams.resize(0); paras.resize(0); cudaStream_t s; streams.push_back(s); checkCudaErrors(cudaStreamCreateWithFlags(&streams[0], cudaStreamNonBlocking)); checkCudaErrors(cudaMemcpyAsync(h_stack, d_stack, stackSize, cudaMemcpyDeviceToHost, streams[0])); checkCudaErrors(cudaMemcpyAsync(d_stack, gTop, sizeof(int), cudaMemcpyHostToDevice, streams[0])); Synchronize<<<0, 0, 0, streams[0]>>>(); checkCudaErrors(cudaStreamSynchronize(streams[0])); checkCudaErrors(cudaStreamDestroy(streams[0])); streams.resize(0); StackElement elem; if 
(!h_stack->Top(elem)) { break; } vector<cudaStream_t> winStreams; while (h_stack->Pop(elem)) { if (elem.startL==0 && elem.endL!=-1 && elem.startR==0 && elem.endR!=-1) { long long size1 = elem.endL + 1; long long size2 = elem.endR + 1; long long mulSize = size1 * size2; if (mulSize <= constSmallNumber) { cudaStream_t s; winStreams.push_back(s); checkCudaErrors(cudaStreamCreateWithFlags(&winStreams[winStreams.size()-1], cudaStreamNonBlocking)); gridSize = (int)ceil(mulSize / ((float)blockSize)); NestedLoopWin<<<gridSize, blockSize, 0, winStreams[winStreams.size()-1]>>>(d_objs, (int *)elem.posL, 0, elem.endL, (int *)elem.posR, 0, elem.endR); } else { int *winL = new int[size1]; int *winR = new int[size2]; cudaStream_t s; checkCudaErrors(cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking)); checkCudaErrors(cudaMemcpyAsync(winL, (int *)elem.posL, sizeof(int)*size1, cudaMemcpyDeviceToHost, s)); checkCudaErrors(cudaMemcpyAsync(winR, (int *)elem.posR, sizeof(int)*size2, cudaMemcpyDeviceToHost, s)); Synchronize<<<0, 0, 0, s>>>(); checkCudaErrors(cudaStreamSynchronize(s)); checkCudaErrors(cudaStreamDestroy(s)); if (threadpool_add(threadPool, &QuickJoinWinLaunch, new ThreadParameter(elem, d_objs, winL, winR))) { delete[] winL; delete[] winR; cout<<"Failed in thread creation!"<<endl; return; } } } else { cudaStream_t s; streams.push_back(s); checkCudaErrors(cudaStreamCreateWithFlags(&streams[streams.size()-1], cudaStreamNonBlocking)); int size = elem.posR - elem.posL + 1; r1 = rand() % size; r2 = rand() % size; while (r1 == r2) { r2 = rand() % size; } paras.push_back(new KernelParameter(elem, r1, r2)); sizePairVec.push_back(SizePair(streams.size()-1, size)); } } if (streams.size() > 0) { checkCudaErrors(cudaMemsetAsync(d_offsets, 0, sizeof(Offset)*streams.size(), streams[0])); } for (unsigned int i=0; i<winStreams.size(); ++i) { checkCudaErrors(cudaStreamSynchronize(winStreams[i])); checkCudaErrors(cudaStreamDestroy(winStreams[i])); } winLs = new int*[streams.size()]; winRs = new int*[streams.size()]; memAllocator->AllocateWin(bitIndex, sizePairVec, winLs, winRs); ++turn; in = turn%2 ? o_idx : i_idx; out = turn%2 ? 
i_idx : o_idx; for (unsigned int i=0; i<streams.size(); ++i) { long long size = (paras[i]->elem).posR - (paras[i]->elem).posL + 1; long long mulSize = size * size; if ((mulSize-size)>>1 <= constSmallNumber) { gridSize = (int)ceil(mulSize / (float)blockSize); NestedLoop<<<gridSize, blockSize, 0, streams[i]>>>(d_objs, in, (paras[i]->elem).posL, (paras[i]->elem).posR); } else { gridSize = (int)ceil(size / (float)blockSize); QuickJoin<<<gridSize, blockSize, 0, streams[i]>>>(d_objs, d_stack, in, out, d_offsets+i, (paras[i]->elem).posL, (paras[i]->elem).posR, paras[i]->r1, paras[i]->r2, winLs[i], winRs[i]); } } sizePairVec.resize(0); delete[] winLs; delete[] winRs; } memAllocator->DeallocateBitmap(bitIndex); while (!threadpool_destroy_ready(threadPool)) { } sdkStopTimer(&timer); cout<<"Processing time: "<<sdkGetTimerValue(&timer)<<" (ms)"<<endl; sdkDeleteTimer(&timer); delete memAllocator; threadpool_destroy(threadPool); } __host__ int main(int argc, char** argv) { int amount = DBSize; Object *h_objs = new Object[amount]; ifstream fin(DBPath.c_str()); for (int i=0; i<amount; ++i) { Object *obj = Read(fin); h_objs[i] = *obj; delete obj; } fin.close(); findCudaDevice(argc, (const char **)argv); checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); checkCudaErrors(cudaMallocHost(&gTop, sizeof(int))); *gTop = -1; Object *d_objs; checkCudaErrors(cudaMalloc(&d_objs, sizeof(Object)*amount)); for (int i=0; i<amount; ++i) { #ifdef VECTOR double *tmp; checkCudaErrors(cudaMalloc(&tmp, sizeof(double)*dimension)); checkCudaErrors(cudaMemcpy(tmp, h_objs[i].x, sizeof(double)*dimension, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(&(d_objs[i].x), &tmp, sizeof(double *), cudaMemcpyHostToDevice)); #endif #ifdef STRING char *tmp; checkCudaErrors(cudaMalloc(&tmp, sizeof(char)*h_objs[i].length)); checkCudaErrors(cudaMemcpy(tmp, h_objs[i].x, sizeof(char)*h_objs[i].length, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(&(d_objs[i].x), &tmp, sizeof(char *), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(&(d_objs[i].length), &(h_objs[i].length), sizeof(int), cudaMemcpyHostToDevice)); #endif } QuickJoinLaunch(h_objs, d_objs, amount); delete[] h_objs; checkCudaErrors(cudaFree(d_objs)); checkCudaErrors(cudaFreeHost(gTop)); cudaDeviceReset(); exit(0); }
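Editor's note: read as a pair, the .hip entry above and this .cu original show the mechanical rewrite hipify applies: cudaXxx runtime calls become hipXxx, cudaMallocHost/cudaFreeHost become hipHostMalloc/hipHostFree, and every triple-chevron launch becomes a hipLaunchKernelGGL call. The block below is a small, CUDA-compilable illustration of that mapping with the hipified form shown in comments; the Scale kernel is a made-up example rather than code from either file.

#include <cuda_runtime.h>

__global__ void Scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(float *d_x, float a, int n, cudaStream_t stream) {  // hipStream_t in the hipified version
    int block = 64;                                  // same blockSize constant the files above use
    int grid = (n + block - 1) / block;
    // hipify rewrites the next line to:
    //   hipLaunchKernelGGL(( Scale), dim3(grid), dim3(block), 0, stream, d_x, a, n);
    Scale<<<grid, block, 0, stream>>>(d_x, a, n);
    cudaStreamSynchronize(stream);                   // becomes hipStreamSynchronize(stream)
}

int main() {
    const int n = 256;
    float *d_x;
    cudaMalloc((void **)&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));
    launch_scale(d_x, 2.f, n, 0);                    // default stream, just exercises the launch path
    cudaFree(d_x);
    return 0;
}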
6bdf5309d0679659f609b4ebd1dbcbd88eda71ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "convolutional_layer.h" #include "dilated_convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "utils.h" #include "darknet.h" #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> } __global__ void binarize_kernel(float *x, int n, float *binary); void binarize_gpu(float *x, int n, float *binary); __global__ void binarize_input_kernel(float *input, int n, int size, float *binary); void binarize_input_gpu(float *input, int n, int size, float *binary); __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary); void binarize_weights_gpu(float *weights, int n, int size, float *binary); void forward_dilated_conv_layer_gpu(dilated_convolutional_layer l, network net) { fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); if(l.binary){ binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); } if(l.xnor){ binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); binarize_gpu(net.input_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu); net.input_gpu = l.binary_input_gpu; } #ifdef CUDNN float one = 1; cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, net.workspace, l.workspace_size, &one, l.dstTensorDesc, l.output_gpu); #else int i, j; int m = l.n/l.groups; int k = l.size*l.size*l.c/l.groups; int n = l.out_w*l.out_h; for(i = 0; i < l.batch; ++i){ for(j = 0; j < l.groups; ++j){ float *a = l.weights_gpu + j*l.nweights/l.groups; float *b = net.workspace; float *c = l.output_gpu + (i*l.groups + j)*n*m; float *im = net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w; if (l.size == 1){ b = im; } else { im2col_dilated_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, l.dilate_rate, b); } gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); } } #endif if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation); //if(l.dot > 0) dot_error_gpu(l); if(l.binary || l.xnor) swap_binary(&l); } __global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta); extern "C" void smooth_layer(layer l, int size, float rate); void backward_dilated_conv_layer_gpu(convolutional_layer l, network net) { if(l.smooth){ smooth_layer(l, 5, l.smooth); } gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, net); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } float *original_input = net.input_gpu; if(l.xnor) net.input_gpu = l.binary_input_gpu; #ifdef CUDNN float one = 1; cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bf_algo, net.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu); if(net.delta_gpu){ if(l.binary || l.xnor) swap_binary(&l); cudnnConvolutionBackwardData(cudnn_handle(), &one, l.weightDesc, l.weights_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bd_algo, net.workspace, l.workspace_size, &one, l.dsrcTensorDesc, net.delta_gpu); if(l.binary || l.xnor) swap_binary(&l); if(l.xnor) 
gradient_array_gpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, net.delta_gpu); } #else int m = l.n/l.groups; int n = l.size*l.size*l.c/l.groups; int k = l.out_w*l.out_h; int i, j; for(i = 0; i < l.batch; ++i){ for(j = 0; j < l.groups; ++j){ float *a = l.delta_gpu + (i*l.groups + j)*m*k; float *b = net.workspace; float *c = l.weight_updates_gpu + j*l.nweights/l.groups; float *im = net.input_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w; float *imd = net.delta_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w; im2col_dilated_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad,l.dilate_rate, b); gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); if (net.delta_gpu) { if (l.binary || l.xnor) swap_binary(&l); a = l.weights_gpu + j*l.nweights/l.groups; b = l.delta_gpu + (i*l.groups + j)*m*k; c = net.workspace; if (l.size == 1) { c = imd; } gemm_gpu(1,0,n,k,m,1,a,n,b,k,0,c,k); //------------------------------------------------------------ /*printf("GPU input of col2im_dilated = \n"); float input[n*k]; hipMemcpy(input, c, n*k*sizeof(float),hipMemcpyDeviceToHost); for (int i=0; i<n; i++){ for (int j=0; j<k; j++){ printf("%d ",(int)input[i*k+j]); }printf("\n"); }printf("\n");*/ //------------------------------------------------------------ if (l.size != 1) { col2im_dilated_gpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, l.dilate_rate, imd); //----------------------------------------------------------- /*printf("GPU output of col2im_dilated = \n"); float output[l.h*l.c*l.w]; hipMemcpy(output, imd, l.h*l.w*l.c, hipMemcpyDeviceToHost); for (int i=0; i<l.h*l.c; i++){ for (int j=0; j<l.w; j++){ printf("%f\t",output[i*l.w+j]); }printf("\n"); }printf("\n");*/ //------------------------------------------------------------ } if(l.binary || l.xnor) { swap_binary(&l); } } if(l.xnor) gradient_array_gpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, net.delta_gpu + i*l.c*l.h*l.w); } } #endif } void pull_dilated_conv_layer(layer l) { cuda_pull_array(l.weights_gpu, l.weights, l.nweights); cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void push_dilated_conv_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.nweights); cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void update_dilated_conv_layer_gpu(layer l, update_args a) { float learning_rate = a.learning_rate*l.learning_rate_scale; float momentum = a.momentum; float decay = a.decay; int batch = a.batch; if(a.adam){ adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); if(l.scales_gpu){ adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); } }else{ axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, 
l.weight_updates_gpu, 1); axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1); axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); if(l.scales_gpu){ axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); } } if(l.clip){ constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1); } } void test_dconv_forward_gpu() { int batch = 100; int h = 32; int w = 32; int c = 3; int n = 32; int groups = 1; int size = 5; int stride = 1; int padding = 5; ACTIVATION activation = LEAKY; int batch_normalize = 0; int binary = 0; int xnor = 0; int adam = 0; int dilate_rate = 2; dilated_convolutional_layer l = make_dilated_conv_layer( batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, adam, dilate_rate); network net = *make_network(1); net.layers = &l; float *input_cpu, *weights_cpu, *output_cpu; input_cpu = (float*) calloc (batch*h*w*c, sizeof(float)); weights_cpu = (float*) calloc (size*size*c*n, sizeof(float)); output_cpu = (float*) calloc (batch*l.out_c*l.out_h*l.out_w, sizeof(float)); FILE *fp; if((fp=fopen("caffe_forward_input.txt","r"))==NULL){ printf("Open file caffe_forward_input failed.\n"); exit(0); } for(int i=0; i<h*w*c*batch; i++){ fscanf(fp,"%f,", &input_cpu[i]); } fclose(fp); FILE *fin; if ((fin = fopen("caffe_forward_weights.txt","r"))==NULL){ printf("Open file caffe_forward_weights failed.\n"); exit(0); } //fscanf(fin, "%*[^\n]\n", NULL,NULL); for(int i=0; i<size*size*c*n; i++){ fscanf(fin, "%f,", &weights_cpu[i]); } fclose(fin); printf("finish reading all inputs.\n"); hipMalloc((void**)&l.output_gpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float)); hipMalloc((void**)&l.weights_gpu, size*size*c*n*sizeof(float)); hipMalloc((void**)&net.input_gpu, batch*h*w*c*sizeof(float)); hipMalloc((void**)&net.workspace, batch*size*size*c*l.out_w*l.out_h*sizeof(float)); hipMemcpy(l.output_gpu, output_cpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(l.weights_gpu, weights_cpu, size*size*c*n*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(net.input_gpu, input_cpu, batch*h*w*c*sizeof(float),hipMemcpyHostToDevice); forward_dilated_conv_layer_gpu(l, net); printf("forward dconv gpu complete.\n"); hipMemcpy(output_cpu, l.output_gpu, batch*l.out_c*l.out_w*l.out_h*sizeof(float),hipMemcpyDeviceToHost); FILE *f3; if((f3 = fopen("darknet_output.txt", "a"))==NULL){ printf("Error opening file darknet_output\n"); exit(0); } for (int i=0; i<l.out_c*l.out_h*l.out_w*batch; i++){ fprintf(f3, "%e, ", output_cpu[i]); if (i%10 == 9) fprintf(f3,"\n"); } fclose(f3); printf("test completed successfully.\n"); } void test_dconv_backprop_gpu() { int batch = 100; int h = 8; int w = 8; int c = 32; int n = 64; int groups = 1; int size = 5; int stride = 1; int padding = 5; ACTIVATION activation = LEAKY; int batch_normalize = 0; int binary = 0; int xnor = 0; int adam = 0; int dilate_rate = 2; dilated_convolutional_layer l = make_dilated_conv_layer( batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, adam, dilate_rate); network net = *make_network(1); net.layers = &l; float *input_cpu, *weights_cpu, *delta_cpu, *weight_updates_cpu, *upperdelta_cpu, *output_cpu; input_cpu = (float*) calloc (batch*h*w*c, sizeof(float)); weights_cpu = (float*) calloc (size*size*c*n, sizeof(float)); delta_cpu = (float*) calloc 
(batch*l.out_w*l.out_h*l.out_c, sizeof(float)); upperdelta_cpu = (float*) calloc (batch*h*w*c, sizeof(float)); weight_updates_cpu = (float*) calloc (size*size*c*n, sizeof(float)); output_cpu = (float*) calloc (batch*l.out_c*l.out_h*l.out_w, sizeof(float)); FILE *fp; if((fp=fopen("caffe_backprop_input.txt","r"))==NULL){ printf("Open file caffe_backprop_input failed.\n"); exit(0); } for(int i=0; i<h*w*c*batch; i++){ fscanf(fp,"%f,", &input_cpu[i]); } fclose(fp); FILE *fin; if ((fin = fopen("caffe_backprop_weights.txt","r"))==NULL){ printf("Open file caffe_backprop_weights failed.\n"); exit(0); } //fscanf(fin, "%*[^\n]\n", NULL,NULL); for(int i=0; i<size*size*c*n; i++){ fscanf(fin, "%f,", &weights_cpu[i]); } fclose(fin); FILE *f1; if ((f1 = fopen("caffe_backprop_topdiff.txt","r"))==NULL){ printf("Open file caffe_backprop_topdiff.txt failed.\n"); exit(0); } for (int i=0; i<l.out_w*l.out_h*l.out_c*batch; i++){ fscanf(f1, "%f,", &delta_cpu[i]); } fclose(f1); printf("finish reading all inputs.\n"); hipMalloc((void**)&l.output_gpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float)); hipMalloc((void**)&l.weights_gpu, size*size*c*n*sizeof(float)); hipMalloc((void**)&l.weight_updates_gpu, size*size*c*n*sizeof(float)); hipMalloc((void**)&l.delta_gpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float)); hipMalloc((void**)&net.input_gpu, batch*h*w*c*sizeof(float)); hipMalloc((void**)&net.workspace, batch*size*size*c*l.out_w*l.out_h*sizeof(float)); hipMalloc((void**)&net.delta_gpu, batch*c*h*w*sizeof(float)); hipMemcpy(l.output_gpu, output_cpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(l.weights_gpu, weights_cpu, size*size*c*n*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(l.weight_updates_gpu, weight_updates_cpu, size*size*c*n*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(l.delta_gpu, delta_cpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(net.input_gpu, input_cpu, batch*h*w*c*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(net.delta_gpu, upperdelta_cpu, c*h*w*sizeof(float),hipMemcpyHostToDevice); //forward_dilated_conv_layer_gpu(l, net); //printf("forward dconv gpu complete.\n"); hipMemcpy(output_cpu, l.output_gpu, batch*l.out_c*l.out_w*l.out_h*sizeof(float),hipMemcpyDeviceToHost); backward_dilated_conv_layer_gpu(l,net); printf("backprop dconv gpu complete.\n"); hipMemcpy(weight_updates_cpu, l.weight_updates_gpu, size*size*c*n*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(upperdelta_cpu, net.delta_gpu, batch*h*w*c*sizeof(float),hipMemcpyDeviceToHost); FILE *f; if((f = fopen("darknet_weight_diff.txt", "a"))==NULL){ printf("Error opening file weight_diff\n"); exit(0); } for (int i=0; i<size*size*n*c; i++){ fprintf(f,"%e,",weight_updates_cpu[i]); if (i%10 == 9) fprintf(f,"\n"); } fclose(f); FILE *f2; if((f2 = fopen("darknet_bottom_diff.txt", "a"))==NULL){ printf("Error opening file bottom_diff\n"); exit(0); } for (int i=0; i<h*w*c*batch; i++){ fprintf(f2, "%e, ", upperdelta_cpu[i]); if (i%10 == 9) fprintf(f2,"\n"); } fclose(f2); printf("test completed successfully.\n"); }
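Editor's note: the forward and backward paths above route every non-1x1 case through im2col_dilated_gpu/col2im_dilated_gpu plus a per-group GEMM, while make_dilated_conv_layer (defined elsewhere in this fork of darknet) decides the output extents. Under the standard dilated-convolution convention, a size-by-size kernel with dilation d covers an effective extent of d*(size-1)+1 input cells; the helper below is a hedged sketch of that arithmetic, reusing the shape constants from test_dconv_forward_gpu, and is not taken from the repository.

#include <cstdio>

// Standard output-extent formula for a dilated convolution: the weights stay
// size*size, but one output element sees dilate_rate*(size-1)+1 input cells.
int dilated_out_dim(int in, int size, int stride, int pad, int dilate_rate) {
    int extent = dilate_rate * (size - 1) + 1;
    return (in + 2 * pad - extent) / stride + 1;
}

int main() {
    // Shapes from test_dconv_forward_gpu above: 32x32 input, 5x5 kernel,
    // stride 1, pad 5, dilation 2, i.e. an effective 9x9 kernel.
    printf("out_h = out_w = %d\n", dilated_out_dim(32, 5, 1, 5, 2));  // prints 34
    return 0;
}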
6bdf5309d0679659f609b4ebd1dbcbd88eda71ae.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "convolutional_layer.h" #include "dilated_convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "utils.h" #include "darknet.h" #include "cuda.h" #include <stdio.h> #include <stdlib.h> } __global__ void binarize_kernel(float *x, int n, float *binary); void binarize_gpu(float *x, int n, float *binary); __global__ void binarize_input_kernel(float *input, int n, int size, float *binary); void binarize_input_gpu(float *input, int n, int size, float *binary); __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary); void binarize_weights_gpu(float *weights, int n, int size, float *binary); void forward_dilated_conv_layer_gpu(dilated_convolutional_layer l, network net) { fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); if(l.binary){ binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); } if(l.xnor){ binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); binarize_gpu(net.input_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu); net.input_gpu = l.binary_input_gpu; } #ifdef CUDNN float one = 1; cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, net.workspace, l.workspace_size, &one, l.dstTensorDesc, l.output_gpu); #else int i, j; int m = l.n/l.groups; int k = l.size*l.size*l.c/l.groups; int n = l.out_w*l.out_h; for(i = 0; i < l.batch; ++i){ for(j = 0; j < l.groups; ++j){ float *a = l.weights_gpu + j*l.nweights/l.groups; float *b = net.workspace; float *c = l.output_gpu + (i*l.groups + j)*n*m; float *im = net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w; if (l.size == 1){ b = im; } else { im2col_dilated_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, l.dilate_rate, b); } gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); } } #endif if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation); //if(l.dot > 0) dot_error_gpu(l); if(l.binary || l.xnor) swap_binary(&l); } __global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta); extern "C" void smooth_layer(layer l, int size, float rate); void backward_dilated_conv_layer_gpu(convolutional_layer l, network net) { if(l.smooth){ smooth_layer(l, 5, l.smooth); } gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, net); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } float *original_input = net.input_gpu; if(l.xnor) net.input_gpu = l.binary_input_gpu; #ifdef CUDNN float one = 1; cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bf_algo, net.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu); if(net.delta_gpu){ if(l.binary || l.xnor) swap_binary(&l); cudnnConvolutionBackwardData(cudnn_handle(), &one, l.weightDesc, l.weights_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bd_algo, net.workspace, l.workspace_size, &one, l.dsrcTensorDesc, net.delta_gpu); if(l.binary || l.xnor) swap_binary(&l); if(l.xnor) gradient_array_gpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, net.delta_gpu); } #else int m = 
l.n/l.groups; int n = l.size*l.size*l.c/l.groups; int k = l.out_w*l.out_h; int i, j; for(i = 0; i < l.batch; ++i){ for(j = 0; j < l.groups; ++j){ float *a = l.delta_gpu + (i*l.groups + j)*m*k; float *b = net.workspace; float *c = l.weight_updates_gpu + j*l.nweights/l.groups; float *im = net.input_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w; float *imd = net.delta_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w; im2col_dilated_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad,l.dilate_rate, b); gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); if (net.delta_gpu) { if (l.binary || l.xnor) swap_binary(&l); a = l.weights_gpu + j*l.nweights/l.groups; b = l.delta_gpu + (i*l.groups + j)*m*k; c = net.workspace; if (l.size == 1) { c = imd; } gemm_gpu(1,0,n,k,m,1,a,n,b,k,0,c,k); //------------------------------------------------------------ /*printf("GPU input of col2im_dilated = \n"); float input[n*k]; cudaMemcpy(input, c, n*k*sizeof(float),cudaMemcpyDeviceToHost); for (int i=0; i<n; i++){ for (int j=0; j<k; j++){ printf("%d ",(int)input[i*k+j]); }printf("\n"); }printf("\n");*/ //------------------------------------------------------------ if (l.size != 1) { col2im_dilated_gpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, l.dilate_rate, imd); //----------------------------------------------------------- /*printf("GPU output of col2im_dilated = \n"); float output[l.h*l.c*l.w]; cudaMemcpy(output, imd, l.h*l.w*l.c, cudaMemcpyDeviceToHost); for (int i=0; i<l.h*l.c; i++){ for (int j=0; j<l.w; j++){ printf("%f\t",output[i*l.w+j]); }printf("\n"); }printf("\n");*/ //------------------------------------------------------------ } if(l.binary || l.xnor) { swap_binary(&l); } } if(l.xnor) gradient_array_gpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, net.delta_gpu + i*l.c*l.h*l.w); } } #endif } void pull_dilated_conv_layer(layer l) { cuda_pull_array(l.weights_gpu, l.weights, l.nweights); cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void push_dilated_conv_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.nweights); cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void update_dilated_conv_layer_gpu(layer l, update_args a) { float learning_rate = a.learning_rate*l.learning_rate_scale; float momentum = a.momentum; float decay = a.decay; int batch = a.batch; if(a.adam){ adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); if(l.scales_gpu){ adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); } }else{ axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, 
l.weights_gpu, 1); scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1); axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); if(l.scales_gpu){ axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); } } if(l.clip){ constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1); } } void test_dconv_forward_gpu() { int batch = 100; int h = 32; int w = 32; int c = 3; int n = 32; int groups = 1; int size = 5; int stride = 1; int padding = 5; ACTIVATION activation = LEAKY; int batch_normalize = 0; int binary = 0; int xnor = 0; int adam = 0; int dilate_rate = 2; dilated_convolutional_layer l = make_dilated_conv_layer( batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, adam, dilate_rate); network net = *make_network(1); net.layers = &l; float *input_cpu, *weights_cpu, *output_cpu; input_cpu = (float*) calloc (batch*h*w*c, sizeof(float)); weights_cpu = (float*) calloc (size*size*c*n, sizeof(float)); output_cpu = (float*) calloc (batch*l.out_c*l.out_h*l.out_w, sizeof(float)); FILE *fp; if((fp=fopen("caffe_forward_input.txt","r"))==NULL){ printf("Open file caffe_forward_input failed.\n"); exit(0); } for(int i=0; i<h*w*c*batch; i++){ fscanf(fp,"%f,", &input_cpu[i]); } fclose(fp); FILE *fin; if ((fin = fopen("caffe_forward_weights.txt","r"))==NULL){ printf("Open file caffe_forward_weights failed.\n"); exit(0); } //fscanf(fin, "%*[^\n]\n", NULL,NULL); for(int i=0; i<size*size*c*n; i++){ fscanf(fin, "%f,", &weights_cpu[i]); } fclose(fin); printf("finish reading all inputs.\n"); cudaMalloc((void**)&l.output_gpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float)); cudaMalloc((void**)&l.weights_gpu, size*size*c*n*sizeof(float)); cudaMalloc((void**)&net.input_gpu, batch*h*w*c*sizeof(float)); cudaMalloc((void**)&net.workspace, batch*size*size*c*l.out_w*l.out_h*sizeof(float)); cudaMemcpy(l.output_gpu, output_cpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(l.weights_gpu, weights_cpu, size*size*c*n*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(net.input_gpu, input_cpu, batch*h*w*c*sizeof(float),cudaMemcpyHostToDevice); forward_dilated_conv_layer_gpu(l, net); printf("forward dconv gpu complete.\n"); cudaMemcpy(output_cpu, l.output_gpu, batch*l.out_c*l.out_w*l.out_h*sizeof(float),cudaMemcpyDeviceToHost); FILE *f3; if((f3 = fopen("darknet_output.txt", "a"))==NULL){ printf("Error opening file darknet_output\n"); exit(0); } for (int i=0; i<l.out_c*l.out_h*l.out_w*batch; i++){ fprintf(f3, "%e, ", output_cpu[i]); if (i%10 == 9) fprintf(f3,"\n"); } fclose(f3); printf("test completed successfully.\n"); } void test_dconv_backprop_gpu() { int batch = 100; int h = 8; int w = 8; int c = 32; int n = 64; int groups = 1; int size = 5; int stride = 1; int padding = 5; ACTIVATION activation = LEAKY; int batch_normalize = 0; int binary = 0; int xnor = 0; int adam = 0; int dilate_rate = 2; dilated_convolutional_layer l = make_dilated_conv_layer( batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, adam, dilate_rate); network net = *make_network(1); net.layers = &l; float *input_cpu, *weights_cpu, *delta_cpu, *weight_updates_cpu, *upperdelta_cpu, *output_cpu; input_cpu = (float*) calloc (batch*h*w*c, sizeof(float)); weights_cpu = (float*) calloc (size*size*c*n, sizeof(float)); delta_cpu = (float*) calloc (batch*l.out_w*l.out_h*l.out_c, sizeof(float)); upperdelta_cpu = (float*) calloc (batch*h*w*c, 
sizeof(float)); weight_updates_cpu = (float*) calloc (size*size*c*n, sizeof(float)); output_cpu = (float*) calloc (batch*l.out_c*l.out_h*l.out_w, sizeof(float)); FILE *fp; if((fp=fopen("caffe_backprop_input.txt","r"))==NULL){ printf("Open file caffe_backprop_input failed.\n"); exit(0); } for(int i=0; i<h*w*c*batch; i++){ fscanf(fp,"%f,", &input_cpu[i]); } fclose(fp); FILE *fin; if ((fin = fopen("caffe_backprop_weights.txt","r"))==NULL){ printf("Open file caffe_backprop_weights failed.\n"); exit(0); } //fscanf(fin, "%*[^\n]\n", NULL,NULL); for(int i=0; i<size*size*c*n; i++){ fscanf(fin, "%f,", &weights_cpu[i]); } fclose(fin); FILE *f1; if ((f1 = fopen("caffe_backprop_topdiff.txt","r"))==NULL){ printf("Open file caffe_backprop_topdiff.txt failed.\n"); exit(0); } for (int i=0; i<l.out_w*l.out_h*l.out_c*batch; i++){ fscanf(f1, "%f,", &delta_cpu[i]); } fclose(f1); printf("finish reading all inputs.\n"); cudaMalloc((void**)&l.output_gpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float)); cudaMalloc((void**)&l.weights_gpu, size*size*c*n*sizeof(float)); cudaMalloc((void**)&l.weight_updates_gpu, size*size*c*n*sizeof(float)); cudaMalloc((void**)&l.delta_gpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float)); cudaMalloc((void**)&net.input_gpu, batch*h*w*c*sizeof(float)); cudaMalloc((void**)&net.workspace, batch*size*size*c*l.out_w*l.out_h*sizeof(float)); cudaMalloc((void**)&net.delta_gpu, batch*c*h*w*sizeof(float)); cudaMemcpy(l.output_gpu, output_cpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(l.weights_gpu, weights_cpu, size*size*c*n*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(l.weight_updates_gpu, weight_updates_cpu, size*size*c*n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(l.delta_gpu, delta_cpu, batch*l.out_w*l.out_h*l.out_c*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(net.input_gpu, input_cpu, batch*h*w*c*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(net.delta_gpu, upperdelta_cpu, c*h*w*sizeof(float),cudaMemcpyHostToDevice); //forward_dilated_conv_layer_gpu(l, net); //printf("forward dconv gpu complete.\n"); cudaMemcpy(output_cpu, l.output_gpu, batch*l.out_c*l.out_w*l.out_h*sizeof(float),cudaMemcpyDeviceToHost); backward_dilated_conv_layer_gpu(l,net); printf("backprop dconv gpu complete.\n"); cudaMemcpy(weight_updates_cpu, l.weight_updates_gpu, size*size*c*n*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(upperdelta_cpu, net.delta_gpu, batch*h*w*c*sizeof(float),cudaMemcpyDeviceToHost); FILE *f; if((f = fopen("darknet_weight_diff.txt", "a"))==NULL){ printf("Error opening file weight_diff\n"); exit(0); } for (int i=0; i<size*size*n*c; i++){ fprintf(f,"%e,",weight_updates_cpu[i]); if (i%10 == 9) fprintf(f,"\n"); } fclose(f); FILE *f2; if((f2 = fopen("darknet_bottom_diff.txt", "a"))==NULL){ printf("Error opening file bottom_diff\n"); exit(0); } for (int i=0; i<h*w*c*batch; i++){ fprintf(f2, "%e, ", upperdelta_cpu[i]); if (i%10 == 9) fprintf(f2,"\n"); } fclose(f2); printf("test completed successfully.\n"); }
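Editor's note: in the non-cuDNN backward path above, each (batch, group) pair builds dilated im2col columns in net.workspace and accumulates the weight gradient with gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n), where m = l.n/l.groups filters, n = l.size*l.size*l.c/l.groups column rows, and k = l.out_w*l.out_h spatial positions. Assuming darknet's usual row-major convention for gemm with TB=1 (C += A*B^T), the plain CPU sketch below spells the shapes out; it is illustrative only and not part of the repository.

#include <vector>

// dW (m x n) += delta (m x k) * columns^T, with columns stored n x k row-major
// as im2col_dilated produces them; beta = 1, so gradients accumulate across calls.
void weight_grad_gemm_nt(const std::vector<float> &delta,
                         const std::vector<float> &columns,
                         std::vector<float> &dW,
                         int m, int n, int k) {
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            float acc = 0.f;
            for (int p = 0; p < k; ++p) {
                acc += delta[i * k + p] * columns[j * k + p];  // B is used transposed
            }
            dW[i * n + j] += acc;
        }
    }
}

int main() {
    int m = 2, n = 3, k = 4;
    std::vector<float> delta(m * k, 1.f), columns(n * k, 1.f), dW(m * n, 0.f);
    weight_grad_gemm_nt(delta, columns, dW, m, n, k);
    // every entry of dW is now k = 4
    return 0;
}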
e9b9b7469a78157e79865f87f0120a319dc7e9d8.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2017 the gpudevicemem authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "lib.h" #include "common_hip.cuh" #include "common_reduce.cuh" #include <cassert> #include <hip/hip_runtime.h> template <typename T> class CopyMap { public: __forceinline__ __device__ static T Map(T x) { return x; } }; template <typename T> class SquareMap { public: __forceinline__ __device__ static T Map(T x) { return x * x; } }; template <typename T, typename Map, typename Reduce, typename Write> __global__ void gpudevicemem_map_reduce_packed_deterministic_kernel( uint32_t reduce_dim, const T *x, T *y) { extern __shared__ T cache[]; uint32_t rdup_reduce_dim = (reduce_dim + blockDim.x - 1) / blockDim.x * blockDim.x; T accumulator = Reduce::InitVal(); for (uint32_t i = threadIdx.x; i < rdup_reduce_dim; i += blockDim.x) { if (i < reduce_dim) { cache[threadIdx.x] = Map::Map(x[i]); } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } if (0 == threadIdx.x) { Write::Write(&y[0], accumulator); } } extern "C" void gpudevicemem_sum_packed_deterministic_f32( uint32_t reduce_dim, const float *x, float *y, const KernelConfig *cfg, hipStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); hipLaunchKernelGGL(( gpudevicemem_map_reduce_packed_deterministic_kernel<float, CopyMap<float>, AddReduce<float>, AssignWrite<float>>), dim3(1), dim3(cfg->flat_block_dim()), cfg->flat_block_len() * sizeof(float), stream, reduce_dim, x, y); } extern "C" void gpudevicemem_sum_packed_accumulate_deterministic_f32( uint32_t reduce_dim, const float *x, float *y, const KernelConfig *cfg, hipStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); hipLaunchKernelGGL(( gpudevicemem_map_reduce_packed_deterministic_kernel<float, CopyMap<float>, AddReduce<float>, AccumulateWrite<float>>), dim3(1), dim3(cfg->flat_block_dim()), cfg->flat_block_len() * sizeof(float), stream, reduce_dim, x, y); } template <typename T, typename Map, typename Reduce> __global__ void gpudevicemem_map_reduce_I1ab_Oa_packed_deterministic_kernel( uint32_t inner_dim, uint32_t reduce_dim, const T *x, T *y) { extern __shared__ T cache[]; uint32_t rdup_reduce_dim = (reduce_dim + blockDim.x - 1) / blockDim.x * blockDim.x; for (uint32_t blk = gblock(); blk < inner_dim; blk += gblockcount()) { T accumulator = Reduce::InitVal(); for (uint32_t i = threadIdx.x; i < rdup_reduce_dim; i += blockDim.x) { if (i < reduce_dim) { cache[threadIdx.x] = Map::Map(x[Index2::Pack(blk, inner_dim, i)]); } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } if (0 == threadIdx.x) { y[blk] = accumulator; } } } extern "C" void gpudevicemem_sum_I1ab_Oa_packed_deterministic_f32( uint32_t inner_dim, uint32_t reduce_dim, const float *x, float *y, const 
KernelConfig *cfg, hipStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); hipLaunchKernelGGL(( gpudevicemem_map_reduce_I1ab_Oa_packed_deterministic_kernel<float, CopyMap<float>, AddReduce<float>>), dim3(cfg->flat_block_count(inner_dim)), dim3(cfg->flat_block_dim()), cfg->flat_block_len() * sizeof(float), stream, inner_dim, reduce_dim, x, y); } template <typename T, typename Map, typename Reduce> __global__ void gpudevicemem_map_reduce_I1ab_Ob_packed_deterministic_kernel( uint32_t reduce_dim, uint32_t outer_dim, const T *x, T *y) { extern __shared__ T cache[]; uint32_t rdup_reduce_dim = (reduce_dim + blockDim.x - 1) / blockDim.x * blockDim.x; for (uint32_t blk = gblock(); blk < outer_dim; blk += gblockcount()) { T accumulator = Reduce::InitVal(); for (uint32_t i = threadIdx.x; i < rdup_reduce_dim; i += blockDim.x) { if (i < reduce_dim) { cache[threadIdx.x] = Map::Map(x[Index2::Pack(i, reduce_dim, blk)]); } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } if (0 == threadIdx.x) { y[blk] = accumulator; } } } extern "C" void gpudevicemem_sum_I1ab_Ob_packed_deterministic_f32( uint32_t reduce_dim, uint32_t outer_dim, const float *x, float *y, const KernelConfig *cfg, hipStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); hipLaunchKernelGGL(( gpudevicemem_map_reduce_I1ab_Ob_packed_deterministic_kernel<float, CopyMap<float>, AddReduce<float>>), dim3(cfg->flat_block_count(outer_dim)), dim3(cfg->flat_block_dim()), cfg->flat_block_len() * sizeof(float), stream, reduce_dim, outer_dim, x, y); } template <typename T, typename Map, typename Reduce> __global__ void gpudevicemem_map_reduce_I1abc_Ob_packed_deterministic_kernel( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const T *x, T *y) { extern __shared__ T cache[]; uint32_t rdup_reduce_inner_dim = (reduce_inner_dim + blockDim.x - 1) / blockDim.x * blockDim.x; for (uint32_t blk = gblock(); blk < mid_dim; blk += gblockcount()) { T accumulator = Reduce::InitVal(); for (uint32_t j = 0; j < reduce_outer_dim; ++j) { for (uint32_t i = threadIdx.x; i < rdup_reduce_inner_dim; i += blockDim.x) { if (i < reduce_inner_dim) { cache[threadIdx.x] = Map::Map(x[Index3::Pack(i, reduce_inner_dim, blk, mid_dim, j)]); } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } } if (0 == threadIdx.x) { y[blk] = accumulator; } } } template <typename T, typename Map, typename Reduce> __global__ void gpudevicemem_map_reduce_I1abc_Ob_packed_deterministic_v2_kernel( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const T *x, T *y) { extern __shared__ T cache[]; uint32_t fused_inner_outer_dim = reduce_inner_dim * reduce_outer_dim; uint32_t rdup_fused_inner_outer_dim = (fused_inner_outer_dim + blockDim.x - 1) / blockDim.x * blockDim.x; for (uint32_t blk = gblock(); blk < mid_dim; blk += gblockcount()) { T accumulator = Reduce::InitVal(); for (uint32_t i = threadIdx.x; i < rdup_fused_inner_outer_dim; i += blockDim.x) { if (i < fused_inner_outer_dim) { uint32_t i0, i1; Index2::Unpack(i, &i0, reduce_inner_dim, &i1); cache[threadIdx.x] = Map::Map(x[Index3::Pack(i0, reduce_inner_dim, blk, mid_dim, i1)]); } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == 
threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } if (0 == threadIdx.x) { y[blk] = accumulator; } } } extern "C" void gpudevicemem_sum_I1abc_Ob_packed_deterministic_f32( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const float *x, float *y, const KernelConfig *cfg, hipStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); hipLaunchKernelGGL(( gpudevicemem_map_reduce_I1abc_Ob_packed_deterministic_v2_kernel<float, CopyMap<float>, AddReduce<float>>), dim3(cfg->flat_block_count(mid_dim)), dim3(cfg->flat_block_dim()), cfg->flat_block_len() * sizeof(float), stream, reduce_inner_dim, mid_dim, reduce_outer_dim, x, y); } extern "C" void gpudevicemem_square_map_sum_I1abc_Ob_packed_deterministic_f32( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const float *x, float *y, const KernelConfig *cfg, hipStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); hipLaunchKernelGGL(( gpudevicemem_map_reduce_I1abc_Ob_packed_deterministic_v2_kernel<float, SquareMap<float>, AddReduce<float>>), dim3(cfg->flat_block_count(mid_dim)), dim3(cfg->flat_block_dim()), cfg->flat_block_len() * sizeof(float), stream, reduce_inner_dim, mid_dim, reduce_outer_dim, x, y); } template <typename T, typename Reduce> __global__ void gpudevicemem_mult_then_reduce_I1abc_I2abc_Ob_packed_deterministic_v2_kernel( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const T *x1, const T *x2, T *y) { extern __shared__ T cache[]; uint32_t fused_inner_outer_dim = reduce_inner_dim * reduce_outer_dim; uint32_t rdup_fused_inner_outer_dim = (fused_inner_outer_dim + blockDim.x - 1) / blockDim.x * blockDim.x; for (uint32_t blk1 = gblock(); blk1 < mid_dim; blk1 += gblockcount()) { T accumulator = Reduce::InitVal(); for (uint32_t i = threadIdx.x; i < rdup_fused_inner_outer_dim; i += blockDim.x) { if (i < fused_inner_outer_dim) { uint32_t i0, i2; Index2::Unpack(i, &i0, reduce_inner_dim, &i2); uint32_t idx = Index3::Pack(i0, reduce_inner_dim, blk1, mid_dim, i2); cache[threadIdx.x] = x1[idx] * x2[idx]; } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } if (0 == threadIdx.x) { y[blk1] = accumulator; } } } extern "C" void gpudevicemem_mult_then_sum_I1abc_I2abc_Ob_packed_deterministic_f32( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const float *x1, const float *x2, float *y, const KernelConfig *cfg, hipStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); hipLaunchKernelGGL(( gpudevicemem_mult_then_reduce_I1abc_I2abc_Ob_packed_deterministic_v2_kernel<float, AddReduce<float>>), dim3(cfg->flat_block_count(mid_dim)), dim3(cfg->flat_block_dim()), cfg->flat_block_len() * sizeof(float), stream, reduce_inner_dim, mid_dim, reduce_outer_dim, x1, x2, y); }
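Editor's note: every launcher in the file above asserts check_power_of_2 on the block dimension before calling a kernel that folds its shared-memory cache with threadblock_reduce_sync; that helper lives in common_reduce.cuh, which is not part of this dump, so the folding step below is an assumption about what it does rather than the repository's implementation. It shows why the power-of-two requirement exists: the cache is halved log2(blockDim.x) times, and out-of-range lanes are padded with the reduction identity, mirroring how the kernels pad with Reduce::InitVal().

#include <cuda_runtime.h>
#include <cstdio>

// Power-of-two tree fold over one block's shared cache; cache[0] ends up holding the block sum.
__device__ void block_tree_reduce_sum(float *cache) {
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) cache[threadIdx.x] += cache[threadIdx.x + stride];
        __syncthreads();  // every level must finish before the next halving
    }
}

__global__ void sum_one_block(const float *x, float *y, unsigned int n) {
    extern __shared__ float cache[];
    cache[threadIdx.x] = threadIdx.x < n ? x[threadIdx.x] : 0.f;  // pad with the additive identity
    __syncthreads();
    block_tree_reduce_sum(cache);
    if (threadIdx.x == 0) *y = cache[0];
}

int main() {
    const unsigned int n = 100;
    float h_x[n], h_y = 0.f, *d_x, *d_y;
    for (unsigned int i = 0; i < n; ++i) h_x[i] = 1.f;
    cudaMalloc((void **)&d_x, n * sizeof(float));
    cudaMalloc((void **)&d_y, sizeof(float));
    cudaMemcpy(d_x, h_x, n * sizeof(float), cudaMemcpyHostToDevice);
    sum_one_block<<<1, 128, 128 * sizeof(float)>>>(d_x, d_y, n);  // 128 is a power of two
    cudaMemcpy(&h_y, d_y, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %.0f\n", h_y);  // prints 100
    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}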
e9b9b7469a78157e79865f87f0120a319dc7e9d8.cu
/* Copyright 2017 the gpudevicemem authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "lib.h" #include "common.cuh" #include "common_reduce.cuh" #include <cassert> #include <cuda_runtime.h> template <typename T> class CopyMap { public: __forceinline__ __device__ static T Map(T x) { return x; } }; template <typename T> class SquareMap { public: __forceinline__ __device__ static T Map(T x) { return x * x; } }; template <typename T, typename Map, typename Reduce, typename Write> __global__ void gpudevicemem_map_reduce_packed_deterministic_kernel( uint32_t reduce_dim, const T *x, T *y) { extern __shared__ T cache[]; uint32_t rdup_reduce_dim = (reduce_dim + blockDim.x - 1) / blockDim.x * blockDim.x; T accumulator = Reduce::InitVal(); for (uint32_t i = threadIdx.x; i < rdup_reduce_dim; i += blockDim.x) { if (i < reduce_dim) { cache[threadIdx.x] = Map::Map(x[i]); } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } if (0 == threadIdx.x) { Write::Write(&y[0], accumulator); } } extern "C" void gpudevicemem_sum_packed_deterministic_f32( uint32_t reduce_dim, const float *x, float *y, const KernelConfig *cfg, cudaStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); gpudevicemem_map_reduce_packed_deterministic_kernel<float, CopyMap<float>, AddReduce<float>, AssignWrite<float>><<<1, cfg->flat_block_dim(), cfg->flat_block_len() * sizeof(float), stream>>>( reduce_dim, x, y); } extern "C" void gpudevicemem_sum_packed_accumulate_deterministic_f32( uint32_t reduce_dim, const float *x, float *y, const KernelConfig *cfg, cudaStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); gpudevicemem_map_reduce_packed_deterministic_kernel<float, CopyMap<float>, AddReduce<float>, AccumulateWrite<float>><<<1, cfg->flat_block_dim(), cfg->flat_block_len() * sizeof(float), stream>>>( reduce_dim, x, y); } template <typename T, typename Map, typename Reduce> __global__ void gpudevicemem_map_reduce_I1ab_Oa_packed_deterministic_kernel( uint32_t inner_dim, uint32_t reduce_dim, const T *x, T *y) { extern __shared__ T cache[]; uint32_t rdup_reduce_dim = (reduce_dim + blockDim.x - 1) / blockDim.x * blockDim.x; for (uint32_t blk = gblock(); blk < inner_dim; blk += gblockcount()) { T accumulator = Reduce::InitVal(); for (uint32_t i = threadIdx.x; i < rdup_reduce_dim; i += blockDim.x) { if (i < reduce_dim) { cache[threadIdx.x] = Map::Map(x[Index2::Pack(blk, inner_dim, i)]); } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } if (0 == threadIdx.x) { y[blk] = accumulator; } } } extern "C" void gpudevicemem_sum_I1ab_Oa_packed_deterministic_f32( uint32_t inner_dim, uint32_t reduce_dim, const float *x, float *y, const KernelConfig *cfg, cudaStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); 
gpudevicemem_map_reduce_I1ab_Oa_packed_deterministic_kernel<float, CopyMap<float>, AddReduce<float>><<<cfg->flat_block_count(inner_dim), cfg->flat_block_dim(), cfg->flat_block_len() * sizeof(float), stream>>>( inner_dim, reduce_dim, x, y); } template <typename T, typename Map, typename Reduce> __global__ void gpudevicemem_map_reduce_I1ab_Ob_packed_deterministic_kernel( uint32_t reduce_dim, uint32_t outer_dim, const T *x, T *y) { extern __shared__ T cache[]; uint32_t rdup_reduce_dim = (reduce_dim + blockDim.x - 1) / blockDim.x * blockDim.x; for (uint32_t blk = gblock(); blk < outer_dim; blk += gblockcount()) { T accumulator = Reduce::InitVal(); for (uint32_t i = threadIdx.x; i < rdup_reduce_dim; i += blockDim.x) { if (i < reduce_dim) { cache[threadIdx.x] = Map::Map(x[Index2::Pack(i, reduce_dim, blk)]); } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } if (0 == threadIdx.x) { y[blk] = accumulator; } } } extern "C" void gpudevicemem_sum_I1ab_Ob_packed_deterministic_f32( uint32_t reduce_dim, uint32_t outer_dim, const float *x, float *y, const KernelConfig *cfg, cudaStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); gpudevicemem_map_reduce_I1ab_Ob_packed_deterministic_kernel<float, CopyMap<float>, AddReduce<float>><<<cfg->flat_block_count(outer_dim), cfg->flat_block_dim(), cfg->flat_block_len() * sizeof(float), stream>>>( reduce_dim, outer_dim, x, y); } template <typename T, typename Map, typename Reduce> __global__ void gpudevicemem_map_reduce_I1abc_Ob_packed_deterministic_kernel( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const T *x, T *y) { extern __shared__ T cache[]; uint32_t rdup_reduce_inner_dim = (reduce_inner_dim + blockDim.x - 1) / blockDim.x * blockDim.x; for (uint32_t blk = gblock(); blk < mid_dim; blk += gblockcount()) { T accumulator = Reduce::InitVal(); for (uint32_t j = 0; j < reduce_outer_dim; ++j) { for (uint32_t i = threadIdx.x; i < rdup_reduce_inner_dim; i += blockDim.x) { if (i < reduce_inner_dim) { cache[threadIdx.x] = Map::Map(x[Index3::Pack(i, reduce_inner_dim, blk, mid_dim, j)]); } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } } if (0 == threadIdx.x) { y[blk] = accumulator; } } } template <typename T, typename Map, typename Reduce> __global__ void gpudevicemem_map_reduce_I1abc_Ob_packed_deterministic_v2_kernel( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const T *x, T *y) { extern __shared__ T cache[]; uint32_t fused_inner_outer_dim = reduce_inner_dim * reduce_outer_dim; uint32_t rdup_fused_inner_outer_dim = (fused_inner_outer_dim + blockDim.x - 1) / blockDim.x * blockDim.x; for (uint32_t blk = gblock(); blk < mid_dim; blk += gblockcount()) { T accumulator = Reduce::InitVal(); for (uint32_t i = threadIdx.x; i < rdup_fused_inner_outer_dim; i += blockDim.x) { if (i < fused_inner_outer_dim) { uint32_t i0, i1; Index2::Unpack(i, &i0, reduce_inner_dim, &i1); cache[threadIdx.x] = Map::Map(x[Index3::Pack(i0, reduce_inner_dim, blk, mid_dim, i1)]); } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } if (0 == threadIdx.x) { y[blk] = accumulator; } } } extern "C" void 
gpudevicemem_sum_I1abc_Ob_packed_deterministic_f32( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const float *x, float *y, const KernelConfig *cfg, cudaStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); gpudevicemem_map_reduce_I1abc_Ob_packed_deterministic_v2_kernel<float, CopyMap<float>, AddReduce<float>><<<cfg->flat_block_count(mid_dim), cfg->flat_block_dim(), cfg->flat_block_len() * sizeof(float), stream>>>( reduce_inner_dim, mid_dim, reduce_outer_dim, x, y); } extern "C" void gpudevicemem_square_map_sum_I1abc_Ob_packed_deterministic_f32( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const float *x, float *y, const KernelConfig *cfg, cudaStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); gpudevicemem_map_reduce_I1abc_Ob_packed_deterministic_v2_kernel<float, SquareMap<float>, AddReduce<float>><<<cfg->flat_block_count(mid_dim), cfg->flat_block_dim(), cfg->flat_block_len() * sizeof(float), stream>>>( reduce_inner_dim, mid_dim, reduce_outer_dim, x, y); } template <typename T, typename Reduce> __global__ void gpudevicemem_mult_then_reduce_I1abc_I2abc_Ob_packed_deterministic_v2_kernel( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const T *x1, const T *x2, T *y) { extern __shared__ T cache[]; uint32_t fused_inner_outer_dim = reduce_inner_dim * reduce_outer_dim; uint32_t rdup_fused_inner_outer_dim = (fused_inner_outer_dim + blockDim.x - 1) / blockDim.x * blockDim.x; for (uint32_t blk1 = gblock(); blk1 < mid_dim; blk1 += gblockcount()) { T accumulator = Reduce::InitVal(); for (uint32_t i = threadIdx.x; i < rdup_fused_inner_outer_dim; i += blockDim.x) { if (i < fused_inner_outer_dim) { uint32_t i0, i2; Index2::Unpack(i, &i0, reduce_inner_dim, &i2); uint32_t idx = Index3::Pack(i0, reduce_inner_dim, blk1, mid_dim, i2); cache[threadIdx.x] = x1[idx] * x2[idx]; } else { cache[threadIdx.x] = Reduce::InitVal(); } __syncthreads(); threadblock_reduce_sync<T, Reduce>(cache); if (0 == threadIdx.x) { Reduce::Reduce(&accumulator, cache[0]); } __syncthreads(); } if (0 == threadIdx.x) { y[blk1] = accumulator; } } } extern "C" void gpudevicemem_mult_then_sum_I1abc_I2abc_Ob_packed_deterministic_f32( uint32_t reduce_inner_dim, uint32_t mid_dim, uint32_t reduce_outer_dim, const float *x1, const float *x2, float *y, const KernelConfig *cfg, cudaStream_t stream) { assert(check_power_of_2(cfg->flat_block_dim().x)); gpudevicemem_mult_then_reduce_I1abc_I2abc_Ob_packed_deterministic_v2_kernel<float, AddReduce<float>><<<cfg->flat_block_count(mid_dim), cfg->flat_block_dim(), cfg->flat_block_len() * sizeof(float), stream>>>( reduce_inner_dim, mid_dim, reduce_outer_dim, x1, x2, y); }
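/*
 * Editor's sketch (not part of the original sources): a host-side reference for
 * validating gpudevicemem_sum_I1ab_Ob_packed_deterministic_f32. It assumes
 * Index2::Pack(i, reduce_dim, blk) resolves to i + reduce_dim * blk, i.e. the reduce
 * dimension is innermost in the packed layout; if the real Pack() differs, adjust the
 * indexing accordingly. Compare against the device output with a small tolerance,
 * since the CPU accumulation order differs from the block-wise tree reduction on GPU.
 */
static void cpu_sum_I1ab_Ob_reference(
    uint32_t reduce_dim, uint32_t outer_dim, const float *x, float *y) {
  for (uint32_t b = 0; b < outer_dim; ++b) {
    float acc = 0.0f;
    for (uint32_t i = 0; i < reduce_dim; ++i) {
      acc += x[i + reduce_dim * b];  // assumed packed layout: [reduce_dim, outer_dim]
    }
    y[b] = acc;
  }
}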
42cd49dc34633c06e794c71091203242371810fc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Sadiku's Numerical Techniques in Electromagnetics. pg.712
extern "C"
int testeCG(int n, int kmax, float err, float* A, float* x, float* b) {
    int i, j, k = 1;
    float alpha, beta, sum1, sum2, sum3 = 1.0f, sum4;
    float *r = cast(float*, malloc(n*sizeof(float)));
    float *p = cast(float*, malloc(n*sizeof(float)));
    float *u = cast(float*, malloc(n*sizeof(float)));
    for (i = 0; i < n; i++) {
        p[i] = b[i];
        r[i] = b[i];
    }
    while (k < kmax && fabs(sqrt(sum3)) > err) {
        for (j = 0; j < n; j++) {
            u[j] = 0.0;
            for (i = 0; i < n; i++)
                u[j] += A[i*n + j]*p[i];
        }
        sum1 = 0.0f;
        sum2 = 0.0f;
        for (i = 0; i < n; i++) {
            sum1 += p[i]*r[i];
            sum2 += p[i]*u[i];
        }
        alpha = sum2 != 0.0f ? sum1/sum2 : 0.0f;
        alpha = isnan(alpha) ? 0.0f : alpha;
        for (i = 0; i < n; i++) {
            x[i] += alpha*p[i];
            r[i] -= alpha*u[i];
        }
        sum3 = 0.0;
        sum4 = 0.0;
        for (i = 0; i < n; i++) {
            sum3 += r[i]*r[i];
            sum4 += r[i]*u[i];
        }
        beta = sum2 != 0.0 ? -sum4/sum2 : 0.0;
        for (i = 0; i < n; i++) {
            p[i] = r[i] + beta*p[i];
        }
        k++;
    }
    free(r);
    free(p);
    free(u);
    return k;
}

#if CUDA
extern "C"
float teste_sum_reduction(int size, float *a, float *b) {
    const dim3 threads(BSIZE);
    const dim3 blocks(1 + size/BSIZE);
    float *_a, *_b, *_sum, sum = 0;
    smalloc(&_a, sizeof(float)*size);
    smalloc(&_b, sizeof(float)*size);
    smalloc(&_sum, sizeof(float));
    smemcpy(_a, a, sizeof(float)*size, hipMemcpyHostToDevice);
    smemcpy(_b, b, sizeof(float)*size, hipMemcpyHostToDevice);
    smemcpy(_sum, &sum, sizeof(float), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( kernel_util_vecsummult), dim3(blocks), dim3(threads), 0, 0, size, _a, _b, _sum);
    hipDeviceSynchronize();
    smemcpy(&sum, _sum, sizeof(float), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    return sum;
}
#endif
42cd49dc34633c06e794c71091203242371810fc.cu
// Sadiku's Numerical Techniques in Electromagnetics. pg.712
extern "C"
int testeCG(int n, int kmax, float err, float* A, float* x, float* b) {
    int i, j, k = 1;
    float alpha, beta, sum1, sum2, sum3 = 1.0f, sum4;
    float *r = cast(float*, malloc(n*sizeof(float)));
    float *p = cast(float*, malloc(n*sizeof(float)));
    float *u = cast(float*, malloc(n*sizeof(float)));
    for (i = 0; i < n; i++) {
        p[i] = b[i];
        r[i] = b[i];
    }
    while (k < kmax && fabs(sqrt(sum3)) > err) {
        for (j = 0; j < n; j++) {
            u[j] = 0.0;
            for (i = 0; i < n; i++)
                u[j] += A[i*n + j]*p[i];
        }
        sum1 = 0.0f;
        sum2 = 0.0f;
        for (i = 0; i < n; i++) {
            sum1 += p[i]*r[i];
            sum2 += p[i]*u[i];
        }
        alpha = sum2 != 0.0f ? sum1/sum2 : 0.0f;
        alpha = isnan(alpha) ? 0.0f : alpha;
        for (i = 0; i < n; i++) {
            x[i] += alpha*p[i];
            r[i] -= alpha*u[i];
        }
        sum3 = 0.0;
        sum4 = 0.0;
        for (i = 0; i < n; i++) {
            sum3 += r[i]*r[i];
            sum4 += r[i]*u[i];
        }
        beta = sum2 != 0.0 ? -sum4/sum2 : 0.0;
        for (i = 0; i < n; i++) {
            p[i] = r[i] + beta*p[i];
        }
        k++;
    }
    free(r);
    free(p);
    free(u);
    return k;
}

#if CUDA
extern "C"
float teste_sum_reduction(int size, float *a, float *b) {
    const dim3 threads(BSIZE);
    const dim3 blocks(1 + size/BSIZE);
    float *_a, *_b, *_sum, sum = 0;
    smalloc(&_a, sizeof(float)*size);
    smalloc(&_b, sizeof(float)*size);
    smalloc(&_sum, sizeof(float));
    smemcpy(_a, a, sizeof(float)*size, cudaMemcpyHostToDevice);
    smemcpy(_b, b, sizeof(float)*size, cudaMemcpyHostToDevice);
    smemcpy(_sum, &sum, sizeof(float), cudaMemcpyHostToDevice);
    kernel_util_vecsummult<<<blocks, threads>>>(size, _a, _b, _sum);
    cudaDeviceSynchronize();
    smemcpy(&sum, _sum, sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    return sum;
}
#endif
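/*
 * Editor's sketch (not part of the original sources): a hypothetical host driver for
 * testeCG on a tiny symmetric positive-definite system. Note that testeCG only
 * accumulates into x (x[i] += alpha*p[i]) and never zeroes it, so the caller must
 * supply the initial guess; the cast()/BSIZE/smalloc/smemcpy helpers used above come
 * from headers that are not shown here. All values below are illustrative.
 */
#include <cstdio>
#include <cstring>

extern "C" int testeCG(int n, int kmax, float err, float *A, float *x, float *b);

static int example_cg_driver(void) {
  const int n = 3;
  // Row-major 3x3 diagonally dominant (hence SPD) matrix.
  float A[n * n] = { 4.f, 1.f, 0.f,
                     1.f, 3.f, 1.f,
                     0.f, 1.f, 2.f };
  float b[n] = { 1.f, 2.f, 3.f };
  float x[n];
  memset(x, 0, sizeof(x));   // start from the zero vector
  int iters = testeCG(n, /*kmax=*/100, /*err=*/1e-5f, A, x, b);
  printf("CG stopped after %d iterations: x = (%f, %f, %f)\n",
         iters, x[0], x[1], x[2]);
  return iters;
}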
980c19148284a771e86b5fff04c2d9cf3885cfc5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This file is the implemetation of RBM following the original algorithm. It is used as the baseline algorithm for comparison. It has been rewritten for CUDA Notes on code style: Ptrs on the host that point to device arrays should be prefixed with "dev_". __constant__ CUDA memory should be prefixed with "const_". The CUDA toolkit documentation says: "For the highest quality parallel pseudorandom number generation, each experiment should be assigned a unique seed. Within an experiment, each thread of computation should be assigned a unique sequence number. If an experiment spans multiple kernel launches, it is recommended that threads between kernel launches be given the same seed, and sequence numbers be assigned in a monotonically increasing way. If the same configuration of threads is launched, random state can be preserved in global memory between launches to avoid state setup time. " With this in mind, this code will allocate a hiprandState_t object for each thread, and init each with a different seed and sequence number, where the seed is shifted by the current time. Note that the dW array is now completely in shared memory during the write_results_to_memory __global__ function. Question: Could I get rid of the nv_means array and just directly compute the nv_samples array? */ #include <iostream> #include <fstream> #include <cstdlib> #include <fstream> #include <math.h> #include <vector> #include <ctime> #include <algorithm> #include <numeric> #include <string.h> #include "../include/rbm_baseline.h" #include "../include/utils.h" #include "../include/cuda_utils.h" #include "../include/kernels.h" #include <hiprand/hiprand_kernel.h> #include <time.h> // #include <boost/date_time/posix_time/posix_time.hpp> using namespace std; using namespace utils; namespace baseline { /* RBM constructor */ //Note that only one of these RBM classes should be run at a //time, since the constant CUDA memory will be overwritten by //secondary classes. RBM::RBM(int size, int n_v, int n_h, int b_size, int k, DTYPE **w, DTYPE *hb, DTYPE *vb, int data_num_rows, int data_num_cols) { // cerr << "RBM baseline constructor\n"; hipStreamCreate(&stream); N = size; n_visible = n_v; hipMemcpyToSymbol(const_n_visible , &n_visible , sizeof(int)); n_hidden = n_h; hipMemcpyToSymbol(const_n_hidden , &n_hidden , sizeof(int)); batch_size = b_size; hipMemcpyToSymbol(const_batch_size, &batch_size, sizeof(int)); this->k = k; hipMemcpyToSymbol(const_k , &k , sizeof(int)); this->data_num_rows = data_num_rows; this->data_num_cols = data_num_cols; //Number of hiprand objects per batch_i: curand_batch_width = n_visible > n_hidden ? 
n_visible : n_hidden; int num_curand_states = batch_size * curand_batch_width; hipMalloc((void**)&dev_curand_states, num_curand_states * sizeof(hiprandState_t)); int num_blocks = (num_curand_states / MAX_THREADS) + 1; #ifdef RANDOM_RUNS hipLaunchKernelGGL(( init_curand), dim3(num_blocks), dim3(MAX_THREADS), 0, 0, dev_curand_states, num_curand_states, time(NULL)); #else hipLaunchKernelGGL(( init_curand), dim3(num_blocks), dim3(MAX_THREADS), 0, 0, dev_curand_states, num_curand_states, 0); #endif CUDA_CHECK(hipDeviceSynchronize()); CUDA_CHECK(hipGetLastError()); //========================== //Allocate all device memory //========================== // allocate_special_memory(); // CUDA_CHECK(hipMallocPitch((void**)&dev_W, &W_pitch, // n_visible * sizeof(DTYPE), n_hidden)); // cout << "Weight matrix pitch is " << W_pitch << " bytes." << endl; // cout << "This implies that " << (W_pitch - (n_visible * sizeof(DTYPE))) // << " bytes extra are being used per row." << endl; // CUDA_CHECK(hipMalloc((void**)&dev_data, data_num_rows * data_num_cols * sizeof(int))); CUDA_CHECK(hipMalloc((void**)&dev_hbias , n_hidden * sizeof(DTYPE))); CUDA_CHECK(hipMalloc((void**)&dev_vbias , n_visible * sizeof(DTYPE))); CUDA_CHECK(hipMalloc((void**)&dev_dhbias, n_hidden * sizeof(DTYPE))); CUDA_CHECK(hipMalloc((void**)&dev_dvbias, n_visible * sizeof(DTYPE))); CUDA_CHECK(hipMemset(dev_hbias, 0, n_hidden * sizeof(DTYPE))); CUDA_CHECK(hipMemset(dev_vbias, 0, n_visible * sizeof(DTYPE))); CUDA_CHECK(hipMalloc((void**)&dev_tot_vones_temp, sizeof(int))); CUDA_CHECK(hipMalloc((void**)&dev_tot_hones_temp, sizeof(int))); CUDA_CHECK(hipMalloc((void**)&dev_tot_vones , sizeof(int))); CUDA_CHECK(hipMalloc((void**)&dev_tot_hones , sizeof(int))); CUDA_CHECK(hipMalloc((void**)&dev_ph_mean_batch , sizeof(DTYPE) * n_hidden * batch_size)); CUDA_CHECK(hipMalloc((void**)&dev_nv_means_batch , sizeof(DTYPE) * n_visible * batch_size)); CUDA_CHECK(hipMalloc((void**)&dev_nh_means_batch , sizeof(DTYPE) * n_hidden * batch_size)); // hipMalloc((void**)&dev_ph_sample_batch , sizeof(int ) * n_hidden * batch_size); // hipMalloc((void**)&dev_nv_samples_batch, sizeof(int ) * n_visible * batch_size); // hipMalloc((void**)&dev_nh_samples_batch, sizeof(int ) * n_hidden * batch_size); last_k = 0; /* parameters for debugging and development purpose */ tot_vones = 0; tot_hones = 0; tot_vones_temp = 0; tot_hones_temp = 0; /* initializing learning parameters */ if(w == NULL) { WArray = new DTYPE[n_hidden * n_visible]; W = new DTYPE*[n_hidden]; for(int i=0; i<n_hidden; i++) W[i] = new DTYPE[n_visible]; DTYPE a = 1.0 / n_visible; for(int i=0; i<n_hidden; i++) { for(int j=0; j<n_visible; j++) { W[i][j] = uniform(-a, a); } } } else { W = w; } if(hb == NULL) { hbias = new DTYPE[n_hidden]; for(int i=0; i<n_hidden; i++) hbias[i] = 0; } else { hbias = hb; } if(vb == NULL) { vbias = new DTYPE[n_visible]; for(int i=0; i<n_visible; i++) vbias[i] = 0; } else { vbias = vb; } /* initialize gradient for updating the parameters when batch_size >= 1 */ if(batch_size != 0) { dhbias = new DTYPE[n_hidden]; for(int i=0; i<n_hidden; i++) dhbias[i] = 0; dvbias = new DTYPE[n_visible]; for(int i=0; i<n_visible; i++) dvbias[i] = 0; } } //This function is for memory that changes in terms of its //allocation in child classes. 
void RBM::allocate_special_memory() { this->data = data; hipMalloc((void**)&dev_ph_sample_batch , sizeof(int) * n_hidden * batch_size); hipMalloc((void**)&dev_nv_samples_batch, sizeof(int) * n_visible * batch_size); hipMalloc((void**)&dev_nh_samples_batch, sizeof(int) * n_hidden * batch_size); CUDA_CHECK(hipMallocPitch((void**)&dev_W, &W_pitch, n_visible * sizeof(DTYPE), n_hidden)); matrixToArray (W, WArray, n_hidden, n_visible); CUDA_CHECK(hipMemcpy2D(dev_W, W_pitch, WArray, n_visible * sizeof(DTYPE), n_visible * sizeof(DTYPE), n_hidden, hipMemcpyHostToDevice)); } /* RBM destructor */ RBM::~RBM() { // saveWeightMatrix(); if(batch_size != 0) { for(int i=0; i<n_hidden; i++) { delete[] W[i]; } delete[] dhbias; delete[] dvbias; } else { for(int i=0; i<n_hidden; i++) delete[] W[i]; } delete[] W; delete[] hbias; delete[] vbias; delete[] WArray; hipStreamDestroy(stream); hipFree(dev_curand_states); hipFree(dev_ph_mean_batch); hipFree(dev_ph_sample_batch); hipFree(dev_nv_means_batch); hipFree(dev_nv_samples_batch); hipFree(dev_nh_means_batch); hipFree(dev_nh_samples_batch); hipFree(dev_W); hipFree(dev_hbias); hipFree(dev_vbias); hipFree(dev_dhbias); hipFree(dev_dvbias); hipFree(dev_tot_vones_temp); hipFree(dev_tot_hones_temp); hipFree(dev_tot_vones); hipFree(dev_tot_hones); } /*This function propagates the visible units activation upwards to the hidden units*/ DTYPE RBM::propup(DTYPE *v, DTYPE *w, DTYPE b) { DTYPE pre_sigmoid_activation = 0.0; // pre_sigmoid_activation = inner_product(w, w+n_visible, v, pre_sigmoid_activation); for(int j=0; j<n_visible; j++) { pre_sigmoid_activation += w[j] * v[j]; } pre_sigmoid_activation += b; return (1.0 / (1.0 + exp(-pre_sigmoid_activation))); //sigmoid(pre_sigmoid_activation); } void RBM::reset_d_arrays() { //Since dW_pitch is the width of the dev_dW array rows, we //multiply by the number of rows (n_hidden) to get the number of //bytes to reset: CUDA_CHECK(hipMemset(dev_dhbias, 0, n_hidden * sizeof(DTYPE))); CUDA_CHECK(hipMemset(dev_dvbias, 0, n_visible * sizeof(DTYPE))); // CUDA_CHECK(hipMemset(dev_ph_mean_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(hipMemset(dev_nv_means_batch , 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(hipMemset(dev_nh_means_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(hipMemset(dev_ph_sample_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(hipMemset(dev_nv_samples_batch, 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(hipMemset(dev_nh_samples_batch, 0, sizeof(DTYPE) * n_hidden * batch_size)); } /* Contrastive Divergence CUDA kernel*/ void RBM::contrastive_divergence(int curr_i, DTYPE lr, DTYPE wc, DTYPE * dev_data) { reset_d_arrays(); CUDA_CHECK(hipMemcpy(dev_tot_vones_temp, &tot_vones_temp, sizeof(int), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(dev_tot_hones_temp, &tot_hones_temp, sizeof(int), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(dev_tot_vones , &tot_vones , sizeof(int), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(dev_tot_hones , &tot_hones , sizeof(int), hipMemcpyHostToDevice)); //============== //Execute Kernel //============== if(batch_size > MAX_THREADS) { cerr << "ERROR: batch_size cannot exceed 1024" << endl; } // GET_TIME(k1_t1); // cerr << "time: " << k1_t1 << endl; int n_blocks = 1 + (batch_size - 1) / NUM_BATCH_THREADS_PER_BLOCK; hipLaunchKernelGGL(( cd_gpu) , dim3(n_blocks), dim3(NUM_BATCH_THREADS_PER_BLOCK), 0, stream, dev_data, curr_i, data_num_cols, dev_tot_vones_temp, dev_tot_hones_temp, dev_tot_vones, dev_tot_hones, 
dev_W, W_pitch, dev_hbias, dev_vbias, dev_curand_states, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, curand_batch_width); hipDeviceSynchronize(); // GET_TIME(k1_t2); // cerr << "k1 time: " << get_duration(k1_t1, k1_t2) << endl; CUDA_CHECK(hipGetLastError()); // cerr << "Initiating write\n"; dim3 num_blocks, num_threads; dims_to_num_threads_and_blocks(n_visible, n_hidden, num_blocks, num_threads); // GET_TIME(k2_t1); hipLaunchKernelGGL(( write_results_to_memory) , dim3(num_blocks), dim3(num_threads), 0, stream, dev_data, dev_W, W_pitch, lr, wc, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, dev_hbias, dev_vbias, dev_dhbias, dev_dvbias, data_num_rows, data_num_cols, curr_i); hipDeviceSynchronize(); // GET_TIME(k2_t2); // cerr << "k2 time: " << get_duration(k2_t1, k2_t2) << endl; CUDA_CHECK(hipGetLastError()); CUDA_CHECK(hipMemcpy(&tot_vones_temp, dev_tot_vones_temp, sizeof(int), hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(&tot_hones_temp, dev_tot_hones_temp, sizeof(int), hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(&tot_vones , dev_tot_vones , sizeof(int), hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(&tot_hones , dev_tot_hones , sizeof(int), hipMemcpyDeviceToHost)); } void RBM::copy_matrices_to_host() { // cerr << "RBM baseline copy_matrices_to_host\n"; CUDA_CHECK(hipMemcpy2D(WArray, n_visible * sizeof(DTYPE), dev_W, W_pitch, n_visible * sizeof(DTYPE), n_hidden, hipMemcpyDeviceToHost)); arrayToMatrix(WArray, W, n_hidden, n_visible); CUDA_CHECK(hipMemcpy(vbias, dev_vbias, n_visible * sizeof(DTYPE), hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(hbias, dev_hbias, n_hidden * sizeof(DTYPE), hipMemcpyDeviceToHost)); } void RBM::reconstruct(DTYPE *v, DTYPE *reconstructed_v) { DTYPE *h = new DTYPE[n_hidden]; DTYPE pre_sigmoid_activation; for(int i=0; i<n_hidden; i++) { h[i] = propup(v, W[i], hbias[i]); } for(int i=0; i<n_visible; i++) { pre_sigmoid_activation = 0.0; for(int j=0; j<n_hidden; j++) { pre_sigmoid_activation += W[j][i] * h[j]; } pre_sigmoid_activation += vbias[i]; reconstructed_v[i] = sigmoid(pre_sigmoid_activation); } delete[] h; } void RBM::printParams() { cout << "\n"; cout << "W is: \n"; for(int i=0; i<5; i++) { for(int j=0; j<5; j++) { cout << W[i][j] << " "; } cout << "\n"; } cout << "..." << endl; cout << "hbias is: \n"; for(int i=0; i<5; i++) cout << hbias[i] << "\n"; cout << "..." << endl; cout << "vbias is: \n"; for(int i=0; i<5; i++) cout << vbias[i] << "\n"; cout << "..." 
<< endl; } void RBM::saveWeightMatrix() { // cout << "baseline saveWeightMatrix" << endl; copy_matrices_to_host(); matrixToArray (W, WArray, n_hidden, n_visible); string wFilename(MATRIX_FILENAME); saveMatrix(WArray, (size_t) n_hidden, (size_t) n_visible, wFilename); string hbiasFilename("dat_files/hbias.dat"); saveArray(hbias, (size_t) n_hidden, hbiasFilename); string vbiasFilename("dat_files/vbias.dat"); saveArray(vbias, (size_t) n_visible, vbiasFilename); } //NOTE: loadWeightMatrix is not complete // void RBM::loadWeightMatrix() { // string filename(MATRIX_FILENAME); // size_t num_rows; // size_t num_cols; // loadArray(WArray, num_rows, num_cols, filename); // if((num_rows != n_hidden) || (num_cols != n_visible)) { // cout << "WARNING: matrix found in " << filename << " has dimensions " // << num_rows << " X " << num_cols << " while dimensions " << // n_hidden << " X " << n_visible << " were expected from // } // } void RBM::printExpResult(DTYPE vones, DTYPE hones) { cout << "tot 1s in v: " <<tot_vones << "\n"; cout << "tot 1s in h: " <<tot_hones << "\n"; cout << "average 0s in v: " << (DTYPE)n_visible - vones << "\n"; cout << "average 0s in h: " << (DTYPE)n_hidden - hones << "\n\n"; } }
980c19148284a771e86b5fff04c2d9cf3885cfc5.cu
/* This file is the implemetation of RBM following the original algorithm. It is used as the baseline algorithm for comparison. It has been rewritten for CUDA Notes on code style: Ptrs on the host that point to device arrays should be prefixed with "dev_". __constant__ CUDA memory should be prefixed with "const_". The CUDA toolkit documentation says: "For the highest quality parallel pseudorandom number generation, each experiment should be assigned a unique seed. Within an experiment, each thread of computation should be assigned a unique sequence number. If an experiment spans multiple kernel launches, it is recommended that threads between kernel launches be given the same seed, and sequence numbers be assigned in a monotonically increasing way. If the same configuration of threads is launched, random state can be preserved in global memory between launches to avoid state setup time. " With this in mind, this code will allocate a curandState_t object for each thread, and init each with a different seed and sequence number, where the seed is shifted by the current time. Note that the dW array is now completely in shared memory during the write_results_to_memory __global__ function. Question: Could I get rid of the nv_means array and just directly compute the nv_samples array? */ #include <iostream> #include <fstream> #include <cstdlib> #include <fstream> #include <math.h> #include <vector> #include <ctime> #include <algorithm> #include <numeric> #include <string.h> #include "../include/rbm_baseline.h" #include "../include/utils.h" #include "../include/cuda_utils.h" #include "../include/kernels.h" #include <curand_kernel.h> #include <time.h> // #include <boost/date_time/posix_time/posix_time.hpp> using namespace std; using namespace utils; namespace baseline { /* RBM constructor */ //Note that only one of these RBM classes should be run at a //time, since the constant CUDA memory will be overwritten by //secondary classes. RBM::RBM(int size, int n_v, int n_h, int b_size, int k, DTYPE **w, DTYPE *hb, DTYPE *vb, int data_num_rows, int data_num_cols) { // cerr << "RBM baseline constructor\n"; cudaStreamCreate(&stream); N = size; n_visible = n_v; cudaMemcpyToSymbol(const_n_visible , &n_visible , sizeof(int)); n_hidden = n_h; cudaMemcpyToSymbol(const_n_hidden , &n_hidden , sizeof(int)); batch_size = b_size; cudaMemcpyToSymbol(const_batch_size, &batch_size, sizeof(int)); this->k = k; cudaMemcpyToSymbol(const_k , &k , sizeof(int)); this->data_num_rows = data_num_rows; this->data_num_cols = data_num_cols; //Number of curand objects per batch_i: curand_batch_width = n_visible > n_hidden ? n_visible : n_hidden; int num_curand_states = batch_size * curand_batch_width; cudaMalloc((void**)&dev_curand_states, num_curand_states * sizeof(curandState_t)); int num_blocks = (num_curand_states / MAX_THREADS) + 1; #ifdef RANDOM_RUNS init_curand<<<num_blocks, MAX_THREADS>>>(dev_curand_states, num_curand_states, time(NULL)); #else init_curand<<<num_blocks, MAX_THREADS>>>(dev_curand_states, num_curand_states, 0); #endif CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaGetLastError()); //========================== //Allocate all device memory //========================== // allocate_special_memory(); // CUDA_CHECK(cudaMallocPitch((void**)&dev_W, &W_pitch, // n_visible * sizeof(DTYPE), n_hidden)); // cout << "Weight matrix pitch is " << W_pitch << " bytes." << endl; // cout << "This implies that " << (W_pitch - (n_visible * sizeof(DTYPE))) // << " bytes extra are being used per row." 
<< endl; // CUDA_CHECK(cudaMalloc((void**)&dev_data, data_num_rows * data_num_cols * sizeof(int))); CUDA_CHECK(cudaMalloc((void**)&dev_hbias , n_hidden * sizeof(DTYPE))); CUDA_CHECK(cudaMalloc((void**)&dev_vbias , n_visible * sizeof(DTYPE))); CUDA_CHECK(cudaMalloc((void**)&dev_dhbias, n_hidden * sizeof(DTYPE))); CUDA_CHECK(cudaMalloc((void**)&dev_dvbias, n_visible * sizeof(DTYPE))); CUDA_CHECK(cudaMemset(dev_hbias, 0, n_hidden * sizeof(DTYPE))); CUDA_CHECK(cudaMemset(dev_vbias, 0, n_visible * sizeof(DTYPE))); CUDA_CHECK(cudaMalloc((void**)&dev_tot_vones_temp, sizeof(int))); CUDA_CHECK(cudaMalloc((void**)&dev_tot_hones_temp, sizeof(int))); CUDA_CHECK(cudaMalloc((void**)&dev_tot_vones , sizeof(int))); CUDA_CHECK(cudaMalloc((void**)&dev_tot_hones , sizeof(int))); CUDA_CHECK(cudaMalloc((void**)&dev_ph_mean_batch , sizeof(DTYPE) * n_hidden * batch_size)); CUDA_CHECK(cudaMalloc((void**)&dev_nv_means_batch , sizeof(DTYPE) * n_visible * batch_size)); CUDA_CHECK(cudaMalloc((void**)&dev_nh_means_batch , sizeof(DTYPE) * n_hidden * batch_size)); // cudaMalloc((void**)&dev_ph_sample_batch , sizeof(int ) * n_hidden * batch_size); // cudaMalloc((void**)&dev_nv_samples_batch, sizeof(int ) * n_visible * batch_size); // cudaMalloc((void**)&dev_nh_samples_batch, sizeof(int ) * n_hidden * batch_size); last_k = 0; /* parameters for debugging and development purpose */ tot_vones = 0; tot_hones = 0; tot_vones_temp = 0; tot_hones_temp = 0; /* initializing learning parameters */ if(w == NULL) { WArray = new DTYPE[n_hidden * n_visible]; W = new DTYPE*[n_hidden]; for(int i=0; i<n_hidden; i++) W[i] = new DTYPE[n_visible]; DTYPE a = 1.0 / n_visible; for(int i=0; i<n_hidden; i++) { for(int j=0; j<n_visible; j++) { W[i][j] = uniform(-a, a); } } } else { W = w; } if(hb == NULL) { hbias = new DTYPE[n_hidden]; for(int i=0; i<n_hidden; i++) hbias[i] = 0; } else { hbias = hb; } if(vb == NULL) { vbias = new DTYPE[n_visible]; for(int i=0; i<n_visible; i++) vbias[i] = 0; } else { vbias = vb; } /* initialize gradient for updating the parameters when batch_size >= 1 */ if(batch_size != 0) { dhbias = new DTYPE[n_hidden]; for(int i=0; i<n_hidden; i++) dhbias[i] = 0; dvbias = new DTYPE[n_visible]; for(int i=0; i<n_visible; i++) dvbias[i] = 0; } } //This function is for memory that changes in terms of its //allocation in child classes. 
void RBM::allocate_special_memory() { this->data = data; cudaMalloc((void**)&dev_ph_sample_batch , sizeof(int) * n_hidden * batch_size); cudaMalloc((void**)&dev_nv_samples_batch, sizeof(int) * n_visible * batch_size); cudaMalloc((void**)&dev_nh_samples_batch, sizeof(int) * n_hidden * batch_size); CUDA_CHECK(cudaMallocPitch((void**)&dev_W, &W_pitch, n_visible * sizeof(DTYPE), n_hidden)); matrixToArray (W, WArray, n_hidden, n_visible); CUDA_CHECK(cudaMemcpy2D(dev_W, W_pitch, WArray, n_visible * sizeof(DTYPE), n_visible * sizeof(DTYPE), n_hidden, cudaMemcpyHostToDevice)); } /* RBM destructor */ RBM::~RBM() { // saveWeightMatrix(); if(batch_size != 0) { for(int i=0; i<n_hidden; i++) { delete[] W[i]; } delete[] dhbias; delete[] dvbias; } else { for(int i=0; i<n_hidden; i++) delete[] W[i]; } delete[] W; delete[] hbias; delete[] vbias; delete[] WArray; cudaStreamDestroy(stream); cudaFree(dev_curand_states); cudaFree(dev_ph_mean_batch); cudaFree(dev_ph_sample_batch); cudaFree(dev_nv_means_batch); cudaFree(dev_nv_samples_batch); cudaFree(dev_nh_means_batch); cudaFree(dev_nh_samples_batch); cudaFree(dev_W); cudaFree(dev_hbias); cudaFree(dev_vbias); cudaFree(dev_dhbias); cudaFree(dev_dvbias); cudaFree(dev_tot_vones_temp); cudaFree(dev_tot_hones_temp); cudaFree(dev_tot_vones); cudaFree(dev_tot_hones); } /*This function propagates the visible units activation upwards to the hidden units*/ DTYPE RBM::propup(DTYPE *v, DTYPE *w, DTYPE b) { DTYPE pre_sigmoid_activation = 0.0; // pre_sigmoid_activation = inner_product(w, w+n_visible, v, pre_sigmoid_activation); for(int j=0; j<n_visible; j++) { pre_sigmoid_activation += w[j] * v[j]; } pre_sigmoid_activation += b; return (1.0 / (1.0 + exp(-pre_sigmoid_activation))); //sigmoid(pre_sigmoid_activation); } void RBM::reset_d_arrays() { //Since dW_pitch is the width of the dev_dW array rows, we //multiply by the number of rows (n_hidden) to get the number of //bytes to reset: CUDA_CHECK(cudaMemset(dev_dhbias, 0, n_hidden * sizeof(DTYPE))); CUDA_CHECK(cudaMemset(dev_dvbias, 0, n_visible * sizeof(DTYPE))); // CUDA_CHECK(cudaMemset(dev_ph_mean_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(cudaMemset(dev_nv_means_batch , 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(cudaMemset(dev_nh_means_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(cudaMemset(dev_ph_sample_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(cudaMemset(dev_nv_samples_batch, 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(cudaMemset(dev_nh_samples_batch, 0, sizeof(DTYPE) * n_hidden * batch_size)); } /* Contrastive Divergence CUDA kernel*/ void RBM::contrastive_divergence(int curr_i, DTYPE lr, DTYPE wc, DTYPE * dev_data) { reset_d_arrays(); CUDA_CHECK(cudaMemcpy(dev_tot_vones_temp, &tot_vones_temp, sizeof(int), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(dev_tot_hones_temp, &tot_hones_temp, sizeof(int), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(dev_tot_vones , &tot_vones , sizeof(int), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(dev_tot_hones , &tot_hones , sizeof(int), cudaMemcpyHostToDevice)); //============== //Execute Kernel //============== if(batch_size > MAX_THREADS) { cerr << "ERROR: batch_size cannot exceed 1024" << endl; } // GET_TIME(k1_t1); // cerr << "time: " << k1_t1 << endl; int n_blocks = 1 + (batch_size - 1) / NUM_BATCH_THREADS_PER_BLOCK; cd_gpu <<< n_blocks, NUM_BATCH_THREADS_PER_BLOCK, 0, stream>>> (dev_data, curr_i, data_num_cols, dev_tot_vones_temp, dev_tot_hones_temp, dev_tot_vones, 
dev_tot_hones, dev_W, W_pitch, dev_hbias, dev_vbias, dev_curand_states, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, curand_batch_width); cudaDeviceSynchronize(); // GET_TIME(k1_t2); // cerr << "k1 time: " << get_duration(k1_t1, k1_t2) << endl; CUDA_CHECK(cudaGetLastError()); // cerr << "Initiating write\n"; dim3 num_blocks, num_threads; dims_to_num_threads_and_blocks(n_visible, n_hidden, num_blocks, num_threads); // GET_TIME(k2_t1); write_results_to_memory <<< num_blocks, num_threads, 0, stream>>> (dev_data, dev_W, W_pitch, lr, wc, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, dev_hbias, dev_vbias, dev_dhbias, dev_dvbias, data_num_rows, data_num_cols, curr_i); cudaDeviceSynchronize(); // GET_TIME(k2_t2); // cerr << "k2 time: " << get_duration(k2_t1, k2_t2) << endl; CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaMemcpy(&tot_vones_temp, dev_tot_vones_temp, sizeof(int), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(&tot_hones_temp, dev_tot_hones_temp, sizeof(int), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(&tot_vones , dev_tot_vones , sizeof(int), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(&tot_hones , dev_tot_hones , sizeof(int), cudaMemcpyDeviceToHost)); } void RBM::copy_matrices_to_host() { // cerr << "RBM baseline copy_matrices_to_host\n"; CUDA_CHECK(cudaMemcpy2D(WArray, n_visible * sizeof(DTYPE), dev_W, W_pitch, n_visible * sizeof(DTYPE), n_hidden, cudaMemcpyDeviceToHost)); arrayToMatrix(WArray, W, n_hidden, n_visible); CUDA_CHECK(cudaMemcpy(vbias, dev_vbias, n_visible * sizeof(DTYPE), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(hbias, dev_hbias, n_hidden * sizeof(DTYPE), cudaMemcpyDeviceToHost)); } void RBM::reconstruct(DTYPE *v, DTYPE *reconstructed_v) { DTYPE *h = new DTYPE[n_hidden]; DTYPE pre_sigmoid_activation; for(int i=0; i<n_hidden; i++) { h[i] = propup(v, W[i], hbias[i]); } for(int i=0; i<n_visible; i++) { pre_sigmoid_activation = 0.0; for(int j=0; j<n_hidden; j++) { pre_sigmoid_activation += W[j][i] * h[j]; } pre_sigmoid_activation += vbias[i]; reconstructed_v[i] = sigmoid(pre_sigmoid_activation); } delete[] h; } void RBM::printParams() { cout << "\n"; cout << "W is: \n"; for(int i=0; i<5; i++) { for(int j=0; j<5; j++) { cout << W[i][j] << " "; } cout << "\n"; } cout << "..." << endl; cout << "hbias is: \n"; for(int i=0; i<5; i++) cout << hbias[i] << "\n"; cout << "..." << endl; cout << "vbias is: \n"; for(int i=0; i<5; i++) cout << vbias[i] << "\n"; cout << "..." 
<< endl; } void RBM::saveWeightMatrix() { // cout << "baseline saveWeightMatrix" << endl; copy_matrices_to_host(); matrixToArray (W, WArray, n_hidden, n_visible); string wFilename(MATRIX_FILENAME); saveMatrix(WArray, (size_t) n_hidden, (size_t) n_visible, wFilename); string hbiasFilename("dat_files/hbias.dat"); saveArray(hbias, (size_t) n_hidden, hbiasFilename); string vbiasFilename("dat_files/vbias.dat"); saveArray(vbias, (size_t) n_visible, vbiasFilename); } //NOTE: loadWeightMatrix is not complete // void RBM::loadWeightMatrix() { // string filename(MATRIX_FILENAME); // size_t num_rows; // size_t num_cols; // loadArray(WArray, num_rows, num_cols, filename); // if((num_rows != n_hidden) || (num_cols != n_visible)) { // cout << "WARNING: matrix found in " << filename << " has dimensions " // << num_rows << " X " << num_cols << " while dimensions " << // n_hidden << " X " << n_visible << " were expected from // } // } void RBM::printExpResult(DTYPE vones, DTYPE hones) { cout << "tot 1s in v: " <<tot_vones << "\n"; cout << "tot 1s in h: " <<tot_hones << "\n"; cout << "average 0s in v: " << (DTYPE)n_visible - vones << "\n"; cout << "average 0s in h: " << (DTYPE)n_hidden - hones << "\n\n"; } }
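/*
 * Editor's sketch (not part of the original sources): a hypothetical training driver
 * reconstructed from the public methods used in this file. It assumes DTYPE is float,
 * that rbm_baseline.h exposes these members publicly, that the caller has already
 * copied the row-major training data to the device as dev_data, and that curr_i in
 * contrastive_divergence() is the batch index (the cd_gpu kernel in kernels.h, not
 * shown here, defines its exact meaning). All sizes and hyperparameters below are
 * illustrative assumptions.
 */
static void example_rbm_training(float *dev_data, int num_rows, int num_cols) {
  const int n_visible  = num_cols;
  const int n_hidden   = 500;      // assumed
  const int batch_size = 64;       // assumed
  const int cd_k       = 1;        // CD-1
  baseline::RBM rbm(num_rows, n_visible, n_hidden, batch_size, cd_k,
                    NULL, NULL, NULL, num_rows, num_cols);
  rbm.allocate_special_memory();
  const float lr = 0.1f;           // assumed learning rate
  const float wc = 0.0002f;        // assumed weight cost
  const int epochs = 10;           // assumed
  const int num_batches = num_rows / batch_size;
  for (int epoch = 0; epoch < epochs; ++epoch) {
    for (int batch = 0; batch < num_batches; ++batch) {
      rbm.contrastive_divergence(batch, lr, wc, dev_data);
    }
  }
  rbm.copy_matrices_to_host();
  rbm.printParams();
  rbm.saveWeightMatrix();
}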
7c74550413f3bf2710fdae0ade87058c5c6fc8bd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "reduceSmemUnroll.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *g_idata = NULL; hipMalloc(&g_idata, XSIZE*YSIZE); int *g_odata = NULL; hipMalloc(&g_odata, XSIZE*YSIZE); unsigned int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( reduceSmemUnroll), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( reduceSmemUnroll), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( reduceSmemUnroll), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7c74550413f3bf2710fdae0ade87058c5c6fc8bd.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "reduceSmemUnroll.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *g_idata = NULL; cudaMalloc(&g_idata, XSIZE*YSIZE); int *g_odata = NULL; cudaMalloc(&g_odata, XSIZE*YSIZE); unsigned int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); reduceSmemUnroll<<<gridBlock,threadBlock>>>(g_idata,g_odata,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { reduceSmemUnroll<<<gridBlock,threadBlock>>>(g_idata,g_odata,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { reduceSmemUnroll<<<gridBlock,threadBlock>>>(g_idata,g_odata,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
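/*
 * Editor's sketch (not part of the original sources): the benchmarked kernel comes
 * from the included "reduceSmemUnroll.cu", which is not part of this listing. The
 * kernel below shows the usual shape of a shared-memory, 4-way-unrolled block
 * reduction and is only an assumption about that file. Also note that the harness
 * above allocates XSIZE*YSIZE bytes (not XSIZE*YSIZE*sizeof(int)) and passes n = 1,
 * so it effectively measures launch overhead rather than a full-size reduction.
 */
#define SKETCH_DIM 128   // assumed 1D block size; blockDim.x must equal SKETCH_DIM

__global__ void reduceSmemUnrollSketch(int *g_idata, int *g_odata, unsigned int n) {
  __shared__ int smem[SKETCH_DIM];
  unsigned int tid = threadIdx.x;
  unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;

  // Each thread folds four block-strided elements into one partial sum.
  int tmp = 0;
  if (idx + 3 * blockDim.x < n) {
    tmp = g_idata[idx] + g_idata[idx + blockDim.x]
        + g_idata[idx + 2 * blockDim.x] + g_idata[idx + 3 * blockDim.x];
  }
  smem[tid] = tmp;
  __syncthreads();

  // In-place tree reduction in shared memory down to one warp.
  for (unsigned int stride = blockDim.x / 2; stride > 32; stride >>= 1) {
    if (tid < stride) smem[tid] += smem[tid + stride];
    __syncthreads();
  }

  // Final warp unrolled with volatile accesses (pre-cooperative-groups idiom).
  if (tid < 32) {
    volatile int *vsmem = smem;
    vsmem[tid] += vsmem[tid + 32];
    vsmem[tid] += vsmem[tid + 16];
    vsmem[tid] += vsmem[tid +  8];
    vsmem[tid] += vsmem[tid +  4];
    vsmem[tid] += vsmem[tid +  2];
    vsmem[tid] += vsmem[tid +  1];
  }
  if (tid == 0) g_odata[blockIdx.x] = smem[0];
}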
d64f782e9f5186b110111846e8f3655a320cab88.hip
// !!! This is a file automatically generated by hipify!!! #include "core/common.cuh" #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <iostream> #include <vector> // bert softmax code modified from Nvidia's DeepLearningExamples // https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v3.1/fastertransformer/cuda/open_attention.cu#L1399-L1583 template <typename T> __global__ void softmax_kernel_bert(T *qk_buf, const T* attr_mask,const int head_num, const int seq_len, const T scalar) { int batch_id = blockIdx.x / head_num; int qk_offset = blockIdx.x * seq_len * seq_len; int mask_offset = batch_id * seq_len * seq_len; __shared__ float s_sum, s_max; for(int i = 0; i < seq_len; ++i) { float qk = threadIdx.x < seq_len ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f; float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f; mask_val = (1.0f - mask_val) * -10000.0f; float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val): -1e20f; float max_val = blockReduceMax<float>(tmp); if(threadIdx.x == 0) s_max = max_val; __syncthreads(); qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f; float sum_val = blockReduceSum<float>(qk); if(threadIdx.x == 0) { s_sum = sum_val + 1e-6f; } __syncthreads(); if(threadIdx.x < seq_len) qk_buf[threadIdx.x + qk_offset] = (T)(qk / s_sum); qk_offset += seq_len; } } template <typename T> __global__ void softmax_kernel_v2_bert(T *qk_buf,const T* attr_mask, const int head_num, const int seq_len, const float scalar) { int batch_id = blockIdx.x / head_num / seq_len; int seq_id = blockIdx.x % seq_len; int qk_offset = blockIdx.x * seq_len; int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len; __shared__ float s_sum, s_max; float qk = threadIdx.x < seq_len ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f; float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f; mask_val = (1.0f - mask_val) * -10000.0f; float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val) : -1e20f; float max_val = blockReduceMax<float>(tmp); if(threadIdx.x == 0) s_max = max_val; __syncthreads(); float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f; float sum_val = blockReduceSum<float>(qk_tmp); if(threadIdx.x == 0) { s_sum = sum_val + 1e-6f; } __syncthreads(); if(threadIdx.x < seq_len) qk_buf[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum); } template <typename T> __global__ void softmax_kernel_v3_bert(T *qk_buf,const T* attr_mask, const int head_num, const int seq_len, const T scalar) { for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){ float tmp = -1e20f; int qk_offset; __shared__ float s_mean, s_max; if (threadIdx.x < seq_len){ qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x; int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x; float qk = static_cast<float>(qk_buf[qk_offset]); float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset])); mask_val = (1.0f - mask_val) * -10000.0f; tmp = qk * static_cast<float>(scalar) + mask_val; } float max_val = blockReduceMax<float>(tmp); if (threadIdx.x == 0){ s_max = max_val; } __syncthreads(); float qk_tmp = threadIdx.x < seq_len ? 
__expf(tmp - s_max) : 0.0f; float sum_val = blockReduceSum<float>(qk_tmp); if (threadIdx.x == 0){ s_mean = sum_val + 1e-6f; s_mean = __fdividef(1.0f, s_mean); } __syncthreads(); if(threadIdx.x < seq_len) qk_buf[qk_offset] = (T)(qk_tmp * s_mean); } } template <> __global__ void softmax_kernel_v3_bert(half *qk_buf, const half* attr_mask, const int head_num, const int seq_len, const half scalar) { int threadIdx2 = threadIdx.x << 1; half2* qk_bufhalf2Ptr = (half2*) qk_buf; const half2* attr_mask_half2Ptr = (const half2*) attr_mask; __shared__ float s_mean, s_max; for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){ int qk_offset; half2 tmp = __float2half2_rn(0.0f); float max_val = -1e20f; half2 qk; if (threadIdx2 < seq_len){ qk_offset = ((((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len) >> 1) + threadIdx.x; int mask_offset = (((blockIdx.y * seq_len + seq_id) * seq_len) >> 1) + threadIdx.x; qk = qk_bufhalf2Ptr[qk_offset]; half2 mask_val = __ldg(&attr_mask_half2Ptr[mask_offset]); half2 mask_val_tmp = __hmul2(__hsub2(__float2half2_rn(1.0f), mask_val), __float2half2_rn(-10000.0f)); tmp = __hadd2(__hmul2(__half2half2(scalar), qk), mask_val_tmp); max_val = fmax((float)tmp.x, (float)tmp.y); } max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val); if (threadIdx.x == 0){ s_max = max_val; } __syncthreads(); if (threadIdx2 < seq_len){ tmp = h2exp(__hsub2(tmp, __float2half2_rn(s_max))); } float sum_val = blockDim.x <= 32 ? warpReduceSum((float)(tmp.x + tmp.y)) : blockReduceSum<float>((float)(tmp.x + tmp.y)); if (threadIdx.x == 0){ s_mean = sum_val + 1e-6f; s_mean = __fdividef(1.0f, s_mean); } __syncthreads(); if(threadIdx2 < seq_len){ qk = __hmul2(tmp, __float2half2_rn(s_mean)); qk_bufhalf2Ptr[qk_offset] = qk; } } } template <typename T> __global__ void softmax_kernel_v3_LE32_bert(T *qk_buf, const T* attr_mask, const int head_num, const int seq_len, const T scalar) { for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) { int qk_offset; __shared__ float s_mean, s_max; float tmp = -1e20f; if (threadIdx.x < seq_len){ qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x; int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x; float qk = static_cast<float>(qk_buf[qk_offset]); float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset])); mask_val = (1.0f - mask_val) * -10000.0f; tmp = static_cast<float>(qk) * static_cast<float>(scalar) + mask_val; } float max_val = warpReduceMax<float>(tmp); if (threadIdx.x == 0){ s_max = max_val; } __syncthreads(); tmp = threadIdx.x < seq_len ? 
__expf(tmp - s_max) : 0.0f; float sum_val = warpReduceSum<float>(tmp); if (threadIdx.x == 0){ s_mean = sum_val + 1e-6f; s_mean = __fdividef(1.0f, s_mean); } __syncthreads(); if(threadIdx.x < seq_len) qk_buf[qk_offset] = (T)(tmp * s_mean); } } template <class T> void bert_softmax_kernel(void *qk_buf,void *attr_mask, const int &batch_size, const int &head_num, const int &seq_len, const float &scalar, const hipStream_t stream) { dim3 grid, block; //deal with odd seq_len if (seq_len % 2 != 0) { if (seq_len <= 32) block.x = 32; else if (seq_len > 32 && seq_len <= 64) block.x = 64; else if (seq_len > 64 && seq_len <= 128) block.x = 128; else if (seq_len > 128 && seq_len <= 256) block.x = 256; else if (seq_len > 256 && seq_len <= 512) block.x = 512; else block.x = 1024; if (batch_size * head_num <= 120) { grid.x = batch_size * head_num * seq_len; hipLaunchKernelGGL(( softmax_kernel_v2_bert<T>), dim3(grid), dim3(block), 0, stream, (T*)qk_buf, (T*)attr_mask, head_num, seq_len, scalar); } else { grid.x = batch_size * head_num; hipLaunchKernelGGL(( softmax_kernel_bert<T>), dim3(grid), dim3(block), 0, stream, (T*)qk_buf, (T*)attr_mask, head_num, seq_len, scalar); } } //deal with even seq_len else { grid.x = seq_len; grid.y = batch_size; grid.z = head_num; if (seq_len <= 32) { block.x = 32; hipLaunchKernelGGL(( softmax_kernel_v3_LE32_bert<T>), dim3(grid), dim3(block), 0, stream, (T*)qk_buf, (T*)attr_mask, head_num, seq_len, scalar); } else { if (sizeof(T) == sizeof(float)) { block.x = (seq_len + 31) / 32 * 32; hipLaunchKernelGGL(( softmax_kernel_v3_bert<T>), dim3(grid), dim3(block), 0, stream, (T*)qk_buf, (T*)attr_mask, head_num, seq_len, scalar); } else { block.x = (seq_len / 2 + 31) / 32 * 32; hipLaunchKernelGGL(( softmax_kernel_v3_bert<T>), dim3(grid), dim3(block), 0, stream, (T*)qk_buf, (T*)attr_mask, head_num, seq_len, scalar); } } } } template void bert_softmax_kernel<float>(void *qk_buf, void* attr_mask,const int& batch_size, const int& head_num, const int& seq_len, const float& scalar, const hipStream_t stream); template void bert_softmax_kernel<half>(void *qk_buf, void* attr_mask,const int& batch_size, const int& head_num, const int& seq_len, const float& scalar, const hipStream_t stream);
d64f782e9f5186b110111846e8f3655a320cab88.cu
#include "core/common.cuh" #include <cuda_runtime.h> #include <cuda_fp16.h> #include <iostream> #include <vector> // bert softmax code modified from Nvidia's DeepLearningExamples // https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v3.1/fastertransformer/cuda/open_attention.cu#L1399-L1583 template <typename T> __global__ void softmax_kernel_bert(T *qk_buf, const T* attr_mask,const int head_num, const int seq_len, const T scalar) { int batch_id = blockIdx.x / head_num; int qk_offset = blockIdx.x * seq_len * seq_len; int mask_offset = batch_id * seq_len * seq_len; __shared__ float s_sum, s_max; for(int i = 0; i < seq_len; ++i) { float qk = threadIdx.x < seq_len ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f; float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f; mask_val = (1.0f - mask_val) * -10000.0f; float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val): -1e20f; float max_val = blockReduceMax<float>(tmp); if(threadIdx.x == 0) s_max = max_val; __syncthreads(); qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f; float sum_val = blockReduceSum<float>(qk); if(threadIdx.x == 0) { s_sum = sum_val + 1e-6f; } __syncthreads(); if(threadIdx.x < seq_len) qk_buf[threadIdx.x + qk_offset] = (T)(qk / s_sum); qk_offset += seq_len; } } template <typename T> __global__ void softmax_kernel_v2_bert(T *qk_buf,const T* attr_mask, const int head_num, const int seq_len, const float scalar) { int batch_id = blockIdx.x / head_num / seq_len; int seq_id = blockIdx.x % seq_len; int qk_offset = blockIdx.x * seq_len; int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len; __shared__ float s_sum, s_max; float qk = threadIdx.x < seq_len ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f; float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f; mask_val = (1.0f - mask_val) * -10000.0f; float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val) : -1e20f; float max_val = blockReduceMax<float>(tmp); if(threadIdx.x == 0) s_max = max_val; __syncthreads(); float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f; float sum_val = blockReduceSum<float>(qk_tmp); if(threadIdx.x == 0) { s_sum = sum_val + 1e-6f; } __syncthreads(); if(threadIdx.x < seq_len) qk_buf[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum); } template <typename T> __global__ void softmax_kernel_v3_bert(T *qk_buf,const T* attr_mask, const int head_num, const int seq_len, const T scalar) { for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){ float tmp = -1e20f; int qk_offset; __shared__ float s_mean, s_max; if (threadIdx.x < seq_len){ qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x; int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x; float qk = static_cast<float>(qk_buf[qk_offset]); float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset])); mask_val = (1.0f - mask_val) * -10000.0f; tmp = qk * static_cast<float>(scalar) + mask_val; } float max_val = blockReduceMax<float>(tmp); if (threadIdx.x == 0){ s_max = max_val; } __syncthreads(); float qk_tmp = threadIdx.x < seq_len ? 
__expf(tmp - s_max) : 0.0f; float sum_val = blockReduceSum<float>(qk_tmp); if (threadIdx.x == 0){ s_mean = sum_val + 1e-6f; s_mean = __fdividef(1.0f, s_mean); } __syncthreads(); if(threadIdx.x < seq_len) qk_buf[qk_offset] = (T)(qk_tmp * s_mean); } } template <> __global__ void softmax_kernel_v3_bert(half *qk_buf, const half* attr_mask, const int head_num, const int seq_len, const half scalar) { int threadIdx2 = threadIdx.x << 1; half2* qk_bufhalf2Ptr = (half2*) qk_buf; const half2* attr_mask_half2Ptr = (const half2*) attr_mask; __shared__ float s_mean, s_max; for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){ int qk_offset; half2 tmp = __float2half2_rn(0.0f); float max_val = -1e20f; half2 qk; if (threadIdx2 < seq_len){ qk_offset = ((((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len) >> 1) + threadIdx.x; int mask_offset = (((blockIdx.y * seq_len + seq_id) * seq_len) >> 1) + threadIdx.x; qk = qk_bufhalf2Ptr[qk_offset]; half2 mask_val = __ldg(&attr_mask_half2Ptr[mask_offset]); half2 mask_val_tmp = __hmul2(__hsub2(__float2half2_rn(1.0f), mask_val), __float2half2_rn(-10000.0f)); tmp = __hadd2(__hmul2(__half2half2(scalar), qk), mask_val_tmp); max_val = fmax((float)tmp.x, (float)tmp.y); } max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val); if (threadIdx.x == 0){ s_max = max_val; } __syncthreads(); if (threadIdx2 < seq_len){ tmp = h2exp(__hsub2(tmp, __float2half2_rn(s_max))); } float sum_val = blockDim.x <= 32 ? warpReduceSum((float)(tmp.x + tmp.y)) : blockReduceSum<float>((float)(tmp.x + tmp.y)); if (threadIdx.x == 0){ s_mean = sum_val + 1e-6f; s_mean = __fdividef(1.0f, s_mean); } __syncthreads(); if(threadIdx2 < seq_len){ qk = __hmul2(tmp, __float2half2_rn(s_mean)); qk_bufhalf2Ptr[qk_offset] = qk; } } } template <typename T> __global__ void softmax_kernel_v3_LE32_bert(T *qk_buf, const T* attr_mask, const int head_num, const int seq_len, const T scalar) { for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) { int qk_offset; __shared__ float s_mean, s_max; float tmp = -1e20f; if (threadIdx.x < seq_len){ qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x; int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x; float qk = static_cast<float>(qk_buf[qk_offset]); float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset])); mask_val = (1.0f - mask_val) * -10000.0f; tmp = static_cast<float>(qk) * static_cast<float>(scalar) + mask_val; } float max_val = warpReduceMax<float>(tmp); if (threadIdx.x == 0){ s_max = max_val; } __syncthreads(); tmp = threadIdx.x < seq_len ? 
__expf(tmp - s_max) : 0.0f; float sum_val = warpReduceSum<float>(tmp); if (threadIdx.x == 0){ s_mean = sum_val + 1e-6f; s_mean = __fdividef(1.0f, s_mean); } __syncthreads(); if(threadIdx.x < seq_len) qk_buf[qk_offset] = (T)(tmp * s_mean); } } template <class T> void bert_softmax_kernel(void *qk_buf,void *attr_mask, const int &batch_size, const int &head_num, const int &seq_len, const float &scalar, const cudaStream_t stream) { dim3 grid, block; //deal with odd seq_len if (seq_len % 2 != 0) { if (seq_len <= 32) block.x = 32; else if (seq_len > 32 && seq_len <= 64) block.x = 64; else if (seq_len > 64 && seq_len <= 128) block.x = 128; else if (seq_len > 128 && seq_len <= 256) block.x = 256; else if (seq_len > 256 && seq_len <= 512) block.x = 512; else block.x = 1024; if (batch_size * head_num <= 120) { grid.x = batch_size * head_num * seq_len; softmax_kernel_v2_bert<T><<<grid, block, 0, stream>>>((T*)qk_buf, (T*)attr_mask, head_num, seq_len, scalar); } else { grid.x = batch_size * head_num; softmax_kernel_bert<T><<<grid, block, 0, stream>>>((T*)qk_buf, (T*)attr_mask, head_num, seq_len, scalar); } } //deal with even seq_len else { grid.x = seq_len; grid.y = batch_size; grid.z = head_num; if (seq_len <= 32) { block.x = 32; softmax_kernel_v3_LE32_bert<T><<<grid, block, 0, stream>>>((T*)qk_buf, (T*)attr_mask, head_num, seq_len, scalar); } else { if (sizeof(T) == sizeof(float)) { block.x = (seq_len + 31) / 32 * 32; softmax_kernel_v3_bert<T><<<grid, block, 0, stream>>>((T*)qk_buf, (T*)attr_mask, head_num, seq_len, scalar); } else { block.x = (seq_len / 2 + 31) / 32 * 32; softmax_kernel_v3_bert<T><<<grid, block, 0, stream>>>((T*)qk_buf, (T*)attr_mask, head_num, seq_len, scalar); } } } } template void bert_softmax_kernel<float>(void *qk_buf, void* attr_mask,const int& batch_size, const int& head_num, const int& seq_len, const float& scalar, const cudaStream_t stream); template void bert_softmax_kernel<half>(void *qk_buf, void* attr_mask,const int& batch_size, const int& head_num, const int& seq_len, const float& scalar, const cudaStream_t stream);
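A minimal host-side driver sketch for the dispatcher above, assuming it is compiled in the same translation unit as the kernels; the [batch, head, seq_len, seq_len] score layout and the 1/sqrt(head_dim) scaling are inferred from the kernel indexing, and every name below is illustrative rather than part of the original file.

#include <cuda_runtime.h>
#include <cmath>

void run_bert_softmax_example() {
    const int batch_size = 2, head_num = 12, seq_len = 128, head_dim = 64;
    const size_t score_elems = (size_t)batch_size * head_num * seq_len * seq_len;
    const size_t mask_elems  = (size_t)batch_size * seq_len * seq_len;

    float *d_qk = nullptr, *d_mask = nullptr;
    cudaMalloc(&d_qk,   score_elems * sizeof(float));
    cudaMalloc(&d_mask, mask_elems  * sizeof(float));
    // ... fill d_qk with raw Q*K^T scores and d_mask with a 1.0/0.0 attention mask ...

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // The kernels fold the 1/sqrt(d_k) attention scaling into `scalar`.
    const float scalar = 1.0f / sqrtf((float)head_dim);
    bert_softmax_kernel<float>(d_qk, d_mask, batch_size, head_num, seq_len, scalar, stream);

    cudaStreamSynchronize(stream);
    cudaStreamDestroy(stream);
    cudaFree(d_qk);
    cudaFree(d_mask);
}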
52a3b0209194c03111124e00390fb532cf41e3da.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "isEqualCuda.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const uint8_t *in1 = NULL; hipMalloc(&in1, XSIZE*YSIZE); uint32_t rowSizeIn1 = XSIZE*YSIZE; const uint8_t *in2 = NULL; hipMalloc(&in2, XSIZE*YSIZE); uint32_t rowSizeIn2 = XSIZE*YSIZE; uint32_t width = XSIZE; uint32_t height = YSIZE; uint32_t *isEqual = NULL; hipMalloc(&isEqual, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( isEqualCuda), dim3(gridBlock),dim3(threadBlock), 0, 0, in1,rowSizeIn1,in2,rowSizeIn2,width,height,isEqual); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( isEqualCuda), dim3(gridBlock),dim3(threadBlock), 0, 0, in1,rowSizeIn1,in2,rowSizeIn2,width,height,isEqual); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( isEqualCuda), dim3(gridBlock),dim3(threadBlock), 0, 0, in1,rowSizeIn1,in2,rowSizeIn2,width,height,isEqual); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
52a3b0209194c03111124e00390fb532cf41e3da.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "isEqualCuda.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const uint8_t *in1 = NULL; cudaMalloc(&in1, XSIZE*YSIZE); uint32_t rowSizeIn1 = XSIZE*YSIZE; const uint8_t *in2 = NULL; cudaMalloc(&in2, XSIZE*YSIZE); uint32_t rowSizeIn2 = XSIZE*YSIZE; uint32_t width = XSIZE; uint32_t height = YSIZE; uint32_t *isEqual = NULL; cudaMalloc(&isEqual, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); isEqualCuda<<<gridBlock,threadBlock>>>(in1,rowSizeIn1,in2,rowSizeIn2,width,height,isEqual); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { isEqualCuda<<<gridBlock,threadBlock>>>(in1,rowSizeIn1,in2,rowSizeIn2,width,height,isEqual); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { isEqualCuda<<<gridBlock,threadBlock>>>(in1,rowSizeIn1,in2,rowSizeIn2,width,height,isEqual); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
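The benchmark above rounds XSIZE and YSIZE up with while-loops before dividing by the block dimensions; the same grid follows from a single ceiling division. A small sketch of that idiom, with an illustrative helper name:

#include <cuda_runtime.h>

// Grid sizing via ceiling division; launches the same grid as the
// round-up-then-divide while-loops in the benchmark above.
static dim3 make_grid(int width, int height, int block_x, int block_y) {
    return dim3((width  + block_x - 1) / block_x,
                (height + block_y - 1) / block_y);
}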
8dcd63bc8b718cbe15fdf62981c6079a9704e26d.hip
// !!! This is a file automatically generated by hipify!!! #include "common/book.h" int main(void){ hipDeviceProp_t devprop; int count; hipGetDeviceCount(&count); for (int i = 0; i < count; i++) { hipGetDeviceProperties(&devprop, i); printf(" ------ Info o CUDA napravi %d ------ ", i); printf("Ime naprave: %s \n", devprop.name); printf("Hitrost ure: %d \n", devprop.clockRate); printf("Kolicina globalnega pomnilnika: %d \n", devprop.totalGlobalMem); printf("Stevilo multiprocesorjev SM: %d\n", devprop.multiProcessorCount); printf("Kolicina skupnega pomnilnika v SM: %d \n", devprop.sharedMemPerMultiprocessor); printf("Stevilo registrov v SM: %d \n", devprop.regsPerMultiprocessor); printf("Max stevilo niti v bloku: %d \n", devprop.maxThreadsPerBlock); printf("Stevilo registrov na blok: %d \n", devprop.regsPerBlock); printf("Kolicina skupnega pomnilnika za blok: %d \n", devprop.sharedMemPerBlock); printf("Max stevilo niti na SM: %d \n", devprop.maxThreadsPerMultiProcessor); printf("Velikost snopa (warp): %d\n", devprop.warpSize); } }
8dcd63bc8b718cbe15fdf62981c6079a9704e26d.cu
#include "common/book.h" int main(void){ cudaDeviceProp devprop; int count; cudaGetDeviceCount(&count); for (int i = 0; i < count; i++) { cudaGetDeviceProperties(&devprop, i); printf(" ------ Info o CUDA napravi %d ------ ", i); printf("Ime naprave: %s \n", devprop.name); printf("Hitrost ure: %d \n", devprop.clockRate); printf("Kolicina globalnega pomnilnika: %d \n", devprop.totalGlobalMem); printf("Stevilo multiprocesorjev SM: %d\n", devprop.multiProcessorCount); printf("Kolicina skupnega pomnilnika v SM: %d \n", devprop.sharedMemPerMultiprocessor); printf("Stevilo registrov v SM: %d \n", devprop.regsPerMultiprocessor); printf("Max stevilo niti v bloku: %d \n", devprop.maxThreadsPerBlock); printf("Stevilo registrov na blok: %d \n", devprop.regsPerBlock); printf("Kolicina skupnega pomnilnika za blok: %d \n", devprop.sharedMemPerBlock); printf("Max stevilo niti na SM: %d \n", devprop.maxThreadsPerMultiProcessor); printf("Velikost snopa (warp): %d\n", devprop.warpSize); } }
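Several cudaDeviceProp fields printed above (totalGlobalMem, sharedMemPerBlock, sharedMemPerMultiprocessor) are size_t values, so the %d conversions can misreport them on devices with more than 2 GB of memory. A small sketch printing the same fields with %zu; the helper name is illustrative:

#include <cstdio>
#include <cuda_runtime.h>

static void print_memory_sizes(const cudaDeviceProp &p) {
    printf("Global memory (bytes): %zu\n", p.totalGlobalMem);
    printf("Shared memory per SM (bytes): %zu\n", p.sharedMemPerMultiprocessor);
    printf("Shared memory per block (bytes): %zu\n", p.sharedMemPerBlock);
}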
eb699cd2414c8ce7b140dcda45e18339c92ddb25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<time.h> #include<algorithm> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <thrust/sort.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> //#define PSIZE 10 //#define NGEN 500000 #define MUT_PROB 0.05 struct Individual { float fitness; unsigned int chromossomes; }; struct Population { float *fitness; unsigned int *chromossomes; }; __host__ __device__ bool operator<(const Individual &i, const Individual &j) { return (i.fitness > j.fitness); } __host__ __device__ float operator+(const Individual &i, const Individual &j) { return (i.fitness + j.fitness); } __host__ __device__ Individual individualSum (Individual i, Individual j) { Individual I; I.fitness = i.fitness + j.fitness; return I; } void printPop(Individual *population, int popSize, int print) { if(print != 0) { for(int i = 0; i < popSize; i++) { printf("%f - ", population[i].fitness); } } } __global__ void createPopulation(Population *population, unsigned int seed, hiprandState_t *states) { int id = blockIdx.x*blockDim.x + threadIdx.x; hiprand_init(seed, id, 0, &states[id]); population->fitness[id] = 0; population->chromossomes[id] = hiprand(&states[id]); } __global__ void fitness(Population *population, float *totalFitness) { int id = blockIdx.x; unsigned int mask = 0x3FF; __shared__ float a[3]; a[threadIdx.x] = (population->chromossomes[id] & (mask << (10 * threadIdx.x))) >> (10 * threadIdx.x); a[threadIdx.x] = (a[threadIdx.x] - 512)/100.0; __syncthreads(); if(threadIdx.x == 0) { float f = 1.0 / (1 + a[0]*a[0] + a[1]*a[1] + a[2]*a[2]); population->fitness[id] = f; atomicAdd(totalFitness, f); } } __global__ void reproduce(Population *population, Population *nextPopulation, int PSIZE, float *totalFitness, hiprandState_t *states) { int id = blockIdx.x*blockDim.x + threadIdx.x; unsigned int parents[2]; int temp = -1; float localTotalFitness = *totalFitness; //Selection for(int j = 0; j < 2; j++) { float p = hiprand_uniform(&states[id]) * localTotalFitness; float score = 0; for(int k = 0; k < PSIZE; k++) { if(k == temp) { continue; } score += population->fitness[k]; if(p < score) { parents[j] = population->chromossomes[k]; localTotalFitness -= population->fitness[k]; temp = k; break; } } } //Crossover unsigned char cutPoint = hiprand(&states[id]) % 31; unsigned mask1 = 0xffffffff << cutPoint; unsigned mask2 = 0xffffffff >> (32 - cutPoint); unsigned int child; child = (parents[0] & mask1) + (parents[1] & mask2); //Mutation float mutation = hiprand_uniform(&states[id]); if(mutation < MUT_PROB) { unsigned char mutPoint = hiprand(&states[id]) % 30; child ^= 1 << mutPoint; } nextPopulation->chromossomes[id] = child; nextPopulation->fitness[id] = 0; if(id == 0) { nextPopulation->chromossomes[0] = population->chromossomes[0]; } } int main(int argc, char *argv[ ]) { int PSIZE, NGEN, NIT, PRINT; double Ttotal = 0; if(argc < 5) { printf("Uso %s <POP_SIZE> <N_GEN> <N_ITERACOES> <PRINT>\n", argv[0]); return 1; } else { PSIZE = atoi(argv[1]); NGEN = atoi(argv[2]); NIT = atoi(argv[3]); PRINT = atoi(argv[4]); } for(int it = 0; it < NIT; it++) { //printf("ANTES"); clock_t start, end; //Init variables Population *population, *nextPopulation, *swap, *population_h, *nextPopulation_h; population_h = (Population *) malloc(sizeof(Population)); nextPopulation_h = (Population *) malloc(sizeof(Population)); hipMalloc((void**) &population, sizeof(Population)); hipMalloc((void**) 
&nextPopulation, sizeof(Population)); hipMalloc((void**) &(population_h->fitness), PSIZE * sizeof(float)); hipMalloc((void**) &(population_h->chromossomes), PSIZE * sizeof(unsigned int)); hipMalloc((void**) &(nextPopulation_h->fitness), PSIZE * sizeof(float)); hipMalloc((void**) &(nextPopulation_h->chromossomes), PSIZE * sizeof(unsigned int)); hipMemcpy(population, population_h, sizeof(Population), hipMemcpyHostToDevice); hipMemcpy(nextPopulation, nextPopulation_h, sizeof(Population), hipMemcpyHostToDevice); hiprandState_t *states; hipMalloc((void**) &states, PSIZE * sizeof(hiprandState_t)); float *maxFitness; maxFitness = (float *) malloc(NGEN * sizeof(float)); float *totalFitness; hipMalloc((void**) &totalFitness, sizeof(float)); start = clock(); //printf("marco0"); //Create population hipLaunchKernelGGL(( createPopulation), dim3(ceil(PSIZE/1024.0)), dim3(min(PSIZE, 1024)), 0, 0, population, time(NULL), states); //printf("marco1"); float const zero = 0.0f; for(int i = 0; i < NGEN; i++) { hipMemcpy(totalFitness, &zero, sizeof(float), hipMemcpyHostToDevice); //Calculate fitness hipLaunchKernelGGL(( fitness), dim3(PSIZE), dim3(3), 0, 0, population, totalFitness); //printf("marco2"); //thrust::device_ptr<Population> dev_ptr_population(population); //thrust::sort(dev_ptr_population, dev_ptr_population + PSIZE); thrust::device_ptr<float> dev_ptr_fitness(population_h->fitness); thrust::device_ptr<unsigned int> dev_ptr_chromossomes(population_h->chromossomes); thrust::sort_by_key(dev_ptr_fitness, dev_ptr_fitness + PSIZE, dev_ptr_chromossomes, thrust::greater<float>()); hipMemcpy(&maxFitness[i], &(population_h->fitness[0]), sizeof(float), hipMemcpyDeviceToHost); //printf("marco3"); hipLaunchKernelGGL(( reproduce), dim3(ceil(PSIZE/1024.0)), dim3(min(PSIZE, 1024)), 0, 0, population, nextPopulation, PSIZE, totalFitness, states); //printf("marco4"); swap = population; population = nextPopulation; nextPopulation = swap; swap = population_h; population_h = nextPopulation_h; nextPopulation_h = swap; } end = clock(); hipFree(population); hipFree(nextPopulation); hipFree(states); if(PRINT != 0) { printf("Gen\tFitness\n"); for(int i = 0; i < NGEN; i++) { printf("%d\t%f\n", i+1, maxFitness[i]); } } free(maxFitness); printf("\nT total(us)\t\tT gerao(us)\n"); double cpu_time_used = 1000000 * ((double) (end - start)) / CLOCKS_PER_SEC; printf("%f\t\t%f\n\n", cpu_time_used, cpu_time_used/NGEN); Ttotal += cpu_time_used; } printf("\nAvg T total(us)\t\tAvg T gerao(us)\n"); printf("%f\t\t%f\n", Ttotal/NIT, Ttotal/(NIT*NGEN)); return 0; }
eb699cd2414c8ce7b140dcda45e18339c92ddb25.cu
#include<stdio.h> #include<stdlib.h> #include<time.h> #include<algorithm> #include <curand.h> #include <curand_kernel.h> #include <thrust/sort.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> //#define PSIZE 10 //#define NGEN 500000 #define MUT_PROB 0.05 struct Individual { float fitness; unsigned int chromossomes; }; struct Population { float *fitness; unsigned int *chromossomes; }; __host__ __device__ bool operator<(const Individual &i, const Individual &j) { return (i.fitness > j.fitness); } __host__ __device__ float operator+(const Individual &i, const Individual &j) { return (i.fitness + j.fitness); } __host__ __device__ Individual individualSum (Individual i, Individual j) { Individual I; I.fitness = i.fitness + j.fitness; return I; } void printPop(Individual *population, int popSize, int print) { if(print != 0) { for(int i = 0; i < popSize; i++) { printf("%f - ", population[i].fitness); } } } __global__ void createPopulation(Population *population, unsigned int seed, curandState_t *states) { int id = blockIdx.x*blockDim.x + threadIdx.x; curand_init(seed, id, 0, &states[id]); population->fitness[id] = 0; population->chromossomes[id] = curand(&states[id]); } __global__ void fitness(Population *population, float *totalFitness) { int id = blockIdx.x; unsigned int mask = 0x3FF; __shared__ float a[3]; a[threadIdx.x] = (population->chromossomes[id] & (mask << (10 * threadIdx.x))) >> (10 * threadIdx.x); a[threadIdx.x] = (a[threadIdx.x] - 512)/100.0; __syncthreads(); if(threadIdx.x == 0) { float f = 1.0 / (1 + a[0]*a[0] + a[1]*a[1] + a[2]*a[2]); population->fitness[id] = f; atomicAdd(totalFitness, f); } } __global__ void reproduce(Population *population, Population *nextPopulation, int PSIZE, float *totalFitness, curandState_t *states) { int id = blockIdx.x*blockDim.x + threadIdx.x; unsigned int parents[2]; int temp = -1; float localTotalFitness = *totalFitness; //Selection for(int j = 0; j < 2; j++) { float p = curand_uniform(&states[id]) * localTotalFitness; float score = 0; for(int k = 0; k < PSIZE; k++) { if(k == temp) { continue; } score += population->fitness[k]; if(p < score) { parents[j] = population->chromossomes[k]; localTotalFitness -= population->fitness[k]; temp = k; break; } } } //Crossover unsigned char cutPoint = curand(&states[id]) % 31; unsigned mask1 = 0xffffffff << cutPoint; unsigned mask2 = 0xffffffff >> (32 - cutPoint); unsigned int child; child = (parents[0] & mask1) + (parents[1] & mask2); //Mutation float mutation = curand_uniform(&states[id]); if(mutation < MUT_PROB) { unsigned char mutPoint = curand(&states[id]) % 30; child ^= 1 << mutPoint; } nextPopulation->chromossomes[id] = child; nextPopulation->fitness[id] = 0; if(id == 0) { nextPopulation->chromossomes[0] = population->chromossomes[0]; } } int main(int argc, char *argv[ ]) { int PSIZE, NGEN, NIT, PRINT; double Ttotal = 0; if(argc < 5) { printf("Uso %s <POP_SIZE> <N_GEN> <N_ITERACOES> <PRINT>\n", argv[0]); return 1; } else { PSIZE = atoi(argv[1]); NGEN = atoi(argv[2]); NIT = atoi(argv[3]); PRINT = atoi(argv[4]); } for(int it = 0; it < NIT; it++) { //printf("ANTES"); clock_t start, end; //Init variables Population *population, *nextPopulation, *swap, *population_h, *nextPopulation_h; population_h = (Population *) malloc(sizeof(Population)); nextPopulation_h = (Population *) malloc(sizeof(Population)); cudaMalloc((void**) &population, sizeof(Population)); cudaMalloc((void**) &nextPopulation, sizeof(Population)); cudaMalloc((void**) &(population_h->fitness), PSIZE * sizeof(float)); 
cudaMalloc((void**) &(population_h->chromossomes), PSIZE * sizeof(unsigned int)); cudaMalloc((void**) &(nextPopulation_h->fitness), PSIZE * sizeof(float)); cudaMalloc((void**) &(nextPopulation_h->chromossomes), PSIZE * sizeof(unsigned int)); cudaMemcpy(population, population_h, sizeof(Population), cudaMemcpyHostToDevice); cudaMemcpy(nextPopulation, nextPopulation_h, sizeof(Population), cudaMemcpyHostToDevice); curandState_t *states; cudaMalloc((void**) &states, PSIZE * sizeof(curandState_t)); float *maxFitness; maxFitness = (float *) malloc(NGEN * sizeof(float)); float *totalFitness; cudaMalloc((void**) &totalFitness, sizeof(float)); start = clock(); //printf("marco0"); //Create population createPopulation<<<ceil(PSIZE/1024.0), min(PSIZE, 1024)>>>(population, time(NULL), states); //printf("marco1"); float const zero = 0.0f; for(int i = 0; i < NGEN; i++) { cudaMemcpy(totalFitness, &zero, sizeof(float), cudaMemcpyHostToDevice); //Calculate fitness fitness<<<PSIZE, 3>>>(population, totalFitness); //printf("marco2"); //thrust::device_ptr<Population> dev_ptr_population(population); //thrust::sort(dev_ptr_population, dev_ptr_population + PSIZE); thrust::device_ptr<float> dev_ptr_fitness(population_h->fitness); thrust::device_ptr<unsigned int> dev_ptr_chromossomes(population_h->chromossomes); thrust::sort_by_key(dev_ptr_fitness, dev_ptr_fitness + PSIZE, dev_ptr_chromossomes, thrust::greater<float>()); cudaMemcpy(&maxFitness[i], &(population_h->fitness[0]), sizeof(float), cudaMemcpyDeviceToHost); //printf("marco3"); reproduce<<<ceil(PSIZE/1024.0), min(PSIZE, 1024)>>>(population, nextPopulation, PSIZE, totalFitness, states); //printf("marco4"); swap = population; population = nextPopulation; nextPopulation = swap; swap = population_h; population_h = nextPopulation_h; nextPopulation_h = swap; } end = clock(); cudaFree(population); cudaFree(nextPopulation); cudaFree(states); if(PRINT != 0) { printf("Gen\tFitness\n"); for(int i = 0; i < NGEN; i++) { printf("%d\t%f\n", i+1, maxFitness[i]); } } free(maxFitness); printf("\nT total(us)\t\tT geração(us)\n"); double cpu_time_used = 1000000 * ((double) (end - start)) / CLOCKS_PER_SEC; printf("%f\t\t%f\n\n", cpu_time_used, cpu_time_used/NGEN); Ttotal += cpu_time_used; } printf("\nAvg T total(us)\t\tAvg T geração(us)\n"); printf("%f\t\t%f\n", Ttotal/NIT, Ttotal/(NIT*NGEN)); return 0; }
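For reference, a host-side sketch of the fitness computed by the GPU kernel above: each 32-bit chromosome packs three 10-bit genes, each mapped from [0, 1023] to roughly [-5.12, 5.11] before the 1/(1 + x^2 + y^2 + z^2) score is formed. The function name is illustrative; the device kernel remains the authoritative version.

// Host-side reference of the GA fitness, useful for spot-checking device results.
float fitness_reference(unsigned int chromosome) {
    float sum_sq = 0.0f;
    for (int g = 0; g < 3; ++g) {
        unsigned int gene = (chromosome >> (10 * g)) & 0x3FF; // extract 10-bit gene
        float x = (static_cast<float>(gene) - 512.0f) / 100.0f;
        sum_sq += x * x;
    }
    return 1.0f / (1.0f + sum_sq);
}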
9ac1aceb9d1f2e576c38ae506d6f08bb9d96b132.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define LOG_DEBUG if(0) #define LOG_INPUT if(0) #define LOG_OUTPUT if(0) __global__ void sumTriangle(float* M, float* V, int N); __global__ void sumTriangleWithAtomics(float* M, float* V, int N); __global__ void sumTriangle(float* M, float* V, int N){ int j=threadIdx.x; float sum=0.0; for (int i=0;i<j;i++) sum+=M[i*N+j]; V[j]=sum; __syncthreads(); if(j == N-1) { sum = 0.0; for(int i=0;i<N;i++) sum =sum + V[i]; V[N] = sum; } } __global__ void sumTriangleWithAtomics(float* M, float* V, int N){ int j=threadIdx.x; float sum=0.0; __shared__ float totalSum; if(j==0) totalSum=0.0; __syncthreads(); for (int i=0;i<j;i++) sum+=M[i*N+j]; V[j]=sum; atomicAdd(&totalSum, sum); __syncthreads(); if(j == N-1) { V[N]=totalSum; } } void print_matrix(float *A,int m,int n) { for(int i =0;i<m;i++) { for(int j=0;j<n;j++) printf("%.2f ",A[i*n+j]); printf("\n"); } } void init_matrix(float *A,int m,int n) { for(int i=0;i<m;i++) { for(int j=0;j<n;j++) A[i*n+j]=j; } } int main(void) { hipError_t err = hipSuccess; int t=1; int option; LOG_INPUT printf("%d\n",t); while(t--) { int mat_row; scanf("%d %d",&mat_row,&option); int mat_dim = mat_row; int num_elems = mat_dim*mat_dim; size_t size_M = num_elems*sizeof(float); size_t size_V = (1+mat_dim)*sizeof(float); float *h_input1 = (float *)malloc(size_M); float *h_output1 = (float *)malloc(size_V); if (h_input1 == NULL || h_output1 == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } init_matrix(h_input1,mat_dim,mat_dim); LOG_INPUT print_matrix(h_input1,mat_dim,mat_dim); float *d_input1 = NULL; err = hipMalloc((void **)&d_input1, size_M); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector d_input1 (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *d_output1 = NULL; err = hipMalloc((void **)&d_output1, size_V); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector d_input1 (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } LOG_DEBUG printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_input1, h_input1, size_M, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector h_input1 from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } int grid_dim = 1, block_dim = mat_dim; hipEvent_t seq_start, seq_end; hipEventCreate(&seq_start); hipEventCreate(&seq_end); if(option==0) { hipEventRecord(seq_start,0); hipLaunchKernelGGL(( sumTriangle), dim3(grid_dim), dim3(block_dim), 0, 0, d_input1, d_output1, mat_dim); hipEventRecord(seq_end,0); hipEventSynchronize(seq_end); } else { hipEventRecord(seq_start,0); hipLaunchKernelGGL(( sumTriangleWithAtomics), dim3(grid_dim), dim3(block_dim), 0, 0, d_input1, d_output1, mat_dim); hipEventRecord(seq_end,0); hipEventSynchronize(seq_end); } err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } LOG_DEBUG printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_output1, d_output1, size_V, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector d_input1 from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } LOG_OUTPUT print_matrix(h_output1,1,mat_dim+1); err = hipFree(d_input1); if (err != 
hipSuccess) { fprintf(stderr, "Failed to free device vector d_input1 (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } free(h_input1); err = hipFree(d_output1); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector d_input1 (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } free(h_output1); float event_recorded_time=0.0; hipEventElapsedTime(&event_recorded_time, seq_start, seq_end); printf("Execution Time: %f\n",event_recorded_time); err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } LOG_DEBUG printf("Done\n"); } return 0; }
9ac1aceb9d1f2e576c38ae506d6f08bb9d96b132.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #define LOG_DEBUG if(0) #define LOG_INPUT if(0) #define LOG_OUTPUT if(0) __global__ void sumTriangle(float* M, float* V, int N); __global__ void sumTriangleWithAtomics(float* M, float* V, int N); __global__ void sumTriangle(float* M, float* V, int N){ int j=threadIdx.x; float sum=0.0; for (int i=0;i<j;i++) sum+=M[i*N+j]; V[j]=sum; __syncthreads(); if(j == N-1) { sum = 0.0; for(int i=0;i<N;i++) sum =sum + V[i]; V[N] = sum; } } __global__ void sumTriangleWithAtomics(float* M, float* V, int N){ int j=threadIdx.x; float sum=0.0; __shared__ float totalSum; if(j==0) totalSum=0.0; __syncthreads(); for (int i=0;i<j;i++) sum+=M[i*N+j]; V[j]=sum; atomicAdd(&totalSum, sum); __syncthreads(); if(j == N-1) { V[N]=totalSum; } } void print_matrix(float *A,int m,int n) { for(int i =0;i<m;i++) { for(int j=0;j<n;j++) printf("%.2f ",A[i*n+j]); printf("\n"); } } void init_matrix(float *A,int m,int n) { for(int i=0;i<m;i++) { for(int j=0;j<n;j++) A[i*n+j]=j; } } int main(void) { cudaError_t err = cudaSuccess; int t=1; int option; LOG_INPUT printf("%d\n",t); while(t--) { int mat_row; scanf("%d %d",&mat_row,&option); int mat_dim = mat_row; int num_elems = mat_dim*mat_dim; size_t size_M = num_elems*sizeof(float); size_t size_V = (1+mat_dim)*sizeof(float); float *h_input1 = (float *)malloc(size_M); float *h_output1 = (float *)malloc(size_V); if (h_input1 == NULL || h_output1 == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } init_matrix(h_input1,mat_dim,mat_dim); LOG_INPUT print_matrix(h_input1,mat_dim,mat_dim); float *d_input1 = NULL; err = cudaMalloc((void **)&d_input1, size_M); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector d_input1 (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *d_output1 = NULL; err = cudaMalloc((void **)&d_output1, size_V); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector d_input1 (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } LOG_DEBUG printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_input1, h_input1, size_M, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector h_input1 from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } int grid_dim = 1, block_dim = mat_dim; cudaEvent_t seq_start, seq_end; cudaEventCreate(&seq_start); cudaEventCreate(&seq_end); if(option==0) { cudaEventRecord(seq_start,0); sumTriangle<<<grid_dim, block_dim>>>(d_input1, d_output1, mat_dim); cudaEventRecord(seq_end,0); cudaEventSynchronize(seq_end); } else { cudaEventRecord(seq_start,0); sumTriangleWithAtomics<<<grid_dim, block_dim>>>(d_input1, d_output1, mat_dim); cudaEventRecord(seq_end,0); cudaEventSynchronize(seq_end); } err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } LOG_DEBUG printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_output1, d_output1, size_V, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector d_input1 from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } LOG_OUTPUT print_matrix(h_output1,1,mat_dim+1); err = cudaFree(d_input1); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector d_input1 (error code %s)!\n", cudaGetErrorString(err)); 
exit(EXIT_FAILURE); } free(h_input1); err = cudaFree(d_output1); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector d_input1 (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } free(h_output1); float event_recorded_time=0.0; cudaEventElapsedTime(&event_recorded_time, seq_start, seq_end); printf("Execution Time: %f\n",event_recorded_time); err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } LOG_DEBUG printf("Done\n"); } return 0; }
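A host-side reference sketch of what both kernels above compute: V[j] is the column sum of the strict upper triangle (rows i < j) and V[N] is the grand total, so with the init_matrix fill A[i][j] = j the expected output is V[j] = j*j. The function name is illustrative and V must hold N + 1 floats.

// CPU reference for sumTriangle / sumTriangleWithAtomics.
void sum_triangle_reference(const float *M, float *V, int N) {
    float total = 0.0f;
    for (int j = 0; j < N; ++j) {
        float col = 0.0f;
        for (int i = 0; i < j; ++i) col += M[i * N + j]; // strict upper triangle
        V[j] = col;
        total += col;
    }
    V[N] = total;
}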
f4d935b3ca3ed8fc70c3506b681dae1a0c7cc47e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <math.h> #include <stdio.h> #include "1dadvec_kernels.cu" /* set integration points * * get the gauss lobatta integration points and weights for integration */ void setIntegrationPoints(int Np, float *w, float *r) { switch (Np) { case 0: r[0] = 0.; w[0] = 2.; break; case 1: r[0] = -1./sqrt(3); r[1] = 1./sqrt(3); w[0] = 1.; w[1] = 1.; break; case 2: r[0] = 0.; r[1] = -sqrt(3./5); r[2] = sqrt(3./5); w[0] = 8./9; w[1] = 5./9; w[2] = 5./9; break; case 3: r[0] = -sqrt((3.-2.*sqrt(6./5))/7.); r[1] = sqrt((3.-2.*sqrt(6./5))/7.); r[2] = -sqrt((3.+2.*sqrt(6./5))/7.); r[3] = sqrt((3.+2.*sqrt(6./5))/7.); w[0] = (18.+sqrt(30.))/36.; w[1] = (18.+sqrt(30.))/36.; w[2] = (18.-sqrt(30.))/36.; w[3] = (18.-sqrt(30.))/36.; break; case 4: r[0] = 0.; r[1] = -sqrt(5.-2.*sqrt(10./7))/3.; r[2] = sqrt(5.-2.*sqrt(10./7))/3.; r[3] = -sqrt(5.+2.*sqrt(10./7))/3.; r[4] = sqrt(5.+2.*sqrt(10./7))/3.; w[0] = 128./225; w[1] = (322.+13.*sqrt(70.))/900.; w[2] = (322.+13.*sqrt(70.))/900.; w[3] = (322.-13.*sqrt(70.))/900.; w[4] = (322.-13.*sqrt(70.))/900.; break; case 5: r[0] = -0.23861918; r[1] = 0.23861918; r[2] = -0.66120939; r[3] = 0.66120939; r[4] = -0.93246951; r[5] = 0.93246951; w[0] = 0.46791393; w[1] = 0.46791393; w[2] = 0.36076157; w[3] = 0.36076157; w[4] = 0.17132449; w[5] = 0.17132449; break; case 6: r[0] = 0; r[1] = -0.40584515; r[2] = 0.40584515; r[3] = -0.74153119; r[4] = 0.74153119; r[5] = -0.94910791; r[6] = 0.94910791; w[0] = 0.41795918; w[1] = 0.38183005; w[2] = 0.38183005; w[3] = 0.27970539; w[4] = 0.27970539; w[5] = 0.12948497; w[6] = 0.12948497; break; case 7: r[0] = -0.18343464; r[1] = 0.18343464; r[2] = -0.52553241; r[3] = 0.52553241; r[4] = -0.79666648; r[5] = 0.79666648; r[6] = -0.96028986; r[7] = 0.96028986; w[0] = 0.36268378; w[1] = 0.36268378; w[2] = 0.31370665; w[3] = 0.31370665; w[4] = 0.22238103; w[5] = 0.22238103; w[6] = 0.10122854; w[7] = 0.10122854; break; // This is WRONG case 8: r[0] = -0.14887434; r[1] = 0.14887434; r[2] = -0.43339539; r[3] = 0.43339539; r[4] = -0.67940957; r[5] = 0.67940957; r[6] = -0.86506337; r[7] = 0.86506337; r[8] = -0.97390653; r[9] = 0.97390653; w[0] = 0.29552422; w[1] = 0.29552422; w[2] = 0.26926672; w[3] = 0.26926672; w[4] = 0.21908636; w[5] = 0.21908636; w[6] = 0.14945135; w[7] = 0.14945135; w[8] = 0.06667134; w[9] = 0.06667134; break; // This might be ok. case 9: r[0] = -0.14887434; r[1] = 0.14887434; r[2] = -0.43339539; r[3] = 0.43339539; r[4] = -0.67940957; r[5] = 0.67940957; r[6] = -0.86506337; r[7] = 0.86506337; r[8] = -0.97390653; r[9]= 0.97390653; w[0] = 0.29552422; w[1] = 0.29552422; w[2] = 0.26926672; w[3] = 0.26926672; w[4] = 0.21908636; w[5] = 0.21908636; w[6] = 0.14945135; w[7] = 0.14945135; w[8] = 0.06667134; w[9] = 0.06667134; break; // This is also WRONG case 10: r[0] = -0.14887434; r[1] = 0.14887434; r[2] = -0.43339539; r[3] = 0.43339539; r[4] = -0.67940957; r[5] = 0.67940957; r[6] = -0.86506337; r[7] = 0.86506337; r[8] = -0.97390653; r[9] = 0.97390653; w[0] = 0.29552422; w[1] = 0.29552422; w[2] = 0.26926672; w[3] = 0.26926672; w[4] = 0.21908636; w[5] = 0.21908636; w[6] = 0.14945135; w[7] = 0.14945135; w[8] = 0.06667134; w[9] = 0.06667134; break; } } void checkCudaError(const char *message) { hipError_t error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s: %s\n", message, hipGetErrorString(error) ); exit(-1); } } /* integrate in time * * take one time step; calls the kernel functions to compute in parallel. 
*/ void timeIntegrate(float *u, float a, int K, float dt, float dx, double t, int Np) { int nThreads = 128; int nBlocksRHS = (K + 1) / nThreads + (((K + 1) % nThreads) ? 1 : 0); int nBlocksFlux = (K + 2) / nThreads + (((K + 2) % nThreads) ? 1 : 0); int nBlocksRK = ((Np + 1)*K) / nThreads + ((((Np + 1)*K) % nThreads) ? 1 : 0); checkCudaError("error before rk4"); // Stage 1 // f <- flux(u) hipLaunchKernelGGL(( calcFlux), dim3(nBlocksFlux), dim3(nThreads), 0, 0, d_u, d_f, a, t, K, Np); hipDeviceSynchronize(); // k1 <- dt*rhs(u) hipLaunchKernelGGL(( rhs), dim3(nBlocksRHS), dim3(nThreads), 0, 0, d_u, d_k1, d_f, d_w, d_r, a, dt, dx, K, Np); hipDeviceSynchronize(); // k* <- u + k1/2 hipLaunchKernelGGL(( rk4_tempstorage), dim3(nBlocksRK), dim3(nThreads), 0, 0, d_u, d_kstar, d_k1, 0.5, dt, Np, K); hipDeviceSynchronize(); // Stage 2 // f <- flux(k*) hipLaunchKernelGGL(( calcFlux), dim3(nBlocksFlux), dim3(nThreads), 0, 0, d_kstar, d_f, a, t, K, Np); hipDeviceSynchronize(); // k2 <- dt*rhs(k*) hipLaunchKernelGGL(( rhs), dim3(nBlocksRHS), dim3(nThreads), 0, 0, d_kstar, d_k2, d_f, d_w, d_r, a, dt, dx, K, Np); hipDeviceSynchronize(); // k* <- u + k2/2 hipLaunchKernelGGL(( rk4_tempstorage), dim3(nBlocksRK), dim3(nThreads), 0, 0, d_u, d_kstar, d_k2, 0.5, dt, Np, K); hipDeviceSynchronize(); // Stage 3 // f <- flux(k*) hipLaunchKernelGGL(( calcFlux), dim3(nBlocksFlux), dim3(nThreads), 0, 0, d_kstar, d_f, a, t, K, Np); hipDeviceSynchronize(); // k3 <- dt*rhs(k*) hipLaunchKernelGGL(( rhs), dim3(nBlocksRHS), dim3(nThreads), 0, 0, d_kstar, d_k3, d_f, d_w, d_r, a, dt, dx, K, Np); hipDeviceSynchronize(); // k* <- u + k3 hipLaunchKernelGGL(( rk4_tempstorage), dim3(nBlocksRK), dim3(nThreads), 0, 0, d_u, d_kstar, d_k3, 1.0, dt, Np, K); hipDeviceSynchronize(); // Stage 4 // f <- flux(k*) hipLaunchKernelGGL(( calcFlux), dim3(nBlocksFlux), dim3(nThreads), 0, 0, d_kstar, d_f, a, t, K, Np); hipDeviceSynchronize(); // k4 <- dt*rhs(k*) hipLaunchKernelGGL(( rhs), dim3(nBlocksRHS), dim3(nThreads), 0, 0, d_kstar, d_k4, d_f, d_w, d_r, a, dt, dx, K, Np); hipDeviceSynchronize(); checkCudaError("error after rk4"); // u <- k1/6 + k2/3 + k3/3 + k4/6 hipLaunchKernelGGL(( rk4), dim3(nBlocksRK), dim3(nThreads), 0, 0, d_u, d_k1, d_k2, d_k3, d_k4, Np, K); // copy back the data (this takes a while) hipMemcpy(u, d_u, K * (Np + 1) * sizeof(float), hipMemcpyDeviceToHost); } /* allocate memory on the GPU for the GPU data */ void initGPU(int K, int Np) { int size = K * (Np + 1); hipDeviceReset(); checkCudaError("error after reset?"); // Main variables hipMalloc((void **) &d_u , (K + 1) * (Np + 1) * sizeof(float)); hipMalloc((void **) &d_f, (K + 2) * sizeof(float)); hipMalloc((void **) &d_mesh, K * sizeof(float)); hipMalloc((void **) &d_r, (Np + 1) * sizeof(float)); hipMalloc((void **) &d_w, (Np + 1) * sizeof(float)); // Runge-Kutta storage hipMalloc((void **) &d_kstar , size * sizeof(float)); hipMalloc((void **) &d_k1 , size * sizeof(float)); hipMalloc((void **) &d_k2 , size * sizeof(float)); hipMalloc((void **) &d_k3 , size * sizeof(float)); hipMalloc((void **) &d_k4 , size * sizeof(float)); checkCudaError("error in init"); } int main() { int i, j, size, t, timesteps; float *mesh; // the grid points float *u; // the computed result float *r; // the GLL points float *w; // Gaussian integration weights int Np = 6; // polynomial order of the approximation int K = 200; // the mesh size float a = -1.; // left boundary float b = 1.; // right boundary float dx = (b - a) / K; // size of cell float aspeed = 2.*3.14159; // the wave speed float CFL = 
0.5 * (1. / (2.*Np + 1.)); float dt = CFL/aspeed * dx; // timestep timesteps = 1000; size = (Np + 1) * K; // size of u mesh = (float *) malloc((K + 1) * sizeof(float)); u = (float *) malloc(K * (Np + 1) * sizeof(float)); r = (float *) malloc((Np + 1) * sizeof(float)); w = (float *) malloc((Np + 1) * sizeof(float)); int nThreads = 128; int nBlocksMesh = K / nThreads + ((K % nThreads) ? 1 : 0); int nBlocksU = K / nThreads + ((K % nThreads) ? 1 : 0); // Allocate space on the GPU initGPU(K, Np); // Init the mesh's endpoints hipLaunchKernelGGL(( initMesh), dim3(nBlocksMesh), dim3(nThreads), 0, 0, d_mesh, dx, a, K); hipDeviceSynchronize(); // Copy over r and w setIntegrationPoints(Np, w, r); hipMemcpy(d_r, r, (Np + 1) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w, w, (Np + 1) * sizeof(float), hipMemcpyHostToDevice); // Initialize u at time t0 hipLaunchKernelGGL(( initU), dim3(nBlocksU), dim3(nThreads), 0, 0, d_u, d_mesh, d_w, d_r, dx, K, Np); hipDeviceSynchronize(); hipLaunchKernelGGL(( initUPeriodic), dim3(1), dim3(1), 0, 0, d_u, K, Np); hipDeviceSynchronize(); checkCudaError("error after initialization"); // File for output FILE *data; data = fopen("data.txt", "w"); hipMemcpy(mesh, d_mesh, K * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(u, d_u, K * (Np + 1) * sizeof(float), hipMemcpyDeviceToHost); // Output the mesh into the file fprintf(data, "%i\n", Np); for (i = 0; i < K; i++) { fprintf(data, "%f ", mesh[i]); } fprintf(data, "\n"); // Make sure the ghost state's flux isn't filled with garbage hipLaunchKernelGGL(( initFlux), dim3(1),dim3(1), 0, 0, d_u, d_f, K, Np); // Run the integrator for (t = 0; t < timesteps; t++) { // Output the data into the file (this takes a while) for (i = 0; i < K; i++) { for (j = 0; j < Np+1; j++) { fprintf(data," %f ", u[j*(K + 1) + i]); } } fprintf(data, "\n"); timeIntegrate(u, aspeed, K, dt, dx, dt*t, Np); } fclose(data); // Free host data free(mesh); free(u); free(r); free(w); // Free GPU data hipFree(d_u); hipFree(d_f); hipFree(d_mesh); hipFree(d_r); hipFree(d_kstar); hipFree(d_k1); hipFree(d_k2); hipFree(d_k3); hipFree(d_k4); }
f4d935b3ca3ed8fc70c3506b681dae1a0c7cc47e.cu
#include <cuda.h> #include <math.h> #include <stdio.h> #include "1dadvec_kernels.cu" /* set integration points * * get the gauss lobatta integration points and weights for integration */ void setIntegrationPoints(int Np, float *w, float *r) { switch (Np) { case 0: r[0] = 0.; w[0] = 2.; break; case 1: r[0] = -1./sqrt(3); r[1] = 1./sqrt(3); w[0] = 1.; w[1] = 1.; break; case 2: r[0] = 0.; r[1] = -sqrt(3./5); r[2] = sqrt(3./5); w[0] = 8./9; w[1] = 5./9; w[2] = 5./9; break; case 3: r[0] = -sqrt((3.-2.*sqrt(6./5))/7.); r[1] = sqrt((3.-2.*sqrt(6./5))/7.); r[2] = -sqrt((3.+2.*sqrt(6./5))/7.); r[3] = sqrt((3.+2.*sqrt(6./5))/7.); w[0] = (18.+sqrt(30.))/36.; w[1] = (18.+sqrt(30.))/36.; w[2] = (18.-sqrt(30.))/36.; w[3] = (18.-sqrt(30.))/36.; break; case 4: r[0] = 0.; r[1] = -sqrt(5.-2.*sqrt(10./7))/3.; r[2] = sqrt(5.-2.*sqrt(10./7))/3.; r[3] = -sqrt(5.+2.*sqrt(10./7))/3.; r[4] = sqrt(5.+2.*sqrt(10./7))/3.; w[0] = 128./225; w[1] = (322.+13.*sqrt(70.))/900.; w[2] = (322.+13.*sqrt(70.))/900.; w[3] = (322.-13.*sqrt(70.))/900.; w[4] = (322.-13.*sqrt(70.))/900.; break; case 5: r[0] = -0.23861918; r[1] = 0.23861918; r[2] = -0.66120939; r[3] = 0.66120939; r[4] = -0.93246951; r[5] = 0.93246951; w[0] = 0.46791393; w[1] = 0.46791393; w[2] = 0.36076157; w[3] = 0.36076157; w[4] = 0.17132449; w[5] = 0.17132449; break; case 6: r[0] = 0; r[1] = -0.40584515; r[2] = 0.40584515; r[3] = -0.74153119; r[4] = 0.74153119; r[5] = -0.94910791; r[6] = 0.94910791; w[0] = 0.41795918; w[1] = 0.38183005; w[2] = 0.38183005; w[3] = 0.27970539; w[4] = 0.27970539; w[5] = 0.12948497; w[6] = 0.12948497; break; case 7: r[0] = -0.18343464; r[1] = 0.18343464; r[2] = -0.52553241; r[3] = 0.52553241; r[4] = -0.79666648; r[5] = 0.79666648; r[6] = -0.96028986; r[7] = 0.96028986; w[0] = 0.36268378; w[1] = 0.36268378; w[2] = 0.31370665; w[3] = 0.31370665; w[4] = 0.22238103; w[5] = 0.22238103; w[6] = 0.10122854; w[7] = 0.10122854; break; // This is WRONG case 8: r[0] = -0.14887434; r[1] = 0.14887434; r[2] = -0.43339539; r[3] = 0.43339539; r[4] = -0.67940957; r[5] = 0.67940957; r[6] = -0.86506337; r[7] = 0.86506337; r[8] = -0.97390653; r[9] = 0.97390653; w[0] = 0.29552422; w[1] = 0.29552422; w[2] = 0.26926672; w[3] = 0.26926672; w[4] = 0.21908636; w[5] = 0.21908636; w[6] = 0.14945135; w[7] = 0.14945135; w[8] = 0.06667134; w[9] = 0.06667134; break; // This might be ok. case 9: r[0] = -0.14887434; r[1] = 0.14887434; r[2] = -0.43339539; r[3] = 0.43339539; r[4] = -0.67940957; r[5] = 0.67940957; r[6] = -0.86506337; r[7] = 0.86506337; r[8] = -0.97390653; r[9]= 0.97390653; w[0] = 0.29552422; w[1] = 0.29552422; w[2] = 0.26926672; w[3] = 0.26926672; w[4] = 0.21908636; w[5] = 0.21908636; w[6] = 0.14945135; w[7] = 0.14945135; w[8] = 0.06667134; w[9] = 0.06667134; break; // This is also WRONG case 10: r[0] = -0.14887434; r[1] = 0.14887434; r[2] = -0.43339539; r[3] = 0.43339539; r[4] = -0.67940957; r[5] = 0.67940957; r[6] = -0.86506337; r[7] = 0.86506337; r[8] = -0.97390653; r[9] = 0.97390653; w[0] = 0.29552422; w[1] = 0.29552422; w[2] = 0.26926672; w[3] = 0.26926672; w[4] = 0.21908636; w[5] = 0.21908636; w[6] = 0.14945135; w[7] = 0.14945135; w[8] = 0.06667134; w[9] = 0.06667134; break; } } void checkCudaError(const char *message) { cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s: %s\n", message, cudaGetErrorString(error) ); exit(-1); } } /* integrate in time * * take one time step; calls the kernel functions to compute in parallel. 
*/ void timeIntegrate(float *u, float a, int K, float dt, float dx, double t, int Np) { int nThreads = 128; int nBlocksRHS = (K + 1) / nThreads + (((K + 1) % nThreads) ? 1 : 0); int nBlocksFlux = (K + 2) / nThreads + (((K + 2) % nThreads) ? 1 : 0); int nBlocksRK = ((Np + 1)*K) / nThreads + ((((Np + 1)*K) % nThreads) ? 1 : 0); checkCudaError("error before rk4"); // Stage 1 // f <- flux(u) calcFlux<<<nBlocksFlux, nThreads>>>(d_u, d_f, a, t, K, Np); cudaThreadSynchronize(); // k1 <- dt*rhs(u) rhs<<<nBlocksRHS, nThreads>>>(d_u, d_k1, d_f, d_w, d_r, a, dt, dx, K, Np); cudaThreadSynchronize(); // k* <- u + k1/2 rk4_tempstorage<<<nBlocksRK, nThreads>>>(d_u, d_kstar, d_k1, 0.5, dt, Np, K); cudaThreadSynchronize(); // Stage 2 // f <- flux(k*) calcFlux<<<nBlocksFlux, nThreads>>>(d_kstar, d_f, a, t, K, Np); cudaThreadSynchronize(); // k2 <- dt*rhs(k*) rhs<<<nBlocksRHS, nThreads>>>(d_kstar, d_k2, d_f, d_w, d_r, a, dt, dx, K, Np); cudaThreadSynchronize(); // k* <- u + k2/2 rk4_tempstorage<<<nBlocksRK, nThreads>>>(d_u, d_kstar, d_k2, 0.5, dt, Np, K); cudaThreadSynchronize(); // Stage 3 // f <- flux(k*) calcFlux<<<nBlocksFlux, nThreads>>>(d_kstar, d_f, a, t, K, Np); cudaThreadSynchronize(); // k3 <- dt*rhs(k*) rhs<<<nBlocksRHS, nThreads>>>(d_kstar, d_k3, d_f, d_w, d_r, a, dt, dx, K, Np); cudaThreadSynchronize(); // k* <- u + k3 rk4_tempstorage<<<nBlocksRK, nThreads>>>(d_u, d_kstar, d_k3, 1.0, dt, Np, K); cudaThreadSynchronize(); // Stage 4 // f <- flux(k*) calcFlux<<<nBlocksFlux, nThreads>>>(d_kstar, d_f, a, t, K, Np); cudaThreadSynchronize(); // k4 <- dt*rhs(k*) rhs<<<nBlocksRHS, nThreads>>>(d_kstar, d_k4, d_f, d_w, d_r, a, dt, dx, K, Np); cudaThreadSynchronize(); checkCudaError("error after rk4"); // u <- k1/6 + k2/3 + k3/3 + k4/6 rk4<<<nBlocksRK, nThreads>>>(d_u, d_k1, d_k2, d_k3, d_k4, Np, K); // copy back the data (this takes a while) cudaMemcpy(u, d_u, K * (Np + 1) * sizeof(float), cudaMemcpyDeviceToHost); } /* allocate memory on the GPU for the GPU data */ void initGPU(int K, int Np) { int size = K * (Np + 1); cudaDeviceReset(); checkCudaError("error after reset?"); // Main variables cudaMalloc((void **) &d_u , (K + 1) * (Np + 1) * sizeof(float)); cudaMalloc((void **) &d_f, (K + 2) * sizeof(float)); cudaMalloc((void **) &d_mesh, K * sizeof(float)); cudaMalloc((void **) &d_r, (Np + 1) * sizeof(float)); cudaMalloc((void **) &d_w, (Np + 1) * sizeof(float)); // Runge-Kutta storage cudaMalloc((void **) &d_kstar , size * sizeof(float)); cudaMalloc((void **) &d_k1 , size * sizeof(float)); cudaMalloc((void **) &d_k2 , size * sizeof(float)); cudaMalloc((void **) &d_k3 , size * sizeof(float)); cudaMalloc((void **) &d_k4 , size * sizeof(float)); checkCudaError("error in init"); } int main() { int i, j, size, t, timesteps; float *mesh; // the grid points float *u; // the computed result float *r; // the GLL points float *w; // Gaussian integration weights int Np = 6; // polynomial order of the approximation int K = 200; // the mesh size float a = -1.; // left boundary float b = 1.; // right boundary float dx = (b - a) / K; // size of cell float aspeed = 2.*3.14159; // the wave speed float CFL = 0.5 * (1. / (2.*Np + 1.)); float dt = CFL/aspeed * dx; // timestep timesteps = 1000; size = (Np + 1) * K; // size of u mesh = (float *) malloc((K + 1) * sizeof(float)); u = (float *) malloc(K * (Np + 1) * sizeof(float)); r = (float *) malloc((Np + 1) * sizeof(float)); w = (float *) malloc((Np + 1) * sizeof(float)); int nThreads = 128; int nBlocksMesh = K / nThreads + ((K % nThreads) ? 
1 : 0); int nBlocksU = K / nThreads + ((K % nThreads) ? 1 : 0); // Allocate space on the GPU initGPU(K, Np); // Init the mesh's endpoints initMesh<<<nBlocksMesh, nThreads>>>(d_mesh, dx, a, K); cudaThreadSynchronize(); // Copy over r and w setIntegrationPoints(Np, w, r); cudaMemcpy(d_r, r, (Np + 1) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w, w, (Np + 1) * sizeof(float), cudaMemcpyHostToDevice); // Initialize u at time t0 initU<<<nBlocksU, nThreads>>>(d_u, d_mesh, d_w, d_r, dx, K, Np); cudaThreadSynchronize(); initUPeriodic<<<1, 1>>>(d_u, K, Np); cudaThreadSynchronize(); checkCudaError("error after initialization"); // File for output FILE *data; data = fopen("data.txt", "w"); cudaMemcpy(mesh, d_mesh, K * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(u, d_u, K * (Np + 1) * sizeof(float), cudaMemcpyDeviceToHost); // Output the mesh into the file fprintf(data, "%i\n", Np); for (i = 0; i < K; i++) { fprintf(data, "%f ", mesh[i]); } fprintf(data, "\n"); // Make sure the ghost state's flux isn't filled with garbage initFlux<<<1,1>>>(d_u, d_f, K, Np); // Run the integrator for (t = 0; t < timesteps; t++) { // Output the data into the file (this takes a while) for (i = 0; i < K; i++) { for (j = 0; j < Np+1; j++) { fprintf(data," %f ", u[j*(K + 1) + i]); } } fprintf(data, "\n"); timeIntegrate(u, aspeed, K, dt, dx, dt*t, Np); } fclose(data); // Free host data free(mesh); free(u); free(r); free(w); // Free GPU data cudaFree(d_u); cudaFree(d_f); cudaFree(d_mesh); cudaFree(d_r); cudaFree(d_kstar); cudaFree(d_k1); cudaFree(d_k2); cudaFree(d_k3); cudaFree(d_k4); }
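Per the comments in timeIntegrate above, the four staged launches implement the classic fourth-order Runge-Kutta update u <- u + (k1 + 2*k2 + 2*k3 + k4)/6. A scalar sketch of that textbook form, for reference only; the GPU version stages it coefficient-wise through d_kstar.

#include <functional>

// One classical RK4 step for du/dt = rhs(u, t).
double rk4_step(const std::function<double(double, double)> &rhs,
                double u, double t, double dt) {
    double k1 = dt * rhs(u, t);
    double k2 = dt * rhs(u + 0.5 * k1, t + 0.5 * dt);
    double k3 = dt * rhs(u + 0.5 * k2, t + 0.5 * dt);
    double k4 = dt * rhs(u + k3, t + dt);
    return u + (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0;
}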
408587e611909be61948d2362ccca2a2ec4a449a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <hipcub/hipcub.hpp> #include <iomanip> #include "caffe/FRCNN/frcnn_proposal_layer.hpp" #include "caffe/FRCNN/util/frcnn_utils.hpp" #include "caffe/FRCNN/util/frcnn_helper.hpp" #include "caffe/FRCNN/util/frcnn_param.hpp" #include "caffe/FRCNN/util/frcnn_gpu_nms.hpp" namespace caffe { namespace Frcnn { using std::vector; __global__ void GetIndex(const int n,int *indices){ CUDA_KERNEL_LOOP(index , n){ indices[index] = index; } } template <typename Dtype> __global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox, const int height, const int width, const int feat_stride, const int im_height, const int im_width, const int* sorted_indices, const float* anchors, float* const transform_bbox) { CUDA_KERNEL_LOOP(index , nthreads) { const int score_idx = sorted_indices[index]; const int i = score_idx % width; // width const int j = (score_idx % (width * height)) / width; // height const int k = score_idx / (width * height); // channel float *box = transform_bbox + index * 4; box[0] = anchors[k * 4 + 0] + i * feat_stride; box[1] = anchors[k * 4 + 1] + j * feat_stride; box[2] = anchors[k * 4 + 2] + i * feat_stride; box[3] = anchors[k * 4 + 3] + j * feat_stride; const Dtype det[4] = { bottom_rpn_bbox[(k * 4 + 0) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 1) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 2) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 3) * height * width + j * width + i] }; float src_w = box[2] - box[0] + 1; float src_h = box[3] - box[1] + 1; float src_ctr_x = box[0] + 0.5 * src_w; float src_ctr_y = box[1] + 0.5 * src_h; float pred_ctr_x = det[0] * src_w + src_ctr_x; float pred_ctr_y = det[1] * src_h + src_ctr_y; float pred_w = exp(det[2]) * src_w; float pred_h = exp(det[3]) * src_h; box[0] = pred_ctr_x - 0.5 * pred_w; box[1] = pred_ctr_y - 0.5 * pred_h; box[2] = pred_ctr_x + 0.5 * pred_w; box[3] = pred_ctr_y + 0.5 * pred_h; box[0] = max(0.0f, min(box[0], im_width - 1.0)); box[1] = max(0.0f, min(box[1], im_height - 1.0)); box[2] = max(0.0f, min(box[2], im_width - 1.0)); box[3] = max(0.0f, min(box[3], im_height - 1.0)); } } __global__ void SelectBox(const int nthreads, const float *box, float min_size, int *flags) { CUDA_KERNEL_LOOP(index , nthreads) { if ((box[index * 4 + 2] - box[index * 4 + 0] < min_size) || (box[index * 4 + 3] - box[index * 4 + 1] < min_size)) { flags[index] = 0; } else { flags[index] = 1; } } } template <typename Dtype> __global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices, float *out_box, const Dtype *in_score, Dtype *out_score) { CUDA_KERNEL_LOOP(index , nthreads) { if ((index == 0 && selected_indices[index] == 1) || (index > 0 && selected_indices[index] == selected_indices[index - 1] + 1)) { out_box[(selected_indices[index] - 1) * 4 + 0] = in_box[index * 4 + 0]; out_box[(selected_indices[index] - 1) * 4 + 1] = in_box[index * 4 + 1]; out_box[(selected_indices[index] - 1) * 4 + 2] = in_box[index * 4 + 2]; out_box[(selected_indices[index] - 1) * 4 + 3] = in_box[index * 4 + 3]; if (in_score!=NULL && out_score!=NULL) { out_score[selected_indices[index] - 1] = in_score[index]; } } 
} } template <typename Dtype> __global__ void SelectBoxAftNMS(const int nthreads, const float *in_box, int *keep_indices, Dtype *top_data, const Dtype *in_score, Dtype* top_score) { CUDA_KERNEL_LOOP(index , nthreads) { top_data[index * 5] = 0; int keep_idx = keep_indices[index]; for (int j = 1; j < 5; ++j) { top_data[index * 5 + j] = in_box[keep_idx * 4 + j - 1]; } if (top_score != NULL && in_score != NULL) { top_score[index] = in_score[keep_idx]; } } } template <typename Dtype> void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { // if(this->phase_ == TEST) { // this->use_gpu_nms_in_forward_cpu = true; // set flag to be used in forward_cpu Forward_cpu(bottom, top); // }else{ { #if 0 DLOG(ERROR) << "========== enter proposal layer"; const Dtype *bottom_rpn_score = bottom[0]->gpu_data(); const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data(); // bottom data comes from host memory Dtype bottom_im_info[3]; CHECK_EQ(bottom[2]->count(), 3); CUDA_CHECK(hipMemcpy(bottom_im_info, bottom[2]->gpu_data(), sizeof(Dtype) * 3, hipMemcpyDeviceToHost)); const int num = bottom[1]->num(); const int channes = bottom[1]->channels(); const int height = bottom[1]->height(); const int width = bottom[1]->width(); CHECK(num == 1) << "only single item batches are supported"; CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4"; const float im_height = bottom_im_info[0]; const float im_width = bottom_im_info[1]; int rpn_pre_nms_top_n; int rpn_post_nms_top_n; float rpn_nms_thresh; int rpn_min_size; if (this->phase_ == TRAIN) { rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n; rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n; rpn_nms_thresh = FrcnnParam::rpn_nms_thresh; rpn_min_size = FrcnnParam::rpn_min_size; } else { rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n; rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n; rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh; rpn_min_size = FrcnnParam::test_rpn_min_size; } LOG_IF(ERROR, rpn_pre_nms_top_n <= 0 ) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n; LOG_IF(ERROR, rpn_post_nms_top_n <= 0 ) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n; if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0 ) return; const int config_n_anchors = FrcnnParam::anchors.size() / 4; const int total_anchor_num = config_n_anchors * height * width; //Step 1. 
-------------------------------Sort the rpn result---------------------- // the first half of rpn_score is the bg score // Note that the sorting operator will change the order fg_scores (bottom_rpn_score) Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[total_anchor_num]); Dtype *sorted_scores = NULL; CUDA_CHECK(hipMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num)); cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores); int *indices = NULL; CUDA_CHECK(hipMalloc((void**)&indices, sizeof(int) * total_anchor_num)); hipLaunchKernelGGL(( GetIndex), dim3(caffe::CAFFE_GET_BLOCKS(total_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0, total_anchor_num, indices); hipDeviceSynchronize(); int *sorted_indices = NULL; CUDA_CHECK(hipMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num)); cub::DoubleBuffer<int> d_values(indices, sorted_indices); void *sort_temp_storage_ = NULL; size_t sort_temp_storage_bytes_ = 0; // calculate the temp_storage_bytes hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_, d_keys, d_values, total_anchor_num); DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_; CUDA_CHECK(hipMalloc(&sort_temp_storage_, sort_temp_storage_bytes_)); // sorting hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_, d_keys, d_values, total_anchor_num); hipDeviceSynchronize(); //Step 2. ---------------------------bbox transform---------------------------- const int retained_anchor_num = ::min(total_anchor_num, rpn_pre_nms_top_n); // float *transform_bbox = NULL; // CUDA_CHECK(hipMalloc(&transform_bbox, sizeof(float) * retained_anchor_num * 4)); hipLaunchKernelGGL(( BBoxTransformInv<Dtype>), dim3(caffe::CAFFE_GET_BLOCKS(retained_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0, retained_anchor_num, bottom_rpn_bbox, height, width, FrcnnParam::feat_stride, im_height, im_width, sorted_indices, anchors_, transform_bbox_); hipDeviceSynchronize(); //Step 3. 
-------------------------filter out small box----------------------- // select the box larger than min size // int *selected_flags = NULL; // CUDA_CHECK(hipMalloc(&selected_flags, sizeof(int) * retained_anchor_num)); hipLaunchKernelGGL(( SelectBox), dim3(caffe::CAFFE_GET_BLOCKS(retained_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0, retained_anchor_num, transform_bbox_, bottom_im_info[2] * rpn_min_size, selected_flags_); hipDeviceSynchronize(); // cumulative sum up the flags to get the copy index int *selected_indices_ = NULL; CUDA_CHECK(hipMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num)); void *cumsum_temp_storage_ = NULL; size_t cumsum_temp_storage_bytes_ = 0; hipcub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_, selected_flags_, selected_indices_, retained_anchor_num); DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_; CUDA_CHECK(hipMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_)); hipcub::DeviceScan::InclusiveSum(sort_temp_storage_, cumsum_temp_storage_bytes_, selected_flags_, selected_indices_, retained_anchor_num); // CUDA_CHECK(hipFree(cumsum_temp_storage)); int selected_num = -1; hipMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), hipMemcpyDeviceToHost); CHECK_GT(selected_num, 0); Dtype *bbox_score_ = NULL; if (top.size() == 2) CUDA_CHECK(hipMalloc(&bbox_score_, sizeof(Dtype) * retained_anchor_num)); hipLaunchKernelGGL(( SelectBoxByIndices), dim3(caffe::CAFFE_GET_BLOCKS(selected_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0, selected_num, transform_bbox_, selected_indices_, transform_bbox_, sorted_scores, bbox_score_); hipDeviceSynchronize(); //Step 4. -----------------------------apply nms------------------------------- DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh; vector<int> keep_indices(selected_num); int keep_num = -1; gpu_nms(&keep_indices[0], &keep_num, transform_bbox_, selected_num, 4, rpn_nms_thresh); DLOG(ERROR) << "rpn num after gpu nms: " << keep_num; keep_num = ::min(keep_num, rpn_post_nms_top_n); DLOG(ERROR) << "========== copy to top"; hipMemcpy(gpu_keep_indices_, &keep_indices[0], sizeof(int) * keep_num, hipMemcpyHostToDevice); top[0]->Reshape(keep_num, 5, 1, 1); Dtype *top_data = top[0]->mutable_gpu_data(); Dtype *top_score = NULL; if (top.size() == 2) { top[1]->Reshape(keep_num, 1, 1, 1); top_score = top[1]->mutable_gpu_data(); } hipLaunchKernelGGL(( SelectBoxAftNMS), dim3(caffe::CAFFE_GET_BLOCKS(keep_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0, keep_num, transform_bbox_, gpu_keep_indices_, top_data, bbox_score_, top_score); DLOG(ERROR) << "========== exit proposal layer"; //////////////////////////////////// // do not forget to free the malloc memory CUDA_CHECK(hipFree(sorted_scores)); CUDA_CHECK(hipFree(indices)); CUDA_CHECK(hipFree(sorted_indices)); CUDA_CHECK(hipFree(sort_temp_storage_)); CUDA_CHECK(hipFree(cumsum_temp_storage_)); CUDA_CHECK(hipFree(selected_indices_)); if (bbox_score_!=NULL) CUDA_CHECK(hipFree(bbox_score_)); #endif } } template <typename Dtype> void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { for (int i = 0; i < propagate_down.size(); ++i) { if (propagate_down[i]) { NOT_IMPLEMENTED; } } } INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer); } // namespace frcnn } // namespace caffe
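Step 1 and Step 3 above both rely on the standard two-phase CUB/hipCUB pattern: call once with a null temporary buffer to obtain the required byte count, allocate, then call again to do the work. A minimal CUDA CUB sketch of that pattern for the inclusive scan (all names illustrative); each call would normally get its own temporary buffer and size variable.

#include <cub/cub.cuh>
#include <cuda_runtime.h>

void inclusive_sum_example(const int *d_in, int *d_out, int n) {
    void *d_temp = nullptr;
    size_t temp_bytes = 0;
    // First call only computes temp_bytes because d_temp is null.
    cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n);
    cudaMalloc(&d_temp, temp_bytes);
    // Second call performs the actual prefix sum.
    cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n);
    cudaFree(d_temp);
}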
408587e611909be61948d2362ccca2a2ec4a449a.cu
// ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <cub/cub.cuh> #include <iomanip> #include "caffe/FRCNN/frcnn_proposal_layer.hpp" #include "caffe/FRCNN/util/frcnn_utils.hpp" #include "caffe/FRCNN/util/frcnn_helper.hpp" #include "caffe/FRCNN/util/frcnn_param.hpp" #include "caffe/FRCNN/util/frcnn_gpu_nms.hpp" namespace caffe { namespace Frcnn { using std::vector; __global__ void GetIndex(const int n,int *indices){ CUDA_KERNEL_LOOP(index , n){ indices[index] = index; } } template <typename Dtype> __global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox, const int height, const int width, const int feat_stride, const int im_height, const int im_width, const int* sorted_indices, const float* anchors, float* const transform_bbox) { CUDA_KERNEL_LOOP(index , nthreads) { const int score_idx = sorted_indices[index]; const int i = score_idx % width; // width const int j = (score_idx % (width * height)) / width; // height const int k = score_idx / (width * height); // channel float *box = transform_bbox + index * 4; box[0] = anchors[k * 4 + 0] + i * feat_stride; box[1] = anchors[k * 4 + 1] + j * feat_stride; box[2] = anchors[k * 4 + 2] + i * feat_stride; box[3] = anchors[k * 4 + 3] + j * feat_stride; const Dtype det[4] = { bottom_rpn_bbox[(k * 4 + 0) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 1) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 2) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 3) * height * width + j * width + i] }; float src_w = box[2] - box[0] + 1; float src_h = box[3] - box[1] + 1; float src_ctr_x = box[0] + 0.5 * src_w; float src_ctr_y = box[1] + 0.5 * src_h; float pred_ctr_x = det[0] * src_w + src_ctr_x; float pred_ctr_y = det[1] * src_h + src_ctr_y; float pred_w = exp(det[2]) * src_w; float pred_h = exp(det[3]) * src_h; box[0] = pred_ctr_x - 0.5 * pred_w; box[1] = pred_ctr_y - 0.5 * pred_h; box[2] = pred_ctr_x + 0.5 * pred_w; box[3] = pred_ctr_y + 0.5 * pred_h; box[0] = max(0.0f, min(box[0], im_width - 1.0)); box[1] = max(0.0f, min(box[1], im_height - 1.0)); box[2] = max(0.0f, min(box[2], im_width - 1.0)); box[3] = max(0.0f, min(box[3], im_height - 1.0)); } } __global__ void SelectBox(const int nthreads, const float *box, float min_size, int *flags) { CUDA_KERNEL_LOOP(index , nthreads) { if ((box[index * 4 + 2] - box[index * 4 + 0] < min_size) || (box[index * 4 + 3] - box[index * 4 + 1] < min_size)) { flags[index] = 0; } else { flags[index] = 1; } } } template <typename Dtype> __global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices, float *out_box, const Dtype *in_score, Dtype *out_score) { CUDA_KERNEL_LOOP(index , nthreads) { if ((index == 0 && selected_indices[index] == 1) || (index > 0 && selected_indices[index] == selected_indices[index - 1] + 1)) { out_box[(selected_indices[index] - 1) * 4 + 0] = in_box[index * 4 + 0]; out_box[(selected_indices[index] - 1) * 4 + 1] = in_box[index * 4 + 1]; out_box[(selected_indices[index] - 1) * 4 + 2] = in_box[index * 4 + 2]; out_box[(selected_indices[index] - 1) * 4 + 3] = in_box[index * 4 + 3]; if (in_score!=NULL && out_score!=NULL) { out_score[selected_indices[index] - 1] = in_score[index]; } } } } template <typename Dtype> __global__ void SelectBoxAftNMS(const int nthreads, const float 
*in_box, int *keep_indices, Dtype *top_data, const Dtype *in_score, Dtype* top_score) { CUDA_KERNEL_LOOP(index , nthreads) { top_data[index * 5] = 0; int keep_idx = keep_indices[index]; for (int j = 1; j < 5; ++j) { top_data[index * 5 + j] = in_box[keep_idx * 4 + j - 1]; } if (top_score != NULL && in_score != NULL) { top_score[index] = in_score[keep_idx]; } } } template <typename Dtype> void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { // if(this->phase_ == TEST) { // this->use_gpu_nms_in_forward_cpu = true; // set flag to be used in forward_cpu Forward_cpu(bottom, top); // }else{ { #if 0 DLOG(ERROR) << "========== enter proposal layer"; const Dtype *bottom_rpn_score = bottom[0]->gpu_data(); const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data(); // bottom data comes from host memory Dtype bottom_im_info[3]; CHECK_EQ(bottom[2]->count(), 3); CUDA_CHECK(cudaMemcpy(bottom_im_info, bottom[2]->gpu_data(), sizeof(Dtype) * 3, cudaMemcpyDeviceToHost)); const int num = bottom[1]->num(); const int channes = bottom[1]->channels(); const int height = bottom[1]->height(); const int width = bottom[1]->width(); CHECK(num == 1) << "only single item batches are supported"; CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4"; const float im_height = bottom_im_info[0]; const float im_width = bottom_im_info[1]; int rpn_pre_nms_top_n; int rpn_post_nms_top_n; float rpn_nms_thresh; int rpn_min_size; if (this->phase_ == TRAIN) { rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n; rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n; rpn_nms_thresh = FrcnnParam::rpn_nms_thresh; rpn_min_size = FrcnnParam::rpn_min_size; } else { rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n; rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n; rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh; rpn_min_size = FrcnnParam::test_rpn_min_size; } LOG_IF(ERROR, rpn_pre_nms_top_n <= 0 ) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n; LOG_IF(ERROR, rpn_post_nms_top_n <= 0 ) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n; if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0 ) return; const int config_n_anchors = FrcnnParam::anchors.size() / 4; const int total_anchor_num = config_n_anchors * height * width; //Step 1. 
-------------------------------Sort the rpn result---------------------- // the first half of rpn_score is the bg score // Note that the sorting operator will change the order fg_scores (bottom_rpn_score) Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[total_anchor_num]); Dtype *sorted_scores = NULL; CUDA_CHECK(cudaMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num)); cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores); int *indices = NULL; CUDA_CHECK(cudaMalloc((void**)&indices, sizeof(int) * total_anchor_num)); GetIndex<<<caffe::CAFFE_GET_BLOCKS(total_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( total_anchor_num, indices); cudaDeviceSynchronize(); int *sorted_indices = NULL; CUDA_CHECK(cudaMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num)); cub::DoubleBuffer<int> d_values(indices, sorted_indices); void *sort_temp_storage_ = NULL; size_t sort_temp_storage_bytes_ = 0; // calculate the temp_storage_bytes cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_, d_keys, d_values, total_anchor_num); DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_; CUDA_CHECK(cudaMalloc(&sort_temp_storage_, sort_temp_storage_bytes_)); // sorting cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_, d_keys, d_values, total_anchor_num); cudaDeviceSynchronize(); //Step 2. ---------------------------bbox transform---------------------------- const int retained_anchor_num = std::min(total_anchor_num, rpn_pre_nms_top_n); // float *transform_bbox = NULL; // CUDA_CHECK(cudaMalloc(&transform_bbox, sizeof(float) * retained_anchor_num * 4)); BBoxTransformInv<Dtype><<<caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( retained_anchor_num, bottom_rpn_bbox, height, width, FrcnnParam::feat_stride, im_height, im_width, sorted_indices, anchors_, transform_bbox_); cudaDeviceSynchronize(); //Step 3. 
-------------------------filter out small box----------------------- // select the box larger than min size // int *selected_flags = NULL; // CUDA_CHECK(cudaMalloc(&selected_flags, sizeof(int) * retained_anchor_num)); SelectBox<<<caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( retained_anchor_num, transform_bbox_, bottom_im_info[2] * rpn_min_size, selected_flags_); cudaDeviceSynchronize(); // cumulative sum up the flags to get the copy index int *selected_indices_ = NULL; CUDA_CHECK(cudaMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num)); void *cumsum_temp_storage_ = NULL; size_t cumsum_temp_storage_bytes_ = 0; cub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_, selected_flags_, selected_indices_, retained_anchor_num); DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_; CUDA_CHECK(cudaMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_)); cub::DeviceScan::InclusiveSum(sort_temp_storage_, cumsum_temp_storage_bytes_, selected_flags_, selected_indices_, retained_anchor_num); // CUDA_CHECK(cudaFree(cumsum_temp_storage)); int selected_num = -1; cudaMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), cudaMemcpyDeviceToHost); CHECK_GT(selected_num, 0); Dtype *bbox_score_ = NULL; if (top.size() == 2) CUDA_CHECK(cudaMalloc(&bbox_score_, sizeof(Dtype) * retained_anchor_num)); SelectBoxByIndices<<<caffe::CAFFE_GET_BLOCKS(selected_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( selected_num, transform_bbox_, selected_indices_, transform_bbox_, sorted_scores, bbox_score_); cudaDeviceSynchronize(); //Step 4. -----------------------------apply nms------------------------------- DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh; vector<int> keep_indices(selected_num); int keep_num = -1; gpu_nms(&keep_indices[0], &keep_num, transform_bbox_, selected_num, 4, rpn_nms_thresh); DLOG(ERROR) << "rpn num after gpu nms: " << keep_num; keep_num = std::min(keep_num, rpn_post_nms_top_n); DLOG(ERROR) << "========== copy to top"; cudaMemcpy(gpu_keep_indices_, &keep_indices[0], sizeof(int) * keep_num, cudaMemcpyHostToDevice); top[0]->Reshape(keep_num, 5, 1, 1); Dtype *top_data = top[0]->mutable_gpu_data(); Dtype *top_score = NULL; if (top.size() == 2) { top[1]->Reshape(keep_num, 1, 1, 1); top_score = top[1]->mutable_gpu_data(); } SelectBoxAftNMS<<<caffe::CAFFE_GET_BLOCKS(keep_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( keep_num, transform_bbox_, gpu_keep_indices_, top_data, bbox_score_, top_score); DLOG(ERROR) << "========== exit proposal layer"; //////////////////////////////////// // do not forget to free the malloc memory CUDA_CHECK(cudaFree(sorted_scores)); CUDA_CHECK(cudaFree(indices)); CUDA_CHECK(cudaFree(sorted_indices)); CUDA_CHECK(cudaFree(sort_temp_storage_)); CUDA_CHECK(cudaFree(cumsum_temp_storage_)); CUDA_CHECK(cudaFree(selected_indices_)); if (bbox_score_!=NULL) CUDA_CHECK(cudaFree(bbox_score_)); #endif } } template <typename Dtype> void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { for (int i = 0; i < propagate_down.size(); ++i) { if (propagate_down[i]) { NOT_IMPLEMENTED; } } } INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer); } // namespace frcnn } // namespace caffe
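Step 3 of the same layer compacts the boxes that survive the size filter with a flags-then-scan idiom: SelectBox writes a 0/1 flag per box, cub::DeviceScan::InclusiveSum turns the flags into one-based destination positions, and SelectBoxByIndices scatters each kept box to position - 1. The sketch below shows that idiom on a plain int array (hypothetical data; the compact kernel name is mine). Note that each cub call sizes its temporary buffer with a dry-run call and must then be handed that same buffer for the real run.

#include <cub/cub.cuh>
#include <cstdio>

// Scatter every flagged element to its slot in the compacted output.
__global__ void compact(const int *in, const int *flags, const int *pos, int n, int *out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && flags[i])
        out[pos[i] - 1] = in[i];   // inclusive scan => destination is pos[i] - 1
}

int main() {
    const int n = 8;
    int h_in[n]    = {5, 9, 2, 7, 4, 8, 1, 6};
    int h_flags[n] = {1, 0, 0, 1, 1, 0, 0, 1};   // keep 5, 7, 4, 6

    int *d_in, *d_flags, *d_pos, *d_out;
    cudaMalloc(&d_in,    n * sizeof(int));
    cudaMalloc(&d_flags, n * sizeof(int));
    cudaMalloc(&d_pos,   n * sizeof(int));
    cudaMalloc(&d_out,   n * sizeof(int));
    cudaMemcpy(d_in,    h_in,    n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_flags, h_flags, n * sizeof(int), cudaMemcpyHostToDevice);

    // Dry run sizes the temporary buffer; the second call does the scan with that buffer.
    void *d_temp = NULL; size_t temp_bytes = 0;
    cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_flags, d_pos, n);
    cudaMalloc(&d_temp, temp_bytes);
    cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_flags, d_pos, n);

    compact<<<1, 32>>>(d_in, d_flags, d_pos, n, d_out);

    // The last scan value is the number of kept elements.
    int kept = 0;
    cudaMemcpy(&kept, d_pos + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
    int h_out[n];
    cudaMemcpy(h_out, d_out, kept * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < kept; ++i) printf("%d ", h_out[i]);
    printf("\n");

    cudaFree(d_in); cudaFree(d_flags); cudaFree(d_pos); cudaFree(d_out); cudaFree(d_temp);
    return 0;
}

Reading the last element of the scan output, as the layer does with selected_indices_[retained_anchor_num - 1], yields the number of kept boxes.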
c334de0ad4007ef6183eb67bfb0797bf54731bcf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N (2048 * 2048) #define THREADS_PER_BLOCK 512 #define RADIUS 3 __global__ void add(int *in,int *out,int size) { __shared__ int temp[THREADS_PER_BLOCK + (2*RADIUS)]; int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; int localIdx = threadIdx.x + RADIUS; int localSum = 0 ; temp[localIdx] = in[globalIdx]; if(threadIdx.x < RADIUS) { if((globalIdx - RADIUS) >= 1) { temp[localIdx - RADIUS] = in[globalIdx - RADIUS]; }else { temp[localIdx - RADIUS] = 0; } if((globalIdx + THREADS_PER_BLOCK) < size) { temp[localIdx + THREADS_PER_BLOCK] = in[globalIdx + THREADS_PER_BLOCK]; }else { temp[localIdx + THREADS_PER_BLOCK] = 0; } } __syncthreads(); for(int i = -RADIUS; i <= RADIUS; i++) { localSum = localSum + temp[threadIdx.x + RADIUS + i]; } out[globalIdx] = localSum; __syncthreads(); } int main(void) { int *a,*b; int *d_a,*d_b; int size = N * sizeof(int); hipMalloc((void **)&d_a,size); hipMalloc((void **)&d_b,size); a = (int *)malloc(size); b = (int *)malloc(size); for(int i = 0; i < N;i++) { a[i] = 1; } hipMemcpy(d_a,a,size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( add), dim3((N + THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK),dim3(THREADS_PER_BLOCK), 0, 0, d_a,d_b,N); hipMemcpy(b,d_b,size,hipMemcpyDeviceToHost); printf("Hello world %d\n",b[120]); free(a); free(b); hipFree(d_a); hipFree(d_b); return 0; }
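The stencil program above only prints b[120]; with every input set to 1 and RADIUS 3, each interior output should be 7 and the few edge outputs less. A fuller check is to recompute the stencil on the host with zero padding beyond the array ends and compare element by element. The fragment below is a sketch of such a verification that could be placed after the device-to-host copy; it reuses the example's a, b, N, and RADIUS, introduces only the local variables shown, and is identical for the HIP and CUDA versions since it is pure host code.

// Host-side reference: out[i] = sum of in[i-RADIUS .. i+RADIUS], out-of-range inputs count as 0.
int mismatches = 0;
for (int i = 0; i < N; i++) {
    int expected = 0;
    for (int j = i - RADIUS; j <= i + RADIUS; j++) {
        if (j >= 0 && j < N)
            expected += a[j];
    }
    if (b[i] != expected && mismatches++ < 10)
        printf("mismatch at %d: got %d, expected %d\n", i, b[i], expected);
}
printf("%d mismatches\n", mismatches);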
c334de0ad4007ef6183eb67bfb0797bf54731bcf.cu
#include <stdio.h> #define N (2048 * 2048) #define THREADS_PER_BLOCK 512 #define RADIUS 3 __global__ void add(int *in,int *out,int size) { __shared__ int temp[THREADS_PER_BLOCK + (2*RADIUS)]; int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; int localIdx = threadIdx.x + RADIUS; int localSum = 0 ; temp[localIdx] = in[globalIdx]; if(threadIdx.x < RADIUS) { if((globalIdx - RADIUS) >= 1) { temp[localIdx - RADIUS] = in[globalIdx - RADIUS]; }else { temp[localIdx - RADIUS] = 0; } if((globalIdx + THREADS_PER_BLOCK) < size) { temp[localIdx + THREADS_PER_BLOCK] = in[globalIdx + THREADS_PER_BLOCK]; }else { temp[localIdx + THREADS_PER_BLOCK] = 0; } } __syncthreads(); for(int i = -RADIUS; i <= RADIUS; i++) { localSum = localSum + temp[threadIdx.x + RADIUS + i]; } out[globalIdx] = localSum; __syncthreads(); } int main(void) { int *a,*b; int *d_a,*d_b; int size = N * sizeof(int); cudaMalloc((void **)&d_a,size); cudaMalloc((void **)&d_b,size); a = (int *)malloc(size); b = (int *)malloc(size); for(int i = 0; i < N;i++) { a[i] = 1; } cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice); add<<<(N + THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_a,d_b,N); cudaMemcpy(b,d_b,size,cudaMemcpyDeviceToHost); printf("Hello world %d\n",b[120]); free(a); free(b); cudaFree(d_a); cudaFree(d_b); return 0; }
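Neither version of the stencil example checks any return codes, so a failed allocation or launch would only show up as a wrong printf at the end. A common hardening pattern, used elsewhere in this collection under names like CUDA_CHECK and wbCheck, is a macro around every runtime call plus a cudaGetLastError()/cudaDeviceSynchronize() pair right after the kernel launch. The sketch below shows that pattern in plain CUDA with a hypothetical CHECK macro and a throwaway dummy kernel; hipified code would use the hip* equivalents.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical helper: abort with file/line on any CUDA runtime error.
#define CHECK(call)                                                        \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,             \
                    cudaGetErrorString(err_));                             \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

__global__ void dummy(int *p) { p[threadIdx.x] = threadIdx.x; }

int main() {
    int *d_p = NULL;
    CHECK(cudaMalloc(&d_p, 32 * sizeof(int)));

    dummy<<<1, 32>>>(d_p);
    CHECK(cudaGetLastError());        // catches launch-configuration errors
    CHECK(cudaDeviceSynchronize());   // catches errors raised while the kernel ran

    CHECK(cudaFree(d_p));
    return 0;
}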
3092e20d2a81a33c767deb389494969da123ea80.hip
// !!! This is a file automatically generated by hipify!!! // MP Reduction // Given a list (lst) of length n // Output its sum = lst[0] + lst[1] + ... + lst[n-1]; #include <wb.h> #define BLOCK_SIZE 512 //@@ You can change this #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while(0) void total(float * input, float * output, int len) { //@@ Load a segment of the input vector into shared memory //@@ Traverse the reduction tree //@@ Write the computed sum of the block to the output vector at the //@@ correct index } int main(int argc, char ** argv) { int ii; wbArg_t args; float * hostInput; // The input 1D list float * hostOutput; // The output list float * deviceInput; float * deviceOutput; int numInputElements; // number of elements in the input list int numOutputElements; // number of elements in the output list args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numInputElements); numOutputElements = numInputElements / (BLOCK_SIZE<<1); if (numInputElements % (BLOCK_SIZE<<1)) { numOutputElements++; } hostOutput = (float*) malloc(numOutputElements * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The number of input elements in the input is ", numInputElements); wbLog(TRACE, "The number of output elements in the input is ", numOutputElements); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbTime_stop(Copy, "Copying output memory to the CPU"); /******************************************************************** * Reduce output vector on the host * NOTE: One could also perform the reduction of the output vector * recursively and support any size input. For simplicity, we do not * require that for this lab. ********************************************************************/ for (ii = 1; ii < numOutputElements; ii++) { hostOutput[0] += hostOutput[ii]; } wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, 1); free(hostInput); free(hostOutput); return 0; }
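The //@@ markers in this lab skeleton leave the body of total open. A standard way to complete it, so that it matches the host code's assumption of 2*BLOCK_SIZE inputs per block (each thread loads two elements, then a shared-memory tree reduction leaves one partial sum per block), is sketched below. It is one plausible solution, not the lab's official one; note that the skeleton's total would also need the __global__ qualifier, and the device code is the same in the HIP and CUDA versions.

__global__ void total(float *input, float *output, int len) {
    __shared__ float partialSum[2 * BLOCK_SIZE];
    unsigned int t = threadIdx.x;
    unsigned int start = 2 * blockIdx.x * BLOCK_SIZE;

    //@@ Load a segment of the input vector into shared memory (two elements per thread)
    partialSum[t] = (start + t < len) ? input[start + t] : 0.0f;
    partialSum[BLOCK_SIZE + t] =
        (start + BLOCK_SIZE + t < len) ? input[start + BLOCK_SIZE + t] : 0.0f;

    //@@ Traverse the reduction tree
    for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t + stride];
    }

    //@@ Write the computed sum of the block to the output vector at the correct index
    if (t == 0)
        output[blockIdx.x] = partialSum[0];
}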
3092e20d2a81a33c767deb389494969da123ea80.cu
// MP Reduction // Given a list (lst) of length n // Output its sum = lst[0] + lst[1] + ... + lst[n-1]; #include <wb.h> #define BLOCK_SIZE 512 //@@ You can change this #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while(0) void total(float * input, float * output, int len) { //@@ Load a segment of the input vector into shared memory //@@ Traverse the reduction tree //@@ Write the computed sum of the block to the output vector at the //@@ correct index } int main(int argc, char ** argv) { int ii; wbArg_t args; float * hostInput; // The input 1D list float * hostOutput; // The output list float * deviceInput; float * deviceOutput; int numInputElements; // number of elements in the input list int numOutputElements; // number of elements in the output list args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numInputElements); numOutputElements = numInputElements / (BLOCK_SIZE<<1); if (numInputElements % (BLOCK_SIZE<<1)) { numOutputElements++; } hostOutput = (float*) malloc(numOutputElements * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The number of input elements in the input is ", numInputElements); wbLog(TRACE, "The number of output elements in the input is ", numOutputElements); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbTime_stop(Copy, "Copying output memory to the CPU"); /******************************************************************** * Reduce output vector on the host * NOTE: One could also perform the reduction of the output vector * recursively and support any size input. For simplicity, we do not * require that for this lab. ********************************************************************/ for (ii = 1; ii < numOutputElements; ii++) { hostOutput[0] += hostOutput[ii]; } wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, 1); free(hostInput); free(hostOutput); return 0; }
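The host side of the same skeleton has matching //@@ gaps for allocation, copies, launch configuration, and cleanup. One plausible way to fill them, written with the CUDA spellings used in this .cu version and assuming the two-elements-per-thread kernel sketched above (so one block per output element), is the fragment below; it reuses only names the skeleton already declares (deviceInput, deviceOutput, numInputElements, numOutputElements, BLOCK_SIZE, wbCheck, total).

//@@ Allocate GPU memory here
wbCheck(cudaMalloc((void **)&deviceInput,  numInputElements  * sizeof(float)));
wbCheck(cudaMalloc((void **)&deviceOutput, numOutputElements * sizeof(float)));

//@@ Copy memory to the GPU here
wbCheck(cudaMemcpy(deviceInput, hostInput,
                   numInputElements * sizeof(float), cudaMemcpyHostToDevice));

//@@ Initialize the grid and block dimensions here
dim3 dimGrid(numOutputElements, 1, 1);   // one block per partial sum
dim3 dimBlock(BLOCK_SIZE, 1, 1);

//@@ Launch the GPU Kernel here
total<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, numInputElements);

//@@ Copy the GPU memory back to the CPU here
wbCheck(cudaMemcpy(hostOutput, deviceOutput,
                   numOutputElements * sizeof(float), cudaMemcpyDeviceToHost));

//@@ Free the GPU memory here
wbCheck(cudaFree(deviceInput));
wbCheck(cudaFree(deviceOutput));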
65c48a08382fe213dfd77e3f51271bc51acdace5.hip
// !!! This is a file automatically generated by hipify!!! /* * The MIT License (MIT) * * Copyright (c) 2015-2016 Maximilian Knespel * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "testVectorReduce.hpp" #include <iostream> #include <iomanip> #include <cassert> #include <cstdlib> // srand, rand #include <cstdint> // uint32_t, uint64_t #include <chrono> #include <limits> #include <vector> #include <cmath> #include <cfloat> // FLT_MAX #include <bitset> #include <hip/hip_runtime.h> #include <hipfft.h> // hipfftComplex #ifdef USE_FFTW # include <fftw3.h> # include "libs/hybridInputOutput.hpp" #endif #include "algorithms/vectorReduce.hpp" #include "algorithms/cuda/cudaVectorReduce.hpp" #include "benchmark/imresh/algorithms/cuda/cudaVectorReduce.hpp" #include "libs/cudacommon.h" #include "benchmarkHelper.hpp" namespace imresh { namespace algorithms { unsigned int constexpr nRepetitions = 20; template<class T_PREC> bool compareFloat( const char * file, int line, T_PREC a, T_PREC b, T_PREC marginFactor = 1.0 ) { auto const max = ::max( std::abs(a), std::abs(b) ); if ( max == 0 ) return true; // both are 0 and therefore equal auto const relErr = fabs( a - b ) / max; auto const maxRelErr = marginFactor * std::numeric_limits<T_PREC>::epsilon(); if ( not ( relErr <= maxRelErr ) ) printf( "[%s:%i] relErr: %f > %f :maxRelErr!\n", file, line, relErr, maxRelErr ); return relErr <= maxRelErr; } void testVectorReduce( void ) { using namespace std::chrono; using namespace benchmark::imresh::algorithms::cuda; using namespace imresh::algorithms::cuda; using namespace imresh::algorithms; const unsigned nMaxElements = 64*1024*1024; // ~4000x4000 pixel auto pData = new float[nMaxElements]; srand(350471643); for ( unsigned i = 0; i < nMaxElements; ++i ) pData[i] = ( (float) rand() / RAND_MAX ) - 0.5f; float * dpData; CUDA_ERROR( hipMalloc( (void**)&dpData, nMaxElements*sizeof(dpData[0]) ) ); CUDA_ERROR( hipMemcpy( dpData, pData, nMaxElements*sizeof(dpData[0]), hipMemcpyHostToDevice ) ); /* Test for array of length 1 */ assert( vectorMin( pData, 1 ) == pData[0] ); assert( vectorMax( pData, 1 ) == pData[0] ); assert( vectorSum( pData, 1 ) == pData[0] ); assert( cudaVectorMin( dpData, 1 ) == pData[0] ); assert( cudaVectorMax( dpData, 1 ) == pData[0] ); assert( cudaVectorSum( dpData, 1 ) == pData[0] ); /* do some checks with longer arrays and obvious results */ float obviousMaximum = 7.37519; float obviousMinimum =-7.37519; /* in order to filter out page time outs or similarily long random wait * times, we repeat the 
measurement nRepetitions times and choose the * shortest duration measured */ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); using clock = std::chrono::high_resolution_clock; std::cout << "vector length : cudaVectorMax (global atomic) | cudaVectorMax (global atomic) | cudaVectorMax (shared memory) | cudaVectorMax (shared memory+warp reduce) | cudaVectorMax (__shfl_down) | vectorMax | cudaVectorMin (__shfl_down) | vectorMin\n"; using namespace imresh::tests; for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) ) { std::cout << std::setw(8) << nElements << " : "; float milliseconds, minTime; decltype( clock::now() ) clock0, clock1; int iObviousValuePos = rand() % nElements; // std::cout << "iObviousValuePos = " << iObviousValuePos << "\n"; // std::cout << "nElements = " << nElements << "\n"; /* Maximum */ pData[iObviousValuePos] = obviousMaximum; CUDA_ERROR( hipMemcpy( dpData, pData, nElements*sizeof(dpData[0]), hipMemcpyHostToDevice ) ); #define TIME_GPU( FUNC, OBVIOUS_VALUE ) \ { \ minTime = FLT_MAX; \ for ( unsigned iRepetition = 0; iRepetition < nRepetitions; \ ++iRepetition ) \ { \ hipEventRecord( start ); \ auto cudaReduced = FUNC( dpData, nElements ); \ hipEventRecord( stop ); \ hipEventSynchronize( stop ); \ hipEventElapsedTime( &milliseconds, start, stop ); \ minTime = fmin( minTime, milliseconds ); \ assert( cudaReduced == OBVIOUS_VALUE ); \ } \ std::cout << std::setw(8) << minTime << " |" << std::flush; \ } TIME_GPU( cudaVectorMaxGlobalAtomic2 , obviousMaximum ) TIME_GPU( cudaVectorMaxGlobalAtomic , obviousMaximum ) TIME_GPU( cudaVectorMaxSharedMemory , obviousMaximum ) TIME_GPU( cudaVectorMaxSharedMemoryWarps, obviousMaximum ) TIME_GPU( cudaVectorMax , obviousMaximum ) /* time CPU */ #define TIME_CPU( FUNC, OBVIOUS_VALUE ) \ { \ minTime = FLT_MAX; \ for ( unsigned iRepetition = 0; iRepetition < nRepetitions; \ ++iRepetition ) \ { \ clock0 = clock::now(); \ auto cpuMax = FUNC( pData, nElements ); \ clock1 = clock::now(); \ auto seconds = duration_cast<duration<double>>( \ clock1 - clock0 ); \ minTime = fmin( minTime, seconds.count() * 1000 ); \ assert( cpuMax == OBVIOUS_VALUE ); \ } \ std::cout << std::setw(8) << minTime << " |" << std::flush; \ } TIME_CPU( vectorMax, obviousMaximum ) /* Minimum */ pData[iObviousValuePos] = obviousMinimum; CUDA_ERROR( hipMemcpy( dpData, pData, nElements*sizeof(dpData[0]), hipMemcpyHostToDevice ) ); TIME_GPU( cudaVectorMin, obviousMinimum ) TIME_CPU( vectorMin, obviousMinimum ) /* set obvious value back to random value */ pData[iObviousValuePos] = (float) rand() / RAND_MAX; std::cout << "\n"; #undef TIME_GPU #undef TIME_CPU } //for ( unsigned nElements = 2; nElements CUDA_ERROR( hipFree( dpData ) ); delete[] pData; } template<class T_MASK, class T_PACKED> __attribute__(( optimize("unroll-loops") )) void unpackBitMask ( T_MASK * const __restrict__ rMask, T_PACKED const * const __restrict__ rPackedBits, unsigned int const nElements ) { auto const nElem = rMask + nElements; auto constexpr nBits = sizeof( T_PACKED ) * 8u; auto iPacked = rPackedBits; for ( auto iElem = rMask; iElem < nElem; ++iPacked ) { auto bitMask = T_PACKED(0x01) << ( nBits-1 ); for ( auto iBit = 0u; iBit < nBits; ++iBit, ++iElem ) { if ( iElem >= nElem ) break; assert( bitMask != T_MASK(0) ); assert( iElem < rMask + nElements ); assert( iPacked < rPackedBits + ceilDiv( nElements, nBits ) ); *iElem = T_MASK( (*iPacked & bitMask) != 0 ); bitMask >>= 1; } } } void testUnpackBitMask( void ) { uint32_t packed = 0x33333333; constexpr auto 
nElements = 8 * sizeof( packed ); bool unpacked[ nElements ]; unpacked[ nElements-2 ] = 1; unpacked[ nElements-1 ] = 0; unpackBitMask( unpacked, &packed, nElements-2 ); for ( auto i = 0u; i < (nElements-2)/2; ++i ) { assert( unpacked[2*i+0] == i % 2 ); assert( unpacked[2*i+1] == i % 2 ); } assert( unpacked[ nElements-2 ] == 1 ); assert( unpacked[ nElements-1 ] == 0 ); } void testCalculateHioError( void ) { using namespace std::chrono; using namespace benchmark::imresh::algorithms::cuda; // cudaCalculateHioErrorBitPacked using namespace imresh::algorithms::cuda; // cudaKernelCalculateHioError using namespace imresh::libs; // calculateHioError using namespace imresh::tests; // getLogSpacedSamplingPoints const unsigned nMaxElements = 64*1024*1024; // ~4000x4000 pixel /* allocate */ hipfftComplex * dpData, * pData; unsigned char * dpIsMaskedChar, * pIsMaskedChar; float * dpIsMasked , * pIsMasked; unsigned * dpBitMasked , * pBitMasked; auto const nBitMaskedElements = ceilDiv( nMaxElements, 8 * sizeof( dpBitMasked[0] ) ); CUDA_ERROR( hipMalloc( &dpIsMaskedChar, nMaxElements * sizeof( dpIsMaskedChar[0] ) ) ); CUDA_ERROR( hipMalloc( &dpData , nMaxElements * sizeof( dpData [0] ) ) ); CUDA_ERROR( hipMalloc( &dpIsMasked , nMaxElements * sizeof( dpIsMasked [0] ) ) ); CUDA_ERROR( hipMalloc( &dpBitMasked , nBitMaskedElements * sizeof( dpBitMasked [0] ) ) ); pData = new hipfftComplex [ nMaxElements ]; pIsMaskedChar = new unsigned char[ nMaxElements ]; pIsMasked = new float [ nMaxElements ]; pBitMasked = new unsigned[ nBitMaskedElements ]; /* allocate result buffer for reduced values of calculateHioError * kernel call */ float nMaskedPixels, * dpnMaskedPixels; float totalError , * dpTotalError; CUDA_ERROR( hipMalloc( &dpnMaskedPixels, sizeof(float) ) ); CUDA_ERROR( hipMalloc( &dpTotalError , sizeof(float) ) ); /* initialize mask randomly */ assert( sizeof(int) == 4 ); srand(350471643); for ( auto i = 0u; i < nBitMaskedElements; ++i ) pBitMasked[i] = rand() % UINT_MAX; unpackBitMask( pIsMasked, pBitMasked, nMaxElements ); for ( auto i = 0u; i < nMaxElements; ++i ) { pIsMaskedChar[i] = pIsMasked[i]; assert( pIsMaskedChar[i] == 0 or pIsMaskedChar[i] == 1 ); } std::cout << "[unpacked] "; for ( int i = 0; i < 32; ++i ) std::cout << pIsMasked[i]; std::cout << "\n"; std::cout << "[ packed] " << std::bitset<32>( pBitMasked[0] ) << "\n"; /* initialize data with Pythagorean triple 3*3 + 4*4 = 5*5 for masked bits */ for ( auto i = 0u; i < nMaxElements; ++i ) { if ( pIsMasked[i] ) { pData[i].x = 3.0f; pData[i].y = 4.0f; } else { pData[i].x = (float) rand() / RAND_MAX; pData[i].y = (float) rand() / RAND_MAX; } } /* if calculateHioError works correctly then we simply get * #masked * 5 as the mean complex norm error */ /* push to GPU */ CUDA_ERROR( hipMemcpy( dpData , pData , nMaxElements * sizeof( pData [0] ), hipMemcpyHostToDevice ) ); CUDA_ERROR( hipMemcpy( dpIsMasked , pIsMasked , nMaxElements * sizeof( pIsMasked[0] ), hipMemcpyHostToDevice ) ); CUDA_ERROR( hipMemcpy( dpBitMasked, pBitMasked, nBitMaskedElements * sizeof( pBitMasked[0] ), hipMemcpyHostToDevice ) ); CUDA_ERROR( hipMemcpy( dpIsMaskedChar, pIsMaskedChar, nMaxElements * sizeof( pIsMaskedChar[0] ), hipMemcpyHostToDevice ) ); std::cout << "test with randomly masked pythagorean triples"; /* because the number of elements we include only increases the number * of found masked elements should also only increase. */ float nLastMaskedPixels = 0; for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) ) { std::cout << "." 
<< std::flush; CUDA_ERROR( hipMemset( dpnMaskedPixels, 0, sizeof(float) ) ); CUDA_ERROR( hipMemset( dpTotalError , 0, sizeof(float) ) ); hipLaunchKernelGGL(( cudaKernelCalculateHioError), dim3(3),dim3(256), 0, 0, dpData, dpIsMasked, nElements, false /* don't invert mask */, dpTotalError, dpnMaskedPixels ); CUDA_ERROR( hipMemcpy( &nMaskedPixels, dpnMaskedPixels, sizeof(float), hipMemcpyDeviceToHost) ); CUDA_ERROR( hipMemcpy( &totalError, dpTotalError, sizeof(float), hipMemcpyDeviceToHost) ); /* Calculation done, now check if everything is correct */ if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2 { assert( nLastMaskedPixels <= nMaskedPixels ); assert( (unsigned) totalError % 5 == 0 ); assert( nMaskedPixels * 5 == totalError ); } nLastMaskedPixels = nMaskedPixels; /* check char version */ CUDA_ERROR( hipMemset( dpnMaskedPixels, 0, sizeof(float) ) ); CUDA_ERROR( hipMemset( dpTotalError , 0, sizeof(float) ) ); hipLaunchKernelGGL(( cudaKernelCalculateHioError), dim3(3),dim3(256), 0, 0, dpData, dpIsMaskedChar, nElements, false /* don't invert mask */, dpTotalError, dpnMaskedPixels ); CUDA_ERROR( hipMemcpy( &nMaskedPixels, dpnMaskedPixels, sizeof(float), hipMemcpyDeviceToHost) ); CUDA_ERROR( hipMemcpy( &totalError, dpTotalError, sizeof(float), hipMemcpyDeviceToHost) ); /* Calculation done, now check if everything is correct */ if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2 { assert( nLastMaskedPixels == nMaskedPixels ); assert( (unsigned) totalError % 5 == 0 ); assert( nMaskedPixels * 5 == totalError ); } /* check packed bit version */ CUDA_ERROR( hipMemset( dpnMaskedPixels, 0, sizeof(float) ) ); CUDA_ERROR( hipMemset( dpTotalError , 0, sizeof(float) ) ); hipLaunchKernelGGL(( cudaKernelCalculateHioErrorBitPacked), dim3(1),dim3(32), 0, 0, dpData, dpBitMasked, nElements, dpTotalError, dpnMaskedPixels ); CUDA_ERROR( hipMemcpy( &nMaskedPixels, dpnMaskedPixels, sizeof(float), hipMemcpyDeviceToHost) ); CUDA_ERROR( hipMemcpy( &totalError, dpTotalError, sizeof(float), hipMemcpyDeviceToHost) ); /* Calculation done, now check if everything is correct */ if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2 { if ( not ( nLastMaskedPixels == nMaskedPixels ) ) { printf( "nLastMaskedPixels: %f, nMaskedPixels: %f, totalError: %f\n", nLastMaskedPixels, nMaskedPixels, totalError ); assert( nLastMaskedPixels == nMaskedPixels ); } if ( not ( (unsigned) totalError % 5 == 0 ) ) { printf( "totalError: %f, nMaskedPixels: %f\n", totalError, nMaskedPixels ); assert( (unsigned) totalError % 5 == 0 ); } assert( nMaskedPixels * 5 == totalError ); } else { /* no use continuing this loop if we can't assert anything */ break; } #ifdef USE_FFTW static_assert( sizeof( hipfftComplex ) == sizeof( fftwf_complex ), "" ); /* now compare with CPU version which should give the exact same * result, as there should be no floating point rounding errors * for relatively short array ( < 1e6 ? 
) */ float nMaskedPixelsCpu, totalErrorCpu; calculateHioError( (fftwf_complex*) pData, pIsMasked, nElements, /* is inverted: */ false, &totalErrorCpu, &nMaskedPixelsCpu ); /* when rounding errors occur the order becomes important */ if ( totalError < 16777216 ) { assert( compareFloat( __FILE__, __LINE__, totalError, totalErrorCpu, sqrtf(nElements) ) ); assert( nMaskedPixelsCpu == nMaskedPixels ); } #endif } std::cout << "OK\n"; /* benchmark with random numbers */ for ( auto i = 0u; i < nBitMaskedElements; ++i ) { pData[i].x = (float) rand() / RAND_MAX; pData[i].y = (float) rand() / RAND_MAX; } CUDA_ERROR( hipMemcpy( dpData, pData, nMaxElements * sizeof( pData[0] ), hipMemcpyHostToDevice ) ); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); using clock = std::chrono::high_resolution_clock; std::cout << "time in milliseconds:\n"; std::cout << "vector length : cudaCalcHioError(uint32_t) | cudaCalcHioError(char) | cudaCalcHioError(packed) | calcHioError (CPU) |\n"; for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) ) { std::cout << std::setw(8) << nElements << " : "; float milliseconds, minTime; decltype( clock::now() ) clock0, clock1; float error; #define TIME_GPU( FUNC, MASK ) \ minTime = FLT_MAX; \ for ( auto iRepetition = 0u; iRepetition < nRepetitions; \ ++iRepetition ) \ { \ hipEventRecord( start ); \ error = FUNC( dpData, MASK, nElements ); \ hipEventRecord( stop ); \ hipEventSynchronize( stop ); \ hipEventElapsedTime( &milliseconds, start, stop ); \ minTime = fmin( minTime, milliseconds ); \ assert( error <= nElements ); \ } \ std::cout << std::setw(8) << minTime << " |" << std::flush; TIME_GPU( cudaCalculateHioError, dpIsMasked ) auto unpackedError = error; TIME_GPU( cudaCalculateHioError, dpIsMaskedChar ) // sets error compareFloat( __FILE__, __LINE__, unpackedError, error, sqrtf(nElements) ); TIME_GPU( cudaCalculateHioErrorBitPacked, dpBitMasked ) // sets error compareFloat( __FILE__, __LINE__, unpackedError, error, sqrtf(nElements) ); #ifdef USE_FFTW /* time CPU */ minTime = FLT_MAX; for ( auto iRepetition = 0u; iRepetition < nRepetitions; ++iRepetition ) { clock0 = clock::now(); auto error = calculateHioError( (fftwf_complex*) pData, pIsMasked, nElements ); clock1 = clock::now(); auto seconds = duration_cast<duration<double>>( clock1 - clock0 ); minTime = fmin( minTime, seconds.count() * 1000 ); assert( error <= nElements ); } #endif std::cout << std::setw(8) << minTime << "\n" << std::flush; } /* free */ CUDA_ERROR( hipFree( dpnMaskedPixels ) ); CUDA_ERROR( hipFree( dpTotalError ) ); CUDA_ERROR( hipFree( dpData ) ); CUDA_ERROR( hipFree( dpIsMasked ) ); CUDA_ERROR( hipFree( dpBitMasked ) ); delete[] pData; delete[] pIsMasked; delete[] pBitMasked; } } // namespace algorithms } // namespace imresh
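The TIME_GPU macro in this test encodes a small benchmarking discipline: record an event before and after only the call of interest, synchronize on the stop event before reading the elapsed time, and keep the minimum over several repetitions to filter out one-off stalls such as page timeouts. Stripped of the test's asserts, that pattern can be packaged as a helper like the sketch below; benchmarkMs and timedWork are my names, not part of the project, and the hipified file above expresses the same calls through hipEvent_t, hipEventRecord, hipEventSynchronize, and hipEventElapsedTime.

#include <cfloat>
#include <cuda_runtime.h>

// Time a piece of GPU work on the default stream and return the best run in milliseconds.
float benchmarkMs(void (*timedWork)(), int nRepetitions) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    float minMs = FLT_MAX;
    for (int i = 0; i < nRepetitions; ++i) {
        cudaEventRecord(start);
        timedWork();                         // kernel launch(es) being measured
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);          // wait until the stop event has actually happened
        float ms = 0.0f;
        cudaEventElapsedTime(&ms, start, stop);
        minMs = fminf(minMs, ms);            // keep the shortest run, as TIME_GPU does
    }

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return minMs;
}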
65c48a08382fe213dfd77e3f51271bc51acdace5.cu
/* * The MIT License (MIT) * * Copyright (c) 2015-2016 Maximilian Knespel * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "testVectorReduce.hpp" #include <iostream> #include <iomanip> #include <cassert> #include <cstdlib> // srand, rand #include <cstdint> // uint32_t, uint64_t #include <chrono> #include <limits> #include <vector> #include <cmath> #include <cfloat> // FLT_MAX #include <bitset> #include <cuda_runtime.h> #include <cufft.h> // cufftComplex #ifdef USE_FFTW # include <fftw3.h> # include "libs/hybridInputOutput.hpp" #endif #include "algorithms/vectorReduce.hpp" #include "algorithms/cuda/cudaVectorReduce.hpp" #include "benchmark/imresh/algorithms/cuda/cudaVectorReduce.hpp" #include "libs/cudacommon.h" #include "benchmarkHelper.hpp" namespace imresh { namespace algorithms { unsigned int constexpr nRepetitions = 20; template<class T_PREC> bool compareFloat( const char * file, int line, T_PREC a, T_PREC b, T_PREC marginFactor = 1.0 ) { auto const max = std::max( std::abs(a), std::abs(b) ); if ( max == 0 ) return true; // both are 0 and therefore equal auto const relErr = fabs( a - b ) / max; auto const maxRelErr = marginFactor * std::numeric_limits<T_PREC>::epsilon(); if ( not ( relErr <= maxRelErr ) ) printf( "[%s:%i] relErr: %f > %f :maxRelErr!\n", file, line, relErr, maxRelErr ); return relErr <= maxRelErr; } void testVectorReduce( void ) { using namespace std::chrono; using namespace benchmark::imresh::algorithms::cuda; using namespace imresh::algorithms::cuda; using namespace imresh::algorithms; const unsigned nMaxElements = 64*1024*1024; // ~4000x4000 pixel auto pData = new float[nMaxElements]; srand(350471643); for ( unsigned i = 0; i < nMaxElements; ++i ) pData[i] = ( (float) rand() / RAND_MAX ) - 0.5f; float * dpData; CUDA_ERROR( cudaMalloc( (void**)&dpData, nMaxElements*sizeof(dpData[0]) ) ); CUDA_ERROR( cudaMemcpy( dpData, pData, nMaxElements*sizeof(dpData[0]), cudaMemcpyHostToDevice ) ); /* Test for array of length 1 */ assert( vectorMin( pData, 1 ) == pData[0] ); assert( vectorMax( pData, 1 ) == pData[0] ); assert( vectorSum( pData, 1 ) == pData[0] ); assert( cudaVectorMin( dpData, 1 ) == pData[0] ); assert( cudaVectorMax( dpData, 1 ) == pData[0] ); assert( cudaVectorSum( dpData, 1 ) == pData[0] ); /* do some checks with longer arrays and obvious results */ float obviousMaximum = 7.37519; float obviousMinimum =-7.37519; /* in order to filter out page time outs or similarily long random wait * times, we repeat the measurement nRepetitions times and choose the * shortest 
duration measured */ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); using clock = std::chrono::high_resolution_clock; std::cout << "vector length : cudaVectorMax (global atomic) | cudaVectorMax (global atomic) | cudaVectorMax (shared memory) | cudaVectorMax (shared memory+warp reduce) | cudaVectorMax (__shfl_down) | vectorMax | cudaVectorMin (__shfl_down) | vectorMin\n"; using namespace imresh::tests; for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) ) { std::cout << std::setw(8) << nElements << " : "; float milliseconds, minTime; decltype( clock::now() ) clock0, clock1; int iObviousValuePos = rand() % nElements; // std::cout << "iObviousValuePos = " << iObviousValuePos << "\n"; // std::cout << "nElements = " << nElements << "\n"; /* Maximum */ pData[iObviousValuePos] = obviousMaximum; CUDA_ERROR( cudaMemcpy( dpData, pData, nElements*sizeof(dpData[0]), cudaMemcpyHostToDevice ) ); #define TIME_GPU( FUNC, OBVIOUS_VALUE ) \ { \ minTime = FLT_MAX; \ for ( unsigned iRepetition = 0; iRepetition < nRepetitions; \ ++iRepetition ) \ { \ cudaEventRecord( start ); \ auto cudaReduced = FUNC( dpData, nElements ); \ cudaEventRecord( stop ); \ cudaEventSynchronize( stop ); \ cudaEventElapsedTime( &milliseconds, start, stop ); \ minTime = fmin( minTime, milliseconds ); \ assert( cudaReduced == OBVIOUS_VALUE ); \ } \ std::cout << std::setw(8) << minTime << " |" << std::flush; \ } TIME_GPU( cudaVectorMaxGlobalAtomic2 , obviousMaximum ) TIME_GPU( cudaVectorMaxGlobalAtomic , obviousMaximum ) TIME_GPU( cudaVectorMaxSharedMemory , obviousMaximum ) TIME_GPU( cudaVectorMaxSharedMemoryWarps, obviousMaximum ) TIME_GPU( cudaVectorMax , obviousMaximum ) /* time CPU */ #define TIME_CPU( FUNC, OBVIOUS_VALUE ) \ { \ minTime = FLT_MAX; \ for ( unsigned iRepetition = 0; iRepetition < nRepetitions; \ ++iRepetition ) \ { \ clock0 = clock::now(); \ auto cpuMax = FUNC( pData, nElements ); \ clock1 = clock::now(); \ auto seconds = duration_cast<duration<double>>( \ clock1 - clock0 ); \ minTime = fmin( minTime, seconds.count() * 1000 ); \ assert( cpuMax == OBVIOUS_VALUE ); \ } \ std::cout << std::setw(8) << minTime << " |" << std::flush; \ } TIME_CPU( vectorMax, obviousMaximum ) /* Minimum */ pData[iObviousValuePos] = obviousMinimum; CUDA_ERROR( cudaMemcpy( dpData, pData, nElements*sizeof(dpData[0]), cudaMemcpyHostToDevice ) ); TIME_GPU( cudaVectorMin, obviousMinimum ) TIME_CPU( vectorMin, obviousMinimum ) /* set obvious value back to random value */ pData[iObviousValuePos] = (float) rand() / RAND_MAX; std::cout << "\n"; #undef TIME_GPU #undef TIME_CPU } //for ( unsigned nElements = 2; nElements CUDA_ERROR( cudaFree( dpData ) ); delete[] pData; } template<class T_MASK, class T_PACKED> __attribute__(( optimize("unroll-loops") )) void unpackBitMask ( T_MASK * const __restrict__ rMask, T_PACKED const * const __restrict__ rPackedBits, unsigned int const nElements ) { auto const nElem = rMask + nElements; auto constexpr nBits = sizeof( T_PACKED ) * 8u; auto iPacked = rPackedBits; for ( auto iElem = rMask; iElem < nElem; ++iPacked ) { auto bitMask = T_PACKED(0x01) << ( nBits-1 ); for ( auto iBit = 0u; iBit < nBits; ++iBit, ++iElem ) { if ( iElem >= nElem ) break; assert( bitMask != T_MASK(0) ); assert( iElem < rMask + nElements ); assert( iPacked < rPackedBits + ceilDiv( nElements, nBits ) ); *iElem = T_MASK( (*iPacked & bitMask) != 0 ); bitMask >>= 1; } } } void testUnpackBitMask( void ) { uint32_t packed = 0x33333333; constexpr auto nElements = 8 * sizeof( packed ); bool unpacked[ 
nElements ]; unpacked[ nElements-2 ] = 1; unpacked[ nElements-1 ] = 0; unpackBitMask( unpacked, &packed, nElements-2 ); for ( auto i = 0u; i < (nElements-2)/2; ++i ) { assert( unpacked[2*i+0] == i % 2 ); assert( unpacked[2*i+1] == i % 2 ); } assert( unpacked[ nElements-2 ] == 1 ); assert( unpacked[ nElements-1 ] == 0 ); } void testCalculateHioError( void ) { using namespace std::chrono; using namespace benchmark::imresh::algorithms::cuda; // cudaCalculateHioErrorBitPacked using namespace imresh::algorithms::cuda; // cudaKernelCalculateHioError using namespace imresh::libs; // calculateHioError using namespace imresh::tests; // getLogSpacedSamplingPoints const unsigned nMaxElements = 64*1024*1024; // ~4000x4000 pixel /* allocate */ cufftComplex * dpData, * pData; unsigned char * dpIsMaskedChar, * pIsMaskedChar; float * dpIsMasked , * pIsMasked; unsigned * dpBitMasked , * pBitMasked; auto const nBitMaskedElements = ceilDiv( nMaxElements, 8 * sizeof( dpBitMasked[0] ) ); CUDA_ERROR( cudaMalloc( &dpIsMaskedChar, nMaxElements * sizeof( dpIsMaskedChar[0] ) ) ); CUDA_ERROR( cudaMalloc( &dpData , nMaxElements * sizeof( dpData [0] ) ) ); CUDA_ERROR( cudaMalloc( &dpIsMasked , nMaxElements * sizeof( dpIsMasked [0] ) ) ); CUDA_ERROR( cudaMalloc( &dpBitMasked , nBitMaskedElements * sizeof( dpBitMasked [0] ) ) ); pData = new cufftComplex [ nMaxElements ]; pIsMaskedChar = new unsigned char[ nMaxElements ]; pIsMasked = new float [ nMaxElements ]; pBitMasked = new unsigned[ nBitMaskedElements ]; /* allocate result buffer for reduced values of calculateHioError * kernel call */ float nMaskedPixels, * dpnMaskedPixels; float totalError , * dpTotalError; CUDA_ERROR( cudaMalloc( &dpnMaskedPixels, sizeof(float) ) ); CUDA_ERROR( cudaMalloc( &dpTotalError , sizeof(float) ) ); /* initialize mask randomly */ assert( sizeof(int) == 4 ); srand(350471643); for ( auto i = 0u; i < nBitMaskedElements; ++i ) pBitMasked[i] = rand() % UINT_MAX; unpackBitMask( pIsMasked, pBitMasked, nMaxElements ); for ( auto i = 0u; i < nMaxElements; ++i ) { pIsMaskedChar[i] = pIsMasked[i]; assert( pIsMaskedChar[i] == 0 or pIsMaskedChar[i] == 1 ); } std::cout << "[unpacked] "; for ( int i = 0; i < 32; ++i ) std::cout << pIsMasked[i]; std::cout << "\n"; std::cout << "[ packed] " << std::bitset<32>( pBitMasked[0] ) << "\n"; /* initialize data with Pythagorean triple 3*3 + 4*4 = 5*5 for masked bits */ for ( auto i = 0u; i < nMaxElements; ++i ) { if ( pIsMasked[i] ) { pData[i].x = 3.0f; pData[i].y = 4.0f; } else { pData[i].x = (float) rand() / RAND_MAX; pData[i].y = (float) rand() / RAND_MAX; } } /* if calculateHioError works correctly then we simply get * #masked * 5 as the mean complex norm error */ /* push to GPU */ CUDA_ERROR( cudaMemcpy( dpData , pData , nMaxElements * sizeof( pData [0] ), cudaMemcpyHostToDevice ) ); CUDA_ERROR( cudaMemcpy( dpIsMasked , pIsMasked , nMaxElements * sizeof( pIsMasked[0] ), cudaMemcpyHostToDevice ) ); CUDA_ERROR( cudaMemcpy( dpBitMasked, pBitMasked, nBitMaskedElements * sizeof( pBitMasked[0] ), cudaMemcpyHostToDevice ) ); CUDA_ERROR( cudaMemcpy( dpIsMaskedChar, pIsMaskedChar, nMaxElements * sizeof( pIsMaskedChar[0] ), cudaMemcpyHostToDevice ) ); std::cout << "test with randomly masked pythagorean triples"; /* because the number of elements we include only increases the number * of found masked elements should also only increase. */ float nLastMaskedPixels = 0; for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) ) { std::cout << "." 
<< std::flush; CUDA_ERROR( cudaMemset( dpnMaskedPixels, 0, sizeof(float) ) ); CUDA_ERROR( cudaMemset( dpTotalError , 0, sizeof(float) ) ); cudaKernelCalculateHioError<<<3,256>>> ( dpData, dpIsMasked, nElements, false /* don't invert mask */, dpTotalError, dpnMaskedPixels ); CUDA_ERROR( cudaMemcpy( &nMaskedPixels, dpnMaskedPixels, sizeof(float), cudaMemcpyDeviceToHost) ); CUDA_ERROR( cudaMemcpy( &totalError, dpTotalError, sizeof(float), cudaMemcpyDeviceToHost) ); /* Calculation done, now check if everything is correct */ if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2 { assert( nLastMaskedPixels <= nMaskedPixels ); assert( (unsigned) totalError % 5 == 0 ); assert( nMaskedPixels * 5 == totalError ); } nLastMaskedPixels = nMaskedPixels; /* check char version */ CUDA_ERROR( cudaMemset( dpnMaskedPixels, 0, sizeof(float) ) ); CUDA_ERROR( cudaMemset( dpTotalError , 0, sizeof(float) ) ); cudaKernelCalculateHioError<<<3,256>>> ( dpData, dpIsMaskedChar, nElements, false /* don't invert mask */, dpTotalError, dpnMaskedPixels ); CUDA_ERROR( cudaMemcpy( &nMaskedPixels, dpnMaskedPixels, sizeof(float), cudaMemcpyDeviceToHost) ); CUDA_ERROR( cudaMemcpy( &totalError, dpTotalError, sizeof(float), cudaMemcpyDeviceToHost) ); /* Calculation done, now check if everything is correct */ if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2 { assert( nLastMaskedPixels == nMaskedPixels ); assert( (unsigned) totalError % 5 == 0 ); assert( nMaskedPixels * 5 == totalError ); } /* check packed bit version */ CUDA_ERROR( cudaMemset( dpnMaskedPixels, 0, sizeof(float) ) ); CUDA_ERROR( cudaMemset( dpTotalError , 0, sizeof(float) ) ); cudaKernelCalculateHioErrorBitPacked<<<1,32>>> ( dpData, dpBitMasked, nElements, dpTotalError, dpnMaskedPixels ); CUDA_ERROR( cudaMemcpy( &nMaskedPixels, dpnMaskedPixels, sizeof(float), cudaMemcpyDeviceToHost) ); CUDA_ERROR( cudaMemcpy( &totalError, dpTotalError, sizeof(float), cudaMemcpyDeviceToHost) ); /* Calculation done, now check if everything is correct */ if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2 { if ( not ( nLastMaskedPixels == nMaskedPixels ) ) { printf( "nLastMaskedPixels: %f, nMaskedPixels: %f, totalError: %f\n", nLastMaskedPixels, nMaskedPixels, totalError ); assert( nLastMaskedPixels == nMaskedPixels ); } if ( not ( (unsigned) totalError % 5 == 0 ) ) { printf( "totalError: %f, nMaskedPixels: %f\n", totalError, nMaskedPixels ); assert( (unsigned) totalError % 5 == 0 ); } assert( nMaskedPixels * 5 == totalError ); } else { /* no use continuing this loop if we can't assert anything */ break; } #ifdef USE_FFTW static_assert( sizeof( cufftComplex ) == sizeof( fftwf_complex ), "" ); /* now compare with CPU version which should give the exact same * result, as there should be no floating point rounding errors * for relatively short array ( < 1e6 ? 
) */ float nMaskedPixelsCpu, totalErrorCpu; calculateHioError( (fftwf_complex*) pData, pIsMasked, nElements, /* is inverted: */ false, &totalErrorCpu, &nMaskedPixelsCpu ); /* when rounding errors occur the order becomes important */ if ( totalError < 16777216 ) { assert( compareFloat( __FILE__, __LINE__, totalError, totalErrorCpu, sqrtf(nElements) ) ); assert( nMaskedPixelsCpu == nMaskedPixels ); } #endif } std::cout << "OK\n"; /* benchmark with random numbers */ for ( auto i = 0u; i < nBitMaskedElements; ++i ) { pData[i].x = (float) rand() / RAND_MAX; pData[i].y = (float) rand() / RAND_MAX; } CUDA_ERROR( cudaMemcpy( dpData, pData, nMaxElements * sizeof( pData[0] ), cudaMemcpyHostToDevice ) ); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); using clock = std::chrono::high_resolution_clock; std::cout << "time in milliseconds:\n"; std::cout << "vector length : cudaCalcHioError(uint32_t) | cudaCalcHioError(char) | cudaCalcHioError(packed) | calcHioError (CPU) |\n"; for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) ) { std::cout << std::setw(8) << nElements << " : "; float milliseconds, minTime; decltype( clock::now() ) clock0, clock1; float error; #define TIME_GPU( FUNC, MASK ) \ minTime = FLT_MAX; \ for ( auto iRepetition = 0u; iRepetition < nRepetitions; \ ++iRepetition ) \ { \ cudaEventRecord( start ); \ error = FUNC( dpData, MASK, nElements ); \ cudaEventRecord( stop ); \ cudaEventSynchronize( stop ); \ cudaEventElapsedTime( &milliseconds, start, stop ); \ minTime = fmin( minTime, milliseconds ); \ assert( error <= nElements ); \ } \ std::cout << std::setw(8) << minTime << " |" << std::flush; TIME_GPU( cudaCalculateHioError, dpIsMasked ) auto unpackedError = error; TIME_GPU( cudaCalculateHioError, dpIsMaskedChar ) // sets error compareFloat( __FILE__, __LINE__, unpackedError, error, sqrtf(nElements) ); TIME_GPU( cudaCalculateHioErrorBitPacked, dpBitMasked ) // sets error compareFloat( __FILE__, __LINE__, unpackedError, error, sqrtf(nElements) ); #ifdef USE_FFTW /* time CPU */ minTime = FLT_MAX; for ( auto iRepetition = 0u; iRepetition < nRepetitions; ++iRepetition ) { clock0 = clock::now(); auto error = calculateHioError( (fftwf_complex*) pData, pIsMasked, nElements ); clock1 = clock::now(); auto seconds = duration_cast<duration<double>>( clock1 - clock0 ); minTime = fmin( minTime, seconds.count() * 1000 ); assert( error <= nElements ); } #endif std::cout << std::setw(8) << minTime << "\n" << std::flush; } /* free */ CUDA_ERROR( cudaFree( dpnMaskedPixels ) ); CUDA_ERROR( cudaFree( dpTotalError ) ); CUDA_ERROR( cudaFree( dpData ) ); CUDA_ERROR( cudaFree( dpIsMasked ) ); CUDA_ERROR( cudaFree( dpBitMasked ) ); delete[] pData; delete[] pIsMasked; delete[] pBitMasked; } } // namespace algorithms } // namespace imresh
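One of the benchmarked kernels, cudaVectorMax (__shfl_down), appears here only by name. Warp-shuffle reductions of that kind generally look like the sketch below: every lane folds in the value held offset lanes above it, halving the active width each step until lane 0 holds the warp maximum, and a block-level wrapper stages per-warp maxima in shared memory. This is a generic illustration, not the project's cudaVectorMax implementation; it assumes blockDim.x is a multiple of warpSize and uses the __shfl_down_sync form required on current CUDA toolkits (code of this vintage would have used __shfl_down).

#include <cfloat>
#include <cuda_runtime.h>

// Reduce a value to the maximum across one warp; lane 0 ends up with the result.
__inline__ __device__ float warpReduceMax(float val) {
    for (int offset = warpSize / 2; offset > 0; offset >>= 1)
        val = fmaxf(val, __shfl_down_sync(0xffffffff, val, offset));
    return val;
}

// Block-level wrapper: one warp-reduce per warp, stage per-warp maxima in shared
// memory, then let the first warp reduce those staged values.
__inline__ __device__ float blockReduceMax(float val) {
    __shared__ float shared[32];              // one slot per possible warp in the block
    int lane = threadIdx.x % warpSize;
    int wid  = threadIdx.x / warpSize;

    val = warpReduceMax(val);
    if (lane == 0) shared[wid] = val;
    __syncthreads();

    int nWarps = (blockDim.x + warpSize - 1) / warpSize;
    val = (threadIdx.x < nWarps) ? shared[lane] : -FLT_MAX;
    if (wid == 0) val = warpReduceMax(val);
    return val;                               // valid in thread 0 of the block
}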
aa9794fb153c78f74ee38fe11097e43789953622.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <rocblas.h> #include <util.cuh> #include <basicOps.cuh> #include <mpi.h> #include <hip/hip_runtime.h> #include <assert.h> #include <util.cuh> #include <clusterNet.h> #include <time.h> #include <batchAllocator.h> #include <DeepNeuralNetwork.h> #include <WikiMaxoutNet.h> #include <WikiMaxoutNet_PCIe.h> #include <WikiMaxoutNet_PCIe2.h> #include <WikiNetDist.h> #include <Layer.h> #include <time.h> using std::cout; using std::endl; void run_neural_network() { Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5"); Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5"); ClusterNet gpu = ClusterNet(12345); cout << X->rows << endl; int hidden_size = 1024; Matrix *w1 = gpu.sparseInitWeight(784,hidden_size); Matrix *w2 = gpu.sparseInitWeight(hidden_size,10); Matrix *m1 = zeros(784,hidden_size); Matrix *m2 = zeros(hidden_size,10); Matrix *ms1 = zeros(784,hidden_size); Matrix *ms2 = zeros(hidden_size,10); Matrix *grad_w1_ms = zeros(784,hidden_size); Matrix *grad_w2_ms = zeros(hidden_size,10); Matrix *grad_w2 = empty(hidden_size,10); Matrix *grad_w1 = empty(784,hidden_size); float cv_error = 0; float cv_size = 0.1428571f; float train_error = 0.0f; BatchAllocator b = BatchAllocator(); b.init(X, y, cv_size, 128, 512); clock_t t1,t2; t1=clock(); //code goes here int epochs = 100; gpu.tick(); float learning_rate = 0.003; //size_t free = 0; //size_t total = 0; float momentum = 0.5; for(int EPOCH = 0; EPOCH < epochs; EPOCH++) { std::cout << "EPOCH: " << EPOCH + 1 << std::endl; //hipMemGetInfo(&free, &total); //std::cout << free << std::endl; momentum += 0.01; if(momentum > 0.95) momentum = 0.95; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.allocate_next_batch_async(); //nesterov updates scalarMul(m1,momentum,m1); scalarMul(m2,momentum,m2); add(w1,m1,w1); add(w2,m2,w2); Matrix *d0 = gpu.dropout(b.CURRENT_BATCH,0.2); Matrix *z1 = gpu.dot(d0, w1); logistic(z1, z1); Matrix *d1 = gpu.dropout(z1,0.5); Matrix *a2 = gpu.dot(d1,w2); Matrix *out = softmax(a2); Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10); //backprop Matrix *e1 = sub(out, t); Matrix *e2 = gpu.dotT(e1, w2); gpu.Tdot(z1,e1,grad_w2); logisticGrad(z1,z1); mul(e2,z1,e2); gpu.Tdot(b.CURRENT_BATCH,e2,grad_w1); b.allocate_next_batch_async(); RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); hipFree(e1->data); hipFree(e2->data); hipFree(z1->data); hipFree(a2->data); hipFree(out->data); hipFree(t->data); hipFree(d0->data); hipFree(d1->data); b.replace_current_batch_with_next(); } //Matrix *sum_value = sum(w1); //std::cout << "weight 1 Sum: " << to_host(sum_value)->data[0] << std::endl; train_error = 0; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.broadcast_batch_to_processes(); //Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8); Matrix *a1 = gpu.dot(b.CURRENT_BATCH,w1); logistic(a1, a1); //Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpu.dot(a1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_Y); b.allocate_next_batch_async(); float sum_value = sum(eq); train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ; hipFree(a1->data); hipFree(a2->data); hipFree(out->data); hipFree(result->data); hipFree(eq->data); //hipFree(d0->data); //hipFree(d1->data); b.replace_current_batch_with_next(); } std::cout << "Train error: " << 
train_error << std::endl; cv_error = 0; for(int i = 0; i < b.TOTAL_BATCHES_CV; i++) { b.broadcast_batch_cv_to_processes(); Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8); Matrix *a1 = gpu.dot(d0,w1); logistic(a1, a1); Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpu.dot(d1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y); b.allocate_next_cv_batch_async(); float sum_value = sum(eq); cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ; hipFree(a1->data); hipFree(a2->data); hipFree(out->data); hipFree(result->data); hipFree(eq->data); hipFree(d0->data); hipFree(d1->data); b.replace_current_cv_batch_with_next(); } std::cout << "Cross validation error: " << cv_error << std::endl; } hipDeviceSynchronize(); t2=clock(); float diff ((float)t2-(float)t1); float mseconds = (diff / CLOCKS_PER_SEC)/1000; std::cout<<mseconds<<std::endl; gpu.tock(); b.finish_batch_allocator(); //gpu.tock("batch replace"); //gpu.tock("async batch allocate"); //gpu.tock("feedforward"); printf("Finished!\n"); } void run_maxout_network() { hipSetDevice(0); Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5"); Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5"); ClusterNet gpus = ClusterNet(12345); int hiddenunits = 512; int maxout_Size = 8; int batch_size = 128; Matrix *w1 = gpus.uniformSqrtWeight(784,hiddenunits); Matrix *w2 = gpus.uniformSqrtWeight(hiddenunits/maxout_Size,10); Matrix *b1 = zeros(1,hiddenunits); Matrix *b2 = zeros(1,10); Matrix *m1 = zeros(784,hiddenunits); Matrix *m2 = zeros(hiddenunits/maxout_Size,10); Matrix *mb1 = zeros(1,hiddenunits); Matrix *mb2 = zeros(1,10); Matrix *ms1 = zeros(784,hiddenunits); Matrix *ms2 = zeros(hiddenunits/maxout_Size,10); Matrix *msb1 = zeros(1,hiddenunits); Matrix *msb2 = zeros(1,10); Matrix *grad_w1 = zeros(784,hiddenunits); Matrix *grad_w2 = zeros(hiddenunits/maxout_Size,10); Matrix *grad_b1 = zeros(1,hiddenunits); Matrix *grad_b2 = zeros(1,10); float cv_error = 0.0f; float train_error = 0.0f; BatchAllocator b = BatchAllocator(); b.init(X, y, 0.2, batch_size, 512); int epochs = 1000; float learning_rate = 0.001; float momentum = 0.5; for(int EPOCH = 1; EPOCH < epochs; EPOCH++) { cout << "EPOCH: " << EPOCH << endl; //momentum += 0.01; //if(momentum > 0.95) momentum = 0.95; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.broadcast_batch_to_processes(); //nesterov updates scalarMul(m1,momentum,m1); scalarMul(m2,momentum,m2); scalarMul(mb1,momentum,mb1); scalarMul(mb2,momentum,mb2); add(w1,m1,w1); add(w2,m2,w2); add(b1,mb1,b1); add(b2,mb2,b2); //feedforward Matrix *d0 = gpus.dropout(b.CURRENT_BATCH,0.2); Matrix *z1 = gpus.dot(d0, w1); addMatrixVector(z1,b1,z1); Matrix **a_paired = maxout(z1,maxout_Size); Matrix *a1 = a_paired[0]; Matrix *a1_idx = a_paired[1]; Matrix *d1 = gpus.dropout(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); addMatrixVector(a2,b2,a2); Matrix *out = softmax(a2); Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10); b.allocate_next_batch_async(); //backprop Matrix *e1 = sub(out, t); Matrix *e2_partial = gpus.dotT(e1, w2); Matrix *e2 = empty(b.CURRENT_BATCH->rows,e2_partial->cols*maxout_Size); Matrix *aB = ones(1,b.CURRENT_BATCH->rows); gpus.Tdot(a1,e1,grad_w2); gpus.dot(aB,e1,grad_b2); expand_to_maxout_grad(e2_partial, a1_idx,e2); gpus.Tdot(b.CURRENT_BATCH,e2,grad_w1); gpus.dot(aB,e2,grad_b1); //weight updates //RMSProp RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); 
RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); /* scalarMul(grad_w1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w1); scalarMul(grad_w2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w2); scalarMul(grad_b1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b1); scalarMul(grad_b2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b2); //classical momentum scalarMul(m1,momentum,m1); scalarMul(m2,momentum,m2); scalarMul(mb1,momentum,mb1); scalarMul(mb2,momentum,mb2); sub(m1,grad_w1,m1); sub(m2,grad_w2,m2); sub(mb1,grad_b1,mb1); sub(mb2,grad_b2,mb2); add(w1,m1,w1); add(w2,m2,w2); add(b1,mb1,b1); add(b2,mb2,b2); */ /* sub(w1,grad_w1,w1); sub(w2,grad_w2,w2); sub(b1,grad_b1,b1); sub(b2,grad_b2,b2); */ hipFree(e1->data); hipFree(e2->data); hipFree(e2_partial->data); hipFree(z1->data); hipFree(a1->data); hipFree(a1_idx->data); hipFree(a2->data); hipFree(out->data); hipFree(t->data); hipFree(d0->data); hipFree(d1->data); hipFree(aB->data); free(a_paired); b.replace_current_batch_with_next(); } train_error = 0; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.broadcast_batch_to_processes(); Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8); Matrix *z1 = gpus.dot(d0,w1); Matrix **a1_pair = maxout(z1,maxout_Size); Matrix *a1 = a1_pair[0]; Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_Y); b.allocate_next_batch_async(); float sum_value = sum(eq); train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ; hipFree(z1->data); hipFree(a1->data); hipFree(a1_pair[1]->data); hipFree(a2->data); hipFree(out->data); hipFree(result->data); hipFree(eq->data); hipFree(d0->data); hipFree(d1->data); free(a1_pair); b.replace_current_batch_with_next(); } std::cout << "MAXOUT Train error: " << train_error << std::endl; cv_error = 0; for(int i = 0; i < b.TOTAL_BATCHES_CV; i++) { b.broadcast_batch_cv_to_processes(); Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8); Matrix *z1 = gpus.dot(d0,w1); Matrix **a1_pair = maxout(z1,maxout_Size); Matrix *a1 = a1_pair[0]; Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y); b.allocate_next_batch_async(); float sum_value = sum(eq); cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ; hipFree(z1->data); hipFree(a1->data); hipFree(a1_pair[1]->data); hipFree(a2->data); hipFree(out->data); hipFree(result->data); hipFree(eq->data); hipFree(d0->data); hipFree(d1->data); free(a1_pair); b.replace_current_cv_batch_with_next(); } std::cout << "MAXOUT Cross validation error: " << cv_error << std::endl; } } void run_normal_net() { hipSetDevice(2); Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5"); Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5"); ClusterNet gpus = ClusterNet(12345); int hiddenunits = 1024; int maxout_Size = 1; int batch_size = 128; Matrix *w1 = gpus.uniformSqrtWeight(784,hiddenunits); Matrix *w2 = gpus.uniformSqrtWeight(hiddenunits/maxout_Size,10); Matrix *b1 = zeros(1,hiddenunits); Matrix *b2 = zeros(1,10); Matrix *m1 = zeros(784,hiddenunits); Matrix *m2 = zeros(hiddenunits/maxout_Size,10); 
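// Buffer naming convention in this function, inferred from how the buffers are used below:
// m*/mb* hold momentum terms, ms*/msb* hold RMSprop mean-square accumulators, grad_* hold raw gradients.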
Matrix *mb1 = zeros(1,hiddenunits); Matrix *mb2 = zeros(1,10); Matrix *ms1 = zeros(784,hiddenunits); Matrix *ms2 = zeros(hiddenunits/maxout_Size,10); Matrix *msb1 = zeros(1,hiddenunits); Matrix *msb2 = zeros(1,10); Matrix *grad_w1 = zeros(784,hiddenunits); Matrix *grad_w2 = zeros(hiddenunits/maxout_Size,10); Matrix *grad_b1 = zeros(1,hiddenunits); Matrix *grad_b2 = zeros(1,10); float cv_error = 0.0f; float train_error = 0.0f; BatchAllocator b = BatchAllocator(); b.init(X, y, 0.4, batch_size, 512); int epochs = 500; float learning_rate = 0.000001; float momentum = 0.5; for(int EPOCH = 1; EPOCH < epochs; EPOCH++) { cout << "EPOCH: " << EPOCH << endl; momentum += 0.01; if(momentum > 0.95) momentum = 0.95; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.broadcast_batch_to_processes(); //nesterov updates scalarMul(m1,momentum,m1); scalarMul(m2,momentum,m2); scalarMul(mb1,momentum,mb1); scalarMul(mb2,momentum,mb2); add(w1,m1,w1); add(w2,m2,w2); add(b1,mb1,b1); add(b2,mb2,b2); //feedforward Matrix *d0 = gpus.dropout(b.CURRENT_BATCH,0.2); Matrix *z1 = gpus.dot(d0, w1); addMatrixVector(z1,b1,z1); Matrix *a1 = logistic(z1); //Matrix *a1 = rectified_linear(z1); Matrix *d1 = gpus.dropout(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); addMatrixVector(a2,b2,a2); Matrix *out = softmax(a2); Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10); b.allocate_next_batch_async(); //backprop Matrix *e1 = sub(out, t); Matrix *e2 = gpus.dotT(e1, w2); Matrix *aB = ones(1,b.CURRENT_BATCH->rows); gpus.Tdot(a1,e1,grad_w2); gpus.dot(aB,e1,grad_b2); //rectified_linear_derivative(a1,a1); logisticGrad(a1,a1); mul(e2,a1,e2); gpus.Tdot(b.CURRENT_BATCH,e2,grad_w1); gpus.dot(aB,e2,grad_b1); /* //about equal to momentum update + nesterov update -> momentum applyied to gradient+momentum better? RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); */ /* //slow and generally worse error, but sometimes better results in the end RMSprop_with_momentum_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); */ RMSprop_with_nesterov_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_nesterov_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_nesterov_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_nesterov_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); /* //slower but equally good to nesterov momentum RMSprop_with_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); 
RMSprop_with_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); */ /* scalarMul(grad_w1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w1); scalarMul(grad_w2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w2); scalarMul(grad_b1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b1); scalarMul(grad_b2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b2); //classical momentum scalarMul(m1,momentum,m1); scalarMul(m2,momentum,m2); scalarMul(mb1,momentum,mb1); scalarMul(mb2,momentum,mb2); sub(m1,grad_w1,m1); sub(m2,grad_w2,m2); sub(mb1,grad_b1,mb1); sub(mb2,grad_b2,mb2); add(w1,m1,w1); add(w2,m2,w2); add(b1,mb1,b1); add(b2,mb2,b2); */ /* sub(w1,grad_w1,w1); sub(w2,grad_w2,w2); sub(b1,grad_b1,b1); sub(b2,grad_b2,b2); */ hipFree(e1->data); hipFree(e2->data); hipFree(z1->data); hipFree(a1->data); hipFree(a2->data); hipFree(out->data); hipFree(t->data); hipFree(d0->data); hipFree(d1->data); hipFree(aB->data); b.replace_current_batch_with_next(); } train_error = 0; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.broadcast_batch_to_processes(); Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8); Matrix *z1 = gpus.dot(d0,w1); Matrix *a1 = logistic(z1); //Matrix *a1 = rectified_linear(z1); Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_Y); b.allocate_next_batch_async(); float sum_value = sum(eq); train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ; hipFree(z1->data); hipFree(a1->data); hipFree(a2->data); hipFree(out->data); hipFree(result->data); hipFree(eq->data); hipFree(d0->data); hipFree(d1->data); b.replace_current_batch_with_next(); } std::cout << "MAXOUT Train error: " << train_error << std::endl; cv_error = 0; for(int i = 0; i < b.TOTAL_BATCHES_CV; i++) { b.broadcast_batch_cv_to_processes(); Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8); Matrix *z1 = gpus.dot(d0,w1); Matrix *a1 = logistic(z1); //Matrix *a1 = rectified_linear(z1); Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y); b.allocate_next_batch_async(); float sum_value = sum(eq); cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ; hipFree(z1->data); hipFree(a1->data); hipFree(a2->data); hipFree(out->data); hipFree(result->data); hipFree(eq->data); hipFree(d0->data); hipFree(d1->data); b.replace_current_cv_batch_with_next(); } std::cout << "MAXOUT Cross validation error: " << cv_error << std::endl; } } void MPI_benchmark_P2P(int argc, char *argv[]) { char name[100]; int myrank, length, size; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); MPI_Get_processor_name(name, &length); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Status status; int local_rank = myrank % 4; int gpus; hipGetDeviceCount(&gpus); int mygpu_id; int your_gpu_id; if(myrank == 0) { mygpu_id = 0; if(gpus > 1) your_gpu_id = 1; else your_gpu_id = 0; MPI_Send(&your_gpu_id,1, MPI_INT,1,0,MPI_COMM_WORLD); } else { MPI_Recv(&mygpu_id,1,MPI_INT,myrank-1,0,MPI_COMM_WORLD,&status); if(gpus > mygpu_id+1) your_gpu_id = mygpu_id + 1; else your_gpu_id = 0; if(myrank < size-1) MPI_Send(&your_gpu_id,1, MPI_INT,myrank+1,0,MPI_COMM_WORLD); } hipSetDevice(mygpu_id); int batch_size = 128; int inner_dim = 10000; int outer_dim = 15000; ClusterNet gpu = ClusterNet(); Matrix *A = gpu.rand(batch_size,inner_dim); Matrix *B = 
gpu.rand(inner_dim,outer_dim); Matrix *out = empty(batch_size,outer_dim); Matrix *rec = empty(batch_size,outer_dim); Matrix *A1 = gpu.rand(batch_size/2,inner_dim); Matrix *B1 = gpu.rand(inner_dim,outer_dim); Matrix *rec1 = empty(batch_size/2,outer_dim); Matrix *out1 = empty(batch_size/2,outer_dim); Matrix *A2 = gpu.rand(batch_size,inner_dim); Matrix *B2 = gpu.rand(inner_dim,outer_dim/2); Matrix *rec2 = empty(batch_size,outer_dim/2); Matrix *out2 = empty(batch_size,outer_dim/2); gpu.tick("Direct compute"); for(int i = 0; i< 100; i++) { gpu.dot(A,B, out); //add(A, B, out); } gpu.tock("Direct compute"); gpu.tick("partial batch direct compute"); for(int i = 0; i< 100; i++) { gpu.dot(A1,B1, out1); //add(A, B, out); } gpu.tock("partial batch direct compute"); gpu.tick("partial units direct compute"); for(int i = 0; i< 100; i++) { gpu.dot(A2,B2, out2); //add(A, B, out); } gpu.tock("partial units direct compute"); gpu.tick("PCIe transfer"); for(int i = 0; i< 100; i++) { if(local_rank == 0 && gpus > 1) { MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD); } else if(local_rank == 1 && gpus > 1) { //add(A2,B, out); MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); } } gpu.tock("PCIe transfer"); gpu.tick("PCIe dot"); for(int i = 0; i< 100; i++) { if(local_rank == 0 && gpus > 1) { gpu.dot(A2,B2,out2); MPI_Send(out1->data, out1->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD); } else if(local_rank == 1 && gpus > 1) { gpu.dot(A2,B2,out2); MPI_Recv(rec1->data, rec1->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); vStack(out2,rec2,rec); } } gpu.tock("PCIe dot"); gpu.tick("RDMA transfer"); for(int i = 0; i< 100; i++) { if(myrank == 0) { MPI_Send(out->data, out->size, MPI_FLOAT, 3, 100, MPI_COMM_WORLD); } else if(myrank == 3) { //add(A2,B, out); MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); } } gpu.tock("RDMA transfer"); gpu.tick("RDMA dot"); for(int i = 0; i< 100; i++) { if(myrank == 0) { gpu.dot(A2,B2,out2); MPI_Send(out->data, out->size, MPI_FLOAT, 3, 100, MPI_COMM_WORLD); } else if(myrank == 3) { //add(A2,B, out); gpu.dot(A2,B2,out2); MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); vStack(out2,rec2,rec); } } gpu.tock("RDMA dot"); MPI_Finalize(); } void MPI_benchmark(int argc, char *argv[]) { int myrank; MPI_Status status; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); ClusterNet gpu = ClusterNet(); int batch_rows = 128; int w_in = 10000; int w_out = 8000; //dot Matrix *B = gpu.rand(w_in,w_out); Matrix *A = gpu.rand(batch_rows,w_in); assert(test_matrix(A,batch_rows,w_in)); assert(test_matrix(B,w_in,w_out)); Matrix *out = empty(batch_rows, w_out); Matrix *B1 = gpu.rand(w_in,w_out/2); Matrix *B2 = gpu.rand(w_in,w_out/2); Matrix *D = empty(batch_rows,w_out/2); Matrix *A1 = gpu.rand(batch_rows/2,w_in); Matrix *big_out = gpu.rand(batch_rows/2,w_out); Matrix *grand_out = empty(batch_rows, w_out); Matrix *C = gpu.rand(batch_rows/2,w_in); Matrix *C_out = empty(batch_rows/2,w_out); Matrix *E = gpu.rand(batch_rows/4,w_in); Matrix *E_out = empty(batch_rows/4,w_out); Matrix *E_merge = empty(batch_rows/2,w_out); Matrix *E_merge2 = empty(batch_rows/2,w_out); //add /* B = gpu.rand(w_in,w_out); A = gpu.rand(w_in,w_out); out = empty(w_in, w_out); A1 = gpu.rand(w_in/2,w_out); Matrix *A2 = gpu.rand(w_in/2,w_out); D = empty(w_in/2,w_out); */ hipEvent_t* startstop = tick(); for(int i = 0; i< 100; i++) { gpu.dot(A,B, out); //add(A, B, out); } printf("Direct compute:\n"); tock(startstop); out = 
empty(batch_rows,w_out/2); Matrix *out2 = empty(batch_rows,w_out/2); startstop = tick(); for(int i = 0; i< 100; i++) { gpu.dot(A,B1, out); gpu.dot(A,B2, out2); vStack(out,out2,grand_out); } printf("Direct compute x2:\n"); tock(startstop); Matrix *mergemat = empty(batch_rows, w_out); out = empty(batch_rows,w_out/2); startstop = tick(); //out = empty(w_in/2,w_out); for(int i = 0; i < 100; i++) { if(myrank == 0) { gpu.dot(A,B1, out); //add(A1, B,out); MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD); } else { gpu.dot(A,B2, out); //add(A2,B, out); MPI_Recv(D->data, D->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); vStack(out,D, mergemat); } } if(myrank == 1) { printf("GPUDirect RDMA:\n"); tock(startstop); } out = empty(batch_rows/2,w_out); startstop = tick(); gpu.tick("aa"); //out = empty(w_in/2,w_out); for(int i = 0; i < 100; i++) { gpu.tick("dot"); gpu.dot(C,B, out); gpu.tick("dot"); if(myrank == 0) { //add(A1, B,out); gpu.tick("send"); MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD); gpu.tick("send"); } else { //add(A2,B, out); gpu.tick("receive"); MPI_Recv(C_out->data, C_out->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); vStack(out,C_out, grand_out); gpu.tick("receive"); } if(myrank == 1) { //add(A1, B,out); gpu.tick("send"); MPI_Send(out->data, out->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD); gpu.tick("send"); } else { //add(A2,B, out); gpu.tick("receive"); MPI_Recv(C_out->data, C_out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD, &status); vStack(out,C_out, grand_out); gpu.tick("receive"); } } gpu.tock("dot"); if(myrank == 1) { printf("GPUDirect RDMA batch:\n"); tock(startstop); gpu.tock("receive"); gpu.tock("aa"); } else { gpu.tock("send"); } MPI_Finalize(); } void dotMPI_test(int argc, char *argv[]) { /* ClusterNet gpu = ClusterNet(argc, argv, 123465); int inner = 2000; int outer = 1200; int batch_size = 128; int reduced_left = 128; int reduced_right = 400; Matrix *A = gpu.rand(batch_size,inner); Matrix *B = gpu.rand(inner,outer); Matrix *A1 = gpu.rand(reduced_left,inner); Matrix *B1 = gpu.rand(inner,reduced_right); Matrix *out = empty(batch_size,outer); Matrix *out1 = empty(reduced_left,reduced_right); Matrix *recv1 = empty(reduced_left,reduced_right); Matrix *recv2 = empty(reduced_left,reduced_right); Matrix *recv3 = empty(reduced_left,reduced_right); MPI_Status status; gpu.tick("dot mpi batch"); for(int i = 0; i < 100; i++) { gpu.dotMPI_batchSlice(A,B); } gpu.tock("dot mpi batch"); gpu.tick("dot mpi unit"); for(int i = 0; i < 100; i++) { gpu.dotMPI_unitSlice(A,B); } gpu.tock("dot mpi unit"); printf("My rank: %i\n",gpu.MYRANK); //gpu.benchmark_dot(); gpu.tick("dot normal"); for(int i = 0; i < 100; i++) { gpu.dot(A,B,out); } gpu.tock("dot normal"); //std::vector<MPI_Request> requests; MPI_Request *requests = (MPI_Request*)malloc(sizeof(MPI_Request)*gpu.MPI_SIZE-1); MPI_Request request_send; std::vector<Matrix*> recv_buffer; for(int i = 0; i < gpu.MPI_SIZE-1; i++) { MPI_Request request; requests[i] = request; } int received_count = 0; for(int i = 0; i < 100; i++) { for(int i = 0; i < recv_buffer.size(); i++) hipFree(recv_buffer[i]->data); recv_buffer.clear(); out1 = empty(reduced_left,reduced_right); for(int i = 0; i < gpu.MPI_SIZE; i++) { recv_buffer.push_back(empty(reduced_left,reduced_right)); } gpu.tick("all to all custom"); //cout << "a1 rows" << A1->rows << endl; gpu.dot(A1,B1,out1); recv_buffer[gpu.MYRANK]= out1; for(int i = 0; i < gpu.MPI_SIZE; i++) { if(gpu.MYRANK == i) { continue; } MPI_Isend(out1->data, out1->size, MPI_FLOAT, i, 100, 
MPI_COMM_WORLD, &request_send); } for(int i = 0; i < gpu.MPI_SIZE; i++) { if(gpu.MYRANK == i) { continue; } MPI_Irecv(recv1->data, recv1->size, MPI_FLOAT, i, 100, MPI_COMM_WORLD, &requests[i]); } for(int i = 0; i < gpu.MPI_SIZE; i++) { if(gpu.MYRANK == i) { continue; } MPI_Wait(&requests[i],MPI_STATUS_IGNORE); } received_count = 0; while(received_count < gpu.MPI_SIZE-1) { for(int i = 0; i < gpu.MPI_SIZE; i++) { int received = 0; if(gpu.MYRANK == i) { continue; } MPI_Test(&requests[i],&received,&status); if(received == 1) { out1 = hStack(out1,recv1); received_count++; } } } gpu.tick("all to all custom"); } gpu.tock("all to all custom"); int destination = gpu.MYRANK + 1; int source = gpu.MYRANK - 1; if(destination == gpu.MPI_SIZE){destination = 0; } if(source < 0){ source = gpu.MPI_SIZE - 1;} for(int i = 0; i < 100; i++) { out1 = empty(reduced_left,reduced_right); recv1 = empty(reduced_left,reduced_right); gpu.tick("chain custom"); gpu.dot(A1,B1,out1); for(int i = 0; i < gpu.MPI_SIZE-1; i++) { if(i == 0) MPI_Isend(out1->data, out1->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send); else MPI_Isend(recv1->data, recv1->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send); MPI_Recv(recv1->data, recv1->size, MPI_FLOAT, source, 100, MPI_COMM_WORLD, &status); //MPI_Wait(&requests[i],&status); out1 = hStack(out1,recv1); } gpu.tick("chain custom"); } gpu.tock("chain custom"); cout << gpu.MYRANK << endl; int matrix_idx = gpu.MYRANK; Matrix** arrOut = (Matrix**)malloc(sizeof(Matrix*)*gpu.MPI_SIZE); for(int i = 0; i < gpu.MPI_SIZE; i++) arrOut[i] = empty(reduced_left,reduced_right); float **h_arrA = (float**)malloc(sizeof(float*)*gpu.MPI_SIZE); for(int i = 0; i < gpu.MPI_SIZE; i++) h_arrA[i] = arrOut[i]->data; float **d_arrA; hipMalloc((void**) &d_arrA,sizeof(float*)*gpu.MPI_SIZE); hipMemcpy(d_arrA,h_arrA,sizeof(float*)*gpu.MPI_SIZE,hipMemcpyDefault); gpu.tick("chain matrix array"); for(int i = 0; i < 100; i++) { gpu.dot(A1,B1,arrOut[gpu.MYRANK]); matrix_idx = gpu.MYRANK; for(int i = 0; i < gpu.MPI_SIZE-1; i++) { MPI_Isend(arrOut[matrix_idx]->data, arrOut[matrix_idx]->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send); matrix_idx = (matrix_idx - 1) < 0 ? 
gpu.MPI_SIZE-1 : (matrix_idx - 1); MPI_Irecv(arrOut[matrix_idx]->data, arrOut[matrix_idx]->size, MPI_FLOAT, source, 100, MPI_COMM_WORLD,&requests[i]); } MPI_Waitall(gpu.MPI_SIZE-1,requests,MPI_STATUSES_IGNORE); //hStackN(d_arrA,arrOut[0]->size, out,gpu.MPI_SIZE); } gpu.tock("chain matrix array"); gpu.shutdown(); */ } void async_test(int argc, char *argv[]) { ClusterNet gpu = ClusterNet(argc,argv,1324); int rows = 512; int cols = 128; /* MPI_Request r = MPI_REQUEST_NULL; MPI_Request s = MPI_REQUEST_NULL; Matrix *a = gpu.rand(rows,cols); Matrix *b = zeros(rows,cols); if(gpu.MYRANK == 0) { MPI_Irecv(b->data,b->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&r); MPI_Isend(a->data,a->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&s); } else { MPI_Irecv(b->data,b->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&r); MPI_Isend(a->data,a->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&s); } MPI_Wait(&s,MPI_STATUS_IGNORE); MPI_Wait(&r,MPI_STATUS_IGNORE); gpu.tick("MPI"); for(int i = 0; i < 100; i++) { if(gpu.MYRANK == 0) { MPI_Irecv(b->data,b->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&r); MPI_Isend(a->data,a->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&s); } else { MPI_Irecv(b->data,b->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&r); MPI_Isend(a->data,a->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&s); } MPI_Wait(&s,MPI_STATUS_IGNORE); MPI_Wait(&r,MPI_STATUS_IGNORE); } gpu.tock("MPI"); */ if(gpu.MYRANK == 0) { hipSetDevice(0); //hipDeviceEnablePeerAccess(1,0); hipDeviceDisablePeerAccess(1); Matrix *A1 = gpu.rand(rows,cols); Matrix *A2 = gpu.rand(rows,cols); hipSetDevice(1); //hipDeviceEnablePeerAccess(0,0); hipDeviceDisablePeerAccess(0); Matrix *B1 = gpu.rand(rows,cols); Matrix *B2 = gpu.rand(rows,cols); hipSetDevice(0); hipStream_t s; hipStreamCreate(&s); hipSetDevice(1); hipStream_t s2; hipStreamCreate(&s2); hipSetDevice(0); int access = 0; hipDeviceCanAccessPeer(&access,0,1); cout << access << endl; hipDeviceCanAccessPeer(&access,1,0); cout << access << endl; hipSetDevice(0); gpu.tick("cuda"); for(int i = 0; i < 100; i++) { hipMemcpyPeerAsync(B2->data,1,A2->data,0,A2->bytes,s); hipSetDevice(1); hipMemcpyPeerAsync(A1->data,0,B1->data,1,B1->bytes,s2); hipSetDevice(0); hipStreamSynchronize(s); hipSetDevice(1); hipStreamSynchronize(s2); hipSetDevice(0); } gpu.tock("cuda"); } MPI_Barrier(MPI_COMM_WORLD); gpu.shutdown_MPI(); } struct arg_struct { ClusterNet *gpus; WikiMaxoutNet *net; int device; }; void *run_net(void * args) { struct arg_struct *_args = (struct arg_struct*)args; cout << "device: " << _args->device << endl; hipSetDevice(_args->device); _args->net->run(); return 0; } void *print_message(void*) { ClusterNet gpu = ClusterNet(124345); WikiMaxoutNet net = WikiMaxoutNet(gpu); net.run(); return 0; } void bandwidth_test_MPI(int argc, char *argv[]) { ClusterNet *gpu = new ClusterNet(argc,argv,1235,true); std::vector<MPI_Request*> sends; std::vector<MPI_Request*> recvs; std::vector<Matrix*> lSync; std::vector<Matrix*> lData; int packages = 10; float time = 0; for(int epoch = 1; epoch < 2000; epoch++) { if(lData.size() > 0) { for(int i = 0; i < packages; i++) { hipFree(lSync[i]->data); hipFree(lData[i]->data); } lSync.clear(); lData.clear(); } for(int i = 0; i < packages; i++) { lSync.push_back(zeros(128*epoch,128*epoch)); lData.push_back(gpu->rand(128*epoch,128*epoch)); } for(int j = 0; j < packages; j++) { MPI_Request *send_request = new MPI_Request; MPI_Request *recv_request = new MPI_Request; sends.push_back(send_request); recvs.push_back(recv_request); int target = gpu->MYRANK +1 == gpu->MPI_SIZE ? 0 : gpu->MYRANK+1; int source = gpu->MYRANK-1 == -1 ? 
gpu->MPI_SIZE-1 : gpu->MYRANK-1; gpu->tick(); for (int i = 0; i < gpu->MPI_SIZE -1; i++) { //MPI_Irecv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,999,MPI_COMM_WORLD,recv_request); //MPI_Isend(lData[j]->data,lData[j]->size,MPI_FLOAT,target,999,MPI_COMM_WORLD,send_request); //MPI_Isend(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD,send_request); if(i == gpu->MYRANK) { MPI_Send(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD); MPI_Recv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,j,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } else { MPI_Recv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,j,MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Send(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD); } } gpu->tick(); } /* gpu->tick(); for(int i = 0; i < packages; i++) { MPI_Wait(sends[i],MPI_STATUS_IGNORE); MPI_Wait(recvs[i],MPI_STATUS_IGNORE); } */ time = gpu->tock(); //for(int i = 0; i < packages; i++) //assert(sum(lData[i]) == sum(lSync[i])); printdim(lData[0]); cout << 10*packages*lData[0]->bytes/1024./1024./1024./time << " GB/s" << endl; } gpu->shutdown_MPI(); } void bandwidth_test_peer() { ClusterNet *gpu = new ClusterNet(1235); std::vector<Matrix*> lSync0; std::vector<Matrix*> lData0; std::vector<Matrix*> lSync1; std::vector<Matrix*> lData1; std::vector<hipStream_t> s0s; std::vector<hipStream_t> s1s; int packages = 1; float time = 0; hipSetDevice(0); hipDeviceEnablePeerAccess(1,0); hipSetDevice(1); hipDeviceEnablePeerAccess(0,0); for(int i = 0; i < packages; i++) { hipStream_t s0; hipStream_t s1; hipSetDevice(0); hipStreamCreate(&s0); hipSetDevice(1); hipStreamCreate(&s1); s0s.push_back(s0); s1s.push_back(s1); } hipSetDevice(0); int access = 0; hipDeviceCanAccessPeer(&access,0,1); cout << access << endl; hipDeviceCanAccessPeer(&access,1,0); cout << access << endl; for(int epoch = 1; epoch < 100; epoch++) { if(lSync0.size() > 0) { for(int i = 0; i < packages; i++) { hipFree(lSync0[i]->data); hipFree(lData0[i]->data); hipFree(lSync1[i]->data); hipFree(lData1[i]->data); } lSync0.clear(); lData0.clear(); lSync1.clear(); lData1.clear(); } for(int i = 0; i < packages; i++) { hipSetDevice(0); lSync0.push_back(zeros(128*epoch,128*epoch)); lData0.push_back(gpu->rand(128*epoch,128*epoch)); hipSetDevice(1); lSync1.push_back(zeros(128*epoch,128*epoch)); lData1.push_back(gpu->rand(128*epoch,128*epoch)); } hipSetDevice(0); gpu->tick(); for(int j = 0; j < packages; j++) { hipMemcpyAsync(lSync1[j]->data,lData0[j]->data,lData0[j]->bytes,hipMemcpyDefault, s0s[j]); hipSetDevice(1); hipMemcpyAsync(lSync0[j]->data,lData1[j]->data,lData1[j]->bytes,hipMemcpyDefault,s1s[j]); hipSetDevice(0); } for(int i = 0; i < packages; i++) { hipStreamSynchronize(s0s[i]); hipStreamSynchronize(s1s[i]); } time = gpu->tock()/1000.; cout << packages*lData0[0]->bytes/1024./1024./1024./time << " GB/s" << endl; } } void bandwidth_test_kernel() { ClusterNet *gpu = new ClusterNet(1235); std::vector<Matrix*> lSync0; std::vector<Matrix*> lData0; std::vector<Matrix*> lSync1; std::vector<Matrix*> lData1; std::vector<hipStream_t> s0s; std::vector<hipStream_t> s1s; int packages = 10; float time = 0; hipSetDevice(0); hipDeviceEnablePeerAccess(1,0); hipSetDevice(1); hipDeviceEnablePeerAccess(0,0); for(int i = 0; i < packages; i++) { hipStream_t s0; hipStream_t s1; hipSetDevice(0); hipStreamCreate(&s0); hipSetDevice(1); hipStreamCreate(&s1); s0s.push_back(s0); s1s.push_back(s1); } hipSetDevice(0); int access = 0; hipDeviceCanAccessPeer(&access,0,1); cout << access << endl; hipDeviceCanAccessPeer(&access,1,0); 
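// hipDeviceCanAccessPeer writes 1 into `access` when device 1 can map device 0's memory
// (the preceding call checked the opposite direction); both results are printed before the
// cross-device add() benchmark loop that follows.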
cout << access << endl; for(int epoch = 1; epoch < 1000; epoch++) { if(lSync0.size() > 0) { for(int i = 0; i < packages; i++) { hipFree(lSync0[i]->data); hipFree(lData0[i]->data); hipFree(lSync1[i]->data); hipFree(lData1[i]->data); } lSync0.clear(); lData0.clear(); lSync1.clear(); lData1.clear(); } for(int i = 0; i < packages; i++) { hipSetDevice(0); lSync0.push_back(zeros(128*epoch,128*epoch)); lData0.push_back(gpu->rand(128*epoch,128*epoch)); hipSetDevice(1); lSync1.push_back(zeros(128*epoch,128*epoch)); lData1.push_back(gpu->rand(128*epoch,128*epoch)); } hipSetDevice(0); gpu->tick(); for(int j = 0; j < packages; j++) { add(lSync0[j],lData1[j],lSync0[j]); hipSetDevice(1); add(lSync1[j],lData0[j],lSync1[j]); hipSetDevice(0); } hipDeviceSynchronize(); hipSetDevice(1); hipDeviceSynchronize(); hipSetDevice(0); time = gpu->tock(); /* for(int i = 0; i < packages; i++) assert(sum(lData0[i]) == sum(lSync1[i])); for(int i = 0; i < packages; i++) assert(sum(lData1[i]) == sum(lSync0[i])); */ printdim(lSync0[0]); cout << 1000*2*packages*lData0[0]->bytes/1024./1024./1024./time << " GB/s" << endl; } } void bandwidth_test_compression(int argc, char *argv[]) { ClusterNet *gpu = new ClusterNet(argc,argv,1235,true); MPI_Request *send_request = new MPI_Request; MPI_Request *recv_request = new MPI_Request; Matrix *w_grad_next = empty(1024,1024); Matrix *w_next_sync = empty(1024,1024); //warmup int target = gpu->MYRANK +1 == gpu->MPI_SIZE ? 0 : gpu->MYRANK+1; int source = gpu->MYRANK-1 == -1 ? gpu->MPI_SIZE-1 : gpu->MYRANK-1; for (int i = 0; i < gpu->MPI_SIZE - 1; i++) { MPI_Isend(w_grad_next->data,w_grad_next->size,MPI_FLOAT,target,i,MPI_COMM_WORLD, send_request); MPI_Irecv(w_next_sync->data,w_grad_next->size,MPI_FLOAT,source,i,MPI_COMM_WORLD,recv_request); target = target +1 == gpu->MPI_SIZE ? 0 : target+1; source = source-1 == -1 ? gpu->MPI_SIZE-1 : source-1; } MPI_Wait(recv_request,MPI_STATUS_IGNORE); int times = 100; gpu->tick(); for(int i = 0; i < times; i++) { target = gpu->MYRANK +1 == gpu->MPI_SIZE ? 0 : gpu->MYRANK+1; source = gpu->MYRANK-1 == -1 ? gpu->MPI_SIZE-1 : gpu->MYRANK-1; for (int i = 0; i < gpu->MPI_SIZE - 1; i++) { MPI_Isend(w_grad_next->data,w_grad_next->size,MPI_FLOAT,target,i,MPI_COMM_WORLD, send_request); MPI_Recv(w_next_sync->data,w_grad_next->size,MPI_FLOAT,source,i,MPI_COMM_WORLD, MPI_STATUS_IGNORE); target = target +1 == gpu->MPI_SIZE ? 0 : target+1; source = source-1 == -1 ? 
gpu->MPI_SIZE-1 : source-1; } //MPI_Wait(send_request,MPI_STATUS_IGNORE); } float sec = gpu->tock()*1000.0; float GB = 3*times*w_grad_next->bytes/(1024.0*1024.0*1024.0); if(gpu->MYRANK == 0) { cout << "Size in GB: " << GB << endl; cout << "GB/s: " << GB/sec << endl; } gpu->shutdown_MPI(); } void simple_bandwidth_test(int argc, char *argv[]) { ClusterNet *gpu = new ClusterNet(argc,argv,1235,true); MPI_Request *send_request = new MPI_Request; MPI_Request *recv_request = new MPI_Request; int size = 12000; for(int i = 8; i < size; i+=8) { for(int j = 0; j < 3; j++) { Matrix *w_grad_next; if(j==0) w_grad_next = empty(i,i); if(j==1) w_grad_next = empty(i/2,i/2); if(j==2) w_grad_next = empty(i/4,i/8); Matrix *w_next_sync; if(j==0) w_next_sync = empty(i,i); if(j==1) w_next_sync = empty(i/2,i/2); if(j==2) w_next_sync = empty(i/4,i/8); if(gpu->MYRANK == 0) MPI_Send(w_grad_next->data,w_grad_next->size,MPI_FLOAT,1,999,MPI_COMM_WORLD); if(gpu->MYRANK == 1) MPI_Recv(w_next_sync->data,w_next_sync->size,MPI_FLOAT,0,999,MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Barrier(MPI_COMM_WORLD); int times = 100; gpu->tick(); for(int k = 0; k < times; k++) { if(gpu->MYRANK == 0) MPI_Send(w_grad_next->data,w_grad_next->size,MPI_FLOAT,1,999,MPI_COMM_WORLD); if(gpu->MYRANK == 1) MPI_Recv(w_next_sync->data,w_next_sync->size,MPI_FLOAT,0,999,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } float quant = 9.5e-08f; float dequant = 2.0e-08f; float compreess = 1.e-07f; float decompress = 5.0e-08f; float added_penalty = 0.0f; if(j == 1)added_penalty = compreess + decompress; if(j == 2)added_penalty = quant + dequant; //cout << 100*(added_penalty)*w_grad_next->size << endl; float sec = gpu->tock() + (100*(added_penalty)*(i*i)); float GB = times*w_grad_next->bytes/(1024.0*1024.0*1024.0); if(gpu->MYRANK == 0) { cout << "Size: " << w_grad_next->rows << "x" << w_grad_next->cols << " GB/s: " << GB/(sec/1000) << " " << sec*(j == 2 ? 2.0 : 1.0) << "ms"<< endl; } hipFree(w_grad_next->data); hipFree(w_next_sync->data); } } gpu->shutdown_MPI(); } void model_parallelism_test(int argc, char *argv[]) { ClusterNet *GPU = new ClusterNet(argc,argv,1235,true); MPI_Request *send_request = new MPI_Request; MPI_Request *recv_request = new MPI_Request; std::vector<MPI_Request *> send_requests; std::vector<MPI_Request *> recv_requests; for(int i = 0; i < GPU->MPI_SIZE-1; i++) { send_requests.push_back(new MPI_Request); recv_requests.push_back(new MPI_Request); } float max_value = 1.0f; for(int round = 128; round <= 8192; round+=128) { int batch_size = 256; int inner = round; int outer = round; Matrix *A = GPU->rand(batch_size,inner); Matrix *B = GPU->distributed_uniformSqrtWeight(inner,outer); Matrix *B_normal = GPU->uniformSqrtWeight(inner,outer); Matrix *out = zeros(batch_size,outer); Matrix *out_stacked = zeros(batch_size,outer); int col_split_size = (B->isDistributed == 1 ? B->cols_distributed : B->cols) / GPU->MPI_SIZE; int remainder = (B->isDistributed == 1 ? 
B->cols_distributed : B->cols) - (col_split_size*GPU->MPI_SIZE); if(GPU->MYRANK == 0) cout << batch_size << "x" << inner << " DOT " << inner << "x" << outer << endl; Matrix** arrOut = (Matrix**) malloc(sizeof(Matrix*) * GPU->MPI_SIZE); Matrix** arrOut8 = (Matrix**) malloc(sizeof(Matrix*) * GPU->MPI_SIZE); for (int i = 0; i < GPU->MPI_SIZE; i++) { if (i == GPU->MPI_SIZE - 1) { arrOut[i] = empty(A->rows, col_split_size + remainder); arrOut8[i] = empty_char(A->rows, col_split_size + remainder); } else { arrOut[i] = empty(A->rows, col_split_size); arrOut8[i] = empty_char(A->rows, col_split_size); } } float **h_arrA = (float**) malloc(sizeof(float*) * GPU->MPI_SIZE); unsigned char **h_arrA8 = (unsigned char**) malloc(sizeof(unsigned char*) * GPU->MPI_SIZE); for (int i = 0; i < GPU->MPI_SIZE; i++) { h_arrA[i] = arrOut[i]->data; h_arrA8[i] = arrOut8[i]->char_data; } float **d_arrA; hipMalloc((void**) &d_arrA, sizeof(float*) * GPU->MPI_SIZE); hipMemcpy(d_arrA, h_arrA, sizeof(float*) * GPU->MPI_SIZE,hipMemcpyDefault); unsigned char **d_arrA8; hipMalloc((unsigned char**) &d_arrA8, sizeof(unsigned char*) * GPU->MPI_SIZE); hipMemcpy(d_arrA8, h_arrA8, sizeof(unsigned char*) * GPU->MPI_SIZE,hipMemcpyDefault); for(int epoch = 0; epoch < 2; epoch++) for(int type = 0; type < 3; type++) { std::string text = ""; if(type == 0) text = "DOT"; else if(type == 1) text = "DOT32BIT"; else if(type == 2) text = "DOT8BIT"; if(GPU->MYRANK == 0 && epoch == 1){ GPU->tick(text); } for(int i = 0; i < 100; i++) { if(type == 0) { GPU->dot(A,B_normal,out); continue; } GPU->dot(A,B,arrOut[GPU->MYRANK]); int target = GPU->MYRANK +1 == GPU->MPI_SIZE ? 0 : GPU->MYRANK+1; int source = GPU->MYRANK-1 == -1 ? GPU->MPI_SIZE-1 : GPU->MYRANK-1; if(type == 2) { GPU->compression_8bit(arrOut[GPU->MYRANK],max_value,arrOut8[GPU->MYRANK]); for (int i = 0; i < GPU->MPI_SIZE - 1; i++) { MPI_Isend(arrOut8[GPU->MYRANK]->char_data,arrOut8[GPU->MYRANK]->size,MPI_CHAR,target,i,MPI_COMM_WORLD, send_requests[i]); MPI_Irecv(arrOut8[source]->char_data,arrOut8[source]->size,MPI_CHAR,source,i,MPI_COMM_WORLD,recv_requests[i]); target = target +1 == GPU->MPI_SIZE ? 0 : target+1; source = source-1 == -1 ? GPU->MPI_SIZE-1 : source-1; } } if(type == 1) { for (int i = 0; i < GPU->MPI_SIZE - 1; i++) { MPI_Isend(arrOut[GPU->MYRANK]->data,arrOut[GPU->MYRANK]->size,MPI_FLOAT,target,i,MPI_COMM_WORLD, send_requests[i]); MPI_Irecv(arrOut[source]->data,arrOut[source]->size,MPI_FLOAT,source,i,MPI_COMM_WORLD,recv_requests[i]); target = target +1 == GPU->MPI_SIZE ? 0 : target+1; source = source-1 == -1 ? 
GPU->MPI_SIZE-1 : source-1; } } //MPI_Wait(next->send_request,MPI_STATUS_IGNORE); for(int i = 0; i < GPU->MPI_SIZE-1; i++) MPI_Wait(recv_requests[i],MPI_STATUS_IGNORE); if(type == 2) { for (int i = 0; i < GPU->MPI_SIZE; i++) { if(i == GPU->MYRANK){continue;} GPU->decompression_8bit(arrOut8[i],max_value,arrOut[i]); } } hStackN(d_arrA, arrOut[0]->size, out_stacked, GPU->MPI_SIZE); } if(GPU->MYRANK == 0 && epoch == 1){ GPU->tock(text); } /* MPI_Barrier(MPI_COMM_WORLD); if(type == 0) printsum(out); else if(type == 1) printsum(out_stacked); else if(type == 2) printsum(out_stacked); MPI_Barrier(MPI_COMM_WORLD); if(type == 0) printmat(out,0,4,0,4); else if(type == 1) printmat(out_stacked,0,4,0,4); else if(type == 2) printmat(out_stacked,0,4,0,4); */ if(type == 0) { abs(out,out); max_value = max(out); } } hipFree(A->data); hipFree(B->data); hipFree(out->data); hipFree(out_stacked->data); hipFree(d_arrA8); hipFree(d_arrA); for(int i = 0; i < GPU->MPI_SIZE; i++) { hipFree(arrOut[i]->data); hipFree(arrOut8[i]->char_data); } size_t total, free; hipMemGetInfo(&free, &total); } GPU->shutdown_MPI(); } void simple_bandwidth_test_CPU(int argc, char *argv[]) { ClusterNet *gpu = new ClusterNet(argc,argv,1235,true); MPI_Request *send_request = new MPI_Request; MPI_Request *recv_request = new MPI_Request; size_t size = 1024*1024*1024; float *data = (float*)malloc(sizeof(float)*size); float *data_sync = (float*)malloc(sizeof(float)*size); int times = 10; gpu->tick(); for(int i = 0; i < times; i++) { if(gpu->MYRANK == 0) { MPI_Send(data,size,MPI_FLOAT,1,999,MPI_COMM_WORLD); //MPI_Recv(w_next_sync->data,w_next_sync->size,MPI_FLOAT,0,999,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } if(gpu->MYRANK == 1) { MPI_Recv(data_sync,size,MPI_FLOAT,0,999,MPI_COMM_WORLD, MPI_STATUS_IGNORE); //MPI_Send(w_grad_next->data,w_grad_next->size,MPI_FLOAT,1,999,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } double sec = gpu->tock()*1000.0; double GB = times*size*4.0/(1024.0*1024.0*1024.0); if(gpu->MYRANK == 0) { cout << "Size in GB: " << GB << endl; cout << "GB/s: " << GB/sec << endl; } gpu->shutdown_MPI(); } void compression_test(int argc, char *argv[]) { ClusterNet *gpu = new ClusterNet(); Matrix *A = scalarMul(gpu->randn(5120,5120),1.0f/10.0f); Matrix *out = empty_char(5120,5120); gpu->tick(); for(int i = 0; i < 10000; i++) gpu->compression_8bit(A, 0.1f,out); gpu->tock(); gpu->tick(); for(int i = 0; i < 10000; i++) gpu->decompression_8bit(out, 0.1f,A); gpu->tock(); } int main(int argc, char *argv[]) { //bandwidth_test_peer(); //bandwidth_test_MPI(argc,argv); //bandwidth_test_kernel(); //compression_test(argc,argv); //simple_bandwidth_test(argc,argv); //simple_bandwidth_test_CPU(argc,argv); //model_parallelism_test(argc,argv); //ClusterNet *gpu = new ClusterNet(234); /* Matrix *rdm = gpu->rand_numbers(10,10); printmat(rdm); */ /* ClusterNet *gpu = new ClusterNet(234); int out_rows = 128; int out_cols = 800; int inner = 784; Matrix *A = gpu->rand(out_rows,inner); Matrix *B = gpu->rand(inner,out_cols); Matrix *out1 = zeros(out_rows,out_cols); Matrix *charA = empty_char(out_rows,inner); Matrix *charB = empty_char(inner,out_cols); Matrix *out2 = empty(out_rows,out_cols); Matrix *out3 = empty(out_rows,out_cols); gpu->tick(); for(int i = 0; i < 100; i++) gpu->dot(A,B,out3); gpu->tock(); float maxA = max(abs(A)); float maxB = max(abs(B)); gpu->compression_8bit(A,maxA,charA); gpu->compression_8bit(B,maxB,charB); //printmat(A); //printmat(gpu->decompression_8bit(charA,maxA)); //printmat(B); //printmat(gpu->decompression_8bit(charB,maxB)); //cout << 
sum(gpuSqrt(square(sub(B,gpu->decompression_8bit(charB,maxB)))))/(float)B->size << endl; //cout << sum(gpuSqrt(square(sub(A,gpu->decompression_8bit(charA,maxA)))))/(float)B->size << endl; //gpu->compression_8bit(A,maxA,charA); //printmat(out1); //printmat(out1,60,65,70,80); gpu->tick(); for(int i = 0; i < 100; i++) { fill_matrix(out1,0.0f); gpu->dot8bit(charA,charB,maxA,maxB,out1); } gpu->tock(); gpu->tick(); for(int i = 0; i < 100; i++) gpu->dot8bit_shared(charA,charB,maxA,maxB,out2); gpu->tock(); //printmat(gpu->decompression_8bit(charB,maxB)); //printmat(out1,60,65,70,80); //printmat(out2,60,65,70,80); //printmat(out1); //printmat(out2); //printsum(out1); //printsum(out2); cout << sum(gpuSqrt(square(sub(out1,out2))))/(float)out1->size << endl; cout << sum(gpuSqrt(square(sub(out1,out3))))/(float)out1->size << endl; cout << sum(gpuSqrt(square(sub(out2,out3))))/(float)out1->size << endl; //cout << "max A " << maxA <<endl; //cout << "max B " << maxB <<endl; */ ClusterNet *gpu = new ClusterNet(argc,argv,123635,false); //ClusterNet *gpu = new ClusterNet(argc,argv); /* Matrix *A = gpu->distributed_uniformSqrtWeight(6,4); Matrix *B = gpu->rand(4,6); Matrix *A2 = empty(6,2); A2->data = A->data; printmat(A); Matrix *C = gpu->dotMPI(B,A); Matrix *C2 = gpu->dot(B,A2); printmat(C); printmat(C2); gpu->shutdown_MPI(); */ //Matrix *X = read_hdf5("/home/tim/data/mnist/X.hdf5"); //Matrix *y = read_hdf5("/home/tim/data/mnist/y.hdf5"); //Matrix *X = gpu->distribute_file("/home/tim/data/mnist/X.hdf5"); //Matrix *y = gpu->distribute_file("/home/tim/data/mnist/y.hdf5"); Matrix *X = gpu->distribute_rows_hdf5_file("/home/tim/data/mnist/distributed_X.hdf5"); Matrix *y = gpu->distribute_rows_hdf5_file("/home/tim/data/mnist/distributed_y.hdf5"); //Matrix *y = gpu->distribute_rows_hdf5_file("/home/tim/data/mnist/y_15000.hdf5"); printdim(X); printdim(y); BatchAllocator b = BatchAllocator(); //16384 int batch_size_per_GPU = 128; b.init(X,y,(1.0-0.85715),batch_size_per_GPU,128,gpu, Single_GPU); Layer *l0 = new Layer(X->cols,batch_size_per_GPU,Input,gpu); l0->PARALLELISM = DataParallelism; //l0->PARALLELISM = ModelParallelism; Layer *l1 = new Layer(1024, Logistic, l0); l1->PARALLELISM = DataParallelism; //l1->PARALLELISM = ModelParallelism; Layer *l2 = new Layer(1024, Logistic, l1); l2->PARALLELISM = DataParallelism; //l2->PARALLELISM = ModelParallelism; Layer *l3 = new Layer(10, Softmax, l2); l3->PARALLELISM = DataParallelism; //l3->PARALLELISM = ModelParallelism; l0->MAX_GRAD_VALUE = 0.005; l1->MAX_GRAD_VALUE = 0.002; l2->MAX_GRAD_VALUE = 0.01; l3->MAX_GRAD_VALUE = 1; /* cout << "l0: " << l0->MAX_GRAD_VALUE << endl; cout << "l1: " << l1->MAX_GRAD_VALUE << endl; cout << "l2: " << l2->MAX_GRAD_VALUE << endl; cout << "l3: " << l3->MAX_GRAD_VALUE << endl; */ l0->DROPOUT = 0.2f; l0->set_hidden_dropout(0.5f); cout << gpu->MYRANK << endl; float decay = 0.99f; gpu->tick("pass"); b.SKIP_LAST_BATCH = true; int epochs = 75; for(int epoch = 0; epoch < epochs; epoch++) { gpu->tick("epoch"); if(gpu->MYRANK == 0) cout << "EPOCH: " << epoch + 1 << endl; b.propagate_through_layers(l0,Training,epoch); b.propagate_through_layers(l0,Trainerror,epoch); b.propagate_through_layers(l0,CVerror,epoch); l0->learning_rate_decay(decay); /* cout << "l0: " << l0->MAX_GRAD_VALUE << endl; cout << "l1: " << l1->MAX_GRAD_VALUE << endl; cout << "l2: " << l2->MAX_GRAD_VALUE << endl; cout << "l3: " << l3->MAX_GRAD_VALUE << endl; */ if(epoch == 60) { l0->dropout_decay(); decay = 0.85f; /* l0->compression = bits_32; l1->MAX_GRAD_VALUE = bits_32; 
l2->MAX_GRAD_VALUE = bits_32; l3->MAX_GRAD_VALUE = bits_32; */ } //cout << l1->MAX_GRAD_VALUE << endl; gpu->tock("epoch"); } gpu->tock("pass"); gpu->shutdown_MPI(); if(gpu->MYRANK == 0) { int n1 = l3->Train_errors[0].size(); int n2 = l3->CV_errors[0].size(); cout << n1 << endl; cout << n2 << endl; Matrix *train = empty_cpu(epochs,n1); Matrix *cv = empty_cpu(epochs,n2); for(int i = 0; i < epochs; i++) { for(int j = 0; j < n1; j++) train->data[j + (i*n1)] = l3->Train_errors[i][j]; for(int j = 0; j < n2; j++) cv->data[j + (i*n2)] = l3->CV_errors[i][j]; } write_hdf5("/home/tim/data/mnist/results/8bit/train_error_model.hdf5" ,train); write_hdf5("/home/tim/data/mnist/results/8bit/cv_error_model.hdf5",cv); } /* hipSetDevice(0); Matrix *X = read_hdf5("/home/tim/data/mnist/X.hdf5"); Matrix *y = read_hdf5("/home/tim/data/mnist/y.hdf5"); ClusterNet gpu = ClusterNet(1235); BatchAllocator b = BatchAllocator(); std::vector<int> layers; layers.push_back(1200); layers.push_back(1200); std::vector<float> dropout; dropout.push_back(0.2f); dropout.push_back(0.5f); dropout.push_back(0.5f); BatchAllocator allocator = BatchAllocator(); allocator.init(X,y,(1.0-0.8571429),128,256,gpu, Single_GPU); DeepNeuralNetwork net = DeepNeuralNetwork(layers,Classification, gpu, allocator, 10); net.EPOCHS = 500; net.TRANSITION_EPOCH = 75; net.LEARNING_RATE = 0.003; net.UPDATE_TYPE = RMSProp; net.DROPOUT = dropout; //net.MAIN_UNIT = Double_Rectified_Linear; net.train(); */ //hipSetDevice(1); //ClusterNet *gpus = new ClusterNet(123635); //WikiMaxoutNet_PCIe net = WikiMaxoutNet_PCIe(gpus); //net.run(); /* hipSetDevice(0); struct arg_struct *args0 = (arg_struct*)malloc(sizeof(arg_struct)); ClusterNet *gpus0 = new ClusterNet(23452345); WikiMaxoutNet *net0 = new WikiMaxoutNet(gpus0[0]); args0->gpus = gpus0; args0->net = net0; args0->device = 0; net0->run(); pthread_t t0; pthread_create(&t0, NULL, &run_net, args0); hipSetDevice(1); struct arg_struct *args1 = (arg_struct*)malloc(sizeof(arg_struct)); ClusterNet *gpus1 = new ClusterNet(23452345); WikiMaxoutNet *net1 = new WikiMaxoutNet(gpus1[0]); args1->gpus = gpus1; args1->net = net1; args1->device = 1; pthread_t t1; //pthread_create(&t1, NULL, &run_net, args1); hipSetDevice(2); struct arg_struct *args2 = (arg_struct*)malloc(sizeof(arg_struct)); ClusterNet *gpus2 = new ClusterNet(23452345); WikiMaxoutNet *net2 = new WikiMaxoutNet(gpus2[0]); args2->gpus = gpus2; args2->net = net2; args2->device = 2; pthread_t t2; //pthread_create(&t2, NULL, &run_net, args2); cout << "rolfen kek!" << endl; void* result0; void* result1; void* result2; pthread_join(t0,&result0); //pthread_join(t1,&result1); //pthread_join(t2,&result2); */ }
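// ---------------------------------------------------------------------------------------------
// Illustrative sketch only; it is not called anywhere in this program. The MPI benchmarks and
// model_parallelism_test above repeatedly derive ring neighbours with expressions of the form
//     target = MYRANK + 1 == MPI_SIZE ? 0 : MYRANK + 1;
//     source = MYRANK - 1 == -1 ? MPI_SIZE - 1 : MYRANK - 1;
// The helper below restates that wrap-around logic in one place, assuming contiguous 0-based
// ranks. It is a reading aid under those assumptions, not part of the original author's code.
static inline void ring_neighbours(int rank, int mpi_size, int *target, int *source)
{
    *target = (rank + 1) % mpi_size;              // next rank in the ring, wrapping to 0
    *source = (rank - 1 + mpi_size) % mpi_size;   // previous rank, wrapping to mpi_size - 1
}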
aa9794fb153c78f74ee38fe11097e43789953622.cu
#include <stdio.h> #include <cublas_v2.h> #include <util.cuh> #include <basicOps.cuh> #include <mpi.h> #include <cuda.h> #include <assert.h> #include <util.cuh> #include <clusterNet.h> #include <time.h> #include <batchAllocator.h> #include <DeepNeuralNetwork.h> #include <WikiMaxoutNet.h> #include <WikiMaxoutNet_PCIe.h> #include <WikiMaxoutNet_PCIe2.h> #include <WikiNetDist.h> #include <Layer.h> #include <time.h> using std::cout; using std::endl; void run_neural_network() { Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5"); Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5"); ClusterNet gpu = ClusterNet(12345); cout << X->rows << endl; int hidden_size = 1024; Matrix *w1 = gpu.sparseInitWeight(784,hidden_size); Matrix *w2 = gpu.sparseInitWeight(hidden_size,10); Matrix *m1 = zeros(784,hidden_size); Matrix *m2 = zeros(hidden_size,10); Matrix *ms1 = zeros(784,hidden_size); Matrix *ms2 = zeros(hidden_size,10); Matrix *grad_w1_ms = zeros(784,hidden_size); Matrix *grad_w2_ms = zeros(hidden_size,10); Matrix *grad_w2 = empty(hidden_size,10); Matrix *grad_w1 = empty(784,hidden_size); float cv_error = 0; float cv_size = 0.1428571f; float train_error = 0.0f; BatchAllocator b = BatchAllocator(); b.init(X, y, cv_size, 128, 512); clock_t t1,t2; t1=clock(); //code goes here int epochs = 100; gpu.tick(); float learning_rate = 0.003; //size_t free = 0; //size_t total = 0; float momentum = 0.5; for(int EPOCH = 0; EPOCH < epochs; EPOCH++) { std::cout << "EPOCH: " << EPOCH + 1 << std::endl; //cudaMemGetInfo(&free, &total); //std::cout << free << std::endl; momentum += 0.01; if(momentum > 0.95) momentum = 0.95; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.allocate_next_batch_async(); //nesterov updates scalarMul(m1,momentum,m1); scalarMul(m2,momentum,m2); add(w1,m1,w1); add(w2,m2,w2); Matrix *d0 = gpu.dropout(b.CURRENT_BATCH,0.2); Matrix *z1 = gpu.dot(d0, w1); logistic(z1, z1); Matrix *d1 = gpu.dropout(z1,0.5); Matrix *a2 = gpu.dot(d1,w2); Matrix *out = softmax(a2); Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10); //backprop Matrix *e1 = sub(out, t); Matrix *e2 = gpu.dotT(e1, w2); gpu.Tdot(z1,e1,grad_w2); logisticGrad(z1,z1); mul(e2,z1,e2); gpu.Tdot(b.CURRENT_BATCH,e2,grad_w1); b.allocate_next_batch_async(); RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); cudaFree(e1->data); cudaFree(e2->data); cudaFree(z1->data); cudaFree(a2->data); cudaFree(out->data); cudaFree(t->data); cudaFree(d0->data); cudaFree(d1->data); b.replace_current_batch_with_next(); } //Matrix *sum_value = sum(w1); //std::cout << "weight 1 Sum: " << to_host(sum_value)->data[0] << std::endl; train_error = 0; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.broadcast_batch_to_processes(); //Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8); Matrix *a1 = gpu.dot(b.CURRENT_BATCH,w1); logistic(a1, a1); //Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpu.dot(a1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_Y); b.allocate_next_batch_async(); float sum_value = sum(eq); train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ; cudaFree(a1->data); cudaFree(a2->data); cudaFree(out->data); cudaFree(result->data); cudaFree(eq->data); //cudaFree(d0->data); //cudaFree(d1->data); b.replace_current_batch_with_next(); } std::cout << "Train error: " << train_error << std::endl; cv_error = 0; for(int i = 0; 
i < b.TOTAL_BATCHES_CV; i++) { b.broadcast_batch_cv_to_processes(); Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8); Matrix *a1 = gpu.dot(d0,w1); logistic(a1, a1); Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpu.dot(d1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y); b.allocate_next_cv_batch_async(); float sum_value = sum(eq); cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ; cudaFree(a1->data); cudaFree(a2->data); cudaFree(out->data); cudaFree(result->data); cudaFree(eq->data); cudaFree(d0->data); cudaFree(d1->data); b.replace_current_cv_batch_with_next(); } std::cout << "Cross validation error: " << cv_error << std::endl; } cudaThreadSynchronize(); t2=clock(); float diff ((float)t2-(float)t1); float mseconds = (diff / CLOCKS_PER_SEC)/1000; std::cout<<mseconds<<std::endl; gpu.tock(); b.finish_batch_allocator(); //gpu.tock("batch replace"); //gpu.tock("async batch allocate"); //gpu.tock("feedforward"); printf("Finished!\n"); } void run_maxout_network() { cudaSetDevice(0); Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5"); Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5"); ClusterNet gpus = ClusterNet(12345); int hiddenunits = 512; int maxout_Size = 8; int batch_size = 128; Matrix *w1 = gpus.uniformSqrtWeight(784,hiddenunits); Matrix *w2 = gpus.uniformSqrtWeight(hiddenunits/maxout_Size,10); Matrix *b1 = zeros(1,hiddenunits); Matrix *b2 = zeros(1,10); Matrix *m1 = zeros(784,hiddenunits); Matrix *m2 = zeros(hiddenunits/maxout_Size,10); Matrix *mb1 = zeros(1,hiddenunits); Matrix *mb2 = zeros(1,10); Matrix *ms1 = zeros(784,hiddenunits); Matrix *ms2 = zeros(hiddenunits/maxout_Size,10); Matrix *msb1 = zeros(1,hiddenunits); Matrix *msb2 = zeros(1,10); Matrix *grad_w1 = zeros(784,hiddenunits); Matrix *grad_w2 = zeros(hiddenunits/maxout_Size,10); Matrix *grad_b1 = zeros(1,hiddenunits); Matrix *grad_b2 = zeros(1,10); float cv_error = 0.0f; float train_error = 0.0f; BatchAllocator b = BatchAllocator(); b.init(X, y, 0.2, batch_size, 512); int epochs = 1000; float learning_rate = 0.001; float momentum = 0.5; for(int EPOCH = 1; EPOCH < epochs; EPOCH++) { cout << "EPOCH: " << EPOCH << endl; //momentum += 0.01; //if(momentum > 0.95) momentum = 0.95; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.broadcast_batch_to_processes(); //nesterov updates scalarMul(m1,momentum,m1); scalarMul(m2,momentum,m2); scalarMul(mb1,momentum,mb1); scalarMul(mb2,momentum,mb2); add(w1,m1,w1); add(w2,m2,w2); add(b1,mb1,b1); add(b2,mb2,b2); //feedforward Matrix *d0 = gpus.dropout(b.CURRENT_BATCH,0.2); Matrix *z1 = gpus.dot(d0, w1); addMatrixVector(z1,b1,z1); Matrix **a_paired = maxout(z1,maxout_Size); Matrix *a1 = a_paired[0]; Matrix *a1_idx = a_paired[1]; Matrix *d1 = gpus.dropout(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); addMatrixVector(a2,b2,a2); Matrix *out = softmax(a2); Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10); b.allocate_next_batch_async(); //backprop Matrix *e1 = sub(out, t); Matrix *e2_partial = gpus.dotT(e1, w2); Matrix *e2 = empty(b.CURRENT_BATCH->rows,e2_partial->cols*maxout_Size); Matrix *aB = ones(1,b.CURRENT_BATCH->rows); gpus.Tdot(a1,e1,grad_w2); gpus.dot(aB,e1,grad_b2); expand_to_maxout_grad(e2_partial, a1_idx,e2); gpus.Tdot(b.CURRENT_BATCH,e2,grad_w1); gpus.dot(aB,e2,grad_b1); //weight updates //RMSProp RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); 
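// Note: grad_b1/grad_b2 above are formed by multiplying the error matrices with the
// 1 x batch_size ones vector aB, i.e. a column sum over the batch; the RMSprop calls
// below then update the biases with the same routine used for the weight matrices.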
RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); /* scalarMul(grad_w1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w1); scalarMul(grad_w2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w2); scalarMul(grad_b1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b1); scalarMul(grad_b2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b2); //classical momentum scalarMul(m1,momentum,m1); scalarMul(m2,momentum,m2); scalarMul(mb1,momentum,mb1); scalarMul(mb2,momentum,mb2); sub(m1,grad_w1,m1); sub(m2,grad_w2,m2); sub(mb1,grad_b1,mb1); sub(mb2,grad_b2,mb2); add(w1,m1,w1); add(w2,m2,w2); add(b1,mb1,b1); add(b2,mb2,b2); */ /* sub(w1,grad_w1,w1); sub(w2,grad_w2,w2); sub(b1,grad_b1,b1); sub(b2,grad_b2,b2); */ cudaFree(e1->data); cudaFree(e2->data); cudaFree(e2_partial->data); cudaFree(z1->data); cudaFree(a1->data); cudaFree(a1_idx->data); cudaFree(a2->data); cudaFree(out->data); cudaFree(t->data); cudaFree(d0->data); cudaFree(d1->data); cudaFree(aB->data); free(a_paired); b.replace_current_batch_with_next(); } train_error = 0; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.broadcast_batch_to_processes(); Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8); Matrix *z1 = gpus.dot(d0,w1); Matrix **a1_pair = maxout(z1,maxout_Size); Matrix *a1 = a1_pair[0]; Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_Y); b.allocate_next_batch_async(); float sum_value = sum(eq); train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ; cudaFree(z1->data); cudaFree(a1->data); cudaFree(a1_pair[1]->data); cudaFree(a2->data); cudaFree(out->data); cudaFree(result->data); cudaFree(eq->data); cudaFree(d0->data); cudaFree(d1->data); free(a1_pair); b.replace_current_batch_with_next(); } std::cout << "MAXOUT Train error: " << train_error << std::endl; cv_error = 0; for(int i = 0; i < b.TOTAL_BATCHES_CV; i++) { b.broadcast_batch_cv_to_processes(); Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8); Matrix *z1 = gpus.dot(d0,w1); Matrix **a1_pair = maxout(z1,maxout_Size); Matrix *a1 = a1_pair[0]; Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y); b.allocate_next_batch_async(); float sum_value = sum(eq); cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ; cudaFree(z1->data); cudaFree(a1->data); cudaFree(a1_pair[1]->data); cudaFree(a2->data); cudaFree(out->data); cudaFree(result->data); cudaFree(eq->data); cudaFree(d0->data); cudaFree(d1->data); free(a1_pair); b.replace_current_cv_batch_with_next(); } std::cout << "MAXOUT Cross validation error: " << cv_error << std::endl; } } void run_normal_net() { cudaSetDevice(2); Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5"); Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5"); ClusterNet gpus = ClusterNet(12345); int hiddenunits = 1024; int maxout_Size = 1; int batch_size = 128; Matrix *w1 = gpus.uniformSqrtWeight(784,hiddenunits); Matrix *w2 = gpus.uniformSqrtWeight(hiddenunits/maxout_Size,10); Matrix *b1 = zeros(1,hiddenunits); Matrix *b2 = zeros(1,10); Matrix *m1 = zeros(784,hiddenunits); Matrix *m2 = 
zeros(hiddenunits/maxout_Size,10); Matrix *mb1 = zeros(1,hiddenunits); Matrix *mb2 = zeros(1,10); Matrix *ms1 = zeros(784,hiddenunits); Matrix *ms2 = zeros(hiddenunits/maxout_Size,10); Matrix *msb1 = zeros(1,hiddenunits); Matrix *msb2 = zeros(1,10); Matrix *grad_w1 = zeros(784,hiddenunits); Matrix *grad_w2 = zeros(hiddenunits/maxout_Size,10); Matrix *grad_b1 = zeros(1,hiddenunits); Matrix *grad_b2 = zeros(1,10); float cv_error = 0.0f; float train_error = 0.0f; BatchAllocator b = BatchAllocator(); b.init(X, y, 0.4, batch_size, 512); int epochs = 500; float learning_rate = 0.000001; float momentum = 0.5; for(int EPOCH = 1; EPOCH < epochs; EPOCH++) { cout << "EPOCH: " << EPOCH << endl; momentum += 0.01; if(momentum > 0.95) momentum = 0.95; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.broadcast_batch_to_processes(); //nesterov updates scalarMul(m1,momentum,m1); scalarMul(m2,momentum,m2); scalarMul(mb1,momentum,mb1); scalarMul(mb2,momentum,mb2); add(w1,m1,w1); add(w2,m2,w2); add(b1,mb1,b1); add(b2,mb2,b2); //feedforward Matrix *d0 = gpus.dropout(b.CURRENT_BATCH,0.2); Matrix *z1 = gpus.dot(d0, w1); addMatrixVector(z1,b1,z1); Matrix *a1 = logistic(z1); //Matrix *a1 = rectified_linear(z1); Matrix *d1 = gpus.dropout(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); addMatrixVector(a2,b2,a2); Matrix *out = softmax(a2); Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10); b.allocate_next_batch_async(); //backprop Matrix *e1 = sub(out, t); Matrix *e2 = gpus.dotT(e1, w2); Matrix *aB = ones(1,b.CURRENT_BATCH->rows); gpus.Tdot(a1,e1,grad_w2); gpus.dot(aB,e1,grad_b2); //rectified_linear_derivative(a1,a1); logisticGrad(a1,a1); mul(e2,a1,e2); gpus.Tdot(b.CURRENT_BATCH,e2,grad_w1); gpus.dot(aB,e2,grad_b1); /* //about equal to momentum update + nesterov update -> momentum applyied to gradient+momentum better? 
RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); */ /* //slow and generally worse error, but sometimes better results in the end RMSprop_with_momentum_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_momentum_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); */ RMSprop_with_nesterov_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_nesterov_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_nesterov_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_nesterov_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); /* //slower but equally good to nesterov momentum RMSprop_with_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); RMSprop_with_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum); */ /* scalarMul(grad_w1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w1); scalarMul(grad_w2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w2); scalarMul(grad_b1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b1); scalarMul(grad_b2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b2); //classical momentum scalarMul(m1,momentum,m1); scalarMul(m2,momentum,m2); scalarMul(mb1,momentum,mb1); scalarMul(mb2,momentum,mb2); sub(m1,grad_w1,m1); sub(m2,grad_w2,m2); sub(mb1,grad_b1,mb1); sub(mb2,grad_b2,mb2); add(w1,m1,w1); add(w2,m2,w2); add(b1,mb1,b1); add(b2,mb2,b2); */ /* sub(w1,grad_w1,w1); sub(w2,grad_w2,w2); sub(b1,grad_b1,b1); sub(b2,grad_b2,b2); */ cudaFree(e1->data); cudaFree(e2->data); cudaFree(z1->data); cudaFree(a1->data); cudaFree(a2->data); cudaFree(out->data); cudaFree(t->data); cudaFree(d0->data); cudaFree(d1->data); cudaFree(aB->data); b.replace_current_batch_with_next(); } train_error = 0; for(int i = 0; i < b.TOTAL_BATCHES; i++) { b.broadcast_batch_to_processes(); Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8); Matrix *z1 = gpus.dot(d0,w1); Matrix *a1 = logistic(z1); //Matrix *a1 = rectified_linear(z1); Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_Y); b.allocate_next_batch_async(); float sum_value = sum(eq); train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ; cudaFree(z1->data); cudaFree(a1->data); cudaFree(a2->data); cudaFree(out->data); cudaFree(result->data); cudaFree(eq->data); cudaFree(d0->data); cudaFree(d1->data); b.replace_current_batch_with_next(); } std::cout << "MAXOUT Train error: " << train_error << std::endl; cv_error = 0; for(int i = 0; i < 
b.TOTAL_BATCHES_CV; i++) { b.broadcast_batch_cv_to_processes(); Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8); Matrix *z1 = gpus.dot(d0,w1); Matrix *a1 = logistic(z1); //Matrix *a1 = rectified_linear(z1); Matrix *d1 = scalarMul(a1,0.5); Matrix *a2 = gpus.dot(d1,w2); Matrix *out = softmax(a2); Matrix *result = argmax(out); Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y); b.allocate_next_batch_async(); float sum_value = sum(eq); cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ; cudaFree(z1->data); cudaFree(a1->data); cudaFree(a2->data); cudaFree(out->data); cudaFree(result->data); cudaFree(eq->data); cudaFree(d0->data); cudaFree(d1->data); b.replace_current_cv_batch_with_next(); } std::cout << "MAXOUT Cross validation error: " << cv_error << std::endl; } } void MPI_benchmark_P2P(int argc, char *argv[]) { char name[100]; int myrank, length, size; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); MPI_Get_processor_name(name, &length); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Status status; int local_rank = myrank % 4; int gpus; cudaGetDeviceCount(&gpus); int mygpu_id; int your_gpu_id; if(myrank == 0) { mygpu_id = 0; if(gpus > 1) your_gpu_id = 1; else your_gpu_id = 0; MPI_Send(&your_gpu_id,1, MPI_INT,1,0,MPI_COMM_WORLD); } else { MPI_Recv(&mygpu_id,1,MPI_INT,myrank-1,0,MPI_COMM_WORLD,&status); if(gpus > mygpu_id+1) your_gpu_id = mygpu_id + 1; else your_gpu_id = 0; if(myrank < size-1) MPI_Send(&your_gpu_id,1, MPI_INT,myrank+1,0,MPI_COMM_WORLD); } cudaSetDevice(mygpu_id); int batch_size = 128; int inner_dim = 10000; int outer_dim = 15000; ClusterNet gpu = ClusterNet(); Matrix *A = gpu.rand(batch_size,inner_dim); Matrix *B = gpu.rand(inner_dim,outer_dim); Matrix *out = empty(batch_size,outer_dim); Matrix *rec = empty(batch_size,outer_dim); Matrix *A1 = gpu.rand(batch_size/2,inner_dim); Matrix *B1 = gpu.rand(inner_dim,outer_dim); Matrix *rec1 = empty(batch_size/2,outer_dim); Matrix *out1 = empty(batch_size/2,outer_dim); Matrix *A2 = gpu.rand(batch_size,inner_dim); Matrix *B2 = gpu.rand(inner_dim,outer_dim/2); Matrix *rec2 = empty(batch_size,outer_dim/2); Matrix *out2 = empty(batch_size,outer_dim/2); gpu.tick("Direct compute"); for(int i = 0; i< 100; i++) { gpu.dot(A,B, out); //add(A, B, out); } gpu.tock("Direct compute"); gpu.tick("partial batch direct compute"); for(int i = 0; i< 100; i++) { gpu.dot(A1,B1, out1); //add(A, B, out); } gpu.tock("partial batch direct compute"); gpu.tick("partial units direct compute"); for(int i = 0; i< 100; i++) { gpu.dot(A2,B2, out2); //add(A, B, out); } gpu.tock("partial units direct compute"); gpu.tick("PCIe transfer"); for(int i = 0; i< 100; i++) { if(local_rank == 0 && gpus > 1) { MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD); } else if(local_rank == 1 && gpus > 1) { //add(A2,B, out); MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); } } gpu.tock("PCIe transfer"); gpu.tick("PCIe dot"); for(int i = 0; i< 100; i++) { if(local_rank == 0 && gpus > 1) { gpu.dot(A2,B2,out2); MPI_Send(out1->data, out1->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD); } else if(local_rank == 1 && gpus > 1) { gpu.dot(A2,B2,out2); MPI_Recv(rec1->data, rec1->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); vStack(out2,rec2,rec); } } gpu.tock("PCIe dot"); gpu.tick("RDMA transfer"); for(int i = 0; i< 100; i++) { if(myrank == 0) { MPI_Send(out->data, out->size, MPI_FLOAT, 3, 100, MPI_COMM_WORLD); } else if(myrank == 3) { //add(A2,B, out); MPI_Recv(rec->data, rec->size, MPI_FLOAT, 
0, 100, MPI_COMM_WORLD, &status); } } gpu.tock("RDMA transfer"); gpu.tick("RDMA dot"); for(int i = 0; i< 100; i++) { if(myrank == 0) { gpu.dot(A2,B2,out2); MPI_Send(out->data, out->size, MPI_FLOAT, 3, 100, MPI_COMM_WORLD); } else if(myrank == 3) { //add(A2,B, out); gpu.dot(A2,B2,out2); MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); vStack(out2,rec2,rec); } } gpu.tock("RDMA dot"); MPI_Finalize(); } void MPI_benchmark(int argc, char *argv[]) { int myrank; MPI_Status status; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); ClusterNet gpu = ClusterNet(); int batch_rows = 128; int w_in = 10000; int w_out = 8000; //dot Matrix *B = gpu.rand(w_in,w_out); Matrix *A = gpu.rand(batch_rows,w_in); assert(test_matrix(A,batch_rows,w_in)); assert(test_matrix(B,w_in,w_out)); Matrix *out = empty(batch_rows, w_out); Matrix *B1 = gpu.rand(w_in,w_out/2); Matrix *B2 = gpu.rand(w_in,w_out/2); Matrix *D = empty(batch_rows,w_out/2); Matrix *A1 = gpu.rand(batch_rows/2,w_in); Matrix *big_out = gpu.rand(batch_rows/2,w_out); Matrix *grand_out = empty(batch_rows, w_out); Matrix *C = gpu.rand(batch_rows/2,w_in); Matrix *C_out = empty(batch_rows/2,w_out); Matrix *E = gpu.rand(batch_rows/4,w_in); Matrix *E_out = empty(batch_rows/4,w_out); Matrix *E_merge = empty(batch_rows/2,w_out); Matrix *E_merge2 = empty(batch_rows/2,w_out); //add /* B = gpu.rand(w_in,w_out); A = gpu.rand(w_in,w_out); out = empty(w_in, w_out); A1 = gpu.rand(w_in/2,w_out); Matrix *A2 = gpu.rand(w_in/2,w_out); D = empty(w_in/2,w_out); */ cudaEvent_t* startstop = tick(); for(int i = 0; i< 100; i++) { gpu.dot(A,B, out); //add(A, B, out); } printf("Direct compute:\n"); tock(startstop); out = empty(batch_rows,w_out/2); Matrix *out2 = empty(batch_rows,w_out/2); startstop = tick(); for(int i = 0; i< 100; i++) { gpu.dot(A,B1, out); gpu.dot(A,B2, out2); vStack(out,out2,grand_out); } printf("Direct compute x2:\n"); tock(startstop); Matrix *mergemat = empty(batch_rows, w_out); out = empty(batch_rows,w_out/2); startstop = tick(); //out = empty(w_in/2,w_out); for(int i = 0; i < 100; i++) { if(myrank == 0) { gpu.dot(A,B1, out); //add(A1, B,out); MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD); } else { gpu.dot(A,B2, out); //add(A2,B, out); MPI_Recv(D->data, D->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); vStack(out,D, mergemat); } } if(myrank == 1) { printf("GPUDirect RDMA:\n"); tock(startstop); } out = empty(batch_rows/2,w_out); startstop = tick(); gpu.tick("aa"); //out = empty(w_in/2,w_out); for(int i = 0; i < 100; i++) { gpu.tick("dot"); gpu.dot(C,B, out); gpu.tick("dot"); if(myrank == 0) { //add(A1, B,out); gpu.tick("send"); MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD); gpu.tick("send"); } else { //add(A2,B, out); gpu.tick("receive"); MPI_Recv(C_out->data, C_out->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status); vStack(out,C_out, grand_out); gpu.tick("receive"); } if(myrank == 1) { //add(A1, B,out); gpu.tick("send"); MPI_Send(out->data, out->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD); gpu.tick("send"); } else { //add(A2,B, out); gpu.tick("receive"); MPI_Recv(C_out->data, C_out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD, &status); vStack(out,C_out, grand_out); gpu.tick("receive"); } } gpu.tock("dot"); if(myrank == 1) { printf("GPUDirect RDMA batch:\n"); tock(startstop); gpu.tock("receive"); gpu.tock("aa"); } else { gpu.tock("send"); } MPI_Finalize(); } void dotMPI_test(int argc, char *argv[]) { /* ClusterNet gpu = ClusterNet(argc, argv, 123465); int inner = 2000; int outer = 
1200; int batch_size = 128; int reduced_left = 128; int reduced_right = 400; Matrix *A = gpu.rand(batch_size,inner); Matrix *B = gpu.rand(inner,outer); Matrix *A1 = gpu.rand(reduced_left,inner); Matrix *B1 = gpu.rand(inner,reduced_right); Matrix *out = empty(batch_size,outer); Matrix *out1 = empty(reduced_left,reduced_right); Matrix *recv1 = empty(reduced_left,reduced_right); Matrix *recv2 = empty(reduced_left,reduced_right); Matrix *recv3 = empty(reduced_left,reduced_right); MPI_Status status; gpu.tick("dot mpi batch"); for(int i = 0; i < 100; i++) { gpu.dotMPI_batchSlice(A,B); } gpu.tock("dot mpi batch"); gpu.tick("dot mpi unit"); for(int i = 0; i < 100; i++) { gpu.dotMPI_unitSlice(A,B); } gpu.tock("dot mpi unit"); printf("My rank: %i\n",gpu.MYRANK); //gpu.benchmark_dot(); gpu.tick("dot normal"); for(int i = 0; i < 100; i++) { gpu.dot(A,B,out); } gpu.tock("dot normal"); //std::vector<MPI_Request> requests; MPI_Request *requests = (MPI_Request*)malloc(sizeof(MPI_Request)*gpu.MPI_SIZE-1); MPI_Request request_send; std::vector<Matrix*> recv_buffer; for(int i = 0; i < gpu.MPI_SIZE-1; i++) { MPI_Request request; requests[i] = request; } int received_count = 0; for(int i = 0; i < 100; i++) { for(int i = 0; i < recv_buffer.size(); i++) cudaFree(recv_buffer[i]->data); recv_buffer.clear(); out1 = empty(reduced_left,reduced_right); for(int i = 0; i < gpu.MPI_SIZE; i++) { recv_buffer.push_back(empty(reduced_left,reduced_right)); } gpu.tick("all to all custom"); //cout << "a1 rows" << A1->rows << endl; gpu.dot(A1,B1,out1); recv_buffer[gpu.MYRANK]= out1; for(int i = 0; i < gpu.MPI_SIZE; i++) { if(gpu.MYRANK == i) { continue; } MPI_Isend(out1->data, out1->size, MPI_FLOAT, i, 100, MPI_COMM_WORLD, &request_send); } for(int i = 0; i < gpu.MPI_SIZE; i++) { if(gpu.MYRANK == i) { continue; } MPI_Irecv(recv1->data, recv1->size, MPI_FLOAT, i, 100, MPI_COMM_WORLD, &requests[i]); } for(int i = 0; i < gpu.MPI_SIZE; i++) { if(gpu.MYRANK == i) { continue; } MPI_Wait(&requests[i],MPI_STATUS_IGNORE); } received_count = 0; while(received_count < gpu.MPI_SIZE-1) { for(int i = 0; i < gpu.MPI_SIZE; i++) { int received = 0; if(gpu.MYRANK == i) { continue; } MPI_Test(&requests[i],&received,&status); if(received == 1) { out1 = hStack(out1,recv1); received_count++; } } } gpu.tick("all to all custom"); } gpu.tock("all to all custom"); int destination = gpu.MYRANK + 1; int source = gpu.MYRANK - 1; if(destination == gpu.MPI_SIZE){destination = 0; } if(source < 0){ source = gpu.MPI_SIZE - 1;} for(int i = 0; i < 100; i++) { out1 = empty(reduced_left,reduced_right); recv1 = empty(reduced_left,reduced_right); gpu.tick("chain custom"); gpu.dot(A1,B1,out1); for(int i = 0; i < gpu.MPI_SIZE-1; i++) { if(i == 0) MPI_Isend(out1->data, out1->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send); else MPI_Isend(recv1->data, recv1->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send); MPI_Recv(recv1->data, recv1->size, MPI_FLOAT, source, 100, MPI_COMM_WORLD, &status); //MPI_Wait(&requests[i],&status); out1 = hStack(out1,recv1); } gpu.tick("chain custom"); } gpu.tock("chain custom"); cout << gpu.MYRANK << endl; int matrix_idx = gpu.MYRANK; Matrix** arrOut = (Matrix**)malloc(sizeof(Matrix*)*gpu.MPI_SIZE); for(int i = 0; i < gpu.MPI_SIZE; i++) arrOut[i] = empty(reduced_left,reduced_right); float **h_arrA = (float**)malloc(sizeof(float*)*gpu.MPI_SIZE); for(int i = 0; i < gpu.MPI_SIZE; i++) h_arrA[i] = arrOut[i]->data; float **d_arrA; cudaMalloc((void**) &d_arrA,sizeof(float*)*gpu.MPI_SIZE); 
cudaMemcpy(d_arrA,h_arrA,sizeof(float*)*gpu.MPI_SIZE,cudaMemcpyDefault); gpu.tick("chain matrix array"); for(int i = 0; i < 100; i++) { gpu.dot(A1,B1,arrOut[gpu.MYRANK]); matrix_idx = gpu.MYRANK; for(int i = 0; i < gpu.MPI_SIZE-1; i++) { MPI_Isend(arrOut[matrix_idx]->data, arrOut[matrix_idx]->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send); matrix_idx = (matrix_idx - 1) < 0 ? gpu.MPI_SIZE-1 : (matrix_idx - 1); MPI_Irecv(arrOut[matrix_idx]->data, arrOut[matrix_idx]->size, MPI_FLOAT, source, 100, MPI_COMM_WORLD,&requests[i]); } MPI_Waitall(gpu.MPI_SIZE-1,requests,MPI_STATUSES_IGNORE); //hStackN(d_arrA,arrOut[0]->size, out,gpu.MPI_SIZE); } gpu.tock("chain matrix array"); gpu.shutdown(); */ } void async_test(int argc, char *argv[]) { ClusterNet gpu = ClusterNet(argc,argv,1324); int rows = 512; int cols = 128; /* MPI_Request r = MPI_REQUEST_NULL; MPI_Request s = MPI_REQUEST_NULL; Matrix *a = gpu.rand(rows,cols); Matrix *b = zeros(rows,cols); if(gpu.MYRANK == 0) { MPI_Irecv(b->data,b->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&r); MPI_Isend(a->data,a->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&s); } else { MPI_Irecv(b->data,b->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&r); MPI_Isend(a->data,a->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&s); } MPI_Wait(&s,MPI_STATUS_IGNORE); MPI_Wait(&r,MPI_STATUS_IGNORE); gpu.tick("MPI"); for(int i = 0; i < 100; i++) { if(gpu.MYRANK == 0) { MPI_Irecv(b->data,b->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&r); MPI_Isend(a->data,a->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&s); } else { MPI_Irecv(b->data,b->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&r); MPI_Isend(a->data,a->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&s); } MPI_Wait(&s,MPI_STATUS_IGNORE); MPI_Wait(&r,MPI_STATUS_IGNORE); } gpu.tock("MPI"); */ if(gpu.MYRANK == 0) { cudaSetDevice(0); //cudaDeviceEnablePeerAccess(1,0); cudaDeviceDisablePeerAccess(1); Matrix *A1 = gpu.rand(rows,cols); Matrix *A2 = gpu.rand(rows,cols); cudaSetDevice(1); //cudaDeviceEnablePeerAccess(0,0); cudaDeviceDisablePeerAccess(0); Matrix *B1 = gpu.rand(rows,cols); Matrix *B2 = gpu.rand(rows,cols); cudaSetDevice(0); cudaStream_t s; cudaStreamCreate(&s); cudaSetDevice(1); cudaStream_t s2; cudaStreamCreate(&s2); cudaSetDevice(0); int access = 0; cudaDeviceCanAccessPeer(&access,0,1); cout << access << endl; cudaDeviceCanAccessPeer(&access,1,0); cout << access << endl; cudaSetDevice(0); gpu.tick("cuda"); for(int i = 0; i < 100; i++) { cudaMemcpyPeerAsync(B2->data,1,A2->data,0,A2->bytes,s); cudaSetDevice(1); cudaMemcpyPeerAsync(A1->data,0,B1->data,1,B1->bytes,s2); cudaSetDevice(0); cudaStreamSynchronize(s); cudaSetDevice(1); cudaStreamSynchronize(s2); cudaSetDevice(0); } gpu.tock("cuda"); } MPI_Barrier(MPI_COMM_WORLD); gpu.shutdown_MPI(); } struct arg_struct { ClusterNet *gpus; WikiMaxoutNet *net; int device; }; void *run_net(void * args) { struct arg_struct *_args = (struct arg_struct*)args; cout << "device: " << _args->device << endl; cudaSetDevice(_args->device); _args->net->run(); return 0; } void *print_message(void*) { ClusterNet gpu = ClusterNet(124345); WikiMaxoutNet net = WikiMaxoutNet(gpu); net.run(); return 0; } void bandwidth_test_MPI(int argc, char *argv[]) { ClusterNet *gpu = new ClusterNet(argc,argv,1235,true); std::vector<MPI_Request*> sends; std::vector<MPI_Request*> recvs; std::vector<Matrix*> lSync; std::vector<Matrix*> lData; int packages = 10; float time = 0; for(int epoch = 1; epoch < 2000; epoch++) { if(lData.size() > 0) { for(int i = 0; i < packages; i++) { cudaFree(lSync[i]->data); cudaFree(lData[i]->data); } lSync.clear(); lData.clear(); } for(int i = 0; i < 
packages; i++) { lSync.push_back(zeros(128*epoch,128*epoch)); lData.push_back(gpu->rand(128*epoch,128*epoch)); } for(int j = 0; j < packages; j++) { MPI_Request *send_request = new MPI_Request; MPI_Request *recv_request = new MPI_Request; sends.push_back(send_request); recvs.push_back(recv_request); int target = gpu->MYRANK +1 == gpu->MPI_SIZE ? 0 : gpu->MYRANK+1; int source = gpu->MYRANK-1 == -1 ? gpu->MPI_SIZE-1 : gpu->MYRANK-1; gpu->tick(); for (int i = 0; i < gpu->MPI_SIZE -1; i++) { //MPI_Irecv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,999,MPI_COMM_WORLD,recv_request); //MPI_Isend(lData[j]->data,lData[j]->size,MPI_FLOAT,target,999,MPI_COMM_WORLD,send_request); //MPI_Isend(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD,send_request); if(i == gpu->MYRANK) { MPI_Send(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD); MPI_Recv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,j,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } else { MPI_Recv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,j,MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Send(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD); } } gpu->tick(); } /* gpu->tick(); for(int i = 0; i < packages; i++) { MPI_Wait(sends[i],MPI_STATUS_IGNORE); MPI_Wait(recvs[i],MPI_STATUS_IGNORE); } */ time = gpu->tock(); //for(int i = 0; i < packages; i++) //assert(sum(lData[i]) == sum(lSync[i])); printdim(lData[0]); cout << 10*packages*lData[0]->bytes/1024./1024./1024./time << " GB/s" << endl; } gpu->shutdown_MPI(); } void bandwidth_test_peer() { ClusterNet *gpu = new ClusterNet(1235); std::vector<Matrix*> lSync0; std::vector<Matrix*> lData0; std::vector<Matrix*> lSync1; std::vector<Matrix*> lData1; std::vector<cudaStream_t> s0s; std::vector<cudaStream_t> s1s; int packages = 1; float time = 0; cudaSetDevice(0); cudaDeviceEnablePeerAccess(1,0); cudaSetDevice(1); cudaDeviceEnablePeerAccess(0,0); for(int i = 0; i < packages; i++) { cudaStream_t s0; cudaStream_t s1; cudaSetDevice(0); cudaStreamCreate(&s0); cudaSetDevice(1); cudaStreamCreate(&s1); s0s.push_back(s0); s1s.push_back(s1); } cudaSetDevice(0); int access = 0; cudaDeviceCanAccessPeer(&access,0,1); cout << access << endl; cudaDeviceCanAccessPeer(&access,1,0); cout << access << endl; for(int epoch = 1; epoch < 100; epoch++) { if(lSync0.size() > 0) { for(int i = 0; i < packages; i++) { cudaFree(lSync0[i]->data); cudaFree(lData0[i]->data); cudaFree(lSync1[i]->data); cudaFree(lData1[i]->data); } lSync0.clear(); lData0.clear(); lSync1.clear(); lData1.clear(); } for(int i = 0; i < packages; i++) { cudaSetDevice(0); lSync0.push_back(zeros(128*epoch,128*epoch)); lData0.push_back(gpu->rand(128*epoch,128*epoch)); cudaSetDevice(1); lSync1.push_back(zeros(128*epoch,128*epoch)); lData1.push_back(gpu->rand(128*epoch,128*epoch)); } cudaSetDevice(0); gpu->tick(); for(int j = 0; j < packages; j++) { cudaMemcpyAsync(lSync1[j]->data,lData0[j]->data,lData0[j]->bytes,cudaMemcpyDefault, s0s[j]); cudaSetDevice(1); cudaMemcpyAsync(lSync0[j]->data,lData1[j]->data,lData1[j]->bytes,cudaMemcpyDefault,s1s[j]); cudaSetDevice(0); } for(int i = 0; i < packages; i++) { cudaStreamSynchronize(s0s[i]); cudaStreamSynchronize(s1s[i]); } time = gpu->tock()/1000.; cout << packages*lData0[0]->bytes/1024./1024./1024./time << " GB/s" << endl; } } void bandwidth_test_kernel() { ClusterNet *gpu = new ClusterNet(1235); std::vector<Matrix*> lSync0; std::vector<Matrix*> lData0; std::vector<Matrix*> lSync1; std::vector<Matrix*> lData1; std::vector<cudaStream_t> s0s; std::vector<cudaStream_t> s1s; int packages = 
10; float time = 0; cudaSetDevice(0); cudaDeviceEnablePeerAccess(1,0); cudaSetDevice(1); cudaDeviceEnablePeerAccess(0,0); for(int i = 0; i < packages; i++) { cudaStream_t s0; cudaStream_t s1; cudaSetDevice(0); cudaStreamCreate(&s0); cudaSetDevice(1); cudaStreamCreate(&s1); s0s.push_back(s0); s1s.push_back(s1); } cudaSetDevice(0); int access = 0; cudaDeviceCanAccessPeer(&access,0,1); cout << access << endl; cudaDeviceCanAccessPeer(&access,1,0); cout << access << endl; for(int epoch = 1; epoch < 1000; epoch++) { if(lSync0.size() > 0) { for(int i = 0; i < packages; i++) { cudaFree(lSync0[i]->data); cudaFree(lData0[i]->data); cudaFree(lSync1[i]->data); cudaFree(lData1[i]->data); } lSync0.clear(); lData0.clear(); lSync1.clear(); lData1.clear(); } for(int i = 0; i < packages; i++) { cudaSetDevice(0); lSync0.push_back(zeros(128*epoch,128*epoch)); lData0.push_back(gpu->rand(128*epoch,128*epoch)); cudaSetDevice(1); lSync1.push_back(zeros(128*epoch,128*epoch)); lData1.push_back(gpu->rand(128*epoch,128*epoch)); } cudaSetDevice(0); gpu->tick(); for(int j = 0; j < packages; j++) { add(lSync0[j],lData1[j],lSync0[j]); cudaSetDevice(1); add(lSync1[j],lData0[j],lSync1[j]); cudaSetDevice(0); } cudaDeviceSynchronize(); cudaSetDevice(1); cudaDeviceSynchronize(); cudaSetDevice(0); time = gpu->tock(); /* for(int i = 0; i < packages; i++) assert(sum(lData0[i]) == sum(lSync1[i])); for(int i = 0; i < packages; i++) assert(sum(lData1[i]) == sum(lSync0[i])); */ printdim(lSync0[0]); cout << 1000*2*packages*lData0[0]->bytes/1024./1024./1024./time << " GB/s" << endl; } } void bandwidth_test_compression(int argc, char *argv[]) { ClusterNet *gpu = new ClusterNet(argc,argv,1235,true); MPI_Request *send_request = new MPI_Request; MPI_Request *recv_request = new MPI_Request; Matrix *w_grad_next = empty(1024,1024); Matrix *w_next_sync = empty(1024,1024); //warmup int target = gpu->MYRANK +1 == gpu->MPI_SIZE ? 0 : gpu->MYRANK+1; int source = gpu->MYRANK-1 == -1 ? gpu->MPI_SIZE-1 : gpu->MYRANK-1; for (int i = 0; i < gpu->MPI_SIZE - 1; i++) { MPI_Isend(w_grad_next->data,w_grad_next->size,MPI_FLOAT,target,i,MPI_COMM_WORLD, send_request); MPI_Irecv(w_next_sync->data,w_grad_next->size,MPI_FLOAT,source,i,MPI_COMM_WORLD,recv_request); target = target +1 == gpu->MPI_SIZE ? 0 : target+1; source = source-1 == -1 ? gpu->MPI_SIZE-1 : source-1; } MPI_Wait(recv_request,MPI_STATUS_IGNORE); int times = 100; gpu->tick(); for(int i = 0; i < times; i++) { target = gpu->MYRANK +1 == gpu->MPI_SIZE ? 0 : gpu->MYRANK+1; source = gpu->MYRANK-1 == -1 ? gpu->MPI_SIZE-1 : gpu->MYRANK-1; for (int i = 0; i < gpu->MPI_SIZE - 1; i++) { MPI_Isend(w_grad_next->data,w_grad_next->size,MPI_FLOAT,target,i,MPI_COMM_WORLD, send_request); MPI_Recv(w_next_sync->data,w_grad_next->size,MPI_FLOAT,source,i,MPI_COMM_WORLD, MPI_STATUS_IGNORE); target = target +1 == gpu->MPI_SIZE ? 0 : target+1; source = source-1 == -1 ? 
gpu->MPI_SIZE-1 : source-1; } //MPI_Wait(send_request,MPI_STATUS_IGNORE); } float sec = gpu->tock()*1000.0; float GB = 3*times*w_grad_next->bytes/(1024.0*1024.0*1024.0); if(gpu->MYRANK == 0) { cout << "Size in GB: " << GB << endl; cout << "GB/s: " << GB/sec << endl; } gpu->shutdown_MPI(); } void simple_bandwidth_test(int argc, char *argv[]) { ClusterNet *gpu = new ClusterNet(argc,argv,1235,true); MPI_Request *send_request = new MPI_Request; MPI_Request *recv_request = new MPI_Request; int size = 12000; for(int i = 8; i < size; i+=8) { for(int j = 0; j < 3; j++) { Matrix *w_grad_next; if(j==0) w_grad_next = empty(i,i); if(j==1) w_grad_next = empty(i/2,i/2); if(j==2) w_grad_next = empty(i/4,i/8); Matrix *w_next_sync; if(j==0) w_next_sync = empty(i,i); if(j==1) w_next_sync = empty(i/2,i/2); if(j==2) w_next_sync = empty(i/4,i/8); if(gpu->MYRANK == 0) MPI_Send(w_grad_next->data,w_grad_next->size,MPI_FLOAT,1,999,MPI_COMM_WORLD); if(gpu->MYRANK == 1) MPI_Recv(w_next_sync->data,w_next_sync->size,MPI_FLOAT,0,999,MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Barrier(MPI_COMM_WORLD); int times = 100; gpu->tick(); for(int k = 0; k < times; k++) { if(gpu->MYRANK == 0) MPI_Send(w_grad_next->data,w_grad_next->size,MPI_FLOAT,1,999,MPI_COMM_WORLD); if(gpu->MYRANK == 1) MPI_Recv(w_next_sync->data,w_next_sync->size,MPI_FLOAT,0,999,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } float quant = 9.5e-08f; float dequant = 2.0e-08f; float compreess = 1.e-07f; float decompress = 5.0e-08f; float added_penalty = 0.0f; if(j == 1)added_penalty = compreess + decompress; if(j == 2)added_penalty = quant + dequant; //cout << 100*(added_penalty)*w_grad_next->size << endl; float sec = gpu->tock() + (100*(added_penalty)*(i*i)); float GB = times*w_grad_next->bytes/(1024.0*1024.0*1024.0); if(gpu->MYRANK == 0) { cout << "Size: " << w_grad_next->rows << "x" << w_grad_next->cols << " GB/s: " << GB/(sec/1000) << " " << sec*(j == 2 ? 2.0 : 1.0) << "ms"<< endl; } cudaFree(w_grad_next->data); cudaFree(w_next_sync->data); } } gpu->shutdown_MPI(); } void model_parallelism_test(int argc, char *argv[]) { ClusterNet *GPU = new ClusterNet(argc,argv,1235,true); MPI_Request *send_request = new MPI_Request; MPI_Request *recv_request = new MPI_Request; std::vector<MPI_Request *> send_requests; std::vector<MPI_Request *> recv_requests; for(int i = 0; i < GPU->MPI_SIZE-1; i++) { send_requests.push_back(new MPI_Request); recv_requests.push_back(new MPI_Request); } float max_value = 1.0f; for(int round = 128; round <= 8192; round+=128) { int batch_size = 256; int inner = round; int outer = round; Matrix *A = GPU->rand(batch_size,inner); Matrix *B = GPU->distributed_uniformSqrtWeight(inner,outer); Matrix *B_normal = GPU->uniformSqrtWeight(inner,outer); Matrix *out = zeros(batch_size,outer); Matrix *out_stacked = zeros(batch_size,outer); int col_split_size = (B->isDistributed == 1 ? B->cols_distributed : B->cols) / GPU->MPI_SIZE; int remainder = (B->isDistributed == 1 ? 
B->cols_distributed : B->cols) - (col_split_size*GPU->MPI_SIZE); if(GPU->MYRANK == 0) cout << batch_size << "x" << inner << " DOT " << inner << "x" << outer << endl; Matrix** arrOut = (Matrix**) malloc(sizeof(Matrix*) * GPU->MPI_SIZE); Matrix** arrOut8 = (Matrix**) malloc(sizeof(Matrix*) * GPU->MPI_SIZE); for (int i = 0; i < GPU->MPI_SIZE; i++) { if (i == GPU->MPI_SIZE - 1) { arrOut[i] = empty(A->rows, col_split_size + remainder); arrOut8[i] = empty_char(A->rows, col_split_size + remainder); } else { arrOut[i] = empty(A->rows, col_split_size); arrOut8[i] = empty_char(A->rows, col_split_size); } } float **h_arrA = (float**) malloc(sizeof(float*) * GPU->MPI_SIZE); unsigned char **h_arrA8 = (unsigned char**) malloc(sizeof(unsigned char*) * GPU->MPI_SIZE); for (int i = 0; i < GPU->MPI_SIZE; i++) { h_arrA[i] = arrOut[i]->data; h_arrA8[i] = arrOut8[i]->char_data; } float **d_arrA; cudaMalloc((void**) &d_arrA, sizeof(float*) * GPU->MPI_SIZE); cudaMemcpy(d_arrA, h_arrA, sizeof(float*) * GPU->MPI_SIZE,cudaMemcpyDefault); unsigned char **d_arrA8; cudaMalloc((unsigned char**) &d_arrA8, sizeof(unsigned char*) * GPU->MPI_SIZE); cudaMemcpy(d_arrA8, h_arrA8, sizeof(unsigned char*) * GPU->MPI_SIZE,cudaMemcpyDefault); for(int epoch = 0; epoch < 2; epoch++) for(int type = 0; type < 3; type++) { std::string text = ""; if(type == 0) text = "DOT"; else if(type == 1) text = "DOT32BIT"; else if(type == 2) text = "DOT8BIT"; if(GPU->MYRANK == 0 && epoch == 1){ GPU->tick(text); } for(int i = 0; i < 100; i++) { if(type == 0) { GPU->dot(A,B_normal,out); continue; } GPU->dot(A,B,arrOut[GPU->MYRANK]); int target = GPU->MYRANK +1 == GPU->MPI_SIZE ? 0 : GPU->MYRANK+1; int source = GPU->MYRANK-1 == -1 ? GPU->MPI_SIZE-1 : GPU->MYRANK-1; if(type == 2) { GPU->compression_8bit(arrOut[GPU->MYRANK],max_value,arrOut8[GPU->MYRANK]); for (int i = 0; i < GPU->MPI_SIZE - 1; i++) { MPI_Isend(arrOut8[GPU->MYRANK]->char_data,arrOut8[GPU->MYRANK]->size,MPI_CHAR,target,i,MPI_COMM_WORLD, send_requests[i]); MPI_Irecv(arrOut8[source]->char_data,arrOut8[source]->size,MPI_CHAR,source,i,MPI_COMM_WORLD,recv_requests[i]); target = target +1 == GPU->MPI_SIZE ? 0 : target+1; source = source-1 == -1 ? GPU->MPI_SIZE-1 : source-1; } } if(type == 1) { for (int i = 0; i < GPU->MPI_SIZE - 1; i++) { MPI_Isend(arrOut[GPU->MYRANK]->data,arrOut[GPU->MYRANK]->size,MPI_FLOAT,target,i,MPI_COMM_WORLD, send_requests[i]); MPI_Irecv(arrOut[source]->data,arrOut[source]->size,MPI_FLOAT,source,i,MPI_COMM_WORLD,recv_requests[i]); target = target +1 == GPU->MPI_SIZE ? 0 : target+1; source = source-1 == -1 ? 
GPU->MPI_SIZE-1 : source-1; } } //MPI_Wait(next->send_request,MPI_STATUS_IGNORE); for(int i = 0; i < GPU->MPI_SIZE-1; i++) MPI_Wait(recv_requests[i],MPI_STATUS_IGNORE); if(type == 2) { for (int i = 0; i < GPU->MPI_SIZE; i++) { if(i == GPU->MYRANK){continue;} GPU->decompression_8bit(arrOut8[i],max_value,arrOut[i]); } } hStackN(d_arrA, arrOut[0]->size, out_stacked, GPU->MPI_SIZE); } if(GPU->MYRANK == 0 && epoch == 1){ GPU->tock(text); } /* MPI_Barrier(MPI_COMM_WORLD); if(type == 0) printsum(out); else if(type == 1) printsum(out_stacked); else if(type == 2) printsum(out_stacked); MPI_Barrier(MPI_COMM_WORLD); if(type == 0) printmat(out,0,4,0,4); else if(type == 1) printmat(out_stacked,0,4,0,4); else if(type == 2) printmat(out_stacked,0,4,0,4); */ if(type == 0) { abs(out,out); max_value = max(out); } } cudaFree(A->data); cudaFree(B->data); cudaFree(out->data); cudaFree(out_stacked->data); cudaFree(d_arrA8); cudaFree(d_arrA); for(int i = 0; i < GPU->MPI_SIZE; i++) { cudaFree(arrOut[i]->data); cudaFree(arrOut8[i]->char_data); } size_t total, free; cudaMemGetInfo(&free, &total); } GPU->shutdown_MPI(); } void simple_bandwidth_test_CPU(int argc, char *argv[]) { ClusterNet *gpu = new ClusterNet(argc,argv,1235,true); MPI_Request *send_request = new MPI_Request; MPI_Request *recv_request = new MPI_Request; size_t size = 1024*1024*1024; float *data = (float*)malloc(sizeof(float)*size); float *data_sync = (float*)malloc(sizeof(float)*size); int times = 10; gpu->tick(); for(int i = 0; i < times; i++) { if(gpu->MYRANK == 0) { MPI_Send(data,size,MPI_FLOAT,1,999,MPI_COMM_WORLD); //MPI_Recv(w_next_sync->data,w_next_sync->size,MPI_FLOAT,0,999,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } if(gpu->MYRANK == 1) { MPI_Recv(data_sync,size,MPI_FLOAT,0,999,MPI_COMM_WORLD, MPI_STATUS_IGNORE); //MPI_Send(w_grad_next->data,w_grad_next->size,MPI_FLOAT,1,999,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } double sec = gpu->tock()*1000.0; double GB = times*size*4.0/(1024.0*1024.0*1024.0); if(gpu->MYRANK == 0) { cout << "Size in GB: " << GB << endl; cout << "GB/s: " << GB/sec << endl; } gpu->shutdown_MPI(); } void compression_test(int argc, char *argv[]) { ClusterNet *gpu = new ClusterNet(); Matrix *A = scalarMul(gpu->randn(5120,5120),1.0f/10.0f); Matrix *out = empty_char(5120,5120); gpu->tick(); for(int i = 0; i < 10000; i++) gpu->compression_8bit(A, 0.1f,out); gpu->tock(); gpu->tick(); for(int i = 0; i < 10000; i++) gpu->decompression_8bit(out, 0.1f,A); gpu->tock(); } int main(int argc, char *argv[]) { //bandwidth_test_peer(); //bandwidth_test_MPI(argc,argv); //bandwidth_test_kernel(); //compression_test(argc,argv); //simple_bandwidth_test(argc,argv); //simple_bandwidth_test_CPU(argc,argv); //model_parallelism_test(argc,argv); //ClusterNet *gpu = new ClusterNet(234); /* Matrix *rdm = gpu->rand_numbers(10,10); printmat(rdm); */ /* ClusterNet *gpu = new ClusterNet(234); int out_rows = 128; int out_cols = 800; int inner = 784; Matrix *A = gpu->rand(out_rows,inner); Matrix *B = gpu->rand(inner,out_cols); Matrix *out1 = zeros(out_rows,out_cols); Matrix *charA = empty_char(out_rows,inner); Matrix *charB = empty_char(inner,out_cols); Matrix *out2 = empty(out_rows,out_cols); Matrix *out3 = empty(out_rows,out_cols); gpu->tick(); for(int i = 0; i < 100; i++) gpu->dot(A,B,out3); gpu->tock(); float maxA = max(abs(A)); float maxB = max(abs(B)); gpu->compression_8bit(A,maxA,charA); gpu->compression_8bit(B,maxB,charB); //printmat(A); //printmat(gpu->decompression_8bit(charA,maxA)); //printmat(B); //printmat(gpu->decompression_8bit(charB,maxB)); //cout 
<< sum(gpuSqrt(square(sub(B,gpu->decompression_8bit(charB,maxB)))))/(float)B->size << endl; //cout << sum(gpuSqrt(square(sub(A,gpu->decompression_8bit(charA,maxA)))))/(float)B->size << endl; //gpu->compression_8bit(A,maxA,charA); //printmat(out1); //printmat(out1,60,65,70,80); gpu->tick(); for(int i = 0; i < 100; i++) { fill_matrix(out1,0.0f); gpu->dot8bit(charA,charB,maxA,maxB,out1); } gpu->tock(); gpu->tick(); for(int i = 0; i < 100; i++) gpu->dot8bit_shared(charA,charB,maxA,maxB,out2); gpu->tock(); //printmat(gpu->decompression_8bit(charB,maxB)); //printmat(out1,60,65,70,80); //printmat(out2,60,65,70,80); //printmat(out1); //printmat(out2); //printsum(out1); //printsum(out2); cout << sum(gpuSqrt(square(sub(out1,out2))))/(float)out1->size << endl; cout << sum(gpuSqrt(square(sub(out1,out3))))/(float)out1->size << endl; cout << sum(gpuSqrt(square(sub(out2,out3))))/(float)out1->size << endl; //cout << "max A " << maxA <<endl; //cout << "max B " << maxB <<endl; */ ClusterNet *gpu = new ClusterNet(argc,argv,123635,false); //ClusterNet *gpu = new ClusterNet(argc,argv); /* Matrix *A = gpu->distributed_uniformSqrtWeight(6,4); Matrix *B = gpu->rand(4,6); Matrix *A2 = empty(6,2); A2->data = A->data; printmat(A); Matrix *C = gpu->dotMPI(B,A); Matrix *C2 = gpu->dot(B,A2); printmat(C); printmat(C2); gpu->shutdown_MPI(); */ //Matrix *X = read_hdf5("/home/tim/data/mnist/X.hdf5"); //Matrix *y = read_hdf5("/home/tim/data/mnist/y.hdf5"); //Matrix *X = gpu->distribute_file("/home/tim/data/mnist/X.hdf5"); //Matrix *y = gpu->distribute_file("/home/tim/data/mnist/y.hdf5"); Matrix *X = gpu->distribute_rows_hdf5_file("/home/tim/data/mnist/distributed_X.hdf5"); Matrix *y = gpu->distribute_rows_hdf5_file("/home/tim/data/mnist/distributed_y.hdf5"); //Matrix *y = gpu->distribute_rows_hdf5_file("/home/tim/data/mnist/y_15000.hdf5"); printdim(X); printdim(y); BatchAllocator b = BatchAllocator(); //16384 int batch_size_per_GPU = 128; b.init(X,y,(1.0-0.85715),batch_size_per_GPU,128,gpu, Single_GPU); Layer *l0 = new Layer(X->cols,batch_size_per_GPU,Input,gpu); l0->PARALLELISM = DataParallelism; //l0->PARALLELISM = ModelParallelism; Layer *l1 = new Layer(1024, Logistic, l0); l1->PARALLELISM = DataParallelism; //l1->PARALLELISM = ModelParallelism; Layer *l2 = new Layer(1024, Logistic, l1); l2->PARALLELISM = DataParallelism; //l2->PARALLELISM = ModelParallelism; Layer *l3 = new Layer(10, Softmax, l2); l3->PARALLELISM = DataParallelism; //l3->PARALLELISM = ModelParallelism; l0->MAX_GRAD_VALUE = 0.005; l1->MAX_GRAD_VALUE = 0.002; l2->MAX_GRAD_VALUE = 0.01; l3->MAX_GRAD_VALUE = 1; /* cout << "l0: " << l0->MAX_GRAD_VALUE << endl; cout << "l1: " << l1->MAX_GRAD_VALUE << endl; cout << "l2: " << l2->MAX_GRAD_VALUE << endl; cout << "l3: " << l3->MAX_GRAD_VALUE << endl; */ l0->DROPOUT = 0.2f; l0->set_hidden_dropout(0.5f); cout << gpu->MYRANK << endl; float decay = 0.99f; gpu->tick("pass"); b.SKIP_LAST_BATCH = true; int epochs = 75; for(int epoch = 0; epoch < epochs; epoch++) { gpu->tick("epoch"); if(gpu->MYRANK == 0) cout << "EPOCH: " << epoch + 1 << endl; b.propagate_through_layers(l0,Training,epoch); b.propagate_through_layers(l0,Trainerror,epoch); b.propagate_through_layers(l0,CVerror,epoch); l0->learning_rate_decay(decay); /* cout << "l0: " << l0->MAX_GRAD_VALUE << endl; cout << "l1: " << l1->MAX_GRAD_VALUE << endl; cout << "l2: " << l2->MAX_GRAD_VALUE << endl; cout << "l3: " << l3->MAX_GRAD_VALUE << endl; */ if(epoch == 60) { l0->dropout_decay(); decay = 0.85f; /* l0->compression = bits_32; l1->MAX_GRAD_VALUE = bits_32; 
l2->MAX_GRAD_VALUE = bits_32; l3->MAX_GRAD_VALUE = bits_32; */ } //cout << l1->MAX_GRAD_VALUE << endl; gpu->tock("epoch"); } gpu->tock("pass"); gpu->shutdown_MPI(); if(gpu->MYRANK == 0) { int n1 = l3->Train_errors[0].size(); int n2 = l3->CV_errors[0].size(); cout << n1 << endl; cout << n2 << endl; Matrix *train = empty_cpu(epochs,n1); Matrix *cv = empty_cpu(epochs,n2); for(int i = 0; i < epochs; i++) { for(int j = 0; j < n1; j++) train->data[j + (i*n1)] = l3->Train_errors[i][j]; for(int j = 0; j < n2; j++) cv->data[j + (i*n2)] = l3->CV_errors[i][j]; } write_hdf5("/home/tim/data/mnist/results/8bit/train_error_model.hdf5" ,train); write_hdf5("/home/tim/data/mnist/results/8bit/cv_error_model.hdf5",cv); } /* cudaSetDevice(0); Matrix *X = read_hdf5("/home/tim/data/mnist/X.hdf5"); Matrix *y = read_hdf5("/home/tim/data/mnist/y.hdf5"); ClusterNet gpu = ClusterNet(1235); BatchAllocator b = BatchAllocator(); std::vector<int> layers; layers.push_back(1200); layers.push_back(1200); std::vector<float> dropout; dropout.push_back(0.2f); dropout.push_back(0.5f); dropout.push_back(0.5f); BatchAllocator allocator = BatchAllocator(); allocator.init(X,y,(1.0-0.8571429),128,256,gpu, Single_GPU); DeepNeuralNetwork net = DeepNeuralNetwork(layers,Classification, gpu, allocator, 10); net.EPOCHS = 500; net.TRANSITION_EPOCH = 75; net.LEARNING_RATE = 0.003; net.UPDATE_TYPE = RMSProp; net.DROPOUT = dropout; //net.MAIN_UNIT = Double_Rectified_Linear; net.train(); */ //cudaSetDevice(1); //ClusterNet *gpus = new ClusterNet(123635); //WikiMaxoutNet_PCIe net = WikiMaxoutNet_PCIe(gpus); //net.run(); /* cudaSetDevice(0); struct arg_struct *args0 = (arg_struct*)malloc(sizeof(arg_struct)); ClusterNet *gpus0 = new ClusterNet(23452345); WikiMaxoutNet *net0 = new WikiMaxoutNet(gpus0[0]); args0->gpus = gpus0; args0->net = net0; args0->device = 0; net0->run(); pthread_t t0; pthread_create(&t0, NULL, &run_net, args0); cudaSetDevice(1); struct arg_struct *args1 = (arg_struct*)malloc(sizeof(arg_struct)); ClusterNet *gpus1 = new ClusterNet(23452345); WikiMaxoutNet *net1 = new WikiMaxoutNet(gpus1[0]); args1->gpus = gpus1; args1->net = net1; args1->device = 1; pthread_t t1; //pthread_create(&t1, NULL, &run_net, args1); cudaSetDevice(2); struct arg_struct *args2 = (arg_struct*)malloc(sizeof(arg_struct)); ClusterNet *gpus2 = new ClusterNet(23452345); WikiMaxoutNet *net2 = new WikiMaxoutNet(gpus2[0]); args2->gpus = gpus2; args2->net = net2; args2->device = 2; pthread_t t2; //pthread_create(&t2, NULL, &run_net, args2); cout << "rolfen kek!" << endl; void* result0; void* result1; void* result2; pthread_join(t0,&result0); //pthread_join(t1,&result1); //pthread_join(t2,&result2); */ }
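// A minimal sketch of the slice-exchange pattern used by model_parallelism_test
// above: every rank computes its own output slice, posts a non-blocking send of
// that slice to each other rank and a matching receive for each other rank's
// slice, then waits before stacking the full output. It relies on mpi.h and
// <vector> already included by this file; the function name and parameters
// (slices, slice_size, rank, mpi_size) are illustrative assumptions, not part
// of the original ClusterNet API.
static void exchange_slices_sketch(float **slices, int slice_size, int rank, int mpi_size)
{
	std::vector<MPI_Request> sends(mpi_size - 1);
	std::vector<MPI_Request> recvs(mpi_size - 1);
	int target = (rank + 1) % mpi_size;
	int source = (rank - 1 + mpi_size) % mpi_size;
	for (int step = 0; step < mpi_size - 1; step++)
	{
		// send my own slice to the current target, receive that source's slice into its slot
		MPI_Isend(slices[rank], slice_size, MPI_FLOAT, target, step, MPI_COMM_WORLD, &sends[step]);
		MPI_Irecv(slices[source], slice_size, MPI_FLOAT, source, step, MPI_COMM_WORLD, &recvs[step]);
		target = (target + 1) % mpi_size;
		source = (source - 1 + mpi_size) % mpi_size;
	}
	MPI_Waitall(mpi_size - 1, sends.data(), MPI_STATUSES_IGNORE);
	MPI_Waitall(mpi_size - 1, recvs.data(), MPI_STATUSES_IGNORE);
}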
d3a82631a351e8079e29086d66d1b67c50ae064e.hip
// !!! This is a file automatically generated by hipify!!! #include "CudaRijndael.h" #include <iostream> #include <string> #include <vector> #include <fstream> #include <time.h> #include <chrono> #include <sstream> #include <iomanip> #include <ctime> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "Utils.h" #define BLOCK_SIZE 16 #define THREADS_PER_BLOCK 1024 using namespace std; //CONSTRUCTOR __device__ __host__ CudaRijndael::CudaRijndael() : m_bKeyInit(false) { } //DESTRUCTOR __device__ __host__ CudaRijndael::~CudaRijndael() { } //Expand a user-supplied key material into a session key. // key - The 128/192/256-bit user-key to use. // chain - initial chain block for CBC and CFB modes. // keylength - 16, 24 or 32 bytes // blockSize - The block size in bytes of this Rijndael (16, 24 or 32 bytes). __device__ __host__ void CudaRijndael::MakeKey(char const* key, char const* chain, int keylength, int blockSize) { m_keylength = keylength; m_blockSize = blockSize; //Initialize the chain memcpy(m_chain0, chain, m_blockSize); memcpy(m_chain, chain, m_blockSize); //Calculate Number of Rounds switch (m_keylength) { case 16: m_iROUNDS = (m_blockSize == 16) ? 10 : (m_blockSize == 24 ? 12 : 14); break; case 24: m_iROUNDS = (m_blockSize != 32) ? 12 : 14; break; default: // 32 bytes = 256 bits m_iROUNDS = 14; } int BC = m_blockSize / 4; int i, j; for (i = 0; i <= m_iROUNDS; i++) { for (j = 0; j<BC; j++) m_Ke[i][j] = 0; } for (i = 0; i <= m_iROUNDS; i++) { for (j = 0; j<BC; j++) m_Kd[i][j] = 0; } int ROUND_KEY_COUNT = (m_iROUNDS + 1) * BC; int KC = m_keylength / 4; //Copy user material bytes into temporary ints int* pi = tk; char const* pc = key; for (i = 0; i<KC; i++) { *pi = (unsigned char)*(pc++) << 24; *pi |= (unsigned char)*(pc++) << 16; *pi |= (unsigned char)*(pc++) << 8; *(pi++) |= (unsigned char)*(pc++); } //Copy values into round key arrays int t = 0; for (j = 0; (j<KC) && (t<ROUND_KEY_COUNT); j++, t++) { m_Ke[t / BC][t%BC] = tk[j]; m_Kd[m_iROUNDS - (t / BC)][t%BC] = tk[j]; } int tt, rconpointer = 0; while (t < ROUND_KEY_COUNT) { //Extrapolate using phi (the round key evolution function) tt = tk[KC - 1]; tk[0] ^= (sm_S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ (sm_S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ (sm_S[tt & 0xFF] & 0xFF) << 8 ^ (sm_S[(tt >> 24) & 0xFF] & 0xFF) ^ (sm_rcon[rconpointer++] & 0xFF) << 24; if (KC != 8) for (i = 1, j = 0; i<KC;) tk[i++] ^= tk[j++]; else { for (i = 1, j = 0; i<KC / 2; ) tk[i++] ^= tk[j++]; tt = tk[KC / 2 - 1]; tk[KC / 2] ^= (sm_S[tt & 0xFF] & 0xFF) ^ (sm_S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ (sm_S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ (sm_S[(tt >> 24) & 0xFF] & 0xFF) << 24; for (j = KC / 2, i = j + 1; i<KC; ) tk[i++] ^= tk[j++]; } //Copy values into round key arrays for (j = 0; (j<KC) && (t<ROUND_KEY_COUNT); j++, t++) { m_Ke[t / BC][t%BC] = tk[j]; m_Kd[m_iROUNDS - (t / BC)][t%BC] = tk[j]; } } //Inverse MixColumn where needed for (int r = 1; r<m_iROUNDS; r++) for (j = 0; j<BC; j++) { tt = m_Kd[r][j]; m_Kd[r][j] = sm_U1[(tt >> 24) & 0xFF] ^ sm_U2[(tt >> 16) & 0xFF] ^ sm_U3[(tt >> 8) & 0xFF] ^ sm_U4[tt & 0xFF]; } m_bKeyInit = true; } //Convenience method to encrypt exactly one block of plaintext, assuming //Rijndael's default block size (128-bit). 
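// The rounds below use the 32-bit T-table form of Rijndael: each output column
// is built from four table lookups (sm_T1..sm_T4) on bytes taken from a rotated
// set of state columns, XORed with the round key word, which folds SubBytes,
// ShiftRows and MixColumns into a single step. The final round has no
// MixColumns, so it applies the plain S-box sm_S instead.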
// in - The plaintext // result - The ciphertext generated from a plaintext using the key __device__ __host__ void CudaRijndael::DefEncryptBlock(char const* in, char* result) { int* Ker = m_Ke[0]; int t0 = ((unsigned char)*(in++) << 24); t0 |= ((unsigned char)*(in++) << 16); t0 |= ((unsigned char)*(in++) << 8); (t0 |= (unsigned char)*(in++)) ^= Ker[0]; int t1 = ((unsigned char)*(in++) << 24); t1 |= ((unsigned char)*(in++) << 16); t1 |= ((unsigned char)*(in++) << 8); (t1 |= (unsigned char)*(in++)) ^= Ker[1]; int t2 = ((unsigned char)*(in++) << 24); t2 |= ((unsigned char)*(in++) << 16); t2 |= ((unsigned char)*(in++) << 8); (t2 |= (unsigned char)*(in++)) ^= Ker[2]; int t3 = ((unsigned char)*(in++) << 24); t3 |= ((unsigned char)*(in++) << 16); t3 |= ((unsigned char)*(in++) << 8); (t3 |= (unsigned char)*(in++)) ^= Ker[3]; int a0, a1, a2, a3; //Apply Round Transforms for (int r = 1; r < m_iROUNDS; r++) { Ker = m_Ke[r]; a0 = (sm_T1[(t0 >> 24) & 0xFF] ^ sm_T2[(t1 >> 16) & 0xFF] ^ sm_T3[(t2 >> 8) & 0xFF] ^ sm_T4[t3 & 0xFF]) ^ Ker[0]; a1 = (sm_T1[(t1 >> 24) & 0xFF] ^ sm_T2[(t2 >> 16) & 0xFF] ^ sm_T3[(t3 >> 8) & 0xFF] ^ sm_T4[t0 & 0xFF]) ^ Ker[1]; a2 = (sm_T1[(t2 >> 24) & 0xFF] ^ sm_T2[(t3 >> 16) & 0xFF] ^ sm_T3[(t0 >> 8) & 0xFF] ^ sm_T4[t1 & 0xFF]) ^ Ker[2]; a3 = (sm_T1[(t3 >> 24) & 0xFF] ^ sm_T2[(t0 >> 16) & 0xFF] ^ sm_T3[(t1 >> 8) & 0xFF] ^ sm_T4[t2 & 0xFF]) ^ Ker[3]; t0 = a0; t1 = a1; t2 = a2; t3 = a3; } //Last Round is special Ker = m_Ke[m_iROUNDS]; int tt = Ker[0]; result[0] = sm_S[(t0 >> 24) & 0xFF] ^ (tt >> 24); result[1] = sm_S[(t1 >> 16) & 0xFF] ^ (tt >> 16); result[2] = sm_S[(t2 >> 8) & 0xFF] ^ (tt >> 8); result[3] = sm_S[t3 & 0xFF] ^ tt; tt = Ker[1]; result[4] = sm_S[(t1 >> 24) & 0xFF] ^ (tt >> 24); result[5] = sm_S[(t2 >> 16) & 0xFF] ^ (tt >> 16); result[6] = sm_S[(t3 >> 8) & 0xFF] ^ (tt >> 8); result[7] = sm_S[t0 & 0xFF] ^ tt; tt = Ker[2]; result[8] = sm_S[(t2 >> 24) & 0xFF] ^ (tt >> 24); result[9] = sm_S[(t3 >> 16) & 0xFF] ^ (tt >> 16); result[10] = sm_S[(t0 >> 8) & 0xFF] ^ (tt >> 8); result[11] = sm_S[t1 & 0xFF] ^ tt; tt = Ker[3]; result[12] = sm_S[(t3 >> 24) & 0xFF] ^ (tt >> 24); result[13] = sm_S[(t0 >> 16) & 0xFF] ^ (tt >> 16); result[14] = sm_S[(t1 >> 8) & 0xFF] ^ (tt >> 8); result[15] = sm_S[t2 & 0xFF] ^ tt; } //Convenience method to decrypt exactly one block of plaintext, assuming //Rijndael's default block size (128-bit). // in - The ciphertext. // result - The plaintext generated from a ciphertext using the session key. 
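// Decryption mirrors DefEncryptBlock using the inverse tables sm_T5..sm_T8 and
// the decryption key schedule m_Kd (MakeKey already folds InvMixColumns into
// the inner round keys via sm_U1..sm_U4), with the inverse S-box sm_Si in the
// last round.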
__device__ __host__ void CudaRijndael::DefDecryptBlock(char const* in, char* result) { int* Kdr = m_Kd[0]; int t0 = ((unsigned char)*(in++) << 24); t0 = t0 | ((unsigned char)*(in++) << 16); t0 |= ((unsigned char)*(in++) << 8); (t0 |= (unsigned char)*(in++)) ^= Kdr[0]; int t1 = ((unsigned char)*(in++) << 24); t1 |= ((unsigned char)*(in++) << 16); t1 |= ((unsigned char)*(in++) << 8); (t1 |= (unsigned char)*(in++)) ^= Kdr[1]; int t2 = ((unsigned char)*(in++) << 24); t2 |= ((unsigned char)*(in++) << 16); t2 |= ((unsigned char)*(in++) << 8); (t2 |= (unsigned char)*(in++)) ^= Kdr[2]; int t3 = ((unsigned char)*(in++) << 24); t3 |= ((unsigned char)*(in++) << 16); t3 |= ((unsigned char)*(in++) << 8); (t3 |= (unsigned char)*(in++)) ^= Kdr[3]; int a0, a1, a2, a3; for (int r = 1; r < m_iROUNDS; r++) // apply round transforms { Kdr = m_Kd[r]; a0 = (sm_T5[(t0 >> 24) & 0xFF] ^ sm_T6[(t3 >> 16) & 0xFF] ^ sm_T7[(t2 >> 8) & 0xFF] ^ sm_T8[t1 & 0xFF]) ^ Kdr[0]; a1 = (sm_T5[(t1 >> 24) & 0xFF] ^ sm_T6[(t0 >> 16) & 0xFF] ^ sm_T7[(t3 >> 8) & 0xFF] ^ sm_T8[t2 & 0xFF]) ^ Kdr[1]; a2 = (sm_T5[(t2 >> 24) & 0xFF] ^ sm_T6[(t1 >> 16) & 0xFF] ^ sm_T7[(t0 >> 8) & 0xFF] ^ sm_T8[t3 & 0xFF]) ^ Kdr[2]; a3 = (sm_T5[(t3 >> 24) & 0xFF] ^ sm_T6[(t2 >> 16) & 0xFF] ^ sm_T7[(t1 >> 8) & 0xFF] ^ sm_T8[t0 & 0xFF]) ^ Kdr[3]; t0 = a0; t1 = a1; t2 = a2; t3 = a3; } //Last Round is special Kdr = m_Kd[m_iROUNDS]; int tt = Kdr[0]; result[0] = sm_Si[(t0 >> 24) & 0xFF] ^ (tt >> 24); result[1] = sm_Si[(t3 >> 16) & 0xFF] ^ (tt >> 16); result[2] = sm_Si[(t2 >> 8) & 0xFF] ^ (tt >> 8); result[3] = sm_Si[t1 & 0xFF] ^ tt; tt = Kdr[1]; result[4] = sm_Si[(t1 >> 24) & 0xFF] ^ (tt >> 24); result[5] = sm_Si[(t0 >> 16) & 0xFF] ^ (tt >> 16); result[6] = sm_Si[(t3 >> 8) & 0xFF] ^ (tt >> 8); result[7] = sm_Si[t2 & 0xFF] ^ tt; tt = Kdr[2]; result[8] = sm_Si[(t2 >> 24) & 0xFF] ^ (tt >> 24); result[9] = sm_Si[(t1 >> 16) & 0xFF] ^ (tt >> 16); result[10] = sm_Si[(t0 >> 8) & 0xFF] ^ (tt >> 8); result[11] = sm_Si[t3 & 0xFF] ^ tt; tt = Kdr[3]; result[12] = sm_Si[(t3 >> 24) & 0xFF] ^ (tt >> 24); result[13] = sm_Si[(t2 >> 16) & 0xFF] ^ (tt >> 16); result[14] = sm_Si[(t1 >> 8) & 0xFF] ^ (tt >> 8); result[15] = sm_Si[t0 & 0xFF] ^ tt; } //Encrypt exactly one block of plaintext. // in - The plaintext. // result - The ciphertext generated from a plaintext using the key. __device__ __host__ void CudaRijndael::EncryptBlock(char const* in, char* result) { if (DEFAULT_BLOCK_SIZE == m_blockSize) { DefEncryptBlock(in, result); return; } int BC = m_blockSize / 4; int SC = (BC == 4) ? 0 : (BC == 6 ? 1 : 2); int s1 = sm_shifts[SC][1][0]; int s2 = sm_shifts[SC][2][0]; int s3 = sm_shifts[SC][3][0]; //Temporary Work Arrays int i; int tt; int* pi = t; for (i = 0; i<BC; i++) { *pi = ((unsigned char)*(in++) << 24); *pi |= ((unsigned char)*(in++) << 16); *pi |= ((unsigned char)*(in++) << 8); (*(pi++) |= (unsigned char)*(in++)) ^= m_Ke[0][i]; } //Apply Round Transforms for (int r = 1; r<m_iROUNDS; r++) { for (i = 0; i<BC; i++) a[i] = (sm_T1[(t[i] >> 24) & 0xFF] ^ sm_T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^ sm_T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^ sm_T4[t[(i + s3) % BC] & 0xFF]) ^ m_Ke[r][i]; memcpy(t, a, 4 * BC); } int j; //Last Round is Special for (i = 0, j = 0; i<BC; i++) { tt = m_Ke[m_iROUNDS][i]; result[j++] = sm_S[(t[i] >> 24) & 0xFF] ^ (tt >> 24); result[j++] = sm_S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16); result[j++] = sm_S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8); result[j++] = sm_S[t[(i + s3) % BC] & 0xFF] ^ tt; } } //Decrypt exactly one block of ciphertext. 
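// EncryptBlock above and DecryptBlock below are the generic-block-size paths:
// for the 16-byte default they forward to DefEncryptBlock/DefDecryptBlock,
// while for 24- or 32-byte Rijndael blocks they run the same rounds with
// per-row shift offsets taken from sm_shifts.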
// in - The ciphertext. // result - The plaintext generated from a ciphertext using the session key. __device__ __host__ void CudaRijndael::DecryptBlock(char const* in, char* result) { if (DEFAULT_BLOCK_SIZE == m_blockSize) { DefDecryptBlock(in, result); return; } int BC = m_blockSize / 4; int SC = BC == 4 ? 0 : (BC == 6 ? 1 : 2); int s1 = sm_shifts[SC][1][1]; int s2 = sm_shifts[SC][2][1]; int s3 = sm_shifts[SC][3][1]; //Temporary Work Arrays int i; int tt; int* pi = t; for (i = 0; i<BC; i++) { *pi = ((unsigned char)*(in++) << 24); *pi |= ((unsigned char)*(in++) << 16); *pi |= ((unsigned char)*(in++) << 8); (*(pi++) |= (unsigned char)*(in++)) ^= m_Kd[0][i]; } //Apply Round Transforms for (int r = 1; r<m_iROUNDS; r++) { for (i = 0; i<BC; i++) a[i] = (sm_T5[(t[i] >> 24) & 0xFF] ^ sm_T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^ sm_T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^ sm_T8[t[(i + s3) % BC] & 0xFF]) ^ m_Kd[r][i]; memcpy(t, a, 4 * BC); } int j; //Last Round is Special for (i = 0, j = 0; i<BC; i++) { tt = m_Kd[m_iROUNDS][i]; result[j++] = sm_Si[(t[i] >> 24) & 0xFF] ^ (tt >> 24); result[j++] = sm_Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16); result[j++] = sm_Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8); result[j++] = sm_Si[t[(i + s3) % BC] & 0xFF] ^ tt; } } __device__ __host__ void CudaRijndael::Encrypt(char const* in, char* result, size_t n, int iMode) { int i; char const* pin; char* presult; if (CBC == iMode) //CBC mode, using the Chain { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { Xor(m_chain, pin); EncryptBlock(m_chain, presult); memcpy(m_chain, presult, m_blockSize); pin += m_blockSize; presult += m_blockSize; } } else if (CFB == iMode) //CFB mode, using the Chain { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { EncryptBlock(m_chain, presult); Xor(presult, pin); memcpy(m_chain, presult, m_blockSize); pin += m_blockSize; presult += m_blockSize; } } else //ECB mode, not using the Chain { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { EncryptBlock(pin, presult); pin += m_blockSize; presult += m_blockSize; } } } __device__ __host__ void CudaRijndael::Decrypt(char const* in, char* result, size_t n, int iMode) { int i; char const* pin; char* presult; if (CBC == iMode) //CBC mode, using the Chain { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { DecryptBlock(pin, presult); Xor(presult, m_chain); memcpy(m_chain, pin, m_blockSize); pin += m_blockSize; presult += m_blockSize; } } else if (CFB == iMode) //CFB mode, using the Chain, not using Decrypt() { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { EncryptBlock(m_chain, presult); //memcpy(presult, pin, m_blockSize); Xor(presult, pin); memcpy(m_chain, pin, m_blockSize); pin += m_blockSize; presult += m_blockSize; } } else //ECB mode, not using the Chain { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { DecryptBlock(pin, presult); pin += m_blockSize; presult += m_blockSize; } } } __global__ void kernel(char* plaintext, char * ciphertext, int * size, CudaRijndael * rijndael) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int startIndex = idx * BLOCK_SIZE; int endIndex = (idx + 1) * BLOCK_SIZE; char blockPlain[BLOCK_SIZE]; if (endIndex >= *size) { endIndex = *size; } int p_i = 0; for (int i = startIndex; i < endIndex; ++i) { blockPlain[p_i++] = plaintext[i]; } while (p_i < BLOCK_SIZE) { blockPlain[p_i++] = '\0'; } char blockCipher[BLOCK_SIZE]; rijndael->EncryptBlock(blockPlain, blockCipher); int c_i = 0; for (int i = startIndex; i < 
endIndex; ++i) { ciphertext[i] = blockCipher[c_i++]; } } void RunOnGpu(CudaRijndael rijndael, int numThreadsPerBlock) { vector<char> data = Utils::ReadBytes("data/image.jpg"); stringstream ss; data = Utils::PadToMultipleOfN(data, BLOCK_SIZE);; int size = data.size(); ss << "Paded size : " << data.size(); Utils::Log(ss.str()); ss.str(string()); char *h_plaintext = Utils::VectorToArray(data, size); char *h_ciphertext = new char[size]; char *d_plaintext; char *d_ciphertext; int * d_size; CudaRijndael *d_rijndael; Utils::Log("Allocating cuda mem..."); auto startTime = Utils::CurrentTime(); hipMalloc((void**)&d_plaintext, size * sizeof(char)); hipMalloc((void**)&d_ciphertext, size * sizeof(char)); hipMalloc((void**)&d_size, sizeof(int)); hipMalloc((void**)&d_rijndael, sizeof(CudaRijndael)); hipMemcpy(d_plaintext, h_plaintext, size * sizeof(char), hipMemcpyHostToDevice); hipMemcpy(d_size, &size, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_rijndael, &rijndael, sizeof(CudaRijndael), hipMemcpyHostToDevice); auto endAllocationTime = Utils::CurrentTime(); Utils::Log("Alocated cuda mem."); int numEncryptionBlocks = size / BLOCK_SIZE; int numGpuBlocks = numEncryptionBlocks / numThreadsPerBlock + 1; hipLaunchKernelGGL(( kernel) , dim3(numGpuBlocks), dim3(numThreadsPerBlock), 0, 0, d_plaintext, d_ciphertext, d_size, d_rijndael); hipDeviceSynchronize(); auto endEncryptionTime = Utils::CurrentTime(); hipMemcpy(h_ciphertext, d_ciphertext, size * sizeof(char), hipMemcpyDeviceToHost); auto endCopyResultToCpuTime = Utils::CurrentTime(); ss.str(string()); ss << "Elapsed time for parallel encryption:" << endl; ss << "Encryption block size: " << BLOCK_SIZE; ss << endl; ss << "GPU blocks: " << numGpuBlocks << endl; ss << "Threads per block: " << numThreadsPerBlock << endl; chrono::duration<double> time = endAllocationTime - startTime; ss << "Cuda memory allocation time: " << time.count() << "s" << endl; time = endEncryptionTime - endAllocationTime; ss << "Encryption Time: " << time.count() << "s" << endl; time = endCopyResultToCpuTime - endEncryptionTime; ss << "Copy result to CPU time: " << time.count() << "s" << endl; time = endCopyResultToCpuTime - startTime; ss << "Total elapsed time:" << time.count() << "s" << endl; Utils::Log(ss.str()); hipError_t code = hipPeekAtLastError(); if (code != hipSuccess) { Utils::Log(hipGetErrorString(code)); } // Write the decrypted file for correctness check /*vector<char> encrypted(h_ciphertext, h_ciphertext + sizeof(h_ciphertext) / sizeof(h_ciphertext[0])); vector<vector<char>> encBlocks = Utils::GetBlocks(encrypted, BLOCK_SIZE);*/ char * decrypted = new char[size]; vector<char> vEncrypted = Utils::ArrayToVector(h_ciphertext, size); Utils::Log("Start writing to file.[encrypted]"); Utils::WriteBytes(vEncrypted, "data/encrypted_image.jpg"); Utils::Log("End writing to file.[encrypted]"); Utils::Log("Decrypting..."); rijndael.Decrypt(h_ciphertext, decrypted, size, CudaRijndael::ECB); Utils::Log("Decrypted."); vector<char> vDecrypted = Utils::ArrayToVector(decrypted, size); vector<char> decrNoPadding(vDecrypted.begin(), vDecrypted.begin() + size); Utils::Log("Start writing to file.[decrypted]"); Utils::WriteBytes(decrNoPadding, "data/decrypted_image.jpg"); Utils::Log("End writing to file.[decrypted]"); delete[] h_ciphertext; delete[] decrypted; hipFree(d_plaintext); hipFree(d_ciphertext); hipFree(d_size); hipFree(d_rijndael); } int main() { CudaRijndael rijndael; rijndael.MakeKey("abcdefghabcdefgh", "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 16, BLOCK_SIZE); RunOnGpu(rijndael, 
THREADS_PER_BLOCK);
	cout << "Enter a key...";
	char k;
	cin >> k;
	return 0;
}
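// A small host-side sketch of the launch sizing used in RunOnGpu above, kept
// illustrative: numEncryptionBlocks / numThreadsPerBlock + 1 always rounds up,
// but it launches one surplus block whenever the count is already an exact
// multiple. Ceiling division avoids that; the helper name below is an
// assumption, not part of the original code.
static inline int BlocksForCount(int items, int threadsPerBlock)
{
	// ceil(items / threadsPerBlock) without floating point
	return (items + threadsPerBlock - 1) / threadsPerBlock;
}
// Example: 2048 encryption blocks at 1024 threads per block -> 2 GPU blocks,
// while 2048 / 1024 + 1 launches 3; the surplus block is harmless here only
// because kernel() clamps endIndex to *size, so its threads never write to
// ciphertext.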
d3a82631a351e8079e29086d66d1b67c50ae064e.cu
#include "CudaRijndael.h" #include <iostream> #include <string> #include <vector> #include <fstream> #include <time.h> #include <chrono> #include <sstream> #include <iomanip> #include <ctime> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "Utils.h" #define BLOCK_SIZE 16 #define THREADS_PER_BLOCK 1024 using namespace std; //CONSTRUCTOR __device__ __host__ CudaRijndael::CudaRijndael() : m_bKeyInit(false) { } //DESTRUCTOR __device__ __host__ CudaRijndael::~CudaRijndael() { } //Expand a user-supplied key material into a session key. // key - The 128/192/256-bit user-key to use. // chain - initial chain block for CBC and CFB modes. // keylength - 16, 24 or 32 bytes // blockSize - The block size in bytes of this Rijndael (16, 24 or 32 bytes). __device__ __host__ void CudaRijndael::MakeKey(char const* key, char const* chain, int keylength, int blockSize) { m_keylength = keylength; m_blockSize = blockSize; //Initialize the chain memcpy(m_chain0, chain, m_blockSize); memcpy(m_chain, chain, m_blockSize); //Calculate Number of Rounds switch (m_keylength) { case 16: m_iROUNDS = (m_blockSize == 16) ? 10 : (m_blockSize == 24 ? 12 : 14); break; case 24: m_iROUNDS = (m_blockSize != 32) ? 12 : 14; break; default: // 32 bytes = 256 bits m_iROUNDS = 14; } int BC = m_blockSize / 4; int i, j; for (i = 0; i <= m_iROUNDS; i++) { for (j = 0; j<BC; j++) m_Ke[i][j] = 0; } for (i = 0; i <= m_iROUNDS; i++) { for (j = 0; j<BC; j++) m_Kd[i][j] = 0; } int ROUND_KEY_COUNT = (m_iROUNDS + 1) * BC; int KC = m_keylength / 4; //Copy user material bytes into temporary ints int* pi = tk; char const* pc = key; for (i = 0; i<KC; i++) { *pi = (unsigned char)*(pc++) << 24; *pi |= (unsigned char)*(pc++) << 16; *pi |= (unsigned char)*(pc++) << 8; *(pi++) |= (unsigned char)*(pc++); } //Copy values into round key arrays int t = 0; for (j = 0; (j<KC) && (t<ROUND_KEY_COUNT); j++, t++) { m_Ke[t / BC][t%BC] = tk[j]; m_Kd[m_iROUNDS - (t / BC)][t%BC] = tk[j]; } int tt, rconpointer = 0; while (t < ROUND_KEY_COUNT) { //Extrapolate using phi (the round key evolution function) tt = tk[KC - 1]; tk[0] ^= (sm_S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ (sm_S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ (sm_S[tt & 0xFF] & 0xFF) << 8 ^ (sm_S[(tt >> 24) & 0xFF] & 0xFF) ^ (sm_rcon[rconpointer++] & 0xFF) << 24; if (KC != 8) for (i = 1, j = 0; i<KC;) tk[i++] ^= tk[j++]; else { for (i = 1, j = 0; i<KC / 2; ) tk[i++] ^= tk[j++]; tt = tk[KC / 2 - 1]; tk[KC / 2] ^= (sm_S[tt & 0xFF] & 0xFF) ^ (sm_S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ (sm_S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ (sm_S[(tt >> 24) & 0xFF] & 0xFF) << 24; for (j = KC / 2, i = j + 1; i<KC; ) tk[i++] ^= tk[j++]; } //Copy values into round key arrays for (j = 0; (j<KC) && (t<ROUND_KEY_COUNT); j++, t++) { m_Ke[t / BC][t%BC] = tk[j]; m_Kd[m_iROUNDS - (t / BC)][t%BC] = tk[j]; } } //Inverse MixColumn where needed for (int r = 1; r<m_iROUNDS; r++) for (j = 0; j<BC; j++) { tt = m_Kd[r][j]; m_Kd[r][j] = sm_U1[(tt >> 24) & 0xFF] ^ sm_U2[(tt >> 16) & 0xFF] ^ sm_U3[(tt >> 8) & 0xFF] ^ sm_U4[tt & 0xFF]; } m_bKeyInit = true; } //Convenience method to encrypt exactly one block of plaintext, assuming //Rijndael's default block size (128-bit). 
// in - The plaintext // result - The ciphertext generated from a plaintext using the key __device__ __host__ void CudaRijndael::DefEncryptBlock(char const* in, char* result) { int* Ker = m_Ke[0]; int t0 = ((unsigned char)*(in++) << 24); t0 |= ((unsigned char)*(in++) << 16); t0 |= ((unsigned char)*(in++) << 8); (t0 |= (unsigned char)*(in++)) ^= Ker[0]; int t1 = ((unsigned char)*(in++) << 24); t1 |= ((unsigned char)*(in++) << 16); t1 |= ((unsigned char)*(in++) << 8); (t1 |= (unsigned char)*(in++)) ^= Ker[1]; int t2 = ((unsigned char)*(in++) << 24); t2 |= ((unsigned char)*(in++) << 16); t2 |= ((unsigned char)*(in++) << 8); (t2 |= (unsigned char)*(in++)) ^= Ker[2]; int t3 = ((unsigned char)*(in++) << 24); t3 |= ((unsigned char)*(in++) << 16); t3 |= ((unsigned char)*(in++) << 8); (t3 |= (unsigned char)*(in++)) ^= Ker[3]; int a0, a1, a2, a3; //Apply Round Transforms for (int r = 1; r < m_iROUNDS; r++) { Ker = m_Ke[r]; a0 = (sm_T1[(t0 >> 24) & 0xFF] ^ sm_T2[(t1 >> 16) & 0xFF] ^ sm_T3[(t2 >> 8) & 0xFF] ^ sm_T4[t3 & 0xFF]) ^ Ker[0]; a1 = (sm_T1[(t1 >> 24) & 0xFF] ^ sm_T2[(t2 >> 16) & 0xFF] ^ sm_T3[(t3 >> 8) & 0xFF] ^ sm_T4[t0 & 0xFF]) ^ Ker[1]; a2 = (sm_T1[(t2 >> 24) & 0xFF] ^ sm_T2[(t3 >> 16) & 0xFF] ^ sm_T3[(t0 >> 8) & 0xFF] ^ sm_T4[t1 & 0xFF]) ^ Ker[2]; a3 = (sm_T1[(t3 >> 24) & 0xFF] ^ sm_T2[(t0 >> 16) & 0xFF] ^ sm_T3[(t1 >> 8) & 0xFF] ^ sm_T4[t2 & 0xFF]) ^ Ker[3]; t0 = a0; t1 = a1; t2 = a2; t3 = a3; } //Last Round is special Ker = m_Ke[m_iROUNDS]; int tt = Ker[0]; result[0] = sm_S[(t0 >> 24) & 0xFF] ^ (tt >> 24); result[1] = sm_S[(t1 >> 16) & 0xFF] ^ (tt >> 16); result[2] = sm_S[(t2 >> 8) & 0xFF] ^ (tt >> 8); result[3] = sm_S[t3 & 0xFF] ^ tt; tt = Ker[1]; result[4] = sm_S[(t1 >> 24) & 0xFF] ^ (tt >> 24); result[5] = sm_S[(t2 >> 16) & 0xFF] ^ (tt >> 16); result[6] = sm_S[(t3 >> 8) & 0xFF] ^ (tt >> 8); result[7] = sm_S[t0 & 0xFF] ^ tt; tt = Ker[2]; result[8] = sm_S[(t2 >> 24) & 0xFF] ^ (tt >> 24); result[9] = sm_S[(t3 >> 16) & 0xFF] ^ (tt >> 16); result[10] = sm_S[(t0 >> 8) & 0xFF] ^ (tt >> 8); result[11] = sm_S[t1 & 0xFF] ^ tt; tt = Ker[3]; result[12] = sm_S[(t3 >> 24) & 0xFF] ^ (tt >> 24); result[13] = sm_S[(t0 >> 16) & 0xFF] ^ (tt >> 16); result[14] = sm_S[(t1 >> 8) & 0xFF] ^ (tt >> 8); result[15] = sm_S[t2 & 0xFF] ^ tt; } //Convenience method to decrypt exactly one block of plaintext, assuming //Rijndael's default block size (128-bit). // in - The ciphertext. // result - The plaintext generated from a ciphertext using the session key. 
__device__ __host__ void CudaRijndael::DefDecryptBlock(char const* in, char* result) { int* Kdr = m_Kd[0]; int t0 = ((unsigned char)*(in++) << 24); t0 = t0 | ((unsigned char)*(in++) << 16); t0 |= ((unsigned char)*(in++) << 8); (t0 |= (unsigned char)*(in++)) ^= Kdr[0]; int t1 = ((unsigned char)*(in++) << 24); t1 |= ((unsigned char)*(in++) << 16); t1 |= ((unsigned char)*(in++) << 8); (t1 |= (unsigned char)*(in++)) ^= Kdr[1]; int t2 = ((unsigned char)*(in++) << 24); t2 |= ((unsigned char)*(in++) << 16); t2 |= ((unsigned char)*(in++) << 8); (t2 |= (unsigned char)*(in++)) ^= Kdr[2]; int t3 = ((unsigned char)*(in++) << 24); t3 |= ((unsigned char)*(in++) << 16); t3 |= ((unsigned char)*(in++) << 8); (t3 |= (unsigned char)*(in++)) ^= Kdr[3]; int a0, a1, a2, a3; for (int r = 1; r < m_iROUNDS; r++) // apply round transforms { Kdr = m_Kd[r]; a0 = (sm_T5[(t0 >> 24) & 0xFF] ^ sm_T6[(t3 >> 16) & 0xFF] ^ sm_T7[(t2 >> 8) & 0xFF] ^ sm_T8[t1 & 0xFF]) ^ Kdr[0]; a1 = (sm_T5[(t1 >> 24) & 0xFF] ^ sm_T6[(t0 >> 16) & 0xFF] ^ sm_T7[(t3 >> 8) & 0xFF] ^ sm_T8[t2 & 0xFF]) ^ Kdr[1]; a2 = (sm_T5[(t2 >> 24) & 0xFF] ^ sm_T6[(t1 >> 16) & 0xFF] ^ sm_T7[(t0 >> 8) & 0xFF] ^ sm_T8[t3 & 0xFF]) ^ Kdr[2]; a3 = (sm_T5[(t3 >> 24) & 0xFF] ^ sm_T6[(t2 >> 16) & 0xFF] ^ sm_T7[(t1 >> 8) & 0xFF] ^ sm_T8[t0 & 0xFF]) ^ Kdr[3]; t0 = a0; t1 = a1; t2 = a2; t3 = a3; } //Last Round is special Kdr = m_Kd[m_iROUNDS]; int tt = Kdr[0]; result[0] = sm_Si[(t0 >> 24) & 0xFF] ^ (tt >> 24); result[1] = sm_Si[(t3 >> 16) & 0xFF] ^ (tt >> 16); result[2] = sm_Si[(t2 >> 8) & 0xFF] ^ (tt >> 8); result[3] = sm_Si[t1 & 0xFF] ^ tt; tt = Kdr[1]; result[4] = sm_Si[(t1 >> 24) & 0xFF] ^ (tt >> 24); result[5] = sm_Si[(t0 >> 16) & 0xFF] ^ (tt >> 16); result[6] = sm_Si[(t3 >> 8) & 0xFF] ^ (tt >> 8); result[7] = sm_Si[t2 & 0xFF] ^ tt; tt = Kdr[2]; result[8] = sm_Si[(t2 >> 24) & 0xFF] ^ (tt >> 24); result[9] = sm_Si[(t1 >> 16) & 0xFF] ^ (tt >> 16); result[10] = sm_Si[(t0 >> 8) & 0xFF] ^ (tt >> 8); result[11] = sm_Si[t3 & 0xFF] ^ tt; tt = Kdr[3]; result[12] = sm_Si[(t3 >> 24) & 0xFF] ^ (tt >> 24); result[13] = sm_Si[(t2 >> 16) & 0xFF] ^ (tt >> 16); result[14] = sm_Si[(t1 >> 8) & 0xFF] ^ (tt >> 8); result[15] = sm_Si[t0 & 0xFF] ^ tt; } //Encrypt exactly one block of plaintext. // in - The plaintext. // result - The ciphertext generated from a plaintext using the key. __device__ __host__ void CudaRijndael::EncryptBlock(char const* in, char* result) { if (DEFAULT_BLOCK_SIZE == m_blockSize) { DefEncryptBlock(in, result); return; } int BC = m_blockSize / 4; int SC = (BC == 4) ? 0 : (BC == 6 ? 1 : 2); int s1 = sm_shifts[SC][1][0]; int s2 = sm_shifts[SC][2][0]; int s3 = sm_shifts[SC][3][0]; //Temporary Work Arrays int i; int tt; int* pi = t; for (i = 0; i<BC; i++) { *pi = ((unsigned char)*(in++) << 24); *pi |= ((unsigned char)*(in++) << 16); *pi |= ((unsigned char)*(in++) << 8); (*(pi++) |= (unsigned char)*(in++)) ^= m_Ke[0][i]; } //Apply Round Transforms for (int r = 1; r<m_iROUNDS; r++) { for (i = 0; i<BC; i++) a[i] = (sm_T1[(t[i] >> 24) & 0xFF] ^ sm_T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^ sm_T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^ sm_T4[t[(i + s3) % BC] & 0xFF]) ^ m_Ke[r][i]; memcpy(t, a, 4 * BC); } int j; //Last Round is Special for (i = 0, j = 0; i<BC; i++) { tt = m_Ke[m_iROUNDS][i]; result[j++] = sm_S[(t[i] >> 24) & 0xFF] ^ (tt >> 24); result[j++] = sm_S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16); result[j++] = sm_S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8); result[j++] = sm_S[t[(i + s3) % BC] & 0xFF] ^ tt; } } //Decrypt exactly one block of ciphertext. 
// in - The ciphertext. // result - The plaintext generated from a ciphertext using the session key. __device__ __host__ void CudaRijndael::DecryptBlock(char const* in, char* result) { if (DEFAULT_BLOCK_SIZE == m_blockSize) { DefDecryptBlock(in, result); return; } int BC = m_blockSize / 4; int SC = BC == 4 ? 0 : (BC == 6 ? 1 : 2); int s1 = sm_shifts[SC][1][1]; int s2 = sm_shifts[SC][2][1]; int s3 = sm_shifts[SC][3][1]; //Temporary Work Arrays int i; int tt; int* pi = t; for (i = 0; i<BC; i++) { *pi = ((unsigned char)*(in++) << 24); *pi |= ((unsigned char)*(in++) << 16); *pi |= ((unsigned char)*(in++) << 8); (*(pi++) |= (unsigned char)*(in++)) ^= m_Kd[0][i]; } //Apply Round Transforms for (int r = 1; r<m_iROUNDS; r++) { for (i = 0; i<BC; i++) a[i] = (sm_T5[(t[i] >> 24) & 0xFF] ^ sm_T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^ sm_T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^ sm_T8[t[(i + s3) % BC] & 0xFF]) ^ m_Kd[r][i]; memcpy(t, a, 4 * BC); } int j; //Last Round is Special for (i = 0, j = 0; i<BC; i++) { tt = m_Kd[m_iROUNDS][i]; result[j++] = sm_Si[(t[i] >> 24) & 0xFF] ^ (tt >> 24); result[j++] = sm_Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16); result[j++] = sm_Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8); result[j++] = sm_Si[t[(i + s3) % BC] & 0xFF] ^ tt; } } __device__ __host__ void CudaRijndael::Encrypt(char const* in, char* result, size_t n, int iMode) { int i; char const* pin; char* presult; if (CBC == iMode) //CBC mode, using the Chain { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { Xor(m_chain, pin); EncryptBlock(m_chain, presult); memcpy(m_chain, presult, m_blockSize); pin += m_blockSize; presult += m_blockSize; } } else if (CFB == iMode) //CFB mode, using the Chain { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { EncryptBlock(m_chain, presult); Xor(presult, pin); memcpy(m_chain, presult, m_blockSize); pin += m_blockSize; presult += m_blockSize; } } else //ECB mode, not using the Chain { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { EncryptBlock(pin, presult); pin += m_blockSize; presult += m_blockSize; } } } __device__ __host__ void CudaRijndael::Decrypt(char const* in, char* result, size_t n, int iMode) { int i; char const* pin; char* presult; if (CBC == iMode) //CBC mode, using the Chain { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { DecryptBlock(pin, presult); Xor(presult, m_chain); memcpy(m_chain, pin, m_blockSize); pin += m_blockSize; presult += m_blockSize; } } else if (CFB == iMode) //CFB mode, using the Chain, not using Decrypt() { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { EncryptBlock(m_chain, presult); //memcpy(presult, pin, m_blockSize); Xor(presult, pin); memcpy(m_chain, pin, m_blockSize); pin += m_blockSize; presult += m_blockSize; } } else //ECB mode, not using the Chain { for (i = 0, pin = in, presult = result; i<n / m_blockSize; i++) { DecryptBlock(pin, presult); pin += m_blockSize; presult += m_blockSize; } } } __global__ void kernel(char* plaintext, char * ciphertext, int * size, CudaRijndael * rijndael) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int startIndex = idx * BLOCK_SIZE; int endIndex = (idx + 1) * BLOCK_SIZE; char blockPlain[BLOCK_SIZE]; if (endIndex >= *size) { endIndex = *size; } int p_i = 0; for (int i = startIndex; i < endIndex; ++i) { blockPlain[p_i++] = plaintext[i]; } while (p_i < BLOCK_SIZE) { blockPlain[p_i++] = '\0'; } char blockCipher[BLOCK_SIZE]; rijndael->EncryptBlock(blockPlain, blockCipher); int c_i = 0; for (int i = startIndex; i < 
endIndex; ++i) { ciphertext[i] = blockCipher[c_i++]; } } void RunOnGpu(CudaRijndael rijndael, int numThreadsPerBlock) { vector<char> data = Utils::ReadBytes("data/image.jpg"); stringstream ss; data = Utils::PadToMultipleOfN(data, BLOCK_SIZE);; int size = data.size(); ss << "Paded size : " << data.size(); Utils::Log(ss.str()); ss.str(string()); char *h_plaintext = Utils::VectorToArray(data, size); char *h_ciphertext = new char[size]; char *d_plaintext; char *d_ciphertext; int * d_size; CudaRijndael *d_rijndael; Utils::Log("Allocating cuda mem..."); auto startTime = Utils::CurrentTime(); cudaMalloc((void**)&d_plaintext, size * sizeof(char)); cudaMalloc((void**)&d_ciphertext, size * sizeof(char)); cudaMalloc((void**)&d_size, sizeof(int)); cudaMalloc((void**)&d_rijndael, sizeof(CudaRijndael)); cudaMemcpy(d_plaintext, h_plaintext, size * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(d_size, &size, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_rijndael, &rijndael, sizeof(CudaRijndael), cudaMemcpyHostToDevice); auto endAllocationTime = Utils::CurrentTime(); Utils::Log("Alocated cuda mem."); int numEncryptionBlocks = size / BLOCK_SIZE; int numGpuBlocks = numEncryptionBlocks / numThreadsPerBlock + 1; kernel <<<numGpuBlocks, numThreadsPerBlock>>> (d_plaintext, d_ciphertext, d_size, d_rijndael); cudaDeviceSynchronize(); auto endEncryptionTime = Utils::CurrentTime(); cudaMemcpy(h_ciphertext, d_ciphertext, size * sizeof(char), cudaMemcpyDeviceToHost); auto endCopyResultToCpuTime = Utils::CurrentTime(); ss.str(string()); ss << "Elapsed time for parallel encryption:" << endl; ss << "Encryption block size: " << BLOCK_SIZE; ss << endl; ss << "GPU blocks: " << numGpuBlocks << endl; ss << "Threads per block: " << numThreadsPerBlock << endl; chrono::duration<double> time = endAllocationTime - startTime; ss << "Cuda memory allocation time: " << time.count() << "s" << endl; time = endEncryptionTime - endAllocationTime; ss << "Encryption Time: " << time.count() << "s" << endl; time = endCopyResultToCpuTime - endEncryptionTime; ss << "Copy result to CPU time: " << time.count() << "s" << endl; time = endCopyResultToCpuTime - startTime; ss << "Total elapsed time:" << time.count() << "s" << endl; Utils::Log(ss.str()); cudaError_t code = cudaPeekAtLastError(); if (code != cudaSuccess) { Utils::Log(cudaGetErrorString(code)); } // Write the decrypted file for correctness check /*vector<char> encrypted(h_ciphertext, h_ciphertext + sizeof(h_ciphertext) / sizeof(h_ciphertext[0])); vector<vector<char>> encBlocks = Utils::GetBlocks(encrypted, BLOCK_SIZE);*/ char * decrypted = new char[size]; vector<char> vEncrypted = Utils::ArrayToVector(h_ciphertext, size); Utils::Log("Start writing to file.[encrypted]"); Utils::WriteBytes(vEncrypted, "data/encrypted_image.jpg"); Utils::Log("End writing to file.[encrypted]"); Utils::Log("Decrypting..."); rijndael.Decrypt(h_ciphertext, decrypted, size, CudaRijndael::ECB); Utils::Log("Decrypted."); vector<char> vDecrypted = Utils::ArrayToVector(decrypted, size); vector<char> decrNoPadding(vDecrypted.begin(), vDecrypted.begin() + size); Utils::Log("Start writing to file.[decrypted]"); Utils::WriteBytes(decrNoPadding, "data/decrypted_image.jpg"); Utils::Log("End writing to file.[decrypted]"); delete[] h_ciphertext; delete[] decrypted; cudaFree(d_plaintext); cudaFree(d_ciphertext); cudaFree(d_size); cudaFree(d_rijndael); } int main() { CudaRijndael rijndael; rijndael.MakeKey("abcdefghabcdefgh", "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 16, BLOCK_SIZE); RunOnGpu(rijndael, THREADS_PER_BLOCK); 
cout << "Enter a key..."; char k; cin >> k; return 0; }
e9c1afd919aa2e2b99cb768118ce752a8955d37b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zswapblk.cu normal z -> d, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #define BLOCK_SIZE 64 /*********************************************************/ /* * Blocked version: swap several pairs of lines */ typedef struct { double *A1; double *A2; int n, lda1, lda2, npivots; short ipiv[BLOCK_SIZE]; } magmagpu_dswapblk_params_t; __global__ void magmagpu_dswapblkrm( magmagpu_dswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; if( y < params.n ) { double *A1 = params.A1 + y - params.lda1; double *A2 = params.A2 + y; for( int i = 0; i < params.npivots; i++ ) { A1 += params.lda1; if ( params.ipiv[i] == -1 ) continue; double tmp1 = *A1; double *tmp2 = A2 + params.ipiv[i]*params.lda2; *A1 = *tmp2; *tmp2 = tmp1; } } } __global__ void magmagpu_dswapblkcm( magmagpu_dswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; unsigned int offset1 = y*params.lda1; unsigned int offset2 = y*params.lda2; if( y < params.n ) { double *A1 = params.A1 + offset1 - 1; double *A2 = params.A2 + offset2; for( int i = 0; i < params.npivots; i++ ) { A1++; if ( params.ipiv[i] == -1 ) continue; double tmp1 = *A1; double *tmp2 = A2 + params.ipiv[i]; *A1 = *tmp2; *tmp2 = tmp1; } } __syncthreads(); } /** @ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_dswapblk_q( magma_order_t order, magma_int_t n, double *dA1T, magma_int_t lda1, double *dA2T, magma_int_t lda2, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset, magma_queue_t queue ) { magma_int_t blocksize = 64; dim3 blocks( (n+blocksize-1) / blocksize, 1, 1); magma_int_t k, im; /* Quick return */ if ( n == 0 ) return; if ( order == MagmaColMajor ) { for( k=(i1-1); k<i2; k+=BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_dswapblk_params_t params = { dA1T+k, dA2T, n, lda1, lda2, sb }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } hipLaunchKernelGGL(( magmagpu_dswapblkcm), dim3(blocks), dim3(blocksize), 0, queue , params ); } } else { for( k=(i1-1); k<i2; k+=BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_dswapblk_params_t params = { dA1T+k*lda1, dA2T, n, lda1, lda2, sb }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } hipLaunchKernelGGL(( magmagpu_dswapblkrm), dim3(blocks), dim3(blocksize), 0, queue , params ); } } } /** @see magmablas_dswapblk_q @ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_dswapblk( magma_order_t order, magma_int_t n, double *dA1T, magma_int_t lda1, double *dA2T, magma_int_t lda2, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset ) { magmablas_dswapblk_q( order, n, dA1T, lda1, dA2T, lda2, i1, i2, ipiv, inci, offset, magma_stream ); }
e9c1afd919aa2e2b99cb768118ce752a8955d37b.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zswapblk.cu normal z -> d, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #define BLOCK_SIZE 64 /*********************************************************/ /* * Blocked version: swap several pairs of lines */ typedef struct { double *A1; double *A2; int n, lda1, lda2, npivots; short ipiv[BLOCK_SIZE]; } magmagpu_dswapblk_params_t; __global__ void magmagpu_dswapblkrm( magmagpu_dswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; if( y < params.n ) { double *A1 = params.A1 + y - params.lda1; double *A2 = params.A2 + y; for( int i = 0; i < params.npivots; i++ ) { A1 += params.lda1; if ( params.ipiv[i] == -1 ) continue; double tmp1 = *A1; double *tmp2 = A2 + params.ipiv[i]*params.lda2; *A1 = *tmp2; *tmp2 = tmp1; } } } __global__ void magmagpu_dswapblkcm( magmagpu_dswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; unsigned int offset1 = y*params.lda1; unsigned int offset2 = y*params.lda2; if( y < params.n ) { double *A1 = params.A1 + offset1 - 1; double *A2 = params.A2 + offset2; for( int i = 0; i < params.npivots; i++ ) { A1++; if ( params.ipiv[i] == -1 ) continue; double tmp1 = *A1; double *tmp2 = A2 + params.ipiv[i]; *A1 = *tmp2; *tmp2 = tmp1; } } __syncthreads(); } /** @ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_dswapblk_q( magma_order_t order, magma_int_t n, double *dA1T, magma_int_t lda1, double *dA2T, magma_int_t lda2, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset, magma_queue_t queue ) { magma_int_t blocksize = 64; dim3 blocks( (n+blocksize-1) / blocksize, 1, 1); magma_int_t k, im; /* Quick return */ if ( n == 0 ) return; if ( order == MagmaColMajor ) { for( k=(i1-1); k<i2; k+=BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_dswapblk_params_t params = { dA1T+k, dA2T, n, lda1, lda2, sb }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } magmagpu_dswapblkcm<<< blocks, blocksize, 0, queue >>>( params ); } } else { for( k=(i1-1); k<i2; k+=BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_dswapblk_params_t params = { dA1T+k*lda1, dA2T, n, lda1, lda2, sb }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } magmagpu_dswapblkrm<<< blocks, blocksize, 0, queue >>>( params ); } } } /** @see magmablas_dswapblk_q @ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_dswapblk( magma_order_t order, magma_int_t n, double *dA1T, magma_int_t lda1, double *dA2T, magma_int_t lda2, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset ) { magmablas_dswapblk_q( order, n, dA1T, lda1, dA2T, lda2, i1, i2, ipiv, inci, offset, magma_stream ); }
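The .hip and .cu listings of this file differ mainly in the kernel-launch syntax. As a minimal sketch of that mapping (the scale_kernel and launch_scale names below are assumptions for illustration only, not part of the MAGMA sources), CUDA's <<<grid, block, shmem, stream>>> form corresponds to HIP's hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args...):

#include <hip/hip_runtime.h>

// Toy element-wise kernel used only to show the launch mapping.
__global__ void scale_kernel(double *x, double s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

void launch_scale(double *d_x, double s, int n, hipStream_t stream)
{
    dim3 blocks((n + 63) / 64, 1, 1);
    dim3 threads(64, 1, 1);
    // CUDA form, as in the .cu listing:
    //     scale_kernel<<< blocks, threads, 0, stream >>>(d_x, s, n);
    // HIP form, as in the .hip listing:
    hipLaunchKernelGGL(scale_kernel, blocks, threads, 0, stream, d_x, s, n);
}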
3302d573141d9cb62987ffb3bd5fbd216a70201f.hip
// !!! This is a file automatically generated by hipify!!! /********************************* * Fichier dpr_cuda.cu * *********************************/ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include "../inc/utils.h" //Min nb of points to launch the GPU computation #define TRESHOLD_SEQ 90000 //Nb points in each parallel region #define SIZE_PARALLEL 50000 /** * CUDA error control and debugging. **/ #ifdef CUDA_DEBUG #define CUDA_SYNC_ERROR() { \ hipError_t sync_error; \ hipDeviceSynchronize(); \ sync_error = hipGetLastError(); \ if(sync_error != hipSuccess) { \ fprintf(stderr, "[CUDA SYNC ERROR at %s:%d -> %s]\n", \ __FILE__ , __LINE__, hipGetErrorString(sync_error)); \ exit(EXIT_FAILURE); \ } \ } #else /* #ifdef CUDA_DEBUG */ #define CUDA_SYNC_ERROR() #endif /* #ifdef CUDA_DEBUG */ #define CUDA_ERROR(cuda_call) { \ hipError_t error = cuda_call; \ if(error != hipSuccess){ \ fprintf(stderr, "[CUDA ERROR at %s:%d -> %s]\n", \ __FILE__ , __LINE__, hipGetErrorString(error)); \ exit(EXIT_FAILURE); \ } \ CUDA_SYNC_ERROR(); \ } __global__ void calcul_min( unsigned long *ord, int ind_start, int ind_end, unsigned long long *ymin, int *ind_min, int size_max_parallel ){ int a = threadIdx.x; int size_tot = (ind_end - ind_start -1); //On n'effectue pas le calcul aux indices ind_start ni ind_end int nb_threads = ceilf((float)size_tot/(float)size_max_parallel); //size of region to compute in the current thread int size_parallel = ceilf( (float)size_tot/(float)nb_threads ); //have to be computed before the case of a different size_parallel value int ind_start_loc = ind_start + a * size_parallel + 1; if ( a == (nb_threads - 1) ) size_parallel = size_tot - (nb_threads - 1) * size_parallel; unsigned long min_loc = ord[ind_start_loc]; int ind_min_loc = ind_start_loc; int i = 0; //printf("FINDING YMIN\n"); for ( i = ind_start_loc; i < ind_start_loc + size_parallel; i++ ){ //Looking for the lowest ordinate if ( ord[i]< min_loc ){ min_loc = ord[i]; ind_min_loc = i; } } atomicMin(ymin, min_loc); __syncthreads(); if (*ymin == min_loc) *ind_min = ind_min_loc; return; } /** * * Function dpr_cuda() * **/ unsigned long long dpr_cuda(unsigned long **data, int n, int l, unsigned long h, int ind_start, int ind_end){ int i = 0; //ycross min on the whole area, ymin min on the whole area minus the 2 ends int ind_min = 0; unsigned long long crosswise_area = 0, left_area = 0, right_area = 0, result_area = 0, ymin =0; //Two points left : returns the rectangle defined by the height if ( (ind_end - ind_start) == 1 ){ return (unsigned long long) (data[0][ind_end]-data[0][ind_start]) * h; } // No parallel computing if too few points if ( (ind_end - ind_start) < TRESHOLD_SEQ ){ ymin = data[1][ind_start + 1]; ind_min = ind_start + 1; //We don't enter the loop if ind_end - ind_start == 2 for ( i = ind_start + 2; i < ind_end; i++ ){ //Looking for the lowest ordinate if ( data[1][i] < ymin ){ ymin = data[1][i]; ind_min = i; } } } else { int *ind_min_gpu, *ind_start_gpu, *ind_end_gpu, size_parallel = SIZE_PARALLEL, *size_parallel_gpu; unsigned long *ord_gpu; unsigned long long *min_gpu; //INIT GPU PARAMETERS /* GPU allocation */ hipMalloc((void **)&min_gpu, sizeof(unsigned long long)); hipMalloc((void **)&ind_min_gpu, sizeof(int)); hipMalloc((void **)&ind_start_gpu, sizeof(int)); hipMalloc((void **)&ind_end_gpu, sizeof(int)); hipMalloc((void **)&size_parallel_gpu, sizeof(int)); if(min_gpu == NULL || ind_min_gpu == NULL || ind_start_gpu == NULL || ind_end_gpu == NULL ||
size_parallel_gpu == NULL) printf("Parameters allocation failed\n"); hipMalloc((void **)&ord_gpu, n * sizeof(unsigned long)); /* CPU -> GPU transfer (synchrones) */ hipMemcpy(ord_gpu, data[1], n * sizeof(unsigned long), hipMemcpyHostToDevice); hipMemcpy(ind_start_gpu, &ind_start, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(ind_end_gpu, &ind_end, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(size_parallel_gpu, &size_parallel, sizeof(int), hipMemcpyHostToDevice); hipMemset(min_gpu, h, sizeof(unsigned long long)); hipMemset(ind_min_gpu, -1, sizeof(int)); /* Kernel launching */ //Un seul bloc de threads 1D int size_tot = (ind_end - ind_start -1); int nb_threads = ceil((float)size_tot/(float)SIZE_PARALLEL); dim3 threadsParBloc(nb_threads, 1); dim3 tailleGrille(1, 1); // Compute ymin on GPU hipLaunchKernelGGL(( calcul_min), dim3(tailleGrille), dim3(threadsParBloc), 0, 0, ord_gpu, ind_start, ind_end, min_gpu, ind_min_gpu, size_parallel); /* Recovering min element and index on CPU (element too for testing purposes) */ hipMemcpy((void *)&ymin, min_gpu, sizeof(unsigned long long), hipMemcpyDeviceToHost); hipMemcpy((void *)&ind_min, ind_min_gpu, sizeof(int), hipMemcpyDeviceToHost); /* cuda Frees */ hipFree(min_gpu); hipFree(ind_min_gpu); hipFree(ind_start_gpu); hipFree(ind_end_gpu); hipFree(ord_gpu); } crosswise_area = ymin * (data[0][ind_end] - data[0][ind_start]); left_area = dpr_cuda(data, n, l, h, ind_start, ind_min); right_area = dpr_cuda(data, n, l, h, ind_min, ind_end); //Result is the max of these areas result_area = crosswise_area; if ( left_area > result_area ) result_area = left_area; if ( right_area > result_area ) result_area = right_area; return result_area; } int main(int argc, char **argv){ double debut=0.0, fin=0.0; unsigned long **data; unsigned long long S = 0, h = 0; int res = 0; int n = 0, l = 0; if(argc != 2){ printf("Usage: %s <path_of_data_file>\n", argv[0]); return -1; } char *name = argv[1]; /* Read parameters */ res = read_param_cuda(name, data, &n, &l, &h); if(res != 0){ printf("read_param :\t ERROR\n"); return -1; } /* Allocate data table */ data = (unsigned long **) malloc(2 * sizeof(unsigned long *)); data[0] = (unsigned long *) malloc(n * sizeof(unsigned long)); data[1] = (unsigned long *) malloc(n * sizeof(unsigned long)); /* Read coordinates from file */ res = read_data(name, data, n); if(res != 0){ printf("read_data :\t ERROR\n"); return -1; } /* Start timing */ debut = my_gettimeofday(); /* Do computation: */ S = dpr_cuda(data, n, l, h, 0, n-1); /* End timing */ fin = my_gettimeofday(); fprintf(stdout, "\n***** Algorithme Diviser Pour Régner, hybride *****\n"); fprintf(stdout, "Pour les paramètres N = %d\t S = %llu\nTRESHOLD_SEQ = %d\t, SIZE_PARALLEL = %d\n", n, S, TRESHOLD_SEQ, SIZE_PARALLEL); fprintf( stdout, "Total computation time in s (with gettimeofday()) :\t"); fprintf( stdout, "%g\n\n", fin - debut); return 0; }
3302d573141d9cb62987ffb3bd5fbd216a70201f.cu
/********************************* * Fichier dpr_cuda.cu * *********************************/ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <cuda.h> #include "../inc/utils.h" //Min nb of points to launch the GPU computation #define TRESHOLD_SEQ 90000 //Nb points in each parallel region #define SIZE_PARALLEL 50000 /** * CUDA error control and debugging. **/ #ifdef CUDA_DEBUG #define CUDA_SYNC_ERROR() { \ cudaError_t sync_error; \ cudaDeviceSynchronize(); \ sync_error = cudaGetLastError(); \ if(sync_error != cudaSuccess) { \ fprintf(stderr, "[CUDA SYNC ERROR at %s:%d -> %s]\n", \ __FILE__ , __LINE__, cudaGetErrorString(sync_error)); \ exit(EXIT_FAILURE); \ } \ } #else /* #ifdef CUDA_DEBUG */ #define CUDA_SYNC_ERROR() #endif /* #ifdef CUDA_DEBUG */ #define CUDA_ERROR(cuda_call) { \ cudaError_t error = cuda_call; \ if(error != cudaSuccess){ \ fprintf(stderr, "[CUDA ERROR at %s:%d -> %s]\n", \ __FILE__ , __LINE__, cudaGetErrorString(error)); \ exit(EXIT_FAILURE); \ } \ CUDA_SYNC_ERROR(); \ } __global__ void calcul_min( unsigned long *ord, int ind_start, int ind_end, unsigned long long *ymin, int *ind_min, int size_max_parallel ){ int a = threadIdx.x; int size_tot = (ind_end - ind_start -1); //On n'effectue pas le calcul aux indices ind_start ni ind_end int nb_threads = ceilf((float)size_tot/(float)size_max_parallel); //size of region to compute in the current thread int size_parallel = ceilf( (float)size_tot/(float)nb_threads ); //have to be computed before the case of a different size_parallel value int ind_start_loc = ind_start + a * size_parallel + 1; if ( a == (nb_threads - 1) ) size_parallel = size_tot - (nb_threads - 1) * size_parallel; unsigned long min_loc = ord[ind_start_loc]; int ind_min_loc = ind_start_loc; int i = 0; //printf("FINDING YMIN\n"); for ( i = ind_start_loc; i < ind_start_loc + size_parallel; i++ ){ //Looking for the lowest ordinate if ( ord[i]< min_loc ){ min_loc = ord[i]; ind_min_loc = i; } } atomicMin(ymin, min_loc); __syncthreads(); if (*ymin == min_loc) *ind_min = ind_min_loc; return; } /** * * Function dpr_cuda() * **/ unsigned long long dpr_cuda(unsigned long **data, int n, int l, unsigned long h, int ind_start, int ind_end){ int i = 0; //ycross min on the whole area, ymin min on the whole area minus the 2 ends int ind_min = 0; unsigned long long crosswise_area = 0, left_area = 0, right_area = 0, result_area = 0, ymin =0; //Two points left : returns the rectangle defined by the height if ( (ind_end - ind_start) == 1 ){ return (unsigned long long) (data[0][ind_end]-data[0][ind_start]) * h; } // No parallel computing if too few points if ( (ind_end - ind_start) < TRESHOLD_SEQ ){ ymin = data[1][ind_start + 1]; ind_min = ind_start + 1; //We don't enter the loop if ind_end - ind_start == 2 for ( i = ind_start + 2; i < ind_end; i++ ){ //Looking for the lowest ordinate if ( data[1][i] < ymin ){ ymin = data[1][i]; ind_min = i; } } } else { int *ind_min_gpu, *ind_start_gpu, *ind_end_gpu, size_parallel = SIZE_PARALLEL, *size_parallel_gpu; unsigned long *ord_gpu; unsigned long long *min_gpu; //INIT GPU PARAMETERS /* GPU allocation */ cudaMalloc((void **)&min_gpu, sizeof(unsigned long long)); cudaMalloc((void **)&ind_min_gpu, sizeof(int)); cudaMalloc((void **)&ind_start_gpu, sizeof(int)); cudaMalloc((void **)&ind_end_gpu, sizeof(int)); cudaMalloc((void **)&size_parallel_gpu, sizeof(int)); if(min_gpu == NULL || ind_min_gpu == NULL || ind_start_gpu == NULL || ind_end_gpu == NULL || size_parallel_gpu == NULL) printf("Parameters allocation
failed\n"); cudaMalloc((void **)&ord_gpu, n * sizeof(unsigned long)); /* CPU -> GPU transfer (synchrones) */ cudaMemcpy(ord_gpu, data[1], n * sizeof(unsigned long), cudaMemcpyHostToDevice); cudaMemcpy(ind_start_gpu, &ind_start, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(ind_end_gpu, &ind_end, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(size_parallel_gpu, &size_parallel, sizeof(int), cudaMemcpyHostToDevice); cudaMemset(min_gpu, h, sizeof(unsigned long long)); cudaMemset(ind_min_gpu, -1, sizeof(int)); /* Kernel launching */ //Un seul bloc de threads 1D int size_tot = (ind_end - ind_start -1); int nb_threads = ceil((float)size_tot/(float)SIZE_PARALLEL); dim3 threadsParBloc(nb_threads, 1); dim3 tailleGrille(1, 1); // Compute ymin on GPU calcul_min<<<tailleGrille, threadsParBloc>>>(ord_gpu, ind_start, ind_end, min_gpu, ind_min_gpu, size_parallel); /* Recovering min element and index on CPU (element too for testing purposes) */ cudaMemcpy((void *)&ymin, min_gpu, sizeof(unsigned long long), cudaMemcpyDeviceToHost); cudaMemcpy((void *)&ind_min, ind_min_gpu, sizeof(int), cudaMemcpyDeviceToHost); /* cuda Frees */ cudaFree(min_gpu); cudaFree(ind_min_gpu); cudaFree(ind_start_gpu); cudaFree(ind_end_gpu); cudaFree(ord_gpu); } crosswise_area = ymin * (data[0][ind_end] - data[0][ind_start]); left_area = dpr_cuda(data, n, l, h, ind_start, ind_min); right_area = dpr_cuda(data, n, l, h, ind_min, ind_end); //Result is the max of these areas result_area = crosswise_area; if ( left_area > result_area ) result_area = left_area; if ( right_area > result_area ) result_area = right_area; return result_area; } int main(int argc, char **argv){ double debut=0.0, fin=0.0; unsigned long **data; unsigned long long S = 0, h = 0; int res = 0; int n = 0, l = 0; if(argc != 2){ printf("Usage: %s <path_of_data_file>\n", argv[0]); return -1; } char *name = argv[1]; /* Read parameters */ res = read_param_cuda(name, data, &n, &l, &h); if(res != 0){ printf("read_param :\t ERROR\n"); return -1; } /* Allocate data table */ data = (unsigned long **) malloc(2 * sizeof(unsigned long *)); data[0] = (unsigned long *) malloc(n * sizeof(unsigned long)); data[1] = (unsigned long *) malloc(n * sizeof(unsigned long)); /* Read coordinates from file */ res = read_data(name, data, n); if(res != 0){ printf("read_data :\t ERROR\n"); return -1; } /* Start timing */ debut = my_gettimeofday(); /* Do computation: */ S = dpr_cuda(data, n, l, h, 0, n-1); /* End timing */ fin = my_gettimeofday(); fprintf(stdout, "\n***** Algorithme Diviser Pour Régner, hybride *****\n"); fprintf(stdout, "Pour les paramètres N = %d\t S = %llu\nTRESHOLD_SEQ = %d\t, SIZE_PARALLEL = %d\n", n, S, TRESHOLD_SEQ, SIZE_PARALLEL); fprintf( stdout, "Total computation time in s (with gettimeofday()) :\t"); fprintf( stdout, "%g\n\n", fin - debut); return 0; }
f6b9a3cd4c59dbce71808ed202d18f026c4fcc65.hip
// !!! This is a file automatically generated by hipify!!! // Automatically generated CU for /global/project/projectdirs/m2043/zladd/NeuroGPU/GUI/Figure2_BBP_TTPC/pyNeuroGPU_unix/python./runModel.hoc #include <stdio.h> #include <stdlib.h> #include <math.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "AllModels_hip.cuh" // Universals: #define PI (3.1415927f) #define R (8.31441f) #define FARADAY (96485.309f) #define ktf (1000.*8.3134*(celsius + 273.15)/FARADAY) #define DEF_vrest -65. #define DEF_nai 10. #define DEF_nao 140. #define DEF_ena (115. + DEF_vrest) #define DEF_ki 54.4 #define DEF_ko 2.5 #define DEF_ek (-12. + DEF_vrest) #include <math.h> #define DEF_cai 5.e-5 #define DEF_cao 2. #define DEF_eca 12.5 *log(DEF_cao / DEF_cai) // GGlobals: #define celsius (34.0) #define stoprun (0.0) #define clamp_resist (0.001) #define secondorder (0.0) // NGlobals: // Reversals: #define ek (-85.0f) #define DEF_eca2 (140.21871199503352f) #define ena (50.0f) // Declarations: __device__ void Curates_Ca_HVA(MYFTYPE v,MYFTYPE gCa_HVAbar_Ca_HVA,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_Ca_LVAst(MYFTYPE v,MYFTYPE gCa_LVAstbar_Ca_LVAst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_Ih(MYFTYPE v,MYFTYPE gIhbar_Ih,MYFTYPE ehcn_Ih,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_Im(MYFTYPE v,MYFTYPE gImbar_Im,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_K_Pst(MYFTYPE v,MYFTYPE gK_Pstbar_K_Pst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_K_Tst(MYFTYPE v,MYFTYPE gK_Tstbar_K_Tst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_Nap_Et2(MYFTYPE v,MYFTYPE gNap_Et2bar_Nap_Et2,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_NaTa_t(MYFTYPE v,MYFTYPE gNaTa_tbar_NaTa_t,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_NaTs2_t(MYFTYPE v,MYFTYPE gNaTs2_tbar_NaTs2_t,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_SK_E2(MYFTYPE ca,MYFTYPE gSK_E2bar_SK_E2,MYFTYPE zTau_SK_E2,MYFTYPE &zInf); __device__ void Curates_SKv3_1(MYFTYPE v,MYFTYPE gSKv3_1bar_SKv3_1,MYFTYPE &mInf,MYFTYPE &mTau); float Cunernst(float ci,float co, float z) { if (z == 0) { return 0.; } if (ci <= 0.) { return 1e6; }else if (co <= 0.) 
{ return -1e6; }else{ return ktf/z*log(co/ci); } } // Functions: // Procedures: __device__ void Curates_Ca_HVA(MYFTYPE v,MYFTYPE gCa_HVAbar_Ca_HVA,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { if ( ( v == - 27.0 ) ) { v = v + 0.0001 ; } mAlpha = ( 0.055 * ( - 27.0 - v ) ) / ( exp ( ( - 27.0 - v ) / 3.8 ) - 1.0 ) ; mBeta = ( 0.94 * exp ( ( - 75.0 - v ) / 17.0 ) ) ; mInf = mAlpha / ( mAlpha + mBeta ) ; mTau = 1.0 / ( mAlpha + mBeta ) ; hAlpha = ( 0.000457 * exp ( ( - 13.0 - v ) / 50.0 ) ) ; hBeta = ( 0.0065 / ( exp ( ( - v - 15.0 ) / 28.0 ) + 1.0 ) ) ; hInf = hAlpha / ( hAlpha + hBeta ) ; hTau = 1.0 / ( hAlpha + hBeta ) ; } __device__ void Curates_Ca_LVAst(MYFTYPE v,MYFTYPE gCa_LVAstbar_Ca_LVAst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; v = v + 10.0 ; mInf = 1.0000 / ( 1.0 + exp ( ( v - - 30.000 ) / - 6.0 ) ) ; mTau = ( 5.0000 + 20.0000 / ( 1.0 + exp ( ( v - - 25.000 ) / 5.0 ) ) ) / qt ; hInf = 1.0000 / ( 1.0 + exp ( ( v - - 80.000 ) / 6.4 ) ) ; hTau = ( 20.0000 + 50.0000 / ( 1.0 + exp ( ( v - - 40.000 ) / 7.0 ) ) ) / qt ; v = v - 10.0 ; } __device__ void Curates_Ih(MYFTYPE v,MYFTYPE gIhbar_Ih,MYFTYPE ehcn_Ih,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { if ( v == - 154.9 ) { v = v + 0.0001 ; } mAlpha = 0.001 * 6.43 * ( v + 154.9 ) / ( exp ( ( v + 154.9 ) / 11.9 ) - 1.0 ) ; mBeta = 0.001 * 193.0 * exp ( v / 33.1 ) ; mInf = mAlpha / ( mAlpha + mBeta ) ; mTau = 1.0 / ( mAlpha + mBeta ) ; } __device__ void Curates_Im(MYFTYPE v,MYFTYPE gImbar_Im,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; mAlpha = 3.3e-3 * exp ( 2.5 * 0.04 * ( v - - 35.0 ) ) ; mBeta = 3.3e-3 * exp ( - 2.5 * 0.04 * ( v - - 35.0 ) ) ; mInf = mAlpha / ( mAlpha + mBeta ) ; mTau = ( 1.0 / ( mAlpha + mBeta ) ) / qt ; } __device__ void Curates_K_Pst(MYFTYPE v,MYFTYPE gK_Pstbar_K_Pst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; v = v + 10.0 ; mInf = ( 1.0 / ( 1.0 + exp ( - ( v + 1.0 ) / 12.0 ) ) ) ; if ( v < - 50.0 ) { mTau = ( 1.25 + 175.03 * exp ( - v * - 0.026 ) ) / qt ; } else { mTau = ( ( 1.25 + 13.0 * exp ( - v * 0.026 ) ) ) / qt ; } hInf = 1.0 / ( 1.0 + exp ( - ( v + 54.0 ) / - 11.0 ) ) ; hTau = ( 360.0 + ( 1010.0 + 24.0 * ( v + 55.0 ) ) * exp ( - powf( ( ( v + 75.0 ) / 48.0 ) , 2.0 ) ) ) / qt ; v = v - 10.0 ; } __device__ void Curates_K_Tst(MYFTYPE v,MYFTYPE gK_Tstbar_K_Tst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; v = v + 10.0 ; mInf = 1.0 / ( 1.0 + exp ( - ( v + 0.0 ) / 19.0 ) ) ; mTau = ( 0.34 + 0.92 * exp ( - powf( ( ( v + 71.0 ) / 59.0 ) , 2.0 ) ) ) / qt ; hInf = 1.0 / ( 1.0 + exp ( - ( v + 66.0 ) / - 10.0 ) ) ; hTau = ( 8.0 + 49.0 * exp ( - powf( ( ( v + 73.0 ) / 23.0 ) , 2.0 ) ) ) / qt ; v = v - 10.0 ; } __device__ void Curates_Nap_Et2(MYFTYPE v,MYFTYPE gNap_Et2bar_Nap_Et2,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; mInf = 1.0 / ( 1.0 + exp ( ( v - - 52.6 ) / - 4.6 ) ) ; if ( v == - 38.0 ) { v = v + 0.0001 ; } mAlpha = ( 0.182 * ( v - - 38.0 ) ) / ( 1.0 - ( exp ( - ( v - - 38.0 ) / 6.0 ) ) ) ; mBeta = ( 0.124 * ( - v - 38.0 ) ) / ( 1.0 - ( exp ( - ( - v - 38.0 ) / 6.0 ) ) ) ; mTau = 6.0 * ( 
1.0 / ( mAlpha + mBeta ) ) / qt ; if ( v == - 17.0 ) { v = v + 0.0001 ; } if ( v == - 64.4 ) { v = v + 0.0001 ; } hInf = 1.0 / ( 1.0 + exp ( ( v - - 48.8 ) / 10.0 ) ) ; hAlpha = - 2.88e-6 * ( v + 17.0 ) / ( 1.0 - exp ( ( v + 17.0 ) / 4.63 ) ) ; hBeta = 6.94e-6 * ( v + 64.4 ) / ( 1.0 - exp ( - ( v + 64.4 ) / 2.63 ) ) ; hTau = ( 1.0 / ( hAlpha + hBeta ) ) / qt ; } __device__ void Curates_NaTa_t(MYFTYPE v,MYFTYPE gNaTa_tbar_NaTa_t,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; if ( v == - 38.0 ) { v = v + 0.0001 ; } mAlpha = ( 0.182 * ( v - - 38.0 ) ) / ( 1.0 - ( exp ( - ( v - - 38.0 ) / 6.0 ) ) ) ; mBeta = ( 0.124 * ( - v - 38.0 ) ) / ( 1.0 - ( exp ( - ( - v - 38.0 ) / 6.0 ) ) ) ; mTau = ( 1.0 / ( mAlpha + mBeta ) ) / qt ; mInf = mAlpha / ( mAlpha + mBeta ) ; if ( v == - 66.0 ) { v = v + 0.0001 ; } hAlpha = ( - 0.015 * ( v - - 66.0 ) ) / ( 1.0 - ( exp ( ( v - - 66.0 ) / 6.0 ) ) ) ; hBeta = ( - 0.015 * ( - v - 66.0 ) ) / ( 1.0 - ( exp ( ( - v - 66.0 ) / 6.0 ) ) ) ; hTau = ( 1.0 / ( hAlpha + hBeta ) ) / qt ; hInf = hAlpha / ( hAlpha + hBeta ) ; } __device__ void Curates_NaTs2_t(MYFTYPE v,MYFTYPE gNaTs2_tbar_NaTs2_t,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; if ( v == - 32.0 ) { v = v + 0.0001 ; } mAlpha = ( 0.182 * ( v - - 32.0 ) ) / ( 1.0 - ( exp ( - ( v - - 32.0 ) / 6.0 ) ) ) ; mBeta = ( 0.124 * ( - v - 32.0 ) ) / ( 1.0 - ( exp ( - ( - v - 32.0 ) / 6.0 ) ) ) ; mInf = mAlpha / ( mAlpha + mBeta ) ; mTau = ( 1.0 / ( mAlpha + mBeta ) ) / qt ; if ( v == - 60.0 ) { v = v + 0.0001 ; } hAlpha = ( - 0.015 * ( v - - 60.0 ) ) / ( 1.0 - ( exp ( ( v - - 60.0 ) / 6.0 ) ) ) ; hBeta = ( - 0.015 * ( - v - 60.0 ) ) / ( 1.0 - ( exp ( ( - v - 60.0 ) / 6.0 ) ) ) ; hInf = hAlpha / ( hAlpha + hBeta ) ; hTau = ( 1.0 / ( hAlpha + hBeta ) ) / qt ; } __device__ void Curates_SK_E2(MYFTYPE ca,MYFTYPE gSK_E2bar_SK_E2,MYFTYPE zTau_SK_E2,MYFTYPE &zInf) { if ( ca < 1e-7 ) { ca = ca + 1e-07 ; } zInf = 1.0 / ( 1.0 + powf( ( 0.00043 / ca ) , 4.8 ) ) ; } __device__ void Curates_SKv3_1(MYFTYPE v,MYFTYPE gSKv3_1bar_SKv3_1,MYFTYPE &mInf,MYFTYPE &mTau) { mInf = 1.0 / ( 1.0 + exp ( ( ( v - ( 18.700 ) ) / ( - 9.700 ) ) ) ) ; mTau = 0.2 * 20.000 / ( 1.0 + exp ( ( ( v - ( - 46.560 ) ) / ( - 44.140 ) ) ) ) ; } // Inits: __device__ void CuInitModel_Ca_HVA(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_HVAbar_Ca_HVA, MYFTYPE &ica,MYFTYPE &eca, MYFTYPE &cai){ MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; eca = ktf/2 *log(DEF_cao / cai); Curates_Ca_HVA(v,gCa_HVAbar_Ca_HVA,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_Ca_LVAst(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_LVAstbar_Ca_LVAst, MYFTYPE &ica,MYFTYPE &eca, MYFTYPE &cai){ MYFTYPE hInf,hTau,mInf,mTau; eca = ktf/2 *log(DEF_cao / cai); Curates_Ca_LVAst(v,gCa_LVAstbar_Ca_LVAst,hInf,hTau,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_CaDynamics_E2(MYFTYPE v,MYFTYPE &cai,MYFTYPE gamma_CaDynamics_E2,MYFTYPE decay_CaDynamics_E2,MYFTYPE depth_CaDynamics_E2,MYFTYPE minCai_CaDynamics_E2, MYFTYPE ica,MYFTYPE &eca){ cai = DEF_cai; eca = ktf/2 *log(DEF_cao / cai); }; __device__ void CuInitModel_Ih(MYFTYPE v,MYFTYPE &m,MYFTYPE gIhbar_Ih,MYFTYPE ehcn_Ih){ MYFTYPE mAlpha,mBeta,mInf,mTau; Curates_Ih(v,gIhbar_Ih,ehcn_Ih,mAlpha,mBeta,mInf,mTau); m = mInf; }; 
__device__ void CuInitModel_Im(MYFTYPE v,MYFTYPE &m,MYFTYPE gImbar_Im){ MYFTYPE mAlpha,mBeta,mInf,mTau; Curates_Im(v,gImbar_Im,mAlpha,mBeta,mInf,mTau); m = mInf; }; __device__ void CuInitModel_K_Pst(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Pstbar_K_Pst){ MYFTYPE hInf,hTau,mInf,mTau; Curates_K_Pst(v,gK_Pstbar_K_Pst,hInf,hTau,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_K_Tst(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Tstbar_K_Tst){ MYFTYPE hInf,hTau,mInf,mTau; Curates_K_Tst(v,gK_Tstbar_K_Tst,hInf,hTau,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_Nap_Et2(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNap_Et2bar_Nap_Et2){ MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_Nap_Et2(v,gNap_Et2bar_Nap_Et2,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_NaTa_t(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTa_tbar_NaTa_t){ MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_NaTa_t(v,gNaTa_tbar_NaTa_t,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_NaTs2_t(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTs2_tbar_NaTs2_t){ MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_NaTs2_t(v,gNaTs2_tbar_NaTs2_t,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_pas(MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas){ }; __device__ void CuInitModel_SK_E2(MYFTYPE v,MYFTYPE &z,MYFTYPE gSK_E2bar_SK_E2,MYFTYPE zTau_SK_E2, MYFTYPE cai,MYFTYPE &eca){ MYFTYPE zInf; eca = ktf/2 *log(DEF_cao / cai); Curates_SK_E2(cai,gSK_E2bar_SK_E2,zTau_SK_E2,zInf); z = zInf; }; __device__ void CuInitModel_SKv3_1(MYFTYPE v,MYFTYPE &m,MYFTYPE gSKv3_1bar_SKv3_1){ MYFTYPE mInf,mTau; Curates_SKv3_1(v,gSKv3_1bar_SKv3_1,mInf,mTau); m = mInf; }; // Derivs: __device__ void CuDerivModel_Ca_HVA(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_HVAbar_Ca_HVA, MYFTYPE &ica){ MYFTYPE gCa; MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_Ca_HVA (v,gCa_HVAbar_Ca_HVA,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_Ca_LVAst(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_LVAstbar_Ca_LVAst, MYFTYPE &ica){ MYFTYPE gCa_LVAst; MYFTYPE hInf,hTau,mInf,mTau; Curates_Ca_LVAst (v,gCa_LVAstbar_Ca_LVAst,hInf,hTau,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_CaDynamics_E2(MYFTYPE dt, MYFTYPE v,MYFTYPE &cai,MYFTYPE gamma_CaDynamics_E2,MYFTYPE decay_CaDynamics_E2,MYFTYPE depth_CaDynamics_E2,MYFTYPE minCai_CaDynamics_E2, MYFTYPE ica,MYFTYPE &eca){ cai = cai + (1. 
- exp(dt*(( - ( ( 1.0 ) ) / decay_CaDynamics_E2 ))))*(- ( ( - ( 10000.0 ) )*( ( ( ( ica )*( gamma_CaDynamics_E2 ) ) / ( 2.0 * FARADAY * depth_CaDynamics_E2 ) ) ) - ( ( ( - minCai_CaDynamics_E2 ) ) ) / decay_CaDynamics_E2 ) / ( ( - ( ( 1.0 ) ) / decay_CaDynamics_E2 ) ) - cai) ; eca = ktf/2 *log(DEF_cao / cai); } __device__ void CuDerivModel_Ih(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE gIhbar_Ih,MYFTYPE ehcn_Ih){ MYFTYPE gIh,ihcn; MYFTYPE mAlpha,mBeta,mInf,mTau; Curates_Ih (v,gIhbar_Ih,ehcn_Ih,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; } __device__ void CuDerivModel_Im(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE gImbar_Im){ MYFTYPE gIm; MYFTYPE mAlpha,mBeta,mInf,mTau; Curates_Im (v,gImbar_Im,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; } __device__ void CuDerivModel_K_Pst(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Pstbar_K_Pst){ MYFTYPE gK_Pst; MYFTYPE hInf,hTau,mInf,mTau; Curates_K_Pst (v,gK_Pstbar_K_Pst,hInf,hTau,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_K_Tst(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Tstbar_K_Tst){ MYFTYPE gK_Tst; MYFTYPE hInf,hTau,mInf,mTau; Curates_K_Tst (v,gK_Tstbar_K_Tst,hInf,hTau,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_Nap_Et2(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNap_Et2bar_Nap_Et2){ MYFTYPE gNap_Et2; MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_Nap_Et2 (v,gNap_Et2bar_Nap_Et2,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_NaTa_t(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTa_tbar_NaTa_t){ MYFTYPE gNaTa_t; MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_NaTa_t (v,gNaTa_tbar_NaTa_t,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_NaTs2_t(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTs2_tbar_NaTs2_t){ MYFTYPE gNaTs2_t; MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_NaTs2_t (v,gNaTs2_tbar_NaTs2_t,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_SK_E2(MYFTYPE dt, MYFTYPE v,MYFTYPE &z,MYFTYPE gSK_E2bar_SK_E2,MYFTYPE zTau_SK_E2, MYFTYPE cai,MYFTYPE &eca){ MYFTYPE gSK_E2; MYFTYPE zInf; Curates_SK_E2 ( cai,gSK_E2bar_SK_E2,zTau_SK_E2,zInf); z = z + (1. 
- exp(dt*(( ( ( - 1.0 ) ) ) / zTau_SK_E2)))*(- ( ( ( zInf ) ) / zTau_SK_E2 ) / ( ( ( ( - 1.0 ) ) ) / zTau_SK_E2 ) - z) ; eca = ktf/2 *log(DEF_cao / cai); } __device__ void CuDerivModel_SKv3_1(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE gSKv3_1bar_SKv3_1){ MYFTYPE gSKv3_1; MYFTYPE mInf,mTau; Curates_SKv3_1 (v,gSKv3_1bar_SKv3_1,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; } // Breaks: __device__ void CuBreakpointModel_Ca_HVA(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_HVAbar_Ca_HVA, MYFTYPE &ica,MYFTYPE &eca, MYFTYPE &cai) { MYFTYPE gCa, gCa_HVA; MYFTYPE ; MYFTYPE ica_Ca_HVA; gCa = gCa_HVAbar_Ca_HVA * m * m * h ; ica_Ca_HVA = gCa * ( v - eca ) ; sumCurrents+= ica_Ca_HVA; ica += ica_Ca_HVA; sumConductivity+= gCa; }; __device__ void CuBreakpointModel_Ca_LVAst(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_LVAstbar_Ca_LVAst, MYFTYPE &ica,MYFTYPE &eca, MYFTYPE &cai) { MYFTYPE gCa_LVAst, gca; MYFTYPE ; MYFTYPE ica_Ca_LVAst; gCa_LVAst = gCa_LVAstbar_Ca_LVAst * m * m * h ; ica_Ca_LVAst = gCa_LVAst * ( v - eca ) ; sumCurrents+= ica_Ca_LVAst; ica += ica_Ca_LVAst; sumConductivity+= gCa_LVAst; }; __device__ void CuBreakpointModel_CaDynamics_E2(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &cai,MYFTYPE gamma_CaDynamics_E2,MYFTYPE decay_CaDynamics_E2,MYFTYPE depth_CaDynamics_E2,MYFTYPE minCai_CaDynamics_E2, MYFTYPE ica,MYFTYPE &eca) { MYFTYPE gca; MYFTYPE ; }; __device__ void CuBreakpointModel_Ih(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE gIhbar_Ih,MYFTYPE ehcn_Ih) { MYFTYPE gIh, ihcn; MYFTYPE i; gIh = gIhbar_Ih * m ; ihcn = gIh * ( v - ehcn_Ih ) ; i = ihcn; sumCurrents+= i; sumConductivity+= gIh; }; __device__ void CuBreakpointModel_Im(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE gImbar_Im) { MYFTYPE gk, ik, gIm; MYFTYPE ; gIm = gImbar_Im * m ; ik = gIm * ( v - ek ) ; sumCurrents+= ik; sumConductivity+= gIm; }; __device__ void CuBreakpointModel_K_Pst(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Pstbar_K_Pst) { MYFTYPE gk, gK_Pst, ik; MYFTYPE ; gK_Pst = gK_Pstbar_K_Pst * m * m * h ; ik = gK_Pst * ( v - ek ) ; sumCurrents+= ik; sumConductivity+= gK_Pst; }; __device__ void CuBreakpointModel_K_Tst(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Tstbar_K_Tst) { MYFTYPE gk, ik, gK_Tst; MYFTYPE ; gK_Tst = gK_Tstbar_K_Tst * powf( m , 4.0 ) * h ; ik = gK_Tst * ( v - ek ) ; sumCurrents+= ik; sumConductivity+= gK_Tst; }; __device__ void CuBreakpointModel_Nap_Et2(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNap_Et2bar_Nap_Et2) { MYFTYPE ina, gNap_Et2, gna; MYFTYPE ; gNap_Et2 = gNap_Et2bar_Nap_Et2 * m * m * m * h ; ina = gNap_Et2 * ( v - ena ) ; sumCurrents+= ina; sumConductivity+= gNap_Et2; }; __device__ void CuBreakpointModel_NaTa_t(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTa_tbar_NaTa_t) { MYFTYPE ina, gNaTa_t, gna; MYFTYPE ; gNaTa_t = gNaTa_tbar_NaTa_t * m * m * m * h ; ina = gNaTa_t * ( v - ena ) ; sumCurrents+= ina; sumConductivity+= gNaTa_t; }; __device__ void CuBreakpointModel_NaTs2_t(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTs2_tbar_NaTs2_t) { MYFTYPE ina, gNaTs2_t, 
gna; MYFTYPE ; gNaTs2_t = gNaTs2_tbar_NaTs2_t * m * m * m * h ; ina = gNaTs2_t * ( v - ena ) ; sumCurrents+= ina; sumConductivity+= gNaTs2_t; }; __device__ void CuBreakpointModel_pas(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas) { MYFTYPE; MYFTYPE i; i = g_pas * ( v - e_pas ) ; i = i; sumCurrents+= i; sumConductivity+= g_pas; }; __device__ void CuBreakpointModel_SK_E2(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &z,MYFTYPE gSK_E2bar_SK_E2,MYFTYPE zTau_SK_E2, MYFTYPE cai,MYFTYPE &eca) { MYFTYPE gSK_E2, gk, gca, ik; MYFTYPE ; gSK_E2 = gSK_E2bar_SK_E2 * z ; ik = gSK_E2 * ( v - ek ) ; sumCurrents+= ik; sumConductivity+= gSK_E2; }; __device__ void CuBreakpointModel_SKv3_1(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE gSKv3_1bar_SKv3_1) { MYFTYPE gSKv3_1, gk, ik; MYFTYPE ; gSKv3_1 = gSKv3_1bar_SKv3_1 * m ; ik = gSKv3_1 * ( v - ek ) ; sumCurrents+= ik; sumConductivity+= gSKv3_1; };
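A note on the machine-generated CuDerivModel_* updates above: every state line of the form m = m + (1. - exp(dt*(-1/mTau)))*(mInf - m) is the exact exponential-Euler step for dm/dt = (mInf - m)/mTau, i.e. m(t+dt) = mInf + (m(t) - mInf)*exp(-dt/mTau). A compact equivalent is sketched below (a hypothetical helper, not part of the generated file; MYFTYPE is assumed to come from the included AllModels header):

__device__ inline MYFTYPE exp_euler_step(MYFTYPE state, MYFTYPE inf, MYFTYPE tau, MYFTYPE dt)
{
    // Exact update of d(state)/dt = (inf - state)/tau over one step dt;
    // numerically identical to the expanded expressions generated above.
    return inf + (state - inf) * exp(-dt / tau);
}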
f6b9a3cd4c59dbce71808ed202d18f026c4fcc65.cu
// Automatically generated CU for /global/project/projectdirs/m2043/zladd/NeuroGPU/GUI/Figure2_BBP_TTPC/pyNeuroGPU_unix/python./runModel.hoc #include <stdio.h> #include <stdlib.h> #include <math.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "AllModels.cuh" // Universals: #define PI (3.1415927f) #define R (8.31441f) #define FARADAY (96485.309f) #define ktf (1000.*8.3134*(celsius + 273.15)/FARADAY) #define DEF_vrest -65. #define DEF_nai 10. #define DEF_nao 140. #define DEF_ena (115. + DEF_vrest) #define DEF_ki 54.4 #define DEF_ko 2.5 #define DEF_ek (-12. + DEF_vrest) #include <math.h> #define DEF_cai 5.e-5 #define DEF_cao 2. #define DEF_eca 12.5 *log(DEF_cao / DEF_cai) // GGlobals: #define celsius (34.0) #define stoprun (0.0) #define clamp_resist (0.001) #define secondorder (0.0) // NGlobals: // Reversals: #define ek (-85.0f) #define DEF_eca2 (140.21871199503352f) #define ena (50.0f) // Declarations: __device__ void Curates_Ca_HVA(MYFTYPE v,MYFTYPE gCa_HVAbar_Ca_HVA,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_Ca_LVAst(MYFTYPE v,MYFTYPE gCa_LVAstbar_Ca_LVAst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_Ih(MYFTYPE v,MYFTYPE gIhbar_Ih,MYFTYPE ehcn_Ih,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_Im(MYFTYPE v,MYFTYPE gImbar_Im,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_K_Pst(MYFTYPE v,MYFTYPE gK_Pstbar_K_Pst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_K_Tst(MYFTYPE v,MYFTYPE gK_Tstbar_K_Tst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_Nap_Et2(MYFTYPE v,MYFTYPE gNap_Et2bar_Nap_Et2,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_NaTa_t(MYFTYPE v,MYFTYPE gNaTa_tbar_NaTa_t,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_NaTs2_t(MYFTYPE v,MYFTYPE gNaTs2_tbar_NaTs2_t,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau); __device__ void Curates_SK_E2(MYFTYPE ca,MYFTYPE gSK_E2bar_SK_E2,MYFTYPE zTau_SK_E2,MYFTYPE &zInf); __device__ void Curates_SKv3_1(MYFTYPE v,MYFTYPE gSKv3_1bar_SKv3_1,MYFTYPE &mInf,MYFTYPE &mTau); float Cunernst(float ci,float co, float z) { if (z == 0) { return 0.; } if (ci <= 0.) { return 1e6; }else if (co <= 0.) 
{ return -1e6; }else{ return ktf/z*log(co/ci); } } // Functions: // Procedures: __device__ void Curates_Ca_HVA(MYFTYPE v,MYFTYPE gCa_HVAbar_Ca_HVA,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { if ( ( v == - 27.0 ) ) { v = v + 0.0001 ; } mAlpha = ( 0.055 * ( - 27.0 - v ) ) / ( exp ( ( - 27.0 - v ) / 3.8 ) - 1.0 ) ; mBeta = ( 0.94 * exp ( ( - 75.0 - v ) / 17.0 ) ) ; mInf = mAlpha / ( mAlpha + mBeta ) ; mTau = 1.0 / ( mAlpha + mBeta ) ; hAlpha = ( 0.000457 * exp ( ( - 13.0 - v ) / 50.0 ) ) ; hBeta = ( 0.0065 / ( exp ( ( - v - 15.0 ) / 28.0 ) + 1.0 ) ) ; hInf = hAlpha / ( hAlpha + hBeta ) ; hTau = 1.0 / ( hAlpha + hBeta ) ; } __device__ void Curates_Ca_LVAst(MYFTYPE v,MYFTYPE gCa_LVAstbar_Ca_LVAst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; v = v + 10.0 ; mInf = 1.0000 / ( 1.0 + exp ( ( v - - 30.000 ) / - 6.0 ) ) ; mTau = ( 5.0000 + 20.0000 / ( 1.0 + exp ( ( v - - 25.000 ) / 5.0 ) ) ) / qt ; hInf = 1.0000 / ( 1.0 + exp ( ( v - - 80.000 ) / 6.4 ) ) ; hTau = ( 20.0000 + 50.0000 / ( 1.0 + exp ( ( v - - 40.000 ) / 7.0 ) ) ) / qt ; v = v - 10.0 ; } __device__ void Curates_Ih(MYFTYPE v,MYFTYPE gIhbar_Ih,MYFTYPE ehcn_Ih,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { if ( v == - 154.9 ) { v = v + 0.0001 ; } mAlpha = 0.001 * 6.43 * ( v + 154.9 ) / ( exp ( ( v + 154.9 ) / 11.9 ) - 1.0 ) ; mBeta = 0.001 * 193.0 * exp ( v / 33.1 ) ; mInf = mAlpha / ( mAlpha + mBeta ) ; mTau = 1.0 / ( mAlpha + mBeta ) ; } __device__ void Curates_Im(MYFTYPE v,MYFTYPE gImbar_Im,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; mAlpha = 3.3e-3 * exp ( 2.5 * 0.04 * ( v - - 35.0 ) ) ; mBeta = 3.3e-3 * exp ( - 2.5 * 0.04 * ( v - - 35.0 ) ) ; mInf = mAlpha / ( mAlpha + mBeta ) ; mTau = ( 1.0 / ( mAlpha + mBeta ) ) / qt ; } __device__ void Curates_K_Pst(MYFTYPE v,MYFTYPE gK_Pstbar_K_Pst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; v = v + 10.0 ; mInf = ( 1.0 / ( 1.0 + exp ( - ( v + 1.0 ) / 12.0 ) ) ) ; if ( v < - 50.0 ) { mTau = ( 1.25 + 175.03 * exp ( - v * - 0.026 ) ) / qt ; } else { mTau = ( ( 1.25 + 13.0 * exp ( - v * 0.026 ) ) ) / qt ; } hInf = 1.0 / ( 1.0 + exp ( - ( v + 54.0 ) / - 11.0 ) ) ; hTau = ( 360.0 + ( 1010.0 + 24.0 * ( v + 55.0 ) ) * exp ( - powf( ( ( v + 75.0 ) / 48.0 ) , 2.0 ) ) ) / qt ; v = v - 10.0 ; } __device__ void Curates_K_Tst(MYFTYPE v,MYFTYPE gK_Tstbar_K_Tst,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; v = v + 10.0 ; mInf = 1.0 / ( 1.0 + exp ( - ( v + 0.0 ) / 19.0 ) ) ; mTau = ( 0.34 + 0.92 * exp ( - powf( ( ( v + 71.0 ) / 59.0 ) , 2.0 ) ) ) / qt ; hInf = 1.0 / ( 1.0 + exp ( - ( v + 66.0 ) / - 10.0 ) ) ; hTau = ( 8.0 + 49.0 * exp ( - powf( ( ( v + 73.0 ) / 23.0 ) , 2.0 ) ) ) / qt ; v = v - 10.0 ; } __device__ void Curates_Nap_Et2(MYFTYPE v,MYFTYPE gNap_Et2bar_Nap_Et2,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; mInf = 1.0 / ( 1.0 + exp ( ( v - - 52.6 ) / - 4.6 ) ) ; if ( v == - 38.0 ) { v = v + 0.0001 ; } mAlpha = ( 0.182 * ( v - - 38.0 ) ) / ( 1.0 - ( exp ( - ( v - - 38.0 ) / 6.0 ) ) ) ; mBeta = ( 0.124 * ( - v - 38.0 ) ) / ( 1.0 - ( exp ( - ( - v - 38.0 ) / 6.0 ) ) ) ; mTau = 6.0 * ( 
1.0 / ( mAlpha + mBeta ) ) / qt ; if ( v == - 17.0 ) { v = v + 0.0001 ; } if ( v == - 64.4 ) { v = v + 0.0001 ; } hInf = 1.0 / ( 1.0 + exp ( ( v - - 48.8 ) / 10.0 ) ) ; hAlpha = - 2.88e-6 * ( v + 17.0 ) / ( 1.0 - exp ( ( v + 17.0 ) / 4.63 ) ) ; hBeta = 6.94e-6 * ( v + 64.4 ) / ( 1.0 - exp ( - ( v + 64.4 ) / 2.63 ) ) ; hTau = ( 1.0 / ( hAlpha + hBeta ) ) / qt ; } __device__ void Curates_NaTa_t(MYFTYPE v,MYFTYPE gNaTa_tbar_NaTa_t,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; if ( v == - 38.0 ) { v = v + 0.0001 ; } mAlpha = ( 0.182 * ( v - - 38.0 ) ) / ( 1.0 - ( exp ( - ( v - - 38.0 ) / 6.0 ) ) ) ; mBeta = ( 0.124 * ( - v - 38.0 ) ) / ( 1.0 - ( exp ( - ( - v - 38.0 ) / 6.0 ) ) ) ; mTau = ( 1.0 / ( mAlpha + mBeta ) ) / qt ; mInf = mAlpha / ( mAlpha + mBeta ) ; if ( v == - 66.0 ) { v = v + 0.0001 ; } hAlpha = ( - 0.015 * ( v - - 66.0 ) ) / ( 1.0 - ( exp ( ( v - - 66.0 ) / 6.0 ) ) ) ; hBeta = ( - 0.015 * ( - v - 66.0 ) ) / ( 1.0 - ( exp ( ( - v - 66.0 ) / 6.0 ) ) ) ; hTau = ( 1.0 / ( hAlpha + hBeta ) ) / qt ; hInf = hAlpha / ( hAlpha + hBeta ) ; } __device__ void Curates_NaTs2_t(MYFTYPE v,MYFTYPE gNaTs2_tbar_NaTs2_t,MYFTYPE &hAlpha,MYFTYPE &hBeta,MYFTYPE &hInf,MYFTYPE &hTau,MYFTYPE &mAlpha,MYFTYPE &mBeta,MYFTYPE &mInf,MYFTYPE &mTau) { MYFTYPE qt ; qt = powf( 2.3 , ( ( 34.0 - 21.0 ) / 10.0 ) ) ; if ( v == - 32.0 ) { v = v + 0.0001 ; } mAlpha = ( 0.182 * ( v - - 32.0 ) ) / ( 1.0 - ( exp ( - ( v - - 32.0 ) / 6.0 ) ) ) ; mBeta = ( 0.124 * ( - v - 32.0 ) ) / ( 1.0 - ( exp ( - ( - v - 32.0 ) / 6.0 ) ) ) ; mInf = mAlpha / ( mAlpha + mBeta ) ; mTau = ( 1.0 / ( mAlpha + mBeta ) ) / qt ; if ( v == - 60.0 ) { v = v + 0.0001 ; } hAlpha = ( - 0.015 * ( v - - 60.0 ) ) / ( 1.0 - ( exp ( ( v - - 60.0 ) / 6.0 ) ) ) ; hBeta = ( - 0.015 * ( - v - 60.0 ) ) / ( 1.0 - ( exp ( ( - v - 60.0 ) / 6.0 ) ) ) ; hInf = hAlpha / ( hAlpha + hBeta ) ; hTau = ( 1.0 / ( hAlpha + hBeta ) ) / qt ; } __device__ void Curates_SK_E2(MYFTYPE ca,MYFTYPE gSK_E2bar_SK_E2,MYFTYPE zTau_SK_E2,MYFTYPE &zInf) { if ( ca < 1e-7 ) { ca = ca + 1e-07 ; } zInf = 1.0 / ( 1.0 + powf( ( 0.00043 / ca ) , 4.8 ) ) ; } __device__ void Curates_SKv3_1(MYFTYPE v,MYFTYPE gSKv3_1bar_SKv3_1,MYFTYPE &mInf,MYFTYPE &mTau) { mInf = 1.0 / ( 1.0 + exp ( ( ( v - ( 18.700 ) ) / ( - 9.700 ) ) ) ) ; mTau = 0.2 * 20.000 / ( 1.0 + exp ( ( ( v - ( - 46.560 ) ) / ( - 44.140 ) ) ) ) ; } // Inits: __device__ void CuInitModel_Ca_HVA(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_HVAbar_Ca_HVA, MYFTYPE &ica,MYFTYPE &eca, MYFTYPE &cai){ MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; eca = ktf/2 *log(DEF_cao / cai); Curates_Ca_HVA(v,gCa_HVAbar_Ca_HVA,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_Ca_LVAst(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_LVAstbar_Ca_LVAst, MYFTYPE &ica,MYFTYPE &eca, MYFTYPE &cai){ MYFTYPE hInf,hTau,mInf,mTau; eca = ktf/2 *log(DEF_cao / cai); Curates_Ca_LVAst(v,gCa_LVAstbar_Ca_LVAst,hInf,hTau,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_CaDynamics_E2(MYFTYPE v,MYFTYPE &cai,MYFTYPE gamma_CaDynamics_E2,MYFTYPE decay_CaDynamics_E2,MYFTYPE depth_CaDynamics_E2,MYFTYPE minCai_CaDynamics_E2, MYFTYPE ica,MYFTYPE &eca){ cai = DEF_cai; eca = ktf/2 *log(DEF_cao / cai); }; __device__ void CuInitModel_Ih(MYFTYPE v,MYFTYPE &m,MYFTYPE gIhbar_Ih,MYFTYPE ehcn_Ih){ MYFTYPE mAlpha,mBeta,mInf,mTau; Curates_Ih(v,gIhbar_Ih,ehcn_Ih,mAlpha,mBeta,mInf,mTau); m = mInf; }; 
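// Quick numerical check of the reversal-potential macros defined at the top of this
// file: with celsius = 34, ktf = 1000.*8.3134*(34. + 273.15)/96485.309 ~ 26.46 (mV),
// so for divalent calcium (z = 2) at the default concentrations
//   Cunernst(DEF_cai, DEF_cao, 2.) = (ktf/2) * log(2. / 5.e-5) ~ 13.23 * 10.60 ~ 140.2 mV,
// which agrees with DEF_eca2 (140.21871...) above. The same expression,
// eca = ktf/2 * log(DEF_cao / cai), is what the calcium-dependent init/deriv
// routines in this file recompute whenever cai changes.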
__device__ void CuInitModel_Im(MYFTYPE v,MYFTYPE &m,MYFTYPE gImbar_Im){ MYFTYPE mAlpha,mBeta,mInf,mTau; Curates_Im(v,gImbar_Im,mAlpha,mBeta,mInf,mTau); m = mInf; }; __device__ void CuInitModel_K_Pst(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Pstbar_K_Pst){ MYFTYPE hInf,hTau,mInf,mTau; Curates_K_Pst(v,gK_Pstbar_K_Pst,hInf,hTau,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_K_Tst(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Tstbar_K_Tst){ MYFTYPE hInf,hTau,mInf,mTau; Curates_K_Tst(v,gK_Tstbar_K_Tst,hInf,hTau,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_Nap_Et2(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNap_Et2bar_Nap_Et2){ MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_Nap_Et2(v,gNap_Et2bar_Nap_Et2,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_NaTa_t(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTa_tbar_NaTa_t){ MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_NaTa_t(v,gNaTa_tbar_NaTa_t,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_NaTs2_t(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTs2_tbar_NaTs2_t){ MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_NaTs2_t(v,gNaTs2_tbar_NaTs2_t,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = mInf; h = hInf; }; __device__ void CuInitModel_pas(MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas){ }; __device__ void CuInitModel_SK_E2(MYFTYPE v,MYFTYPE &z,MYFTYPE gSK_E2bar_SK_E2,MYFTYPE zTau_SK_E2, MYFTYPE cai,MYFTYPE &eca){ MYFTYPE zInf; eca = ktf/2 *log(DEF_cao / cai); Curates_SK_E2(cai,gSK_E2bar_SK_E2,zTau_SK_E2,zInf); z = zInf; }; __device__ void CuInitModel_SKv3_1(MYFTYPE v,MYFTYPE &m,MYFTYPE gSKv3_1bar_SKv3_1){ MYFTYPE mInf,mTau; Curates_SKv3_1(v,gSKv3_1bar_SKv3_1,mInf,mTau); m = mInf; }; // Derivs: __device__ void CuDerivModel_Ca_HVA(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_HVAbar_Ca_HVA, MYFTYPE &ica){ MYFTYPE gCa; MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_Ca_HVA (v,gCa_HVAbar_Ca_HVA,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_Ca_LVAst(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_LVAstbar_Ca_LVAst, MYFTYPE &ica){ MYFTYPE gCa_LVAst; MYFTYPE hInf,hTau,mInf,mTau; Curates_Ca_LVAst (v,gCa_LVAstbar_Ca_LVAst,hInf,hTau,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_CaDynamics_E2(MYFTYPE dt, MYFTYPE v,MYFTYPE &cai,MYFTYPE gamma_CaDynamics_E2,MYFTYPE decay_CaDynamics_E2,MYFTYPE depth_CaDynamics_E2,MYFTYPE minCai_CaDynamics_E2, MYFTYPE ica,MYFTYPE &eca){ cai = cai + (1. 
- exp(dt*(( - ( ( 1.0 ) ) / decay_CaDynamics_E2 ))))*(- ( ( - ( 10000.0 ) )*( ( ( ( ica )*( gamma_CaDynamics_E2 ) ) / ( 2.0 * FARADAY * depth_CaDynamics_E2 ) ) ) - ( ( ( - minCai_CaDynamics_E2 ) ) ) / decay_CaDynamics_E2 ) / ( ( - ( ( 1.0 ) ) / decay_CaDynamics_E2 ) ) - cai) ; eca = ktf/2 *log(DEF_cao / cai); } __device__ void CuDerivModel_Ih(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE gIhbar_Ih,MYFTYPE ehcn_Ih){ MYFTYPE gIh,ihcn; MYFTYPE mAlpha,mBeta,mInf,mTau; Curates_Ih (v,gIhbar_Ih,ehcn_Ih,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; } __device__ void CuDerivModel_Im(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE gImbar_Im){ MYFTYPE gIm; MYFTYPE mAlpha,mBeta,mInf,mTau; Curates_Im (v,gImbar_Im,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; } __device__ void CuDerivModel_K_Pst(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Pstbar_K_Pst){ MYFTYPE gK_Pst; MYFTYPE hInf,hTau,mInf,mTau; Curates_K_Pst (v,gK_Pstbar_K_Pst,hInf,hTau,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_K_Tst(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Tstbar_K_Tst){ MYFTYPE gK_Tst; MYFTYPE hInf,hTau,mInf,mTau; Curates_K_Tst (v,gK_Tstbar_K_Tst,hInf,hTau,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_Nap_Et2(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNap_Et2bar_Nap_Et2){ MYFTYPE gNap_Et2; MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_Nap_Et2 (v,gNap_Et2bar_Nap_Et2,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_NaTa_t(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTa_tbar_NaTa_t){ MYFTYPE gNaTa_t; MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_NaTa_t (v,gNaTa_tbar_NaTa_t,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_NaTs2_t(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTs2_tbar_NaTs2_t){ MYFTYPE gNaTs2_t; MYFTYPE hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau; Curates_NaTs2_t (v,gNaTs2_tbar_NaTs2_t,hAlpha,hBeta,hInf,hTau,mAlpha,mBeta,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / hTau)))*(- ( ( ( hInf ) ) / hTau ) / ( ( ( ( - 1.0 ) ) ) / hTau ) - h) ; } __device__ void CuDerivModel_SK_E2(MYFTYPE dt, MYFTYPE v,MYFTYPE &z,MYFTYPE gSK_E2bar_SK_E2,MYFTYPE zTau_SK_E2, MYFTYPE cai,MYFTYPE &eca){ MYFTYPE gSK_E2; MYFTYPE zInf; Curates_SK_E2 ( cai,gSK_E2bar_SK_E2,zTau_SK_E2,zInf); z = z + (1. 
- exp(dt*(( ( ( - 1.0 ) ) ) / zTau_SK_E2)))*(- ( ( ( zInf ) ) / zTau_SK_E2 ) / ( ( ( ( - 1.0 ) ) ) / zTau_SK_E2 ) - z) ; eca = ktf/2 *log(DEF_cao / cai); } __device__ void CuDerivModel_SKv3_1(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE gSKv3_1bar_SKv3_1){ MYFTYPE gSKv3_1; MYFTYPE mInf,mTau; Curates_SKv3_1 (v,gSKv3_1bar_SKv3_1,mInf,mTau); m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mTau)))*(- ( ( ( mInf ) ) / mTau ) / ( ( ( ( - 1.0 ) ) ) / mTau ) - m) ; } // Breaks: __device__ void CuBreakpointModel_Ca_HVA(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_HVAbar_Ca_HVA, MYFTYPE &ica,MYFTYPE &eca, MYFTYPE &cai) { MYFTYPE gCa, gCa_HVA; MYFTYPE ; MYFTYPE ica_Ca_HVA; gCa = gCa_HVAbar_Ca_HVA * m * m * h ; ica_Ca_HVA = gCa * ( v - eca ) ; sumCurrents+= ica_Ca_HVA; ica += ica_Ca_HVA; sumConductivity+= gCa; }; __device__ void CuBreakpointModel_Ca_LVAst(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gCa_LVAstbar_Ca_LVAst, MYFTYPE &ica,MYFTYPE &eca, MYFTYPE &cai) { MYFTYPE gCa_LVAst, gca; MYFTYPE ; MYFTYPE ica_Ca_LVAst; gCa_LVAst = gCa_LVAstbar_Ca_LVAst * m * m * h ; ica_Ca_LVAst = gCa_LVAst * ( v - eca ) ; sumCurrents+= ica_Ca_LVAst; ica += ica_Ca_LVAst; sumConductivity+= gCa_LVAst; }; __device__ void CuBreakpointModel_CaDynamics_E2(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &cai,MYFTYPE gamma_CaDynamics_E2,MYFTYPE decay_CaDynamics_E2,MYFTYPE depth_CaDynamics_E2,MYFTYPE minCai_CaDynamics_E2, MYFTYPE ica,MYFTYPE &eca) { MYFTYPE gca; MYFTYPE ; }; __device__ void CuBreakpointModel_Ih(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE gIhbar_Ih,MYFTYPE ehcn_Ih) { MYFTYPE gIh, ihcn; MYFTYPE i; gIh = gIhbar_Ih * m ; ihcn = gIh * ( v - ehcn_Ih ) ; i = ihcn; sumCurrents+= i; sumConductivity+= gIh; }; __device__ void CuBreakpointModel_Im(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE gImbar_Im) { MYFTYPE gk, ik, gIm; MYFTYPE ; gIm = gImbar_Im * m ; ik = gIm * ( v - ek ) ; sumCurrents+= ik; sumConductivity+= gIm; }; __device__ void CuBreakpointModel_K_Pst(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Pstbar_K_Pst) { MYFTYPE gk, gK_Pst, ik; MYFTYPE ; gK_Pst = gK_Pstbar_K_Pst * m * m * h ; ik = gK_Pst * ( v - ek ) ; sumCurrents+= ik; sumConductivity+= gK_Pst; }; __device__ void CuBreakpointModel_K_Tst(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gK_Tstbar_K_Tst) { MYFTYPE gk, ik, gK_Tst; MYFTYPE ; gK_Tst = gK_Tstbar_K_Tst * powf( m , 4.0 ) * h ; ik = gK_Tst * ( v - ek ) ; sumCurrents+= ik; sumConductivity+= gK_Tst; }; __device__ void CuBreakpointModel_Nap_Et2(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNap_Et2bar_Nap_Et2) { MYFTYPE ina, gNap_Et2, gna; MYFTYPE ; gNap_Et2 = gNap_Et2bar_Nap_Et2 * m * m * m * h ; ina = gNap_Et2 * ( v - ena ) ; sumCurrents+= ina; sumConductivity+= gNap_Et2; }; __device__ void CuBreakpointModel_NaTa_t(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTa_tbar_NaTa_t) { MYFTYPE ina, gNaTa_t, gna; MYFTYPE ; gNaTa_t = gNaTa_tbar_NaTa_t * m * m * m * h ; ina = gNaTa_t * ( v - ena ) ; sumCurrents+= ina; sumConductivity+= gNaTa_t; }; __device__ void CuBreakpointModel_NaTs2_t(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gNaTs2_tbar_NaTs2_t) { MYFTYPE ina, gNaTs2_t, 
gna; MYFTYPE ; gNaTs2_t = gNaTs2_tbar_NaTs2_t * m * m * m * h ; ina = gNaTs2_t * ( v - ena ) ; sumCurrents+= ina; sumConductivity+= gNaTs2_t; }; __device__ void CuBreakpointModel_pas(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas) { MYFTYPE; MYFTYPE i; i = g_pas * ( v - e_pas ) ; i = i; sumCurrents+= i; sumConductivity+= g_pas; }; __device__ void CuBreakpointModel_SK_E2(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &z,MYFTYPE gSK_E2bar_SK_E2,MYFTYPE zTau_SK_E2, MYFTYPE cai,MYFTYPE &eca) { MYFTYPE gSK_E2, gk, gca, ik; MYFTYPE ; gSK_E2 = gSK_E2bar_SK_E2 * z ; ik = gSK_E2 * ( v - ek ) ; sumCurrents+= ik; sumConductivity+= gSK_E2; }; __device__ void CuBreakpointModel_SKv3_1(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE gSKv3_1bar_SKv3_1) { MYFTYPE gSKv3_1, gk, ik; MYFTYPE ; gSKv3_1 = gSKv3_1bar_SKv3_1 * m ; ik = gSKv3_1 * ( v - ek ) ; sumCurrents+= ik; sumConductivity+= gSKv3_1; };
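// Note on the CuDerivModel_* updates above: each gating state is advanced with the
// exponential-Euler ("cnexp") scheme NEURON uses for first-order kinetics. The
// generated expression
//   x = x + (1 - exp(dt*(-1/xTau))) * ( -(xInf/xTau)/(-1/xTau) - x )
// simplifies to
//   x += (1 - exp(-dt/xTau)) * (xInf - x),
// i.e. an exact step of dx/dt = (xInf - x)/xTau with xInf and xTau held fixed over dt.
// A minimal equivalent helper (gate_step is illustrative only and is not referenced
// elsewhere in this file):
__device__ inline MYFTYPE gate_step(MYFTYPE x, MYFTYPE xInf, MYFTYPE xTau, MYFTYPE dt) {
    return x + (1. - exp(-dt / xTau)) * (xInf - x);
}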
27965ebf6866a041c6da2ef0cd77be4aadb1c776.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* The implementation of this file is based on skipLayerNorm plugin in TensorRT demo: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/ Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "layer_norm.cuh" #include "skip_layer_norm_impl.h" #include <hip/hip_fp16.h> namespace onnxruntime { namespace contrib { namespace cuda { template <typename T, unsigned TPB> __global__ void SkipLayerNormKernelSmall( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, const T epsilon, T* output) { const T reverse_ld = T(1.f / ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 hipcub::KeyValuePair<T, T> thread_data(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < ld) { val = (bias == nullptr) ? input[idx] + skip[idx] : input[idx] + skip[idx] + bias[threadIdx.x]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, hipcub::KeyValuePair<T, T>(rldval, rldval * val)); } LayerNormSmall<T, TPB>(val, thread_data, ld, idx, beta, gamma, epsilon, output); } template <typename T, unsigned TPB> __global__ void SkipLayerNormKernel( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, const T epsilon, T* output) { const T reverse_ld = T(1.f / ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 hipcub::KeyValuePair<T, T> thread_data(0, 0); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = (bias == nullptr) ? 
input[idx] + skip[idx] : input[idx] + skip[idx] + bias[i]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, hipcub::KeyValuePair<T, T>(rldval, rldval * val)); output[idx] = val; } LayerNorm<T, TPB>(thread_data, ld, offset, beta, gamma, epsilon, output); } template <typename T> bool ComputeSkipLayerNorm( hipStream_t stream, const int ld, const int n, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, const T epsilon, T* output) { // this must be true because n is the total size of the tensor assert(n % ld == 0); const int grid_size = n / ld; if (ld <= 32) { constexpr int block_size = 32; hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size>) , dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias, epsilon, output); } else if (ld <= 128) { constexpr int block_size = 128; hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size>) , dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias, epsilon, output); } else if (ld == 384) { constexpr int block_size = 384; hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size>) , dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias, epsilon, output); } else { constexpr int block_size = 256; hipLaunchKernelGGL(( SkipLayerNormKernel<T, block_size>), dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias, epsilon, output); } return CUDA_CALL(hipPeekAtLastError()); } bool LaunchSkipLayerNormKernel( void* output, const void* input, const void* skip, const void* gamma, const void* beta, const void* bias, float epsilon, int hidden_size, int element_count, size_t element_size) { // use default stream const hipStream_t stream = nullptr; if (element_size == 2) { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const half*>(input), reinterpret_cast<const half*>(skip), reinterpret_cast<const half*>(beta), reinterpret_cast<const half*>(gamma), reinterpret_cast<const half*>(bias), __float2half_rn(epsilon), reinterpret_cast<half*>(output)); } else { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const float*>(input), reinterpret_cast<const float*>(skip), reinterpret_cast<const float*>(beta), reinterpret_cast<const float*>(gamma), reinterpret_cast<const float*>(bias), epsilon, reinterpret_cast<float*>(output)); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
27965ebf6866a041c6da2ef0cd77be4aadb1c776.cu
/* The implementation of this file is based on skipLayerNorm plugin in TensorRT demo: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/ Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "layer_norm.cuh" #include "skip_layer_norm_impl.h" #include <cuda_fp16.h> namespace onnxruntime { namespace contrib { namespace cuda { template <typename T, unsigned TPB> __global__ void SkipLayerNormKernelSmall( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, const T epsilon, T* output) { const T reverse_ld = T(1.f / ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 cub::KeyValuePair<T, T> thread_data(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < ld) { val = (bias == nullptr) ? input[idx] + skip[idx] : input[idx] + skip[idx] + bias[threadIdx.x]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, cub::KeyValuePair<T, T>(rldval, rldval * val)); } LayerNormSmall<T, TPB>(val, thread_data, ld, idx, beta, gamma, epsilon, output); } template <typename T, unsigned TPB> __global__ void SkipLayerNormKernel( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, const T epsilon, T* output) { const T reverse_ld = T(1.f / ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 cub::KeyValuePair<T, T> thread_data(0, 0); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = (bias == nullptr) ? 
input[idx] + skip[idx] : input[idx] + skip[idx] + bias[i]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, cub::KeyValuePair<T, T>(rldval, rldval * val)); output[idx] = val; } LayerNorm<T, TPB>(thread_data, ld, offset, beta, gamma, epsilon, output); } template <typename T> bool ComputeSkipLayerNorm( cudaStream_t stream, const int ld, const int n, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, const T epsilon, T* output) { // this must be true because n is the total size of the tensor assert(n % ld == 0); const int grid_size = n / ld; if (ld <= 32) { constexpr int block_size = 32; SkipLayerNormKernelSmall<T, block_size> <<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias, epsilon, output); } else if (ld <= 128) { constexpr int block_size = 128; SkipLayerNormKernelSmall<T, block_size> <<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias, epsilon, output); } else if (ld == 384) { constexpr int block_size = 384; SkipLayerNormKernelSmall<T, block_size> <<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias, epsilon, output); } else { constexpr int block_size = 256; SkipLayerNormKernel<T, block_size><<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias, epsilon, output); } return CUDA_CALL(cudaPeekAtLastError()); } bool LaunchSkipLayerNormKernel( void* output, const void* input, const void* skip, const void* gamma, const void* beta, const void* bias, float epsilon, int hidden_size, int element_count, size_t element_size) { // use default stream const cudaStream_t stream = nullptr; if (element_size == 2) { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const half*>(input), reinterpret_cast<const half*>(skip), reinterpret_cast<const half*>(beta), reinterpret_cast<const half*>(gamma), reinterpret_cast<const half*>(bias), __float2half_rn(epsilon), reinterpret_cast<half*>(output)); } else { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const float*>(input), reinterpret_cast<const float*>(skip), reinterpret_cast<const float*>(beta), reinterpret_cast<const float*>(gamma), reinterpret_cast<const float*>(bias), epsilon, reinterpret_cast<float*>(output)); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
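/*
Reduction note for the two kernels above: each thread accumulates the pair
(val/ld, val*val/ld) for its elements, where val = input + skip (+ bias), so the
block-wide KeyValuePairSum yields (mean, mean-of-squares) of the row. LayerNorm and
LayerNormSmall (defined in layer_norm.cuh, not shown here) are then expected to apply
the usual transform

  out = gamma * (val - mu) / sqrt(var + eps) + beta,   var = E[val^2] - mu^2.

Host reference for one row of length ld (a sketch under that assumption;
skip_layer_norm_ref is not part of this source tree):

  void skip_layer_norm_ref(const float* x, const float* skip, const float* bias,
                           const float* gamma, const float* beta, float eps,
                           int ld, float* out) {
    double mean = 0.0, meansq = 0.0;
    for (int i = 0; i < ld; ++i) {
      float v = x[i] + skip[i] + (bias ? bias[i] : 0.0f);
      mean += v / ld;
      meansq += (double)v * v / ld;
      out[i] = v;
    }
    double var = meansq - mean * mean;
    for (int i = 0; i < ld; ++i)
      out[i] = gamma[i] * (float)((out[i] - mean) / sqrt(var + eps)) + beta[i];
  }
*/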
75db87bdc252c112a34586aa25b6073c40abe631.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zswapblk.cu normal z -> c, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #define BLOCK_SIZE 64 /*********************************************************/ /* * Blocked version: swap several pairs of lines */ typedef struct { magmaFloatComplex *A1; magmaFloatComplex *A2; int n, lda1, lda2, npivots; short ipiv[BLOCK_SIZE]; } magmagpu_cswapblk_params_t; __global__ void magmagpu_cswapblkrm( magmagpu_cswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; if( y < params.n ) { magmaFloatComplex *A1 = params.A1 + y - params.lda1; magmaFloatComplex *A2 = params.A2 + y; for( int i = 0; i < params.npivots; i++ ) { A1 += params.lda1; if ( params.ipiv[i] == -1 ) continue; magmaFloatComplex tmp1 = *A1; magmaFloatComplex *tmp2 = A2 + params.ipiv[i]*params.lda2; *A1 = *tmp2; *tmp2 = tmp1; } } } __global__ void magmagpu_cswapblkcm( magmagpu_cswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; unsigned int offset1 = y*params.lda1; unsigned int offset2 = y*params.lda2; if( y < params.n ) { magmaFloatComplex *A1 = params.A1 + offset1 - 1; magmaFloatComplex *A2 = params.A2 + offset2; for( int i = 0; i < params.npivots; i++ ) { A1++; if ( params.ipiv[i] == -1 ) continue; magmaFloatComplex tmp1 = *A1; magmaFloatComplex *tmp2 = A2 + params.ipiv[i]; *A1 = *tmp2; *tmp2 = tmp1; } } __syncthreads(); } /** @ingroup magma_cblas2 ********************************************************************/ extern "C" void magmablas_cswapblk_q( magma_order_t order, magma_int_t n, magmaFloatComplex *dA1T, magma_int_t lda1, magmaFloatComplex *dA2T, magma_int_t lda2, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset, magma_queue_t queue ) { magma_int_t blocksize = 64; dim3 blocks( (n+blocksize-1) / blocksize, 1, 1); magma_int_t k, im; /* Quick return */ if ( n == 0 ) return; if ( order == MagmaColMajor ) { for( k=(i1-1); k<i2; k+=BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_cswapblk_params_t params = { dA1T+k, dA2T, n, lda1, lda2, sb }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } hipLaunchKernelGGL(( magmagpu_cswapblkcm), dim3(blocks), dim3(blocksize), 0, queue , params ); } } else { for( k=(i1-1); k<i2; k+=BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_cswapblk_params_t params = { dA1T+k*lda1, dA2T, n, lda1, lda2, sb }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } hipLaunchKernelGGL(( magmagpu_cswapblkrm), dim3(blocks), dim3(blocksize), 0, queue , params ); } } } /** @see magmablas_cswapblk_q @ingroup magma_cblas2 ********************************************************************/ extern "C" void magmablas_cswapblk( magma_order_t order, magma_int_t n, magmaFloatComplex *dA1T, magma_int_t lda1, magmaFloatComplex *dA2T, magma_int_t lda2, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset ) { magmablas_cswapblk_q( order, n, dA1T, lda1, dA2T, lda2, i1, i2, ipiv, inci, offset, magma_stream ); }
75db87bdc252c112a34586aa25b6073c40abe631.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zswapblk.cu normal z -> c, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #define BLOCK_SIZE 64 /*********************************************************/ /* * Blocked version: swap several pairs of lines */ typedef struct { magmaFloatComplex *A1; magmaFloatComplex *A2; int n, lda1, lda2, npivots; short ipiv[BLOCK_SIZE]; } magmagpu_cswapblk_params_t; __global__ void magmagpu_cswapblkrm( magmagpu_cswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; if( y < params.n ) { magmaFloatComplex *A1 = params.A1 + y - params.lda1; magmaFloatComplex *A2 = params.A2 + y; for( int i = 0; i < params.npivots; i++ ) { A1 += params.lda1; if ( params.ipiv[i] == -1 ) continue; magmaFloatComplex tmp1 = *A1; magmaFloatComplex *tmp2 = A2 + params.ipiv[i]*params.lda2; *A1 = *tmp2; *tmp2 = tmp1; } } } __global__ void magmagpu_cswapblkcm( magmagpu_cswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; unsigned int offset1 = y*params.lda1; unsigned int offset2 = y*params.lda2; if( y < params.n ) { magmaFloatComplex *A1 = params.A1 + offset1 - 1; magmaFloatComplex *A2 = params.A2 + offset2; for( int i = 0; i < params.npivots; i++ ) { A1++; if ( params.ipiv[i] == -1 ) continue; magmaFloatComplex tmp1 = *A1; magmaFloatComplex *tmp2 = A2 + params.ipiv[i]; *A1 = *tmp2; *tmp2 = tmp1; } } __syncthreads(); } /** @ingroup magma_cblas2 ********************************************************************/ extern "C" void magmablas_cswapblk_q( magma_order_t order, magma_int_t n, magmaFloatComplex *dA1T, magma_int_t lda1, magmaFloatComplex *dA2T, magma_int_t lda2, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset, magma_queue_t queue ) { magma_int_t blocksize = 64; dim3 blocks( (n+blocksize-1) / blocksize, 1, 1); magma_int_t k, im; /* Quick return */ if ( n == 0 ) return; if ( order == MagmaColMajor ) { for( k=(i1-1); k<i2; k+=BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_cswapblk_params_t params = { dA1T+k, dA2T, n, lda1, lda2, sb }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } magmagpu_cswapblkcm<<< blocks, blocksize, 0, queue >>>( params ); } } else { for( k=(i1-1); k<i2; k+=BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_cswapblk_params_t params = { dA1T+k*lda1, dA2T, n, lda1, lda2, sb }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } magmagpu_cswapblkrm<<< blocks, blocksize, 0, queue >>>( params ); } } } /** @see magmablas_cswapblk_q @ingroup magma_cblas2 ********************************************************************/ extern "C" void magmablas_cswapblk( magma_order_t order, magma_int_t n, magmaFloatComplex *dA1T, magma_int_t lda1, magmaFloatComplex *dA2T, magma_int_t lda2, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset ) { magmablas_cswapblk_q( order, n, dA1T, lda1, dA2T, lda2, i1, i2, ipiv, inci, offset, magma_stream ); }
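/*
Usage sketch (dA, ldda, ipiv, i1, i2 and n are caller-supplied placeholders, not
defined in this file): apply the 1-based row interchanges recorded in a LAPACK-style
pivot array to rows i1..i2 of an n-column, column-major device matrix. The pivot
array is read on the host when the kernel parameters are packed; whether dA1T and
dA2T refer to the same matrix is up to the caller.

  magmablas_cswapblk( MagmaColMajor, n,
                      dA, ldda,    // rows being swept (dA1T)
                      dA, ldda,    // rows swapped in  (dA2T)
                      i1, i2,      // 1-based inclusive row range
                      ipiv, 1,     // host pivot array, stride inci = 1
                      0 );         // offset subtracted from each pivot row index
*/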
f1524f1044bbd1e0a505e80dbaae23f511b8c83e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cmath> #include <algorithm> #include <stdio.h> #include <stdlib.h> #include <fstream> #include <sys/time.h> #include <omp.h> #include <hip/hip_runtime.h> #include <cstdlib> #define epsilon 1.e-8 #define num 16 using namespace std; template <typename T> double sgn(T val) { return (val > T(0)) - (val < T(0)); } __global__ void parallel2(double** dev_V, double* dev_s, double* dev_c, double** dev_U, int* dev_i, int* dev_j, int* dev_N){ for(int k=0; k < *dev_N; k++) { double temp = dev_U[k][*dev_i]; dev_U[k][*dev_i] = *(dev_c)*temp - *(dev_s)*dev_U[k][*dev_j]; dev_U[k][*dev_j] = *(dev_s)*temp + *(dev_c)*dev_U[k][*dev_j]; temp = dev_V[k][*dev_i]; dev_V[k][*dev_i] = *(dev_c)*temp - *(dev_s)*dev_V[k][*dev_j]; dev_V[k][*dev_j] = *(dev_s)*temp + *(dev_c)*dev_V[k][*dev_j]; } } __global__ void parallel1(double* dev_alpha, double* dev_beta, double* dev_gamma, double** dev_U, int* dev_i, int* dev_j, int* dev_N){ for(int k = 0; k < *dev_N ; k++) { *dev_alpha = *dev_alpha + (dev_U[k][*dev_i] * dev_U[k][*dev_i]); *dev_beta = *dev_beta + (dev_U[k][*dev_j] * dev_U[k][*dev_j]); *dev_gamma = *dev_gamma + (dev_U[k][*dev_i] * dev_U[k][*dev_j]); } } int main (int argc, char* argv[]) { int M,N; string T,P,Db; //double elapsedTime,elapsedTime2; //timeval start,end,end2; // Check number of arguments if(argc < 3) { cout<<"Please input the size of Matrix\n"; return 0; } M = atoi(argv[1]); N = atoi(argv[2]); //printf("N-%d\n",N); // Check that given matrix should be square if(M != N) { cout<<"Error: Matrix must be square"; return 0; } double **U,**V, **S,**A; double **U_t,**V_t; double *alpha, *beta, *gamma, *c, *zeta, t,*s, converge; double *dev_alpha ,*dev_beta,*dev_gamma,*dev_c,*dev_s; int *dev_N; double **dev_U,**dev_V; int *dev_i,*dev_j; alpha = (double *)malloc(sizeof(double)); beta = (double *)malloc(sizeof(double)); gamma = (double *)malloc(sizeof(double)); zeta = (double *)malloc(sizeof(double)); int acum = 0; converge = 1.0; // Assign memory to all matrices U, S, V U = new double*[N]; V = new double*[N]; U_t = new double*[N]; V_t = new double*[N]; A = new double*[N]; S = new double*[N]; for(int i =0; i<N; i++){ U[i] = new double[N]; V[i] = new double[N]; U_t[i] = new double[N]; V_t[i] = new double[N]; A[i] = new double[N]; S[i] = new double[N]; } //printf("Initializations done\n"); //Generate random matrix for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ //cout<<"In init "<<i<<" "<<j<<" "; A[i][j] = rand()%10+1; } } cout<<"A"<<endl<<endl; for(int i =0; i<M; i++){ for(int j =0; j<N; j++){ cout<<A[i][j]<<" "; } cout<<endl; } //Copy to U_t for (int i=0;i<M;i++){ for (int j=0;j<N;j++){ U_t[i][j]=A[j][i]; } } //printf("Copy to U_t\n"); // Initialize V matrix as identity matrix and S Matrix with zeros for(int i=0; i<M;i++) { for(int j=0; j<N;j++) { //printf("i-%d,j-%d\n",i,j); if(i==j) { V[i][j] = (double)1.0; S[i][j] = (double)0.0; } else { V[i][j] = (double)0.0; S[i][j] = (double)0.0; } } } //printf("V build\n"); int * N_temp = (int *)malloc(sizeof(int)); *N_temp = N; //printf("Conv loop start\n"); //gettimeofday(&start, NULL); /* SVD using Jacobi algorithm (Sequencial)*/ while(converge > epsilon) { //convergence converge = 0.0; //counter of loops acum++; int * i_temp = (int *)malloc(sizeof(int)); int * j_temp = (int *)malloc(sizeof(int)); for(int i = 0; i<M; i++) { for(int j = i+1; j<N; j++) { i_temp = &i; j_temp = &j; // Initialize alpha, beta , gamma to zero *alpha = 0.0; *beta = 0.0; *gamma = 0.0; // Update alpha, 
beta , gamma as per the formulae hipMalloc ((void**)&dev_alpha , sizeof(double)); hipMalloc ((void**)&dev_beta , sizeof(double)); hipMalloc ((void**)&dev_gamma , sizeof(double)); hipMalloc ((void**)&dev_i , sizeof(int)); hipMalloc ((void**)&dev_j, sizeof(int)); hipMalloc ((void**)&dev_N, sizeof(int)); hipMalloc ((void***)&dev_U, sizeof(double)*N*N); hipMemcpy(dev_alpha ,alpha, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_beta ,beta, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_gamma ,gamma, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_i ,i_temp, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_j ,j_temp, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_N ,N_temp, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_U ,U, sizeof(double)*N*N, hipMemcpyHostToDevice); //printf("Call to parallel1\n"); hipLaunchKernelGGL(( parallel1), dim3(1),dim3(4), 0, 0, dev_alpha, dev_beta, dev_gamma, dev_U, dev_i, dev_j, dev_N); //printf("After paralell1\n"); hipMemcpy(U, dev_U, sizeof(double)*N*N, hipMemcpyDeviceToHost); //printf("1\n"); //free(dev_alpha); //free(dev_beta); //free(dev_gamma); //printf("2\n"); //free(dev_i); //free(dev_j); //free(dev_N); //free(dev_U); //printf("3\n"); //printf("Before c,s cal\n"); // Update converge basicaly is the angle between column i and j double gamma_t = *gamma; //printf("**1\n"); double alpha_t = *alpha; //printf("**1\n"); double beta_t = *beta; //printf("**1\n"); double zeta_t = *zeta; //printf("**1\n"); double c_t = *c; //printf("**1\n"); //double s_t = *s; //printf("**1\n"); converge = max(converge, abs(gamma_t)/sqrt(alpha_t*beta_t)); //printf("**2\n"); zeta_t = (beta_t - alpha_t) / (2.0 * gamma_t); //compute tan of angle t = sgn(zeta_t) / (abs(zeta_t) + sqrt(1.0 + (zeta_t*zeta_t))); //extract cos c_t = 1.0 / (sqrt (1.0 + (t*t))); //extract sin *s = c_t*t; //printf("**\n"); *alpha = alpha_t; *beta = beta_t; *gamma = gamma_t; *zeta = zeta_t; *c = c_t; //*s = s_t; //printf("After c,s cal\n"); //Apply rotations on U and V hipMalloc ((void**)&dev_i , sizeof(int)); hipMalloc ((void**)&dev_j , sizeof(int)); hipMalloc ((void**)&dev_N , sizeof(int)); hipMalloc ((void***)&dev_U , sizeof(double)*N*N); hipMalloc ((void***)&dev_V , sizeof(double)*N*N); hipMalloc ((void**)&dev_s , sizeof(double)); hipMalloc ((void**)&dev_c , sizeof(double)); hipMemcpy(dev_V ,V, sizeof(double)*N*N, hipMemcpyHostToDevice); hipMemcpy(dev_s ,s, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_c ,c, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_i ,i_temp, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_j ,j_temp, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_N ,N_temp, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_U ,U, sizeof(double)*N*N, hipMemcpyHostToDevice); //printf("Call to parallel2\n"); hipLaunchKernelGGL(( parallel2), dim3(1),dim3(4), 0, 0, dev_V, dev_s, dev_c, dev_U, dev_i, dev_j, dev_N); //printf("After paralell2\n"); hipMemcpy(U, dev_U, sizeof(double)*N*N, hipMemcpyDeviceToHost); hipMemcpy(V, dev_V, sizeof(double)*N*N, hipMemcpyDeviceToHost); free(dev_V); free(dev_s); free(dev_c); free(dev_i); free(dev_j); free(dev_N); free(dev_U); } } } //Create matrix S for(int i =0; i<M; i++) { t=0; for(int j=0; j<N;j++) { t=t + pow(U[i][j],2); } //double t_t = *t; //t_t = sqrt(t_t); //*t = t_t; t=sqrt(t); for(int j=0; j<N;j++) { U[i][j] = U[i][j] / t; if(i == j) { S[i][j] = t; } } } //gettimeofday(&end, NULL); // Print final matrix U cout<<"\nMatrix U"<<endl; for(int i=0; i<M; i++) { for(int j=0; j<N; j++) cout<<U[i][j]<<" "; cout<<endl; } // 
Print final matrix S cout<<"\nMatrix S"<<endl; for(int i=0; i<M; i++) { for(int j=0; j<N; j++) cout<<S[i][j]<<" "; cout<<endl; } // Print final matrix V_t cout<<"\nMatrix V Transpose"<<endl; for(int i=0; i<M; i++) { for(int j=0; j<N; j++) cout<<V[j][i]<<" "; cout<<endl; } // Print time and iterations cout<<"iterations: "<<acum<<endl; //elapsedTime = (end.tv_sec - start.tv_sec) * 1000.0; //elapsedTime += (end.tv_usec - start.tv_usec) / 1000.0; //cout<<"Time: "<<elapsedTime<<" ms."<<endl<<endl; }
f1524f1044bbd1e0a505e80dbaae23f511b8c83e.cu
#include <iostream> #include <cmath> #include <algorithm> #include <stdio.h> #include <stdlib.h> #include <fstream> #include <sys/time.h> #include <omp.h> #include <cuda_runtime.h> #include <cstdlib> #define epsilon 1.e-8 #define num 16 using namespace std; template <typename T> double sgn(T val) { return (val > T(0)) - (val < T(0)); } __global__ void parallel2(double** dev_V, double* dev_s, double* dev_c, double** dev_U, int* dev_i, int* dev_j, int* dev_N){ for(int k=0; k < *dev_N; k++) { double temp = dev_U[k][*dev_i]; dev_U[k][*dev_i] = *(dev_c)*temp - *(dev_s)*dev_U[k][*dev_j]; dev_U[k][*dev_j] = *(dev_s)*temp + *(dev_c)*dev_U[k][*dev_j]; temp = dev_V[k][*dev_i]; dev_V[k][*dev_i] = *(dev_c)*temp - *(dev_s)*dev_V[k][*dev_j]; dev_V[k][*dev_j] = *(dev_s)*temp + *(dev_c)*dev_V[k][*dev_j]; } } __global__ void parallel1(double* dev_alpha, double* dev_beta, double* dev_gamma, double** dev_U, int* dev_i, int* dev_j, int* dev_N){ for(int k = 0; k < *dev_N ; k++) { *dev_alpha = *dev_alpha + (dev_U[k][*dev_i] * dev_U[k][*dev_i]); *dev_beta = *dev_beta + (dev_U[k][*dev_j] * dev_U[k][*dev_j]); *dev_gamma = *dev_gamma + (dev_U[k][*dev_i] * dev_U[k][*dev_j]); } } int main (int argc, char* argv[]) { int M,N; string T,P,Db; //double elapsedTime,elapsedTime2; //timeval start,end,end2; // Check number of arguments if(argc < 3) { cout<<"Please input the size of Matrix\n"; return 0; } M = atoi(argv[1]); N = atoi(argv[2]); //printf("N-%d\n",N); // Check that given matrix should be square if(M != N) { cout<<"Error: Matrix must be square"; return 0; } double **U,**V, **S,**A; double **U_t,**V_t; double *alpha, *beta, *gamma, *c, *zeta, t,*s, converge; double *dev_alpha ,*dev_beta,*dev_gamma,*dev_c,*dev_s; int *dev_N; double **dev_U,**dev_V; int *dev_i,*dev_j; alpha = (double *)malloc(sizeof(double)); beta = (double *)malloc(sizeof(double)); gamma = (double *)malloc(sizeof(double)); zeta = (double *)malloc(sizeof(double)); int acum = 0; converge = 1.0; // Assign memory to all matrices U, S, V U = new double*[N]; V = new double*[N]; U_t = new double*[N]; V_t = new double*[N]; A = new double*[N]; S = new double*[N]; for(int i =0; i<N; i++){ U[i] = new double[N]; V[i] = new double[N]; U_t[i] = new double[N]; V_t[i] = new double[N]; A[i] = new double[N]; S[i] = new double[N]; } //printf("Initializations done\n"); //Generate random matrix for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ //cout<<"In init "<<i<<" "<<j<<" "; A[i][j] = rand()%10+1; } } cout<<"A"<<endl<<endl; for(int i =0; i<M; i++){ for(int j =0; j<N; j++){ cout<<A[i][j]<<" "; } cout<<endl; } //Copy to U_t for (int i=0;i<M;i++){ for (int j=0;j<N;j++){ U_t[i][j]=A[j][i]; } } //printf("Copy to U_t\n"); // Initialize V matrix as identity matrix and S Matrix with zeros for(int i=0; i<M;i++) { for(int j=0; j<N;j++) { //printf("i-%d,j-%d\n",i,j); if(i==j) { V[i][j] = (double)1.0; S[i][j] = (double)0.0; } else { V[i][j] = (double)0.0; S[i][j] = (double)0.0; } } } //printf("V build\n"); int * N_temp = (int *)malloc(sizeof(int)); *N_temp = N; //printf("Conv loop start\n"); //gettimeofday(&start, NULL); /* SVD using Jacobi algorithm (Sequencial)*/ while(converge > epsilon) { //convergence converge = 0.0; //counter of loops acum++; int * i_temp = (int *)malloc(sizeof(int)); int * j_temp = (int *)malloc(sizeof(int)); for(int i = 0; i<M; i++) { for(int j = i+1; j<N; j++) { i_temp = &i; j_temp = &j; // Initialize alpha, beta , gamma to zero *alpha = 0.0; *beta = 0.0; *gamma = 0.0; // Update alpha, beta , gamma as per the formulae cudaMalloc 
((void**)&dev_alpha , sizeof(double)); cudaMalloc ((void**)&dev_beta , sizeof(double)); cudaMalloc ((void**)&dev_gamma , sizeof(double)); cudaMalloc ((void**)&dev_i , sizeof(int)); cudaMalloc ((void**)&dev_j, sizeof(int)); cudaMalloc ((void**)&dev_N, sizeof(int)); cudaMalloc ((void***)&dev_U, sizeof(double)*N*N); cudaMemcpy(dev_alpha ,alpha, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_beta ,beta, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_gamma ,gamma, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_i ,i_temp, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_j ,j_temp, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_N ,N_temp, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_U ,U, sizeof(double)*N*N, cudaMemcpyHostToDevice); //printf("Call to parallel1\n"); parallel1<<<1,4>>>(dev_alpha, dev_beta, dev_gamma, dev_U, dev_i, dev_j, dev_N); //printf("After paralell1\n"); cudaMemcpy(U, dev_U, sizeof(double)*N*N, cudaMemcpyDeviceToHost); //printf("1\n"); //free(dev_alpha); //free(dev_beta); //free(dev_gamma); //printf("2\n"); //free(dev_i); //free(dev_j); //free(dev_N); //free(dev_U); //printf("3\n"); //printf("Before c,s cal\n"); // Update converge basicaly is the angle between column i and j double gamma_t = *gamma; //printf("**1\n"); double alpha_t = *alpha; //printf("**1\n"); double beta_t = *beta; //printf("**1\n"); double zeta_t = *zeta; //printf("**1\n"); double c_t = *c; //printf("**1\n"); //double s_t = *s; //printf("**1\n"); converge = max(converge, abs(gamma_t)/sqrt(alpha_t*beta_t)); //printf("**2\n"); zeta_t = (beta_t - alpha_t) / (2.0 * gamma_t); //compute tan of angle t = sgn(zeta_t) / (abs(zeta_t) + sqrt(1.0 + (zeta_t*zeta_t))); //extract cos c_t = 1.0 / (sqrt (1.0 + (t*t))); //extract sin *s = c_t*t; //printf("**\n"); *alpha = alpha_t; *beta = beta_t; *gamma = gamma_t; *zeta = zeta_t; *c = c_t; //*s = s_t; //printf("After c,s cal\n"); //Apply rotations on U and V cudaMalloc ((void**)&dev_i , sizeof(int)); cudaMalloc ((void**)&dev_j , sizeof(int)); cudaMalloc ((void**)&dev_N , sizeof(int)); cudaMalloc ((void***)&dev_U , sizeof(double)*N*N); cudaMalloc ((void***)&dev_V , sizeof(double)*N*N); cudaMalloc ((void**)&dev_s , sizeof(double)); cudaMalloc ((void**)&dev_c , sizeof(double)); cudaMemcpy(dev_V ,V, sizeof(double)*N*N, cudaMemcpyHostToDevice); cudaMemcpy(dev_s ,s, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_c ,c, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_i ,i_temp, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_j ,j_temp, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_N ,N_temp, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_U ,U, sizeof(double)*N*N, cudaMemcpyHostToDevice); //printf("Call to parallel2\n"); parallel2<<<1,4>>>(dev_V, dev_s, dev_c, dev_U, dev_i, dev_j, dev_N); //printf("After paralell2\n"); cudaMemcpy(U, dev_U, sizeof(double)*N*N, cudaMemcpyDeviceToHost); cudaMemcpy(V, dev_V, sizeof(double)*N*N, cudaMemcpyDeviceToHost); free(dev_V); free(dev_s); free(dev_c); free(dev_i); free(dev_j); free(dev_N); free(dev_U); } } } //Create matrix S for(int i =0; i<M; i++) { t=0; for(int j=0; j<N;j++) { t=t + pow(U[i][j],2); } //double t_t = *t; //t_t = sqrt(t_t); //*t = t_t; t=sqrt(t); for(int j=0; j<N;j++) { U[i][j] = U[i][j] / t; if(i == j) { S[i][j] = t; } } } //gettimeofday(&end, NULL); // Print final matrix U cout<<"\nMatrix U"<<endl; for(int i=0; i<M; i++) { for(int j=0; j<N; j++) cout<<U[i][j]<<" "; cout<<endl; } // Print final matrix S cout<<"\nMatrix S"<<endl; for(int i=0; i<M; i++) 
{ for(int j=0; j<N; j++) cout<<S[i][j]<<" "; cout<<endl; } // Print final matrix V_t cout<<"\nMatrix V Transpose"<<endl; for(int i=0; i<M; i++) { for(int j=0; j<N; j++) cout<<V[j][i]<<" "; cout<<endl; } // Print time and iterations cout<<"iterations: "<<acum<<endl; //elapsedTime = (end.tv_sec - start.tv_sec) * 1000.0; //elapsedTime += (end.tv_usec - start.tv_usec) / 1000.0; //cout<<"Time: "<<elapsedTime<<" ms."<<endl<<endl; }
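/*
Summary of the one-sided Jacobi step implemented above for a column pair (i, j) of U
(this is what parallel1/parallel2 plus the host code between them compute):

  alpha = sum_k U[k][i]^2,  beta = sum_k U[k][j]^2,  gamma = sum_k U[k][i]*U[k][j]
  zeta  = (beta - alpha) / (2*gamma)
  t     = sgn(zeta) / (|zeta| + sqrt(1 + zeta^2))     // tangent of the rotation angle
  c     = 1 / sqrt(1 + t^2),  s = c*t                 // cosine and sine

and the sweeps repeat until max over pairs of |gamma| / sqrt(alpha*beta) <= epsilon.
Equivalent host-only column rotation for reference (rotate_cols is illustrative and
not used elsewhere in this file):

  void rotate_cols(double** U, double** V, int N, int i, int j, double c, double s) {
    for (int k = 0; k < N; ++k) {
      double ui = U[k][i], vi = V[k][i];
      U[k][i] = c*ui - s*U[k][j];  U[k][j] = s*ui + c*U[k][j];
      V[k][i] = c*vi - s*V[k][j];  V[k][j] = s*vi + c*V[k][j];
    }
  }
*/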
76ff0ecf52f31b37a77c2c0212e707bc6e6693c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************************** chi2.c Compute chi-square for a model that was previously created. Modified 2016 December 8 by ME: Converted to "FIT" action-only, CUDA code Modified 2015 June 10 by CM: Implement smearing for the "fit" and "write" actions Modified 2013 April 24 by CM: Adjust names of output files so they are in alphanumeric order if > 100 per dataset Modified 2012 December 6 by CM: Fix bug introduced on December 6: take "calval" into account when determining whether or not model signal is greater than 'chi2fit0_thresh' sigmas Modified 2012 December 5 by CM: When the "write_chi2fit0" parameter is turned on, display the number of degrees of freedom with zero model signal in addition to the chi2 contribution, and list chi2 and dof for all data combined Implement the 'chi2fit0_thresh' parameter Modified 2012 March 24 by CM: When the root node calls chi2 with list_breakdown = 1 (e.g., for the "write" action), print a warning if the value of any (delay-)Doppler dataset's Doppler scaling factor is out of the allowed range Modified 2010 August 25 by CM: Move TINYCALFACT definition to const.h Modified 2010 August 10 by CM: Implement the "radfitmin" and "radobsmin" parameters: these are the pixel values that map to black for all fit and obs pgm images that are output for delay-Doppler frames Modified 2010 March 20 by CM: For the "write" action for lightcurve datasets, include magnitude uncertainties as a new column in output files fit_MM.dat Modified 2009 August 9 by CM: For the "write" action with the "listfit" parameter turned on, replace a ".rdf" or ".fits" or ".fit" suffix with ".fitdat" Modified 2009 April 3 by CM: When the root node calls chi2 with list_breakdown = 1 (e.g., for the "write" action), print a warning if any plane-of-sky fit image is too small to "contain" all nonzero pixels in the POS sky rendering or if the model is too wide in delay-Doppler space for any (delay-)Doppler fit frame to be correctly constructed For MPI_Recv calls, mpi_par[0] is no longer equal to the MPI action, since the message tag argument already serves that purpose (as of 2008 April 10) -- so the other mpi_par elements are renumbered Modified 2008 June 29 by CM: For the "write" and "orbit" actions, zero out fit pixels/bins in delay-Doppler, Doppler, and plane-of-sky frames for which those pixels/bins have been zeroed out in the pixel mask Modified 2008 April 10 by CM: Use message tag argument to MPI_Recv to identify the MPI action Modified 2007 September 13 by CM: Implement "write_chi2fit0" parameter: for the "write" and "orbit" actions, output chi2 for delay-Doppler, Doppler, and plane-of-sky pixels/bins with zero model power; do this both for each individual frame, for all delay-Doppler data taken together, for all Doppler data taken together, and for all plane-of-sky data taken together Modified 2007 August 18 by CM: Rename MPI_TAG to MPI_TAG_1 to avoid name conflict with mpich headers Modified 2007 August 10 by CM: Create chi2_deldop, chi2_doppler, chi2_poset, and chi2_lghtcrv routines so that the code isn't one giant switch statement When a calfact value is negative, reset it to a tiny positive value rather than to zero in order to protect against division by zero Implement the "radfitmax" and "radobsmax" parameters for applying the same brightness scale to all fit and obs pgm images output for delay-Doppler frames Modified 2007 August 4 by CM: 
Carry out "write" action steps for the "orbit" action as well Modified 2007 January 6 by CM: For the "write" action for lightcurve datasets, output rotation phases to files fit_MM.dat and calc_MM.dat. Modified 2006 October 1 by CM: For lightcurve datasets, chi-square is now computed in intensity space rather than in magnitude space. (File output is still in magnitudes.) Modified 2006 June 21 by CM: In delay-Doppler section, changed delres to del_per_pixel and dopres to dop_per_pixel For POS renderings and plane-of-sky fit frames, changed res to km_per_pixel When the root node calls chi2 with list_breakdown = 1, print a warning if the model extends beyond the boundaries of the POS frames, if any photometric parameter has an illegal value, or if any ellipsoid diameter is tiny or negative. (This change will cause such warnings to be displayed for the "write" action.) Modified 2006 June 18 by CM: Allow each delay-Doppler frame within a dataset to have different dimensions after vignetting Allow each Doppler frame within a dataset to have different dimensions after vignetting Allow plane-of-sky frames to be rectangular rather than square For the "write" action, adjust output for delay-Doppler, Doppler, and plane-of-sky frames to allow for masked-out pixels Modified 2005 June 27 by CM: Renamed "round" function to "iround" to avoid conflict Modified 2005 April 25 by CM: For the "write" action, compute the one-sigma percentage uncertainty on chi2 correctly; the expression used until now, 100*sqrt(2/dof), is only valid when all data are weighted equally. Modified 2005 March 17 by CM: For the "fit" action with parallel processing, check that root receives the responses to the correct broadcast Compute degrees of freedom in routine read_dat rather than here, so that it only needs to be done once per fit rather than repeatedly (dof for individual data types is still computed here) Allow weights and degrees of freedom to be floating-point rather than integer; but if they are integer after all, print integer-rounded values to the screen rather than floating-point For the "write" action with "speckle" turned on, write Doppler file fit_MM_NN.dat with spectral values normalized to the input sdev value, not to the sdev value increased for self-noise. Modified 2005 March 6 by CM: For the "write" action, write calibration factors for plane-of-sky datasets to disk if the "list_posetcal" parameter is turned on Modified 2005 March 2 by CM: Rename some "sdev" and "var" values to be "oneovervar" (1/variance) For the "write" action, adjust arguments of revised "resampim" routine For the "write" action, rotate plane-of-sky fit/obs/res frames so that north is upward, unless poset_scaling = NONE Modified 2005 February 22 by CM: For the "write" action, fix bug (incorrect array dimensions) in scaling fit vs. obs pgm image brightness for plane-of-sky datasets For the "write" action, add the new "image_rebin" argument to function resampim to handle output images which have much coarser resolution than the model POS frames from which they are constructed (i.e., which are greatly undersampled). This situation probably won't arise often: The real importance of "image_rebin" is for dealing with plane-of-sky fit frames in routine calc_fits. 
Modified 2005 January 25 by CM: Take care of uninitialized variables Modified 2005 January 20 by CM: For the "write" action, implement the bilinear and bicubic interpolation options for the "dd_scaling" parameter For the "write" action, write model, data, and residual pgm images for POS datasets Add pgm file output (obs/fit/res) for POS datasets Correct the expression for chi-square for POS datasets (the calibration factor was being ignored) If the calibration factor for a POS frame is a floating parameter, don't allow it to be negative: instead set it to zero. Also display a warning, unless this is a fit and we're not at the end of an iteration. Modified 2005 January 12 by CM: For the "fit" action with parallel processing, revise the code so that "list_breakdown" will work: For each dataset which is handled by a branch node rather than by root, root broadcasts a request for that branch node to run chi2 for just that one dataset and to send the results to root. This is necessary because otherwise each branch node calls chi2 just once to process *all* of its datasets in one shot, without keeping results for different data types (Doppler, delay-Doppler, etc.) separate. Modified 2004 December 1 by CM: For the "write" action, adjust the "listfit" option so that the "listfit_path" directory can be created on the fly if necessary Modified 2004 November 30 by CM: For the "write" action, implement the "listfit" option to write out the model "data" files for all delay-Doppler frames Modified 2004 July 26 by CM: If the calibration factor for a Doppler or delay-Doppler frame is a floating parameter, don't allow it to be negative: instead set it to zero. Also display a warning, unless this is a fit and we're not at the end of an iteration. Modified 2004 June 27 by CM: Fixed bug: For 'dd_scaling' = 'block' the number of pixels per side for the resampled delay-Doppler obs/fit/residual images wasn't being defined Modified 2004 April 3 by CM: Add the "list_breakdown" argument so that we can list chi2 by data type (Doppler, delay-Doppler, POS, lightcurves) for both the "fit" and the "write" action as desired Modified 2004 March 20 by CM: For the "write" action, move final summary screen display from the main program (shape.c) to here For the "write" action, display summaries for each form of data (Doppler, delay-Doppler, POS, lightcurves) taken separately, in addition to the overall summary Modified 2004 February 29 by CM: For lightcurve output, replace JD244 variable with jdoffset parameter For the "write" action, move all screen and file output which relies on updated calibration factors to this routine Modified 2003 May 10 by CM: Account for contributions to chi-square from model (delay-)Doppler power which lies outside the data frame Modified 2003 April 24 by CM: Display chi-square for Doppler datasets when action = "write" Implement the new "weight" parameter *****************************************************************************************/ extern "C" { #include "../shape/head.h" } /* File-scope global CUDA variables */ __device__ int c2s_print_breakdown/*, dof*/; __device__ unsigned char c2s_write_chi2fit0, c2s_badradar, c2s_badphoto, c2s_baddopscale, c2s_badposet, c2s_posbnd, c2s_baddiam; __device__ double c2s_dof_deldop, c2s_dof_doppler, c2s_dof_poset, c2s_dof_lghtcrv, c2s_dof, c2s_chi2, c2s_chi2_set, c2s_chi2_all_doppler; __device__ double c2s_chi2_fit0_deldop64, c2s_dof_fit0_deldop64, c2s_chi2_all_deldop64, c2s_chi2_fit0_doppler64, c2s_dof_fit0_doppler64; typedef struct chi2_thread_t { 
int thread_no; struct par_t *parameter; struct dat_t *data; unsigned char *htype; unsigned char *dtype; int *nframes; int *hlc_n; int *GPUID; int gpuid; int nsets; int list_breakdown; int max_frames; double chi2_all_deldop; double chi2_all_doppler; double chi2_all_poset; double chi2_all_lghtcrv; double chi2_fit0_deldop; double chi2_fit0_doppler; double chi2_fit0_poset; double dof_fit0_deldop; double dof_fit0_doppler; double dof_fit0_poset; double chi2; hipStream_t *gpu_stream; } chi2_data; /* File-scope CUDA structures */ /* Function prototype declarations */ __host__ double chi2_deldop_gpu(struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_deldop, double *chi2_fit0_deldop, double *dof_fit0_deldop, int nframes, hipStream_t *c2s_stream); __host__ double chi2_doppler_gpu(struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_doppler, double *chi2_fit0_doppler, double *dof_fit0_doppler, int nframes, hipStream_t *c2s_stream); __host__ double chi2_lghtcrv_gpu(struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_lghtcrv, int nframes, int lc_n); void *chi2_pthread_sub(void *ptr); __global__ void c2s_init_krnl(struct dat_t *ddat, int nsets) { /* Single-threaded kernel */ if (threadIdx.x == 0) { ddat->chi2 = 0.0; c2s_chi2_set = 0.0; c2s_chi2_all_doppler = 0.0; } } __global__ void c2s_retrieve_chi2_krnl(struct dat_t *ddat) { /* Single-threaded kernel */ if (threadIdx.x == 0) c2s_chi2 = ddat->chi2; } __global__ void c2s_deldop_init_krnl(struct dat_t *ddat, int s, int *ndel, int *ndop, double *o2, double *m2, double *om, double *weight, int nframes) { /* nframes-threaded kernelDELDOP only */ int f = blockIdx.x * blockDim.x + threadIdx.x; if (f < nframes) { ndel[f] = ddat->set[s].desc.deldop.frame[f].ndel; ndop[f] = ddat->set[s].desc.deldop.frame[f].ndop; o2[f] = ddat->set[s].desc.deldop.frame[f].overflow_o2; m2[f] = ddat->set[s].desc.deldop.frame[f].overflow_m2; om[f] = 0.0; weight[f] = ddat->set[s].desc.deldop.frame[f].weight; } } __global__ void c2s_doppler_init_krnl(struct dat_t *ddat, int s, int *ndop, double *o2, double *m2, double *om, double *weight, int nframes) { /* nframes-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; if (f < nframes) { ndop[f] = ddat->set[s].desc.doppler.frame[f].ndop; o2[f] = ddat->set[s].desc.doppler.frame[f].overflow_o2; m2[f] = ddat->set[s].desc.doppler.frame[f].overflow_m2; om[f] = 0.0; weight[f] = ddat->set[s].desc.doppler.frame[f].weight; } } /* WARNING WARNING WARNING */ /* This kernel is for accuracy testing only. 
Must develop a proper * parallel reduction for this instead */ __global__ void c2s_deldop_add_o2_krnl( struct par_t *dpar, struct dat_t *ddat, double *o2, double *m2, double *om, int *ndel, int *ndop, int nThreads, int s, int f) { /* ndel*ndop-threaded kernel */ int offset = blockIdx.x * blockDim.x + threadIdx.x; int i = offset % ndel[f] + 1; int j = offset / ndel[f] + 1; double temp; if (offset < nThreads) { /* The following two lines implement this: * o2 += obs[i][j]*obs[i][j]*oneovervar[i][j]; */ temp = ddat->set[s].desc.deldop.frame[f].obs[i][j] * ddat->set[s].desc.deldop.frame[f].obs[i][j] * ddat->set[s].desc.deldop.frame[f].oneovervar[i][j]; atomicAdd(&o2[f], temp); /* The following two lines implement this: * m2 += fit[i][j]*fit[i][j]*oneovervar[i][j]; */ temp = ddat->set[s].desc.deldop.frame[f].fit[i][j] * ddat->set[s].desc.deldop.frame[f].fit[i][j] * ddat->set[s].desc.deldop.frame[f].oneovervar[i][j]; atomicAdd(&m2[f], temp); /* The following two lines implement this: * om += fit[i][j]*obs[i][j]*oneovervar[i][j]; */ temp = ddat->set[s].desc.deldop.frame[f].fit[i][j] * ddat->set[s].desc.deldop.frame[f].obs[i][j] * ddat->set[s].desc.deldop.frame[f].oneovervar[i][j]; atomicAdd(&om[f], temp); } } __global__ void c2s_add_deldop_contributions_krnl( struct par_t *dpar, struct dat_t *ddat, double *o2, double *m2, double *om, double *weight, int *ndel, int *ndop, double *chi2_deldop_frame, int s, int nframes) { /* nframes-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; if (f < nframes) { double calval, err, o2_fit0, dof_fit0, err_fit0, thresh_fit0; chi2_deldop_frame[f] = 0.0; int off, i, j; // o2[f] += ddat->set[s].desc.deldop.frame[f].overflow_o2; // m2[f] += ddat->set[s].desc.deldop.frame[f].overflow_m2; /* If this frame's calibration factor is allowed to float, set it to * minimize chi-square, the sum over all pixels of * { (obs - calfact*fit)^2 / variance } */ if (ddat->set[s].desc.deldop.frame[f].cal.state == 'f') { if (om[f] > 0.0) ddat->set[s].desc.deldop.frame[f].cal.val = om[f]/m2[f]; else ddat->set[s].desc.deldop.frame[f].cal.val = TINYCALFACT; } /* Compute chi-square for this frame */ calval = ddat->set[s].desc.deldop.frame[f].cal.val; err = weight[f] * (o2[f] - (2 * calval * om[f]) + (calval * calval * m2[f])); ddat->set[s].desc.deldop.frame[f].chi2 = err; chi2_deldop_frame[f] += err; //atomicAdd(&c2s_chi2_all_deldop, chi2_deldop_frame[f]); /* Compute chi-square contributions and deg. 
of freedom due to pixels * whose model signal is less than or equal to 'chi2fit0_thresh' * standard deviations of the noise in the data frame */ o2_fit0 = dof_fit0 = err_fit0 = 0.0; thresh_fit0 = dpar->chi2fit0_thresh * ddat->set[s].desc.deldop.frame[f].sdev; // if (dpar->write_chi2fit0) { // for (i=0; i<ndel[f]; i++) // for (j=0; j<ndop[f]; j++) // off = j*ndel[f] + i; // For the unrolled fit, obs pointers // if (calval*ddat->set[s].desc.deldop.frame[f].fit[i][j] <= thresh_fit0) { // o2_fit0 += ddat->set[s].desc.deldop.frame[f].obs[i][j]* // ddat->set[s].desc.deldop.frame[f].obs[i][j]* // ddat->set[s].desc.deldop.frame[f].oneovervar[i][j]; // if (ddat->set[s].desc.deldop.frame[f].oneovervar[i][j] > 0.0) // dof_fit0 += weight[f]; // } // err_fit0 = weight[f] * o2_fit0; // atomicAdd(&c2s_chi2_fit0_deldop64, err_fit0); // atomicAdd(&c2s_dof_fit0_deldop64, dof_fit0); // } } } __global__ void c2s_add_dop_contrbts_srl_krnl( struct par_t *dpar, struct dat_t *ddat, double *o2, double *m2, double *om, double *weight, int *ndop, double *chi2_doppler_frame, int s, int f) { /* Single-threaded kernel */ if (threadIdx.x == 0) { int j; double calval, err, o2_fit0, dof_fit0, err_fit0, thresh_fit0; chi2_doppler_frame[f] = 0.0; /* If this frame's calibration factor is allowed to float, set it to * minimize chi-square, the sum over all bins of { (obs - calfact*fit)^2 / variance }. */ if (ddat->set[s].desc.doppler.frame[f].cal.state == 'f') { if (om[f] > 0.0) ddat->set[s].desc.doppler.frame[f].cal.val = om[f]/m2[f]; else { ddat->set[s].desc.doppler.frame[f].cal.val = TINYCALFACT; if (dpar->action != FIT ) printf("WARNING: set %2d frame %2d had negative calfact reset to %10.4e\n", s, f, ddat->set[s].desc.doppler.frame[f].cal.val); } } /* Compute chi-square for this frame */ calval = ddat->set[s].desc.doppler.frame[f].cal.val; err = weight[f] * (o2[f] - 2*calval*om[f] + calval*calval*m2[f]); ddat->set[s].desc.doppler.frame[f].chi2 = err; chi2_doppler_frame[f] += err; //if (list_breakdown) //c2s_chi2_all_doppler += err; /* Compute chi-square contributions and dof due to bins whose model * signal is =< 'chi2fit0_thresh' standard deviations of the noise in * the data frame */ o2_fit0 = dof_fit0 = err_fit0 = 0.0; thresh_fit0 = dpar->chi2fit0_thresh * ddat->set[s].desc.doppler.frame[f].sdev; if (dpar->write_chi2fit0) { for (j=0; j<ndop[f]; j++) if (calval*ddat->set[s].desc.doppler.frame[f].fit[j] <= thresh_fit0) { o2_fit0 += ddat->set[s].desc.doppler.frame[f].obs[j] * ddat->set[s].desc.doppler.frame[f].obs[j] *ddat->set[s].desc.doppler.frame[f].oneovervar[j]; if (ddat->set[s].desc.doppler.frame[f].oneovervar[j] > 0.0) dof_fit0 += weight[f]; } err_fit0 = weight[f]*o2_fit0; atomicAdd(&c2s_chi2_fit0_doppler64, err_fit0); atomicAdd(&c2s_dof_fit0_doppler64, dof_fit0); } } } __global__ void c2s_doppler_add_o2_krnl(struct dat_t *ddat, double *o2, double *m2, double *om, int *ndop, int s, int f) { /* ndop-threaded kernel */ int j = blockIdx.x * blockDim.x + threadIdx.x+1; double temp; if (j <= ndop[f]) { /* Add contributions from power within frame limits */ /* Next 2 lines implement: o2 += obs[j]*obs[j]*oneovervar[j]; */ temp = ddat->set[s].desc.doppler.frame[f].obs[j] * ddat->set[s].desc.doppler.frame[f].obs[j] * ddat->set[s].desc.doppler.frame[f].oneovervar[j]; atomicAdd(&o2[f], temp); /* Next 2 lines implement: m2 += fit[j]*fit[j]*oneovervar[j]; */ temp = ddat->set[s].desc.doppler.frame[f].fit[j] * ddat->set[s].desc.doppler.frame[f].fit[j] * ddat->set[s].desc.doppler.frame[f].oneovervar[j]; atomicAdd(&m2[f], temp); /* 
Next 2 lines implement: om += fit[j]*obs[j]*oneovervar[j]; */ temp = ddat->set[s].desc.doppler.frame[f].fit[j] * ddat->set[s].desc.doppler.frame[f].obs[j] * ddat->set[s].desc.doppler.frame[f].oneovervar[j]; atomicAdd(&om[f], temp); } } __global__ void c2s_lghtcrv_serial_krnl(struct dat_t *ddat, int s, double *dof_chi2set, double3 *o2m2om) { /* single-threaded kernel */ o2m2om[0].x = o2m2om[0].y = o2m2om[0].z = 0.0; double obs, fit, oneovervar, calval; if (threadIdx.x == 0) { for (int i=1; i<=ddat->set[s].desc.lghtcrv.n; i++) { obs = ddat->set[s].desc.lghtcrv.obs[i]; fit = ddat->set[s].desc.lghtcrv.fit[i]; oneovervar = ddat->set[s].desc.lghtcrv.oneovervar[i]; // printf("%i, %3.8g, %3.8g, %3.8g\n", i, fit, fit, oneovervar); // printf("fit[%i], %3.8g\n", i, fit); o2m2om[0].x += obs * obs * oneovervar; o2m2om[0].y += fit * fit * oneovervar; o2m2om[0].z += fit * obs * oneovervar; o2m2om[1].x = o2m2om[1].y = o2m2om[1].z = 0.0; } /* If lightcurve's calibration factor is allowed to float, set it to mini- * mize chi-square (sum over all points of {(obs-calfact*fit)^2/variance}*/ if (ddat->set[s].desc.lghtcrv.cal.state == 'f') { if (o2m2om[0].z > 0.0) ddat->set[s].desc.lghtcrv.cal.val = o2m2om[0].z/o2m2om[0].y; else ddat->set[s].desc.lghtcrv.cal.val = TINYCALFACT; } calval = ddat->set[s].desc.lghtcrv.cal.val; /* Compute chi-square for dataset */ dof_chi2set[0] = ddat->set[s].desc.lghtcrv.dof; dof_chi2set[1] = ddat->set[s].desc.lghtcrv.weight * (o2m2om[0].x - 2 * calval * o2m2om[0].z + calval * calval * o2m2om[0].y); // printf("dof=%3.8g\n", dof_chi2set[0]); // printf("weight=%3.8g\n", ddat->set[s].desc.lghtcrv.weight); // printf("calval=%3.8g\n", calval); // printf("weight * (o2 - 2 * calval * om + calval * calval * m2) = %3.8g\n", dof_chi2set[1]); } } __global__ void c2s_get_prntflgs_krnl(struct par_t *dpar, struct dat_t *ddat) { /* Single-threaded kernel */ if (threadIdx.x == 0) { c2s_print_breakdown = (ddat->dof_deldop > SMALLVAL || ddat->dof_doppler > SMALLVAL || ddat->dof_poset > SMALLVAL || ddat->dof_lghtcrv > SMALLVAL); c2s_dof_deldop = ddat->dof_deldop; c2s_dof_doppler = ddat->dof_doppler; c2s_dof_poset = ddat->dof_poset; c2s_dof_lghtcrv = ddat->dof_lghtcrv; c2s_dof = ddat->dof; c2s_write_chi2fit0 = dpar->write_chi2fit0; c2s_badradar = dpar->badradar; c2s_badphoto = dpar->badphoto; c2s_baddopscale = dpar->baddopscale; c2s_badposet = dpar->badposet; c2s_posbnd = dpar->posbnd; c2s_baddiam = dpar->baddiam; } } __global__ void c2_add_chi2_krnl(struct dat_t *ddat, int s) { /* Single-threaded kernel */ if (threadIdx.x == 0) ddat->chi2 += ddat->set[s].chi2; } __global__ void set_global_chi2_krnl(struct dat_t *ddat, double chi2a, double chi2b) { /* Single-threaded kernel */ if (threadIdx.x == 0) ddat->chi2 = chi2a + chi2b; } __global__ void c2_set_chi2_krnl(struct dat_t *ddat, double chi2, int s) { /* Single-threaded kernel */ if (threadIdx.x == 0) ddat->set[s].chi2 = chi2; } __global__ void deldop_wrt_chi2fit0_krnl(struct par_t *dpar, struct dat_t *ddat, int s, int f, int *ndel, int *ndop, int nThreads, double *returns, double *o2_fit0_dof_fit0) { /* ndel*ndop-threaded kernel */ int offset = blockIdx.x * blockDim.x + threadIdx.x; int i = offset % ndel[f] + 1; int j = offset / ndel[f] + 1; double temp, err_fit0 = 0.0, thresh_fit0; /* returns[0] = chi2_fit0_deldop * returns[1] = dof_fit0_deldop */ if (dpar->write_chi2fit0) { if (offset < nThreads) { thresh_fit0 = dpar->chi2fit0_thresh * ddat->set[s].desc.deldop.frame[f].sdev; if (ddat->set[s].desc.deldop.frame[f].cal.val * 
ddat->set[s].desc.deldop.frame[f].fit[i][j] <= thresh_fit0) { temp = ddat->set[s].desc.deldop.frame[f].obs[i][j] * ddat->set[s].desc.deldop.frame[f].obs[i][j] * ddat->set[s].desc.deldop.frame[f].oneovervar[i][j]; atomicAdd(&o2_fit0_dof_fit0[0], temp); if (ddat->set[s].desc.deldop.frame[f].oneovervar[i][j] > 0.0) { temp = ddat->set[s].desc.deldop.frame[f].weight; atomicAdd(&o2_fit0_dof_fit0[1], temp); } } } __syncthreads(); if (offset == 0) { temp = ddat->set[s].desc.deldop.frame[f].weight; err_fit0 = temp*o2_fit0_dof_fit0[0]; returns[0] = err_fit0; returns[1] = o2_fit0_dof_fit0[1]; } } else if (offset==0) returns[0] = returns[1] = 0.0; } __global__ void dop_wrt_chi2fit0_krnl(struct par_t *dpar, struct dat_t *ddat, int s, int f, int nThreads, double *returns, double *o2_fit0_dof_fit0) { /* ndop-threaded kernel */ int offset = blockIdx.x * blockDim.x + threadIdx.x; double temp; double err_fit0 = 0.0, thresh_fit0; returns[0] = returns[1] = 0.0; /* returns[0] = chi2_fit0_deldop * returns[1] = dof_fit0_deldop */ if (dpar->write_chi2fit0) { if (offset < nThreads) { thresh_fit0 = dpar->chi2fit0_thresh * ddat->set[s].desc.doppler.frame[f].sdev; if (ddat->set[s].desc.doppler.frame[f].cal.val * ddat->set[s].desc.doppler.frame[f].fit_s[offset] <= thresh_fit0) { temp = ddat->set[s].desc.doppler.frame[f].obs[offset] * ddat->set[s].desc.doppler.frame[f].obs[offset] * ddat->set[s].desc.doppler.frame[f].oneovervar[offset]; atomicAdd(&o2_fit0_dof_fit0[0], temp); if (ddat->set[s].desc.doppler.frame[f].oneovervar[offset] > 0.0) { temp = ddat->set[s].desc.doppler.frame[f].weight; atomicAdd(&o2_fit0_dof_fit0[1], temp); } } } __syncthreads(); if (offset == 0) { temp = ddat->set[s].desc.doppler.frame[f].weight; err_fit0 = temp* o2_fit0_dof_fit0[0]; returns[0] = err_fit0; returns[1] = o2_fit0_dof_fit0[1]; } } else if (offset==0) returns[0] = returns[1] = 0.0; } __host__ double chi2_gpu( struct par_t *dpar, struct dat_t *ddat, unsigned char *htype, unsigned char *dtype, int *hnframes, int *hlc_n, int list_breakdown, int nsets, hipStream_t *c2s_stream, int max_frames) { /* This version of chi2 accepts an existing streams array from the calling * function. 
Number of streams in array is guaranteed to be as large as * the the largest nframes for all sets */ int s, print_breakdown; unsigned char write_chi2fit0, baddiam, badphoto, badposet, baddopscale, posbnd, badradar; dim3 BLK,THD; double chi2_all_doppler, chi2_all_deldop, chi2_all_poset, chi2_all_lghtcrv, chi2_fit0_doppler, chi2_fit0_deldop, chi2_fit0_poset, /*chi2_branch, */ dof_fit0_doppler, dof_fit0_deldop, /*dof_fit0_poset, */chi2, dof; double dof_deldop, dof_doppler, dof_poset, dof_lghtcrv; char dofstring[MAXLEN], dof0string[MAXLEN]; /* Initialize variables that accumulate chi-square values */ chi2_all_deldop = chi2_all_doppler = chi2_all_poset = chi2_all_lghtcrv = chi2_fit0_deldop = chi2_fit0_doppler /*= chi2_fit0_poset*/ = 0.0; dof_fit0_deldop = dof_fit0_doppler/* = dof_fit0_poset*/ = 0.0; /* Initialize variables that accumulate chi-square values */ hipLaunchKernelGGL(( c2s_init_krnl), dim3(1),dim3(1), 0, 0, ddat, nsets); checkErrorAfterKernelLaunch("c2s_init_krnl2, chi2_cuda_streams"); /* Loop through all datasets, carry out chi-square computations, and * provide screen and image output */ for (s=0; s<nsets; s++) { switch (htype[s]) { case DELAY: chi2 = chi2_deldop_gpu(dpar, ddat, s, list_breakdown, &chi2_all_deldop, &chi2_fit0_deldop, &dof_fit0_deldop, hnframes[s], c2s_stream); hipLaunchKernelGGL(( c2_set_chi2_krnl), dim3(1),dim3(1), 0, 0, ddat, chi2, s); checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); // printf("chi2_set[%i] (Deldop), %3.8g\n", s, chi2); break; case DOPPLER: chi2 = chi2_doppler_gpu(dpar, ddat, s,list_breakdown, &chi2_all_doppler, &chi2_fit0_doppler, &dof_fit0_doppler, hnframes[s], c2s_stream); hipLaunchKernelGGL(( c2_set_chi2_krnl), dim3(1),dim3(1), 0, 0, ddat, chi2, s); checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); // printf("chi2_set[%i] (Doppler), %3.8g\n", s, chi2); break; case POS: printf("\nWrite chi2_poset_cuda!\n"); // dat->set[s].chi2 = chi2_poset(dpar, s); break; case LGHTCRV: chi2 = chi2_lghtcrv_gpu(dpar, ddat, s, list_breakdown, &chi2_all_lghtcrv, hnframes[s], hlc_n[s]); hipLaunchKernelGGL(( c2_set_chi2_krnl), dim3(1),dim3(1), 0, 0, ddat, chi2, s); checkErrorAfterKernelLaunch("c2_set_chi2_krnl, chi2_cuda"); // printf("chi2_set[%i] (lghtcrv), %3.8g\n", s, chi2); break; default: printf("chi2_gpu.cu: can't handle this type yet\n"); } /* Single-thread kernel adds ddat->set[s].chi2 to ddat->chi2 */ hipLaunchKernelGGL(( c2_add_chi2_krnl), dim3(1),dim3(1), 0, 0, ddat, s); checkErrorAfterKernelLaunch("c2_add_chi2_krnl"); } /* end for loop over datasets */ /* Launch single-threaded kernel to retrieve ddat->chi2 to return it */ hipLaunchKernelGGL(( c2s_retrieve_chi2_krnl), dim3(1),dim3(1), 0, 0, ddat); checkErrorAfterKernelLaunch("c2s_retrieve_chi2_krnl"); gpuErrchk(hipMemcpyFromSymbol(&chi2, c2s_chi2, sizeof(double), 0, hipMemcpyDeviceToHost)); /*.......................................................................*/ /* Call kernel to get flags from ddat */ hipLaunchKernelGGL(( c2s_get_prntflgs_krnl), dim3(1),dim3(1), 0, 0, dpar, ddat); checkErrorAfterKernelLaunch("c2s_get_prntflgs_krnl"); gpuErrchk(hipMemcpyFromSymbol(&print_breakdown, c2s_print_breakdown, sizeof(int), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&dof_deldop, c2s_dof_deldop, sizeof(double), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&dof_doppler, c2s_dof_doppler, sizeof(double), 0, hipMemcpyDeviceToHost)); // gpuErrchk(hipMemcpyFromSymbol(&dof_poset, c2s_dof_poset, sizeof(double), // 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&dof_lghtcrv, 
c2s_dof_lghtcrv, sizeof(double), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&write_chi2fit0, c2s_write_chi2fit0, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&dof, c2s_dof, sizeof(double), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&baddiam, c2s_baddiam, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&badphoto, c2s_badphoto, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&posbnd, c2s_posbnd, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&badposet, c2s_badposet, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&badradar, c2s_badradar, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&baddopscale, c2s_baddopscale, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); if (list_breakdown) { if (print_breakdown) { printf("#\n"); if (dof_deldop > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_deldop, SMALLVAL, "%f"); printf("delay chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_deldop, dofstring, chi2_all_deldop/dof_deldop); if (write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop, dof0string); } } if (dof_doppler > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_doppler, SMALLVAL, "%f"); printf("Doppler chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_doppler, dofstring, chi2_all_doppler/dof_doppler); if (write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_doppler, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_doppler, dof0string); } } // if (dof_poset > SMALLVAL) { // intifpossible( dofstring, MAXLEN, dof_poset, SMALLVAL, "%f"); // printf("POS chi2 = %e for %s dof (reduced chi2 = %f)\n", // chi2_all_poset, dofstring, chi2_all_poset/dof_poset); // if (write_chi2fit0) { // intifpossible( dof0string, MAXLEN, dof_fit0_poset, SMALLVAL, "%f"); // printf(" (%e outside model for %s dof)\n", // chi2_fit0_poset, dof0string); // } // } if (dof_lghtcrv > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_lghtcrv, SMALLVAL, "%f"); printf("lghtcrv chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_lghtcrv, dofstring, chi2_all_lghtcrv/dof_lghtcrv); } intifpossible( dofstring, MAXLEN, dof, SMALLVAL, "%f"); printf("ALLDATA chi2 = %e for %s dof (reduced chi2 = %f)", chi2, dofstring, chi2/dof); } else { intifpossible( dofstring, MAXLEN, ddat->dof, SMALLVAL, "%f"); printf(" chi2 = %e for %s dof (reduced chi2 = %f)", chi2, dofstring, chi2/dof); } if (baddiam) printf(" (BAD DIAMS)"); if (badphoto) printf(" (BAD PHOTO) (chi2_cuda)"); if (posbnd) printf(" (BAD POS)"); if (badposet) printf(" (BAD POSET)"); if (badradar) printf(" (BAD RADAR)"); if (baddopscale) printf(" (BAD DOPSCALE)"); printf("\n"); if (print_breakdown && write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop + dof_fit0_doppler/* + dof_fit0_poset*/, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop + chi2_fit0_doppler/* + chi2_fit0_poset*/, dof0string); } printf("#\n"); fflush(stdout); } /*.......................................................................*/ return chi2; } __host__ double chi2_MFS_gpu( struct par_t *dpar, struct dat_t *ddat, int list_breakdown, int nsets, hipStream_t *c2s_stream) { /* This version of chi2 accepts an existing streams array from the calling * function. 
Number of streams in array is guaranteed to be as large as * the the largest nframes for all sets */ int s, print_breakdown; unsigned char write_chi2fit0, baddiam, badphoto, badposet, baddopscale, posbnd, badradar; dim3 BLK,THD; double chi2_all_deldop, chi2_fit0_deldop, dof_fit0_deldop, chi2, dof, dof_deldop; char dofstring[MAXLEN], dof0string[MAXLEN]; /* Initialize variables that accumulate chi-square values */ chi2_all_deldop = dof_fit0_deldop = 0.0; /* Initialize variables that accumulate chi-square values */ hipLaunchKernelGGL(( c2s_init_krnl), dim3(1),dim3(1), 0, 0, ddat, nsets); checkErrorAfterKernelLaunch("c2s_init_krnl2"); /* Loop through all datasets, carry out chi-square computations, and * provide screen and image output */ for (s=0; s<nsets; s++) { chi2 = chi2_deldop_gpu(dpar, ddat, s, list_breakdown, &chi2_all_deldop, &chi2_fit0_deldop, &dof_fit0_deldop, 1, c2s_stream); hipLaunchKernelGGL(( c2_set_chi2_krnl), dim3(1),dim3(1), 0, 0, ddat, chi2, s); checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); // printf("chi2_set[%i] (Deldop), %3.8g\n", s, chi2); /* Single-thread kernel adds ddat->set[s].chi2 to ddat->chi2 */ hipLaunchKernelGGL(( c2_add_chi2_krnl), dim3(1),dim3(1), 0, 0, ddat, s); checkErrorAfterKernelLaunch("c2_add_chi2_krnl"); } /* end for loop over datasets */ /* Launch single-threaded kernel to retrieve ddat->chi2 to return it */ hipLaunchKernelGGL(( c2s_retrieve_chi2_krnl), dim3(1),dim3(1), 0, 0, ddat); checkErrorAfterKernelLaunch("c2s_retrieve_chi2_krnl"); gpuErrchk(hipMemcpyFromSymbol(&chi2, c2s_chi2, sizeof(double), 0, hipMemcpyDeviceToHost)); /*.......................................................................*/ /* Call kernel to get flags from ddat */ hipLaunchKernelGGL(( c2s_get_prntflgs_krnl), dim3(1),dim3(1), 0, 0, dpar, ddat); checkErrorAfterKernelLaunch("c2s_get_prntflgs_krnl"); gpuErrchk(hipMemcpyFromSymbol(&print_breakdown, c2s_print_breakdown, sizeof(int), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&dof_deldop, c2s_dof_deldop, sizeof(double), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&write_chi2fit0, c2s_write_chi2fit0, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&dof, c2s_dof, sizeof(double), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&baddiam, c2s_baddiam, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&badphoto, c2s_badphoto, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&posbnd, c2s_posbnd, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&badposet, c2s_badposet, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&badradar, c2s_badradar, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&baddopscale, c2s_baddopscale, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); if (list_breakdown) { if (print_breakdown) { printf("#\n"); if (dof_deldop > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_deldop, SMALLVAL, "%f"); printf("delay chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_deldop, dofstring, chi2_all_deldop/dof_deldop); if (write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop, dof0string); } } intifpossible( dofstring, MAXLEN, dof, SMALLVAL, "%f"); printf("ALLDATA chi2 = %e for %s dof (reduced chi2 = %f)", chi2, dofstring, chi2/dof); } else { intifpossible( dofstring, MAXLEN, ddat->dof, SMALLVAL, "%f"); 
printf(" chi2 = %e for %s dof (reduced chi2 = %f)", chi2, dofstring, chi2/dof); } if (baddiam) printf(" (BAD DIAMS)"); if (badphoto) printf(" (BAD PHOTO) (chi2_cuda)"); if (posbnd) printf(" (BAD POS)"); if (badposet) printf(" (BAD POSET)"); if (badradar) printf(" (BAD RADAR)"); if (baddopscale) printf(" (BAD DOPSCALE)"); printf("\n"); if (print_breakdown && write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop, dof0string); } printf("#\n"); fflush(stdout); } /*.......................................................................*/ return chi2; } __host__ double chi2_pthreads( struct par_t *dpar0, struct par_t *dpar1, struct dat_t *ddat0, struct dat_t *ddat1, unsigned char *htype, unsigned char *dtype0, unsigned char *dtype1, int *hnframes, int *hlc_n, int *GPUID, int list_breakdown, int nsets, int max_frames, pthread_t thread1, pthread_t thread2, hipStream_t *gpu0_stream, hipStream_t *gpu1_stream) { /* This version of chi2 splits all work between two host threads (pthreads) * which each have their own assigned GPU. It is intended as dual-threaded * host application with dual-GPU usage. */ int print_breakdown; unsigned char write_chi2fit0, baddiam, badphoto, badposet, baddopscale, posbnd, badradar; dim3 BLK,THD; double chi2_all_doppler, chi2_all_deldop, chi2_all_poset, chi2_all_lghtcrv, chi2_fit0_doppler, chi2_fit0_deldop, chi2_fit0_poset, /*chi2_branch, */ dof_fit0_doppler, dof_fit0_deldop, dof_fit0_poset, chi2, dof; double dof_deldop, dof_doppler, dof_poset, dof_lghtcrv; char dofstring[MAXLEN], dof0string[MAXLEN]; /* Initialize variables that accumulate chi-square values */ chi2_all_deldop = chi2_all_doppler = chi2_all_poset = chi2_all_lghtcrv = chi2_fit0_deldop = chi2_fit0_doppler = chi2_fit0_poset = 0.0; dof_fit0_deldop = dof_fit0_doppler = dof_fit0_poset = 0.0; gpuErrchk(hipSetDevice(GPU0)); /* Create and populate the structs needed to pass information to the * pthreaded sub functions */ chi2_data data1, data2; data1.gpuid = GPU0; data2.gpuid = GPU1; data1.thread_no = 1; data2.thread_no = 2; data1.gpu_stream = gpu0_stream; data2.gpu_stream = gpu1_stream; data1.GPUID = data2.GPUID = GPUID; data1.data = ddat0; data2.data = ddat1; data1.parameter = dpar0; data2.parameter = dpar1; data1.dtype = dtype0; data2.dtype = dtype1; data1.htype = data2.htype = htype; data1.hlc_n = data2.hlc_n = hlc_n; data1.list_breakdown = data2.list_breakdown = list_breakdown; data1.max_frames = data2.max_frames = max_frames; data1.nframes = data2.nframes = hnframes; data1.nsets = data2.nsets = nsets; data1.chi2_all_deldop = data2.chi2_all_deldop = 0.0; data1.chi2_all_doppler = data2.chi2_all_doppler = 0.0; data1.chi2_all_poset = data2.chi2_all_poset = 0.0; data1.chi2_all_lghtcrv = data2.chi2_all_lghtcrv = 0.0; data1.chi2_fit0_deldop = data2.chi2_fit0_deldop = 0.0; data1.chi2_fit0_doppler = data2.chi2_fit0_doppler = 0.0; data1.chi2_fit0_poset = data2.chi2_fit0_poset = 0.0; data1.dof_fit0_deldop = data2.dof_fit0_deldop = 0.0; data1.dof_fit0_doppler = data2.dof_fit0_doppler = 0.0; data1.dof_fit0_poset = data2.dof_fit0_poset = 0.0; data1.chi2 = data2.chi2 = 0.0; /* Initialize variables that accumulate chi-square values */ hipLaunchKernelGGL(( c2s_init_krnl), dim3(1),dim3(1), 0, 0, ddat0, nsets); checkErrorAfterKernelLaunch("c2s_init_krnl for GPU0"); gpuErrchk(hipSetDevice(GPU1)); hipLaunchKernelGGL(( c2s_init_krnl), dim3(1),dim3(1), 0, 0, ddat1, nsets); checkErrorAfterKernelLaunch("c2s_init_krnl for GPU1"); 
gpuErrchk(hipSetDevice(GPU0)); /* From here, launch the pthreaded subfunction */ pthread_create(&thread1, NULL, chi2_pthread_sub,(void*)&data1); pthread_create(&thread2, NULL, chi2_pthread_sub,(void*)&data2); /* The calculation of all sets happens now */ pthread_join(thread1, NULL); pthread_join(thread2, NULL); /* Complete calculations of values that will be used during a fit to * increase the objective function for models with bad properties */ gpuErrchk(hipSetDevice(GPU0)); hipLaunchKernelGGL(( set_global_chi2_krnl), dim3(1),dim3(1), 0, 0, ddat0, data1.chi2, data2.chi2); checkErrorAfterKernelLaunch("set_global_chi2_krnl"); gpuErrchk(hipSetDevice(GPU1)); hipLaunchKernelGGL(( set_global_chi2_krnl), dim3(1),dim3(1), 0, 0, ddat1, data1.chi2, data2.chi2); checkErrorAfterKernelLaunch("set_global_chi2_krnl"); gpuErrchk(hipSetDevice(GPU0)); /* Recombine/update the all-data-type values */ chi2 = data1.chi2 + data2.chi2; chi2_all_deldop = data1.chi2_all_deldop + data2.chi2_all_deldop; chi2_all_doppler = data1.chi2_all_doppler + data2.chi2_all_doppler; chi2_all_poset = data1.chi2_all_poset + data2.chi2_all_poset; chi2_all_lghtcrv = data1.chi2_all_lghtcrv + data2.chi2_all_lghtcrv; chi2_fit0_deldop = data1.chi2_fit0_deldop + data2.chi2_fit0_deldop; chi2_fit0_doppler = data1.chi2_fit0_doppler + data2.chi2_fit0_doppler; chi2_fit0_poset = data1.chi2_fit0_poset + data2.chi2_fit0_poset; dof_fit0_deldop = data1.dof_fit0_deldop + data2.dof_fit0_deldop; dof_fit0_doppler = data1.dof_fit0_doppler + data2.dof_fit0_doppler; dof_fit0_poset = data1.dof_fit0_poset + data2.dof_fit0_poset; /* Call kernel to get flags from ddat */ hipLaunchKernelGGL(( c2s_get_prntflgs_krnl), dim3(1),dim3(1), 0, 0, dpar0, ddat0); checkErrorAfterKernelLaunch("c2s_get_prntflgs_krnl"); gpuErrchk(hipMemcpyFromSymbol(&print_breakdown, c2s_print_breakdown, sizeof(int), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&dof_deldop, c2s_dof_deldop, sizeof(double), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&dof_doppler, c2s_dof_doppler, sizeof(double), 0, hipMemcpyDeviceToHost)); // gpuErrchk(hipMemcpyFromSymbol(&dof_poset, c2s_dof_poset, sizeof(double), // 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&dof_lghtcrv, c2s_dof_lghtcrv, sizeof(double), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&write_chi2fit0, c2s_write_chi2fit0, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&dof, c2s_dof, sizeof(double), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&baddiam, c2s_baddiam, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&badphoto, c2s_badphoto, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&posbnd, c2s_posbnd, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&badposet, c2s_badposet, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&badradar, c2s_badradar, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&baddopscale, c2s_baddopscale, sizeof(unsigned char), 0, hipMemcpyDeviceToHost)); if (list_breakdown) { if (print_breakdown) { printf("#\n"); if (dof_deldop > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_deldop, SMALLVAL, "%f"); printf("delay chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_deldop, dofstring, chi2_all_deldop/dof_deldop); if (write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", 
chi2_fit0_deldop, dof0string); } } if (dof_doppler > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_doppler, SMALLVAL, "%f"); printf("Doppler chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_doppler, dofstring, chi2_all_doppler/dof_doppler); if (write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_doppler, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_doppler, dof0string); } } // if (dof_poset > SMALLVAL) { // intifpossible( dofstring, MAXLEN, dof_poset, SMALLVAL, "%f"); // printf("POS chi2 = %e for %s dof (reduced chi2 = %f)\n", // chi2_all_poset, dofstring, chi2_all_poset/dof_poset); // if (write_chi2fit0) { // intifpossible( dof0string, MAXLEN, dof_fit0_poset, SMALLVAL, "%f"); // printf(" (%e outside model for %s dof)\n", // chi2_fit0_poset, dof0string); // } // } if (dof_lghtcrv > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_lghtcrv, SMALLVAL, "%f"); printf("lghtcrv chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_lghtcrv, dofstring, chi2_all_lghtcrv/dof_lghtcrv); } intifpossible( dofstring, MAXLEN, dof, SMALLVAL, "%f"); printf("ALLDATA chi2 = %e for %s dof (reduced chi2 = %f)", chi2, dofstring, chi2/dof); } // } else { // intifpossible( dofstring, MAXLEN, ddat->dof, SMALLVAL, "%f"); // printf(" chi2 = %e for %s dof (reduced chi2 = %f)", // chi2, dofstring, chi2/dof); // } if (baddiam) printf(" (BAD DIAMS)"); if (badphoto) printf(" (BAD PHOTO) (chi2_cuda)"); if (posbnd) printf(" (BAD POS)"); if (badposet) printf(" (BAD POSET)"); if (badradar) printf(" (BAD RADAR)"); if (baddopscale) printf(" (BAD DOPSCALE)"); printf("\n"); if (print_breakdown && write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop + dof_fit0_doppler/* + dof_fit0_poset*/, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop + chi2_fit0_doppler/* + chi2_fit0_poset*/, dof0string); } printf("#\n"); fflush(stdout); } /*.......................................................................*/ return chi2; } void *chi2_pthread_sub(void *ptr) { int s; double chi2; chi2_data *data; data = (chi2_data *) ptr; gpuErrchk(hipSetDevice(data->gpuid)); /* Loop through all datasets, carry out chi-square computations, and * provide screen and image output */ // for (s=0; s<data->nsets; s++) { // if (data->GPUID[s]==data->gpuid) { // switch (data->htype[s]) { // case DELAY: // chi2 = chi2_deldop_gpu32(data->parameter, data->data, s, // data->list_breakdown, &data->chi2_all_deldop, // &data->chi2_fit0_deldop, &data->dof_fit0_deldop, // data->nframes[s], data->gpu_stream); // hipLaunchKernelGGL(( c2_set_chi2_krnl), dim3(1),dim3(1), 0, 0, data->data, chi2, s); // checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); //// printf("chi2 for set %i (Delay-Doppler) = %g\n", s, chi2); // break; // case DOPPLER: // chi2 = chi2_doppler_gpu32(data->parameter, data->data, s, // data->list_breakdown, &data->chi2_all_doppler, // &data->chi2_fit0_doppler, &data->dof_fit0_doppler, // data->nframes[s], data->gpu_stream); // hipLaunchKernelGGL(( c2_set_chi2_krnl), dim3(1),dim3(1), 0, 0, data->data, chi2, s); // checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); //// printf("chi2 for set %i (Doppler) = %g\n", s, chi2); // break; // case POS: // printf("\nWrite chi2_poset_cuda!\n"); // // dat->set[s].chi2 = chi2_poset(dpar, s); // break; // case LGHTCRV: // chi2 = chi2_lghtcrv_gpu(data->parameter, data->data, s, // data->list_breakdown, &data->chi2_all_lghtcrv, // data->nframes[s], data->hlc_n[s]); // hipLaunchKernelGGL(( c2_set_chi2_krnl), dim3(1),dim3(1), 0, 0, data->data, 
chi2, s); // checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); // //printf("chi2 for set %i (Lightcurve) = %g\n", s, chi2); // break; // default: // printf("chi2_pthread_sub: can't handle this type yet\n"); // } // data->chi2 += chi2; // } // } // pthread_exit(0); } __host__ double chi2_deldop_gpu( struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_deldop, double *chi2_fit0_deldop, double *dof_fit0_deldop, int nframes, hipStream_t *c2s_stream) { int f, *ndel, *ndop, hndel[nframes], hndop[nframes], nThreads[nframes]; double chi2_set, *chi2_deldop_frame, h_chi2_deldop_frame[nframes]; dim3 BLK[nframes],THD, BLKfrm, THD64; THD.x = maxThreadsPerBlock; THD64.x = 64; BLKfrm.x = floor((THD64.x -1 + nframes)/THD64.x); double *o2_fit0_dof_fit0; /* o2, m2, and om are per-frame radar variables */ double *o2, *m2, *om, *weight, *returns, *hreturns; chi2_set = 0.0; gpuErrchk(hipMalloc((void**)&o2, sizeof(double)*nframes)); gpuErrchk(hipMalloc((void**)&m2, sizeof(double)*nframes)); gpuErrchk(hipMalloc((void**)&om, sizeof(double)*nframes)); gpuErrchk(hipMalloc((void**)&weight, sizeof(double)*nframes)); gpuErrchk(hipMalloc((void**)&chi2_deldop_frame,sizeof(double)*nframes)); gpuErrchk(hipMalloc((void**)&ndel, sizeof(int)*nframes)); gpuErrchk(hipMalloc((void**)&ndop, sizeof(int)*nframes)); gpuErrchk(hipMalloc((void**)&returns, sizeof(double)*2)); gpuErrchk(hipMalloc((void**)&o2_fit0_dof_fit0, sizeof(double)*2)); hreturns = (double *) malloc(2*sizeof(double)); hreturns[0] = hreturns[1] = 0.0; /* Get values for ndel and ndop, and the overflow parameters o2, m2, om */ hipLaunchKernelGGL(( c2s_deldop_init_krnl), dim3(BLKfrm),dim3(THD64), 0, 0, ddat, s, ndel, ndop, o2, m2, om, weight, nframes); checkErrorAfterKernelLaunch("c2s_deldop_init_krnl"); gpuErrchk(hipMemcpy(&hndel, ndel, sizeof(int)*nframes, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(&hndop, ndop, sizeof(int)*nframes, hipMemcpyDeviceToHost)); /* Calculate launch parameters */ for (f=0; f<nframes; f++) { nThreads[f] = hndel[f]*hndop[f]; BLK[f].x = floor((THD.x - 1 + nThreads[f]) / THD.x); } /* Add contributions from power within limits of data frame. This kernel * also takes care of the frame's calibration factor and computes chi2 * for this frame */ sum_o2m2om_gpu(ddat, o2, m2, om, nframes, hndel[0]*hndop[0], s, c2s_stream); hipLaunchKernelGGL(( c2s_add_deldop_contributions_krnl), dim3(BLKfrm),dim3(THD64), 0, 0, dpar, ddat, o2, m2, om, weight, ndel, ndop, chi2_deldop_frame, s, nframes); checkErrorAfterKernelLaunch("c2s_add_deldop_contributions_krnl"); gpuErrchk(hipMemcpy(&h_chi2_deldop_frame, chi2_deldop_frame, sizeof(double) * nframes, hipMemcpyDeviceToHost)); /* Add all frames from device memory to host memoryd */ for (f=0; f<nframes; f++) { chi2_set += h_chi2_deldop_frame[f]; // printf("Set %i deldop, frame %i, %3.8g\n", s, f, h_chi2_deldop_frame[f]); if (list_breakdown) *chi2_all_deldop += h_chi2_deldop_frame[f]; } if (list_breakdown) { for (f=0; f<nframes; f++) { /* WARNING WARNING WARNING */ /* This kernel is for accuracy testing only. 
Must develop a proper * parallel reduction for this instead */ hipLaunchKernelGGL(( deldop_wrt_chi2fit0_krnl), dim3(BLK[f]),dim3(THD), 0, c2s_stream[f], dpar, ddat, s, f, ndel, ndop, nThreads[f], returns, o2_fit0_dof_fit0); checkErrorAfterKernelLaunch("deldop_wrt_chi2fit0_krnl64"); } gpuErrchk(hipMemcpy(hreturns, returns, sizeof(double) * 2, hipMemcpyDeviceToHost)); *chi2_fit0_deldop = hreturns[0]; *dof_fit0_deldop = hreturns[1]; } hipFree(o2); hipFree(m2); hipFree(om); hipFree(ndel); hipFree(ndop); hipFree(weight); hipFree(returns); hipFree(chi2_deldop_frame); hipFree(o2_fit0_dof_fit0); free(hreturns); return chi2_set; } __host__ double chi2_doppler_gpu(struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_doppler, double *chi2_fit0_doppler, double *dof_fit0_doppler, int nframes, hipStream_t *c2s_stream) { int f, *ndop, hndop[nframes]; double chi2_set, *chi2_doppler_frame, h_chi2_doppler_frame[nframes]; dim3 BLK[nframes], THD, BLKfrm, THD64; THD.x = maxThreadsPerBlock; THD64.x = 64; double *o2, *m2, *om, *weight, hreturns[2], *returns, *o2_fit0_dof_fit0; chi2_set = 0.0; BLKfrm.x = floor((THD64.x -1 + nframes)/THD64.x); gpuErrchk(hipMalloc((void**)&o2, sizeof(double)*nframes)); gpuErrchk(hipMalloc((void**)&m2, sizeof(double)*nframes)); gpuErrchk(hipMalloc((void**)&om, sizeof(double)*nframes)); gpuErrchk(hipMalloc((void**)&ndop, sizeof(int)*nframes)); gpuErrchk(hipMalloc((void**)&weight, sizeof(double)*nframes)); gpuErrchk(hipMalloc((void**)&chi2_doppler_frame,sizeof(double)*nframes)); gpuErrchk(hipMalloc((void**)&returns, sizeof(double)*2)); gpuErrchk(hipMalloc((void**)&o2_fit0_dof_fit0, sizeof(double)*2)); hreturns[0] = hreturns[1] = 0.0; /* Get values for ndel and ndop, and the overflow parameters o2, m2, om */ hipLaunchKernelGGL(( c2s_doppler_init_krnl), dim3(BLKfrm),dim3(THD64), 0, 0, ddat, s, ndop, o2, m2, om, weight, nframes); checkErrorAfterKernelLaunch("c2s_doppler_init_krnl64"); gpuErrchk(hipMemcpy(&hndop, ndop, sizeof(int)*nframes, hipMemcpyDeviceToHost)); /* Calculate launch parameters */ for (f=0; f<nframes; f++) BLK[f].x = floor((THD.x - 1 + hndop[f]) / THD.x); /* Loop through all frames for this dataset */ for (f=0; f<nframes; f++) { /* Add contributions from power within data frame limits. 
This kernel * also considers frame's calibration factor & computes frame chi2 */ hipLaunchKernelGGL(( c2s_doppler_add_o2_krnl), dim3(BLK[f]),dim3(THD),0,c2s_stream[f], ddat, o2, m2, om, ndop, s, f); } checkErrorAfterKernelLaunch("c2_doppler_add_o2_krnl64"); // c2s_add_dop_contrbts_krnl<<<BLKfrm,THD64>>>(dpar, ddat, o2, m2, om, weight, // ndop, chi2_doppler_frame, s, nframes); for (f=0; f<nframes; f++) { hipLaunchKernelGGL(( c2s_add_dop_contrbts_srl_krnl), dim3(1),dim3(1), 0, 0, dpar, ddat, o2, m2, om, weight, ndop, chi2_doppler_frame, s, f); } checkErrorAfterKernelLaunch("c2_add_dop_contrbts_krnl64"); gpuErrchk(hipMemcpy(&h_chi2_doppler_frame, chi2_doppler_frame, sizeof(double) * nframes, hipMemcpyDeviceToHost)); /* Add all frames from device memory to host memory*/ for (f=0; f<nframes; f++) { chi2_set += h_chi2_doppler_frame[f]; // printf("h_chi2_doppler_frame[%i]=%3.6g\n", f, h_chi2_doppler_frame[f]); // printf("Set %i doppler, frame %i, %3.8g\n", s, f, h_chi2_doppler_frame[f]); if (list_breakdown) *chi2_all_doppler += h_chi2_doppler_frame[f]; } /* Compute the chi-square contributions and number of degrees of freedom * due to bins whose model signal is less than or equal to 'chi2fit0_thresh' * standard deviations of the noise in the data frame */ if (list_breakdown) { for (f=0; f<nframes; f++) { hipLaunchKernelGGL(( dop_wrt_chi2fit0_krnl), dim3(BLK[f]),dim3(THD), 0, c2s_stream[f], dpar, ddat, s, f, hndop[f], returns, o2_fit0_dof_fit0); checkErrorAfterKernelLaunch("dop_wrt_chi2fit0_krnl64"); } gpuErrchk(hipMemcpy(&hreturns, returns, sizeof(double) * 2, hipMemcpyDeviceToHost)); *chi2_fit0_doppler = hreturns[0]; *dof_fit0_doppler = hreturns[1]; } hipFree(o2); hipFree(m2); hipFree(om); hipFree(ndop); hipFree(weight); hipFree(returns); hipFree(chi2_doppler_frame); hipFree(o2_fit0_dof_fit0); return chi2_set; } __host__ double chi2_lghtcrv_gpu( struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_lghtcrv, int nframes, int lc_n) { double *dof_chi2set, h_dof_chi2set[2]; double3 *o2m2om, *h_o2m2om; dim3 BLK,THD; THD.x = maxThreadsPerBlock; gpuErrchk(hipMalloc((void**)&dof_chi2set, sizeof(double) * 2)); gpuErrchk(hipMalloc((void**)&o2m2om, sizeof(double3) * 2)); // h_o2m2om = (double3 *) malloc(2*sizeof(double3)); BLK = floor((THD.x - 1 + lc_n)/THD.x); hipLaunchKernelGGL(( c2s_lghtcrv_serial_krnl), dim3(1),dim3(1), 0, 0, ddat, s, dof_chi2set, o2m2om); gpuErrchk(hipMemcpy(&h_dof_chi2set, dof_chi2set, sizeof(double)*2, hipMemcpyDeviceToHost)); // gpuErrchk(hipMemcpy(h_o2m2om, o2m2om, sizeof(double3)*2, hipMemcpyDeviceToHost)); // printf("o2=%3.6g\n", h_o2m2om[0].x); // printf("m2=%3.6g\n", h_o2m2om[0].y); // printf("om=%3.6g\n", h_o2m2om[0].z); if (list_breakdown) *chi2_all_lghtcrv += h_dof_chi2set[1]; hipFree(dof_chi2set); hipFree(o2m2om); // free(h_o2m2om); return h_dof_chi2set[1]; }
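The accumulation kernels above (c2s_deldop_add_o2_krnl, c2s_doppler_add_o2_krnl) carry WARNING comments saying they are accuracy-testing stand-ins and that a proper parallel reduction should replace them. The sketch below is one hypothetical way to do that for a single flattened frame; it is not part of the original shape code. The 1-D obs/fit/oneovervar buffers, the npixels argument, and the 256-thread launch are assumptions, and it still relies on double-precision atomicAdd (compute capability 6.0+), exactly as the existing kernels already do. The HIP build would launch the same body through hipLaunchKernelGGL.

/* Hypothetical block-reduction kernel (not from the original file).  Assumes the
 * frame's obs, fit, and oneovervar pixels have been flattened into 1-D device
 * arrays of length npixels, and that the kernel is launched with C2RED_THREADS
 * (a power of two) threads per block. */
#define C2RED_THREADS 256

__global__ void c2s_o2m2om_reduce_krnl(const double *obs, const double *fit,
		const double *oneovervar, int npixels,
		double *o2, double *m2, double *om)
{
	__shared__ double s_o2[C2RED_THREADS], s_m2[C2RED_THREADS], s_om[C2RED_THREADS];
	int tid = threadIdx.x;
	double t_o2 = 0.0, t_m2 = 0.0, t_om = 0.0;

	/* Grid-stride loop: each thread sums its share of pixels in registers */
	for (int i = blockIdx.x*blockDim.x + tid; i < npixels; i += blockDim.x*gridDim.x) {
		double w = oneovervar[i];
		t_o2 += obs[i] * obs[i] * w;
		t_m2 += fit[i] * fit[i] * w;
		t_om += fit[i] * obs[i] * w;
	}
	s_o2[tid] = t_o2;   s_m2[tid] = t_m2;   s_om[tid] = t_om;
	__syncthreads();

	/* Tree reduction in shared memory */
	for (int stride = blockDim.x/2; stride > 0; stride >>= 1) {
		if (tid < stride) {
			s_o2[tid] += s_o2[tid + stride];
			s_m2[tid] += s_m2[tid + stride];
			s_om[tid] += s_om[tid + stride];
		}
		__syncthreads();
	}

	/* One atomic update per block per accumulator, instead of one per pixel */
	if (tid == 0) {
		atomicAdd(o2, s_o2[0]);
		atomicAdd(m2, s_m2[0]);
		atomicAdd(om, s_om[0]);
	}
}

Launching ceil(npixels/256.0) blocks of 256 threads on the frame's stream would cut the per-pixel atomicAdd traffic of the current kernels down to one atomic per block per accumulator; this is only a sketch of the design choice, not a drop-in replacement for the existing per-frame o2/m2/om arrays.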
76ff0ecf52f31b37a77c2c0212e707bc6e6693c5.cu
/***************************************************************************************** chi2.c Compute chi-square for a model that was previously created. Modified 2016 December 8 by ME: Converted to "FIT" action-only, CUDA code Modified 2015 June 10 by CM: Implement smearing for the "fit" and "write" actions Modified 2013 April 24 by CM: Adjust names of output files so they are in alphanumeric order if > 100 per dataset Modified 2012 December 6 by CM: Fix bug introduced on December 6: take "calval" into account when determining whether or not model signal is greater than 'chi2fit0_thresh' sigmas Modified 2012 December 5 by CM: When the "write_chi2fit0" parameter is turned on, display the number of degrees of freedom with zero model signal in addition to the chi2 contribution, and list chi2 and dof for all data combined Implement the 'chi2fit0_thresh' parameter Modified 2012 March 24 by CM: When the root node calls chi2 with list_breakdown = 1 (e.g., for the "write" action), print a warning if the value of any (delay-)Doppler dataset's Doppler scaling factor is out of the allowed range Modified 2010 August 25 by CM: Move TINYCALFACT definition to const.h Modified 2010 August 10 by CM: Implement the "radfitmin" and "radobsmin" parameters: these are the pixel values that map to black for all fit and obs pgm images that are output for delay-Doppler frames Modified 2010 March 20 by CM: For the "write" action for lightcurve datasets, include magnitude uncertainties as a new column in output files fit_MM.dat Modified 2009 August 9 by CM: For the "write" action with the "listfit" parameter turned on, replace a ".rdf" or ".fits" or ".fit" suffix with ".fitdat" Modified 2009 April 3 by CM: When the root node calls chi2 with list_breakdown = 1 (e.g., for the "write" action), print a warning if any plane-of-sky fit image is too small to "contain" all nonzero pixels in the POS sky rendering or if the model is too wide in delay-Doppler space for any (delay-)Doppler fit frame to be correctly constructed For MPI_Recv calls, mpi_par[0] is no longer equal to the MPI action, since the message tag argument already serves that purpose (as of 2008 April 10) -- so the other mpi_par elements are renumbered Modified 2008 June 29 by CM: For the "write" and "orbit" actions, zero out fit pixels/bins in delay-Doppler, Doppler, and plane-of-sky frames for which those pixels/bins have been zeroed out in the pixel mask Modified 2008 April 10 by CM: Use message tag argument to MPI_Recv to identify the MPI action Modified 2007 September 13 by CM: Implement "write_chi2fit0" parameter: for the "write" and "orbit" actions, output chi2 for delay-Doppler, Doppler, and plane-of-sky pixels/bins with zero model power; do this both for each individual frame, for all delay-Doppler data taken together, for all Doppler data taken together, and for all plane-of-sky data taken together Modified 2007 August 18 by CM: Rename MPI_TAG to MPI_TAG_1 to avoid name conflict with mpich headers Modified 2007 August 10 by CM: Create chi2_deldop, chi2_doppler, chi2_poset, and chi2_lghtcrv routines so that the code isn't one giant switch statement When a calfact value is negative, reset it to a tiny positive value rather than to zero in order to protect against division by zero Implement the "radfitmax" and "radobsmax" parameters for applying the same brightness scale to all fit and obs pgm images output for delay-Doppler frames Modified 2007 August 4 by CM: Carry out "write" action steps for the "orbit" action as well Modified 2007 January 6 by 
CM: For the "write" action for lightcurve datasets, output rotation phases to files fit_MM.dat and calc_MM.dat. Modified 2006 October 1 by CM: For lightcurve datasets, chi-square is now computed in intensity space rather than in magnitude space. (File output is still in magnitudes.) Modified 2006 June 21 by CM: In delay-Doppler section, changed delres to del_per_pixel and dopres to dop_per_pixel For POS renderings and plane-of-sky fit frames, changed res to km_per_pixel When the root node calls chi2 with list_breakdown = 1, print a warning if the model extends beyond the boundaries of the POS frames, if any photometric parameter has an illegal value, or if any ellipsoid diameter is tiny or negative. (This change will cause such warnings to be displayed for the "write" action.) Modified 2006 June 18 by CM: Allow each delay-Doppler frame within a dataset to have different dimensions after vignetting Allow each Doppler frame within a dataset to have different dimensions after vignetting Allow plane-of-sky frames to be rectangular rather than square For the "write" action, adjust output for delay-Doppler, Doppler, and plane-of-sky frames to allow for masked-out pixels Modified 2005 June 27 by CM: Renamed "round" function to "iround" to avoid conflict Modified 2005 April 25 by CM: For the "write" action, compute the one-sigma percentage uncertainty on chi2 correctly; the expression used until now, 100*sqrt(2/dof), is only valid when all data are weighted equally. Modified 2005 March 17 by CM: For the "fit" action with parallel processing, check that root receives the responses to the correct broadcast Compute degrees of freedom in routine read_dat rather than here, so that it only needs to be done once per fit rather than repeatedly (dof for individual data types is still computed here) Allow weights and degrees of freedom to be floating-point rather than integer; but if they are integer after all, print integer-rounded values to the screen rather than floating-point For the "write" action with "speckle" turned on, write Doppler file fit_MM_NN.dat with spectral values normalized to the input sdev value, not to the sdev value increased for self-noise. Modified 2005 March 6 by CM: For the "write" action, write calibration factors for plane-of-sky datasets to disk if the "list_posetcal" parameter is turned on Modified 2005 March 2 by CM: Rename some "sdev" and "var" values to be "oneovervar" (1/variance) For the "write" action, adjust arguments of revised "resampim" routine For the "write" action, rotate plane-of-sky fit/obs/res frames so that north is upward, unless poset_scaling = NONE Modified 2005 February 22 by CM: For the "write" action, fix bug (incorrect array dimensions) in scaling fit vs. obs pgm image brightness for plane-of-sky datasets For the "write" action, add the new "image_rebin" argument to function resampim to handle output images which have much coarser resolution than the model POS frames from which they are constructed (i.e., which are greatly undersampled). This situation probably won't arise often: The real importance of "image_rebin" is for dealing with plane-of-sky fit frames in routine calc_fits. 
Modified 2005 January 25 by CM: Take care of uninitialized variables Modified 2005 January 20 by CM: For the "write" action, implement the bilinear and bicubic interpolation options for the "dd_scaling" parameter For the "write" action, write model, data, and residual pgm images for POS datasets Add pgm file output (obs/fit/res) for POS datasets Correct the expression for chi-square for POS datasets (the calibration factor was being ignored) If the calibration factor for a POS frame is a floating parameter, don't allow it to be negative: instead set it to zero. Also display a warning, unless this is a fit and we're not at the end of an iteration. Modified 2005 January 12 by CM: For the "fit" action with parallel processing, revise the code so that "list_breakdown" will work: For each dataset which is handled by a branch node rather than by root, root broadcasts a request for that branch node to run chi2 for just that one dataset and to send the results to root. This is necessary because otherwise each branch node calls chi2 just once to process *all* of its datasets in one shot, without keeping results for different data types (Doppler, delay-Doppler, etc.) separate. Modified 2004 December 1 by CM: For the "write" action, adjust the "listfit" option so that the "listfit_path" directory can be created on the fly if necessary Modified 2004 November 30 by CM: For the "write" action, implement the "listfit" option to write out the model "data" files for all delay-Doppler frames Modified 2004 July 26 by CM: If the calibration factor for a Doppler or delay-Doppler frame is a floating parameter, don't allow it to be negative: instead set it to zero. Also display a warning, unless this is a fit and we're not at the end of an iteration. Modified 2004 June 27 by CM: Fixed bug: For 'dd_scaling' = 'block' the number of pixels per side for the resampled delay-Doppler obs/fit/residual images wasn't being defined Modified 2004 April 3 by CM: Add the "list_breakdown" argument so that we can list chi2 by data type (Doppler, delay-Doppler, POS, lightcurves) for both the "fit" and the "write" action as desired Modified 2004 March 20 by CM: For the "write" action, move final summary screen display from the main program (shape.c) to here For the "write" action, display summaries for each form of data (Doppler, delay-Doppler, POS, lightcurves) taken separately, in addition to the overall summary Modified 2004 February 29 by CM: For lightcurve output, replace JD244 variable with jdoffset parameter For the "write" action, move all screen and file output which relies on updated calibration factors to this routine Modified 2003 May 10 by CM: Account for contributions to chi-square from model (delay-)Doppler power which lies outside the data frame Modified 2003 April 24 by CM: Display chi-square for Doppler datasets when action = "write" Implement the new "weight" parameter *****************************************************************************************/ extern "C" { #include "../shape/head.h" } /* File-scope global CUDA variables */ __device__ int c2s_print_breakdown/*, dof*/; __device__ unsigned char c2s_write_chi2fit0, c2s_badradar, c2s_badphoto, c2s_baddopscale, c2s_badposet, c2s_posbnd, c2s_baddiam; __device__ double c2s_dof_deldop, c2s_dof_doppler, c2s_dof_poset, c2s_dof_lghtcrv, c2s_dof, c2s_chi2, c2s_chi2_set, c2s_chi2_all_doppler; __device__ double c2s_chi2_fit0_deldop64, c2s_dof_fit0_deldop64, c2s_chi2_all_deldop64, c2s_chi2_fit0_doppler64, c2s_dof_fit0_doppler64; typedef struct chi2_thread_t { 
int thread_no; struct par_t *parameter; struct dat_t *data; unsigned char *htype; unsigned char *dtype; int *nframes; int *hlc_n; int *GPUID; int gpuid; int nsets; int list_breakdown; int max_frames; double chi2_all_deldop; double chi2_all_doppler; double chi2_all_poset; double chi2_all_lghtcrv; double chi2_fit0_deldop; double chi2_fit0_doppler; double chi2_fit0_poset; double dof_fit0_deldop; double dof_fit0_doppler; double dof_fit0_poset; double chi2; cudaStream_t *gpu_stream; } chi2_data; /* File-scope CUDA structures */ /* Function prototype declarations */ __host__ double chi2_deldop_gpu(struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_deldop, double *chi2_fit0_deldop, double *dof_fit0_deldop, int nframes, cudaStream_t *c2s_stream); __host__ double chi2_doppler_gpu(struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_doppler, double *chi2_fit0_doppler, double *dof_fit0_doppler, int nframes, cudaStream_t *c2s_stream); __host__ double chi2_lghtcrv_gpu(struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_lghtcrv, int nframes, int lc_n); void *chi2_pthread_sub(void *ptr); __global__ void c2s_init_krnl(struct dat_t *ddat, int nsets) { /* Single-threaded kernel */ if (threadIdx.x == 0) { ddat->chi2 = 0.0; c2s_chi2_set = 0.0; c2s_chi2_all_doppler = 0.0; } } __global__ void c2s_retrieve_chi2_krnl(struct dat_t *ddat) { /* Single-threaded kernel */ if (threadIdx.x == 0) c2s_chi2 = ddat->chi2; } __global__ void c2s_deldop_init_krnl(struct dat_t *ddat, int s, int *ndel, int *ndop, double *o2, double *m2, double *om, double *weight, int nframes) { /* nframes-threaded kernelDELDOP only */ int f = blockIdx.x * blockDim.x + threadIdx.x; if (f < nframes) { ndel[f] = ddat->set[s].desc.deldop.frame[f].ndel; ndop[f] = ddat->set[s].desc.deldop.frame[f].ndop; o2[f] = ddat->set[s].desc.deldop.frame[f].overflow_o2; m2[f] = ddat->set[s].desc.deldop.frame[f].overflow_m2; om[f] = 0.0; weight[f] = ddat->set[s].desc.deldop.frame[f].weight; } } __global__ void c2s_doppler_init_krnl(struct dat_t *ddat, int s, int *ndop, double *o2, double *m2, double *om, double *weight, int nframes) { /* nframes-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; if (f < nframes) { ndop[f] = ddat->set[s].desc.doppler.frame[f].ndop; o2[f] = ddat->set[s].desc.doppler.frame[f].overflow_o2; m2[f] = ddat->set[s].desc.doppler.frame[f].overflow_m2; om[f] = 0.0; weight[f] = ddat->set[s].desc.doppler.frame[f].weight; } } /* WARNING WARNING WARNING */ /* This kernel is for accuracy testing only. 
Must develop a proper * parallel reduction for this instead */ __global__ void c2s_deldop_add_o2_krnl( struct par_t *dpar, struct dat_t *ddat, double *o2, double *m2, double *om, int *ndel, int *ndop, int nThreads, int s, int f) { /* ndel*ndop-threaded kernel */ int offset = blockIdx.x * blockDim.x + threadIdx.x; int i = offset % ndel[f] + 1; int j = offset / ndel[f] + 1; double temp; if (offset < nThreads) { /* The following two lines implement this: * o2 += obs[i][j]*obs[i][j]*oneovervar[i][j]; */ temp = ddat->set[s].desc.deldop.frame[f].obs[i][j] * ddat->set[s].desc.deldop.frame[f].obs[i][j] * ddat->set[s].desc.deldop.frame[f].oneovervar[i][j]; atomicAdd(&o2[f], temp); /* The following two lines implement this: * m2 += fit[i][j]*fit[i][j]*oneovervar[i][j]; */ temp = ddat->set[s].desc.deldop.frame[f].fit[i][j] * ddat->set[s].desc.deldop.frame[f].fit[i][j] * ddat->set[s].desc.deldop.frame[f].oneovervar[i][j]; atomicAdd(&m2[f], temp); /* The following two lines implement this: * om += fit[i][j]*obs[i][j]*oneovervar[i][j]; */ temp = ddat->set[s].desc.deldop.frame[f].fit[i][j] * ddat->set[s].desc.deldop.frame[f].obs[i][j] * ddat->set[s].desc.deldop.frame[f].oneovervar[i][j]; atomicAdd(&om[f], temp); } } __global__ void c2s_add_deldop_contributions_krnl( struct par_t *dpar, struct dat_t *ddat, double *o2, double *m2, double *om, double *weight, int *ndel, int *ndop, double *chi2_deldop_frame, int s, int nframes) { /* nframes-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; if (f < nframes) { double calval, err, o2_fit0, dof_fit0, err_fit0, thresh_fit0; chi2_deldop_frame[f] = 0.0; int off, i, j; // o2[f] += ddat->set[s].desc.deldop.frame[f].overflow_o2; // m2[f] += ddat->set[s].desc.deldop.frame[f].overflow_m2; /* If this frame's calibration factor is allowed to float, set it to * minimize chi-square, the sum over all pixels of * { (obs - calfact*fit)^2 / variance } */ if (ddat->set[s].desc.deldop.frame[f].cal.state == 'f') { if (om[f] > 0.0) ddat->set[s].desc.deldop.frame[f].cal.val = om[f]/m2[f]; else ddat->set[s].desc.deldop.frame[f].cal.val = TINYCALFACT; } /* Compute chi-square for this frame */ calval = ddat->set[s].desc.deldop.frame[f].cal.val; err = weight[f] * (o2[f] - (2 * calval * om[f]) + (calval * calval * m2[f])); ddat->set[s].desc.deldop.frame[f].chi2 = err; chi2_deldop_frame[f] += err; //atomicAdd(&c2s_chi2_all_deldop, chi2_deldop_frame[f]); /* Compute chi-square contributions and deg. 
of freedom due to pixels * whose model signal is less than or equal to 'chi2fit0_thresh' * standard deviations of the noise in the data frame */ o2_fit0 = dof_fit0 = err_fit0 = 0.0; thresh_fit0 = dpar->chi2fit0_thresh * ddat->set[s].desc.deldop.frame[f].sdev; // if (dpar->write_chi2fit0) { // for (i=0; i<ndel[f]; i++) // for (j=0; j<ndop[f]; j++) // off = j*ndel[f] + i; // For the unrolled fit, obs pointers // if (calval*ddat->set[s].desc.deldop.frame[f].fit[i][j] <= thresh_fit0) { // o2_fit0 += ddat->set[s].desc.deldop.frame[f].obs[i][j]* // ddat->set[s].desc.deldop.frame[f].obs[i][j]* // ddat->set[s].desc.deldop.frame[f].oneovervar[i][j]; // if (ddat->set[s].desc.deldop.frame[f].oneovervar[i][j] > 0.0) // dof_fit0 += weight[f]; // } // err_fit0 = weight[f] * o2_fit0; // atomicAdd(&c2s_chi2_fit0_deldop64, err_fit0); // atomicAdd(&c2s_dof_fit0_deldop64, dof_fit0); // } } } __global__ void c2s_add_dop_contrbts_srl_krnl( struct par_t *dpar, struct dat_t *ddat, double *o2, double *m2, double *om, double *weight, int *ndop, double *chi2_doppler_frame, int s, int f) { /* Single-threaded kernel */ if (threadIdx.x == 0) { int j; double calval, err, o2_fit0, dof_fit0, err_fit0, thresh_fit0; chi2_doppler_frame[f] = 0.0; /* If this frame's calibration factor is allowed to float, set it to * minimize chi-square, the sum over all bins of { (obs - calfact*fit)^2 / variance }. */ if (ddat->set[s].desc.doppler.frame[f].cal.state == 'f') { if (om[f] > 0.0) ddat->set[s].desc.doppler.frame[f].cal.val = om[f]/m2[f]; else { ddat->set[s].desc.doppler.frame[f].cal.val = TINYCALFACT; if (dpar->action != FIT ) printf("WARNING: set %2d frame %2d had negative calfact reset to %10.4e\n", s, f, ddat->set[s].desc.doppler.frame[f].cal.val); } } /* Compute chi-square for this frame */ calval = ddat->set[s].desc.doppler.frame[f].cal.val; err = weight[f] * (o2[f] - 2*calval*om[f] + calval*calval*m2[f]); ddat->set[s].desc.doppler.frame[f].chi2 = err; chi2_doppler_frame[f] += err; //if (list_breakdown) //c2s_chi2_all_doppler += err; /* Compute chi-square contributions and dof due to bins whose model * signal is =< 'chi2fit0_thresh' standard deviations of the noise in * the data frame */ o2_fit0 = dof_fit0 = err_fit0 = 0.0; thresh_fit0 = dpar->chi2fit0_thresh * ddat->set[s].desc.doppler.frame[f].sdev; if (dpar->write_chi2fit0) { for (j=0; j<ndop[f]; j++) if (calval*ddat->set[s].desc.doppler.frame[f].fit[j] <= thresh_fit0) { o2_fit0 += ddat->set[s].desc.doppler.frame[f].obs[j] * ddat->set[s].desc.doppler.frame[f].obs[j] *ddat->set[s].desc.doppler.frame[f].oneovervar[j]; if (ddat->set[s].desc.doppler.frame[f].oneovervar[j] > 0.0) dof_fit0 += weight[f]; } err_fit0 = weight[f]*o2_fit0; atomicAdd(&c2s_chi2_fit0_doppler64, err_fit0); atomicAdd(&c2s_dof_fit0_doppler64, dof_fit0); } } } __global__ void c2s_doppler_add_o2_krnl(struct dat_t *ddat, double *o2, double *m2, double *om, int *ndop, int s, int f) { /* ndop-threaded kernel */ int j = blockIdx.x * blockDim.x + threadIdx.x+1; double temp; if (j <= ndop[f]) { /* Add contributions from power within frame limits */ /* Next 2 lines implement: o2 += obs[j]*obs[j]*oneovervar[j]; */ temp = ddat->set[s].desc.doppler.frame[f].obs[j] * ddat->set[s].desc.doppler.frame[f].obs[j] * ddat->set[s].desc.doppler.frame[f].oneovervar[j]; atomicAdd(&o2[f], temp); /* Next 2 lines implement: m2 += fit[j]*fit[j]*oneovervar[j]; */ temp = ddat->set[s].desc.doppler.frame[f].fit[j] * ddat->set[s].desc.doppler.frame[f].fit[j] * ddat->set[s].desc.doppler.frame[f].oneovervar[j]; atomicAdd(&m2[f], temp); /* 
Next 2 lines implement: om += fit[j]*obs[j]*oneovervar[j]; */ temp = ddat->set[s].desc.doppler.frame[f].fit[j] * ddat->set[s].desc.doppler.frame[f].obs[j] * ddat->set[s].desc.doppler.frame[f].oneovervar[j]; atomicAdd(&om[f], temp); } } __global__ void c2s_lghtcrv_serial_krnl(struct dat_t *ddat, int s, double *dof_chi2set, double3 *o2m2om) { /* single-threaded kernel */ o2m2om[0].x = o2m2om[0].y = o2m2om[0].z = 0.0; double obs, fit, oneovervar, calval; if (threadIdx.x == 0) { for (int i=1; i<=ddat->set[s].desc.lghtcrv.n; i++) { obs = ddat->set[s].desc.lghtcrv.obs[i]; fit = ddat->set[s].desc.lghtcrv.fit[i]; oneovervar = ddat->set[s].desc.lghtcrv.oneovervar[i]; // printf("%i, %3.8g, %3.8g, %3.8g\n", i, fit, fit, oneovervar); // printf("fit[%i], %3.8g\n", i, fit); o2m2om[0].x += obs * obs * oneovervar; o2m2om[0].y += fit * fit * oneovervar; o2m2om[0].z += fit * obs * oneovervar; o2m2om[1].x = o2m2om[1].y = o2m2om[1].z = 0.0; } /* If lightcurve's calibration factor is allowed to float, set it to mini- * mize chi-square (sum over all points of {(obs-calfact*fit)^2/variance}*/ if (ddat->set[s].desc.lghtcrv.cal.state == 'f') { if (o2m2om[0].z > 0.0) ddat->set[s].desc.lghtcrv.cal.val = o2m2om[0].z/o2m2om[0].y; else ddat->set[s].desc.lghtcrv.cal.val = TINYCALFACT; } calval = ddat->set[s].desc.lghtcrv.cal.val; /* Compute chi-square for dataset */ dof_chi2set[0] = ddat->set[s].desc.lghtcrv.dof; dof_chi2set[1] = ddat->set[s].desc.lghtcrv.weight * (o2m2om[0].x - 2 * calval * o2m2om[0].z + calval * calval * o2m2om[0].y); // printf("dof=%3.8g\n", dof_chi2set[0]); // printf("weight=%3.8g\n", ddat->set[s].desc.lghtcrv.weight); // printf("calval=%3.8g\n", calval); // printf("weight * (o2 - 2 * calval * om + calval * calval * m2) = %3.8g\n", dof_chi2set[1]); } } __global__ void c2s_get_prntflgs_krnl(struct par_t *dpar, struct dat_t *ddat) { /* Single-threaded kernel */ if (threadIdx.x == 0) { c2s_print_breakdown = (ddat->dof_deldop > SMALLVAL || ddat->dof_doppler > SMALLVAL || ddat->dof_poset > SMALLVAL || ddat->dof_lghtcrv > SMALLVAL); c2s_dof_deldop = ddat->dof_deldop; c2s_dof_doppler = ddat->dof_doppler; c2s_dof_poset = ddat->dof_poset; c2s_dof_lghtcrv = ddat->dof_lghtcrv; c2s_dof = ddat->dof; c2s_write_chi2fit0 = dpar->write_chi2fit0; c2s_badradar = dpar->badradar; c2s_badphoto = dpar->badphoto; c2s_baddopscale = dpar->baddopscale; c2s_badposet = dpar->badposet; c2s_posbnd = dpar->posbnd; c2s_baddiam = dpar->baddiam; } } __global__ void c2_add_chi2_krnl(struct dat_t *ddat, int s) { /* Single-threaded kernel */ if (threadIdx.x == 0) ddat->chi2 += ddat->set[s].chi2; } __global__ void set_global_chi2_krnl(struct dat_t *ddat, double chi2a, double chi2b) { /* Single-threaded kernel */ if (threadIdx.x == 0) ddat->chi2 = chi2a + chi2b; } __global__ void c2_set_chi2_krnl(struct dat_t *ddat, double chi2, int s) { /* Single-threaded kernel */ if (threadIdx.x == 0) ddat->set[s].chi2 = chi2; } __global__ void deldop_wrt_chi2fit0_krnl(struct par_t *dpar, struct dat_t *ddat, int s, int f, int *ndel, int *ndop, int nThreads, double *returns, double *o2_fit0_dof_fit0) { /* ndel*ndop-threaded kernel */ int offset = blockIdx.x * blockDim.x + threadIdx.x; int i = offset % ndel[f] + 1; int j = offset / ndel[f] + 1; double temp, err_fit0 = 0.0, thresh_fit0; /* returns[0] = chi2_fit0_deldop * returns[1] = dof_fit0_deldop */ if (dpar->write_chi2fit0) { if (offset < nThreads) { thresh_fit0 = dpar->chi2fit0_thresh * ddat->set[s].desc.deldop.frame[f].sdev; if (ddat->set[s].desc.deldop.frame[f].cal.val * 
ddat->set[s].desc.deldop.frame[f].fit[i][j] <= thresh_fit0) { temp = ddat->set[s].desc.deldop.frame[f].obs[i][j] * ddat->set[s].desc.deldop.frame[f].obs[i][j] * ddat->set[s].desc.deldop.frame[f].oneovervar[i][j]; atomicAdd(&o2_fit0_dof_fit0[0], temp); if (ddat->set[s].desc.deldop.frame[f].oneovervar[i][j] > 0.0) { temp = ddat->set[s].desc.deldop.frame[f].weight; atomicAdd(&o2_fit0_dof_fit0[1], temp); } } } __syncthreads(); if (offset == 0) { temp = ddat->set[s].desc.deldop.frame[f].weight; err_fit0 = temp*o2_fit0_dof_fit0[0]; returns[0] = err_fit0; returns[1] = o2_fit0_dof_fit0[1]; } } else if (offset==0) returns[0] = returns[1] = 0.0; } __global__ void dop_wrt_chi2fit0_krnl(struct par_t *dpar, struct dat_t *ddat, int s, int f, int nThreads, double *returns, double *o2_fit0_dof_fit0) { /* ndop-threaded kernel */ int offset = blockIdx.x * blockDim.x + threadIdx.x; double temp; double err_fit0 = 0.0, thresh_fit0; returns[0] = returns[1] = 0.0; /* returns[0] = chi2_fit0_deldop * returns[1] = dof_fit0_deldop */ if (dpar->write_chi2fit0) { if (offset < nThreads) { thresh_fit0 = dpar->chi2fit0_thresh * ddat->set[s].desc.doppler.frame[f].sdev; if (ddat->set[s].desc.doppler.frame[f].cal.val * ddat->set[s].desc.doppler.frame[f].fit_s[offset] <= thresh_fit0) { temp = ddat->set[s].desc.doppler.frame[f].obs[offset] * ddat->set[s].desc.doppler.frame[f].obs[offset] * ddat->set[s].desc.doppler.frame[f].oneovervar[offset]; atomicAdd(&o2_fit0_dof_fit0[0], temp); if (ddat->set[s].desc.doppler.frame[f].oneovervar[offset] > 0.0) { temp = ddat->set[s].desc.doppler.frame[f].weight; atomicAdd(&o2_fit0_dof_fit0[1], temp); } } } __syncthreads(); if (offset == 0) { temp = ddat->set[s].desc.doppler.frame[f].weight; err_fit0 = temp* o2_fit0_dof_fit0[0]; returns[0] = err_fit0; returns[1] = o2_fit0_dof_fit0[1]; } } else if (offset==0) returns[0] = returns[1] = 0.0; } __host__ double chi2_gpu( struct par_t *dpar, struct dat_t *ddat, unsigned char *htype, unsigned char *dtype, int *hnframes, int *hlc_n, int list_breakdown, int nsets, cudaStream_t *c2s_stream, int max_frames) { /* This version of chi2 accepts an existing streams array from the calling * function. 
Number of streams in array is guaranteed to be as large as * the the largest nframes for all sets */ int s, print_breakdown; unsigned char write_chi2fit0, baddiam, badphoto, badposet, baddopscale, posbnd, badradar; dim3 BLK,THD; double chi2_all_doppler, chi2_all_deldop, chi2_all_poset, chi2_all_lghtcrv, chi2_fit0_doppler, chi2_fit0_deldop, chi2_fit0_poset, /*chi2_branch, */ dof_fit0_doppler, dof_fit0_deldop, /*dof_fit0_poset, */chi2, dof; double dof_deldop, dof_doppler, dof_poset, dof_lghtcrv; char dofstring[MAXLEN], dof0string[MAXLEN]; /* Initialize variables that accumulate chi-square values */ chi2_all_deldop = chi2_all_doppler = chi2_all_poset = chi2_all_lghtcrv = chi2_fit0_deldop = chi2_fit0_doppler /*= chi2_fit0_poset*/ = 0.0; dof_fit0_deldop = dof_fit0_doppler/* = dof_fit0_poset*/ = 0.0; /* Initialize variables that accumulate chi-square values */ c2s_init_krnl<<<1,1>>>(ddat, nsets); checkErrorAfterKernelLaunch("c2s_init_krnl2, chi2_cuda_streams"); /* Loop through all datasets, carry out chi-square computations, and * provide screen and image output */ for (s=0; s<nsets; s++) { switch (htype[s]) { case DELAY: chi2 = chi2_deldop_gpu(dpar, ddat, s, list_breakdown, &chi2_all_deldop, &chi2_fit0_deldop, &dof_fit0_deldop, hnframes[s], c2s_stream); c2_set_chi2_krnl<<<1,1>>>(ddat, chi2, s); checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); // printf("chi2_set[%i] (Deldop), %3.8g\n", s, chi2); break; case DOPPLER: chi2 = chi2_doppler_gpu(dpar, ddat, s,list_breakdown, &chi2_all_doppler, &chi2_fit0_doppler, &dof_fit0_doppler, hnframes[s], c2s_stream); c2_set_chi2_krnl<<<1,1>>>(ddat, chi2, s); checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); // printf("chi2_set[%i] (Doppler), %3.8g\n", s, chi2); break; case POS: printf("\nWrite chi2_poset_cuda!\n"); // dat->set[s].chi2 = chi2_poset(dpar, s); break; case LGHTCRV: chi2 = chi2_lghtcrv_gpu(dpar, ddat, s, list_breakdown, &chi2_all_lghtcrv, hnframes[s], hlc_n[s]); c2_set_chi2_krnl<<<1,1>>>(ddat, chi2, s); checkErrorAfterKernelLaunch("c2_set_chi2_krnl, chi2_cuda"); // printf("chi2_set[%i] (lghtcrv), %3.8g\n", s, chi2); break; default: printf("chi2_gpu.cu: can't handle this type yet\n"); } /* Single-thread kernel adds ddat->set[s].chi2 to ddat->chi2 */ c2_add_chi2_krnl<<<1,1>>>(ddat, s); checkErrorAfterKernelLaunch("c2_add_chi2_krnl"); } /* end for loop over datasets */ /* Launch single-threaded kernel to retrieve ddat->chi2 to return it */ c2s_retrieve_chi2_krnl<<<1,1>>>(ddat); checkErrorAfterKernelLaunch("c2s_retrieve_chi2_krnl"); gpuErrchk(cudaMemcpyFromSymbol(&chi2, c2s_chi2, sizeof(double), 0, cudaMemcpyDeviceToHost)); /*.......................................................................*/ /* Call kernel to get flags from ddat */ c2s_get_prntflgs_krnl<<<1,1>>>(dpar, ddat); checkErrorAfterKernelLaunch("c2s_get_prntflgs_krnl"); gpuErrchk(cudaMemcpyFromSymbol(&print_breakdown, c2s_print_breakdown, sizeof(int), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&dof_deldop, c2s_dof_deldop, sizeof(double), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&dof_doppler, c2s_dof_doppler, sizeof(double), 0, cudaMemcpyDeviceToHost)); // gpuErrchk(cudaMemcpyFromSymbol(&dof_poset, c2s_dof_poset, sizeof(double), // 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&dof_lghtcrv, c2s_dof_lghtcrv, sizeof(double), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&write_chi2fit0, c2s_write_chi2fit0, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&dof, c2s_dof, sizeof(double), 0, 
cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&baddiam, c2s_baddiam, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&badphoto, c2s_badphoto, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&posbnd, c2s_posbnd, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&badposet, c2s_badposet, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&badradar, c2s_badradar, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&baddopscale, c2s_baddopscale, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); if (list_breakdown) { if (print_breakdown) { printf("#\n"); if (dof_deldop > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_deldop, SMALLVAL, "%f"); printf("delay chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_deldop, dofstring, chi2_all_deldop/dof_deldop); if (write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop, dof0string); } } if (dof_doppler > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_doppler, SMALLVAL, "%f"); printf("Doppler chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_doppler, dofstring, chi2_all_doppler/dof_doppler); if (write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_doppler, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_doppler, dof0string); } } // if (dof_poset > SMALLVAL) { // intifpossible( dofstring, MAXLEN, dof_poset, SMALLVAL, "%f"); // printf("POS chi2 = %e for %s dof (reduced chi2 = %f)\n", // chi2_all_poset, dofstring, chi2_all_poset/dof_poset); // if (write_chi2fit0) { // intifpossible( dof0string, MAXLEN, dof_fit0_poset, SMALLVAL, "%f"); // printf(" (%e outside model for %s dof)\n", // chi2_fit0_poset, dof0string); // } // } if (dof_lghtcrv > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_lghtcrv, SMALLVAL, "%f"); printf("lghtcrv chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_lghtcrv, dofstring, chi2_all_lghtcrv/dof_lghtcrv); } intifpossible( dofstring, MAXLEN, dof, SMALLVAL, "%f"); printf("ALLDATA chi2 = %e for %s dof (reduced chi2 = %f)", chi2, dofstring, chi2/dof); } else { intifpossible( dofstring, MAXLEN, ddat->dof, SMALLVAL, "%f"); printf(" chi2 = %e for %s dof (reduced chi2 = %f)", chi2, dofstring, chi2/dof); } if (baddiam) printf(" (BAD DIAMS)"); if (badphoto) printf(" (BAD PHOTO) (chi2_cuda)"); if (posbnd) printf(" (BAD POS)"); if (badposet) printf(" (BAD POSET)"); if (badradar) printf(" (BAD RADAR)"); if (baddopscale) printf(" (BAD DOPSCALE)"); printf("\n"); if (print_breakdown && write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop + dof_fit0_doppler/* + dof_fit0_poset*/, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop + chi2_fit0_doppler/* + chi2_fit0_poset*/, dof0string); } printf("#\n"); fflush(stdout); } /*.......................................................................*/ return chi2; } __host__ double chi2_MFS_gpu( struct par_t *dpar, struct dat_t *ddat, int list_breakdown, int nsets, cudaStream_t *c2s_stream) { /* This version of chi2 accepts an existing streams array from the calling * function. 
Number of streams in array is guaranteed to be as large as * the the largest nframes for all sets */ int s, print_breakdown; unsigned char write_chi2fit0, baddiam, badphoto, badposet, baddopscale, posbnd, badradar; dim3 BLK,THD; double chi2_all_deldop, chi2_fit0_deldop, dof_fit0_deldop, chi2, dof, dof_deldop; char dofstring[MAXLEN], dof0string[MAXLEN]; /* Initialize variables that accumulate chi-square values */ chi2_all_deldop = dof_fit0_deldop = 0.0; /* Initialize variables that accumulate chi-square values */ c2s_init_krnl<<<1,1>>>(ddat, nsets); checkErrorAfterKernelLaunch("c2s_init_krnl2"); /* Loop through all datasets, carry out chi-square computations, and * provide screen and image output */ for (s=0; s<nsets; s++) { chi2 = chi2_deldop_gpu(dpar, ddat, s, list_breakdown, &chi2_all_deldop, &chi2_fit0_deldop, &dof_fit0_deldop, 1, c2s_stream); c2_set_chi2_krnl<<<1,1>>>(ddat, chi2, s); checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); // printf("chi2_set[%i] (Deldop), %3.8g\n", s, chi2); /* Single-thread kernel adds ddat->set[s].chi2 to ddat->chi2 */ c2_add_chi2_krnl<<<1,1>>>(ddat, s); checkErrorAfterKernelLaunch("c2_add_chi2_krnl"); } /* end for loop over datasets */ /* Launch single-threaded kernel to retrieve ddat->chi2 to return it */ c2s_retrieve_chi2_krnl<<<1,1>>>(ddat); checkErrorAfterKernelLaunch("c2s_retrieve_chi2_krnl"); gpuErrchk(cudaMemcpyFromSymbol(&chi2, c2s_chi2, sizeof(double), 0, cudaMemcpyDeviceToHost)); /*.......................................................................*/ /* Call kernel to get flags from ddat */ c2s_get_prntflgs_krnl<<<1,1>>>(dpar, ddat); checkErrorAfterKernelLaunch("c2s_get_prntflgs_krnl"); gpuErrchk(cudaMemcpyFromSymbol(&print_breakdown, c2s_print_breakdown, sizeof(int), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&dof_deldop, c2s_dof_deldop, sizeof(double), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&write_chi2fit0, c2s_write_chi2fit0, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&dof, c2s_dof, sizeof(double), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&baddiam, c2s_baddiam, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&badphoto, c2s_badphoto, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&posbnd, c2s_posbnd, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&badposet, c2s_badposet, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&badradar, c2s_badradar, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&baddopscale, c2s_baddopscale, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); if (list_breakdown) { if (print_breakdown) { printf("#\n"); if (dof_deldop > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_deldop, SMALLVAL, "%f"); printf("delay chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_deldop, dofstring, chi2_all_deldop/dof_deldop); if (write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop, dof0string); } } intifpossible( dofstring, MAXLEN, dof, SMALLVAL, "%f"); printf("ALLDATA chi2 = %e for %s dof (reduced chi2 = %f)", chi2, dofstring, chi2/dof); } else { intifpossible( dofstring, MAXLEN, ddat->dof, SMALLVAL, "%f"); printf(" chi2 = %e for %s dof (reduced chi2 = %f)", chi2, dofstring, chi2/dof); } if (baddiam) printf(" (BAD DIAMS)"); if (badphoto) printf(" (BAD PHOTO) 
(chi2_cuda)"); if (posbnd) printf(" (BAD POS)"); if (badposet) printf(" (BAD POSET)"); if (badradar) printf(" (BAD RADAR)"); if (baddopscale) printf(" (BAD DOPSCALE)"); printf("\n"); if (print_breakdown && write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop, dof0string); } printf("#\n"); fflush(stdout); } /*.......................................................................*/ return chi2; } __host__ double chi2_pthreads( struct par_t *dpar0, struct par_t *dpar1, struct dat_t *ddat0, struct dat_t *ddat1, unsigned char *htype, unsigned char *dtype0, unsigned char *dtype1, int *hnframes, int *hlc_n, int *GPUID, int list_breakdown, int nsets, int max_frames, pthread_t thread1, pthread_t thread2, cudaStream_t *gpu0_stream, cudaStream_t *gpu1_stream) { /* This version of chi2 splits all work between two host threads (pthreads) * which each have their own assigned GPU. It is intended as dual-threaded * host application with dual-GPU usage. */ int print_breakdown; unsigned char write_chi2fit0, baddiam, badphoto, badposet, baddopscale, posbnd, badradar; dim3 BLK,THD; double chi2_all_doppler, chi2_all_deldop, chi2_all_poset, chi2_all_lghtcrv, chi2_fit0_doppler, chi2_fit0_deldop, chi2_fit0_poset, /*chi2_branch, */ dof_fit0_doppler, dof_fit0_deldop, dof_fit0_poset, chi2, dof; double dof_deldop, dof_doppler, dof_poset, dof_lghtcrv; char dofstring[MAXLEN], dof0string[MAXLEN]; /* Initialize variables that accumulate chi-square values */ chi2_all_deldop = chi2_all_doppler = chi2_all_poset = chi2_all_lghtcrv = chi2_fit0_deldop = chi2_fit0_doppler = chi2_fit0_poset = 0.0; dof_fit0_deldop = dof_fit0_doppler = dof_fit0_poset = 0.0; gpuErrchk(cudaSetDevice(GPU0)); /* Create and populate the structs needed to pass information to the * pthreaded sub functions */ chi2_data data1, data2; data1.gpuid = GPU0; data2.gpuid = GPU1; data1.thread_no = 1; data2.thread_no = 2; data1.gpu_stream = gpu0_stream; data2.gpu_stream = gpu1_stream; data1.GPUID = data2.GPUID = GPUID; data1.data = ddat0; data2.data = ddat1; data1.parameter = dpar0; data2.parameter = dpar1; data1.dtype = dtype0; data2.dtype = dtype1; data1.htype = data2.htype = htype; data1.hlc_n = data2.hlc_n = hlc_n; data1.list_breakdown = data2.list_breakdown = list_breakdown; data1.max_frames = data2.max_frames = max_frames; data1.nframes = data2.nframes = hnframes; data1.nsets = data2.nsets = nsets; data1.chi2_all_deldop = data2.chi2_all_deldop = 0.0; data1.chi2_all_doppler = data2.chi2_all_doppler = 0.0; data1.chi2_all_poset = data2.chi2_all_poset = 0.0; data1.chi2_all_lghtcrv = data2.chi2_all_lghtcrv = 0.0; data1.chi2_fit0_deldop = data2.chi2_fit0_deldop = 0.0; data1.chi2_fit0_doppler = data2.chi2_fit0_doppler = 0.0; data1.chi2_fit0_poset = data2.chi2_fit0_poset = 0.0; data1.dof_fit0_deldop = data2.dof_fit0_deldop = 0.0; data1.dof_fit0_doppler = data2.dof_fit0_doppler = 0.0; data1.dof_fit0_poset = data2.dof_fit0_poset = 0.0; data1.chi2 = data2.chi2 = 0.0; /* Initialize variables that accumulate chi-square values */ c2s_init_krnl<<<1,1>>>(ddat0, nsets); checkErrorAfterKernelLaunch("c2s_init_krnl for GPU0"); gpuErrchk(cudaSetDevice(GPU1)); c2s_init_krnl<<<1,1>>>(ddat1, nsets); checkErrorAfterKernelLaunch("c2s_init_krnl for GPU1"); gpuErrchk(cudaSetDevice(GPU0)); /* From here, launch the pthreaded subfunction */ pthread_create(&thread1, NULL, chi2_pthread_sub,(void*)&data1); pthread_create(&thread2, NULL, chi2_pthread_sub,(void*)&data2); /* The calculation of all 
sets happens now */ pthread_join(thread1, NULL); pthread_join(thread2, NULL); /* Complete calculations of values that will be used during a fit to * increase the objective function for models with bad properties */ gpuErrchk(cudaSetDevice(GPU0)); set_global_chi2_krnl<<<1,1>>>(ddat0, data1.chi2, data2.chi2); checkErrorAfterKernelLaunch("set_global_chi2_krnl"); gpuErrchk(cudaSetDevice(GPU1)); set_global_chi2_krnl<<<1,1>>>(ddat1, data1.chi2, data2.chi2); checkErrorAfterKernelLaunch("set_global_chi2_krnl"); gpuErrchk(cudaSetDevice(GPU0)); /* Recombine/update the all-data-type values */ chi2 = data1.chi2 + data2.chi2; chi2_all_deldop = data1.chi2_all_deldop + data2.chi2_all_deldop; chi2_all_doppler = data1.chi2_all_doppler + data2.chi2_all_doppler; chi2_all_poset = data1.chi2_all_poset + data2.chi2_all_poset; chi2_all_lghtcrv = data1.chi2_all_lghtcrv + data2.chi2_all_lghtcrv; chi2_fit0_deldop = data1.chi2_fit0_deldop + data2.chi2_fit0_deldop; chi2_fit0_doppler = data1.chi2_fit0_doppler + data2.chi2_fit0_doppler; chi2_fit0_poset = data1.chi2_fit0_poset + data2.chi2_fit0_poset; dof_fit0_deldop = data1.dof_fit0_deldop + data2.dof_fit0_deldop; dof_fit0_doppler = data1.dof_fit0_doppler + data2.dof_fit0_doppler; dof_fit0_poset = data1.dof_fit0_poset + data2.dof_fit0_poset; /* Call kernel to get flags from ddat */ c2s_get_prntflgs_krnl<<<1,1>>>(dpar0, ddat0); checkErrorAfterKernelLaunch("c2s_get_prntflgs_krnl"); gpuErrchk(cudaMemcpyFromSymbol(&print_breakdown, c2s_print_breakdown, sizeof(int), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&dof_deldop, c2s_dof_deldop, sizeof(double), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&dof_doppler, c2s_dof_doppler, sizeof(double), 0, cudaMemcpyDeviceToHost)); // gpuErrchk(cudaMemcpyFromSymbol(&dof_poset, c2s_dof_poset, sizeof(double), // 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&dof_lghtcrv, c2s_dof_lghtcrv, sizeof(double), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&write_chi2fit0, c2s_write_chi2fit0, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&dof, c2s_dof, sizeof(double), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&baddiam, c2s_baddiam, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&badphoto, c2s_badphoto, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&posbnd, c2s_posbnd, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&badposet, c2s_badposet, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&badradar, c2s_badradar, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&baddopscale, c2s_baddopscale, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); if (list_breakdown) { if (print_breakdown) { printf("#\n"); if (dof_deldop > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_deldop, SMALLVAL, "%f"); printf("delay chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_deldop, dofstring, chi2_all_deldop/dof_deldop); if (write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop, dof0string); } } if (dof_doppler > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_doppler, SMALLVAL, "%f"); printf("Doppler chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_doppler, dofstring, chi2_all_doppler/dof_doppler); if (write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_doppler, 
SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_doppler, dof0string); } } // if (dof_poset > SMALLVAL) { // intifpossible( dofstring, MAXLEN, dof_poset, SMALLVAL, "%f"); // printf("POS chi2 = %e for %s dof (reduced chi2 = %f)\n", // chi2_all_poset, dofstring, chi2_all_poset/dof_poset); // if (write_chi2fit0) { // intifpossible( dof0string, MAXLEN, dof_fit0_poset, SMALLVAL, "%f"); // printf(" (%e outside model for %s dof)\n", // chi2_fit0_poset, dof0string); // } // } if (dof_lghtcrv > SMALLVAL) { intifpossible( dofstring, MAXLEN, dof_lghtcrv, SMALLVAL, "%f"); printf("lghtcrv chi2 = %e for %s dof (reduced chi2 = %f)\n", chi2_all_lghtcrv, dofstring, chi2_all_lghtcrv/dof_lghtcrv); } intifpossible( dofstring, MAXLEN, dof, SMALLVAL, "%f"); printf("ALLDATA chi2 = %e for %s dof (reduced chi2 = %f)", chi2, dofstring, chi2/dof); } // } else { // intifpossible( dofstring, MAXLEN, ddat->dof, SMALLVAL, "%f"); // printf(" chi2 = %e for %s dof (reduced chi2 = %f)", // chi2, dofstring, chi2/dof); // } if (baddiam) printf(" (BAD DIAMS)"); if (badphoto) printf(" (BAD PHOTO) (chi2_cuda)"); if (posbnd) printf(" (BAD POS)"); if (badposet) printf(" (BAD POSET)"); if (badradar) printf(" (BAD RADAR)"); if (baddopscale) printf(" (BAD DOPSCALE)"); printf("\n"); if (print_breakdown && write_chi2fit0) { intifpossible( dof0string, MAXLEN, dof_fit0_deldop + dof_fit0_doppler/* + dof_fit0_poset*/, SMALLVAL, "%f"); printf(" (%e outside model for %s dof)\n", chi2_fit0_deldop + chi2_fit0_doppler/* + chi2_fit0_poset*/, dof0string); } printf("#\n"); fflush(stdout); } /*.......................................................................*/ return chi2; } void *chi2_pthread_sub(void *ptr) { int s; double chi2; chi2_data *data; data = (chi2_data *) ptr; gpuErrchk(cudaSetDevice(data->gpuid)); /* Loop through all datasets, carry out chi-square computations, and * provide screen and image output */ // for (s=0; s<data->nsets; s++) { // if (data->GPUID[s]==data->gpuid) { // switch (data->htype[s]) { // case DELAY: // chi2 = chi2_deldop_gpu32(data->parameter, data->data, s, // data->list_breakdown, &data->chi2_all_deldop, // &data->chi2_fit0_deldop, &data->dof_fit0_deldop, // data->nframes[s], data->gpu_stream); // c2_set_chi2_krnl<<<1,1>>>(data->data, chi2, s); // checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); //// printf("chi2 for set %i (Delay-Doppler) = %g\n", s, chi2); // break; // case DOPPLER: // chi2 = chi2_doppler_gpu32(data->parameter, data->data, s, // data->list_breakdown, &data->chi2_all_doppler, // &data->chi2_fit0_doppler, &data->dof_fit0_doppler, // data->nframes[s], data->gpu_stream); // c2_set_chi2_krnl<<<1,1>>>(data->data, chi2, s); // checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); //// printf("chi2 for set %i (Doppler) = %g\n", s, chi2); // break; // case POS: // printf("\nWrite chi2_poset_cuda!\n"); // // dat->set[s].chi2 = chi2_poset(dpar, s); // break; // case LGHTCRV: // chi2 = chi2_lghtcrv_gpu(data->parameter, data->data, s, // data->list_breakdown, &data->chi2_all_lghtcrv, // data->nframes[s], data->hlc_n[s]); // c2_set_chi2_krnl<<<1,1>>>(data->data, chi2, s); // checkErrorAfterKernelLaunch("c2_set_chi2_krnl"); // //printf("chi2 for set %i (Lightcurve) = %g\n", s, chi2); // break; // default: // printf("chi2_pthread_sub: can't handle this type yet\n"); // } // data->chi2 += chi2; // } // } // pthread_exit(0); } __host__ double chi2_deldop_gpu( struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_deldop, double *chi2_fit0_deldop, double 
*dof_fit0_deldop, int nframes, cudaStream_t *c2s_stream) { int f, *ndel, *ndop, hndel[nframes], hndop[nframes], nThreads[nframes]; double chi2_set, *chi2_deldop_frame, h_chi2_deldop_frame[nframes]; dim3 BLK[nframes],THD, BLKfrm, THD64; THD.x = maxThreadsPerBlock; THD64.x = 64; BLKfrm.x = floor((THD64.x -1 + nframes)/THD64.x); double *o2_fit0_dof_fit0; /* o2, m2, and om are per-frame radar variables */ double *o2, *m2, *om, *weight, *returns, *hreturns; chi2_set = 0.0; gpuErrchk(cudaMalloc((void**)&o2, sizeof(double)*nframes)); gpuErrchk(cudaMalloc((void**)&m2, sizeof(double)*nframes)); gpuErrchk(cudaMalloc((void**)&om, sizeof(double)*nframes)); gpuErrchk(cudaMalloc((void**)&weight, sizeof(double)*nframes)); gpuErrchk(cudaMalloc((void**)&chi2_deldop_frame,sizeof(double)*nframes)); gpuErrchk(cudaMalloc((void**)&ndel, sizeof(int)*nframes)); gpuErrchk(cudaMalloc((void**)&ndop, sizeof(int)*nframes)); gpuErrchk(cudaMalloc((void**)&returns, sizeof(double)*2)); gpuErrchk(cudaMalloc((void**)&o2_fit0_dof_fit0, sizeof(double)*2)); hreturns = (double *) malloc(2*sizeof(double)); hreturns[0] = hreturns[1] = 0.0; /* Get values for ndel and ndop, and the overflow parameters o2, m2, om */ c2s_deldop_init_krnl<<<BLKfrm,THD64>>>(ddat, s, ndel, ndop, o2, m2, om, weight, nframes); checkErrorAfterKernelLaunch("c2s_deldop_init_krnl"); gpuErrchk(cudaMemcpy(&hndel, ndel, sizeof(int)*nframes, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&hndop, ndop, sizeof(int)*nframes, cudaMemcpyDeviceToHost)); /* Calculate launch parameters */ for (f=0; f<nframes; f++) { nThreads[f] = hndel[f]*hndop[f]; BLK[f].x = floor((THD.x - 1 + nThreads[f]) / THD.x); } /* Add contributions from power within limits of data frame. This kernel * also takes care of the frame's calibration factor and computes chi2 * for this frame */ sum_o2m2om_gpu(ddat, o2, m2, om, nframes, hndel[0]*hndop[0], s, c2s_stream); c2s_add_deldop_contributions_krnl<<<BLKfrm,THD64>>>(dpar, ddat, o2, m2, om, weight, ndel, ndop, chi2_deldop_frame, s, nframes); checkErrorAfterKernelLaunch("c2s_add_deldop_contributions_krnl"); gpuErrchk(cudaMemcpy(&h_chi2_deldop_frame, chi2_deldop_frame, sizeof(double) * nframes, cudaMemcpyDeviceToHost)); /* Add all frames from device memory to host memoryd */ for (f=0; f<nframes; f++) { chi2_set += h_chi2_deldop_frame[f]; // printf("Set %i deldop, frame %i, %3.8g\n", s, f, h_chi2_deldop_frame[f]); if (list_breakdown) *chi2_all_deldop += h_chi2_deldop_frame[f]; } if (list_breakdown) { for (f=0; f<nframes; f++) { /* WARNING WARNING WARNING */ /* This kernel is for accuracy testing only. 
Must develop a proper * parallel reduction for this instead */ deldop_wrt_chi2fit0_krnl<<<BLK[f],THD, 0, c2s_stream[f]>>>(dpar, ddat, s, f, ndel, ndop, nThreads[f], returns, o2_fit0_dof_fit0); checkErrorAfterKernelLaunch("deldop_wrt_chi2fit0_krnl64"); } gpuErrchk(cudaMemcpy(hreturns, returns, sizeof(double) * 2, cudaMemcpyDeviceToHost)); *chi2_fit0_deldop = hreturns[0]; *dof_fit0_deldop = hreturns[1]; } cudaFree(o2); cudaFree(m2); cudaFree(om); cudaFree(ndel); cudaFree(ndop); cudaFree(weight); cudaFree(returns); cudaFree(chi2_deldop_frame); cudaFree(o2_fit0_dof_fit0); free(hreturns); return chi2_set; } __host__ double chi2_doppler_gpu(struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_doppler, double *chi2_fit0_doppler, double *dof_fit0_doppler, int nframes, cudaStream_t *c2s_stream) { int f, *ndop, hndop[nframes]; double chi2_set, *chi2_doppler_frame, h_chi2_doppler_frame[nframes]; dim3 BLK[nframes], THD, BLKfrm, THD64; THD.x = maxThreadsPerBlock; THD64.x = 64; double *o2, *m2, *om, *weight, hreturns[2], *returns, *o2_fit0_dof_fit0; chi2_set = 0.0; BLKfrm.x = floor((THD64.x -1 + nframes)/THD64.x); gpuErrchk(cudaMalloc((void**)&o2, sizeof(double)*nframes)); gpuErrchk(cudaMalloc((void**)&m2, sizeof(double)*nframes)); gpuErrchk(cudaMalloc((void**)&om, sizeof(double)*nframes)); gpuErrchk(cudaMalloc((void**)&ndop, sizeof(int)*nframes)); gpuErrchk(cudaMalloc((void**)&weight, sizeof(double)*nframes)); gpuErrchk(cudaMalloc((void**)&chi2_doppler_frame,sizeof(double)*nframes)); gpuErrchk(cudaMalloc((void**)&returns, sizeof(double)*2)); gpuErrchk(cudaMalloc((void**)&o2_fit0_dof_fit0, sizeof(double)*2)); hreturns[0] = hreturns[1] = 0.0; /* Get values for ndel and ndop, and the overflow parameters o2, m2, om */ c2s_doppler_init_krnl<<<BLKfrm,THD64>>>(ddat, s, ndop, o2, m2, om, weight, nframes); checkErrorAfterKernelLaunch("c2s_doppler_init_krnl64"); gpuErrchk(cudaMemcpy(&hndop, ndop, sizeof(int)*nframes, cudaMemcpyDeviceToHost)); /* Calculate launch parameters */ for (f=0; f<nframes; f++) BLK[f].x = floor((THD.x - 1 + hndop[f]) / THD.x); /* Loop through all frames for this dataset */ for (f=0; f<nframes; f++) { /* Add contributions from power within data frame limits. 
This kernel * also considers frame's calibration factor & computes frame chi2 */ c2s_doppler_add_o2_krnl<<<BLK[f],THD,0,c2s_stream[f]>>>(ddat, o2, m2, om, ndop, s, f); } checkErrorAfterKernelLaunch("c2_doppler_add_o2_krnl64"); // c2s_add_dop_contrbts_krnl<<<BLKfrm,THD64>>>(dpar, ddat, o2, m2, om, weight, // ndop, chi2_doppler_frame, s, nframes); for (f=0; f<nframes; f++) { c2s_add_dop_contrbts_srl_krnl<<<1,1>>>(dpar, ddat, o2, m2, om, weight, ndop, chi2_doppler_frame, s, f); } checkErrorAfterKernelLaunch("c2_add_dop_contrbts_krnl64"); gpuErrchk(cudaMemcpy(&h_chi2_doppler_frame, chi2_doppler_frame, sizeof(double) * nframes, cudaMemcpyDeviceToHost)); /* Add all frames from device memory to host memory*/ for (f=0; f<nframes; f++) { chi2_set += h_chi2_doppler_frame[f]; // printf("h_chi2_doppler_frame[%i]=%3.6g\n", f, h_chi2_doppler_frame[f]); // printf("Set %i doppler, frame %i, %3.8g\n", s, f, h_chi2_doppler_frame[f]); if (list_breakdown) *chi2_all_doppler += h_chi2_doppler_frame[f]; } /* Compute the chi-square contributions and number of degrees of freedom * due to bins whose model signal is less than or equal to 'chi2fit0_thresh' * standard deviations of the noise in the data frame */ if (list_breakdown) { for (f=0; f<nframes; f++) { dop_wrt_chi2fit0_krnl<<<BLK[f],THD, 0, c2s_stream[f]>>>(dpar, ddat, s, f, hndop[f], returns, o2_fit0_dof_fit0); checkErrorAfterKernelLaunch("dop_wrt_chi2fit0_krnl64"); } gpuErrchk(cudaMemcpy(&hreturns, returns, sizeof(double) * 2, cudaMemcpyDeviceToHost)); *chi2_fit0_doppler = hreturns[0]; *dof_fit0_doppler = hreturns[1]; } cudaFree(o2); cudaFree(m2); cudaFree(om); cudaFree(ndop); cudaFree(weight); cudaFree(returns); cudaFree(chi2_doppler_frame); cudaFree(o2_fit0_dof_fit0); return chi2_set; } __host__ double chi2_lghtcrv_gpu( struct par_t *dpar, struct dat_t *ddat, int s, int list_breakdown, double *chi2_all_lghtcrv, int nframes, int lc_n) { double *dof_chi2set, h_dof_chi2set[2]; double3 *o2m2om, *h_o2m2om; dim3 BLK,THD; THD.x = maxThreadsPerBlock; gpuErrchk(cudaMalloc((void**)&dof_chi2set, sizeof(double) * 2)); gpuErrchk(cudaMalloc((void**)&o2m2om, sizeof(double3) * 2)); // h_o2m2om = (double3 *) malloc(2*sizeof(double3)); BLK = floor((THD.x - 1 + lc_n)/THD.x); c2s_lghtcrv_serial_krnl<<<1,1>>>(ddat, s, dof_chi2set, o2m2om); gpuErrchk(cudaMemcpy(&h_dof_chi2set, dof_chi2set, sizeof(double)*2, cudaMemcpyDeviceToHost)); // gpuErrchk(cudaMemcpy(h_o2m2om, o2m2om, sizeof(double3)*2, cudaMemcpyDeviceToHost)); // printf("o2=%3.6g\n", h_o2m2om[0].x); // printf("m2=%3.6g\n", h_o2m2om[0].y); // printf("om=%3.6g\n", h_o2m2om[0].z); if (list_breakdown) *chi2_all_lghtcrv += h_dof_chi2set[1]; cudaFree(dof_chi2set); cudaFree(o2m2om); // free(h_o2m2om); return h_dof_chi2set[1]; }
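The chi2 sources above flag their deldop_wrt_chi2fit0_krnl / dop_wrt_chi2fit0_krnl kernels as accuracy-testing stopgaps that accumulate every bin with a per-thread atomicAdd, and note that a proper parallel reduction is still to be written. A minimal sketch of one possible shape for that reduction is given below; the kernel name, the fixed 256-thread block, and the generic vals/dof_mask inputs are illustrative assumptions and not part of the original sources.

/* Illustrative sketch only: per-block shared-memory reduction, one atomic per block.
 * Assumes blockDim.x is a power of two (256 here) and, like the kernels above,
 * double-precision atomicAdd (compute capability 6.0+). */
#define RED_THREADS 256
__global__ void chi2fit0_reduce_krnl(const double *vals, const double *dof_mask,
		int n, double *sum_out, double *dof_out)
{
	__shared__ double s_sum[RED_THREADS];
	__shared__ double s_dof[RED_THREADS];
	int tid = threadIdx.x;
	int idx = blockIdx.x * blockDim.x + tid;

	/* Load one element per thread; pad with zeros past the end of the array */
	s_sum[tid] = (idx < n) ? vals[idx]     : 0.0;
	s_dof[tid] = (idx < n) ? dof_mask[idx] : 0.0;
	__syncthreads();

	/* Tree reduction in shared memory */
	for (int stride = blockDim.x/2; stride > 0; stride >>= 1) {
		if (tid < stride) {
			s_sum[tid] += s_sum[tid + stride];
			s_dof[tid] += s_dof[tid + stride];
		}
		__syncthreads();
	}

	/* One atomic per block instead of one per bin */
	if (tid == 0) {
		atomicAdd(sum_out, s_sum[0]);
		atomicAdd(dof_out, s_dof[0]);
	}
}

Launched as chi2fit0_reduce_krnl<<<(n + RED_THREADS - 1)/RED_THREADS, RED_THREADS>>>(...) after zeroing the two output doubles (e.g. with cudaMemset), this produces the same totals as the per-bin atomics in the kernels above while issuing only one atomic add per block.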
1401b7737e34f9b134ba8b0214bffe90de436ea8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "ComputeOrientations.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *g_Data = NULL; hipMalloc(&g_Data, XSIZE*YSIZE); int *d_Ptrs = NULL; hipMalloc(&d_Ptrs, XSIZE*YSIZE); float *d_Orient = NULL; hipMalloc(&d_Orient, XSIZE*YSIZE); int maxPts = 1; int w = XSIZE; int h = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( ComputeOrientations), dim3(gridBlock),dim3(threadBlock), 0, 0, g_Data,d_Ptrs,d_Orient,maxPts,w,h); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( ComputeOrientations), dim3(gridBlock),dim3(threadBlock), 0, 0, g_Data,d_Ptrs,d_Orient,maxPts,w,h); } hipDeviceSynchronize(); /* drain the warm-up launches before starting the clock */ auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( ComputeOrientations), dim3(gridBlock),dim3(threadBlock), 0, 0, g_Data,d_Ptrs,d_Orient,maxPts,w,h); } hipDeviceSynchronize(); /* kernel launches are asynchronous; wait for them to finish before stopping the clock */ auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
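In the timing harness just above (and its CUDA twin below), the grid is sized by growing iXSIZE/iYSIZE until they divide evenly by the block shape. Assuming ComputeOrientations already guards against out-of-range threads (the padded grid launches such threads in any case), the same launch geometry can be written as the usual round-up division; a small sketch:

/* Sketch: equivalent launch geometry without the while-loop padding. */
dim3 threadBlock(BLOCKX, BLOCKY);
dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX,   /* ceil(XSIZE / BLOCKX) */
               (YSIZE + BLOCKY - 1) / BLOCKY);  /* ceil(YSIZE / BLOCKY) */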
1401b7737e34f9b134ba8b0214bffe90de436ea8.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "ComputeOrientations.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *g_Data = NULL; cudaMalloc(&g_Data, XSIZE*YSIZE); int *d_Ptrs = NULL; cudaMalloc(&d_Ptrs, XSIZE*YSIZE); float *d_Orient = NULL; cudaMalloc(&d_Orient, XSIZE*YSIZE); int maxPts = 1; int w = XSIZE; int h = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); ComputeOrientations<<<gridBlock,threadBlock>>>(g_Data,d_Ptrs,d_Orient,maxPts,w,h); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { ComputeOrientations<<<gridBlock,threadBlock>>>(g_Data,d_Ptrs,d_Orient,maxPts,w,h); } cudaDeviceSynchronize(); /* drain the warm-up launches before starting the clock */ auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { ComputeOrientations<<<gridBlock,threadBlock>>>(g_Data,d_Ptrs,d_Orient,maxPts,w,h); } cudaDeviceSynchronize(); /* kernel launches are asynchronous; wait for them to finish before stopping the clock */ auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
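Host-side steady_clock timing, as in the two harnesses above, only brackets the kernel work once the device has been synchronized; CUDA events are a common alternative that record timestamps on the GPU itself. A minimal sketch against the same 1000-launch loop, using only the standard event API (the names ev_start/ev_stop are placeholders):

/* Sketch: timing the 1000-launch loop with CUDA events instead of steady_clock. */
cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);
cudaEventRecord(ev_start);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    ComputeOrientations<<<gridBlock,threadBlock>>>(g_Data,d_Ptrs,d_Orient,maxPts,w,h);
}
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);                      /* wait for the last kernel to finish */
float msecs = 0.0f;
cudaEventElapsedTime(&msecs, ev_start, ev_stop);    /* elapsed GPU time in milliseconds */
cout << '[' << msecs * 1000.0f << " usecs]" << endl;
cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);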
a2217ebe14bc458fb43e5ed5dc30ceaa113bd6c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _ZQ_CUDA_ADVECTION_2D_CU_ #define _ZQ_CUDA_ADVECTION_2D_CU_ #include "ZQ_CUDA_Advection2D.cuh" namespace ZQ_CUDA_Advection2D { texture<float2,2,hipReadModeElementType> tex_velocity_regular; texture<float,2,hipReadModeElementType> tex_velocity_MAC_u; texture<float,2,hipReadModeElementType> tex_velocity_MAC_v; texture<unsigned char,2,hipReadModeElementType> tex_occupy; texture<float,2,hipReadModeElementType> tex_inputVelocity_mac_u; texture<float,2,hipReadModeElementType> tex_inputVelocity_mac_v; texture<float2,2,hipReadModeElementType> tex_inputVelocity_regular; texture<float2,2,hipReadModeElementType> tex_scalar; //temperature and density unsigned h_width; unsigned h_height; unsigned int h_steps; float h_voxelSize; float h_deltatt; __constant__ unsigned int d_width; __constant__ unsigned int d_height; __constant__ unsigned int d_steps; __constant__ float d_voxelSize; __constant__ float d_deltatt; /****************************************************************************************/ __global__ void ZQ_Cuda_Velocity_Advection_inRegular_outRegular_Kernel(float2 * d_output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= d_width || y >= d_height) return; float2 pos = make_float2(x+0.5f,y+0.5f); float2 lastpos = pos; float2 velCoord = make_float2(pos.x/d_width,pos.y/d_height); float2 lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); unsigned int istep = 0; do { float2 occupyCoord = velCoord; if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord = make_float2(pos.x/d_width,pos.y/d_height); lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); istep ++; } while (istep < d_steps); float2 out_coord = make_float2(lastpos.x/d_width,lastpos.y/d_height); float2 tempvel = tex2D(tex_inputVelocity_regular,out_coord.x,out_coord.y); d_output[y*d_width+x] = tempvel; } __global__ void ZQ_Cuda_Velocity_Advection_inRegular_outMAC_u_Kernel(float * d_mac_u) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x > d_width || y >= d_height) return; float2 pos = make_float2(x,y+0.5f); float2 lastpos = pos; float2 velCoord = make_float2(pos.x/d_width,pos.y/d_height); float2 lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); unsigned int istep = 0; do { float2 occupyCoord = velCoord; if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord = make_float2(pos.x/d_width,pos.y/d_height); lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); istep ++; } while (istep < d_steps); float2 out_coord = make_float2(lastpos.x/d_width,lastpos.y/d_height); float2 tempvel = tex2D(tex_inputVelocity_regular,out_coord.x,out_coord.y); d_mac_u[y*(d_width+1)+x] = tempvel.x; } __global__ void ZQ_Cuda_Velocity_Advection_inRegular_outMAC_v_Kernel(float * d_mac_v) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= d_width || y > d_height) return; float2 pos = make_float2(x+0.5f,y); float2 lastpos = pos; float2 velCoord = make_float2(pos.x/d_width,pos.y/d_height); float2 lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); unsigned int istep = 0; do { 
float2 occupyCoord = velCoord; if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord = make_float2(pos.x/d_width,pos.y/d_height); lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); istep ++; } while (istep < d_steps); float2 out_coord = make_float2(lastpos.x/d_width,lastpos.y/d_height); float2 tempvel = tex2D(tex_inputVelocity_regular,out_coord.x,out_coord.y); d_mac_v[y*d_width+x] = tempvel.y; } __global__ void ZQ_Cuda_Velocity_Advection_inMAC_outMAC_u_Kernel(float * d_mac_u) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x > d_width || y >= d_height) return; float2 pos = make_float2(x,y+0.5f); float2 lastpos = pos; float2 velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); float2 velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); float2 lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); unsigned int istep = 0; do { float2 occupyCoord = make_float2(pos.x/d_width,pos.y/d_height); if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); istep ++; } while (istep < d_steps); float2 out_coord_u = make_float2((lastpos.x+0.5f)/(d_width+1),lastpos.y/d_height); float tempvel = tex2D(tex_inputVelocity_mac_u,out_coord_u.x,out_coord_u.y); d_mac_u[y*(d_width+1)+x] = tempvel; } __global__ void ZQ_Cuda_Velocity_Advection_inMAC_outMAC_v_Kernel(float * d_mac_v) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= d_width || y > d_height) return; float2 pos = make_float2(x+0.5f,y); float2 lastpos = pos; float2 velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); float2 velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); float2 lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); unsigned int istep = 0; do { float2 occupyCoord = make_float2(pos.x/d_width,pos.y/d_height); if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); istep ++; } while (istep < d_steps); float2 out_coord_v = make_float2(lastpos.x/d_width,(lastpos.y+0.5f)/(d_height+1)); float tempvel = tex2D(tex_inputVelocity_mac_v,out_coord_v.x,out_coord_v.y); d_mac_v[y*d_width+x] = tempvel; } __global__ void ZQ_Cuda_Scalar_Advection_Regular_Velocity_Kernel(float2* d_output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= d_width || y >= d_height) return; float2 pos = make_float2(x+0.5f,y+0.5f); float2 lastpos = pos; float2 
velCoord = make_float2(pos.x/d_width,pos.y/d_height); float2 lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); unsigned int istep = 0; do { float2 occupyCoord = velCoord; if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord = make_float2(pos.x/d_width,pos.y/d_height); lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); istep ++; } while (istep < d_steps); float2 out_coord = make_float2(lastpos.x/d_width,lastpos.y/d_height); float2 tempscalar = tex2D(tex_scalar,out_coord.x,out_coord.y); d_output[y*d_width+x] = tempscalar; } __global__ void ZQ_Cuda_Scalar_Advection_MAC_Velocity_Kernel(float2* d_output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= d_width || y >= d_height) return; float2 pos = make_float2(x+0.5f,y+0.5f); float2 lastpos = pos; float2 velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); float2 velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); float2 lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); unsigned int istep = 0; do { float2 occupyCoord = make_float2(pos.x/d_width,pos.y/d_height); if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); istep ++; } while (istep < d_steps); float2 out_coord = make_float2(lastpos.x/d_width,lastpos.y/d_height); float2 tempscalar = tex2D(tex_scalar,out_coord.x,out_coord.y); d_output[y*d_width+x] = tempscalar; } extern "C" void ZQ_Cuda_Prepare_Advection2D(const unsigned int width, const unsigned int height, const float voxelSize, const unsigned int steps, const float deltatt) { h_width = width; h_height = height; h_steps = steps; h_voxelSize = voxelSize; h_deltatt = deltatt; checkCudaErrors( hipMemcpyToSymbol(ZQ_CUDA_Advection2D::d_width,&width,sizeof(unsigned int))); checkCudaErrors( hipMemcpyToSymbol(ZQ_CUDA_Advection2D::d_height,&height,sizeof(unsigned int))); checkCudaErrors( hipMemcpyToSymbol(ZQ_CUDA_Advection2D::d_steps,&steps,sizeof(unsigned int))); checkCudaErrors( hipMemcpyToSymbol(ZQ_CUDA_Advection2D::d_voxelSize,&voxelSize,sizeof(float))); checkCudaErrors( hipMemcpyToSymbol(ZQ_CUDA_Advection2D::d_deltatt,&deltatt,sizeof(float))); //int tmp = 0; //scanf("%d",&tmp); //checkCudaErrors( hipMemcpyFromSymbol(&h_width,d_width,sizeof(unsigned int))); //checkCudaErrors( hipMemcpyFromSymbol(&h_height,d_height,sizeof(unsigned int))); //checkCudaErrors( hipMemcpyFromSymbol(&h_steps,d_steps,sizeof(unsigned int))); //checkCudaErrors( hipMemcpyFromSymbol(&h_voxelSize,d_voxelSize,sizeof(float))); //checkCudaErrors( hipMemcpyFromSymbol(&h_deltatt,d_deltatt,sizeof(float))); //printf("width = %d\n",h_width); //printf("height = %d\n",h_height); //printf("steps = %d\n",h_steps); //printf("voxelSize = %f\n",h_voxelSize); //printf("deltatt = %f\n",h_deltatt); //scanf("%d",&tmp); } extern "C" void ZQ_Cuda_Velocity_Advection2D_inRegular_outRegular(const float* velocity, const bool* occupy, const float* inVelocity, 
float* outVelocity) { tex_velocity_regular.normalized = true; tex_velocity_regular.filterMode = hipFilterModeLinear; tex_velocity_regular.addressMode[0] = hipAddressModeClamp; tex_velocity_regular.addressMode[1] = hipAddressModeClamp; tex_occupy.normalized = true; tex_occupy.filterMode = hipFilterModePoint; tex_occupy.addressMode[0] = hipAddressModeClamp; tex_occupy.addressMode[1] = hipAddressModeClamp; tex_inputVelocity_regular.normalized = true; tex_inputVelocity_regular.filterMode = hipFilterModeLinear; tex_inputVelocity_regular.addressMode[0] = hipAddressModeClamp; tex_inputVelocity_regular.addressMode[1] = hipAddressModeClamp; hipChannelFormatDesc channelDescf2 = hipCreateChannelDesc<float2>(); hipArray* velocity_array = 0; checkCudaErrors( hipMallocArray(&velocity_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(velocity_array,0,0,velocity,sizeof(float)*h_width*h_height*2,hipMemcpyHostToDevice) ); hipChannelFormatDesc channelDescb = hipCreateChannelDesc<uchar1>(); hipArray* occupy_array = 0; checkCudaErrors( hipMallocArray(&occupy_array,&channelDescb,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(occupy_array,0,0,occupy,sizeof(bool)*h_width*h_height,hipMemcpyHostToDevice) ); hipArray* input_velocity_array = 0; checkCudaErrors( hipMallocArray(&input_velocity_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(input_velocity_array,0,0,inVelocity,sizeof(float)*h_width*h_height*2,hipMemcpyHostToDevice) ); checkCudaErrors( hipBindTextureToArray(tex_velocity_regular,velocity_array,channelDescf2) ); checkCudaErrors( hipBindTextureToArray(tex_occupy,occupy_array,channelDescb) ); checkCudaErrors( hipBindTextureToArray(tex_inputVelocity_regular,input_velocity_array,channelDescf2) ); float2* d_output = 0; checkCudaErrors( hipMalloc((void**)&d_output,sizeof(float)*h_width*h_height*2) ); checkCudaErrors( hipMemset(d_output,0,sizeof(float)*h_width*h_height*2) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((h_width+blockSize.x-1)/blockSize.x,(h_height+blockSize.y-1)/blockSize.y); hipLaunchKernelGGL(( ZQ_Cuda_Velocity_Advection_inRegular_outRegular_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, d_output); checkCudaErrors( hipMemcpy(outVelocity,d_output,sizeof(float)*h_width*h_height*2,hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(d_output) ); d_output = 0; checkCudaErrors( hipUnbindTexture(tex_velocity_regular) ); checkCudaErrors( hipUnbindTexture(tex_occupy) ); checkCudaErrors( hipUnbindTexture(tex_inputVelocity_regular) ); checkCudaErrors( hipFreeArray(velocity_array) ); checkCudaErrors( hipFreeArray(occupy_array) ); checkCudaErrors( hipFreeArray(input_velocity_array) ); velocity_array = 0; occupy_array = 0; input_velocity_array = 0; } extern "C" void ZQ_Cuda_Velocity_Advection2D_inRegular_outMAC(const float* velocity, const bool* occupy, const float* inVelocity, float* out_mac_u, float* out_mac_v) { tex_velocity_regular.normalized = true; tex_velocity_regular.filterMode = hipFilterModeLinear; tex_velocity_regular.addressMode[0] = hipAddressModeClamp; tex_velocity_regular.addressMode[1] = hipAddressModeClamp; tex_occupy.normalized = true; tex_occupy.filterMode = hipFilterModePoint; tex_occupy.addressMode[0] = hipAddressModeClamp; tex_occupy.addressMode[1] = hipAddressModeClamp; tex_inputVelocity_regular.normalized = true; tex_inputVelocity_regular.filterMode = hipFilterModeLinear; tex_inputVelocity_regular.addressMode[0] = hipAddressModeClamp; tex_inputVelocity_regular.addressMode[1] = hipAddressModeClamp; 
hipChannelFormatDesc channelDescf2 = hipCreateChannelDesc<float2>(); hipArray* velocity_array = 0; checkCudaErrors( hipMallocArray(&velocity_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(velocity_array,0,0,velocity,sizeof(float)*h_width*h_height*2,hipMemcpyHostToDevice) ); hipChannelFormatDesc channelDescb = hipCreateChannelDesc<uchar1>(); hipArray* occupy_array = 0; checkCudaErrors( hipMallocArray(&occupy_array,&channelDescb,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(occupy_array,0,0,occupy,sizeof(bool)*h_width*h_height,hipMemcpyHostToDevice) ); hipArray* input_velocity_array = 0; checkCudaErrors( hipMallocArray(&input_velocity_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(input_velocity_array,0,0,inVelocity,sizeof(float)*h_width*h_height*2,hipMemcpyHostToDevice) ); checkCudaErrors( hipBindTextureToArray(tex_velocity_regular,velocity_array,channelDescf2) ); checkCudaErrors( hipBindTextureToArray(tex_occupy,occupy_array,channelDescb) ); checkCudaErrors( hipBindTextureToArray(tex_inputVelocity_regular,input_velocity_array,channelDescf2) ); float* d_out_mac_u = 0; float* d_out_mac_v = 0; checkCudaErrors( hipMalloc((void**)&d_out_mac_u,sizeof(float)*(h_width+1)*h_height) ); checkCudaErrors( hipMemset(d_out_mac_u,0,sizeof(float)*(h_width+1)*h_height) ); checkCudaErrors( hipMalloc((void**)&d_out_mac_v,sizeof(float)*h_width*(h_height+1)) ); checkCudaErrors( hipMemset(d_out_mac_v,0,sizeof(float)*h_width*(h_height+1)) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize_u(((h_width+1)+blockSize.x-1)/blockSize.x,(h_height+blockSize.y-1)/blockSize.y); dim3 gridSize_v((h_width+blockSize.x-1)/blockSize.x,((h_height+1)+blockSize.y-1)/blockSize.y); hipLaunchKernelGGL(( ZQ_Cuda_Velocity_Advection_inRegular_outMAC_u_Kernel), dim3(gridSize_u),dim3(blockSize), 0, 0, d_out_mac_u); hipLaunchKernelGGL(( ZQ_Cuda_Velocity_Advection_inRegular_outMAC_v_Kernel), dim3(gridSize_v),dim3(blockSize), 0, 0, d_out_mac_v); checkCudaErrors( hipMemcpy(out_mac_u,d_out_mac_u,sizeof(float)*(h_width+1)*h_height,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(out_mac_v,d_out_mac_v,sizeof(float)*h_width*(h_height+1),hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(d_out_mac_u) ); checkCudaErrors( hipFree(d_out_mac_v) ); d_out_mac_u = 0; d_out_mac_v = 0; checkCudaErrors( hipUnbindTexture(tex_velocity_regular) ); checkCudaErrors( hipUnbindTexture(tex_occupy) ); checkCudaErrors( hipUnbindTexture(tex_inputVelocity_regular) ); checkCudaErrors( hipFreeArray(velocity_array) ); checkCudaErrors( hipFreeArray(occupy_array) ); checkCudaErrors( hipFreeArray(input_velocity_array) ); velocity_array = 0; occupy_array = 0; input_velocity_array = 0; } extern "C" void ZQ_Cuda_Velocity_Advection2D_inMAC_outMAC(const float* vel_mac_u, const float* vel_mac_v, const bool* occupy, const float* in_mac_u, const float* in_mac_v, float* out_mac_u, float* out_mac_v) { tex_velocity_MAC_u.normalized = true; tex_velocity_MAC_u.filterMode = hipFilterModeLinear; tex_velocity_MAC_u.addressMode[0] = hipAddressModeClamp; tex_velocity_MAC_u.addressMode[1] = hipAddressModeClamp; tex_velocity_MAC_v.normalized = true; tex_velocity_MAC_v.filterMode = hipFilterModeLinear; tex_velocity_MAC_v.addressMode[0] = hipAddressModeClamp; tex_velocity_MAC_v.addressMode[1] = hipAddressModeClamp; tex_occupy.normalized = true; tex_occupy.filterMode = hipFilterModePoint; tex_occupy.addressMode[0] = hipAddressModeClamp; tex_occupy.addressMode[1] = hipAddressModeClamp; tex_inputVelocity_mac_u.normalized = 
true; tex_inputVelocity_mac_u.filterMode = hipFilterModeLinear; tex_inputVelocity_mac_u.addressMode[0] = hipAddressModeClamp; tex_inputVelocity_mac_u.addressMode[1] = hipAddressModeClamp; tex_inputVelocity_mac_v.normalized = true; tex_inputVelocity_mac_v.filterMode = hipFilterModeLinear; tex_inputVelocity_mac_v.addressMode[0] = hipAddressModeClamp; tex_inputVelocity_mac_v.addressMode[1] = hipAddressModeClamp; hipChannelFormatDesc channelDescf = hipCreateChannelDesc<float>(); hipArray* velocity_u_array = 0; checkCudaErrors( hipMallocArray(&velocity_u_array,&channelDescf,h_width+1,h_height) ); checkCudaErrors( hipMemcpyToArray(velocity_u_array,0,0,vel_mac_u,sizeof(float)*(h_width+1)*h_height,hipMemcpyHostToDevice) ); hipArray* velocity_v_array = 0; checkCudaErrors( hipMallocArray(&velocity_v_array,&channelDescf,h_width,h_height+1) ); checkCudaErrors( hipMemcpyToArray(velocity_v_array,0,0,vel_mac_v,sizeof(float)*h_width*(h_height+1),hipMemcpyHostToDevice) ); hipChannelFormatDesc channelDescb = hipCreateChannelDesc<uchar1>(); hipArray* occupy_array = 0; checkCudaErrors( hipMallocArray(&occupy_array,&channelDescb,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(occupy_array,0,0,occupy,sizeof(bool)*h_width*h_height,hipMemcpyHostToDevice) ); hipArray* input_velocity_u_array = 0; checkCudaErrors( hipMallocArray(&input_velocity_u_array,&channelDescf,h_width+1,h_height) ); checkCudaErrors( hipMemcpyToArray(input_velocity_u_array,0,0,in_mac_u,sizeof(float)*(h_width+1)*h_height,hipMemcpyHostToDevice) ); hipArray* input_velocity_v_array = 0; checkCudaErrors( hipMallocArray(&input_velocity_v_array,&channelDescf,h_width,h_height+1) ); checkCudaErrors( hipMemcpyToArray(input_velocity_v_array,0,0,in_mac_v,sizeof(float)*h_width*(h_height+1),hipMemcpyHostToDevice) ); checkCudaErrors( hipBindTextureToArray(tex_velocity_MAC_u,velocity_u_array,channelDescf) ); checkCudaErrors( hipBindTextureToArray(tex_velocity_MAC_v,velocity_v_array,channelDescf) ); checkCudaErrors( hipBindTextureToArray(tex_occupy,occupy_array,channelDescb) ); checkCudaErrors( hipBindTextureToArray(tex_inputVelocity_mac_u,input_velocity_u_array,channelDescf) ); checkCudaErrors( hipBindTextureToArray(tex_inputVelocity_mac_v,input_velocity_v_array,channelDescf) ); float* d_out_mac_u = 0; float* d_out_mac_v = 0; checkCudaErrors( hipMalloc((void**)&d_out_mac_u,sizeof(float)*(h_width+1)*h_height) ); checkCudaErrors( hipMemset(d_out_mac_u,0,sizeof(float)*(h_width+1)*h_height) ); checkCudaErrors( hipMalloc((void**)&d_out_mac_v,sizeof(float)*h_width*(h_height+1)) ); checkCudaErrors( hipMemset(d_out_mac_v,0,sizeof(float)*h_width*(h_height+1)) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize_u(((h_width+1)+blockSize.x-1)/blockSize.x,(h_height+blockSize.y-1)/blockSize.y); dim3 gridSize_v((h_width+blockSize.x-1)/blockSize.x,((h_height+1)+blockSize.y-1)/blockSize.y); hipLaunchKernelGGL(( ZQ_Cuda_Velocity_Advection_inMAC_outMAC_u_Kernel), dim3(gridSize_u),dim3(blockSize), 0, 0, d_out_mac_u); hipLaunchKernelGGL(( ZQ_Cuda_Velocity_Advection_inMAC_outMAC_v_Kernel), dim3(gridSize_v),dim3(blockSize), 0, 0, d_out_mac_v); checkCudaErrors( hipMemcpy(out_mac_u,d_out_mac_u,sizeof(float)*(h_width+1)*h_height,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(out_mac_v,d_out_mac_v,sizeof(float)*h_width*(h_height+1),hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(d_out_mac_u) ); checkCudaErrors( hipFree(d_out_mac_v) ); d_out_mac_u = 0; d_out_mac_v = 0; checkCudaErrors( hipUnbindTexture(tex_velocity_MAC_u) ); checkCudaErrors( 
hipUnbindTexture(tex_velocity_MAC_v) ); checkCudaErrors( hipUnbindTexture(tex_occupy) ); checkCudaErrors( hipUnbindTexture(tex_inputVelocity_mac_u) ); checkCudaErrors( hipUnbindTexture(tex_inputVelocity_mac_v) ); checkCudaErrors( hipFreeArray(velocity_u_array) ); checkCudaErrors( hipFreeArray(velocity_v_array) ); checkCudaErrors( hipFreeArray(occupy_array) ); checkCudaErrors( hipFreeArray(input_velocity_u_array) ); checkCudaErrors( hipFreeArray(input_velocity_v_array) ); velocity_u_array = 0; velocity_v_array = 0; occupy_array = 0; input_velocity_u_array = 0; input_velocity_v_array = 0; } extern "C" void ZQ_Cuda_Scalar_Advection2D_Regular_Velocity(const float* velocity, const bool* occupy, const float* input, float* output) { tex_velocity_regular.normalized = true; tex_velocity_regular.filterMode = hipFilterModeLinear; tex_velocity_regular.addressMode[0] = hipAddressModeClamp; tex_velocity_regular.addressMode[1] = hipAddressModeClamp; tex_occupy.normalized = true; tex_occupy.filterMode = hipFilterModePoint; tex_occupy.addressMode[0] = hipAddressModeClamp; tex_occupy.addressMode[1] = hipAddressModeClamp; tex_scalar.normalized = true; tex_scalar.filterMode = hipFilterModeLinear; tex_scalar.addressMode[0] = hipAddressModeClamp; tex_scalar.addressMode[1] = hipAddressModeClamp; hipChannelFormatDesc channelDescf2 = hipCreateChannelDesc<float2>(); hipArray* velocity_array = 0; checkCudaErrors( hipMallocArray(&velocity_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(velocity_array,0,0,velocity,sizeof(float)*h_width*h_height*2,hipMemcpyHostToDevice) ); hipChannelFormatDesc channelDescb = hipCreateChannelDesc<uchar1>(); hipArray* occupy_array = 0; checkCudaErrors( hipMallocArray(&occupy_array,&channelDescb,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(occupy_array,0,0,occupy,sizeof(bool)*h_width*h_height,hipMemcpyHostToDevice) ); hipArray* scalar_array = 0; checkCudaErrors( hipMallocArray(&scalar_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(scalar_array,0,0,input,sizeof(float)*h_width*h_height*2,hipMemcpyHostToDevice) ); checkCudaErrors( hipBindTextureToArray(tex_velocity_regular,velocity_array,channelDescf2) ); checkCudaErrors( hipBindTextureToArray(tex_occupy,occupy_array,channelDescb) ); checkCudaErrors( hipBindTextureToArray(tex_scalar,scalar_array,channelDescf2) ); float2* d_output = 0; checkCudaErrors( hipMalloc((void**)&d_output,sizeof(float)*h_width*h_height*2) ); checkCudaErrors( hipMemset(d_output,0,sizeof(float)*h_width*h_height*2) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((h_width+blockSize.x-1)/blockSize.x,(h_height+blockSize.y-1)/blockSize.y); hipLaunchKernelGGL(( ZQ_Cuda_Scalar_Advection_Regular_Velocity_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, d_output); checkCudaErrors( hipMemcpy(output,d_output,sizeof(float)*h_width*h_height*2,hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(d_output) ); d_output = 0; checkCudaErrors( hipUnbindTexture(tex_velocity_regular) ); checkCudaErrors( hipUnbindTexture(tex_occupy) ); checkCudaErrors( hipUnbindTexture(tex_scalar) ); checkCudaErrors( hipFreeArray(velocity_array) ); checkCudaErrors( hipFreeArray(occupy_array) ); checkCudaErrors( hipFreeArray(scalar_array) ); velocity_array = 0; occupy_array = 0; scalar_array = 0; } extern "C" void ZQ_Cuda_Scalar_Advection2D_MAC_Velocity(const float* vel_mac_u, const float* vel_mac_v, const bool* occupy, const float* input, float* output) { tex_velocity_MAC_u.normalized = true; tex_velocity_MAC_u.filterMode = 
hipFilterModeLinear; tex_velocity_MAC_u.addressMode[0] = hipAddressModeClamp; tex_velocity_MAC_u.addressMode[1] = hipAddressModeClamp; tex_velocity_MAC_v.normalized = true; tex_velocity_MAC_v.filterMode = hipFilterModeLinear; tex_velocity_MAC_v.addressMode[0] = hipAddressModeClamp; tex_velocity_MAC_v.addressMode[1] = hipAddressModeClamp; tex_occupy.normalized = true; tex_occupy.filterMode = hipFilterModePoint; tex_occupy.addressMode[0] = hipAddressModeClamp; tex_occupy.addressMode[1] = hipAddressModeClamp; tex_scalar.normalized = true; tex_scalar.filterMode = hipFilterModeLinear; tex_scalar.addressMode[0] = hipAddressModeClamp; tex_scalar.addressMode[1] = hipAddressModeClamp; hipChannelFormatDesc channelDescf = hipCreateChannelDesc<float>(); hipArray* velocity_u_array = 0; checkCudaErrors( hipMallocArray(&velocity_u_array,&channelDescf,h_width+1,h_height) ); checkCudaErrors( hipMemcpyToArray(velocity_u_array,0,0,vel_mac_u,sizeof(float)*(h_width+1)*h_height,hipMemcpyHostToDevice) ); hipArray* velocity_v_array = 0; checkCudaErrors( hipMallocArray(&velocity_v_array,&channelDescf,h_width,h_height+1) ); checkCudaErrors( hipMemcpyToArray(velocity_v_array,0,0,vel_mac_v,sizeof(float)*h_width*(h_height+1),hipMemcpyHostToDevice) ); hipChannelFormatDesc channelDescb = hipCreateChannelDesc<uchar1>(); hipArray* occupy_array = 0; checkCudaErrors( hipMallocArray(&occupy_array,&channelDescb,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(occupy_array,0,0,occupy,sizeof(bool)*h_width*h_height,hipMemcpyHostToDevice) ); hipChannelFormatDesc channelDescf2 = hipCreateChannelDesc<float2>(); hipArray* scalar_array = 0; checkCudaErrors( hipMallocArray(&scalar_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( hipMemcpyToArray(scalar_array,0,0,input,sizeof(float)*h_width*h_height*2,hipMemcpyHostToDevice) ); checkCudaErrors( hipBindTextureToArray(tex_velocity_MAC_u,velocity_u_array,channelDescf) ); checkCudaErrors( hipBindTextureToArray(tex_velocity_MAC_v,velocity_v_array,channelDescf) ); checkCudaErrors( hipBindTextureToArray(tex_occupy,occupy_array,channelDescb) ); checkCudaErrors( hipBindTextureToArray(tex_scalar,scalar_array,channelDescf2) ); float2* d_output = 0; checkCudaErrors( hipMalloc((void**)&d_output,sizeof(float)*h_width*h_height*2) ); checkCudaErrors( hipMemset(d_output,0,sizeof(float)*h_width*h_height*2) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((h_width+blockSize.x-1)/blockSize.x,(h_height+blockSize.y-1)/blockSize.y); hipLaunchKernelGGL(( ZQ_Cuda_Scalar_Advection_MAC_Velocity_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, d_output); checkCudaErrors( hipMemcpy(output,d_output,sizeof(float)*h_width*h_height*2,hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(d_output) ); d_output = 0; checkCudaErrors( hipUnbindTexture(tex_velocity_MAC_u) ); checkCudaErrors( hipUnbindTexture(tex_velocity_MAC_v) ); checkCudaErrors( hipUnbindTexture(tex_occupy) ); checkCudaErrors( hipUnbindTexture(tex_scalar) ); checkCudaErrors( hipFreeArray(velocity_u_array) ); checkCudaErrors( hipFreeArray(velocity_v_array) ); checkCudaErrors( hipFreeArray(occupy_array) ); checkCudaErrors( hipFreeArray(scalar_array) ); velocity_u_array = 0; velocity_v_array = 0; occupy_array = 0; scalar_array = 0; } } #endif
a2217ebe14bc458fb43e5ed5dc30ceaa113bd6c6.cu
#ifndef _ZQ_CUDA_ADVECTION_2D_CU_ #define _ZQ_CUDA_ADVECTION_2D_CU_ #include "ZQ_CUDA_Advection2D.cuh" namespace ZQ_CUDA_Advection2D { texture<float2,2,cudaReadModeElementType> tex_velocity_regular; texture<float,2,cudaReadModeElementType> tex_velocity_MAC_u; texture<float,2,cudaReadModeElementType> tex_velocity_MAC_v; texture<unsigned char,2,cudaReadModeElementType> tex_occupy; texture<float,2,cudaReadModeElementType> tex_inputVelocity_mac_u; texture<float,2,cudaReadModeElementType> tex_inputVelocity_mac_v; texture<float2,2,cudaReadModeElementType> tex_inputVelocity_regular; texture<float2,2,cudaReadModeElementType> tex_scalar; //temperature and density unsigned h_width; unsigned h_height; unsigned int h_steps; float h_voxelSize; float h_deltatt; __constant__ unsigned int d_width; __constant__ unsigned int d_height; __constant__ unsigned int d_steps; __constant__ float d_voxelSize; __constant__ float d_deltatt; /****************************************************************************************/ __global__ void ZQ_Cuda_Velocity_Advection_inRegular_outRegular_Kernel(float2 * d_output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= d_width || y >= d_height) return; float2 pos = make_float2(x+0.5f,y+0.5f); float2 lastpos = pos; float2 velCoord = make_float2(pos.x/d_width,pos.y/d_height); float2 lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); unsigned int istep = 0; do { float2 occupyCoord = velCoord; if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord = make_float2(pos.x/d_width,pos.y/d_height); lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); istep ++; } while (istep < d_steps); float2 out_coord = make_float2(lastpos.x/d_width,lastpos.y/d_height); float2 tempvel = tex2D(tex_inputVelocity_regular,out_coord.x,out_coord.y); d_output[y*d_width+x] = tempvel; } __global__ void ZQ_Cuda_Velocity_Advection_inRegular_outMAC_u_Kernel(float * d_mac_u) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x > d_width || y >= d_height) return; float2 pos = make_float2(x,y+0.5f); float2 lastpos = pos; float2 velCoord = make_float2(pos.x/d_width,pos.y/d_height); float2 lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); unsigned int istep = 0; do { float2 occupyCoord = velCoord; if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord = make_float2(pos.x/d_width,pos.y/d_height); lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); istep ++; } while (istep < d_steps); float2 out_coord = make_float2(lastpos.x/d_width,lastpos.y/d_height); float2 tempvel = tex2D(tex_inputVelocity_regular,out_coord.x,out_coord.y); d_mac_u[y*(d_width+1)+x] = tempvel.x; } __global__ void ZQ_Cuda_Velocity_Advection_inRegular_outMAC_v_Kernel(float * d_mac_v) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= d_width || y > d_height) return; float2 pos = make_float2(x+0.5f,y); float2 lastpos = pos; float2 velCoord = make_float2(pos.x/d_width,pos.y/d_height); float2 lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); unsigned int istep = 0; do { float2 occupyCoord = velCoord; if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 
&& pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord = make_float2(pos.x/d_width,pos.y/d_height); lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); istep ++; } while (istep < d_steps); float2 out_coord = make_float2(lastpos.x/d_width,lastpos.y/d_height); float2 tempvel = tex2D(tex_inputVelocity_regular,out_coord.x,out_coord.y); d_mac_v[y*d_width+x] = tempvel.y; } __global__ void ZQ_Cuda_Velocity_Advection_inMAC_outMAC_u_Kernel(float * d_mac_u) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x > d_width || y >= d_height) return; float2 pos = make_float2(x,y+0.5f); float2 lastpos = pos; float2 velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); float2 velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); float2 lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); unsigned int istep = 0; do { float2 occupyCoord = make_float2(pos.x/d_width,pos.y/d_height); if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); istep ++; } while (istep < d_steps); float2 out_coord_u = make_float2((lastpos.x+0.5f)/(d_width+1),lastpos.y/d_height); float tempvel = tex2D(tex_inputVelocity_mac_u,out_coord_u.x,out_coord_u.y); d_mac_u[y*(d_width+1)+x] = tempvel; } __global__ void ZQ_Cuda_Velocity_Advection_inMAC_outMAC_v_Kernel(float * d_mac_v) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= d_width || y > d_height) return; float2 pos = make_float2(x+0.5f,y); float2 lastpos = pos; float2 velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); float2 velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); float2 lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); unsigned int istep = 0; do { float2 occupyCoord = make_float2(pos.x/d_width,pos.y/d_height); if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); istep ++; } while (istep < d_steps); float2 out_coord_v = make_float2(lastpos.x/d_width,(lastpos.y+0.5f)/(d_height+1)); float tempvel = tex2D(tex_inputVelocity_mac_v,out_coord_v.x,out_coord_v.y); d_mac_v[y*d_width+x] = tempvel; } __global__ void ZQ_Cuda_Scalar_Advection_Regular_Velocity_Kernel(float2* d_output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= d_width || y >= d_height) return; float2 pos = make_float2(x+0.5f,y+0.5f); float2 lastpos = pos; float2 velCoord = make_float2(pos.x/d_width,pos.y/d_height); float2 lastvel = 
tex2D(tex_velocity_regular,velCoord.x,velCoord.y); unsigned int istep = 0; do { float2 occupyCoord = velCoord; if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord = make_float2(pos.x/d_width,pos.y/d_height); lastvel = tex2D(tex_velocity_regular,velCoord.x,velCoord.y); istep ++; } while (istep < d_steps); float2 out_coord = make_float2(lastpos.x/d_width,lastpos.y/d_height); float2 tempscalar = tex2D(tex_scalar,out_coord.x,out_coord.y); d_output[y*d_width+x] = tempscalar; } __global__ void ZQ_Cuda_Scalar_Advection_MAC_Velocity_Kernel(float2* d_output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= d_width || y >= d_height) return; float2 pos = make_float2(x+0.5f,y+0.5f); float2 lastpos = pos; float2 velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); float2 velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); float2 lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); unsigned int istep = 0; do { float2 occupyCoord = make_float2(pos.x/d_width,pos.y/d_height); if(!(pos.x >= 0 && pos.x <= d_width && pos.y >= 0 && pos.y <= d_height)) break; if(tex2D(tex_occupy,occupyCoord.x,occupyCoord.y) != 0) break; lastpos = pos; pos -= lastvel * d_deltatt / d_voxelSize; velCoord_u = make_float2((pos.x+0.5f)/(d_width+1),pos.y/d_height); velCoord_v = make_float2(pos.x/d_width,(pos.y+0.5f)/(d_height+1)); lastvel = make_float2( tex2D(tex_velocity_MAC_u,velCoord_u.x,velCoord_u.y), tex2D(tex_velocity_MAC_v,velCoord_v.x,velCoord_v.y)); istep ++; } while (istep < d_steps); float2 out_coord = make_float2(lastpos.x/d_width,lastpos.y/d_height); float2 tempscalar = tex2D(tex_scalar,out_coord.x,out_coord.y); d_output[y*d_width+x] = tempscalar; } extern "C" void ZQ_Cuda_Prepare_Advection2D(const unsigned int width, const unsigned int height, const float voxelSize, const unsigned int steps, const float deltatt) { h_width = width; h_height = height; h_steps = steps; h_voxelSize = voxelSize; h_deltatt = deltatt; checkCudaErrors( cudaMemcpyToSymbol(ZQ_CUDA_Advection2D::d_width,&width,sizeof(unsigned int))); checkCudaErrors( cudaMemcpyToSymbol(ZQ_CUDA_Advection2D::d_height,&height,sizeof(unsigned int))); checkCudaErrors( cudaMemcpyToSymbol(ZQ_CUDA_Advection2D::d_steps,&steps,sizeof(unsigned int))); checkCudaErrors( cudaMemcpyToSymbol(ZQ_CUDA_Advection2D::d_voxelSize,&voxelSize,sizeof(float))); checkCudaErrors( cudaMemcpyToSymbol(ZQ_CUDA_Advection2D::d_deltatt,&deltatt,sizeof(float))); //int tmp = 0; //scanf("%d",&tmp); //checkCudaErrors( cudaMemcpyFromSymbol(&h_width,d_width,sizeof(unsigned int))); //checkCudaErrors( cudaMemcpyFromSymbol(&h_height,d_height,sizeof(unsigned int))); //checkCudaErrors( cudaMemcpyFromSymbol(&h_steps,d_steps,sizeof(unsigned int))); //checkCudaErrors( cudaMemcpyFromSymbol(&h_voxelSize,d_voxelSize,sizeof(float))); //checkCudaErrors( cudaMemcpyFromSymbol(&h_deltatt,d_deltatt,sizeof(float))); //printf("width = %d\n",h_width); //printf("height = %d\n",h_height); //printf("steps = %d\n",h_steps); //printf("voxelSize = %f\n",h_voxelSize); //printf("deltatt = %f\n",h_deltatt); //scanf("%d",&tmp); } extern "C" void ZQ_Cuda_Velocity_Advection2D_inRegular_outRegular(const float* velocity, const bool* occupy, const float* inVelocity, float* outVelocity) { tex_velocity_regular.normalized = true; 
tex_velocity_regular.filterMode = cudaFilterModeLinear; tex_velocity_regular.addressMode[0] = cudaAddressModeClamp; tex_velocity_regular.addressMode[1] = cudaAddressModeClamp; tex_occupy.normalized = true; tex_occupy.filterMode = cudaFilterModePoint; tex_occupy.addressMode[0] = cudaAddressModeClamp; tex_occupy.addressMode[1] = cudaAddressModeClamp; tex_inputVelocity_regular.normalized = true; tex_inputVelocity_regular.filterMode = cudaFilterModeLinear; tex_inputVelocity_regular.addressMode[0] = cudaAddressModeClamp; tex_inputVelocity_regular.addressMode[1] = cudaAddressModeClamp; cudaChannelFormatDesc channelDescf2 = cudaCreateChannelDesc<float2>(); cudaArray* velocity_array = 0; checkCudaErrors( cudaMallocArray(&velocity_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(velocity_array,0,0,velocity,sizeof(float)*h_width*h_height*2,cudaMemcpyHostToDevice) ); cudaChannelFormatDesc channelDescb = cudaCreateChannelDesc<uchar1>(); cudaArray* occupy_array = 0; checkCudaErrors( cudaMallocArray(&occupy_array,&channelDescb,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(occupy_array,0,0,occupy,sizeof(bool)*h_width*h_height,cudaMemcpyHostToDevice) ); cudaArray* input_velocity_array = 0; checkCudaErrors( cudaMallocArray(&input_velocity_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(input_velocity_array,0,0,inVelocity,sizeof(float)*h_width*h_height*2,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaBindTextureToArray(tex_velocity_regular,velocity_array,channelDescf2) ); checkCudaErrors( cudaBindTextureToArray(tex_occupy,occupy_array,channelDescb) ); checkCudaErrors( cudaBindTextureToArray(tex_inputVelocity_regular,input_velocity_array,channelDescf2) ); float2* d_output = 0; checkCudaErrors( cudaMalloc((void**)&d_output,sizeof(float)*h_width*h_height*2) ); checkCudaErrors( cudaMemset(d_output,0,sizeof(float)*h_width*h_height*2) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((h_width+blockSize.x-1)/blockSize.x,(h_height+blockSize.y-1)/blockSize.y); ZQ_Cuda_Velocity_Advection_inRegular_outRegular_Kernel<<<gridSize,blockSize>>>(d_output); checkCudaErrors( cudaMemcpy(outVelocity,d_output,sizeof(float)*h_width*h_height*2,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(d_output) ); d_output = 0; checkCudaErrors( cudaUnbindTexture(tex_velocity_regular) ); checkCudaErrors( cudaUnbindTexture(tex_occupy) ); checkCudaErrors( cudaUnbindTexture(tex_inputVelocity_regular) ); checkCudaErrors( cudaFreeArray(velocity_array) ); checkCudaErrors( cudaFreeArray(occupy_array) ); checkCudaErrors( cudaFreeArray(input_velocity_array) ); velocity_array = 0; occupy_array = 0; input_velocity_array = 0; } extern "C" void ZQ_Cuda_Velocity_Advection2D_inRegular_outMAC(const float* velocity, const bool* occupy, const float* inVelocity, float* out_mac_u, float* out_mac_v) { tex_velocity_regular.normalized = true; tex_velocity_regular.filterMode = cudaFilterModeLinear; tex_velocity_regular.addressMode[0] = cudaAddressModeClamp; tex_velocity_regular.addressMode[1] = cudaAddressModeClamp; tex_occupy.normalized = true; tex_occupy.filterMode = cudaFilterModePoint; tex_occupy.addressMode[0] = cudaAddressModeClamp; tex_occupy.addressMode[1] = cudaAddressModeClamp; tex_inputVelocity_regular.normalized = true; tex_inputVelocity_regular.filterMode = cudaFilterModeLinear; tex_inputVelocity_regular.addressMode[0] = cudaAddressModeClamp; tex_inputVelocity_regular.addressMode[1] = cudaAddressModeClamp; cudaChannelFormatDesc channelDescf2 = cudaCreateChannelDesc<float2>(); 
cudaArray* velocity_array = 0; checkCudaErrors( cudaMallocArray(&velocity_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(velocity_array,0,0,velocity,sizeof(float)*h_width*h_height*2,cudaMemcpyHostToDevice) ); cudaChannelFormatDesc channelDescb = cudaCreateChannelDesc<uchar1>(); cudaArray* occupy_array = 0; checkCudaErrors( cudaMallocArray(&occupy_array,&channelDescb,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(occupy_array,0,0,occupy,sizeof(bool)*h_width*h_height,cudaMemcpyHostToDevice) ); cudaArray* input_velocity_array = 0; checkCudaErrors( cudaMallocArray(&input_velocity_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(input_velocity_array,0,0,inVelocity,sizeof(float)*h_width*h_height*2,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaBindTextureToArray(tex_velocity_regular,velocity_array,channelDescf2) ); checkCudaErrors( cudaBindTextureToArray(tex_occupy,occupy_array,channelDescb) ); checkCudaErrors( cudaBindTextureToArray(tex_inputVelocity_regular,input_velocity_array,channelDescf2) ); float* d_out_mac_u = 0; float* d_out_mac_v = 0; checkCudaErrors( cudaMalloc((void**)&d_out_mac_u,sizeof(float)*(h_width+1)*h_height) ); checkCudaErrors( cudaMemset(d_out_mac_u,0,sizeof(float)*(h_width+1)*h_height) ); checkCudaErrors( cudaMalloc((void**)&d_out_mac_v,sizeof(float)*h_width*(h_height+1)) ); checkCudaErrors( cudaMemset(d_out_mac_v,0,sizeof(float)*h_width*(h_height+1)) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize_u(((h_width+1)+blockSize.x-1)/blockSize.x,(h_height+blockSize.y-1)/blockSize.y); dim3 gridSize_v((h_width+blockSize.x-1)/blockSize.x,((h_height+1)+blockSize.y-1)/blockSize.y); ZQ_Cuda_Velocity_Advection_inRegular_outMAC_u_Kernel<<<gridSize_u,blockSize>>>(d_out_mac_u); ZQ_Cuda_Velocity_Advection_inRegular_outMAC_v_Kernel<<<gridSize_v,blockSize>>>(d_out_mac_v); checkCudaErrors( cudaMemcpy(out_mac_u,d_out_mac_u,sizeof(float)*(h_width+1)*h_height,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(out_mac_v,d_out_mac_v,sizeof(float)*h_width*(h_height+1),cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(d_out_mac_u) ); checkCudaErrors( cudaFree(d_out_mac_v) ); d_out_mac_u = 0; d_out_mac_v = 0; checkCudaErrors( cudaUnbindTexture(tex_velocity_regular) ); checkCudaErrors( cudaUnbindTexture(tex_occupy) ); checkCudaErrors( cudaUnbindTexture(tex_inputVelocity_regular) ); checkCudaErrors( cudaFreeArray(velocity_array) ); checkCudaErrors( cudaFreeArray(occupy_array) ); checkCudaErrors( cudaFreeArray(input_velocity_array) ); velocity_array = 0; occupy_array = 0; input_velocity_array = 0; } extern "C" void ZQ_Cuda_Velocity_Advection2D_inMAC_outMAC(const float* vel_mac_u, const float* vel_mac_v, const bool* occupy, const float* in_mac_u, const float* in_mac_v, float* out_mac_u, float* out_mac_v) { tex_velocity_MAC_u.normalized = true; tex_velocity_MAC_u.filterMode = cudaFilterModeLinear; tex_velocity_MAC_u.addressMode[0] = cudaAddressModeClamp; tex_velocity_MAC_u.addressMode[1] = cudaAddressModeClamp; tex_velocity_MAC_v.normalized = true; tex_velocity_MAC_v.filterMode = cudaFilterModeLinear; tex_velocity_MAC_v.addressMode[0] = cudaAddressModeClamp; tex_velocity_MAC_v.addressMode[1] = cudaAddressModeClamp; tex_occupy.normalized = true; tex_occupy.filterMode = cudaFilterModePoint; tex_occupy.addressMode[0] = cudaAddressModeClamp; tex_occupy.addressMode[1] = cudaAddressModeClamp; tex_inputVelocity_mac_u.normalized = true; tex_inputVelocity_mac_u.filterMode = cudaFilterModeLinear; tex_inputVelocity_mac_u.addressMode[0] 
= cudaAddressModeClamp; tex_inputVelocity_mac_u.addressMode[1] = cudaAddressModeClamp; tex_inputVelocity_mac_v.normalized = true; tex_inputVelocity_mac_v.filterMode = cudaFilterModeLinear; tex_inputVelocity_mac_v.addressMode[0] = cudaAddressModeClamp; tex_inputVelocity_mac_v.addressMode[1] = cudaAddressModeClamp; cudaChannelFormatDesc channelDescf = cudaCreateChannelDesc<float>(); cudaArray* velocity_u_array = 0; checkCudaErrors( cudaMallocArray(&velocity_u_array,&channelDescf,h_width+1,h_height) ); checkCudaErrors( cudaMemcpyToArray(velocity_u_array,0,0,vel_mac_u,sizeof(float)*(h_width+1)*h_height,cudaMemcpyHostToDevice) ); cudaArray* velocity_v_array = 0; checkCudaErrors( cudaMallocArray(&velocity_v_array,&channelDescf,h_width,h_height+1) ); checkCudaErrors( cudaMemcpyToArray(velocity_v_array,0,0,vel_mac_v,sizeof(float)*h_width*(h_height+1),cudaMemcpyHostToDevice) ); cudaChannelFormatDesc channelDescb = cudaCreateChannelDesc<uchar1>(); cudaArray* occupy_array = 0; checkCudaErrors( cudaMallocArray(&occupy_array,&channelDescb,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(occupy_array,0,0,occupy,sizeof(bool)*h_width*h_height,cudaMemcpyHostToDevice) ); cudaArray* input_velocity_u_array = 0; checkCudaErrors( cudaMallocArray(&input_velocity_u_array,&channelDescf,h_width+1,h_height) ); checkCudaErrors( cudaMemcpyToArray(input_velocity_u_array,0,0,in_mac_u,sizeof(float)*(h_width+1)*h_height,cudaMemcpyHostToDevice) ); cudaArray* input_velocity_v_array = 0; checkCudaErrors( cudaMallocArray(&input_velocity_v_array,&channelDescf,h_width,h_height+1) ); checkCudaErrors( cudaMemcpyToArray(input_velocity_v_array,0,0,in_mac_v,sizeof(float)*h_width*(h_height+1),cudaMemcpyHostToDevice) ); checkCudaErrors( cudaBindTextureToArray(tex_velocity_MAC_u,velocity_u_array,channelDescf) ); checkCudaErrors( cudaBindTextureToArray(tex_velocity_MAC_v,velocity_v_array,channelDescf) ); checkCudaErrors( cudaBindTextureToArray(tex_occupy,occupy_array,channelDescb) ); checkCudaErrors( cudaBindTextureToArray(tex_inputVelocity_mac_u,input_velocity_u_array,channelDescf) ); checkCudaErrors( cudaBindTextureToArray(tex_inputVelocity_mac_v,input_velocity_v_array,channelDescf) ); float* d_out_mac_u = 0; float* d_out_mac_v = 0; checkCudaErrors( cudaMalloc((void**)&d_out_mac_u,sizeof(float)*(h_width+1)*h_height) ); checkCudaErrors( cudaMemset(d_out_mac_u,0,sizeof(float)*(h_width+1)*h_height) ); checkCudaErrors( cudaMalloc((void**)&d_out_mac_v,sizeof(float)*h_width*(h_height+1)) ); checkCudaErrors( cudaMemset(d_out_mac_v,0,sizeof(float)*h_width*(h_height+1)) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize_u(((h_width+1)+blockSize.x-1)/blockSize.x,(h_height+blockSize.y-1)/blockSize.y); dim3 gridSize_v((h_width+blockSize.x-1)/blockSize.x,((h_height+1)+blockSize.y-1)/blockSize.y); ZQ_Cuda_Velocity_Advection_inMAC_outMAC_u_Kernel<<<gridSize_u,blockSize>>>(d_out_mac_u); ZQ_Cuda_Velocity_Advection_inMAC_outMAC_v_Kernel<<<gridSize_v,blockSize>>>(d_out_mac_v); checkCudaErrors( cudaMemcpy(out_mac_u,d_out_mac_u,sizeof(float)*(h_width+1)*h_height,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(out_mac_v,d_out_mac_v,sizeof(float)*h_width*(h_height+1),cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(d_out_mac_u) ); checkCudaErrors( cudaFree(d_out_mac_v) ); d_out_mac_u = 0; d_out_mac_v = 0; checkCudaErrors( cudaUnbindTexture(tex_velocity_MAC_u) ); checkCudaErrors( cudaUnbindTexture(tex_velocity_MAC_v) ); checkCudaErrors( cudaUnbindTexture(tex_occupy) ); checkCudaErrors( cudaUnbindTexture(tex_inputVelocity_mac_u) ); 
checkCudaErrors( cudaUnbindTexture(tex_inputVelocity_mac_v) ); checkCudaErrors( cudaFreeArray(velocity_u_array) ); checkCudaErrors( cudaFreeArray(velocity_v_array) ); checkCudaErrors( cudaFreeArray(occupy_array) ); checkCudaErrors( cudaFreeArray(input_velocity_u_array) ); checkCudaErrors( cudaFreeArray(input_velocity_v_array) ); velocity_u_array = 0; velocity_v_array = 0; occupy_array = 0; input_velocity_u_array = 0; input_velocity_v_array = 0; } extern "C" void ZQ_Cuda_Scalar_Advection2D_Regular_Velocity(const float* velocity, const bool* occupy, const float* input, float* output) { tex_velocity_regular.normalized = true; tex_velocity_regular.filterMode = cudaFilterModeLinear; tex_velocity_regular.addressMode[0] = cudaAddressModeClamp; tex_velocity_regular.addressMode[1] = cudaAddressModeClamp; tex_occupy.normalized = true; tex_occupy.filterMode = cudaFilterModePoint; tex_occupy.addressMode[0] = cudaAddressModeClamp; tex_occupy.addressMode[1] = cudaAddressModeClamp; tex_scalar.normalized = true; tex_scalar.filterMode = cudaFilterModeLinear; tex_scalar.addressMode[0] = cudaAddressModeClamp; tex_scalar.addressMode[1] = cudaAddressModeClamp; cudaChannelFormatDesc channelDescf2 = cudaCreateChannelDesc<float2>(); cudaArray* velocity_array = 0; checkCudaErrors( cudaMallocArray(&velocity_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(velocity_array,0,0,velocity,sizeof(float)*h_width*h_height*2,cudaMemcpyHostToDevice) ); cudaChannelFormatDesc channelDescb = cudaCreateChannelDesc<uchar1>(); cudaArray* occupy_array = 0; checkCudaErrors( cudaMallocArray(&occupy_array,&channelDescb,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(occupy_array,0,0,occupy,sizeof(bool)*h_width*h_height,cudaMemcpyHostToDevice) ); cudaArray* scalar_array = 0; checkCudaErrors( cudaMallocArray(&scalar_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(scalar_array,0,0,input,sizeof(float)*h_width*h_height*2,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaBindTextureToArray(tex_velocity_regular,velocity_array,channelDescf2) ); checkCudaErrors( cudaBindTextureToArray(tex_occupy,occupy_array,channelDescb) ); checkCudaErrors( cudaBindTextureToArray(tex_scalar,scalar_array,channelDescf2) ); float2* d_output = 0; checkCudaErrors( cudaMalloc((void**)&d_output,sizeof(float)*h_width*h_height*2) ); checkCudaErrors( cudaMemset(d_output,0,sizeof(float)*h_width*h_height*2) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((h_width+blockSize.x-1)/blockSize.x,(h_height+blockSize.y-1)/blockSize.y); ZQ_Cuda_Scalar_Advection_Regular_Velocity_Kernel<<<gridSize,blockSize>>>(d_output); checkCudaErrors( cudaMemcpy(output,d_output,sizeof(float)*h_width*h_height*2,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(d_output) ); d_output = 0; checkCudaErrors( cudaUnbindTexture(tex_velocity_regular) ); checkCudaErrors( cudaUnbindTexture(tex_occupy) ); checkCudaErrors( cudaUnbindTexture(tex_scalar) ); checkCudaErrors( cudaFreeArray(velocity_array) ); checkCudaErrors( cudaFreeArray(occupy_array) ); checkCudaErrors( cudaFreeArray(scalar_array) ); velocity_array = 0; occupy_array = 0; scalar_array = 0; } extern "C" void ZQ_Cuda_Scalar_Advection2D_MAC_Velocity(const float* vel_mac_u, const float* vel_mac_v, const bool* occupy, const float* input, float* output) { tex_velocity_MAC_u.normalized = true; tex_velocity_MAC_u.filterMode = cudaFilterModeLinear; tex_velocity_MAC_u.addressMode[0] = cudaAddressModeClamp; tex_velocity_MAC_u.addressMode[1] = cudaAddressModeClamp; 
tex_velocity_MAC_v.normalized = true; tex_velocity_MAC_v.filterMode = cudaFilterModeLinear; tex_velocity_MAC_v.addressMode[0] = cudaAddressModeClamp; tex_velocity_MAC_v.addressMode[1] = cudaAddressModeClamp; tex_occupy.normalized = true; tex_occupy.filterMode = cudaFilterModePoint; tex_occupy.addressMode[0] = cudaAddressModeClamp; tex_occupy.addressMode[1] = cudaAddressModeClamp; tex_scalar.normalized = true; tex_scalar.filterMode = cudaFilterModeLinear; tex_scalar.addressMode[0] = cudaAddressModeClamp; tex_scalar.addressMode[1] = cudaAddressModeClamp; cudaChannelFormatDesc channelDescf = cudaCreateChannelDesc<float>(); cudaArray* velocity_u_array = 0; checkCudaErrors( cudaMallocArray(&velocity_u_array,&channelDescf,h_width+1,h_height) ); checkCudaErrors( cudaMemcpyToArray(velocity_u_array,0,0,vel_mac_u,sizeof(float)*(h_width+1)*h_height,cudaMemcpyHostToDevice) ); cudaArray* velocity_v_array = 0; checkCudaErrors( cudaMallocArray(&velocity_v_array,&channelDescf,h_width,h_height+1) ); checkCudaErrors( cudaMemcpyToArray(velocity_v_array,0,0,vel_mac_v,sizeof(float)*h_width*(h_height+1),cudaMemcpyHostToDevice) ); cudaChannelFormatDesc channelDescb = cudaCreateChannelDesc<uchar1>(); cudaArray* occupy_array = 0; checkCudaErrors( cudaMallocArray(&occupy_array,&channelDescb,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(occupy_array,0,0,occupy,sizeof(bool)*h_width*h_height,cudaMemcpyHostToDevice) ); cudaChannelFormatDesc channelDescf2 = cudaCreateChannelDesc<float2>(); cudaArray* scalar_array = 0; checkCudaErrors( cudaMallocArray(&scalar_array,&channelDescf2,h_width,h_height) ); checkCudaErrors( cudaMemcpyToArray(scalar_array,0,0,input,sizeof(float)*h_width*h_height*2,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaBindTextureToArray(tex_velocity_MAC_u,velocity_u_array,channelDescf) ); checkCudaErrors( cudaBindTextureToArray(tex_velocity_MAC_v,velocity_v_array,channelDescf) ); checkCudaErrors( cudaBindTextureToArray(tex_occupy,occupy_array,channelDescb) ); checkCudaErrors( cudaBindTextureToArray(tex_scalar,scalar_array,channelDescf2) ); float2* d_output = 0; checkCudaErrors( cudaMalloc((void**)&d_output,sizeof(float)*h_width*h_height*2) ); checkCudaErrors( cudaMemset(d_output,0,sizeof(float)*h_width*h_height*2) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((h_width+blockSize.x-1)/blockSize.x,(h_height+blockSize.y-1)/blockSize.y); ZQ_Cuda_Scalar_Advection_MAC_Velocity_Kernel<<<gridSize,blockSize>>>(d_output); checkCudaErrors( cudaMemcpy(output,d_output,sizeof(float)*h_width*h_height*2,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(d_output) ); d_output = 0; checkCudaErrors( cudaUnbindTexture(tex_velocity_MAC_u) ); checkCudaErrors( cudaUnbindTexture(tex_velocity_MAC_v) ); checkCudaErrors( cudaUnbindTexture(tex_occupy) ); checkCudaErrors( cudaUnbindTexture(tex_scalar) ); checkCudaErrors( cudaFreeArray(velocity_u_array) ); checkCudaErrors( cudaFreeArray(velocity_v_array) ); checkCudaErrors( cudaFreeArray(occupy_array) ); checkCudaErrors( cudaFreeArray(scalar_array) ); velocity_u_array = 0; velocity_v_array = 0; occupy_array = 0; scalar_array = 0; } } #endif
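All of the advection kernels in this pair share one semi-Lagrangian pattern: sample the velocity at the current position, trace the position backwards by vel * deltatt / voxelSize for up to d_steps sub-steps (stopping at the domain boundary or at an occupied cell), and finally sample the advected quantity (velocity component or scalar) at the last valid position. The following is a minimal CPU sketch of that backtrace, not part of the original sources: plain arrays stand in for the CUDA textures, and sample2/clampf/backtrace are helper names introduced here only for illustration.

/* Minimal CPU sketch (not from the original files) of the semi-Lagrangian
 * backtrace used by the advection kernels above. Plain arrays replace the
 * textures: vel is a row-major w*h array of two-component velocities and
 * occ is the w*h obstacle mask. */
typedef struct { float x, y; } vec2;

static float clampf(float v, float lo, float hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

/* Bilinear sample of a two-component field at a position in grid units. */
static vec2 sample2(const vec2 *f, int w, int h, vec2 p)
{
    float x = clampf(p.x - 0.5f, 0.0f, (float)(w - 1));
    float y = clampf(p.y - 0.5f, 0.0f, (float)(h - 1));
    int x0 = (int)x, y0 = (int)y;
    int x1 = x0 + 1 < w ? x0 + 1 : x0;
    int y1 = y0 + 1 < h ? y0 + 1 : y0;
    float fx = x - x0, fy = y - y0;
    vec2 a = f[y0 * w + x0], b = f[y0 * w + x1];
    vec2 c = f[y1 * w + x0], d = f[y1 * w + x1];
    vec2 r;
    r.x = (1 - fy) * ((1 - fx) * a.x + fx * b.x) + fy * ((1 - fx) * c.x + fx * d.x);
    r.y = (1 - fy) * ((1 - fx) * a.y + fx * b.y) + fy * ((1 - fx) * c.y + fx * d.y);
    return r;
}

/* Trace a cell-centre position backwards through the velocity field for at
 * most `steps` sub-steps; the advected quantity is then sampled at the
 * returned position, just as the kernels sample tex_inputVelocity_* or
 * tex_scalar at `lastpos`. */
static vec2 backtrace(const vec2 *vel, const unsigned char *occ,
                      int w, int h, float voxelSize, float dt, int steps, vec2 pos)
{
    vec2 lastpos = pos;
    vec2 v = sample2(vel, w, h, pos);
    for (int s = 0; s < steps; ++s) {
        if (pos.x < 0 || pos.x > w || pos.y < 0 || pos.y > h)
            break;                                   /* left the domain      */
        int ox = (int)clampf(pos.x, 0.0f, (float)(w - 1));
        int oy = (int)clampf(pos.y, 0.0f, (float)(h - 1));
        if (occ[oy * w + ox])
            break;                                   /* hit an obstacle cell */
        lastpos = pos;
        pos.x -= v.x * dt / voxelSize;               /* step against the flow */
        pos.y -= v.y * dt / voxelSize;
        v = sample2(vel, w, h, pos);
    }
    return lastpos;
}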
a7c3fa44af101101dc38b024f7a6566b2999189f.hip
// !!! This is a file automatically generated by hipify!!!
// Example program: adding up a one-dimensional array
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define DataSize 16

void GenerateNumbers(int *number, int size, int k) // generate random data
{
    int i;
    srand(k * time(NULL));
    for (i = 0; i < size; i++)
        number[i] = rand() % 100;
}

__global__ void Add_A(int *Da) // kernel function
{
    int tx = threadIdx.x; // x id of the thread
    int bx = blockIdx.x;  // x id of the block
    int bn = blockDim.x;  // number of threads along the block's x axis
    int id = bx*bn+tx;    // compute the array index
    /*int i = DataSize/2;
    while (i != 0) {
        if (id < i)
            Da[id] += Da[id + i];
        __syncthreads();
        i /= 2;
    }*/
    int i = DataSize/2;
    for (int j = 0; j < 4; j++) { // log2(16) = 4
        if (id < i)
            Da[id] += Da[id + i];
        __syncthreads( );
        i /= 2;
    }
}

int main()
{
    int *Ha; // CPU array
    int size = DataSize * sizeof(int);
    Ha = (int*)malloc(size); // allocate the host array
    GenerateNumbers(Ha, DataSize, 2); // generate the input data
    /* dim3 is the three-component vector type (X,Y,Z) provided by CUDA.
       Each block is limited to 1024 threads, (X*Y*Z) <= 1024;
       a grid is limited to 65535 blocks per dimension, (X*Y) <= 65535,
       and uses at most two dimensions. */
    dim3 block(DataSize/2, 1, 1); // thread layout and size
    dim3 grid(2, 1, 1);           // block layout and size
    int i;
    printf("A\n");
    for (i = 0; i < DataSize; i++)
        printf("%3d ", Ha[i]);
    int *Da; // GPU array
    hipMalloc((void**)&Da, size); // allocate the device array
    hipMemcpy(Da, Ha, size, hipMemcpyHostToDevice); // copy the data to the GPU
    hipLaunchKernelGGL(( Add_A) , dim3(grid), dim3(block) , 0, 0, Da); // launch the kernel
    hipDeviceSynchronize();
    hipMemcpy(Ha, Da, size, hipMemcpyDeviceToHost); // copy the (reduced) result back to the CPU
    printf("\n%3d\n", Ha[0]);
    // free the memory
    free(Ha);
    hipFree(Da);
}
a7c3fa44af101101dc38b024f7a6566b2999189f.cu
// Example program: adding up a one-dimensional array
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#define DataSize 16

void GenerateNumbers(int *number, int size, int k) // generate random data
{
    int i;
    srand(k * time(NULL));
    for (i = 0; i < size; i++)
        number[i] = rand() % 100;
}

__global__ void Add_A(int *Da) // kernel function
{
    int tx = threadIdx.x; // x id of the thread
    int bx = blockIdx.x;  // x id of the block
    int bn = blockDim.x;  // number of threads along the block's x axis
    int id = bx*bn+tx;    // compute the array index
    /*int i = DataSize/2;
    while (i != 0) {
        if (id < i)
            Da[id] += Da[id + i];
        __syncthreads();
        i /= 2;
    }*/
    int i = DataSize/2;
    for (int j = 0; j < 4; j++) { // log2(16) = 4
        if (id < i)
            Da[id] += Da[id + i];
        __syncthreads( );
        i /= 2;
    }
}

int main()
{
    int *Ha; // CPU array
    int size = DataSize * sizeof(int);
    Ha = (int*)malloc(size); // allocate the host array
    GenerateNumbers(Ha, DataSize, 2); // generate the input data
    /* dim3 is the three-component vector type (X,Y,Z) provided by CUDA.
       Each block is limited to 1024 threads, (X*Y*Z) <= 1024;
       a grid is limited to 65535 blocks per dimension, (X*Y) <= 65535,
       and uses at most two dimensions. */
    dim3 block(DataSize/2, 1, 1); // thread layout and size
    dim3 grid(2, 1, 1);           // block layout and size
    int i;
    printf("A\n");
    for (i = 0; i < DataSize; i++)
        printf("%3d ", Ha[i]);
    int *Da; // GPU array
    cudaMalloc((void**)&Da, size); // allocate the device array
    cudaMemcpy(Da, Ha, size, cudaMemcpyHostToDevice); // copy the data to the GPU
    Add_A <<< grid, block >>> (Da); // launch the kernel
    cudaThreadSynchronize();
    cudaMemcpy(Ha, Da, size, cudaMemcpyDeviceToHost); // copy the (reduced) result back to the CPU
    printf("\n%3d\n", Ha[0]);
    // free the memory
    free(Ha);
    cudaFree(Da);
}
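Both versions of this example leave the reduced sum of the 16 random inputs in element 0 of the array. A quick way to check the result is to accumulate a reference sum on the host before the device copies its partially reduced buffer back over Ha; the helper below is a hypothetical addition for that purpose and is not part of the original program.

/* Hypothetical host-side helper (not in the original example): sum the
 * generated inputs on the CPU so the value can later be compared with the
 * GPU result in Ha[0]. Call it right after GenerateNumbers(), before the
 * copy back from the device overwrites Ha. */
int cpu_sum_reference(const int *data, int n)
{
    int s = 0;
    for (int k = 0; k < n; k++)
        s += data[k];
    return s;
}

For example, taking int ref = cpu_sum_reference(Ha, DataSize); immediately after GenerateNumbers(Ha, DataSize, 2) and printing it next to Ha[0] at the end gives a simple correctness check of the tree reduction.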
ba917170da98e1cfe2e6df7cf0f2a154984c6ea9.hip
// !!! This is a file automatically generated by hipify!!! /* The code generates a 3D image of a stack of images. For each image (matrix) calculate the variance at all points, and then create a topography matrix (relief matrix) with the position (number in the stack) of the image that had the largest variance in a pixel. The same with the color of the image (RGB matrices). */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <time.h> //************Global variables*************** //************** Kernel CUDA ********************* __global__ void EDF(int *R_d, int *G_d, int *B_d, int *Rf_d, int *Gf_d, int *Bf_d, int *topof_d, float *max_d, int d) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; unsigned long long int id = idx + idy*blockDim.x*gridDim.x; unsigned long long int dimx = 1040, dimy = 1388, tam_imag = 1388 * 1040, msk = 3, M_d[9], k; float X = 0.f, Xprom = 0.f, Y = 0.f, var = 0.f; //printf("Run=%lld ", id); //Rf_d[id] = 99; if (id<tam_imag) { M_d[0] = ((idx == 0 || idy == 0) ? 0 : G_d[id - 1 - dimy]); if (id - dimy >= 0 && id - dimy < tam_imag) M_d[1] = ((idx == 0) ? 0 : G_d[id - dimy]); else { printf("val=%lld _LINE_=%d\n" , id - dimy , __LINE__); return; } //M_d[1] = ((idx == 0) ? 0 : 10); if (id + 1 - dimy >= 0 && id + 1 - dimy < tam_imag) M_d[2] = ((idx == 0 || idy == dimy) ? 0 : G_d[id + 1 - dimy]); else { printf("val=%lld _LINE_=%d\n" , id + 1 - dimy , __LINE__); return; } if (id - 1 >= 0 && id - 1 < tam_imag) M_d[3] = ((idy == 0) ? 0 : G_d[id - 1]); else { printf("val=%lld _LINE_=%d\n" , id - 1 , __LINE__); return; } if (id >= 0 && id < tam_imag) M_d[4] = G_d[id]; else { printf("val=%lld _LINE_=%d\n" , id , __LINE__); return; } if (id + 1 >= 0 && id + 1 < tam_imag) M_d[5] = ((idy == dimy) ? 0 : G_d[id + 1]); else { printf("val=%lld _LINE_=%d\n" , id + 1 , __LINE__); return; } if (id - 1 + dimy >= 0 && id - 1 + dimy < tam_imag) M_d[6] = ((idx == dimx || idy == 0) ? 0 : G_d[id - 1 + dimy]); else { printf("val=%lld _LINE_=%d\n" , id - 1 + dimy , __LINE__); return; } if (id + dimy >= 0 && id + dimy < tam_imag) M_d[7] = ((idx == dimx) ? 0 : G_d[id + dimy]); else { printf("val=%lld _LINE_=%d\n" , id + dimy , __LINE__); return; } if (id + 1 + dimy >= 0 && id + 1 + dimy < tam_imag) M_d[8] = ((idx == dimx || idy == dimy) ? 
0 : G_d[id + 1 + dimy]); else { printf("val=%lld _LINE_=%d\n" , id + 1 + dimy , __LINE__); return; } for (k = 0;k<msk*msk;k++) X += M_d[k]; Xprom = ((float)X) / (msk*msk); for (k = 0;k<msk*msk;k++) Y += (Xprom - M_d[k])*(Xprom - M_d[k]); var = Y / (msk*msk); //syncthreads(); __syncthreads(); //hosam if (var>max_d[id]) { topof_d[id] = d; Rf_d[id]=R_d[id]; Gf_d[id] = G_d[id]; Bf_d[id] = B_d[id]; max_d[id] = var; } } } long msk = 3, dimx = 1040, dimy = 1388, tam_imag = 1388 * 1040; //*****************Main function********************** int main(int argc, char* argv[]) { //***************Variables************** int i, j, m, cont, tam_B, init, fin; hipError_t cudaStatus; init=atoi(argv[1]); fin=atoi(argv[2]); //init = 5; //fin = 5; FILE *matrizR, *matrizG, *matrizB; int d; float t; clock_t tinicio, t_GPU; tinicio = clock(); int *topof_h, *R_h, *G_h, *B_h, *Rf_h, *Gf_h, *Bf_h; float *max_h; int *topof_d, *R_d, *G_d, *B_d, *Rf_d, *Gf_d, *Bf_d; float *max_d; //************ Malloc in host and device *************** R_h = (int *)malloc(sizeof(int)*tam_imag); hipMalloc((void**)&R_d, tam_imag * sizeof(int)); G_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus = hipMalloc((void**)&G_d, tam_imag * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed for G_d Line=%d!\n" , __LINE__); exit(0); } B_h = (int *)malloc(sizeof(int)*tam_imag); hipMalloc((void**)&B_d, tam_imag * sizeof(int)); Rf_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus = hipMalloc((void**)&Rf_d, tam_imag * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed for Rf_d!\n"); exit(0); } Gf_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus=hipMalloc((void**)&Gf_d, tam_imag * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } Bf_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus=hipMalloc((void**)&Bf_d, tam_imag * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } topof_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus=hipMalloc((void**)&topof_d, tam_imag * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } max_h = (float *)malloc(sizeof(float)*tam_imag); cudaStatus=hipMalloc((void**)&max_d, tam_imag * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } //hipMemset((void *)max_h, 0, sizeof(float)*tam_imag); //*************** Principal FOR **************** for (d = init;d <= fin;d++) { //printf("d=%d \n", d); //*****************Read RGB files**************** char rutaR[1024]; //sprintf(rutaR, "%s%d%s", "D:/Freelancer/cuda/RGB/", d, "/R.txt"); sprintf(rutaR, "%s%d%s","RGB/",d,"/R"); matrizR = fopen(rutaR, "r+"); char rutaG[1024]; //sprintf(rutaG, "%s%d%s", "D:/Freelancer/cuda/RGB/", d, "/G.txt"); sprintf(rutaG, "%s%d%s","RGB/",d,"/G"); matrizG = fopen(rutaG, "r+"); if (!matrizG) { printf("Error open file \n"); exit(0); } char rutaB[1024]; //sprintf(rutaB, "%s%d%s", "D:/Freelancer/cuda/RGB/", d, "/B.txt"); sprintf(rutaB, "%s%d%s","RGB/",d,"/B"); matrizB = fopen(rutaB, "r+"); printf("dimx=%d\n", dimx); printf("dimy=%d\n", dimy); printf("tam_imag=%d\n", tam_imag); 
printf("dimx*dimy=%d\n", dimx*dimy); for (i = 0;i<dimx;i++) { for (j = 0;j<dimy;j++) { fscanf(matrizR, "%d", &R_h[i*dimy + j]); fscanf(matrizG, "%d", &G_h[i*dimy + j]); fscanf(matrizB, "%d", &B_h[i*dimy + j]); } } fclose(matrizR); fclose(matrizG); fclose(matrizB); //***************** Kernel EDF ******************* cudaStatus = hipMemcpy(R_d, R_h, sizeof(int)*tam_imag, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed for %d!\n", __LINE__); exit(0); } cudaStatus = hipMemcpy(G_d, G_h, sizeof(int)*tam_imag, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } cudaStatus=hipMemcpy(B_d, B_h, sizeof(int)*tam_imag, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } cudaStatus = hipMemcpy(Rf_d, Rf_h, sizeof(int)*tam_imag, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } cudaStatus=hipMemcpy(Gf_d, Gf_h, sizeof(int)*tam_imag, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } cudaStatus=hipMemcpy(Bf_d, Bf_h, sizeof(int)*tam_imag, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } cudaStatus=hipMemcpy(topof_d, topof_h, sizeof(int)*tam_imag, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } cudaStatus=hipMemcpy(max_d, max_h, sizeof(float)*tam_imag, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } dim3 Grid(347, 20); dim3 Block(13, 16); hipLaunchKernelGGL(( EDF) , dim3(Grid), dim3(Block) , 0, 0, R_d, G_d, B_d, Rf_d, Gf_d, Bf_d, topof_d, max_d, d); printf("\n\n FINISH \n\n"); //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ The code stops here cudaStatus = hipMemcpy(Rf_h, Rf_d, sizeof(int)*tam_imag, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } cudaStatus=hipMemcpy(Gf_h, Gf_d, sizeof(int)*tam_imag, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } cudaStatus=hipMemcpy(Bf_h, Bf_d, sizeof(int)*tam_imag, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } cudaStatus=hipMemcpy(topof_h, topof_d, sizeof(int)*tam_imag, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } cudaStatus=hipMemcpy(max_h, max_d, sizeof(float)*tam_imag, 
hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", hipGetErrorString(cudaStatus)); exit(0); } } //End for //****************Save results************** printf("Finish\n"); FILE *archTopo, *archR, *archG, *archB; /*archTopo = fopen("D:/Freelancer/cuda/Resultados/topo.txt", "w+"); archR = fopen("D:/Freelancer/cuda/Resultados/Resultados/R.txt", "w+"); archG = fopen("D:/Freelancer/cuda/Resultados/Resultados/G.txt", "w+"); archB = fopen("D:/Freelancer/cuda/Resultados/Resultados/B.txt", "w+"); */ archTopo=fopen("Resultados/topo","w+"); archR=fopen("Resultados/R","w+"); archG=fopen("Resultados/G","w+"); archB=fopen("Resultados/B","w+"); for (i = 0;i<dimx;i++) { for (j = 0;j<dimy;j++) { fprintf(archTopo, "%d ", topof_h[i*dimy + j]); fprintf(archR, "%d ", Rf_h[i*dimy + j]); fprintf(archG, "%d ", Gf_h[i*dimy + j]); fprintf(archB, "%d ", Bf_h[i*dimy + j]); } fprintf(archTopo, "\n"); fprintf(archR, "\n"); fprintf(archG, "\n"); fprintf(archB, "\n"); } fclose(archTopo); fclose(archR); fclose(archG); fclose(archB); free(max_h); free(topof_h); free(R_h); free(G_h); free(B_h); free(Rf_h); free(Gf_h); free(Bf_h); hipFree(max_d); hipFree(topof_d); hipFree(R_d); hipFree(G_d); hipFree(B_d); hipFree(Rf_d); hipFree(Gf_d); hipFree(Bf_d); t_GPU = clock(); t = ((float)t_GPU - (float)tinicio) / CLOCKS_PER_SEC; printf("\ntiempo de procesamiento de varianzas: %6.3fs\n", t); //getchar (); cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; }//END Main function
ba917170da98e1cfe2e6df7cf0f2a154984c6ea9.cu
/* The code generates a 3D image of a stack of images. For each image (matrix) calculate the variance at all points, and then create a topography matrix (relief matrix) with the position (number in the stack) of the image that had the largest variance in a pixel. The same with the color of the image (RGB matrices). */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <time.h> //************Global variables*************** //************** Kernel CUDA ********************* __global__ void EDF(int *R_d, int *G_d, int *B_d, int *Rf_d, int *Gf_d, int *Bf_d, int *topof_d, float *max_d, int d) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; unsigned long long int id = idx + idy*blockDim.x*gridDim.x; unsigned long long int dimx = 1040, dimy = 1388, tam_imag = 1388 * 1040, msk = 3, M_d[9], k; float X = 0.f, Xprom = 0.f, Y = 0.f, var = 0.f; //printf("Run=%lld ", id); //Rf_d[id] = 99; if (id<tam_imag) { M_d[0] = ((idx == 0 || idy == 0) ? 0 : G_d[id - 1 - dimy]); if (id - dimy >= 0 && id - dimy < tam_imag) M_d[1] = ((idx == 0) ? 0 : G_d[id - dimy]); else { printf("val=%lld _LINE_=%d\n" , id - dimy , __LINE__); return; } //M_d[1] = ((idx == 0) ? 0 : 10); if (id + 1 - dimy >= 0 && id + 1 - dimy < tam_imag) M_d[2] = ((idx == 0 || idy == dimy) ? 0 : G_d[id + 1 - dimy]); else { printf("val=%lld _LINE_=%d\n" , id + 1 - dimy , __LINE__); return; } if (id - 1 >= 0 && id - 1 < tam_imag) M_d[3] = ((idy == 0) ? 0 : G_d[id - 1]); else { printf("val=%lld _LINE_=%d\n" , id - 1 , __LINE__); return; } if (id >= 0 && id < tam_imag) M_d[4] = G_d[id]; else { printf("val=%lld _LINE_=%d\n" , id , __LINE__); return; } if (id + 1 >= 0 && id + 1 < tam_imag) M_d[5] = ((idy == dimy) ? 0 : G_d[id + 1]); else { printf("val=%lld _LINE_=%d\n" , id + 1 , __LINE__); return; } if (id - 1 + dimy >= 0 && id - 1 + dimy < tam_imag) M_d[6] = ((idx == dimx || idy == 0) ? 0 : G_d[id - 1 + dimy]); else { printf("val=%lld _LINE_=%d\n" , id - 1 + dimy , __LINE__); return; } if (id + dimy >= 0 && id + dimy < tam_imag) M_d[7] = ((idx == dimx) ? 0 : G_d[id + dimy]); else { printf("val=%lld _LINE_=%d\n" , id + dimy , __LINE__); return; } if (id + 1 + dimy >= 0 && id + 1 + dimy < tam_imag) M_d[8] = ((idx == dimx || idy == dimy) ? 
0 : G_d[id + 1 + dimy]); else { printf("val=%lld _LINE_=%d\n" , id + 1 + dimy , __LINE__); return; } for (k = 0;k<msk*msk;k++) X += M_d[k]; Xprom = ((float)X) / (msk*msk); for (k = 0;k<msk*msk;k++) Y += (Xprom - M_d[k])*(Xprom - M_d[k]); var = Y / (msk*msk); //syncthreads(); __syncthreads(); //hosam if (var>max_d[id]) { topof_d[id] = d; Rf_d[id]=R_d[id]; Gf_d[id] = G_d[id]; Bf_d[id] = B_d[id]; max_d[id] = var; } } } long msk = 3, dimx = 1040, dimy = 1388, tam_imag = 1388 * 1040; //*****************Main function********************** int main(int argc, char* argv[]) { //***************Variables************** int i, j, m, cont, tam_B, init, fin; cudaError_t cudaStatus; init=atoi(argv[1]); fin=atoi(argv[2]); //init = 5; //fin = 5; FILE *matrizR, *matrizG, *matrizB; int d; float t; clock_t tinicio, t_GPU; tinicio = clock(); int *topof_h, *R_h, *G_h, *B_h, *Rf_h, *Gf_h, *Bf_h; float *max_h; int *topof_d, *R_d, *G_d, *B_d, *Rf_d, *Gf_d, *Bf_d; float *max_d; //************ Malloc in host and device *************** R_h = (int *)malloc(sizeof(int)*tam_imag); cudaMalloc((void**)&R_d, tam_imag * sizeof(int)); G_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus = cudaMalloc((void**)&G_d, tam_imag * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed for G_d Line=%d!\n" , __LINE__); exit(0); } B_h = (int *)malloc(sizeof(int)*tam_imag); cudaMalloc((void**)&B_d, tam_imag * sizeof(int)); Rf_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus = cudaMalloc((void**)&Rf_d, tam_imag * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed for Rf_d!\n"); exit(0); } Gf_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus=cudaMalloc((void**)&Gf_d, tam_imag * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } Bf_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus=cudaMalloc((void**)&Bf_d, tam_imag * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } topof_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus=cudaMalloc((void**)&topof_d, tam_imag * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } max_h = (float *)malloc(sizeof(float)*tam_imag); cudaStatus=cudaMalloc((void**)&max_d, tam_imag * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } //cudaMemset((void *)max_h, 0, sizeof(float)*tam_imag); //*************** Principal FOR **************** for (d = init;d <= fin;d++) { //printf("d=%d \n", d); //*****************Read RGB files**************** char rutaR[1024]; //sprintf(rutaR, "%s%d%s", "D:/Freelancer/cuda/RGB/", d, "/R.txt"); sprintf(rutaR, "%s%d%s","RGB/",d,"/R"); matrizR = fopen(rutaR, "r+"); char rutaG[1024]; //sprintf(rutaG, "%s%d%s", "D:/Freelancer/cuda/RGB/", d, "/G.txt"); sprintf(rutaG, "%s%d%s","RGB/",d,"/G"); matrizG = fopen(rutaG, "r+"); if (!matrizG) { printf("Error open file \n"); exit(0); } char rutaB[1024]; //sprintf(rutaB, "%s%d%s", "D:/Freelancer/cuda/RGB/", d, "/B.txt"); sprintf(rutaB, "%s%d%s","RGB/",d,"/B"); matrizB = fopen(rutaB, "r+"); printf("dimx=%d\n", dimx); printf("dimy=%d\n", dimy); printf("tam_imag=%d\n", 
tam_imag); printf("dimx*dimy=%d\n", dimx*dimy); for (i = 0;i<dimx;i++) { for (j = 0;j<dimy;j++) { fscanf(matrizR, "%d", &R_h[i*dimy + j]); fscanf(matrizG, "%d", &G_h[i*dimy + j]); fscanf(matrizB, "%d", &B_h[i*dimy + j]); } } fclose(matrizR); fclose(matrizG); fclose(matrizB); //***************** Kernel EDF ******************* cudaStatus = cudaMemcpy(R_d, R_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed for %d!\n", __LINE__); exit(0); } cudaStatus = cudaMemcpy(G_d, G_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(B_d, B_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus = cudaMemcpy(Rf_d, Rf_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(Gf_d, Gf_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(Bf_d, Bf_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(topof_d, topof_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(max_d, max_h, sizeof(float)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } dim3 Grid(347, 20); dim3 Block(13, 16); EDF <<<Grid, Block >>>(R_d, G_d, B_d, Rf_d, Gf_d, Bf_d, topof_d, max_d, d); printf("\n\n FINISH \n\n"); //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ The code stops here cudaStatus = cudaMemcpy(Rf_h, Rf_d, sizeof(int)*tam_imag, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(Gf_h, Gf_d, sizeof(int)*tam_imag, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(Bf_h, Bf_d, sizeof(int)*tam_imag, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(topof_h, topof_d, sizeof(int)*tam_imag, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(max_h, max_d, 
sizeof(float)*tam_imag, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } } //End for //****************Save results************** printf("Finish\n"); FILE *archTopo, *archR, *archG, *archB; /*archTopo = fopen("D:/Freelancer/cuda/Resultados/topo.txt", "w+"); archR = fopen("D:/Freelancer/cuda/Resultados/Resultados/R.txt", "w+"); archG = fopen("D:/Freelancer/cuda/Resultados/Resultados/G.txt", "w+"); archB = fopen("D:/Freelancer/cuda/Resultados/Resultados/B.txt", "w+"); */ archTopo=fopen("Resultados/topo","w+"); archR=fopen("Resultados/R","w+"); archG=fopen("Resultados/G","w+"); archB=fopen("Resultados/B","w+"); for (i = 0;i<dimx;i++) { for (j = 0;j<dimy;j++) { fprintf(archTopo, "%d ", topof_h[i*dimy + j]); fprintf(archR, "%d ", Rf_h[i*dimy + j]); fprintf(archG, "%d ", Gf_h[i*dimy + j]); fprintf(archB, "%d ", Bf_h[i*dimy + j]); } fprintf(archTopo, "\n"); fprintf(archR, "\n"); fprintf(archG, "\n"); fprintf(archB, "\n"); } fclose(archTopo); fclose(archR); fclose(archG); fclose(archB); free(max_h); free(topof_h); free(R_h); free(G_h); free(B_h); free(Rf_h); free(Gf_h); free(Bf_h); cudaFree(max_d); cudaFree(topof_d); cudaFree(R_d); cudaFree(G_d); cudaFree(B_d); cudaFree(Rf_d); cudaFree(Gf_d); cudaFree(Bf_d); t_GPU = clock(); t = ((float)t_GPU - (float)tinicio) / CLOCKS_PER_SEC; printf("\ntiempo de procesamiento de varianzas: %6.3fs\n", t); //getchar (); cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; }//END Main function
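Editor's note: the EDF program above launches EDF <<<Grid, Block>>> without checking the launch result (the in-code marker "The code stops here" sits right after it), and several of its allocation checks print "cudaMemcpy failed" even though they guard cudaMalloc calls, which makes failures hard to localize. Below is a minimal, self-contained sketch of the usual launch-check pattern; fillKernel is a stand-in of my own and is not part of the original program.

#include <cstdio>
#include <cuda_runtime.h>

// Stand-in kernel; only the error-handling pattern around the launch matters.
__global__ void fillKernel(int *out, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) out[idx] = idx;
}

int main() {
    const int n = 1024;
    int *d_out = nullptr;
    cudaError_t status = cudaMalloc((void **)&d_out, n * sizeof(int));
    if (status != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(status));
        return 1;
    }
    fillKernel<<<(n + 255) / 256, 256>>>(d_out, n);
    status = cudaGetLastError();            // catches invalid launch configurations
    if (status != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(status));
        return 1;
    }
    status = cudaDeviceSynchronize();       // catches faults raised while the kernel ran
    if (status != cudaSuccess) {
        fprintf(stderr, "kernel execution failed: %s\n", cudaGetErrorString(status));
        return 1;
    }
    cudaFree(d_out);
    return 0;
}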
6072ef1622dc59c92dce3cc032628bccd0cf7703.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include "maxpool_with_argmax_impl.cuh" #include "runtime/device/gpu/cuda_common.h" #include "include/hip/hip_fp16.h" template <typename T, typename S> __global__ void MaxPoolWithArgmax(const T* input, const int n, const int c, const int h, const int w, const int windowHeight, const int windowWidth, const int strideHeight, const int strideWidth, const int padTop, const int padLeft, const int outputHeight, const int outputWidth, const int outputNCHW, const int outputCHW, const int outputHW, T* output, S *index) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (outputNCHW); pos += blockDim.x * gridDim.x) { const int posn = pos / outputCHW; const int posc = pos / outputHW % c; const int posh = pos / outputWidth % outputHeight; const int posw = pos % outputWidth; int hstart = posh * strideHeight - padTop; int wstart = posw * strideWidth - padLeft; const int hend = min(hstart + windowHeight, h); const int wend = min(wstart + windowWidth, w); hstart = max(hstart, 0); wstart = max(wstart, 0); S inputStart = posn*c*h*w; S maxIdx = posc*h*w + hstart*w + wstart; T maxData = input[inputStart+maxIdx]; for (int hcur = hstart; hcur < hend; ++hcur) { for (int wcur = wstart; wcur < wend; ++wcur) { S inputIdx = posc*h*w + hcur*w + wcur; T inputData = input[inputStart+inputIdx]; if (inputData > maxData) { maxIdx = inputIdx; maxData = inputData; } } } output[pos] = maxData; index[pos] = maxIdx; } return; } template <typename T, typename S> void CalMaxPoolWithArgmax(const T* input, const int n, const int c, const int h, const int w, const int windowHeight, const int windowWidth, const int strideHeight, const int strideWidth, const int padTop, const int padLeft, const int outputHeight, const int outputWidth, T* output, S *index, hipStream_t cuda_stream) { const int outputNCHW = n*c*outputHeight*outputWidth; const int outputCHW = c*outputHeight*outputWidth; const int outputHW = outputHeight*outputWidth; hipLaunchKernelGGL(( MaxPoolWithArgmax), dim3(GET_BLOCKS(n*c*outputHeight*outputWidth)), dim3(GET_THREADS), 0, cuda_stream, input, n, c, h, w, windowHeight, windowWidth, strideHeight, strideWidth, padTop, padLeft, outputHeight, outputWidth, outputNCHW, outputCHW, outputHW, output, index); return; } template void CalMaxPoolWithArgmax<float, int>(const float* input, const int n, const int c, const int h, const int w, const int windowHeight, const int windowWidth, const int strideHeight, const int strideWidth, const int padTop, const int padLeft, const int outputHeight, const int outputWidth, float* output, int* index, hipStream_t cuda_stream); template void CalMaxPoolWithArgmax<half, int>(const half* input, const int n, const int c, const int h, const int w, const int windowHeight, const int windowWidth, const int strideHeight, const int strideWidth, const int padTop, const int padLeft, const int 
outputHeight, const int outputWidth, half* output, int* index, hipStream_t cuda_stream);
6072ef1622dc59c92dce3cc032628bccd0cf7703.cu
/** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include "maxpool_with_argmax_impl.cuh" #include "runtime/device/gpu/cuda_common.h" #include "include/cuda_fp16.h" template <typename T, typename S> __global__ void MaxPoolWithArgmax(const T* input, const int n, const int c, const int h, const int w, const int windowHeight, const int windowWidth, const int strideHeight, const int strideWidth, const int padTop, const int padLeft, const int outputHeight, const int outputWidth, const int outputNCHW, const int outputCHW, const int outputHW, T* output, S *index) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (outputNCHW); pos += blockDim.x * gridDim.x) { const int posn = pos / outputCHW; const int posc = pos / outputHW % c; const int posh = pos / outputWidth % outputHeight; const int posw = pos % outputWidth; int hstart = posh * strideHeight - padTop; int wstart = posw * strideWidth - padLeft; const int hend = min(hstart + windowHeight, h); const int wend = min(wstart + windowWidth, w); hstart = max(hstart, 0); wstart = max(wstart, 0); S inputStart = posn*c*h*w; S maxIdx = posc*h*w + hstart*w + wstart; T maxData = input[inputStart+maxIdx]; for (int hcur = hstart; hcur < hend; ++hcur) { for (int wcur = wstart; wcur < wend; ++wcur) { S inputIdx = posc*h*w + hcur*w + wcur; T inputData = input[inputStart+inputIdx]; if (inputData > maxData) { maxIdx = inputIdx; maxData = inputData; } } } output[pos] = maxData; index[pos] = maxIdx; } return; } template <typename T, typename S> void CalMaxPoolWithArgmax(const T* input, const int n, const int c, const int h, const int w, const int windowHeight, const int windowWidth, const int strideHeight, const int strideWidth, const int padTop, const int padLeft, const int outputHeight, const int outputWidth, T* output, S *index, cudaStream_t cuda_stream) { const int outputNCHW = n*c*outputHeight*outputWidth; const int outputCHW = c*outputHeight*outputWidth; const int outputHW = outputHeight*outputWidth; MaxPoolWithArgmax<<<GET_BLOCKS(n*c*outputHeight*outputWidth), GET_THREADS, 0, cuda_stream>>>( input, n, c, h, w, windowHeight, windowWidth, strideHeight, strideWidth, padTop, padLeft, outputHeight, outputWidth, outputNCHW, outputCHW, outputHW, output, index); return; } template void CalMaxPoolWithArgmax<float, int>(const float* input, const int n, const int c, const int h, const int w, const int windowHeight, const int windowWidth, const int strideHeight, const int strideWidth, const int padTop, const int padLeft, const int outputHeight, const int outputWidth, float* output, int* index, cudaStream_t cuda_stream); template void CalMaxPoolWithArgmax<half, int>(const half* input, const int n, const int c, const int h, const int w, const int windowHeight, const int windowWidth, const int strideHeight, const int strideWidth, const int padTop, const int padLeft, const int outputHeight, const int outputWidth, half* output, int* index, cudaStream_t cuda_stream);
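Editor's note: both MaxPoolWithArgmax versions above recover the (n, c, h, w) coordinates of an output element from its flat NCHW index with a chain of divisions and modulos. The short host-side sketch below checks that this decomposition round-trips exactly; the sizes used are arbitrary examples of my own, not values from the original file.

#include <cstdio>

// Host-side check of the flat-index decomposition used in MaxPoolWithArgmax.
int main() {
    const int n = 2, c = 3, outputHeight = 4, outputWidth = 5;   // example sizes only
    const int outputCHW = c * outputHeight * outputWidth;
    const int outputHW = outputHeight * outputWidth;
    for (int pos = 0; pos < n * outputCHW; ++pos) {
        const int posn = pos / outputCHW;
        const int posc = pos / outputHW % c;
        const int posh = pos / outputWidth % outputHeight;
        const int posw = pos % outputWidth;
        // Recompose the flat index and confirm the decomposition is exact.
        const int back = ((posn * c + posc) * outputHeight + posh) * outputWidth + posw;
        if (back != pos) {
            printf("mismatch at pos=%d\n", pos);
            return 1;
        }
    }
    printf("all %d output indices decompose consistently\n", n * outputCHW);
    return 0;
}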
31cc896862d44bbb5fc67457cc97e11895ffcd60.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex mob(hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<15;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo * cosc(tw*z) + qoo*qoo); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<15;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo * cosc(tw*z) + qoo*qoo); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<15;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex 
the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<15;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { 
scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ hipComplex hylva(hipComplex z) { hipComplex out(j1f(1/j0f(z.r)),j1f(1/j0f(z.i))); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex arreg(hipComplex q, hipComplex r, hipComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ hipComplex out(0.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); hipComplex morra(-1.0,0.0); hipComplex tla(1.0,0.0); 
hipComplex vnn(0.0,0.0); hipComplex fou(4.0,0.0); hipComplex tw(2.0,0.0); hipComplex run(1.0,0.0); int v; for(v=0;v<20;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*sins(tw*z*run)/(run-roo); } return fou*out; } __device__ hipComplex urreg(hipComplex q, hipComplex r, hipComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ hipComplex out(0.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); hipComplex morra(-1.0,0.0); hipComplex tla(1.0,0.0); hipComplex vnn(0.0,0.0); hipComplex fou(4.0,0.0); hipComplex tw(2.0,0.0); hipComplex run(1.0,0.0); int v; for(v=0;v<10;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*the3(tw*z*run,r)/(run-roo); } return fou*out; } // * small q-exponential __device__ hipComplex qexp(hipComplex z, hipComplex q) { hipComplex mone(-1.0,0.0); hipComplex une(1.0,0.0); return une/qpoch(z,q); } //* large q exponential is just qpoch(-z,q) __device__ hipComplex qExp(hipComplex z, hipComplex q) { hipComplex mone(-1.0,0.0); hipComplex une(1.0,0.0); return qpoch(mone*z,q); } __device__ hipComplex sinq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qexp(z*aie,q) -qexp(z*aie,q))/doo; return out; } __device__ hipComplex cosq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qexp(z*aie,q) +qexp(z*aie,q))/doo; return out; } __device__ hipComplex Sinq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qExp(z*aie,q) -qExp(z*aie,q))/doo; return out; } __device__ hipComplex Cosq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qExp(z*aie,q) +qExp(z*aie,q))/doo; return out; } __device__ hipComplex asins(hipComplex z) { float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float fla = z.i/abs(z.i); // *signum, but without a comparison, probably a saner way to do this? 
// hipComplex out(0.0,0.0); out.r = asinf(bet); out.i = fla * logf(alp + sqrtf(alp*alp-1)); return out; } __device__ int gcd(int a, int b) { int remainder = a % b; if (remainder == 0) { return b; } return gcd(b, remainder); } /* Real Analytic Eisenstein Series */ __device__ hipComplex reis(hipComplex s, hipComplex z) { // see en.wikipedia.org/wiki/Real_analytic_Eisenstein_series hipComplex out(0.0,0.0); hipComplex hav(0.5,0.0); hipComplex xu=out; hipComplex yu=out; yu.r = z.i; int m,n; hipComplex ema=out; hipComplex ena=out; hipComplex den=out; for(m=-20;m<20;m++) { for(n=-20;n<20;n++) { if((m!=0)&&(n!=0)) { if((gcd(m,n)==1)) { ena.r = n; ema.r = m; den.r = norg(ema*z+ena); out = out + powc(yu,s)/powc(den,s/hav); } } } } return out; } __device__ hipComplex thu3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * asins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex trev(hipComplex lav, hipComplex mel, hipComplex rel) { hipComplex out(0.0,0.0); hipComplex V(0.739085133215160641655312087674,0.0); int v; for(v=0;v<3;v++) { lav = lav - rel*(cosc(lav)-powc(V,rel))/cosc(lav); out = out + mel*(cosc(lav)-powc(V,mel)); } return out; } __device__ hipComplex polylog(hipComplex z, hipComplex s) { hipComplex out(0.0,0.0); hipComplex oom(1.0,0.0); hipComplex flag=oom; int v; for(v=0;v<30;v++) { flag = flag + oom; out = out + powc(z,flag)/powc(flag,s); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale = 1.3; float fx = scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(1.0,0.0); hipComplex eccume(1.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(0.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex 
spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); hipComplex gren(2.0,0.0); hipComplex next=flurn; hipComplex current = cue; hipComplex xnext = flurn; hipComplex xcurrent = cue; hipComplex rue=cue; hipComplex tinny(.0001,0.0001); hipComplex raga(0.5,27.0); hipComplex ruga(0.5,0.0); hipComplex senna(2.0,0.0); hipComplex renna(3.0,0.0); hipComplex finch(0.001,.001); hipComplex smenn(0.5,sqrtf(3.0)/2.0); hipComplex lmenn(0.96592582628906831,0.25881904510252074); hipComplex vmenn(-0.5,-sqrtf(3.0)/2.0); float ah, ex, feig; feig = 3.67; ex = 2.10981; float xa,xb,ya,yb,tta,ttb; char va,vb,vc; hipComplex seahorse(-0.75,0.123); hipComplex thr2(32.0,0.0); hipComplex t2=unity; hipComplex t3=unity; hipComplex t4=unity; /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; int uu; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // One way of describing this would be we want to perform Newton's method //on the Mandelbrot set /* preiterate */ //tex.stackexchange.com/questions/278843/making-a-phase-portrait-of-two-autonomous-system-of-differential-equations-with?fbclid=IwAR2Tz66CbUAq7LFVYck4uUGF5uQWnmzf5iZw3Bi8IOycvCC7czO6ZVgkz3s // this is not terribly hard to do with cuda // what we need: // x' = x - y -> dx / dt = x - y // y' = 1 - x^2 -> dy / dt = 1-x^2 // dy / dx = (dy / dt) / (dx/ dt) // so the trick is to convert dy/dx into a unit complex number to make this work, okay that's not that difficult q = mob(mouse,cue); t2 = the2(flurn,q); t3 = the3(flurn,q); t4 = the4(flurn,q); t2 = t2*t2*t2*t2*t2*t2*t2*t2; t3 = t3*t3*t3*t3*t3*t3*t3*t3; t4 = t4*t4*t4*t4*t4*t4*t4*t4; nue = t3+t3+t4; cue = thr2*nue*nue*nue/(t2*t3*t4); cue=asins(cue); if(norg(q)>1.0) { d_out[i].x = 0; d_out[i].y = 0; d_out[i].z = 0; d_out[i].w = 255; } else { double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - 
(expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
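Editor's note: the powc routine in the fractal kernel above evaluates z^w through the polar form |z|^c * e^(-d*theta), with angle d*ln|z| + c*theta, for w = c + d*i. The host-side sketch below checks that formula against the standard principal-branch complex power; the example values are arbitrary and std::complex is used only for the comparison, not in the original code.

#include <cstdio>
#include <cmath>
#include <complex>

// Host-side check of the polar-form complex power used by powc above.
int main() {
    const std::complex<double> z(0.3, -0.7), w(1.25, 0.5);   // arbitrary example values
    const double R = std::abs(z), theta = std::arg(z);
    const double c = w.real(), d = w.imag();
    const double mag = std::pow(R, c) * std::exp(-d * theta);
    const std::complex<double> manual(mag * std::cos(d * std::log(R) + c * theta),
                                      mag * std::sin(d * std::log(R) + c * theta));
    const std::complex<double> ref = std::pow(z, w);          // principal branch, same convention
    printf("manual: %f%+fi\n", manual.real(), manual.imag());
    printf("std   : %f%+fi\n", ref.real(), ref.imag());
    return 0;
}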
31cc896862d44bbb5fc67457cc97e11895ffcd60.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex mob(cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<15;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo * cosc(tw*z) + qoo*qoo); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<15;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo * cosc(tw*z) + qoo*qoo); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<15;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<15;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + 
qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* 
note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ cuComplex hylva(cuComplex z) { cuComplex out(j1f(1/j0f(z.r)),j1f(1/j0f(z.i))); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex arreg(cuComplex q, cuComplex r, cuComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ cuComplex out(0.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); cuComplex morra(-1.0,0.0); cuComplex tla(1.0,0.0); cuComplex vnn(0.0,0.0); cuComplex fou(4.0,0.0); cuComplex tw(2.0,0.0); cuComplex run(1.0,0.0); int v; for(v=0;v<20;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*sins(tw*z*run)/(run-roo); } return fou*out; } __device__ cuComplex urreg(cuComplex q, cuComplex r, cuComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know 
if these are derivatives with respect to z or q, we'll see */ cuComplex out(0.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); cuComplex morra(-1.0,0.0); cuComplex tla(1.0,0.0); cuComplex vnn(0.0,0.0); cuComplex fou(4.0,0.0); cuComplex tw(2.0,0.0); cuComplex run(1.0,0.0); int v; for(v=0;v<10;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*the3(tw*z*run,r)/(run-roo); } return fou*out; } // * small q-exponential __device__ cuComplex qexp(cuComplex z, cuComplex q) { cuComplex mone(-1.0,0.0); cuComplex une(1.0,0.0); return une/qpoch(z,q); } //* large q exponential is just qpoch(-z,q) __device__ cuComplex qExp(cuComplex z, cuComplex q) { cuComplex mone(-1.0,0.0); cuComplex une(1.0,0.0); return qpoch(mone*z,q); } __device__ cuComplex sinq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qexp(z*aie,q) -qexp(z*aie,q))/doo; return out; } __device__ cuComplex cosq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qexp(z*aie,q) +qexp(z*aie,q))/doo; return out; } __device__ cuComplex Sinq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qExp(z*aie,q) -qExp(z*aie,q))/doo; return out; } __device__ cuComplex Cosq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qExp(z*aie,q) +qExp(z*aie,q))/doo; return out; } __device__ cuComplex asins(cuComplex z) { float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float fla = z.i/abs(z.i); // *signum, but without a comparison, probably a saner way to do this? // cuComplex out(0.0,0.0); out.r = asinf(bet); out.i = fla * logf(alp + sqrtf(alp*alp-1)); return out; } __device__ int gcd(int a, int b) { int remainder = a % b; if (remainder == 0) { return b; } return gcd(b, remainder); } /* Real Analytic Eisenstein Series */ __device__ cuComplex reis(cuComplex s, cuComplex z) { // see en.wikipedia.org/wiki/Real_analytic_Eisenstein_series cuComplex out(0.0,0.0); cuComplex hav(0.5,0.0); cuComplex xu=out; cuComplex yu=out; yu.r = z.i; int m,n; cuComplex ema=out; cuComplex ena=out; cuComplex den=out; for(m=-20;m<20;m++) { for(n=-20;n<20;n++) { if((m!=0)&&(n!=0)) { if((gcd(m,n)==1)) { ena.r = n; ema.r = m; den.r = norg(ema*z+ena); out = out + powc(yu,s)/powc(den,s/hav); } } } } return out; } __device__ cuComplex thu3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * asins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex trev(cuComplex lav, cuComplex mel, cuComplex rel) { cuComplex out(0.0,0.0); cuComplex V(0.739085133215160641655312087674,0.0); int v; for(v=0;v<3;v++) { lav = lav - rel*(cosc(lav)-powc(V,rel))/cosc(lav); out = out + mel*(cosc(lav)-powc(V,mel)); } return out; } __device__ cuComplex polylog(cuComplex z, cuComplex s) { cuComplex out(0.0,0.0); cuComplex oom(1.0,0.0); cuComplex flag=oom; int v; for(v=0;v<30;v++) { flag = flag + oom; out = out + powc(z,flag)/powc(flag,s); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale = 1.3; float fx = scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(1.0,0.0); cuComplex eccume(1.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(0.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); cuComplex gren(2.0,0.0); cuComplex next=flurn; cuComplex current = cue; cuComplex xnext = flurn; cuComplex xcurrent = cue; cuComplex rue=cue; cuComplex tinny(.0001,0.0001); cuComplex raga(0.5,27.0); cuComplex ruga(0.5,0.0); cuComplex senna(2.0,0.0); cuComplex renna(3.0,0.0); cuComplex finch(0.001,.001); cuComplex smenn(0.5,sqrtf(3.0)/2.0); cuComplex lmenn(0.96592582628906831,0.25881904510252074); cuComplex vmenn(-0.5,-sqrtf(3.0)/2.0); float ah, ex, feig; feig = 3.67; ex = 2.10981; float xa,xb,ya,yb,tta,ttb; char va,vb,vc; cuComplex seahorse(-0.75,0.123); cuComplex thr2(32.0,0.0); cuComplex t2=unity; cuComplex t3=unity; cuComplex t4=unity; /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; int uu; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // One way of describing this would be we want to perform Newton's method //on the Mandelbrot set /* preiterate */ 
//tex.stackexchange.com/questions/278843/making-a-phase-portrait-of-two-autonomous-system-of-differential-equations-with?fbclid=IwAR2Tz66CbUAq7LFVYck4uUGF5uQWnmzf5iZw3Bi8IOycvCC7czO6ZVgkz3s // this is not terribly hard to do with cuda // what we need: // x' = x - y -> dx / dt = x - y // y' = 1 - x^2 -> dy / dt = 1-x^2 // dy / dx = (dy / dt) / (dx/ dt) // so the trick is to convert dy/dx into a unit complex number to make this work, okay that's not that difficult q = mob(mouse,cue); t2 = the2(flurn,q); t3 = the3(flurn,q); t4 = the4(flurn,q); t2 = t2*t2*t2*t2*t2*t2*t2*t2; t3 = t3*t3*t3*t3*t3*t3*t3*t3; t4 = t4*t4*t4*t4*t4*t4*t4*t4; nue = t3+t3+t4; cue = thr2*nue*nue*nue/(t2*t3*t4); cue=asins(cue); if(norg(q)>1.0) { d_out[i].x = 0; d_out[i].y = 0; d_out[i].z = 0; d_out[i].w = 255; } else { double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
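Editor's note: distanceKernel in the file above colours each in-disc pixel by folding the argument of the final complex value into three sin^2 ramps offset by pi/3, one per RGB channel. The sketch below reproduces just that phase-to-RGB mapping on the host; the sampling loop over a few phases is illustrative and not part of the original kernel.

#include <cstdio>
#include <cmath>

// Host-side sketch of the phase-to-colour mapping at the end of distanceKernel.
int main() {
    const double pi = 3.1415926535898;
    for (int k = 0; k <= 8; ++k) {
        const double phase = -pi + 2.0 * pi * k / 8.0;   // sample arguments in [-pi, pi]
        const double tha = (phase - pi) / (2.0 * pi);
        const unsigned char r = (unsigned char)(255.0 * pow(sin(pi * tha), 2));
        const unsigned char g = (unsigned char)(255.0 * pow(sin(pi * tha + pi / 3), 2));
        const unsigned char b = (unsigned char)(255.0 * pow(sin(pi * tha + 2 * pi / 3), 2));
        printf("phase % .3f -> rgb(%3d, %3d, %3d)\n", phase, (int)r, (int)g, (int)b);
    }
    return 0;
}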
4bf00b6eead63f9bec9dd7e1401588dd7bd39b4d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include "helper_cuda.h" #define BLOCK_DIM 16 //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set matrix multiply on GPU //! C = alpha * A * B + beta * C //! @param A matrix A as provided to device //! @param B matrix B as provided to device //! @param C matrix C as provided to device //! @param N height of matrix A and matrix C //! @param M width of matrix B and matrix C //! @param K width of matrix A and height of matrix C //! @param alpha scala value for matrix multiplication //! @param beta scala value for matrix summation with C //////////////////////////////////////////////////////////////////////////////// __global__ void sgemm_kernel(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0.f; for (int i = 0; i < K; ++i) sum += A[row * K + i] * B[i * K + col]; C[row * M + col] = alpha * sum + beta * C[row * M + col]; } void random_init(float *data, int size) { for (int i = 0; i < size; ++i) { data[i] = (rand() & 0xFF) / (float)RAND_MAX; } } int main() { float *A, *B, *C; float *d_A, *d_B, *d_C; int N, M, K; float alpha = 2.f; float beta = 1.f; int n_iter = 10; N = M = K = 2048; // allocation of linear memory space A = (float *)malloc(N * K * sizeof(float)); B = (float *)malloc(K * M * sizeof(float)); C = (float *)malloc(N * M * sizeof(float)); // allocation of gpu linear memory space checkCudaErrors(hipMalloc((void **)&d_A, N * K * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_B, K * M * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_C, N * M * sizeof(float))); // initialize randomized values for memory space random_init(A, N * K); random_init(B, K * M); random_init(C, N * M); // copy initial value for gpu memory checkCudaErrors(hipMemcpy(d_A, A, N * K * sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_B, B, K * M * sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_C, C, N * M * sizeof(float), hipMemcpyHostToDevice)); // do operation for (int i = 0; i < n_iter; i++) { dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); dim3 dimGrid(M / dimBlock.x, N / dimBlock.y); hipLaunchKernelGGL(( sgemm_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N, M, K, alpha, beta); checkCudaErrors(hipGetLastError()); } checkCudaErrors(hipDeviceSynchronize()); printf("Application finished successfully."); // terminates allocated gpu memory space //checkCudaErrors(hipFree(d_A+2048)); checkCudaErrors(hipFree(d_B)); checkCudaErrors(hipFree(d_C)); // terminates allocated memory space free(A); free(B); free(C); return 0; }
4bf00b6eead63f9bec9dd7e1401588dd7bd39b4d.cu
#include <stdio.h> #include <cuda_profiler_api.h> #include "helper_cuda.h" #define BLOCK_DIM 16 //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set matrix multiply on GPU //! C = alpha * A * B + beta * C //! @param A matrix A as provided to device //! @param B matrix B as provided to device //! @param C matrix C as provided to device //! @param N height of matrix A and matrix C //! @param M width of matrix B and matrix C //! @param K width of matrix A and height of matrix C //! @param alpha scala value for matrix multiplication //! @param beta scala value for matrix summation with C //////////////////////////////////////////////////////////////////////////////// __global__ void sgemm_kernel(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0.f; for (int i = 0; i < K; ++i) sum += A[row * K + i] * B[i * K + col]; C[row * M + col] = alpha * sum + beta * C[row * M + col]; } void random_init(float *data, int size) { for (int i = 0; i < size; ++i) { data[i] = (rand() & 0xFF) / (float)RAND_MAX; } } int main() { float *A, *B, *C; float *d_A, *d_B, *d_C; int N, M, K; float alpha = 2.f; float beta = 1.f; int n_iter = 10; N = M = K = 2048; // allocation of linear memory space A = (float *)malloc(N * K * sizeof(float)); B = (float *)malloc(K * M * sizeof(float)); C = (float *)malloc(N * M * sizeof(float)); // allocation of gpu linear memory space checkCudaErrors(cudaMalloc((void **)&d_A, N * K * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_B, K * M * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_C, N * M * sizeof(float))); // initialize randomized values for memory space random_init(A, N * K); random_init(B, K * M); random_init(C, N * M); // copy initial value for gpu memory checkCudaErrors(cudaMemcpy(d_A, A, N * K * sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_B, B, K * M * sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_C, C, N * M * sizeof(float), cudaMemcpyHostToDevice)); // do operation for (int i = 0; i < n_iter; i++) { dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); dim3 dimGrid(M / dimBlock.x, N / dimBlock.y); sgemm_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N, M, K, alpha, beta); checkCudaErrors(cudaGetLastError()); } checkCudaErrors(cudaDeviceSynchronize()); printf("Application finished successfully."); // terminates allocated gpu memory space //checkCudaErrors(cudaFree(d_A+2048)); checkCudaErrors(cudaFree(d_B)); checkCudaErrors(cudaFree(d_C)); // terminates allocated memory space free(A); free(B); free(C); return 0; }
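A note on the kernel in the pair above: the inner loop reads B[i * K + col], which only lines up with a row-major K x M matrix because N == M == K == 2048 in this example (the header comment's "height of matrix C" for K presumably means matrix B). A sketch of the fully general indexing, with a bounds guard that the original can omit because 2048 is a multiple of BLOCK_DIM:

    // Sketch of the same SGEMM with row-major indexing written out explicitly:
    // A is N x K, B is K x M, C is N x M, so element (i, col) of B is B[i * M + col].
    __global__ void sgemm_kernel_general(const float *A, const float *B, float *C,
                                         int N, int M, int K, float alpha, float beta)
    {
        int col = blockIdx.x * blockDim.x + threadIdx.x;
        int row = blockIdx.y * blockDim.y + threadIdx.y;
        if (row >= N || col >= M) return;   // guard for sizes not divisible by the block

        float sum = 0.f;
        for (int i = 0; i < K; ++i)
            sum += A[row * K + i] * B[i * M + col];
        C[row * M + col] = alpha * sum + beta * C[row * M + col];
    }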
6c9636e7518ccccb5063c8308d0da524c4384491.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "value_add_matrix.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat1 = NULL; hipMalloc(&mat1, XSIZE*YSIZE); float *mat2 = NULL; hipMalloc(&mat2, XSIZE*YSIZE); int row = 1; int col = 1; float v = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( value_add_matrix), dim3(gridBlock),dim3(threadBlock), 0, 0, mat1,mat2,row,col,v); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( value_add_matrix), dim3(gridBlock),dim3(threadBlock), 0, 0, mat1,mat2,row,col,v); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( value_add_matrix), dim3(gridBlock),dim3(threadBlock), 0, 0, mat1,mat2,row,col,v); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6c9636e7518ccccb5063c8308d0da524c4384491.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "value_add_matrix.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat1 = NULL; cudaMalloc(&mat1, XSIZE*YSIZE); float *mat2 = NULL; cudaMalloc(&mat2, XSIZE*YSIZE); int row = 1; int col = 1; float v = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); value_add_matrix<<<gridBlock,threadBlock>>>(mat1,mat2,row,col,v); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { value_add_matrix<<<gridBlock,threadBlock>>>(mat1,mat2,row,col,v); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { value_add_matrix<<<gridBlock,threadBlock>>>(mat1,mat2,row,col,v); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
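Two observations on the benchmark harness above, with a hedged sketch rather than a definitive fix. First, cudaMalloc/hipMalloc sizes are in bytes, so XSIZE*YSIZE reserves one byte per element; XSIZE*YSIZE*sizeof(float) would reserve one float per element. Second, the start and end timestamps bracket asynchronous kernel launches without a device synchronisation, so they largely measure launch overhead. A small hypothetical helper (time_launches_us is not part of the original) showing the same pattern with a synchronisation before the end timestamp:

    #include <chrono>
    #include <cuda_runtime.h>

    // Times `iters` invocations of a launch callable, synchronising before the
    // end timestamp so the measured interval covers kernel execution, not just
    // the asynchronous enqueue of the launches.
    template <typename Launch>
    float time_launches_us(Launch launch, int iters)
    {
        launch();                        // warm-up
        cudaDeviceSynchronize();
        auto start = std::chrono::steady_clock::now();
        for (int i = 0; i < iters; ++i) launch();
        cudaDeviceSynchronize();         // wait for all enqueued kernels to finish
        auto end = std::chrono::steady_clock::now();
        return std::chrono::duration<float, std::micro>(end - start).count();
    }

For example, the timed loop above would become time_launches_us([&]{ value_add_matrix<<<gridBlock, threadBlock>>>(mat1, mat2, row, col, v); }, 1000).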
eab7ec7068dfe2879145314ed2b1095fa3f4df9a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/copy_range.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/copy_range.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <thrust/iterator/constant_iterator.h> #include <hip/hip_runtime.h> #include <memory> namespace { template <typename T> void in_place_copy_range(cudf::column_view const& source, cudf::mutable_column_view& target, cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, hipStream_t stream = 0) { auto p_source_device_view = cudf::column_device_view::create(source, stream); if (source.has_nulls()) { cudf::detail::copy_range( cudf::detail::make_null_replacement_iterator<T>(*p_source_device_view, T()) + source_begin, cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin, target, target_begin, target_begin + (source_end - source_begin), stream); } else { cudf::detail::copy_range(p_source_device_view->begin<T>() + source_begin, thrust::make_constant_iterator(true), // dummy target, target_begin, target_begin + (source_end - source_begin), stream); } } struct in_place_copy_range_dispatch { cudf::column_view const& source; cudf::mutable_column_view& target; template <typename T> std::enable_if_t<cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, hipStream_t stream = 0) { in_place_copy_range<T>(source, target, source_begin, source_end, target_begin, stream); } template <typename T> std::enable_if_t<not cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, hipStream_t stream = 0) { CUDF_FAIL("in-place copy does not work for variable width types."); } }; struct out_of_place_copy_range_dispatch { cudf::column_view const& source; cudf::column_view const& target; template <typename T> std::unique_ptr<cudf::column> operator()( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { auto p_ret = std::make_unique<cudf::column>(target, stream, mr); if ((!p_ret->nullable()) && source.has_nulls(source_begin, source_end)) { p_ret->set_null_mask( cudf::create_null_mask(p_ret->size(), cudf::mask_state::ALL_VALID, stream, mr), 0); } if (source_end != source_begin) { // otherwise no-op auto ret_view = p_ret->mutable_view(); in_place_copy_range<T>(source, ret_view, source_begin, source_end, target_begin, stream); } return p_ret; } }; template <> 
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::string_view>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, hipStream_t stream) { auto target_end = target_begin + (source_end - source_begin); auto p_source_device_view = cudf::column_device_view::create(source, stream); if (source.has_nulls()) { return cudf::strings::detail::copy_range( cudf::detail::make_null_replacement_iterator<cudf::string_view>(*p_source_device_view, cudf::string_view()) + source_begin, cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin, cudf::strings_column_view(target), target_begin, target_end, mr, stream); } else { return cudf::strings::detail::copy_range( p_source_device_view->begin<cudf::string_view>() + source_begin, thrust::make_constant_iterator(true), cudf::strings_column_view(target), target_begin, target_end, mr, stream); } } template <> std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::dictionary32>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_FAIL("dictionary type not supported"); } template <> std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::list_view>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_FAIL("list_view type not supported"); } } // namespace namespace cudf { namespace detail { void copy_range_in_place(column_view const& source, mutable_column_view& target, size_type source_begin, size_type source_end, size_type target_begin, hipStream_t stream) { CUDF_EXPECTS(cudf::is_fixed_width(target.type()) == true, "In-place copy_range does not support variable-sized types."); CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) && (source_begin <= source_end) && (target_begin >= 0) && (target_begin <= target.size() - (source_end - source_begin)), "Range is out of bounds."); CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch."); CUDF_EXPECTS((target.nullable() == true) || (source.has_nulls() == false), "target should be nullable if source has null values."); if (source_end != source_begin) { // otherwise no-op cudf::type_dispatcher(target.type(), in_place_copy_range_dispatch{source, target}, source_begin, source_end, target_begin, stream); } } std::unique_ptr<column> copy_range(column_view const& source, column_view const& target, size_type source_begin, size_type source_end, size_type target_begin, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) && (source_begin <= source_end) && (target_begin >= 0) && (target_begin <= target.size() - (source_end - source_begin)), "Range is out of bounds."); CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch."); return cudf::type_dispatcher(target.type(), out_of_place_copy_range_dispatch{source, target}, source_begin, source_end, target_begin, mr, stream); } } // namespace detail void copy_range_in_place(column_view const& source, mutable_column_view& target, size_type source_begin, size_type source_end, size_type target_begin) { CUDF_FUNC_RANGE(); return detail::copy_range_in_place(source, target, source_begin, source_end, target_begin, 0); } std::unique_ptr<column> copy_range(column_view const& source, column_view const& target, size_type 
source_begin, size_type source_end, size_type target_begin, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::copy_range(source, target, source_begin, source_end, target_begin, mr, 0); } } // namespace cudf
eab7ec7068dfe2879145314ed2b1095fa3f4df9a.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/copy_range.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/copy_range.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <thrust/iterator/constant_iterator.h> #include <cuda_runtime.h> #include <memory> namespace { template <typename T> void in_place_copy_range(cudf::column_view const& source, cudf::mutable_column_view& target, cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, cudaStream_t stream = 0) { auto p_source_device_view = cudf::column_device_view::create(source, stream); if (source.has_nulls()) { cudf::detail::copy_range( cudf::detail::make_null_replacement_iterator<T>(*p_source_device_view, T()) + source_begin, cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin, target, target_begin, target_begin + (source_end - source_begin), stream); } else { cudf::detail::copy_range(p_source_device_view->begin<T>() + source_begin, thrust::make_constant_iterator(true), // dummy target, target_begin, target_begin + (source_end - source_begin), stream); } } struct in_place_copy_range_dispatch { cudf::column_view const& source; cudf::mutable_column_view& target; template <typename T> std::enable_if_t<cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, cudaStream_t stream = 0) { in_place_copy_range<T>(source, target, source_begin, source_end, target_begin, stream); } template <typename T> std::enable_if_t<not cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, cudaStream_t stream = 0) { CUDF_FAIL("in-place copy does not work for variable width types."); } }; struct out_of_place_copy_range_dispatch { cudf::column_view const& source; cudf::column_view const& target; template <typename T> std::unique_ptr<cudf::column> operator()( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { auto p_ret = std::make_unique<cudf::column>(target, stream, mr); if ((!p_ret->nullable()) && source.has_nulls(source_begin, source_end)) { p_ret->set_null_mask( cudf::create_null_mask(p_ret->size(), cudf::mask_state::ALL_VALID, stream, mr), 0); } if (source_end != source_begin) { // otherwise no-op auto ret_view = p_ret->mutable_view(); in_place_copy_range<T>(source, ret_view, source_begin, source_end, target_begin, stream); } return p_ret; } }; template <> std::unique_ptr<cudf::column> 
out_of_place_copy_range_dispatch::operator()<cudf::string_view>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { auto target_end = target_begin + (source_end - source_begin); auto p_source_device_view = cudf::column_device_view::create(source, stream); if (source.has_nulls()) { return cudf::strings::detail::copy_range( cudf::detail::make_null_replacement_iterator<cudf::string_view>(*p_source_device_view, cudf::string_view()) + source_begin, cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin, cudf::strings_column_view(target), target_begin, target_end, mr, stream); } else { return cudf::strings::detail::copy_range( p_source_device_view->begin<cudf::string_view>() + source_begin, thrust::make_constant_iterator(true), cudf::strings_column_view(target), target_begin, target_end, mr, stream); } } template <> std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::dictionary32>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_FAIL("dictionary type not supported"); } template <> std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::list_view>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_FAIL("list_view type not supported"); } } // namespace namespace cudf { namespace detail { void copy_range_in_place(column_view const& source, mutable_column_view& target, size_type source_begin, size_type source_end, size_type target_begin, cudaStream_t stream) { CUDF_EXPECTS(cudf::is_fixed_width(target.type()) == true, "In-place copy_range does not support variable-sized types."); CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) && (source_begin <= source_end) && (target_begin >= 0) && (target_begin <= target.size() - (source_end - source_begin)), "Range is out of bounds."); CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch."); CUDF_EXPECTS((target.nullable() == true) || (source.has_nulls() == false), "target should be nullable if source has null values."); if (source_end != source_begin) { // otherwise no-op cudf::type_dispatcher(target.type(), in_place_copy_range_dispatch{source, target}, source_begin, source_end, target_begin, stream); } } std::unique_ptr<column> copy_range(column_view const& source, column_view const& target, size_type source_begin, size_type source_end, size_type target_begin, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) && (source_begin <= source_end) && (target_begin >= 0) && (target_begin <= target.size() - (source_end - source_begin)), "Range is out of bounds."); CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch."); return cudf::type_dispatcher(target.type(), out_of_place_copy_range_dispatch{source, target}, source_begin, source_end, target_begin, mr, stream); } } // namespace detail void copy_range_in_place(column_view const& source, mutable_column_view& target, size_type source_begin, size_type source_end, size_type target_begin) { CUDF_FUNC_RANGE(); return detail::copy_range_in_place(source, target, source_begin, source_end, target_begin, 0); } std::unique_ptr<column> copy_range(column_view const& source, column_view const& target, size_type source_begin, size_type 
source_end, size_type target_begin, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::copy_range(source, target, source_begin, source_end, target_begin, mr, 0); } } // namespace cudf
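A hedged usage sketch for the public entry points defined in the pair above, assuming the declarations live in <cudf/copying.hpp> (which the file itself includes) and that the two columns already exist:

    #include <cudf/column/column_view.hpp>
    #include <cudf/copying.hpp>

    void shift_in_place(cudf::column_view const& src, cudf::mutable_column_view& dst)
    {
        // Copies src[0, 10) over dst[5, 15); both columns must share the same
        // fixed-width type, and dst must be nullable if src contains nulls --
        // exactly the CUDF_EXPECTS checks enforced in copy_range_in_place above.
        cudf::copy_range_in_place(src, dst, 0, 10, 5);
    }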
65b0b874900b455c01dad9b0fbe10f7758c03d9d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void polynomial_expansion (float* poly,int degree,int n,float* array) { int INX=blockIdx.x*blockDim.x+threadIdx.x; if(INX<n) { float val=0.0; float exp=1.0; for(int x=0;x<=degree;++x) { val+=exp*poly[x]; exp*=array[INX]; } array[INX]=val; } }
65b0b874900b455c01dad9b0fbe10f7758c03d9d.cu
#include "includes.h" __global__ void polynomial_expansion (float* poly,int degree,int n,float* array) { int INX=blockIdx.x*blockDim.x+threadIdx.x; if(INX<n) { float val=0.0; float exp=1.0; for(int x=0;x<=degree;++x) { val+=exp*poly[x]; exp*=array[INX]; } array[INX]=val; } }
dcb8f451aa814f33d7b5c0eb999e7a645f7c3d88.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <iostream> #include <fstream> #include "sim_run.h" #include "calc_neighbor_list.h" #include "random_mars.h" //#include "cuda.h" #include "hiprand/hiprand_kernel.h" #include <map> #include <vector> using namespace std; double verify_f(double** f) { double sum_f = 0; for (int j = 0; j < N; j++) { sum_f += f[j][0]; sum_f += f[j][1]; sum_f += f[j][2]; } return sum_f; } #if __CUDA_ARCH__ < 600 __device__ double atomicAdd_30(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif void cpu_dpd(double** r, double**f, double** v, const char* select, RanMars* random, OUTPUT_struct* output_info, double len_cell, double len_x, double len_y, double len_z) { ofstream outputfile; outputfile.open("dump_cpu.md", ios::out); //init force compute if (!strcmp(select, "sijun")) compute_force(r, v, f, random, N, output_info, len_cell, len_x, len_y, len_z); else if (!strcmp(select, "base")) compute_force_std(r, v, f, random, N); //writeDump(outputfile, r, v, 0); int ntimestep = 100;//5000; double m = 1.0; for (int i = 0; i <= ntimestep; i++) { //half integration for(int j = 0; j < N; j++) { v[j][0] += 0.5 * f[j][0] * dt; v[j][1] += 0.5 * f[j][1] * dt; v[j][2] += 0.5 * f[j][2] * dt; r[j][0] += v[j][0] * dt; r[j][1] += v[j][1] * dt; r[j][2] += v[j][2] * dt; } pbc(r); //force computation clear_force(f, N); if (!strcmp(select, "sijun")) { compute_force(r, v, f, random, N, output_info, len_cell, len_x, len_y, len_z); int partc_num_def = 4000; output_info = func_partc_incell_stat(r, partc_num_def, len_cell, len_x, len_y, len_z); } else if (!strcmp(select, "base")) compute_force_std(r, v, f, random, N); //full integration for(int j = 0; j < N; j++) { v[j][0] += 0.5 * f[j][0] * dt; v[j][1] += 0.5 * f[j][1] * dt; v[j][2] += 0.5 * f[j][2] * dt; } if(i % 1 == 0) { double ke = computeKE(v); cout << i << " temp is " << ke * 2 / (3 * 4000 * 1) << endl; writeDump(outputfile, r, f, i); //cout << "verify force on cpu at step " << i << " is " << verify_f(f) << endl; } } } __global__ void print_kernel() { printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x); } __global__ void init(double *r, double *v, double *f) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id < N) { int i; for(i = 0; i < 3; i++) { v[id * 3 + i] += 0.5 * f[id * 3 + i] * dt; r[id * 3 + i] += v[id * 3 + i] * dt; } // do periodic boundary condition for (i = 0; i < 3; i++) { if (r[id * 3 + i] < 0) r[id * 3 + i] += 10; else if (r[id * 3 + i] > 10) r[id * 3 + i] -= 10; } } } __global__ void f_clear(double *f) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id < N) { //force computation int i; for (i = 0; i < 3; i++) f[id * 3 + i] = 0; } } __global__ void setup_kernel(hiprandState_t *state) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id < N) hiprand_init(34387, id, 0, &state[id]); } __global__ void iteration(hiprandState_t *state, double* r, double* f, double* v, int* cell_list, int* cell_list_count, int avg_num_cell, int * dev_ts) { //printf("Hello from block %d, thread %d\n", blockIdx.x, 
threadIdx.x); int id = threadIdx.x + blockDim.x * blockIdx.x; //printf("blockIdx.x %d threadIdx.x %d\n", blockIdx.x, threadIdx.x); if (id < N) { hiprandState_t localState = state[id]; //hiprand_init(34387+id, id, 0, &state[id]); int i; if (id == 0) { double sum_f = 0; for (int j = 0; j < 3 * N; j++) { sum_f += f[j]; } //printf("sum f is %f \n", sum_f); } double len_cell = 2.0; double len_x = 10.0; double len_y = 10.0; double len_z = 10.0; double m = 1.0; int num_cx, num_cy, num_cz; num_cx = (int) floor(len_x/len_cell); num_cy = (int) floor(len_y/len_cell); num_cz = (int) floor(len_z/len_cell); //printf("number of cell is %d \n", num_cx * num_cy * num_cz); double x = r[id * 3]; double y = r[id * 3 + 1]; double z = r[id * 3 + 2]; // which cell in 3d the particle is in int idx = (int) floor(x / len_cell); int idy = (int) floor(y / len_cell); int idz = (int) floor(z / len_cell); // loop through 27 boxes for (int n = -1; n < 2; n++) { for (int m = -1; m < 2; m++) { for (int l = -1; l < 2; l++) { int newidz = (idz + n + num_cz) % num_cz; int newidy = (idy + m + num_cy) % num_cy; int newidx = (idx + l + num_cx) % num_cx; int cell_id = newidz + newidy * num_cz + newidx * num_cy * num_cz; if (id == 3999) { //printf("l %d m %d n %d cell_id %d \n", l, m, n, cell_id); } int num_neigh_particle = cell_list_count[cell_id]; if (id == 3999) { // printf("l %d m %d n %d num_neigh_particle %d \n", l, m, n, num_neigh_particle); } for (int j = 0; j < num_neigh_particle; j++) { int part_id = cell_list[cell_id * avg_num_cell + j]; if (part_id > id) { if (part_id == id) continue; double delx, dely, delz; delx = x - r[part_id * 3]; dely = y - r[part_id * 3 + 1]; delz = z - r[part_id * 3 + 2]; if (delx < -5) delx = delx + 10; else if (delx > 5) delx = delx - 10; if (dely < -5) dely = dely + 10; else if (dely > 5) dely = dely - 10; if (delz < -5) delz = delz + 10; else if (delz > 5) delz = delz - 10; //printf("blockIdx %d threadIdx %d part_id %d %f %f %f %f %f %f %f\n", blockIdx.x, threadIdx.x, part_id, r[part_id * 3], r[part_id * 3 + 1], r[part_id *3 + 2], x, y, z); double rr; rr = sqrt(delx * delx + dely * dely + delz * delz); if(rr < rc) { //printf("id %f part_id %f \n", id, part_id); if (id == 3999) { //printf(" id %d part_id %d rr %f \n", id, part_id, rr); } double fpair; double wr; wr = 1 - rr / rc; fpair = force_a0 * wr; double delvx, delvy, delvz; delvx = v[id*3] - v[part_id*3]; delvy = v[id*3+1] - v[part_id*3+1]; delvz = v[id*3+2] - v[part_id*3+2]; double dot; dot = (delx * delvx + dely * delvy + delz * delvz) / rr; fpair -= force_gamma * wr * wr * dot; //fpair += force_sigma * wr * hiprand_normal_double(&state[id]) * 1 / sqrt(dt); fpair += force_sigma * wr * hiprand_normal_double(&localState) * 1 / sqrt(dt); f[id*3] += delx * fpair / rr; f[id*3+1] += dely * fpair / rr; f[id*3+2] += delz * fpair / rr; //f[part_id*3] -= delx * fpair / rr; //f[part_id*3+1] -= dely * fpair / rr; //f[part_id*3+2] -= delz * fpair / rr; atomicAdd_30(&(f[part_id*3]), -delx * fpair /rr); atomicAdd_30(&(f[part_id*3+1]), -dely * fpair / rr); atomicAdd_30(&(f[part_id*3+2]), -delz * fpair / rr); } } } // if (id == 3999) // printf("force of each cell is %f %f %f\n", f[id*3], f[id*3+1], f[id*3+2]); } } } state[id] = localState; } } __global__ void post_int(double *v, double *f) { int id = threadIdx.x + blockDim.x * blockIdx.x; //printf("blockIdx.x %d threadIdx.x %d\n", blockIdx.x, threadIdx.x); if (id < N) { //full integration int i; for (i = 0; i < 3; i++) { v[id * 3 + i] += 0.5 * f[id * 3 + i] * dt; } } } void gpu_dpd(double** r, 
double**f, double** v, const char* select, RanMars* random, OUTPUT_struct* output_info, double len_cell, double len_x, double len_y, double len_z) { compute_force(r, v, f, random, N, output_info, len_cell, len_x, len_y, len_z); //allocate r,f,v on GPU int size = N * 3 * sizeof(double); double* cpu_r = (double *) malloc(size); double* cpu_f = (double *) malloc(size); double* cpu_v = (double *) malloc(size); int i; int count = 0; for(i = 0; i < N; i++) { cpu_r[count] = r[i][0]; cpu_f[count] = f[i][0]; cpu_v[count] = v[i][0]; cpu_r[count+1] = r[i][1]; cpu_f[count+1] = f[i][1]; cpu_v[count+1] = v[i][1]; cpu_r[count+2] = r[i][2]; cpu_f[count+2] = f[i][2]; cpu_v[count+2] = v[i][2]; count += 3; } if (count == 3 * N) { printf("count right\n"); //return; double residual_r = 0; double residual_f = 0; double residual_v = 0; int j; for (j = 0; j < 3 * N; j++) { residual_r += (cpu_r[j] - r[j/3][j%3]); residual_f += (cpu_f[j] - f[j/3][j%3]); residual_v += (cpu_v[j] - v[j/3][j%3]); } printf("residual %f %f %f\n", residual_r, residual_f, residual_v); } double* dev_r; double* dev_f; double* dev_v; hipMalloc((void **) &dev_r, size); hipMalloc((void **) &dev_f, size); hipMalloc((void **) &dev_v, size); hipMemcpy(dev_r, cpu_r, size, hipMemcpyHostToDevice); hipMemcpy(dev_f, cpu_f, size, hipMemcpyHostToDevice); hipMemcpy(dev_v, cpu_v, size, hipMemcpyHostToDevice); int** cell_partc_list_res = output_info->cell_partc_list_res; int* part_num = output_info->cell_partc_num_res; int cell_num = output_info->cell_num; int max_num_part = output_info->col_num; int* cell_list = (int *) malloc(sizeof(int) * cell_num * max_num_part); int* cell_list_count = (int *) malloc(sizeof(int) * cell_num); int j; count = 0; for (j = 0; j < cell_num; j++) { int k; for (k = 0; k < max_num_part; k++) { cell_list[count++] = cell_partc_list_res[j][k]; } cell_list_count[j] = part_num[j]; } double residual_count = 0; double residual_list = 0; for (j = 0; j < cell_num; j++) { residual_count += (cell_list_count[j] - part_num[j]); int k; for (k = 0; k < max_num_part; k++) { residual_list += (cell_list[j * max_num_part + k] - cell_partc_list_res[j][k]); } } printf("residual list %f count %f \n", residual_list, residual_count); int* dev_cell_list; int* dev_cell_list_count; int size_cell_list = sizeof(int) * cell_num * max_num_part; hipMalloc((void **) &dev_cell_list, size_cell_list); hipMalloc((void **) &dev_cell_list_count, sizeof(int) * cell_num); hipMemcpy(dev_cell_list, cell_list, size_cell_list, hipMemcpyHostToDevice); hipMemcpy(dev_cell_list_count, cell_list_count, sizeof(int) * cell_num, hipMemcpyHostToDevice); hiprandState_t *d_state; hipMalloc((void**) &d_state, N); int ntimestep = 5000; double m = 1.0; int blockSize = (int) floor(N/1024 + 1); printf("go to call iteration\n"); cout << "verify f " << verify_f(f) << endl; ofstream outputfile; outputfile.open("dump_gpu.md", ios::out); hipLaunchKernelGGL(( setup_kernel), dim3(blockSize),dim3(1024), 0, 0, d_state); hipDeviceSynchronize(); for (i = 0; i <= 5000; i++) { hipLaunchKernelGGL(( init), dim3(blockSize), dim3(1024), 0, 0, dev_r, dev_v, dev_f); hipDeviceSynchronize(); hipLaunchKernelGGL(( f_clear), dim3(blockSize), dim3(1024), 0, 0, dev_f); hipDeviceSynchronize(); int *dev_ts; hipMalloc((void**) &dev_ts, sizeof(int)); hipMemcpy(dev_ts, &i, sizeof(int), hipMemcpyHostToDevice); //setup_kernel<<<blockSize,1024>>>(d_state); //hipDeviceSynchronize(); hipLaunchKernelGGL(( iteration), dim3(blockSize), dim3(1024), 0, 0, d_state, dev_r, dev_f, dev_v, dev_cell_list, dev_cell_list_count, 
max_num_part, dev_ts); hipDeviceSynchronize(); hipLaunchKernelGGL(( post_int), dim3(blockSize), dim3(1024), 0, 0, dev_v, dev_f); hipMemcpy(cpu_r, dev_r, size, hipMemcpyDeviceToHost); hipMemcpy(cpu_v, dev_v, size, hipMemcpyDeviceToHost); hipMemcpy(cpu_f, dev_f, size, hipMemcpyDeviceToHost); double res = 0; for (int j = 0; j < 3 * N; j++) { res += cpu_f[j]; } //cout << " cpu_f is " << res << endl; for (int j = 0; j < N; j++) { for (int k = 0; k < 3; k++) { r[j][k] = cpu_r[j * 3 + k]; v[j][k] = cpu_v[j * 3 + k]; f[j][k] = cpu_f[j * 3 + k]; } } //rebuild cell list int partc_num_def = 4000; output_info = func_partc_incell_stat(r, partc_num_def, len_cell, len_x, len_y, len_z); cell_partc_list_res = output_info->cell_partc_list_res; part_num = output_info->cell_partc_num_res; cell_num = output_info->cell_num; max_num_part = output_info->col_num; free(cell_list); free(cell_list_count); cell_list = (int *) malloc(sizeof(int) * cell_num * max_num_part); cell_list_count = (int *) malloc(sizeof(int) * cell_num); count = 0; for (int j = 0; j < cell_num; j++) { int k; for (k = 0; k < max_num_part; k++) { cell_list[count++] = cell_partc_list_res[j][k]; } cell_list_count[j] = part_num[j]; } hipFree(dev_cell_list); hipFree(dev_cell_list_count); int size_cell_list = sizeof(int) * cell_num * max_num_part; hipMalloc((void **) &dev_cell_list, size_cell_list); hipMalloc((void **) &dev_cell_list_count, sizeof(int) * cell_num); hipMemcpy(dev_cell_list, cell_list, size_cell_list, hipMemcpyHostToDevice); hipMemcpy(dev_cell_list_count, cell_list_count, sizeof(int) * cell_num, hipMemcpyHostToDevice); if(i % 100 == 0) { double ke = computeKE(v); cout << i << " temp is " << ke * 2 / (3 * 4000 * 1) << endl; writeDump(outputfile, r, f, i); //cout << "verify force on gpu at step " << i << " is " << verify_f(f) << endl; } } printf("iteration done\n"); free(cpu_r); free(cpu_f); free(cpu_v); free(cell_list); free(cell_list_count); hipFree(dev_r); hipFree(dev_f); hipFree(dev_v); hipFree(dev_cell_list); hipFree(dev_cell_list_count); hipFree(d_state); } void next_func(FILE* fptr) { char str_buff[256]; //while (!feof(fptr)){ // fscanf(fptr, "%s", str_buff); //} for (int loop=0; loop<14; loop++){ fgets(str_buff, 256, fptr); printf("%d %s", loop, str_buff); } } int load_func(FILE* fptr, double* outptr) { char str_buff[256]; double b,c,d; int a; fscanf(fptr, "%d", &a); fscanf(fptr, "%d", &a); if (!feof(fptr)){ fscanf(fptr, "%lf", &b); fscanf(fptr, "%lf", &c); fscanf(fptr, "%lf", &d); outptr[0] = b; outptr[1] = c; outptr[2] = d; return 1; } else{ return 0; } } int main(int argc, char* argv[]) { int type_of_device = atoi(argv[1]); // 0 - CPU; 1 - GPU const char* select = argv[2]; FILE* file_ptr; char str_input[5]; double result[3]; int count; int end; int flag; double** partc_pos_res; int pos_index; int partc_num_def = 4000; int i; // read input position file count = 0; if((file_ptr = fopen("4000_new.txt","r")) == NULL){ printf("Cannt open the file!"); exit(1); } next_func(file_ptr); // allocate memory for partc_pos_res partc_pos_res = (double**)malloc(partc_num_def*sizeof(double*)); for (i=0; i<partc_num_def; i++){ partc_pos_res[i] = (double*)malloc(3*sizeof(double)); } // fill in the particle positions into partc_pos_res while (!feof(file_ptr)){ // count = count+1; flag = load_func(file_ptr, result); if (flag){ // printf("Line:%d, %.2f, %.2f, %.2f\n", count, result[0], result[1], result[2]); for (pos_index=0; pos_index<3; pos_index++){ partc_pos_res[count][pos_index] = result[pos_index]; } } count = count+1; } fclose(file_ptr); 
//scanf("%d",&end); // build cell list int loop; unsigned int seed; //input_cube double len_cell; double len_x, len_y, len_z; //input_partc int partc_num; double** partc_pos; int end_flag; //output OUTPUT_struct* output_info; seed = 10; srand(seed); //INPUT information: len_cell = 2.0; len_x = 10.0; len_y = 10.0; len_z = 10.0; // partc_num = 10; // partc_pos = (double**)malloc(partc_num*sizeof(double*)); // for(loop=0; loop<partc_num; loop++){ // partc_pos[loop] = (double*)malloc(3*sizeof(double)); // partc_pos[loop][0] = (double)(rand()%CONST_MAX_RAND)/(CONST_MAX_RAND-1)*len_x; // partc_pos[loop][1] = (double)(rand()%CONST_MAX_RAND)/(CONST_MAX_RAND-1)*len_y; // partc_pos[loop][2] = (double)(rand()%CONST_MAX_RAND)/(CONST_MAX_RAND-1)*len_z; // } output_info = func_partc_incell_stat(partc_pos_res, partc_num_def, len_cell, len_x, len_y, len_z); //PRINT basic information //if (1){ //} //PRINT output information //if (1){ // func_print_output_info(output_info); //} //scanf("%d",&end_flag); // finished building cell list RanMars * random = new RanMars(34387); int N = 4000; double** r = new double* [N]; double** v = new double* [N]; double** f = new double* [N]; for(int i = 0; i < N; i++) { r[i] = new double[3]; v[i] = new double[3]; f[i] = new double[3]; } for (int i = 0; i < N; i++) { r[i][0] = partc_pos_res[i][0]; r[i][1] = partc_pos_res[i][1]; r[i][2] = partc_pos_res[i][2]; } for(int i = 0; i < N; i++) { for(int j = 0; j < 3; j++) { v[i][j] = 0; f[i][j] = 0; } } if (!type_of_device) { cpu_dpd(r, f, v, select, random, output_info, len_cell, len_x, len_y, len_z); } else { gpu_dpd(r, f, v, select, random, output_info,len_cell, len_x, len_y, len_z); } //memory release for(int i = 0; i < N; i++) { delete(r[i]); delete(v[i]); delete(f[i]); } delete(r); delete(v); delete(f); delete(random); cout << "position is read, f is computed" << endl; }
dcb8f451aa814f33d7b5c0eb999e7a645f7c3d88.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <iostream> #include <fstream> #include "sim_run.h" #include "calc_neighbor_list.h" #include "random_mars.h" //#include "cuda.h" #include "curand_kernel.h" #include <map> #include <vector> using namespace std; double verify_f(double** f) { double sum_f = 0; for (int j = 0; j < N; j++) { sum_f += f[j][0]; sum_f += f[j][1]; sum_f += f[j][2]; } return sum_f; } #if __CUDA_ARCH__ < 600 __device__ double atomicAdd_30(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif void cpu_dpd(double** r, double**f, double** v, const char* select, RanMars* random, OUTPUT_struct* output_info, double len_cell, double len_x, double len_y, double len_z) { ofstream outputfile; outputfile.open("dump_cpu.md", ios::out); //init force compute if (!strcmp(select, "sijun")) compute_force(r, v, f, random, N, output_info, len_cell, len_x, len_y, len_z); else if (!strcmp(select, "base")) compute_force_std(r, v, f, random, N); //writeDump(outputfile, r, v, 0); int ntimestep = 100;//5000; double m = 1.0; for (int i = 0; i <= ntimestep; i++) { //half integration for(int j = 0; j < N; j++) { v[j][0] += 0.5 * f[j][0] * dt; v[j][1] += 0.5 * f[j][1] * dt; v[j][2] += 0.5 * f[j][2] * dt; r[j][0] += v[j][0] * dt; r[j][1] += v[j][1] * dt; r[j][2] += v[j][2] * dt; } pbc(r); //force computation clear_force(f, N); if (!strcmp(select, "sijun")) { compute_force(r, v, f, random, N, output_info, len_cell, len_x, len_y, len_z); int partc_num_def = 4000; output_info = func_partc_incell_stat(r, partc_num_def, len_cell, len_x, len_y, len_z); } else if (!strcmp(select, "base")) compute_force_std(r, v, f, random, N); //full integration for(int j = 0; j < N; j++) { v[j][0] += 0.5 * f[j][0] * dt; v[j][1] += 0.5 * f[j][1] * dt; v[j][2] += 0.5 * f[j][2] * dt; } if(i % 1 == 0) { double ke = computeKE(v); cout << i << " temp is " << ke * 2 / (3 * 4000 * 1) << endl; writeDump(outputfile, r, f, i); //cout << "verify force on cpu at step " << i << " is " << verify_f(f) << endl; } } } __global__ void print_kernel() { printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x); } __global__ void init(double *r, double *v, double *f) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id < N) { int i; for(i = 0; i < 3; i++) { v[id * 3 + i] += 0.5 * f[id * 3 + i] * dt; r[id * 3 + i] += v[id * 3 + i] * dt; } // do periodic boundary condition for (i = 0; i < 3; i++) { if (r[id * 3 + i] < 0) r[id * 3 + i] += 10; else if (r[id * 3 + i] > 10) r[id * 3 + i] -= 10; } } } __global__ void f_clear(double *f) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id < N) { //force computation int i; for (i = 0; i < 3; i++) f[id * 3 + i] = 0; } } __global__ void setup_kernel(curandState *state) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id < N) curand_init(34387, id, 0, &state[id]); } __global__ void iteration(curandState *state, double* r, double* f, double* v, int* cell_list, int* cell_list_count, int avg_num_cell, int * dev_ts) { //printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x); int id = threadIdx.x + blockDim.x * blockIdx.x; //printf("blockIdx.x %d threadIdx.x %d\n", 
blockIdx.x, threadIdx.x); if (id < N) { curandState localState = state[id]; //curand_init(34387+id, id, 0, &state[id]); int i; if (id == 0) { double sum_f = 0; for (int j = 0; j < 3 * N; j++) { sum_f += f[j]; } //printf("sum f is %f \n", sum_f); } double len_cell = 2.0; double len_x = 10.0; double len_y = 10.0; double len_z = 10.0; double m = 1.0; int num_cx, num_cy, num_cz; num_cx = (int) floor(len_x/len_cell); num_cy = (int) floor(len_y/len_cell); num_cz = (int) floor(len_z/len_cell); //printf("number of cell is %d \n", num_cx * num_cy * num_cz); double x = r[id * 3]; double y = r[id * 3 + 1]; double z = r[id * 3 + 2]; // which cell in 3d the particle is in int idx = (int) floor(x / len_cell); int idy = (int) floor(y / len_cell); int idz = (int) floor(z / len_cell); // loop through 27 boxes for (int n = -1; n < 2; n++) { for (int m = -1; m < 2; m++) { for (int l = -1; l < 2; l++) { int newidz = (idz + n + num_cz) % num_cz; int newidy = (idy + m + num_cy) % num_cy; int newidx = (idx + l + num_cx) % num_cx; int cell_id = newidz + newidy * num_cz + newidx * num_cy * num_cz; if (id == 3999) { //printf("l %d m %d n %d cell_id %d \n", l, m, n, cell_id); } int num_neigh_particle = cell_list_count[cell_id]; if (id == 3999) { // printf("l %d m %d n %d num_neigh_particle %d \n", l, m, n, num_neigh_particle); } for (int j = 0; j < num_neigh_particle; j++) { int part_id = cell_list[cell_id * avg_num_cell + j]; if (part_id > id) { if (part_id == id) continue; double delx, dely, delz; delx = x - r[part_id * 3]; dely = y - r[part_id * 3 + 1]; delz = z - r[part_id * 3 + 2]; if (delx < -5) delx = delx + 10; else if (delx > 5) delx = delx - 10; if (dely < -5) dely = dely + 10; else if (dely > 5) dely = dely - 10; if (delz < -5) delz = delz + 10; else if (delz > 5) delz = delz - 10; //printf("blockIdx %d threadIdx %d part_id %d %f %f %f %f %f %f %f\n", blockIdx.x, threadIdx.x, part_id, r[part_id * 3], r[part_id * 3 + 1], r[part_id *3 + 2], x, y, z); double rr; rr = sqrt(delx * delx + dely * dely + delz * delz); if(rr < rc) { //printf("id %f part_id %f \n", id, part_id); if (id == 3999) { //printf(" id %d part_id %d rr %f \n", id, part_id, rr); } double fpair; double wr; wr = 1 - rr / rc; fpair = force_a0 * wr; double delvx, delvy, delvz; delvx = v[id*3] - v[part_id*3]; delvy = v[id*3+1] - v[part_id*3+1]; delvz = v[id*3+2] - v[part_id*3+2]; double dot; dot = (delx * delvx + dely * delvy + delz * delvz) / rr; fpair -= force_gamma * wr * wr * dot; //fpair += force_sigma * wr * curand_normal_double(&state[id]) * 1 / sqrt(dt); fpair += force_sigma * wr * curand_normal_double(&localState) * 1 / sqrt(dt); f[id*3] += delx * fpair / rr; f[id*3+1] += dely * fpair / rr; f[id*3+2] += delz * fpair / rr; //f[part_id*3] -= delx * fpair / rr; //f[part_id*3+1] -= dely * fpair / rr; //f[part_id*3+2] -= delz * fpair / rr; atomicAdd_30(&(f[part_id*3]), -delx * fpair /rr); atomicAdd_30(&(f[part_id*3+1]), -dely * fpair / rr); atomicAdd_30(&(f[part_id*3+2]), -delz * fpair / rr); } } } // if (id == 3999) // printf("force of each cell is %f %f %f\n", f[id*3], f[id*3+1], f[id*3+2]); } } } state[id] = localState; } } __global__ void post_int(double *v, double *f) { int id = threadIdx.x + blockDim.x * blockIdx.x; //printf("blockIdx.x %d threadIdx.x %d\n", blockIdx.x, threadIdx.x); if (id < N) { //full integration int i; for (i = 0; i < 3; i++) { v[id * 3 + i] += 0.5 * f[id * 3 + i] * dt; } } } void gpu_dpd(double** r, double**f, double** v, const char* select, RanMars* random, OUTPUT_struct* output_info, double len_cell, double 
len_x, double len_y, double len_z) { compute_force(r, v, f, random, N, output_info, len_cell, len_x, len_y, len_z); //allocate r,f,v on GPU int size = N * 3 * sizeof(double); double* cpu_r = (double *) malloc(size); double* cpu_f = (double *) malloc(size); double* cpu_v = (double *) malloc(size); int i; int count = 0; for(i = 0; i < N; i++) { cpu_r[count] = r[i][0]; cpu_f[count] = f[i][0]; cpu_v[count] = v[i][0]; cpu_r[count+1] = r[i][1]; cpu_f[count+1] = f[i][1]; cpu_v[count+1] = v[i][1]; cpu_r[count+2] = r[i][2]; cpu_f[count+2] = f[i][2]; cpu_v[count+2] = v[i][2]; count += 3; } if (count == 3 * N) { printf("count right\n"); //return; double residual_r = 0; double residual_f = 0; double residual_v = 0; int j; for (j = 0; j < 3 * N; j++) { residual_r += (cpu_r[j] - r[j/3][j%3]); residual_f += (cpu_f[j] - f[j/3][j%3]); residual_v += (cpu_v[j] - v[j/3][j%3]); } printf("residual %f %f %f\n", residual_r, residual_f, residual_v); } double* dev_r; double* dev_f; double* dev_v; cudaMalloc((void **) &dev_r, size); cudaMalloc((void **) &dev_f, size); cudaMalloc((void **) &dev_v, size); cudaMemcpy(dev_r, cpu_r, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_f, cpu_f, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_v, cpu_v, size, cudaMemcpyHostToDevice); int** cell_partc_list_res = output_info->cell_partc_list_res; int* part_num = output_info->cell_partc_num_res; int cell_num = output_info->cell_num; int max_num_part = output_info->col_num; int* cell_list = (int *) malloc(sizeof(int) * cell_num * max_num_part); int* cell_list_count = (int *) malloc(sizeof(int) * cell_num); int j; count = 0; for (j = 0; j < cell_num; j++) { int k; for (k = 0; k < max_num_part; k++) { cell_list[count++] = cell_partc_list_res[j][k]; } cell_list_count[j] = part_num[j]; } double residual_count = 0; double residual_list = 0; for (j = 0; j < cell_num; j++) { residual_count += (cell_list_count[j] - part_num[j]); int k; for (k = 0; k < max_num_part; k++) { residual_list += (cell_list[j * max_num_part + k] - cell_partc_list_res[j][k]); } } printf("residual list %f count %f \n", residual_list, residual_count); int* dev_cell_list; int* dev_cell_list_count; int size_cell_list = sizeof(int) * cell_num * max_num_part; cudaMalloc((void **) &dev_cell_list, size_cell_list); cudaMalloc((void **) &dev_cell_list_count, sizeof(int) * cell_num); cudaMemcpy(dev_cell_list, cell_list, size_cell_list, cudaMemcpyHostToDevice); cudaMemcpy(dev_cell_list_count, cell_list_count, sizeof(int) * cell_num, cudaMemcpyHostToDevice); curandState *d_state; cudaMalloc((void**) &d_state, N); int ntimestep = 5000; double m = 1.0; int blockSize = (int) floor(N/1024 + 1); printf("go to call iteration\n"); cout << "verify f " << verify_f(f) << endl; ofstream outputfile; outputfile.open("dump_gpu.md", ios::out); setup_kernel<<<blockSize,1024>>>(d_state); cudaDeviceSynchronize(); for (i = 0; i <= 5000; i++) { init<<<blockSize, 1024>>>(dev_r, dev_v, dev_f); cudaDeviceSynchronize(); f_clear<<<blockSize, 1024>>>(dev_f); cudaDeviceSynchronize(); int *dev_ts; cudaMalloc((void**) &dev_ts, sizeof(int)); cudaMemcpy(dev_ts, &i, sizeof(int), cudaMemcpyHostToDevice); //setup_kernel<<<blockSize,1024>>>(d_state); //cudaDeviceSynchronize(); iteration<<<blockSize, 1024>>>(d_state, dev_r, dev_f, dev_v, dev_cell_list, dev_cell_list_count, max_num_part, dev_ts); cudaDeviceSynchronize(); post_int<<<blockSize, 1024>>>(dev_v, dev_f); cudaMemcpy(cpu_r, dev_r, size, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_v, dev_v, size, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_f, dev_f, size, 
cudaMemcpyDeviceToHost); double res = 0; for (int j = 0; j < 3 * N; j++) { res += cpu_f[j]; } //cout << " cpu_f is " << res << endl; for (int j = 0; j < N; j++) { for (int k = 0; k < 3; k++) { r[j][k] = cpu_r[j * 3 + k]; v[j][k] = cpu_v[j * 3 + k]; f[j][k] = cpu_f[j * 3 + k]; } } //rebuild cell list int partc_num_def = 4000; output_info = func_partc_incell_stat(r, partc_num_def, len_cell, len_x, len_y, len_z); cell_partc_list_res = output_info->cell_partc_list_res; part_num = output_info->cell_partc_num_res; cell_num = output_info->cell_num; max_num_part = output_info->col_num; free(cell_list); free(cell_list_count); cell_list = (int *) malloc(sizeof(int) * cell_num * max_num_part); cell_list_count = (int *) malloc(sizeof(int) * cell_num); count = 0; for (int j = 0; j < cell_num; j++) { int k; for (k = 0; k < max_num_part; k++) { cell_list[count++] = cell_partc_list_res[j][k]; } cell_list_count[j] = part_num[j]; } cudaFree(dev_cell_list); cudaFree(dev_cell_list_count); int size_cell_list = sizeof(int) * cell_num * max_num_part; cudaMalloc((void **) &dev_cell_list, size_cell_list); cudaMalloc((void **) &dev_cell_list_count, sizeof(int) * cell_num); cudaMemcpy(dev_cell_list, cell_list, size_cell_list, cudaMemcpyHostToDevice); cudaMemcpy(dev_cell_list_count, cell_list_count, sizeof(int) * cell_num, cudaMemcpyHostToDevice); if(i % 100 == 0) { double ke = computeKE(v); cout << i << " temp is " << ke * 2 / (3 * 4000 * 1) << endl; writeDump(outputfile, r, f, i); //cout << "verify force on gpu at step " << i << " is " << verify_f(f) << endl; } } printf("iteration done\n"); free(cpu_r); free(cpu_f); free(cpu_v); free(cell_list); free(cell_list_count); cudaFree(dev_r); cudaFree(dev_f); cudaFree(dev_v); cudaFree(dev_cell_list); cudaFree(dev_cell_list_count); cudaFree(d_state); } void next_func(FILE* fptr) { char str_buff[256]; //while (!feof(fptr)){ // fscanf(fptr, "%s", str_buff); //} for (int loop=0; loop<14; loop++){ fgets(str_buff, 256, fptr); printf("%d %s", loop, str_buff); } } int load_func(FILE* fptr, double* outptr) { char str_buff[256]; double b,c,d; int a; fscanf(fptr, "%d", &a); fscanf(fptr, "%d", &a); if (!feof(fptr)){ fscanf(fptr, "%lf", &b); fscanf(fptr, "%lf", &c); fscanf(fptr, "%lf", &d); outptr[0] = b; outptr[1] = c; outptr[2] = d; return 1; } else{ return 0; } } int main(int argc, char* argv[]) { int type_of_device = atoi(argv[1]); // 0 - CPU; 1 - GPU const char* select = argv[2]; FILE* file_ptr; char str_input[5]; double result[3]; int count; int end; int flag; double** partc_pos_res; int pos_index; int partc_num_def = 4000; int i; // read input position file count = 0; if((file_ptr = fopen("4000_new.txt","r")) == NULL){ printf("Cannt open the file!"); exit(1); } next_func(file_ptr); // allocate memory for partc_pos_res partc_pos_res = (double**)malloc(partc_num_def*sizeof(double*)); for (i=0; i<partc_num_def; i++){ partc_pos_res[i] = (double*)malloc(3*sizeof(double)); } // fill in the particle positions into partc_pos_res while (!feof(file_ptr)){ // count = count+1; flag = load_func(file_ptr, result); if (flag){ // printf("Line:%d, %.2f, %.2f, %.2f\n", count, result[0], result[1], result[2]); for (pos_index=0; pos_index<3; pos_index++){ partc_pos_res[count][pos_index] = result[pos_index]; } } count = count+1; } fclose(file_ptr); //scanf("%d",&end); // build cell list int loop; unsigned int seed; //input_cube double len_cell; double len_x, len_y, len_z; //input_partc int partc_num; double** partc_pos; int end_flag; //output OUTPUT_struct* output_info; seed = 10; srand(seed); 
//INPUT information: len_cell = 2.0; len_x = 10.0; len_y = 10.0; len_z = 10.0; // partc_num = 10; // partc_pos = (double**)malloc(partc_num*sizeof(double*)); // for(loop=0; loop<partc_num; loop++){ // partc_pos[loop] = (double*)malloc(3*sizeof(double)); // partc_pos[loop][0] = (double)(rand()%CONST_MAX_RAND)/(CONST_MAX_RAND-1)*len_x; // partc_pos[loop][1] = (double)(rand()%CONST_MAX_RAND)/(CONST_MAX_RAND-1)*len_y; // partc_pos[loop][2] = (double)(rand()%CONST_MAX_RAND)/(CONST_MAX_RAND-1)*len_z; // } output_info = func_partc_incell_stat(partc_pos_res, partc_num_def, len_cell, len_x, len_y, len_z); //PRINT basic information //if (1){ //} //PRINT output information //if (1){ // func_print_output_info(output_info); //} //scanf("%d",&end_flag); // finished building cell list RanMars * random = new RanMars(34387); int N = 4000; double** r = new double* [N]; double** v = new double* [N]; double** f = new double* [N]; for(int i = 0; i < N; i++) { r[i] = new double[3]; v[i] = new double[3]; f[i] = new double[3]; } for (int i = 0; i < N; i++) { r[i][0] = partc_pos_res[i][0]; r[i][1] = partc_pos_res[i][1]; r[i][2] = partc_pos_res[i][2]; } for(int i = 0; i < N; i++) { for(int j = 0; j < 3; j++) { v[i][j] = 0; f[i][j] = 0; } } if (!type_of_device) { cpu_dpd(r, f, v, select, random, output_info, len_cell, len_x, len_y, len_z); } else { gpu_dpd(r, f, v, select, random, output_info,len_cell, len_x, len_y, len_z); } //memory release for(int i = 0; i < N; i++) { delete(r[i]); delete(v[i]); delete(f[i]); } delete(r); delete(v); delete(f); delete(random); cout << "position is read, f is computed" << endl; }
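One detail of the DPD driver above worth illustrating: cudaMalloc((void**)&d_state, N) reserves N bytes, while setup_kernel initialises N full curandState objects, so the state buffer is sized in elements in the sketch below (make_states is a hypothetical helper reusing setup_kernel from the file above, with n equal to the particle count N that the kernel uses as its bound):

    #include <cuda_runtime.h>
    #include <curand_kernel.h>

    // Allocates and seeds one curandState per particle.
    curandState* make_states(int n, int threads)
    {
        curandState* d_state = nullptr;
        cudaMalloc(&d_state, n * sizeof(curandState));           // elements, not bytes
        setup_kernel<<<(n + threads - 1) / threads, threads>>>(d_state);
        cudaDeviceSynchronize();
        return d_state;
    }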
a840a8bd7321ba4e1717dc10c5dc73f0b8edb7dd.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> namespace { template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) { const auto s = sigmoid(z); return (1.0 - s) * s; } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh(scalar_t z) { const auto t = tanh(z); return 1 - (t * t); } template <typename scalar_t> __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) { return fmax(0.0, z) + fmin(0.0, alpha * (exp(z) - 1.0)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) { const auto e = exp(z); const auto d_relu = z < 0.0 ? 0.0 : 1.0; return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0); } template <typename scalar_t> __global__ void lltm_cuda_forward_kernel( const scalar_t* __restrict__ gates, const scalar_t* __restrict__ old_cell, scalar_t* __restrict__ new_h, scalar_t* __restrict__ new_cell, scalar_t* __restrict__ input_gate, scalar_t* __restrict__ output_gate, scalar_t* __restrict__ candidate_cell, size_t state_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int index = blockIdx.y * state_size + column; const int gates_row = blockIdx.y * (state_size * 3); if (column < state_size) { input_gate[index] = sigmoid(gates[gates_row + column]); output_gate[index] = sigmoid(gates[gates_row + state_size + column]); candidate_cell[index] = elu(gates[gates_row + 2 * state_size + column]); new_cell[index] = old_cell[index] + candidate_cell[index] * input_gate[index]; new_h[index] = tanh(new_cell[index]) * output_gate[index]; } } template <typename scalar_t> __global__ void lltm_cuda_backward_kernel( scalar_t* __restrict__ d_old_cell, scalar_t* __restrict__ d_gates, const scalar_t* __restrict__ grad_h, const scalar_t* __restrict__ grad_cell, const scalar_t* __restrict__ new_cell, const scalar_t* __restrict__ input_gate, const scalar_t* __restrict__ output_gate, const scalar_t* __restrict__ candidate_cell, const scalar_t* __restrict__ gate_weights, size_t state_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int index = blockIdx.y * state_size + column; const int gates_row = blockIdx.y * (state_size * 3); if (column < state_size) { const auto d_output_gate = tanh(new_cell[index]) * grad_h[index]; const auto d_tanh_new_cell = output_gate[index] * grad_h[index]; const auto d_new_cell = d_tanh(new_cell[index]) * d_tanh_new_cell + grad_cell[index]; d_old_cell[index] = d_new_cell; const auto d_candidate_cell = input_gate[index] * d_new_cell; const auto d_input_gate = candidate_cell[index] * d_new_cell; const auto input_gate_index = gates_row + column; const auto output_gate_index = gates_row + state_size + column; const auto candidate_cell_index = gates_row + 2 * state_size + column; d_gates[input_gate_index] = d_input_gate * d_sigmoid(gate_weights[input_gate_index]); d_gates[output_gate_index] = d_output_gate * d_sigmoid(gate_weights[output_gate_index]); d_gates[candidate_cell_index] = d_candidate_cell * d_elu(gate_weights[candidate_cell_index]); } } } // namespace std::vector<at::Tensor> band_cuda_forward( at::Tensor input, at::Tensor weights, at::Tensor bias, at::Tensor old_h, at::Tensor old_cell) { auto X = at::cat({old_h, input}, /*dim=*/1); auto gates = at::addmm(bias, X, 
weights.transpose(0, 1)); const auto batch_size = old_cell.size(0); const auto state_size = old_cell.size(1); auto new_h = at::zeros_like(old_cell); auto new_cell = at::zeros_like(old_cell); auto input_gate = at::zeros_like(old_cell); auto output_gate = at::zeros_like(old_cell); auto candidate_cell = at::zeros_like(old_cell); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( lltm_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, gates.data<scalar_t>(), old_cell.data<scalar_t>(), new_h.data<scalar_t>(), new_cell.data<scalar_t>(), input_gate.data<scalar_t>(), output_gate.data<scalar_t>(), candidate_cell.data<scalar_t>(), state_size); })); return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}; } std::vector<at::Tensor> band_cuda_backward( at::Tensor grad_h, at::Tensor grad_cell, at::Tensor new_cell, at::Tensor input_gate, at::Tensor output_gate, at::Tensor candidate_cell, at::Tensor X, at::Tensor gate_weights, at::Tensor weights) { auto d_old_cell = at::zeros_like(new_cell); auto d_gates = at::zeros_like(gate_weights); const auto batch_size = new_cell.size(0); const auto state_size = new_cell.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( lltm_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, d_old_cell.data<scalar_t>(), d_gates.data<scalar_t>(), grad_h.data<scalar_t>(), grad_cell.data<scalar_t>(), new_cell.data<scalar_t>(), input_gate.data<scalar_t>(), output_gate.data<scalar_t>(), candidate_cell.data<scalar_t>(), gate_weights.data<scalar_t>(), state_size); })); auto d_weights = d_gates.t().mm(X); auto d_bias = d_gates.sum(/*dim=*/0, /*keepdim=*/true); auto d_X = d_gates.mm(weights); auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size); auto d_input = d_X.slice(/*dim=*/1, state_size); return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates}; }
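The activation helpers at the top of this file encode standard derivative identities; written out for reference (these restate the device code above, nothing more):

// sigmoid(z) = 1 / (1 + exp(-z))   =>  d_sigmoid(z) = sigmoid(z) * (1 - sigmoid(z))
// tanh'(z)   = 1 - tanh(z)^2       =>  d_tanh(z)    = 1 - tanh(z) * tanh(z)
// elu(z)     = z                       for z >= 0
//            = alpha * (exp(z) - 1)    for z <  0
// =>  d_elu(z) = 1 for z >= 0, and alpha * exp(z) for z < 0,
//     which is exactly d_relu + ((alpha * (e - 1.0)) < 0.0 ? alpha * e : 0.0) above (for alpha > 0).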
a840a8bd7321ba4e1717dc10c5dc73f0b8edb7dd.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> namespace { template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) { const auto s = sigmoid(z); return (1.0 - s) * s; } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh(scalar_t z) { const auto t = tanh(z); return 1 - (t * t); } template <typename scalar_t> __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) { return fmax(0.0, z) + fmin(0.0, alpha * (exp(z) - 1.0)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) { const auto e = exp(z); const auto d_relu = z < 0.0 ? 0.0 : 1.0; return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0); } template <typename scalar_t> __global__ void lltm_cuda_forward_kernel( const scalar_t* __restrict__ gates, const scalar_t* __restrict__ old_cell, scalar_t* __restrict__ new_h, scalar_t* __restrict__ new_cell, scalar_t* __restrict__ input_gate, scalar_t* __restrict__ output_gate, scalar_t* __restrict__ candidate_cell, size_t state_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int index = blockIdx.y * state_size + column; const int gates_row = blockIdx.y * (state_size * 3); if (column < state_size) { input_gate[index] = sigmoid(gates[gates_row + column]); output_gate[index] = sigmoid(gates[gates_row + state_size + column]); candidate_cell[index] = elu(gates[gates_row + 2 * state_size + column]); new_cell[index] = old_cell[index] + candidate_cell[index] * input_gate[index]; new_h[index] = tanh(new_cell[index]) * output_gate[index]; } } template <typename scalar_t> __global__ void lltm_cuda_backward_kernel( scalar_t* __restrict__ d_old_cell, scalar_t* __restrict__ d_gates, const scalar_t* __restrict__ grad_h, const scalar_t* __restrict__ grad_cell, const scalar_t* __restrict__ new_cell, const scalar_t* __restrict__ input_gate, const scalar_t* __restrict__ output_gate, const scalar_t* __restrict__ candidate_cell, const scalar_t* __restrict__ gate_weights, size_t state_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int index = blockIdx.y * state_size + column; const int gates_row = blockIdx.y * (state_size * 3); if (column < state_size) { const auto d_output_gate = tanh(new_cell[index]) * grad_h[index]; const auto d_tanh_new_cell = output_gate[index] * grad_h[index]; const auto d_new_cell = d_tanh(new_cell[index]) * d_tanh_new_cell + grad_cell[index]; d_old_cell[index] = d_new_cell; const auto d_candidate_cell = input_gate[index] * d_new_cell; const auto d_input_gate = candidate_cell[index] * d_new_cell; const auto input_gate_index = gates_row + column; const auto output_gate_index = gates_row + state_size + column; const auto candidate_cell_index = gates_row + 2 * state_size + column; d_gates[input_gate_index] = d_input_gate * d_sigmoid(gate_weights[input_gate_index]); d_gates[output_gate_index] = d_output_gate * d_sigmoid(gate_weights[output_gate_index]); d_gates[candidate_cell_index] = d_candidate_cell * d_elu(gate_weights[candidate_cell_index]); } } } // namespace std::vector<at::Tensor> band_cuda_forward( at::Tensor input, at::Tensor weights, at::Tensor bias, at::Tensor old_h, at::Tensor old_cell) { auto X = at::cat({old_h, input}, /*dim=*/1); auto gates = at::addmm(bias, X, weights.transpose(0, 1)); const auto batch_size = old_cell.size(0); const auto state_size = 
old_cell.size(1); auto new_h = at::zeros_like(old_cell); auto new_cell = at::zeros_like(old_cell); auto input_gate = at::zeros_like(old_cell); auto output_gate = at::zeros_like(old_cell); auto candidate_cell = at::zeros_like(old_cell); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] { lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( gates.data<scalar_t>(), old_cell.data<scalar_t>(), new_h.data<scalar_t>(), new_cell.data<scalar_t>(), input_gate.data<scalar_t>(), output_gate.data<scalar_t>(), candidate_cell.data<scalar_t>(), state_size); })); return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}; } std::vector<at::Tensor> band_cuda_backward( at::Tensor grad_h, at::Tensor grad_cell, at::Tensor new_cell, at::Tensor input_gate, at::Tensor output_gate, at::Tensor candidate_cell, at::Tensor X, at::Tensor gate_weights, at::Tensor weights) { auto d_old_cell = at::zeros_like(new_cell); auto d_gates = at::zeros_like(gate_weights); const auto batch_size = new_cell.size(0); const auto state_size = new_cell.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] { lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( d_old_cell.data<scalar_t>(), d_gates.data<scalar_t>(), grad_h.data<scalar_t>(), grad_cell.data<scalar_t>(), new_cell.data<scalar_t>(), input_gate.data<scalar_t>(), output_gate.data<scalar_t>(), candidate_cell.data<scalar_t>(), gate_weights.data<scalar_t>(), state_size); })); auto d_weights = d_gates.t().mm(X); auto d_bias = d_gates.sum(/*dim=*/0, /*keepdim=*/true); auto d_X = d_gates.mm(weights); auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size); auto d_input = d_X.slice(/*dim=*/1, state_size); return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates}; }
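Comparing this .cu file with the .hip version above, the only mechanical difference hipify introduces at the call sites is the launch syntax: CUDA's triple-chevron launch becomes a hipLaunchKernelGGL call with explicit grid, block, shared-memory and stream arguments. A minimal sketch of the mapping, using a hypothetical copy kernel purely for illustration:

// Hypothetical kernel, used only to show the launch-syntax translation.
__global__ void copy_kernel(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i];
}

// CUDA form (as in the .cu file):
//   copy_kernel<<<blocks, threads>>>(d_in, d_out, n);
// HIP form emitted by hipify (as in the .hip file), with shared memory = 0 and the default stream:
//   hipLaunchKernelGGL(copy_kernel, dim3(blocks), dim3(threads), 0, 0, d_in, d_out, n);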
e82c085140ff7f7dae9646f1187cbb1b71571482.hip
// !!! This is a file automatically generated by hipify!!! #include <cstring> #include <stdexcept> #include <cstdlib> #include <fstream> #include <iostream> #include <vector> #include "utils.h" #include "cuda_error_check.cuh" #include "initial_graph.hpp" #include "parse_graph.hpp" #include "opt.cu" #include "impl2.cu" #include "impl1.cu" enum class ProcessingType {Push, Neighbor, Own, Unknown}; enum SyncMode {InCore, OutOfCore}; enum SmemMode {UseSmem, UseNoSmem}; enum SyncMode syncMethod; enum SmemMode smemMethod; int sync = -1; int smem = -1; void mergeSort(unsigned int *arr, unsigned int *other, unsigned int *weight, unsigned int l, unsigned int r); void merge(unsigned int *arr, unsigned int *other, unsigned int *weight, unsigned int l, unsigned int m, unsigned int r); // Open files safely. template <typename T_file> void openFileToAccess( T_file& input_file, std::string file_name ) { input_file.open( file_name.c_str() ); if( !input_file ) throw std::runtime_error( "Failed to open specified file: " + file_name + "\n" ); } // Execution entry point. int main( int argc, char** argv ) { std::string usage = "\tRequired command line arguments:\n\ Input file: E.g., --input in.txt\n\ Block size: E.g., --bsize 512\n\ Block count: E.g., --bcount 192\n\ Output path: E.g., --output output.txt\n\ Processing method: E.g., --method bmf (bellman-ford), or tpe (to-process-edge), or opt (one further optimizations)\n\ Shared memory usage: E.g., --usesmem yes, or no \n\ Sync method: E.g., --sync incore, or outcore\n\ Sort method: E.g., --sort_by_dest yes, or no\n"; try { std::ifstream inputFile; std::ofstream outputFile; int selectedDevice = 0; int bsize = 0, bcount = 0; int vwsize = 32; int threads = 1; long long arbparam = 0; bool nonDirectedGraph = false; // By default, the graph is directed. ProcessingType processingMethod = ProcessingType::Unknown; syncMethod = OutOfCore; int shouldSortByDestination = -1; /******************************** * GETTING INPUT PARAMETERS. 
********************************/ for( int iii = 1; iii < argc; ++iii ) if ( !strcmp(argv[iii], "--method") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "bmf") ) processingMethod = ProcessingType::Push; else if ( !strcmp(argv[iii+1], "tpe") ) processingMethod = ProcessingType::Neighbor; else if ( !strcmp(argv[iii+1], "opt") ) processingMethod = ProcessingType::Own; else{ std::cerr << "\n Un-recognized method parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--sync") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "incore") ) { syncMethod = InCore; sync = 1; } else if ( !strcmp(argv[iii+1], "outcore") ) { syncMethod = OutOfCore; sync = 0; } else{ std::cerr << "\n Un-recognized sync parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--usesmem") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "yes") ) { smemMethod = UseSmem; smem = 1; } else if ( !strcmp(argv[iii+1], "no") ) { smemMethod = UseNoSmem; smem = 0; } else{ std::cerr << "\n Un-recognized usesmem parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--sort_by_dest") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "yes") ) { shouldSortByDestination = 1; } else if ( !strcmp(argv[iii+1], "no") ) { shouldSortByDestination = 0; } else{ std::cerr << "\n Un-recognized sort_by_dest parameter value \n\n"; exit; } } else if( !strcmp( argv[iii], "--input" ) && iii != argc-1 /*is not the last one*/) openFileToAccess< std::ifstream >( inputFile, std::string( argv[iii+1] ) ); else if( !strcmp( argv[iii], "--output" ) && iii != argc-1 /*is not the last one*/) openFileToAccess< std::ofstream >( outputFile, std::string( argv[iii+1] ) ); else if( !strcmp( argv[iii], "--bsize" ) && iii != argc-1 /*is not the last one*/) bsize = std::atoi( argv[iii+1] ); else if( !strcmp( argv[iii], "--bcount" ) && iii != argc-1 /*is not the last one*/) bcount = std::atoi( argv[iii+1] ); if(bsize <= 0 || bcount <= 0){ std::cerr << "Usage: " << usage; exit; throw std::runtime_error("\nAn initialization error happened.\nExiting."); } if( !inputFile.is_open() || processingMethod == ProcessingType::Unknown ) { std::cerr << "Usage: " << usage; throw std::runtime_error( "\nAn initialization error happened.\nExiting." ); } if( !outputFile.is_open() ) openFileToAccess< std::ofstream >( outputFile, "out.txt" ); CUDAErrorCheck( hipSetDevice( selectedDevice ) ); std::cout << "Device with ID " << selectedDevice << " is selected to process the graph.\n"; /******************************** * Read the input graph file. ********************************/ std::cout << "Collecting the input graph ...\n"; std::vector<initial_vertex> parsedGraph( 0 ); uint nEdges = parse_graph::parse( inputFile, // Input file. parsedGraph, // The parsed graph. arbparam, nonDirectedGraph ); // Arbitrary user-provided parameter. std::cout << "Input graph collected with " << parsedGraph.size() << " vertices and " << nEdges << " edges.\n"; /******************************** * Process the graph. 
********************************/ unsigned int * distance = (unsigned int *) malloc(parsedGraph.size() * sizeof(unsigned int)); unsigned int *edges_src, *edges_dest, *edges_weight; unsigned int edges_length = 0; unsigned int vertices_length = parsedGraph.size(); // get edges_length for(std::vector<int>::size_type i = 0; i != vertices_length; i++) { edges_length += parsedGraph.at(i).nbrs.size(); } // malloc edges arrays edges_src = (unsigned int *) malloc(edges_length * sizeof(unsigned int)); edges_dest = (unsigned int *) malloc(edges_length * sizeof(unsigned int)); edges_weight = (unsigned int *) malloc(edges_length * sizeof(unsigned int)); int edge_index = 0; // get values for each array for(std::vector<int>::size_type i = 0; i != vertices_length; i++) { for(std::vector<int>::size_type j = 0; j != parsedGraph.at(i).nbrs.size(); j++) { edges_src[edge_index] = parsedGraph.at(i).nbrs[j].srcIndex; edges_dest[edge_index] = i; edges_weight[edge_index] = parsedGraph.at(i).nbrs[j].edgeValue.weight; //printf("src: %u | dest: %u | weight: %u\n", edges_src[edge_index], edges_dest[edge_index], edges_weight[edge_index]); edge_index++; } } unsigned int temp_offset = 0; // sort the edges by destination if (shouldSortByDestination == 1) { mergeSort(edges_dest, edges_src, edges_weight, 0, edges_length - 1); std::cout << "Edges sorted by Destination.\n"; // if the vertices do not start at 0 if (edges_dest[0] != 0) { temp_offset = edges_dest[0]; for (unsigned int i = 0; i < edges_length; i++) { edges_dest[i] -= temp_offset; edges_src[i] -= temp_offset; } } } // sort the edges by source else if (shouldSortByDestination == 0){ mergeSort(edges_src, edges_dest, edges_weight, 0, edges_length - 1); std::cout << "Edges sorted by Source.\n"; if (edges_src[0] != 0) { temp_offset = edges_src[0]; for (unsigned int i = 0; i < edges_length; i++) { edges_dest[i] -= temp_offset; edges_src[i] -= temp_offset; } } } //int bsizes[5] = {256, 384, 512, 768, 1024}; //int bcounts[5] = {8, 5, 4, 2, 2}; //for (int i = 0; i < 5; i++) { switch(processingMethod){ case ProcessingType::Push: puller(bsize, bcount, sync, smem, distance, edges_src, edges_dest, edges_weight, edges_length, vertices_length); break; case ProcessingType::Neighbor: neighborHandler(bsize, bcount, sync, smem, distance, edges_src, edges_dest, edges_weight, edges_length, vertices_length); break; default: own(&parsedGraph, bsize, bcount); } //} // print it out to test char outputStr[100]; for(int i = 0; i < parsedGraph.size(); i++) { if (distance[i] == -1) sprintf(outputStr, "%u:INF\n", i+temp_offset); else sprintf(outputStr, "%u:%u\n", i+temp_offset, distance[i]); outputFile << outputStr; } /******************************** * It's done here. ********************************/ CUDAErrorCheck( hipDeviceReset() ); std::cout << "Done.\n"; return( EXIT_SUCCESS ); } catch( const std::exception& strException ) { std::cerr << strException.what() << "\n"; return( EXIT_FAILURE ); } catch(...) { std::cerr << "An exception has occurred." 
<< std::endl; return( EXIT_FAILURE ); } } void mergeSort(unsigned int *arr, unsigned int *other, unsigned int *weight, unsigned int l, unsigned int r) { if (l < r) { unsigned int m = l + (r - l)/2; mergeSort(arr, other, weight, l, m); mergeSort(arr, other, weight, m+1, r); merge(arr, other, weight, l, m, r); } } void merge(unsigned int *arr, unsigned int *other, unsigned int *weight, unsigned int l, unsigned int m, unsigned int r) { unsigned int i, j, k; unsigned int n1 = m - l + 1; unsigned int n2 = r - m; unsigned int *L1 = (unsigned int *) malloc(n1 * sizeof(unsigned int)); unsigned int *L2 = (unsigned int *) malloc(n1 * sizeof(unsigned int)); unsigned int *L3 = (unsigned int *) malloc(n1 * sizeof(unsigned int)); unsigned int *R1 = (unsigned int *) malloc(n2 * sizeof(unsigned int)); unsigned int *R2 = (unsigned int *) malloc(n2 * sizeof(unsigned int)); unsigned int *R3 = (unsigned int *) malloc(n2 * sizeof(unsigned int)); for (i = 0; i < n1; i++) { L1[i] = arr[l + i]; L2[i] = other[l + i]; L3[i] = weight[l + i]; } for (j = 0; j < n2; j++) { R1[j] = arr[m + j + 1]; R2[j] = other[m + j + 1]; R3[j] = weight[m + j + 1]; } i = 0; j = 0; k = l; while (i < n1 && j < n2) { if (L1[i] <= R1[j]) { arr[k] = L1[i]; other[k] = L2[i]; weight[k] = L3[i]; i++; } else { arr[k] = R1[j]; other[k] = R2[j]; weight[k] = R3[j]; j++; } k++; } while (i < n1) { arr[k] = L1[i]; other[k] = L2[i]; weight[k] = L3[i]; i++; k++; } while (j < n2) { arr[k] = R1[j]; other[k] = R2[j]; weight[k] = R3[j]; j++; k++; } free(L1); free(L2); free(L3); free(R1); free(R2); free(R3); }
e82c085140ff7f7dae9646f1187cbb1b71571482.cu
#include <cstring> #include <stdexcept> #include <cstdlib> #include <fstream> #include <iostream> #include <vector> #include "utils.h" #include "cuda_error_check.cuh" #include "initial_graph.hpp" #include "parse_graph.hpp" #include "opt.cu" #include "impl2.cu" #include "impl1.cu" enum class ProcessingType {Push, Neighbor, Own, Unknown}; enum SyncMode {InCore, OutOfCore}; enum SmemMode {UseSmem, UseNoSmem}; enum SyncMode syncMethod; enum SmemMode smemMethod; int sync = -1; int smem = -1; void mergeSort(unsigned int *arr, unsigned int *other, unsigned int *weight, unsigned int l, unsigned int r); void merge(unsigned int *arr, unsigned int *other, unsigned int *weight, unsigned int l, unsigned int m, unsigned int r); // Open files safely. template <typename T_file> void openFileToAccess( T_file& input_file, std::string file_name ) { input_file.open( file_name.c_str() ); if( !input_file ) throw std::runtime_error( "Failed to open specified file: " + file_name + "\n" ); } // Execution entry point. int main( int argc, char** argv ) { std::string usage = "\tRequired command line arguments:\n\ Input file: E.g., --input in.txt\n\ Block size: E.g., --bsize 512\n\ Block count: E.g., --bcount 192\n\ Output path: E.g., --output output.txt\n\ Processing method: E.g., --method bmf (bellman-ford), or tpe (to-process-edge), or opt (one further optimizations)\n\ Shared memory usage: E.g., --usesmem yes, or no \n\ Sync method: E.g., --sync incore, or outcore\n\ Sort method: E.g., --sort_by_dest yes, or no\n"; try { std::ifstream inputFile; std::ofstream outputFile; int selectedDevice = 0; int bsize = 0, bcount = 0; int vwsize = 32; int threads = 1; long long arbparam = 0; bool nonDirectedGraph = false; // By default, the graph is directed. ProcessingType processingMethod = ProcessingType::Unknown; syncMethod = OutOfCore; int shouldSortByDestination = -1; /******************************** * GETTING INPUT PARAMETERS. 
********************************/ for( int iii = 1; iii < argc; ++iii ) if ( !strcmp(argv[iii], "--method") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "bmf") ) processingMethod = ProcessingType::Push; else if ( !strcmp(argv[iii+1], "tpe") ) processingMethod = ProcessingType::Neighbor; else if ( !strcmp(argv[iii+1], "opt") ) processingMethod = ProcessingType::Own; else{ std::cerr << "\n Un-recognized method parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--sync") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "incore") ) { syncMethod = InCore; sync = 1; } else if ( !strcmp(argv[iii+1], "outcore") ) { syncMethod = OutOfCore; sync = 0; } else{ std::cerr << "\n Un-recognized sync parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--usesmem") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "yes") ) { smemMethod = UseSmem; smem = 1; } else if ( !strcmp(argv[iii+1], "no") ) { smemMethod = UseNoSmem; smem = 0; } else{ std::cerr << "\n Un-recognized usesmem parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--sort_by_dest") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "yes") ) { shouldSortByDestination = 1; } else if ( !strcmp(argv[iii+1], "no") ) { shouldSortByDestination = 0; } else{ std::cerr << "\n Un-recognized sort_by_dest parameter value \n\n"; exit; } } else if( !strcmp( argv[iii], "--input" ) && iii != argc-1 /*is not the last one*/) openFileToAccess< std::ifstream >( inputFile, std::string( argv[iii+1] ) ); else if( !strcmp( argv[iii], "--output" ) && iii != argc-1 /*is not the last one*/) openFileToAccess< std::ofstream >( outputFile, std::string( argv[iii+1] ) ); else if( !strcmp( argv[iii], "--bsize" ) && iii != argc-1 /*is not the last one*/) bsize = std::atoi( argv[iii+1] ); else if( !strcmp( argv[iii], "--bcount" ) && iii != argc-1 /*is not the last one*/) bcount = std::atoi( argv[iii+1] ); if(bsize <= 0 || bcount <= 0){ std::cerr << "Usage: " << usage; exit; throw std::runtime_error("\nAn initialization error happened.\nExiting."); } if( !inputFile.is_open() || processingMethod == ProcessingType::Unknown ) { std::cerr << "Usage: " << usage; throw std::runtime_error( "\nAn initialization error happened.\nExiting." ); } if( !outputFile.is_open() ) openFileToAccess< std::ofstream >( outputFile, "out.txt" ); CUDAErrorCheck( cudaSetDevice( selectedDevice ) ); std::cout << "Device with ID " << selectedDevice << " is selected to process the graph.\n"; /******************************** * Read the input graph file. ********************************/ std::cout << "Collecting the input graph ...\n"; std::vector<initial_vertex> parsedGraph( 0 ); uint nEdges = parse_graph::parse( inputFile, // Input file. parsedGraph, // The parsed graph. arbparam, nonDirectedGraph ); // Arbitrary user-provided parameter. std::cout << "Input graph collected with " << parsedGraph.size() << " vertices and " << nEdges << " edges.\n"; /******************************** * Process the graph. 
********************************/ unsigned int * distance = (unsigned int *) malloc(parsedGraph.size() * sizeof(unsigned int)); unsigned int *edges_src, *edges_dest, *edges_weight; unsigned int edges_length = 0; unsigned int vertices_length = parsedGraph.size(); // get edges_length for(std::vector<int>::size_type i = 0; i != vertices_length; i++) { edges_length += parsedGraph.at(i).nbrs.size(); } // malloc edges arrays edges_src = (unsigned int *) malloc(edges_length * sizeof(unsigned int)); edges_dest = (unsigned int *) malloc(edges_length * sizeof(unsigned int)); edges_weight = (unsigned int *) malloc(edges_length * sizeof(unsigned int)); int edge_index = 0; // get values for each array for(std::vector<int>::size_type i = 0; i != vertices_length; i++) { for(std::vector<int>::size_type j = 0; j != parsedGraph.at(i).nbrs.size(); j++) { edges_src[edge_index] = parsedGraph.at(i).nbrs[j].srcIndex; edges_dest[edge_index] = i; edges_weight[edge_index] = parsedGraph.at(i).nbrs[j].edgeValue.weight; //printf("src: %u | dest: %u | weight: %u\n", edges_src[edge_index], edges_dest[edge_index], edges_weight[edge_index]); edge_index++; } } unsigned int temp_offset = 0; // sort the edges by destination if (shouldSortByDestination == 1) { mergeSort(edges_dest, edges_src, edges_weight, 0, edges_length - 1); std::cout << "Edges sorted by Destination.\n"; // if the vertices do not start at 0 if (edges_dest[0] != 0) { temp_offset = edges_dest[0]; for (unsigned int i = 0; i < edges_length; i++) { edges_dest[i] -= temp_offset; edges_src[i] -= temp_offset; } } } // sort the edges by source else if (shouldSortByDestination == 0){ mergeSort(edges_src, edges_dest, edges_weight, 0, edges_length - 1); std::cout << "Edges sorted by Source.\n"; if (edges_src[0] != 0) { temp_offset = edges_src[0]; for (unsigned int i = 0; i < edges_length; i++) { edges_dest[i] -= temp_offset; edges_src[i] -= temp_offset; } } } //int bsizes[5] = {256, 384, 512, 768, 1024}; //int bcounts[5] = {8, 5, 4, 2, 2}; //for (int i = 0; i < 5; i++) { switch(processingMethod){ case ProcessingType::Push: puller(bsize, bcount, sync, smem, distance, edges_src, edges_dest, edges_weight, edges_length, vertices_length); break; case ProcessingType::Neighbor: neighborHandler(bsize, bcount, sync, smem, distance, edges_src, edges_dest, edges_weight, edges_length, vertices_length); break; default: own(&parsedGraph, bsize, bcount); } //} // print it out to test char outputStr[100]; for(int i = 0; i < parsedGraph.size(); i++) { if (distance[i] == -1) sprintf(outputStr, "%u:INF\n", i+temp_offset); else sprintf(outputStr, "%u:%u\n", i+temp_offset, distance[i]); outputFile << outputStr; } /******************************** * It's done here. ********************************/ CUDAErrorCheck( cudaDeviceReset() ); std::cout << "Done.\n"; return( EXIT_SUCCESS ); } catch( const std::exception& strException ) { std::cerr << strException.what() << "\n"; return( EXIT_FAILURE ); } catch(...) { std::cerr << "An exception has occurred." 
<< std::endl; return( EXIT_FAILURE ); } } void mergeSort(unsigned int *arr, unsigned int *other, unsigned int *weight, unsigned int l, unsigned int r) { if (l < r) { unsigned int m = l + (r - l)/2; mergeSort(arr, other, weight, l, m); mergeSort(arr, other, weight, m+1, r); merge(arr, other, weight, l, m, r); } } void merge(unsigned int *arr, unsigned int *other, unsigned int *weight, unsigned int l, unsigned int m, unsigned int r) { unsigned int i, j, k; unsigned int n1 = m - l + 1; unsigned int n2 = r - m; unsigned int *L1 = (unsigned int *) malloc(n1 * sizeof(unsigned int)); unsigned int *L2 = (unsigned int *) malloc(n1 * sizeof(unsigned int)); unsigned int *L3 = (unsigned int *) malloc(n1 * sizeof(unsigned int)); unsigned int *R1 = (unsigned int *) malloc(n2 * sizeof(unsigned int)); unsigned int *R2 = (unsigned int *) malloc(n2 * sizeof(unsigned int)); unsigned int *R3 = (unsigned int *) malloc(n2 * sizeof(unsigned int)); for (i = 0; i < n1; i++) { L1[i] = arr[l + i]; L2[i] = other[l + i]; L3[i] = weight[l + i]; } for (j = 0; j < n2; j++) { R1[j] = arr[m + j + 1]; R2[j] = other[m + j + 1]; R3[j] = weight[m + j + 1]; } i = 0; j = 0; k = l; while (i < n1 && j < n2) { if (L1[i] <= R1[j]) { arr[k] = L1[i]; other[k] = L2[i]; weight[k] = L3[i]; i++; } else { arr[k] = R1[j]; other[k] = R2[j]; weight[k] = R3[j]; j++; } k++; } while (i < n1) { arr[k] = L1[i]; other[k] = L2[i]; weight[k] = L3[i]; i++; k++; } while (j < n2) { arr[k] = R1[j]; other[k] = R2[j]; weight[k] = R3[j]; j++; k++; } free(L1); free(L2); free(L3); free(R1); free(R2); free(R3); }
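One caveat that applies to both the .hip and .cu versions of this file: the bare statements written as "exit;" in the argument-parsing error branches only name the exit function without calling it, so execution falls through instead of terminating on an unrecognized parameter. A corrected form of one such branch (a sketch of the intended behaviour, not the original code) would be:

else {
    std::cerr << "\n Un-recognized method parameter value \n\n";
    exit(EXIT_FAILURE);   // actually call exit(); the bare "exit;" in the original is a no-op
}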
1713b931297f662541329b774943f84e2b77327a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This code tests times taken to perform differnt tasks on GPU vs CPU. */ #include <stdio.h> #include <stdlib.h> #include "timerc.h" #define gerror(ans) {gpuAssert((ans), __FILE__, __LINE__);} inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void test_warp_divergence1() { if (threadIdx.x < 16) { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } else { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i - i * j / 1.435; } } } } __global__ void test_warp_divergence2() { if (threadIdx.x < 8) { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } else if (threadIdx.x < 16) { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } else if (threadIdx.x < 24) { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } else if (threadIdx.x < 32) { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } } __global__ void gpucycle() { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } void cpucycle() { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } int main(void) { hipSetDevice(0); float time = 0; cstart(); cpucycle(); cend(&time); printf("cpu time = %f\n", time); fflush(stdout); gstart(); hipLaunchKernelGGL(( gpucycle), dim3(1),dim3(32), 0, 0, ); gend(&time); printf("gpu time = %f\n", time); fflush(stdout); gstart(); hipLaunchKernelGGL(( test_warp_divergence1), dim3(1),dim3(32), 0, 0, ); gend(&time); printf("gpu time = %f\n", time); fflush(stdout); gstart(); hipLaunchKernelGGL(( test_warp_divergence2), dim3(1),dim3(32), 0, 0, ); gend(&time); printf("gpu time = %f\n", time); fflush(stdout); return 0; }
1713b931297f662541329b774943f84e2b77327a.cu
/* This code tests times taken to perform differnt tasks on GPU vs CPU. */ #include <stdio.h> #include <stdlib.h> #include "timerc.h" #define gerror(ans) {gpuAssert((ans), __FILE__, __LINE__);} inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void test_warp_divergence1() { if (threadIdx.x < 16) { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } else { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i - i * j / 1.435; } } } } __global__ void test_warp_divergence2() { if (threadIdx.x < 8) { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } else if (threadIdx.x < 16) { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } else if (threadIdx.x < 24) { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } else if (threadIdx.x < 32) { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } } __global__ void gpucycle() { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } void cpucycle() { double v = 0; for (int i = 0; i < 1000; i++) { for (int j = 0; j < i * i; j++) { v = v + i + i * j / 3.435; } } } int main(void) { cudaSetDevice(0); float time = 0; cstart(); cpucycle(); cend(&time); printf("cpu time = %f\n", time); fflush(stdout); gstart(); gpucycle<<<1,32>>>(); gend(&time); printf("gpu time = %f\n", time); fflush(stdout); gstart(); test_warp_divergence1<<<1,32>>>(); gend(&time); printf("gpu time = %f\n", time); fflush(stdout); gstart(); test_warp_divergence2<<<1,32>>>(); gend(&time); printf("gpu time = %f\n", time); fflush(stdout); return 0; }
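Both test kernels above launch a single warp of 32 threads, so the branches fall inside one warp and are serialized when they diverge; test_warp_divergence1 splits the warp into two paths with different bodies, while test_warp_divergence2 splits it into four paths whose bodies are identical. A hypothetical branch-free variant (a sketch only, not part of the lab code) keeps every thread on the same instructions and selects the coefficients by thread index instead:

__global__ void test_no_divergence() {
    // Per-thread constants replace the divergent if/else bodies of test_warp_divergence1.
    double sign = (threadIdx.x < 16) ? 1.0 : -1.0;
    double coef = (threadIdx.x < 16) ? 3.435 : 1.435;
    double v = 0;
    for (int i = 0; i < 1000; i++) {
        for (int j = 0; j < i * i; j++) {
            v = v + i + sign * (i * j / coef);
        }
    }
}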
f805ff2a58505314e2213d643c6e94ec25f92acf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "core/common_cu.h" #include "nodes/add.h" __global__ void AddKernelForward(const int n, const float alpha, const float *a, const float beta, const float *b, float *c) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) c[i] = alpha * a[i] + beta * b[i]; } __global__ void AddKernelBackward(const int n, const float *dy, const float scale, float * __restrict__ dx) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { dx[i] = scale * dy[i]; } } Add::Add(deepflow::NodeParam *param) : Node(param) { LOG_IF(FATAL, param->has_add_param() == false) << "param.has_add_param() == false"; } std::string Add::op_name() const { std::string op; if (_alpha == 1 && _beta == 1) op = "add"; else if (_alpha == 1 && _beta == -1) op = "subtract"; else { op = "add"; } return op; } void Add::init() { auto a = _inputs[0]; auto ad = a->dims(); auto b = _inputs[1]; auto bd = b->dims(); _alpha = _param->add_param().alpha(); _beta = _param->add_param().beta(); LOG_IF(FATAL, a->value()->size() != b->value()->size()) << _name << " - Different input sizes: " << a->value()->shape() << " vs " << b->value()->shape() ; _outputs[0]->initValue(_inputs[0]->value()->dims()); _outputs[0]->initDiff(); } void Add::forward() { // C(m,n) = A(m,n) + B(m,n) auto size = _outputs[0]->value()->size(); AddKernelForward << <numOfBlocks(size), maxThreadsPerBlock >> >(size , _alpha, (float*)_inputs[0]->value()->data(),_beta, (float*)_inputs[1]->value()->data(), (float*)_outputs[0]->value()->mutableData()); DF_KERNEL_CHECK(); } void Add::backward() { auto size = _outputs[0]->diff()->size(); if (_inputs[0]->diff()) { AddKernelBackward << <numOfBlocks(size), maxThreadsPerBlock >> > (size, (float*)_outputs[0]->diff()->data(), _alpha, (float*)_inputs[0]->diff()->mutableData()); DF_KERNEL_CHECK(); } if (_inputs[1]->diff()) { AddKernelBackward << <numOfBlocks(size), maxThreadsPerBlock >> > (size, (float*)_outputs[0]->diff()->data(), _beta, (float*)_inputs[1]->diff()->mutableData()); DF_KERNEL_CHECK(); } } void Add::setAlpha(float alpha) { _alpha = alpha; } void Add::setBeta(float beta) { _beta = beta; } std::string Add::to_cpp() const { std::string op; float print_alpha_beta = false; if (_alpha == 1 && _beta == 1) op = "add"; else if (_alpha == 1 && _beta == -1) op = "subtract"; else { op = "add"; print_alpha_beta = true; } std::string cpp = "auto " + _name + " = df." + op + "(" + _input_name_for_cpp(0) + ", " + _input_name_for_cpp(1) + ", "; if (print_alpha_beta) cpp += std::to_string(_alpha) + ", " + std::to_string(_beta); cpp += "\"" + _name + "\");"; return cpp; }
f805ff2a58505314e2213d643c6e94ec25f92acf.cu
#include "core/common_cu.h" #include "nodes/add.h" __global__ void AddKernelForward(const int n, const float alpha, const float *a, const float beta, const float *b, float *c) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) c[i] = alpha * a[i] + beta * b[i]; } __global__ void AddKernelBackward(const int n, const float *dy, const float scale, float * __restrict__ dx) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { dx[i] = scale * dy[i]; } } Add::Add(deepflow::NodeParam *param) : Node(param) { LOG_IF(FATAL, param->has_add_param() == false) << "param.has_add_param() == false"; } std::string Add::op_name() const { std::string op; if (_alpha == 1 && _beta == 1) op = "add"; else if (_alpha == 1 && _beta == -1) op = "subtract"; else { op = "add"; } return op; } void Add::init() { auto a = _inputs[0]; auto ad = a->dims(); auto b = _inputs[1]; auto bd = b->dims(); _alpha = _param->add_param().alpha(); _beta = _param->add_param().beta(); LOG_IF(FATAL, a->value()->size() != b->value()->size()) << _name << " - Different input sizes: " << a->value()->shape() << " vs " << b->value()->shape() ; _outputs[0]->initValue(_inputs[0]->value()->dims()); _outputs[0]->initDiff(); } void Add::forward() { // C(m,n) = A(m,n) + B(m,n) auto size = _outputs[0]->value()->size(); AddKernelForward << <numOfBlocks(size), maxThreadsPerBlock >> >(size , _alpha, (float*)_inputs[0]->value()->data(),_beta, (float*)_inputs[1]->value()->data(), (float*)_outputs[0]->value()->mutableData()); DF_KERNEL_CHECK(); } void Add::backward() { auto size = _outputs[0]->diff()->size(); if (_inputs[0]->diff()) { AddKernelBackward << <numOfBlocks(size), maxThreadsPerBlock >> > (size, (float*)_outputs[0]->diff()->data(), _alpha, (float*)_inputs[0]->diff()->mutableData()); DF_KERNEL_CHECK(); } if (_inputs[1]->diff()) { AddKernelBackward << <numOfBlocks(size), maxThreadsPerBlock >> > (size, (float*)_outputs[0]->diff()->data(), _beta, (float*)_inputs[1]->diff()->mutableData()); DF_KERNEL_CHECK(); } } void Add::setAlpha(float alpha) { _alpha = alpha; } void Add::setBeta(float beta) { _beta = beta; } std::string Add::to_cpp() const { std::string op; float print_alpha_beta = false; if (_alpha == 1 && _beta == 1) op = "add"; else if (_alpha == 1 && _beta == -1) op = "subtract"; else { op = "add"; print_alpha_beta = true; } std::string cpp = "auto " + _name + " = df." + op + "(" + _input_name_for_cpp(0) + ", " + _input_name_for_cpp(1) + ", "; if (print_alpha_beta) cpp += std::to_string(_alpha) + ", " + std::to_string(_beta); cpp += "\"" + _name + "\");"; return cpp; }
36a13126d4023a7697fb431f8f5b1fc59eafd54b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/types.hpp" namespace cv { namespace cuda { namespace device { namespace blend { __global__ void addSrcWeightKernel16S(const PtrStep<short> src, const PtrStep<short> src_weight, PtrStep<short> dst, PtrStep<short> dst_weight, int rows, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < rows && x < cols) { const short3 v = ((const short3*)src.ptr(y))[x]; short w = src_weight.ptr(y)[x]; ((short3*)dst.ptr(y))[x].x += short((v.x * w) >> 8); ((short3*)dst.ptr(y))[x].y += short((v.y * w) >> 8); ((short3*)dst.ptr(y))[x].z += short((v.z * w) >> 8); dst_weight.ptr(y)[x] += w; } } void addSrcWeightGpu16S(const PtrStep<short> src, const PtrStep<short> src_weight, PtrStep<short> dst, PtrStep<short> dst_weight, cv::Rect &rc) { dim3 threads(16, 16); dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y)); hipLaunchKernelGGL(( addSrcWeightKernel16S), dim3(grid), dim3(threads), 0, 0, src, src_weight, dst, dst_weight, rc.height, rc.width); cudaSafeCall(hipGetLastError()); } __global__ void addSrcWeightKernel32F(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, int rows, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < rows && x < cols) { const short3 v = ((const short3*)src.ptr(y))[x]; float w = src_weight.ptr(y)[x]; ((short3*)dst.ptr(y))[x].x += static_cast<short>(v.x * w); ((short3*)dst.ptr(y))[x].y += static_cast<short>(v.y * w); ((short3*)dst.ptr(y))[x].z += static_cast<short>(v.z * w); dst_weight.ptr(y)[x] += w; } } void addSrcWeightGpu32F(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc) { dim3 threads(16, 16); dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y)); hipLaunchKernelGGL(( addSrcWeightKernel32F), dim3(grid), dim3(threads), 0, 0, src, src_weight, dst, dst_weight, rc.height, rc.width); cudaSafeCall(hipGetLastError()); } # ifndef SUPPRESS_DREAMVU_CHANGES __global__ void addSrcWeight1(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, int rows, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < rows && x < cols) { const short3 v = ((const short3*)src.ptr(y))[x]; float w = src_weight.ptr(y)[x]; ((short3*)dst.ptr(y))[x].x = static_cast<short>(v.x * w); ((short3*)dst.ptr(y))[x].y = static_cast<short>(v.y * w); ((short3*)dst.ptr(y))[x].z = static_cast<short>(v.z * w); //dst_weight.ptr(y)[x] += w; } } __global__ void addSrcWeight2(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, int rows, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < rows && x < cols) { const short3 v = ((const short3*)src.ptr(y))[x]; float w = src_weight.ptr(y)[x]; ((short3*)dst.ptr(y))[x].x += static_cast<short>(v.x * w); ((short3*)dst.ptr(y))[x].y += static_cast<short>(v.y * w); ((short3*)dst.ptr(y))[x].z += static_cast<short>(v.z * w); //dst_weight.ptr(y)[x] += w; } } void addSrcWeight1(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc) { dim3 threads(16, 16); dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y)); 
addSrcWeight1 << <grid, threads >> >(src, src_weight, dst, dst_weight, rc.height, rc.width); cudaSafeCall(hipGetLastError()); } void addSrcWeight2(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc) { dim3 threads(16, 16); dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y)); addSrcWeight2 << <grid, threads >> >(src, src_weight, dst, dst_weight, rc.height, rc.width); cudaSafeCall(hipGetLastError()); } # endif __global__ void normalizeUsingWeightKernel16S(const PtrStep<short> weight, PtrStep<short> src, const int width, const int height) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < width && y < height) { const short3 v = ((short3*)src.ptr(y))[x]; short w = weight.ptr(y)[x]; ((short3*)src.ptr(y))[x] = make_short3(short((v.x << 8) / w), short((v.y << 8) / w), short((v.z << 8) / w)); } } void normalizeUsingWeightMapGpu16S(const PtrStep<short> weight, PtrStep<short> src, const int width, const int height) { dim3 threads(16, 16); dim3 grid(divUp(width, threads.x), divUp(height, threads.y)); hipLaunchKernelGGL(( normalizeUsingWeightKernel16S), dim3(grid), dim3(threads), 0, 0, weight, src, width, height); } __global__ void normalizeUsingWeightKernel32F(const PtrStepf weight, PtrStep<short> src, const int width, const int height) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < width && y < height) { const float WEIGHT_EPS = 1e-5f; const short3 v = ((short3*)src.ptr(y))[x]; float w = weight.ptr(y)[x]; ((short3*)src.ptr(y))[x] = make_short3(static_cast<short>(v.x / (w + WEIGHT_EPS)), static_cast<short>(v.y / (w + WEIGHT_EPS)), static_cast<short>(v.z / (w + WEIGHT_EPS))); } } void normalizeUsingWeightMapGpu32F(const PtrStepf weight, PtrStep<short> src, const int width, const int height) { dim3 threads(16, 16); dim3 grid(divUp(width, threads.x), divUp(height, threads.y)); hipLaunchKernelGGL(( normalizeUsingWeightKernel32F), dim3(grid), dim3(threads), 0, 0, weight, src, width, height); } } }}} #endif
36a13126d4023a7697fb431f8f5b1fc59eafd54b.cu
#if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/types.hpp" namespace cv { namespace cuda { namespace device { namespace blend { __global__ void addSrcWeightKernel16S(const PtrStep<short> src, const PtrStep<short> src_weight, PtrStep<short> dst, PtrStep<short> dst_weight, int rows, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < rows && x < cols) { const short3 v = ((const short3*)src.ptr(y))[x]; short w = src_weight.ptr(y)[x]; ((short3*)dst.ptr(y))[x].x += short((v.x * w) >> 8); ((short3*)dst.ptr(y))[x].y += short((v.y * w) >> 8); ((short3*)dst.ptr(y))[x].z += short((v.z * w) >> 8); dst_weight.ptr(y)[x] += w; } } void addSrcWeightGpu16S(const PtrStep<short> src, const PtrStep<short> src_weight, PtrStep<short> dst, PtrStep<short> dst_weight, cv::Rect &rc) { dim3 threads(16, 16); dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y)); addSrcWeightKernel16S<<<grid, threads>>>(src, src_weight, dst, dst_weight, rc.height, rc.width); cudaSafeCall(cudaGetLastError()); } __global__ void addSrcWeightKernel32F(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, int rows, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < rows && x < cols) { const short3 v = ((const short3*)src.ptr(y))[x]; float w = src_weight.ptr(y)[x]; ((short3*)dst.ptr(y))[x].x += static_cast<short>(v.x * w); ((short3*)dst.ptr(y))[x].y += static_cast<short>(v.y * w); ((short3*)dst.ptr(y))[x].z += static_cast<short>(v.z * w); dst_weight.ptr(y)[x] += w; } } void addSrcWeightGpu32F(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc) { dim3 threads(16, 16); dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y)); addSrcWeightKernel32F<<<grid, threads>>>(src, src_weight, dst, dst_weight, rc.height, rc.width); cudaSafeCall(cudaGetLastError()); } # ifndef SUPPRESS_DREAMVU_CHANGES __global__ void addSrcWeight1(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, int rows, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < rows && x < cols) { const short3 v = ((const short3*)src.ptr(y))[x]; float w = src_weight.ptr(y)[x]; ((short3*)dst.ptr(y))[x].x = static_cast<short>(v.x * w); ((short3*)dst.ptr(y))[x].y = static_cast<short>(v.y * w); ((short3*)dst.ptr(y))[x].z = static_cast<short>(v.z * w); //dst_weight.ptr(y)[x] += w; } } __global__ void addSrcWeight2(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, int rows, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < rows && x < cols) { const short3 v = ((const short3*)src.ptr(y))[x]; float w = src_weight.ptr(y)[x]; ((short3*)dst.ptr(y))[x].x += static_cast<short>(v.x * w); ((short3*)dst.ptr(y))[x].y += static_cast<short>(v.y * w); ((short3*)dst.ptr(y))[x].z += static_cast<short>(v.z * w); //dst_weight.ptr(y)[x] += w; } } void addSrcWeight1(const PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc) { dim3 threads(16, 16); dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y)); addSrcWeight1 << <grid, threads >> >(src, src_weight, dst, dst_weight, rc.height, rc.width); cudaSafeCall(cudaGetLastError()); } void addSrcWeight2(const 
PtrStep<short> src, const PtrStepf src_weight, PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc) { dim3 threads(16, 16); dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y)); addSrcWeight2 << <grid, threads >> >(src, src_weight, dst, dst_weight, rc.height, rc.width); cudaSafeCall(cudaGetLastError()); } # endif __global__ void normalizeUsingWeightKernel16S(const PtrStep<short> weight, PtrStep<short> src, const int width, const int height) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < width && y < height) { const short3 v = ((short3*)src.ptr(y))[x]; short w = weight.ptr(y)[x]; ((short3*)src.ptr(y))[x] = make_short3(short((v.x << 8) / w), short((v.y << 8) / w), short((v.z << 8) / w)); } } void normalizeUsingWeightMapGpu16S(const PtrStep<short> weight, PtrStep<short> src, const int width, const int height) { dim3 threads(16, 16); dim3 grid(divUp(width, threads.x), divUp(height, threads.y)); normalizeUsingWeightKernel16S<<<grid, threads>>> (weight, src, width, height); } __global__ void normalizeUsingWeightKernel32F(const PtrStepf weight, PtrStep<short> src, const int width, const int height) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < width && y < height) { const float WEIGHT_EPS = 1e-5f; const short3 v = ((short3*)src.ptr(y))[x]; float w = weight.ptr(y)[x]; ((short3*)src.ptr(y))[x] = make_short3(static_cast<short>(v.x / (w + WEIGHT_EPS)), static_cast<short>(v.y / (w + WEIGHT_EPS)), static_cast<short>(v.z / (w + WEIGHT_EPS))); } } void normalizeUsingWeightMapGpu32F(const PtrStepf weight, PtrStep<short> src, const int width, const int height) { dim3 threads(16, 16); dim3 grid(divUp(width, threads.x), divUp(height, threads.y)); normalizeUsingWeightKernel32F<<<grid, threads>>> (weight, src, width, height); } } }}} #endif
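The 16S path in this file keeps blend weights in 8-bit fixed point: addSrcWeightKernel16S accumulates (v * w) >> 8 per channel while summing the raw weights, and normalizeUsingWeightKernel16S later computes (v << 8) / w, so the two shifts cancel and the result approximates the weighted average sum_k(v_k * w_k) / sum_k(w_k); the 32F path does the same in floating point, with WEIGHT_EPS guarding against a zero weight sum. A small worked example with two sources contributing to one channel:

// v1 = 100, w1 = 128 and v2 = 200, w2 = 128 (each weight is 0.5 in Q8 fixed point):
//   accumulate: (100 * 128) >> 8 = 50,  (200 * 128) >> 8 = 100  -> dst = 150, dst_weight = 256
//   normalize:  (150 << 8) / 256 = 150
// which matches the exact weighted average 0.5 * 100 + 0.5 * 200 = 150.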
715b62ecc2ec53bd49fe249ce5c590c3cc439506.hip
// !!! This is a file automatically generated by hipify!!! /* Lab 5 Exercise 3 Program In this program we experiment with using texture memory whilst blurring an image using a GPU. We will explicitly use texture binding rather than using qualifiers to force memory loads through the read-only cache. There are good reasons for doing this when dealing with problems which relate to images or with problems which decompose naturally to 2D layouts. Potential benefits include improved caching, address wrapping and filtering. An image of a dog, `input.ppm`, is provided. Build and execute the code to see the result of executing the image blur kernel. You can modify the macro `SAMPLE_SIZE` to increase the scale of the blur. */ /* GPU devices possess several different types of memory and caches, including: Registers (very fast, but limited space), Thread-Local Global memory (very slow), Shared memory (allows data to be shared between threads in the same block), Constant memory (useful for broadcasting the same value to multiple threads within a half-warp or larger thread block), L1 Cache/read-only memory (useful when multiple threads access the same piece of data), texture memory (useful for normalized values and reading 2D image data). */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <hip/hip_vector_types.h> #include <vector_functions.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_texture_types.h" #include "texture_fetch_functions.hpp" #define IMAGE_DIM 2048 #define SAMPLE_SIZE 6 #define NUMBER_OF_SAMPLES (((2*SAMPLE_SIZE)+1)*((2*SAMPLE_SIZE)+1)) // Takes input, multiply by pseudo-random `int` between `0` and `RAND_MAX`, then divide by `RAND_MAX` #define rnd( x ) (x * rand() / RAND_MAX) // Approximates multiplication of `x` by a uniform random number from [0,1] #define INF 2e10f // Represents an infinite value void output_image_file(uchar4* image); void input_image_file(char* filename, uchar4* image); void checkCUDAError(const char *msg); /* Device Code */ __global__ void image_blur(uchar4 *image, uchar4 *image_output) { // Map from thread position in grid to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int output_offset = y * blockDim.x * gridDim.x + x; // Linearized index into the output image buffer uchar4 pixel; float4 average = make_float4(0,0,0,0); // For each thread (x,y) iterate over the sample grid of neighbouring pixels (x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2 for (int i = -SAMPLE_SIZE; i <= SAMPLE_SIZE; i++) { for (int j = -SAMPLE_SIZE; j <= SAMPLE_SIZE; j++) { // Calculate the position of an offset pixel within the sample grid around (x,y) int x_offset = x + i; int y_offset = y + j; // Wrap the boundaries of the image like a torus in case the sample grid leaves the image boundaries // At most one of each pair of conditional statements will hold for any offset pixel, and there is no interference if (x_offset < 0) { x_offset += IMAGE_DIM; } if (x_offset >= IMAGE_DIM) { x_offset -= IMAGE_DIM; } if (y_offset < 0) { y_offset += IMAGE_DIM; } if (y_offset >= IMAGE_DIM) { y_offset -= IMAGE_DIM; } // Linearized index of the offset pixel used to read from the input image buffer int offset = y_offset * blockDim.x * gridDim.x + x_offset; pixel = image[offset]; // Sum the rgb values over the sample grid `(x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2` average.x += pixel.x; average.y += pixel.y; average.z += pixel.z; } } // Calculate the average of the rgb values by 
dividing by `(2s+1)^2` average.x /= (float)NUMBER_OF_SAMPLES; average.y /= (float)NUMBER_OF_SAMPLES; average.z /= (float)NUMBER_OF_SAMPLES; // Cast the average rgb values from `float` to `unsigned char` and write them to `image_output` at the linear index for (x,y) image_output[output_offset].x = (unsigned char)average.x; image_output[output_offset].y = (unsigned char)average.y; image_output[output_offset].z = (unsigned char)average.z; image_output[output_offset].w = 255; // Leave the `a` value at full opacity (see "RGBA Colour Model") } /* 3.1 Create a copy of the `image_blur` kernel called `image_blur_texture1D`. Declare a 1-dimensional texture reference with `hipReadModeElementType`. Modify the new kernel to perform a texture lookup using `tex1Dfetch`. Modify the host code to execute the `texture1D` version of the kernel after the first version saving the timing value to the `.y` component of the variable `ms`. You will need to add appropriate host code to bind and unbind the texture before and after the kernel execution, respectively. */ texture<uchar4, hipTextureType1D, hipReadModeElementType> sample1D; // Texture references can only be declared as static global variables and cannot be passed as function/kernel arguments __global__ void image_blur_texture1D(uchar4* image_output) { // Map from thread position in grid to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int output_offset = y * blockDim.x * gridDim.x + x; // Linearized index into the output image buffer uchar4 pixel; float4 average = make_float4(0, 0, 0, 0); // For each thread (x,y) iterate over the sample grid of neighbouring pixels (x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2 for (int i = -SAMPLE_SIZE; i <= SAMPLE_SIZE; i++) { for (int j = -SAMPLE_SIZE; j <= SAMPLE_SIZE; j++) { // Calculate the position of an offset pixel within the sample grid around (x,y) int x_offset = x + i; int y_offset = y + j; // Wrap the boundaries of the image like a torus in case the sample grid leaves the image boundaries // At most one of each pair of conditional statements will hold for any offset pixel, and there is no interference if (x_offset < 0) { x_offset += IMAGE_DIM; } if (x_offset >= IMAGE_DIM) { x_offset -= IMAGE_DIM; } if (y_offset < 0) { y_offset += IMAGE_DIM; } if (y_offset >= IMAGE_DIM) { y_offset -= IMAGE_DIM; } int offset = y_offset * blockDim.x * gridDim.x + x_offset; // Linearized index of the offset pixel used to read from texture memory pixel = tex1Dfetch(sample1D, offset); // Sum the rgb values over the sample grid `(x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2` average.x += pixel.x; average.y += pixel.y; average.z += pixel.z; } } // Calculate the average of the rgb values by dividing by `(2s+1)^2` average.x /= (float)NUMBER_OF_SAMPLES; average.y /= (float)NUMBER_OF_SAMPLES; average.z /= (float)NUMBER_OF_SAMPLES; // Cast the average rgb values from `float` to `unsigned char` and write them to `image_output` at the linear index for (x,y) image_output[output_offset].x = (unsigned char)average.x; image_output[output_offset].y = (unsigned char)average.y; image_output[output_offset].z = (unsigned char)average.z; image_output[output_offset].w = 255; // Leave the `a` value at full opacity (see "RGBA Colour Model") } /* 3.2 Create a copy of the `image_blur` kernel called `image_blur_texture2D`. Declare a 2-dimensional texture reference with `hipReadModeElementType`. Modify the new kernel to perform a texture lookup using `tex2D`. 
Modify the host code to execute the `texture2D` version of the kernel after the `texture1D` version saving the timing value to the `.z` component of the variable `ms`. You will need to add appropriate host code to bind and unbind the texture before and after the kernel execution, respectively. */ texture<uchar4, hipTextureType2D, hipReadModeElementType> sample2D; // Texture references can only be declared as static global variables and cannot be passed as function/kernel arguments __global__ void image_blur_texture2D(uchar4* image_output) { // Map from thread position in grid to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int output_offset = y * blockDim.x * gridDim.x + x; // Linearized index into the output image buffer uchar4 pixel; float4 average = make_float4(0, 0, 0, 0); // For each thread (x,y) iterate over the sample grid of neighbouring pixels (x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2 for (int i = -SAMPLE_SIZE; i <= SAMPLE_SIZE; i++) { for (int j = -SAMPLE_SIZE; j <= SAMPLE_SIZE; j++) { /* 3.3 In the case of the 2D texture version it is possible to perform wrapping of the index values without explicitly checking the values of `x_offset`, `y_offset`. To do this remove the checks from your kernel, and set the structure members `addressMode[0]`, `addressMode[1]` of your 2D texture reference to `hipAddressModeWrap` in the `main` function. */ // Calculate the position of an offset pixel within the sample grid around (x,y) int x_offset = x + i; int y_offset = y + j; // For 2D texture lookup, we don't need to calculate linearized indices pixel = tex2D(sample2D, x_offset, y_offset); // Sum the rgb values over the sample grid `(x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2` average.x += pixel.x; average.y += pixel.y; average.z += pixel.z; } } // Calculate the average of the rgb values by dividing by `(2s+1)^2` average.x /= (float)NUMBER_OF_SAMPLES; average.y /= (float)NUMBER_OF_SAMPLES; average.z /= (float)NUMBER_OF_SAMPLES; // Cast the average rgb values from `float` to `unsigned char` and write them to `image_output` at the linear index for (x,y) image_output[output_offset].x = (unsigned char)average.x; image_output[output_offset].y = (unsigned char)average.y; image_output[output_offset].z = (unsigned char)average.z; image_output[output_offset].w = 255; // Leave the `a` value at full opacity (see "RGBA Colour Model") } /* Host Code */ int main(void) { unsigned int image_size; // Memory requirement for image data uchar4 *d_image, *d_image_output; // Pointer variables for image input and output on device uchar4 *h_image; // Pointer variable for image on host hipEvent_t start, stop; // CUDA event timestamps float3 ms; // 3-tuple of timing data where .x = normal, .y = tex1D, .z = tex2D image_size = IMAGE_DIM*IMAGE_DIM*sizeof(uchar4); // Calculate memory requirements // Create GPU event timers hipEventCreate(&start); hipEventCreate(&stop); // Allocate memory on the GPU for the image input and output hipMalloc((void**)&d_image, image_size); hipMalloc((void**)&d_image_output, image_size); checkCUDAError("CUDA Malloc"); // Allocate and load host image h_image = (uchar4*)malloc(image_size); input_image_file("input.ppm", h_image); // Copy input image from host to device memory hipMemcpy(d_image, h_image, image_size, hipMemcpyHostToDevice); checkCUDAError("CUDA Memcpy Host to Device"); // CUDA grid layout dim3 blocksPerGrid(IMAGE_DIM / 16, IMAGE_DIM / 16); dim3 threadsPerBlock(16, 16); // Normal version hipEventRecord(start, 0); image_blur << 
<blocksPerGrid, threadsPerBlock >> >(d_image, d_image_output); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&ms.x, start, stop); checkCUDAError("Kernel (normal)"); /* 3.1 Execute the `texture1D` version of the kernel after the normal version, saving the timing value to the `ms.y`. You will need to bind and unbind the texture before and after the kernel execution, respectively. */ // Bind the texture reference `sample1D` declared earlier to the memory buffer for the input image `d_image` hipBindTexture(0, sample1D, d_image, image_size); checkCUDAError("Bind hipTextureType1D"); hipEventRecord(start, 0); image_blur_texture1D << <blocksPerGrid, threadsPerBlock >> > (d_image_output); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&ms.y, start, stop); hipUnbindTexture(sample1D); checkCUDAError("Kernel (tex1D)"); /* 3.2 Execute the `texture2D` kernel after the `texture1D` version, saving the timing value to the `ms.z`. You will need to bind and unbind the texture before and after the kernel execution, respectively. Moreover, the CUDA runtime requires that we provide a `hipChannelFormatDesc` when we bind 2D textures */ /* 3.3 When using 2D textures we can wrap index values around without explicitly checking them. To do this remove the checks from your kernel, and set the structure members `addressMode[0]`, `addressMode[1]` of your 2D texture reference `sample2D` to `hipAddressModeWrap` in the `main` function before binding. */ // Results in wrapping the image boundaries around like a torus when we access outside the boundaries sample2D.addressMode[0] = hipAddressModeWrap; sample2D.addressMode[1] = hipAddressModeWrap; // Declare a channel format descriptor called `desc` with data type `uchar4` hipChannelFormatDesc desc = hipCreateChannelDesc<uchar4>(); // Bind the formatted texture reference `sample2D` to the memory buffer for the input image `d_image` hipBindTexture2D(0, sample2D, d_image, desc, IMAGE_DIM, IMAGE_DIM, IMAGE_DIM * sizeof(uchar4)); checkCUDAError("Bind hipTextureType2D"); hipEventRecord(start, 0); image_blur_texture2D << <blocksPerGrid, threadsPerBlock >> > (d_image_output); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&ms.z, start, stop); checkCUDAError("Kernel (tex2D)"); hipUnbindTexture(sample2D); // Copy the blurred output image from device to host for output to file hipMemcpy(h_image, d_image_output, image_size, hipMemcpyDeviceToHost); checkCUDAError("CUDA Memcpy Device to Host"); // Output timings printf("Execution times:\n"); printf("\tNormal version: %f\n", ms.x); // 16.917631ms printf("\ttex1D version: %f\n", ms.y); // 10.342144ms printf("\ttex2D version: %f\n", ms.z); // 10.314624ms // Output image to file `output.ppm` output_image_file(h_image); // Cleanup hipEventDestroy(start); hipEventDestroy(stop); hipFree(d_image); hipFree(d_image_output); free(h_image); return 0; } void output_image_file(uchar4* image) { FILE *f; // Output file handle // Open the output file and write the header info for the `.ppm` filetype f = fopen("output.ppm", "wb"); if (f == NULL) { fprintf(stderr, "Error opening 'output.ppm' output file\n"); exit(1); } fprintf(f, "P6\n"); fprintf(f, "# COM4521 Lab 05 Exercise03\n"); fprintf(f, "%d %d\n%d\n", IMAGE_DIM, IMAGE_DIM, 255); for (int y = 0; y < IMAGE_DIM; y++) { for (int x = 0; x < IMAGE_DIM; x++) { int i = y * IMAGE_DIM + x; fwrite(&image[i], sizeof(unsigned char), 3, f); // Only write rgb values (ignoring a) for each pixel } } fclose(f); } void input_image_file(char* 
filename, uchar4* image) { FILE *f; // Input file handle char temp[256]; unsigned int x, y, s; // Open the input file and write the header info for the `.ppm` filetype // See http://netpbm.sourceforge.net/doc/ppm.html for the PPM file specification // See also https://en.wikipedia.org/wiki/Netpbm for further information and history f = fopen(filename, "rb"); if (f == NULL){ fprintf(stderr, "Error opening '%s' input file\n", filename); exit(1); } fscanf(f, "%s\n", &temp); // Read the first line of the file to a temporary buffer fscanf(f, "%d %d\n", &x, &y); // Read the image width and height to variables `x` and `y`, respectively. fscanf(f, "%d\n",&s); // Read the maximum colour value setting to `s` if ((x != IMAGE_DIM) || (y != IMAGE_DIM)) { fprintf(stderr, "Error: Input image file has wrong fixed dimensions\n"); exit(1); } for (int y = 0; y < IMAGE_DIM; y++) { for (int x = 0; x < IMAGE_DIM; x++) { int i = y * IMAGE_DIM + x; fread(&image[i], sizeof(unsigned char), 3, f); // Only read rgb values (ignoring a) for each pixel //image[i].w = 255; // Otherwise could explicitly set `a` value to full opacity } } fclose(f); } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } }
715b62ecc2ec53bd49fe249ce5c590c3cc439506.cu
/* Lab 5 Exercise 3 Program In this program we experiment with using texture memory whilst blurring an image using a GPU. We will explicitly use texture binding rather than using qualifiers to force memory loads through the read-only cache. There are good reasons for doing this when dealing with problems which relate to images or with problems which decompose naturally to 2D layouts. Potential benefits include improved caching, address wrapping and filtering. An image of a dog, `input.ppm`, is provided. Build and execute the code to see the result of executing the image blur kernel. You can modify the macro `SAMPLE_SIZE` to increase the scale of the blur. */ /* GPU devices possess several different types of memory and caches, including: Registers (very fast, but limited space), Thread-Local Global memory (very slow), Shared memory (allows data to be shared between threads in the same block), Constant memory (useful for broadcasting the same value to multiple threads within a half-warp or larger thread block), L1 Cache/read-only memory (useful when multiple threads access the same piece of data), texture memory (useful for normalized values and reading 2D image data). */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <vector_types.h> #include <vector_functions.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_texture_types.h" #include "texture_fetch_functions.hpp" #define IMAGE_DIM 2048 #define SAMPLE_SIZE 6 #define NUMBER_OF_SAMPLES (((2*SAMPLE_SIZE)+1)*((2*SAMPLE_SIZE)+1)) // Takes input, multiply by pseudo-random `int` between `0` and `RAND_MAX`, then divide by `RAND_MAX` #define rnd( x ) (x * rand() / RAND_MAX) // Approximates multiplication of `x` by a uniform random number from [0,1] #define INF 2e10f // Represents an infinite value void output_image_file(uchar4* image); void input_image_file(char* filename, uchar4* image); void checkCUDAError(const char *msg); /* Device Code */ __global__ void image_blur(uchar4 *image, uchar4 *image_output) { // Map from thread position in grid to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int output_offset = y * blockDim.x * gridDim.x + x; // Linearized index into the output image buffer uchar4 pixel; float4 average = make_float4(0,0,0,0); // For each thread (x,y) iterate over the sample grid of neighbouring pixels (x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2 for (int i = -SAMPLE_SIZE; i <= SAMPLE_SIZE; i++) { for (int j = -SAMPLE_SIZE; j <= SAMPLE_SIZE; j++) { // Calculate the position of an offset pixel within the sample grid around (x,y) int x_offset = x + i; int y_offset = y + j; // Wrap the boundaries of the image like a torus in case the sample grid leaves the image boundaries // At most one of each pair of conditional statements will hold for any offset pixel, and there is no interference if (x_offset < 0) { x_offset += IMAGE_DIM; } if (x_offset >= IMAGE_DIM) { x_offset -= IMAGE_DIM; } if (y_offset < 0) { y_offset += IMAGE_DIM; } if (y_offset >= IMAGE_DIM) { y_offset -= IMAGE_DIM; } // Linearized index of the offset pixel used to read from the input image buffer int offset = y_offset * blockDim.x * gridDim.x + x_offset; pixel = image[offset]; // Sum the rgb values over the sample grid `(x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2` average.x += pixel.x; average.y += pixel.y; average.z += pixel.z; } } // Calculate the average of the rgb values by dividing by `(2s+1)^2` average.x /= (float)NUMBER_OF_SAMPLES; average.y /= 
(float)NUMBER_OF_SAMPLES; average.z /= (float)NUMBER_OF_SAMPLES; // Cast the average rgb values from `float` to `unsigned char` and write them to `image_output` at the linear index for (x,y) image_output[output_offset].x = (unsigned char)average.x; image_output[output_offset].y = (unsigned char)average.y; image_output[output_offset].z = (unsigned char)average.z; image_output[output_offset].w = 255; // Leave the `a` value at full opacity (see "RGBA Colour Model") } /* 3.1 Create a copy of the `image_blur` kernel called `image_blur_texture1D`. Declare a 1-dimensional texture reference with `cudaReadModeElementType`. Modify the new kernel to perform a texture lookup using `tex1Dfetch`. Modify the host code to execute the `texture1D` version of the kernel after the first version saving the timing value to the `.y` component of the variable `ms`. You will need to add appropriate host code to bind and unbind the texture before and after the kernel execution, respectively. */ texture<uchar4, cudaTextureType1D, cudaReadModeElementType> sample1D; // Texture references can only be declared as static global variables and cannot be passed as function/kernel arguments __global__ void image_blur_texture1D(uchar4* image_output) { // Map from thread position in grid to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int output_offset = y * blockDim.x * gridDim.x + x; // Linearized index into the output image buffer uchar4 pixel; float4 average = make_float4(0, 0, 0, 0); // For each thread (x,y) iterate over the sample grid of neighbouring pixels (x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2 for (int i = -SAMPLE_SIZE; i <= SAMPLE_SIZE; i++) { for (int j = -SAMPLE_SIZE; j <= SAMPLE_SIZE; j++) { // Calculate the position of an offset pixel within the sample grid around (x,y) int x_offset = x + i; int y_offset = y + j; // Wrap the boundaries of the image like a torus in case the sample grid leaves the image boundaries // At most one of each pair of conditional statements will hold for any offset pixel, and there is no interference if (x_offset < 0) { x_offset += IMAGE_DIM; } if (x_offset >= IMAGE_DIM) { x_offset -= IMAGE_DIM; } if (y_offset < 0) { y_offset += IMAGE_DIM; } if (y_offset >= IMAGE_DIM) { y_offset -= IMAGE_DIM; } int offset = y_offset * blockDim.x * gridDim.x + x_offset; // Linearized index of the offset pixel used to read from texture memory pixel = tex1Dfetch(sample1D, offset); // Sum the rgb values over the sample grid `(x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2` average.x += pixel.x; average.y += pixel.y; average.z += pixel.z; } } // Calculate the average of the rgb values by dividing by `(2s+1)^2` average.x /= (float)NUMBER_OF_SAMPLES; average.y /= (float)NUMBER_OF_SAMPLES; average.z /= (float)NUMBER_OF_SAMPLES; // Cast the average rgb values from `float` to `unsigned char` and write them to `image_output` at the linear index for (x,y) image_output[output_offset].x = (unsigned char)average.x; image_output[output_offset].y = (unsigned char)average.y; image_output[output_offset].z = (unsigned char)average.z; image_output[output_offset].w = 255; // Leave the `a` value at full opacity (see "RGBA Colour Model") } /* 3.2 Create a copy of the `image_blur` kernel called `image_blur_texture2D`. Declare a 2-dimensional texture reference with `cudaReadModeElementType`. Modify the new kernel to perform a texture lookup using `tex2D`. 
Modify the host code to execute the `texture2D` version of the kernel after the `texture1D` version saving the timing value to the `.z` component of the variable `ms`. You will need to add appropriate host code to bind and unbind the texture before and after the kernel execution, respectively. */ texture<uchar4, cudaTextureType2D, cudaReadModeElementType> sample2D; // Texture references can only be declared as static global variables and cannot be passed as function/kernel arguments __global__ void image_blur_texture2D(uchar4* image_output) { // Map from thread position in grid to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int output_offset = y * blockDim.x * gridDim.x + x; // Linearized index into the output image buffer uchar4 pixel; float4 average = make_float4(0, 0, 0, 0); // For each thread (x,y) iterate over the sample grid of neighbouring pixels (x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2 for (int i = -SAMPLE_SIZE; i <= SAMPLE_SIZE; i++) { for (int j = -SAMPLE_SIZE; j <= SAMPLE_SIZE; j++) { /* 3.3 In the case of the 2D texture version it is possible to perform wrapping of the index values without explicitly checking the values of `x_offset`, `y_offset`. To do this remove the checks from your kernel, and set the structure members `addressMode[0]`, `addressMode[1]` of your 2D texture reference to `cudaAddressModeWrap` in the `main` function. */ // Calculate the position of an offset pixel within the sample grid around (x,y) int x_offset = x + i; int y_offset = y + j; // For 2D texture lookup, we don't need to calculate linearized indices pixel = tex2D(sample2D, x_offset, y_offset); // Sum the rgb values over the sample grid `(x,y) + [-SAMPLE_SIZE, SAMPLE_SIZE]^2` average.x += pixel.x; average.y += pixel.y; average.z += pixel.z; } } // Calculate the average of the rgb values by dividing by `(2s+1)^2` average.x /= (float)NUMBER_OF_SAMPLES; average.y /= (float)NUMBER_OF_SAMPLES; average.z /= (float)NUMBER_OF_SAMPLES; // Cast the average rgb values from `float` to `unsigned char` and write them to `image_output` at the linear index for (x,y) image_output[output_offset].x = (unsigned char)average.x; image_output[output_offset].y = (unsigned char)average.y; image_output[output_offset].z = (unsigned char)average.z; image_output[output_offset].w = 255; // Leave the `a` value at full opacity (see "RGBA Colour Model") } /* Host Code */ int main(void) { unsigned int image_size; // Memory requirement for image data uchar4 *d_image, *d_image_output; // Pointer variables for image input and output on device uchar4 *h_image; // Pointer variable for image on host cudaEvent_t start, stop; // CUDA event timestamps float3 ms; // 3-tuple of timing data where .x = normal, .y = tex1D, .z = tex2D image_size = IMAGE_DIM*IMAGE_DIM*sizeof(uchar4); // Calculate memory requirements // Create GPU event timers cudaEventCreate(&start); cudaEventCreate(&stop); // Allocate memory on the GPU for the image input and output cudaMalloc((void**)&d_image, image_size); cudaMalloc((void**)&d_image_output, image_size); checkCUDAError("CUDA Malloc"); // Allocate and load host image h_image = (uchar4*)malloc(image_size); input_image_file("input.ppm", h_image); // Copy input image from host to device memory cudaMemcpy(d_image, h_image, image_size, cudaMemcpyHostToDevice); checkCUDAError("CUDA Memcpy Host to Device"); // CUDA grid layout dim3 blocksPerGrid(IMAGE_DIM / 16, IMAGE_DIM / 16); dim3 threadsPerBlock(16, 16); // Normal version cudaEventRecord(start, 0); 
image_blur << <blocksPerGrid, threadsPerBlock >> >(d_image, d_image_output); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms.x, start, stop); checkCUDAError("Kernel (normal)"); /* 3.1 Execute the `texture1D` version of the kernel after the normal version, saving the timing value to the `ms.y`. You will need to bind and unbind the texture before and after the kernel execution, respectively. */ // Bind the texture reference `sample1D` declared earlier to the memory buffer for the input image `d_image` cudaBindTexture(0, sample1D, d_image, image_size); checkCUDAError("Bind cudaTextureType1D"); cudaEventRecord(start, 0); image_blur_texture1D << <blocksPerGrid, threadsPerBlock >> > (d_image_output); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms.y, start, stop); cudaUnbindTexture(sample1D); checkCUDAError("Kernel (tex1D)"); /* 3.2 Execute the `texture2D` kernel after the `texture1D` version, saving the timing value to the `ms.z`. You will need to bind and unbind the texture before and after the kernel execution, respectively. Moreover, the CUDA runtime requires that we provide a `cudaChannelFormatDesc` when we bind 2D textures */ /* 3.3 When using 2D textures we can wrap index values around without explicitly checking them. To do this remove the checks from your kernel, and set the structure members `addressMode[0]`, `addressMode[1]` of your 2D texture reference `sample2D` to `cudaAddressModeWrap` in the `main` function before binding. */ // Results in wrapping the image boundaries around like a torus when we access outside the boundaries sample2D.addressMode[0] = cudaAddressModeWrap; sample2D.addressMode[1] = cudaAddressModeWrap; // Declare a channel format descriptor called `desc` with data type `uchar4` cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>(); // Bind the formatted texture reference `sample2D` to the memory buffer for the input image `d_image` cudaBindTexture2D(0, sample2D, d_image, desc, IMAGE_DIM, IMAGE_DIM, IMAGE_DIM * sizeof(uchar4)); checkCUDAError("Bind cudaTextureType2D"); cudaEventRecord(start, 0); image_blur_texture2D << <blocksPerGrid, threadsPerBlock >> > (d_image_output); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms.z, start, stop); checkCUDAError("Kernel (tex2D)"); cudaUnbindTexture(sample2D); // Copy the blurred output image from device to host for output to file cudaMemcpy(h_image, d_image_output, image_size, cudaMemcpyDeviceToHost); checkCUDAError("CUDA Memcpy Device to Host"); // Output timings printf("Execution times:\n"); printf("\tNormal version: %f\n", ms.x); // 16.917631ms printf("\ttex1D version: %f\n", ms.y); // 10.342144ms printf("\ttex2D version: %f\n", ms.z); // 10.314624ms // Output image to file `output.ppm` output_image_file(h_image); // Cleanup cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(d_image); cudaFree(d_image_output); free(h_image); return 0; } void output_image_file(uchar4* image) { FILE *f; // Output file handle // Open the output file and write the header info for the `.ppm` filetype f = fopen("output.ppm", "wb"); if (f == NULL) { fprintf(stderr, "Error opening 'output.ppm' output file\n"); exit(1); } fprintf(f, "P6\n"); fprintf(f, "# COM4521 Lab 05 Exercise03\n"); fprintf(f, "%d %d\n%d\n", IMAGE_DIM, IMAGE_DIM, 255); for (int y = 0; y < IMAGE_DIM; y++) { for (int x = 0; x < IMAGE_DIM; x++) { int i = y * IMAGE_DIM + x; fwrite(&image[i], sizeof(unsigned char), 3, f); // Only write rgb values (ignoring a) for each pixel } } 
fclose(f); } void input_image_file(char* filename, uchar4* image) { FILE *f; // Input file handle char temp[256]; unsigned int x, y, s; // Open the input file and write the header info for the `.ppm` filetype // See http://netpbm.sourceforge.net/doc/ppm.html for the PPM file specification // See also https://en.wikipedia.org/wiki/Netpbm for further information and history f = fopen(filename, "rb"); if (f == NULL){ fprintf(stderr, "Error opening '%s' input file\n", filename); exit(1); } fscanf(f, "%s\n", &temp); // Read the first line of the file to a temporary buffer fscanf(f, "%d %d\n", &x, &y); // Read the image width and height to variables `x` and `y`, respectively. fscanf(f, "%d\n",&s); // Read the maximum colour value setting to `s` if ((x != IMAGE_DIM) || (y != IMAGE_DIM)) { fprintf(stderr, "Error: Input image file has wrong fixed dimensions\n"); exit(1); } for (int y = 0; y < IMAGE_DIM; y++) { for (int x = 0; x < IMAGE_DIM; x++) { int i = y * IMAGE_DIM + x; fread(&image[i], sizeof(unsigned char), 3, f); // Only read rgb values (ignoring a) for each pixel //image[i].w = 255; // Otherwise could explicitly set `a` value to full opacity } } fclose(f); } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } }
78f955db4f3fe394362e6ebfa209c8fa16063aef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/sparse/SparseStubs.h> #include <ATen/native/sparse/SparseBinaryOpIntersectionCommon.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/KernelUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> namespace at::native { namespace { template <typename func_t> struct CUDAKernelLauncher { static void launch(TensorIteratorBase& iter, const func_t& f) { gpu_kernel(iter, f); } }; struct MulOp { template <typename scalar_t> static FUNCAPI scalar_t apply(scalar_t a, scalar_t b) { return a * b; } }; template <> FUNCAPI bool MulOp::apply(bool a, bool b) { return a && b; } template <int nt, int vt, typename loop_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void apply_kernel(int n, loop_t loop) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < n) { loop(idx); idx += nt; } } } template <int nt, int vt, typename loop_t> void launch_kernel(int64_t n, const loop_t& loop) { TORCH_INTERNAL_ASSERT(0 <= n && n <= std::numeric_limits<int32_t>::max()); if (!n) { return; } const dim3 block(nt); const dim3 grid((n + block.x * vt - 1) / (block.x * vt)); const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( apply_kernel<nt, vt, loop_t>), dim3(grid), dim3(block), 0, stream, n, loop); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename binary_op_t, typename scalar_t, typename index_t> void binary_op_intersection_kernel( TensorIterator& iter, int64_t lhs_nnz_stride, int64_t rhs_nnz_stride) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { binary_op_intersection_kernel<binary_op_t, scalar_t, index_t>( sub_iter, lhs_nnz_stride, rhs_nnz_stride); } return; } auto* RESTRICT ptr_res_values_bytes = reinterpret_cast<char*>(iter.data_ptr(0)); const auto* RESTRICT ptr_lhs_values_bytes = reinterpret_cast<char*>(iter.data_ptr(1)); const auto* RESTRICT ptr_lhs_select_idx_bytes = reinterpret_cast<char*>(iter.data_ptr(2)); const auto* RESTRICT ptr_rhs_values_bytes = reinterpret_cast<char*>(iter.data_ptr(3)); const auto* RESTRICT ptr_rhs_select_idx_bytes = reinterpret_cast<char*>(iter.data_ptr(4)); auto offset_calc = make_offset_calculator<5>(iter); auto loop = [=] FUNCAPI (int i) { auto offsets = offset_calc.get(i); auto* RESTRICT ptr_res_values = reinterpret_cast<scalar_t*>(ptr_res_values_bytes + offsets[0]); const auto* RESTRICT ptr_lhs_values = reinterpret_cast<const scalar_t*>(ptr_lhs_values_bytes + offsets[1]); const auto lhs_nnz_idx = *reinterpret_cast<const index_t*>(ptr_lhs_select_idx_bytes + offsets[2]); const auto* RESTRICT ptr_rhs_values = reinterpret_cast<const scalar_t*>(ptr_rhs_values_bytes + offsets[3]); const auto rhs_nnz_idx = *reinterpret_cast<const index_t*>(ptr_rhs_select_idx_bytes + offsets[4]); *ptr_res_values = binary_op_t::apply( *(ptr_lhs_values + lhs_nnz_idx * lhs_nnz_stride), *(ptr_rhs_values + rhs_nnz_idx * rhs_nnz_stride)); }; launch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } template <typename binary_op_t> struct CUDAValueSelectionIntersectionKernel { static Tensor apply( const Tensor& lhs_values, const Tensor& lhs_select_idx, const Tensor& rhs_values, const Tensor& rhs_select_idx) { auto iter = make_value_selection_intersection_iter( lhs_values, lhs_select_idx, rhs_values, rhs_select_idx); auto res_values = iter.tensor(0); // If res_values is empty, we can return 
it right away. // Otherwise floating point issues with OffsetCalculator. if (!res_values.numel()) { return res_values; } const auto lhs_nnz_stride = lhs_values.stride(0); const auto rhs_nnz_stride = rhs_values.stride(0); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( ScalarType::Bool, ScalarType::Half, ScalarType::BFloat16, res_values.scalar_type(), "binary_op_intersection_cpu", [&] { AT_DISPATCH_INDEX_TYPES(lhs_select_idx.scalar_type(), "binary_op_intersection_cpu", [&] { binary_op_intersection_kernel<binary_op_t, scalar_t, index_t>( iter, lhs_nnz_stride, rhs_nnz_stride); }); }); return res_values; } }; void mul_sparse_sparse_out_cuda_kernel( Tensor& result, const Tensor& x, const Tensor& y) { using CUDAValueSelectionMulKernel = CUDAValueSelectionIntersectionKernel<MulOp>; _sparse_binary_op_intersection_kernel_out<CUDAKernelLauncher, CUDAValueSelectionMulKernel>( result, x, y ); } } REGISTER_CUDA_DISPATCH(mul_sparse_sparse_out_stub, &mul_sparse_sparse_out_cuda_kernel); } // namespace at::native
78f955db4f3fe394362e6ebfa209c8fa16063aef.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/sparse/SparseStubs.h> #include <ATen/native/sparse/SparseBinaryOpIntersectionCommon.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/KernelUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> namespace at::native { namespace { template <typename func_t> struct CUDAKernelLauncher { static void launch(TensorIteratorBase& iter, const func_t& f) { gpu_kernel(iter, f); } }; struct MulOp { template <typename scalar_t> static FUNCAPI scalar_t apply(scalar_t a, scalar_t b) { return a * b; } }; template <> FUNCAPI bool MulOp::apply(bool a, bool b) { return a && b; } template <int nt, int vt, typename loop_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void apply_kernel(int n, loop_t loop) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < n) { loop(idx); idx += nt; } } } template <int nt, int vt, typename loop_t> void launch_kernel(int64_t n, const loop_t& loop) { TORCH_INTERNAL_ASSERT(0 <= n && n <= std::numeric_limits<int32_t>::max()); if (!n) { return; } const dim3 block(nt); const dim3 grid((n + block.x * vt - 1) / (block.x * vt)); const auto stream = at::cuda::getCurrentCUDAStream(); apply_kernel<nt, vt, loop_t><<<grid, block, 0, stream>>>(n, loop); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename binary_op_t, typename scalar_t, typename index_t> void binary_op_intersection_kernel( TensorIterator& iter, int64_t lhs_nnz_stride, int64_t rhs_nnz_stride) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { binary_op_intersection_kernel<binary_op_t, scalar_t, index_t>( sub_iter, lhs_nnz_stride, rhs_nnz_stride); } return; } auto* RESTRICT ptr_res_values_bytes = reinterpret_cast<char*>(iter.data_ptr(0)); const auto* RESTRICT ptr_lhs_values_bytes = reinterpret_cast<char*>(iter.data_ptr(1)); const auto* RESTRICT ptr_lhs_select_idx_bytes = reinterpret_cast<char*>(iter.data_ptr(2)); const auto* RESTRICT ptr_rhs_values_bytes = reinterpret_cast<char*>(iter.data_ptr(3)); const auto* RESTRICT ptr_rhs_select_idx_bytes = reinterpret_cast<char*>(iter.data_ptr(4)); auto offset_calc = make_offset_calculator<5>(iter); auto loop = [=] FUNCAPI (int i) { auto offsets = offset_calc.get(i); auto* RESTRICT ptr_res_values = reinterpret_cast<scalar_t*>(ptr_res_values_bytes + offsets[0]); const auto* RESTRICT ptr_lhs_values = reinterpret_cast<const scalar_t*>(ptr_lhs_values_bytes + offsets[1]); const auto lhs_nnz_idx = *reinterpret_cast<const index_t*>(ptr_lhs_select_idx_bytes + offsets[2]); const auto* RESTRICT ptr_rhs_values = reinterpret_cast<const scalar_t*>(ptr_rhs_values_bytes + offsets[3]); const auto rhs_nnz_idx = *reinterpret_cast<const index_t*>(ptr_rhs_select_idx_bytes + offsets[4]); *ptr_res_values = binary_op_t::apply( *(ptr_lhs_values + lhs_nnz_idx * lhs_nnz_stride), *(ptr_rhs_values + rhs_nnz_idx * rhs_nnz_stride)); }; launch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } template <typename binary_op_t> struct CUDAValueSelectionIntersectionKernel { static Tensor apply( const Tensor& lhs_values, const Tensor& lhs_select_idx, const Tensor& rhs_values, const Tensor& rhs_select_idx) { auto iter = make_value_selection_intersection_iter( lhs_values, lhs_select_idx, rhs_values, rhs_select_idx); auto res_values = iter.tensor(0); // If res_values is empty, we can return it right away. // Otherwise floating point issues with OffsetCalculator. 
if (!res_values.numel()) { return res_values; } const auto lhs_nnz_stride = lhs_values.stride(0); const auto rhs_nnz_stride = rhs_values.stride(0); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( ScalarType::Bool, ScalarType::Half, ScalarType::BFloat16, res_values.scalar_type(), "binary_op_intersection_cpu", [&] { AT_DISPATCH_INDEX_TYPES(lhs_select_idx.scalar_type(), "binary_op_intersection_cpu", [&] { binary_op_intersection_kernel<binary_op_t, scalar_t, index_t>( iter, lhs_nnz_stride, rhs_nnz_stride); }); }); return res_values; } }; void mul_sparse_sparse_out_cuda_kernel( Tensor& result, const Tensor& x, const Tensor& y) { using CUDAValueSelectionMulKernel = CUDAValueSelectionIntersectionKernel<MulOp>; _sparse_binary_op_intersection_kernel_out<CUDAKernelLauncher, CUDAValueSelectionMulKernel>( result, x, y ); } } REGISTER_CUDA_DISPATCH(mul_sparse_sparse_out_stub, &mul_sparse_sparse_out_cuda_kernel); } // namespace at::native
3ced6f2ef7a53310d020c741a9d9ca1340a4122f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * * See COPYRIGHT.txt for license information */ #include <stdio.h> #include "mpi.h" #include "nvshmem.h" #include "nvshmemx.h" #undef CUDA_CHECK #define CUDA_CHECK(stmt) \ do { \ hipError_t result = (stmt); \ if (hipSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", __FILE__, __LINE__, \ hipGetErrorString(result)); \ exit(-1); \ } \ } while (0) #define MPI_CHECK(stmt) \ do { \ int result = (stmt); \ if (MPI_SUCCESS != result) { \ fprintf(stderr, "[%s:%d] MPI failed with error %d \n", __FILE__, __LINE__, result); \ exit(-1); \ } \ } while (0) __global__ void simple_shift(int *target, int mype, int npes) { int peer = (mype + 1) % npes; nvshmem_int_p(target, mype, peer); } int main(int c, char *v[]) { int *target; int rank, nranks; MPI_Comm mpi_comm; nvshmemx_init_attr_t attr; int mype, npes, mype_node; MPI_CHECK(MPI_Init(&c, &v)); MPI_CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &rank)); MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &nranks)); mpi_comm = MPI_COMM_WORLD; attr.mpi_comm = &mpi_comm; nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr); mype = nvshmem_my_pe(); npes = nvshmem_n_pes(); mype_node = nvshmem_team_my_pe(NVSHMEMX_TEAM_NODE); // application picks the device each PE will use CUDA_CHECK(hipSetDevice(mype_node)); target = (int *)nvshmem_malloc(sizeof(int)); hipLaunchKernelGGL(( simple_shift), dim3(1), dim3(1), 0, 0, target, mype, npes); CUDA_CHECK(hipDeviceSynchronize()); printf("[%d of %d] run complete \n", mype, npes); nvshmem_free(target); nvshmem_finalize(); MPI_CHECK(MPI_Finalize()); return 0; }
3ced6f2ef7a53310d020c741a9d9ca1340a4122f.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * * See COPYRIGHT.txt for license information */ #include <stdio.h> #include "mpi.h" #include "nvshmem.h" #include "nvshmemx.h" #undef CUDA_CHECK #define CUDA_CHECK(stmt) \ do { \ cudaError_t result = (stmt); \ if (cudaSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", __FILE__, __LINE__, \ cudaGetErrorString(result)); \ exit(-1); \ } \ } while (0) #define MPI_CHECK(stmt) \ do { \ int result = (stmt); \ if (MPI_SUCCESS != result) { \ fprintf(stderr, "[%s:%d] MPI failed with error %d \n", __FILE__, __LINE__, result); \ exit(-1); \ } \ } while (0) __global__ void simple_shift(int *target, int mype, int npes) { int peer = (mype + 1) % npes; nvshmem_int_p(target, mype, peer); } int main(int c, char *v[]) { int *target; int rank, nranks; MPI_Comm mpi_comm; nvshmemx_init_attr_t attr; int mype, npes, mype_node; MPI_CHECK(MPI_Init(&c, &v)); MPI_CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &rank)); MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &nranks)); mpi_comm = MPI_COMM_WORLD; attr.mpi_comm = &mpi_comm; nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr); mype = nvshmem_my_pe(); npes = nvshmem_n_pes(); mype_node = nvshmem_team_my_pe(NVSHMEMX_TEAM_NODE); // application picks the device each PE will use CUDA_CHECK(cudaSetDevice(mype_node)); target = (int *)nvshmem_malloc(sizeof(int)); simple_shift<<<1, 1>>>(target, mype, npes); CUDA_CHECK(cudaDeviceSynchronize()); printf("[%d of %d] run complete \n", mype, npes); nvshmem_free(target); nvshmem_finalize(); MPI_CHECK(MPI_Finalize()); return 0; }
9a8f467ff3e96b989c27c0513b2609646808f635.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" // CUDA runtime // Utilities and system includes __constant__ double dev_kernel[KERNEL_LENGTH*KERNEL_LENGTH]; __global__ void runConvolutionGPU(double* image, double* result, int height, int width, int step) { int tx = threadIdx.x; int ty = threadIdx.y; //int O_TILE_WIDTH = blockDim.x-(lkernel/2)*2; //int O_TILE_HEIGHT = blockDim.y-(lkernel/2)*2; int row_o = threadIdx.y + blockIdx.y*O_TILE_HEIGHT; int col_o = threadIdx.x + blockIdx.x*O_TILE_WIDTH; int row_i = row_o - KERNEL_LENGTH/2; int col_i = col_o - KERNEL_LENGTH/2; __shared__ double N_ds[BLOCK_DIM_Y][BLOCK_DIM_X]; if((row_i >= 0) && (row_i < height) && (col_i >= 0) && (col_i < height)){ N_ds[ty][tx] = image[row_i*width+col_i]; }else{ N_ds[ty][tx] = 0.0f; } __syncthreads(); double output = 0.0f; if(tx%step ==0 && ty%step==0 && ty < O_TILE_HEIGHT && tx < O_TILE_WIDTH){ for(int i=0; i<KERNEL_LENGTH; i++){ for(int j=0; j<KERNEL_LENGTH; j++){ output += dev_kernel[i*KERNEL_LENGTH+j]*N_ds[(i+ty)][(j+tx)]; } } if(row_o < height && col_o < width){ result[(row_o/step)*width/step+col_o/step] = output/9.0; } } }
9a8f467ff3e96b989c27c0513b2609646808f635.cu
#include "includes.h" // CUDA runtime // Utilities and system includes __constant__ double dev_kernel[KERNEL_LENGTH*KERNEL_LENGTH]; __global__ void runConvolutionGPU(double* image, double* result, int height, int width, int step) { int tx = threadIdx.x; int ty = threadIdx.y; //int O_TILE_WIDTH = blockDim.x-(lkernel/2)*2; //int O_TILE_HEIGHT = blockDim.y-(lkernel/2)*2; int row_o = threadIdx.y + blockIdx.y*O_TILE_HEIGHT; int col_o = threadIdx.x + blockIdx.x*O_TILE_WIDTH; int row_i = row_o - KERNEL_LENGTH/2; int col_i = col_o - KERNEL_LENGTH/2; __shared__ double N_ds[BLOCK_DIM_Y][BLOCK_DIM_X]; if((row_i >= 0) && (row_i < height) && (col_i >= 0) && (col_i < height)){ N_ds[ty][tx] = image[row_i*width+col_i]; }else{ N_ds[ty][tx] = 0.0f; } __syncthreads(); double output = 0.0f; if(tx%step ==0 && ty%step==0 && ty < O_TILE_HEIGHT && tx < O_TILE_WIDTH){ for(int i=0; i<KERNEL_LENGTH; i++){ for(int j=0; j<KERNEL_LENGTH; j++){ output += dev_kernel[i*KERNEL_LENGTH+j]*N_ds[(i+ty)][(j+tx)]; } } if(row_o < height && col_o < width){ result[(row_o/step)*width/step+col_o/step] = output/9.0; } } }
a8ae5911aa2f15d35e2d2b8e18d08a86cf4d1236.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void update_gpu( double *phim, double *res, const double *u, double *rms) { *phim -= *u; *res = 0.0; *rms += (*u) * (*u); } // CUDA kernel function __global__ void op_cuda_update( double *arg0, double *arg1, const double *__restrict arg2, double *arg3, int set_size ) { double arg3_l[1]; for ( int d=0; d<1; d++ ){ arg3_l[d]=ZERO_double; } //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call update_gpu(arg0+n*1, arg1+n*1, arg2+n*1, arg3_l); } //global reductions for ( int d=0; d<1; d++ ){ op_reduction<OP_INC>(&arg3[d+blockIdx.x*1],arg3_l[d]); } } //host stub function void op_par_loop_update(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3){ double*arg3h = (double *)arg3.data; int nargs = 4; op_arg args[4]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(8); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[8].name = name; OP_kernels[8].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: update"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_8 int nthread = OP_BLOCK_SIZE_8; #else int nthread = OP_block_size; #endif int nblocks = 200; //transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg3.data = OP_reduct_h + reduct_bytes; arg3.data_d = OP_reduct_d + reduct_bytes; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ ((double *)arg3.data)[d+b*1] = ZERO_double; } } reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); mvReductArraysToDevice(reduct_bytes); int nshared = reduct_size*nthread; hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0, (double *) arg0.data_d, (double *) arg1.data_d, (double *) arg2.data_d, (double *) arg3.data_d, set->size ); //transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg3h[d] = arg3h[d] + ((double *)arg3.data)[d+b*1]; } } arg3.data = (char *)arg3h; op_mpi_reduce(&arg3,arg3h); } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[8].time += wall_t2 - wall_t1; OP_kernels[8].transfer += (float)set->size * arg0.size * 2.0f; OP_kernels[8].transfer += (float)set->size * arg1.size * 2.0f; OP_kernels[8].transfer += (float)set->size * arg2.size; }
a8ae5911aa2f15d35e2d2b8e18d08a86cf4d1236.cu
// // auto-generated by op2.py // //user function __device__ void update_gpu( double *phim, double *res, const double *u, double *rms) { *phim -= *u; *res = 0.0; *rms += (*u) * (*u); } // CUDA kernel function __global__ void op_cuda_update( double *arg0, double *arg1, const double *__restrict arg2, double *arg3, int set_size ) { double arg3_l[1]; for ( int d=0; d<1; d++ ){ arg3_l[d]=ZERO_double; } //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call update_gpu(arg0+n*1, arg1+n*1, arg2+n*1, arg3_l); } //global reductions for ( int d=0; d<1; d++ ){ op_reduction<OP_INC>(&arg3[d+blockIdx.x*1],arg3_l[d]); } } //host stub function void op_par_loop_update(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3){ double*arg3h = (double *)arg3.data; int nargs = 4; op_arg args[4]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(8); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[8].name = name; OP_kernels[8].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: update"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_8 int nthread = OP_BLOCK_SIZE_8; #else int nthread = OP_block_size; #endif int nblocks = 200; //transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg3.data = OP_reduct_h + reduct_bytes; arg3.data_d = OP_reduct_d + reduct_bytes; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ ((double *)arg3.data)[d+b*1] = ZERO_double; } } reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); mvReductArraysToDevice(reduct_bytes); int nshared = reduct_size*nthread; op_cuda_update<<<nblocks,nthread,nshared>>>( (double *) arg0.data_d, (double *) arg1.data_d, (double *) arg2.data_d, (double *) arg3.data_d, set->size ); //transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg3h[d] = arg3h[d] + ((double *)arg3.data)[d+b*1]; } } arg3.data = (char *)arg3h; op_mpi_reduce(&arg3,arg3h); } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[8].time += wall_t2 - wall_t1; OP_kernels[8].transfer += (float)set->size * arg0.size * 2.0f; OP_kernels[8].transfer += (float)set->size * arg1.size * 2.0f; OP_kernels[8].transfer += (float)set->size * arg2.size; }
608ecf636e225599d60f8921c7b8ca1532afb6ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <Eigen/Core> #include <Eigen/Eigenvalues> #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "PixelTrackReconstructionGPU.h" using namespace Eigen; __global__ void KernelFastFitAllHits(float *hits_and_covariances, int hits_in_fit, int cumulative_size, float B, Rfit::helix_fit *results, Rfit::Matrix3xNd<4> *hits, Eigen::Matrix<float,6,4> *hits_ge, Rfit::circle_fit *circle_fit, Vector4d *fast_fit, Rfit::line_fit *line_fit) { // Reshape Eigen components from hits_and_covariances, using proper thread and block indices // Perform the fit // Store the results in the proper vector, using again correct indices // Loop for hits_in_fit times: // first 3 are the points // the rest is the covariance matrix, 3x3 int start = (blockIdx.x * blockDim.x + threadIdx.x) * hits_in_fit * 12; int helix_start = (blockIdx.x * blockDim.x + threadIdx.x); if (start >= cumulative_size) { return; } #ifdef GPU_DEBUG printf("BlockDim.x: %d, BlockIdx.x: %d, threadIdx.x: %d, start: %d, cumulative_size: %d\n", blockDim.x, blockIdx.x, threadIdx.x, start, cumulative_size); #endif // Prepare data structure (stack) for (unsigned int i = 0; i < hits_in_fit; ++i) { hits[helix_start].col(i) << hits_and_covariances[start], hits_and_covariances[start + 1], hits_and_covariances[start + 2]; start += 3; hits_ge[helix_start].col(i) << hits_and_covariances[start], hits_and_covariances[start + 1], hits_and_covariances[start + 2], hits_and_covariances[start + 3], hits_and_covariances[start + 4], hits_and_covariances[start + 5]; start += 6; } Rfit::Fast_fit(hits[helix_start],fast_fit[helix_start]); } __global__ void KernelCircleFitAllHits(float *hits_and_covariances, int hits_in_fit, int cumulative_size, float B, Rfit::helix_fit *results, Rfit::Matrix3xNd<4> *hits, Eigen::Matrix<float,6,4> *hits_ge, Rfit::circle_fit *circle_fit, Vector4d *fast_fit, Rfit::line_fit *line_fit) { // Reshape Eigen components from hits_and_covariances, using proper thread and block indices // Perform the fit // Store the results in the proper vector, using again correct indices // Loop for hits_in_fit times: // first 3 are the points // the rest is the covariance matrix, 3x3 int start = (blockIdx.x * blockDim.x + threadIdx.x) * hits_in_fit * 12; int helix_start = (blockIdx.x * blockDim.x + threadIdx.x); if (start >= cumulative_size) { return; } #ifdef GPU_DEBUG printf("BlockDim.x: %d, BlockIdx.x: %d, threadIdx.x: %d, start: %d, " "cumulative_size: %d\n", blockDim.x, blockIdx.x, threadIdx.x, start, cumulative_size); #endif u_int n = hits[helix_start].cols(); constexpr uint32_t N = 4; Rfit::VectorNd<N> rad = (hits[helix_start].block(0, 0, 2, n).colwise().norm()); Rfit::Matrix2Nd<N> hits_cov = MatrixXd::Zero(2 * n, 2 * n); Rfit::loadCovariance2D(hits_ge[helix_start],hits_cov); circle_fit[helix_start] = Rfit::Circle_fit(hits[helix_start].block(0, 0, 2, n), hits_cov, fast_fit[helix_start], rad, B, true); #ifdef GPU_DEBUG printf("KernelCircleFitAllHits circle.par(0): %d %f\n", helix_start, circle_fit[helix_start].par(0)); printf("KernelCircleFitAllHits circle.par(1): %d %f\n", helix_start, circle_fit[helix_start].par(1)); printf("KernelCircleFitAllHits circle.par(2): %d %f\n", helix_start, circle_fit[helix_start].par(2)); #endif } __global__ void KernelLineFitAllHits(float *hits_and_covariances, int hits_in_fit, int cumulative_size, float B, Rfit::helix_fit *results, Rfit::Matrix3xNd<4> *hits, Eigen::Matrix<float,6,4> *hits_ge, Rfit::circle_fit 
*circle_fit, Vector4d *fast_fit, Rfit::line_fit *line_fit) { // Reshape Eigen components from hits_and_covariances, using proper thread and block indices // Perform the fit // Store the results in the proper vector, using again correct indices // Loop for hits_in_fit times: // first 3 are the points // the rest is the covariance matrix, 3x3 int start = (blockIdx.x * blockDim.x + threadIdx.x) * hits_in_fit * 12; int helix_start = (blockIdx.x * blockDim.x + threadIdx.x); if (start >= cumulative_size) { return; } #ifdef GPU_DEBUG printf("BlockDim.x: %d, BlockIdx.x: %d, threadIdx.x: %d, start: %d, " "cumulative_size: %d\n", blockDim.x, blockIdx.x, threadIdx.x, start, cumulative_size); #endif line_fit[helix_start] = Rfit::Line_fit(hits[helix_start], hits_ge[helix_start], circle_fit[helix_start], fast_fit[helix_start], B, true); par_uvrtopak(circle_fit[helix_start], B, true); // Grab helix_fit from the proper location in the output vector Rfit::helix_fit &helix = results[helix_start]; helix.par << circle_fit[helix_start].par, line_fit[helix_start].par; // TODO: pass properly error booleans helix.cov = MatrixXd::Zero(5, 5); helix.cov.block(0, 0, 3, 3) = circle_fit[helix_start].cov; helix.cov.block(3, 3, 2, 2) = line_fit[helix_start].cov; helix.q = circle_fit[helix_start].q; helix.chi2_circle = circle_fit[helix_start].chi2; helix.chi2_line = line_fit[helix_start].chi2; #ifdef GPU_DEBUG printf("KernelLineFitAllHits line.par(0): %d %f\n", helix_start, circle_fit[helix_start].par(0)); printf("KernelLineFitAllHits line.par(1): %d %f\n", helix_start, line_fit[helix_start].par(1)); #endif } void PixelTrackReconstructionGPU::launchKernelFit( float *hits_and_covariancesGPU, int cumulative_size, int hits_in_fit, float B, Rfit::helix_fit *results) { const dim3 threads_per_block(32, 1); int num_blocks = cumulative_size / (hits_in_fit * 12) / threads_per_block.x + 1; auto numberOfSeeds = cumulative_size / (hits_in_fit * 12); Rfit::Matrix3xNd<4> *hitsGPU; cudaCheck(hipMalloc(&hitsGPU, 48 * numberOfSeeds * sizeof(Rfit::Matrix3xNd<4>))); cudaCheck(hipMemset(hitsGPU, 0x00, 48 * numberOfSeeds * sizeof(Rfit::Matrix3xNd<4>))); Eigen::Matrix<float,6,4> *hits_geGPU = nullptr; cudaCheck(hipMalloc(&hits_geGPU, 48 * numberOfSeeds * sizeof(Eigen::Matrix<float,6,4>))); cudaCheck(hipMemset(hits_geGPU, 0x00, 48 * numberOfSeeds * sizeof(Eigen::Matrix<float,6,4>))); Vector4d *fast_fit_resultsGPU = nullptr; cudaCheck(hipMalloc(&fast_fit_resultsGPU, 48 * numberOfSeeds * sizeof(Vector4d))); cudaCheck(hipMemset(fast_fit_resultsGPU, 0x00, 48 * numberOfSeeds * sizeof(Vector4d))); Rfit::circle_fit *circle_fit_resultsGPU = nullptr; cudaCheck(hipMalloc(&circle_fit_resultsGPU, 48 * numberOfSeeds * sizeof(Rfit::circle_fit))); cudaCheck(hipMemset(circle_fit_resultsGPU, 0x00, 48 * numberOfSeeds * sizeof(Rfit::circle_fit))); Rfit::line_fit *line_fit_resultsGPU = nullptr; cudaCheck(hipMalloc(&line_fit_resultsGPU, numberOfSeeds * sizeof(Rfit::line_fit))); cudaCheck(hipMemset(line_fit_resultsGPU, 0x00, numberOfSeeds * sizeof(Rfit::line_fit))); hipLaunchKernelGGL(( KernelFastFitAllHits), dim3(num_blocks), dim3(threads_per_block), 0, 0, hits_and_covariancesGPU, hits_in_fit, cumulative_size, B, results, hitsGPU, hits_geGPU, circle_fit_resultsGPU, fast_fit_resultsGPU, line_fit_resultsGPU); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( KernelCircleFitAllHits), dim3(num_blocks), dim3(threads_per_block), 0, 0, hits_and_covariancesGPU, hits_in_fit, cumulative_size, B, results, hitsGPU, hits_geGPU, circle_fit_resultsGPU, fast_fit_resultsGPU, 
line_fit_resultsGPU); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( KernelLineFitAllHits), dim3(num_blocks), dim3(threads_per_block), 0, 0, hits_and_covariancesGPU, hits_in_fit, cumulative_size, B, results, hitsGPU, hits_geGPU, circle_fit_resultsGPU, fast_fit_resultsGPU, line_fit_resultsGPU); cudaCheck(hipGetLastError()); hipFree(hitsGPU); hipFree(hits_geGPU); hipFree(fast_fit_resultsGPU); hipFree(circle_fit_resultsGPU); hipFree(line_fit_resultsGPU); }
608ecf636e225599d60f8921c7b8ca1532afb6ec.cu
#include <Eigen/Core> #include <Eigen/Eigenvalues> #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "PixelTrackReconstructionGPU.h" using namespace Eigen; __global__ void KernelFastFitAllHits(float *hits_and_covariances, int hits_in_fit, int cumulative_size, float B, Rfit::helix_fit *results, Rfit::Matrix3xNd<4> *hits, Eigen::Matrix<float,6,4> *hits_ge, Rfit::circle_fit *circle_fit, Vector4d *fast_fit, Rfit::line_fit *line_fit) { // Reshape Eigen components from hits_and_covariances, using proper thread and block indices // Perform the fit // Store the results in the proper vector, using again correct indices // Loop for hits_in_fit times: // first 3 are the points // the rest is the covariance matrix, 3x3 int start = (blockIdx.x * blockDim.x + threadIdx.x) * hits_in_fit * 12; int helix_start = (blockIdx.x * blockDim.x + threadIdx.x); if (start >= cumulative_size) { return; } #ifdef GPU_DEBUG printf("BlockDim.x: %d, BlockIdx.x: %d, threadIdx.x: %d, start: %d, cumulative_size: %d\n", blockDim.x, blockIdx.x, threadIdx.x, start, cumulative_size); #endif // Prepare data structure (stack) for (unsigned int i = 0; i < hits_in_fit; ++i) { hits[helix_start].col(i) << hits_and_covariances[start], hits_and_covariances[start + 1], hits_and_covariances[start + 2]; start += 3; hits_ge[helix_start].col(i) << hits_and_covariances[start], hits_and_covariances[start + 1], hits_and_covariances[start + 2], hits_and_covariances[start + 3], hits_and_covariances[start + 4], hits_and_covariances[start + 5]; start += 6; } Rfit::Fast_fit(hits[helix_start],fast_fit[helix_start]); } __global__ void KernelCircleFitAllHits(float *hits_and_covariances, int hits_in_fit, int cumulative_size, float B, Rfit::helix_fit *results, Rfit::Matrix3xNd<4> *hits, Eigen::Matrix<float,6,4> *hits_ge, Rfit::circle_fit *circle_fit, Vector4d *fast_fit, Rfit::line_fit *line_fit) { // Reshape Eigen components from hits_and_covariances, using proper thread and block indices // Perform the fit // Store the results in the proper vector, using again correct indices // Loop for hits_in_fit times: // first 3 are the points // the rest is the covariance matrix, 3x3 int start = (blockIdx.x * blockDim.x + threadIdx.x) * hits_in_fit * 12; int helix_start = (blockIdx.x * blockDim.x + threadIdx.x); if (start >= cumulative_size) { return; } #ifdef GPU_DEBUG printf("BlockDim.x: %d, BlockIdx.x: %d, threadIdx.x: %d, start: %d, " "cumulative_size: %d\n", blockDim.x, blockIdx.x, threadIdx.x, start, cumulative_size); #endif u_int n = hits[helix_start].cols(); constexpr uint32_t N = 4; Rfit::VectorNd<N> rad = (hits[helix_start].block(0, 0, 2, n).colwise().norm()); Rfit::Matrix2Nd<N> hits_cov = MatrixXd::Zero(2 * n, 2 * n); Rfit::loadCovariance2D(hits_ge[helix_start],hits_cov); circle_fit[helix_start] = Rfit::Circle_fit(hits[helix_start].block(0, 0, 2, n), hits_cov, fast_fit[helix_start], rad, B, true); #ifdef GPU_DEBUG printf("KernelCircleFitAllHits circle.par(0): %d %f\n", helix_start, circle_fit[helix_start].par(0)); printf("KernelCircleFitAllHits circle.par(1): %d %f\n", helix_start, circle_fit[helix_start].par(1)); printf("KernelCircleFitAllHits circle.par(2): %d %f\n", helix_start, circle_fit[helix_start].par(2)); #endif } __global__ void KernelLineFitAllHits(float *hits_and_covariances, int hits_in_fit, int cumulative_size, float B, Rfit::helix_fit *results, Rfit::Matrix3xNd<4> *hits, Eigen::Matrix<float,6,4> *hits_ge, Rfit::circle_fit *circle_fit, Vector4d *fast_fit, Rfit::line_fit *line_fit) { // Reshape Eigen components from 
hits_and_covariances, using proper thread and block indices // Perform the fit // Store the results in the proper vector, using again correct indices // Loop for hits_in_fit times: // first 3 are the points // the rest is the covariance matrix, 3x3 int start = (blockIdx.x * blockDim.x + threadIdx.x) * hits_in_fit * 12; int helix_start = (blockIdx.x * blockDim.x + threadIdx.x); if (start >= cumulative_size) { return; } #ifdef GPU_DEBUG printf("BlockDim.x: %d, BlockIdx.x: %d, threadIdx.x: %d, start: %d, " "cumulative_size: %d\n", blockDim.x, blockIdx.x, threadIdx.x, start, cumulative_size); #endif line_fit[helix_start] = Rfit::Line_fit(hits[helix_start], hits_ge[helix_start], circle_fit[helix_start], fast_fit[helix_start], B, true); par_uvrtopak(circle_fit[helix_start], B, true); // Grab helix_fit from the proper location in the output vector Rfit::helix_fit &helix = results[helix_start]; helix.par << circle_fit[helix_start].par, line_fit[helix_start].par; // TODO: pass properly error booleans helix.cov = MatrixXd::Zero(5, 5); helix.cov.block(0, 0, 3, 3) = circle_fit[helix_start].cov; helix.cov.block(3, 3, 2, 2) = line_fit[helix_start].cov; helix.q = circle_fit[helix_start].q; helix.chi2_circle = circle_fit[helix_start].chi2; helix.chi2_line = line_fit[helix_start].chi2; #ifdef GPU_DEBUG printf("KernelLineFitAllHits line.par(0): %d %f\n", helix_start, circle_fit[helix_start].par(0)); printf("KernelLineFitAllHits line.par(1): %d %f\n", helix_start, line_fit[helix_start].par(1)); #endif } void PixelTrackReconstructionGPU::launchKernelFit( float *hits_and_covariancesGPU, int cumulative_size, int hits_in_fit, float B, Rfit::helix_fit *results) { const dim3 threads_per_block(32, 1); int num_blocks = cumulative_size / (hits_in_fit * 12) / threads_per_block.x + 1; auto numberOfSeeds = cumulative_size / (hits_in_fit * 12); Rfit::Matrix3xNd<4> *hitsGPU; cudaCheck(cudaMalloc(&hitsGPU, 48 * numberOfSeeds * sizeof(Rfit::Matrix3xNd<4>))); cudaCheck(cudaMemset(hitsGPU, 0x00, 48 * numberOfSeeds * sizeof(Rfit::Matrix3xNd<4>))); Eigen::Matrix<float,6,4> *hits_geGPU = nullptr; cudaCheck(cudaMalloc(&hits_geGPU, 48 * numberOfSeeds * sizeof(Eigen::Matrix<float,6,4>))); cudaCheck(cudaMemset(hits_geGPU, 0x00, 48 * numberOfSeeds * sizeof(Eigen::Matrix<float,6,4>))); Vector4d *fast_fit_resultsGPU = nullptr; cudaCheck(cudaMalloc(&fast_fit_resultsGPU, 48 * numberOfSeeds * sizeof(Vector4d))); cudaCheck(cudaMemset(fast_fit_resultsGPU, 0x00, 48 * numberOfSeeds * sizeof(Vector4d))); Rfit::circle_fit *circle_fit_resultsGPU = nullptr; cudaCheck(cudaMalloc(&circle_fit_resultsGPU, 48 * numberOfSeeds * sizeof(Rfit::circle_fit))); cudaCheck(cudaMemset(circle_fit_resultsGPU, 0x00, 48 * numberOfSeeds * sizeof(Rfit::circle_fit))); Rfit::line_fit *line_fit_resultsGPU = nullptr; cudaCheck(cudaMalloc(&line_fit_resultsGPU, numberOfSeeds * sizeof(Rfit::line_fit))); cudaCheck(cudaMemset(line_fit_resultsGPU, 0x00, numberOfSeeds * sizeof(Rfit::line_fit))); KernelFastFitAllHits<<<num_blocks, threads_per_block>>>( hits_and_covariancesGPU, hits_in_fit, cumulative_size, B, results, hitsGPU, hits_geGPU, circle_fit_resultsGPU, fast_fit_resultsGPU, line_fit_resultsGPU); cudaCheck(cudaGetLastError()); KernelCircleFitAllHits<<<num_blocks, threads_per_block>>>( hits_and_covariancesGPU, hits_in_fit, cumulative_size, B, results, hitsGPU, hits_geGPU, circle_fit_resultsGPU, fast_fit_resultsGPU, line_fit_resultsGPU); cudaCheck(cudaGetLastError()); KernelLineFitAllHits<<<num_blocks, threads_per_block>>>( hits_and_covariancesGPU, hits_in_fit, 
cumulative_size, B, results, hitsGPU, hits_geGPU, circle_fit_resultsGPU, fast_fit_resultsGPU, line_fit_resultsGPU); cudaCheck(cudaGetLastError()); cudaFree(hitsGPU); cudaFree(hits_geGPU); cudaFree(fast_fit_resultsGPU); cudaFree(circle_fit_resultsGPU); cudaFree(line_fit_resultsGPU); }
9d6b41e5ff83cb4b75fe98d9d170188d3130dc5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * mcmc_kernel.cu * * Created on: 04-Feb-2009 * Author: alee */ #include "temper.ch" #include "rng.h" #include <stdio.h> #include "test_functions.h" __constant__ float args_p[NUM_AP]; // __global__ void FUNC( metropolis_rw, TYPE)( int size, float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_array_out, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int nt = blockDim.x * gridDim.x; int j; float w, x, y, ratio; x = d_array_init[tid]; for (j = tid; j < size; j += nt) { w = d_array_step[j]; y = x + w; // Metropolis so q(y,x) = q(x,y) if (log) { ratio = expf(LOG_TARGET(y, args_p) - LOG_TARGET(x, args_p)); } else { ratio = TARGET(y, args_p) / TARGET(x, args_p); } if (d_array_uniform[j] < ratio) { x = y; } d_array_out[j] = x; } } void FUNC( metropolis_rw, TYPE)( int N, float* d_array_init, float sigma, float* d_array_out, float* h_args_p, int log, int nb, int nt) { hipMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); float* d_array_uniform; hipMalloc((void **) &d_array_uniform, N * sizeof(float)); populate_rand_d(d_array_uniform, N); float* d_array_step; hipMalloc((void **) &d_array_step, N * sizeof(float)); populate_randn_d(d_array_step, N); if (sigma != 1.0) { multiply(N, d_array_step, d_array_step, sigma, nb, nt); } FUNC(metropolis_rw,hipLaunchKernelGGL(( TYPE)), dim3(nb),dim3(nt), 0, 0, N, d_array_init, d_array_step, d_array_uniform, d_array_out, log); hipFree(d_array_uniform); hipFree(d_array_step); } //__global__ void FUNC( metropolis_rw_step, TYPE)( //float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_array_out) { // const int tid = blockDim.x * blockIdx.x + threadIdx.x; // float w, x, y, ratio; // // x = d_array_init[tid]; // w = d_array_step[tid]; // y = x + w; // // Metropolis so q(y,x) = q(x,y) // ratio = TARGET(y, args_p) / TARGET(x, args_p); // if (d_array_uniform[tid] < ratio) { // d_array_out[tid] = y; // } else { // d_array_out[tid] = x; // } // //} // //void FUNC( metropolis_rw_steps, TYPE)( //int N, float* d_array_init, float sigma, float* d_array_out, float* h_args_p, int nb, int nt) { // hipMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); // int tt = nb * nt; // int numSteps = N / tt; // // float* d_array_uniform; // hipMalloc((void **) &d_array_uniform, N * sizeof(float)); // populate_rand_d(d_array_uniform, N); // // float* d_array_step; // hipMalloc((void **) &d_array_step, N * sizeof(float)); // populate_randn_d(d_array_step, N); // if (sigma != 1.0) { // multiply(N, d_array_step, d_array_step, sigma, nb, nt); // } // // for (int i = 0; i < numSteps; i++) { // FUNC(metropolis_rw_step,hipLaunchKernelGGL(( TYPE)), dim3(nb),dim3(nt), 0, 0, d_array_init, d_array_step, d_array_uniform, d_array_out); // hipDeviceSynchronize(); // d_array_init = d_array_out; // d_array_step += tt; // d_array_uniform += tt; // d_array_out += tt; // } // // hipFree(d_array_uniform); // hipFree(d_array_step); // //} __global__ void FUNC( metropolis_rwpop_step, TYPE)( float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_temps, float* d_array_out, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; float w, x, y, t, ratio; t = d_temps[tid]; x = d_array_init[tid]; w = d_array_step[tid]; y = x + w; // Metropolis so q(y,x) = q(x,y) if (log) { ratio = expf(t * (LOG_TARGET(y, args_p) - LOG_TARGET(x, args_p))); } else { ratio = temper(TARGET(y, args_p), t) / temper(TARGET(x, args_p), t); } if 
(d_array_uniform[tid] < ratio) { d_array_out[tid] = y; } else { d_array_out[tid] = x; } } __global__ void FUNC( metropolis_rwpop_exchange, TYPE)( float* d_array_values, int type, float* d_temps, float* d_array_uniform, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int tt = blockDim.x * gridDim.x; if (tid % 2 == type && tid != tt - 1) { int otid = tid + 1; float x = d_array_values[tid]; float y = d_array_values[otid]; float t = d_temps[tid]; float t2 = d_temps[otid]; float ratio; if (log) { float ty = LOG_TARGET(y, args_p); float tx = LOG_TARGET(x, args_p); ratio = expf(ty * (t - t2) + tx * (t2 - t)); } else { float ty = TARGET(y, args_p); float tx = TARGET(x, args_p); ratio = temper(ty, t - 2) * temper(tx, t2 - t); // ratio = temper(TARGET(y, args_p), t) // / temper(TARGET(y, args_p), t2) // * temper(TARGET(x, args_p), t2) // / temper(TARGET(x, args_p), t); } if (d_array_uniform[tid] < ratio) { d_array_values[tid] = y; d_array_values[otid] = x; } } } void FUNC( metropolis_rwpop, TYPE)( int N, float* d_array_init, float sigma, float* h_args_p, float* d_temps, float* d_array_out, int log, int nb, int nt) { hipMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); int tt = nb * nt; int numSteps = N / tt; int* array_types = (int*) malloc(numSteps * sizeof(int)); populate_randIK(array_types, numSteps, 2); float* d_array_step; hipMalloc((void **) &d_array_step, N * sizeof(float)); populate_randn_d(d_array_step, N); if (sigma != 1.0) { multiply(N, d_array_step, d_array_step, sigma, nb, nt); } float* d_array_uniform1; float* d_array_uniform2; hipMalloc((void **) &d_array_uniform1, N * sizeof(float)); hipMalloc((void **) &d_array_uniform2, N * sizeof(float)); populate_rand_d(d_array_uniform1, N); populate_rand_d(d_array_uniform2, N); float* du1_orig = d_array_uniform1; float* du2_orig = d_array_uniform2; float* ds_orig = d_array_step; for (int i = 0; i < numSteps; i++) { FUNC(metropolis_rwpop_step,hipLaunchKernelGGL(( TYPE)), dim3(nb),dim3(nt), 0, 0, d_array_init, d_array_step + i*tt, d_array_uniform1 + i*tt, d_temps, d_array_out + i*tt, log); hipDeviceSynchronize(); FUNC(metropolis_rwpop_exchange,hipLaunchKernelGGL(( TYPE)), dim3(nb),dim3(nt), 0, 0, d_array_out + i*tt, array_types[i], d_temps, d_array_uniform2 + i * tt, log); hipDeviceSynchronize(); hipMemcpy(d_array_init, d_array_out + i * tt, tt * sizeof(float), hipMemcpyDeviceToDevice); } hipFree(du1_orig); hipFree(du2_orig); hipFree(ds_orig); free(array_types); }
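In the exchange kernel just above, the non-log branch computes temper(ty, t - 2) * temper(tx, t2 - t), while the commented-out derivation beside it implies exponents t - t2 and t2 - t. A minimal sketch of the swap acceptance ratio that derivation describes, under the assumption that temper(p, a) behaves like powf(p, a) (the real definition lives in temper.ch, which is not part of this pair):

    // Hypothetical sketch of the parallel-tempering swap ratio:
    // exchanging state x at temperature t with state y at temperature t2
    // accepts with probability min(1, pi(y)^t * pi(x)^t2 / (pi(y)^t2 * pi(x)^t)),
    // i.e. pi(y)^(t - t2) * pi(x)^(t2 - t), where tx = TARGET(x), ty = TARGET(y).
    __device__ float exchange_ratio(float tx, float ty, float t, float t2) {
        return powf(ty, t - t2) * powf(tx, t2 - t);
    }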
9d6b41e5ff83cb4b75fe98d9d170188d3130dc5c.cu
/* * mcmc_kernel.cu * * Created on: 04-Feb-2009 * Author: alee */ #include "temper.ch" #include "rng.h" #include <stdio.h> #include "test_functions.h" __constant__ float args_p[NUM_AP]; // __global__ void FUNC( metropolis_rw, TYPE)( int size, float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_array_out, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int nt = blockDim.x * gridDim.x; int j; float w, x, y, ratio; x = d_array_init[tid]; for (j = tid; j < size; j += nt) { w = d_array_step[j]; y = x + w; // Metropolis so q(y,x) = q(x,y) if (log) { ratio = expf(LOG_TARGET(y, args_p) - LOG_TARGET(x, args_p)); } else { ratio = TARGET(y, args_p) / TARGET(x, args_p); } if (d_array_uniform[j] < ratio) { x = y; } d_array_out[j] = x; } } void FUNC( metropolis_rw, TYPE)( int N, float* d_array_init, float sigma, float* d_array_out, float* h_args_p, int log, int nb, int nt) { cudaMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); float* d_array_uniform; cudaMalloc((void **) &d_array_uniform, N * sizeof(float)); populate_rand_d(d_array_uniform, N); float* d_array_step; cudaMalloc((void **) &d_array_step, N * sizeof(float)); populate_randn_d(d_array_step, N); if (sigma != 1.0) { multiply(N, d_array_step, d_array_step, sigma, nb, nt); } FUNC(metropolis_rw, TYPE)<<<nb,nt>>>(N, d_array_init, d_array_step, d_array_uniform, d_array_out, log); cudaFree(d_array_uniform); cudaFree(d_array_step); } //__global__ void FUNC( metropolis_rw_step, TYPE)( //float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_array_out) { // const int tid = blockDim.x * blockIdx.x + threadIdx.x; // float w, x, y, ratio; // // x = d_array_init[tid]; // w = d_array_step[tid]; // y = x + w; // // Metropolis so q(y,x) = q(x,y) // ratio = TARGET(y, args_p) / TARGET(x, args_p); // if (d_array_uniform[tid] < ratio) { // d_array_out[tid] = y; // } else { // d_array_out[tid] = x; // } // //} // //void FUNC( metropolis_rw_steps, TYPE)( //int N, float* d_array_init, float sigma, float* d_array_out, float* h_args_p, int nb, int nt) { // cudaMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); // int tt = nb * nt; // int numSteps = N / tt; // // float* d_array_uniform; // cudaMalloc((void **) &d_array_uniform, N * sizeof(float)); // populate_rand_d(d_array_uniform, N); // // float* d_array_step; // cudaMalloc((void **) &d_array_step, N * sizeof(float)); // populate_randn_d(d_array_step, N); // if (sigma != 1.0) { // multiply(N, d_array_step, d_array_step, sigma, nb, nt); // } // // for (int i = 0; i < numSteps; i++) { // FUNC(metropolis_rw_step, TYPE)<<<nb,nt>>>(d_array_init, d_array_step, d_array_uniform, d_array_out); // cudaThreadSynchronize(); // d_array_init = d_array_out; // d_array_step += tt; // d_array_uniform += tt; // d_array_out += tt; // } // // cudaFree(d_array_uniform); // cudaFree(d_array_step); // //} __global__ void FUNC( metropolis_rwpop_step, TYPE)( float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_temps, float* d_array_out, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; float w, x, y, t, ratio; t = d_temps[tid]; x = d_array_init[tid]; w = d_array_step[tid]; y = x + w; // Metropolis so q(y,x) = q(x,y) if (log) { ratio = expf(t * (LOG_TARGET(y, args_p) - LOG_TARGET(x, args_p))); } else { ratio = temper(TARGET(y, args_p), t) / temper(TARGET(x, args_p), t); } if (d_array_uniform[tid] < ratio) { d_array_out[tid] = y; } else { d_array_out[tid] = x; } } __global__ void FUNC( metropolis_rwpop_exchange, TYPE)( float* 
d_array_values, int type, float* d_temps, float* d_array_uniform, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int tt = blockDim.x * gridDim.x; if (tid % 2 == type && tid != tt - 1) { int otid = tid + 1; float x = d_array_values[tid]; float y = d_array_values[otid]; float t = d_temps[tid]; float t2 = d_temps[otid]; float ratio; if (log) { float ty = LOG_TARGET(y, args_p); float tx = LOG_TARGET(x, args_p); ratio = expf(ty * (t - t2) + tx * (t2 - t)); } else { float ty = TARGET(y, args_p); float tx = TARGET(x, args_p); ratio = temper(ty, t - 2) * temper(tx, t2 - t); // ratio = temper(TARGET(y, args_p), t) // / temper(TARGET(y, args_p), t2) // * temper(TARGET(x, args_p), t2) // / temper(TARGET(x, args_p), t); } if (d_array_uniform[tid] < ratio) { d_array_values[tid] = y; d_array_values[otid] = x; } } } void FUNC( metropolis_rwpop, TYPE)( int N, float* d_array_init, float sigma, float* h_args_p, float* d_temps, float* d_array_out, int log, int nb, int nt) { cudaMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); int tt = nb * nt; int numSteps = N / tt; int* array_types = (int*) malloc(numSteps * sizeof(int)); populate_randIK(array_types, numSteps, 2); float* d_array_step; cudaMalloc((void **) &d_array_step, N * sizeof(float)); populate_randn_d(d_array_step, N); if (sigma != 1.0) { multiply(N, d_array_step, d_array_step, sigma, nb, nt); } float* d_array_uniform1; float* d_array_uniform2; cudaMalloc((void **) &d_array_uniform1, N * sizeof(float)); cudaMalloc((void **) &d_array_uniform2, N * sizeof(float)); populate_rand_d(d_array_uniform1, N); populate_rand_d(d_array_uniform2, N); float* du1_orig = d_array_uniform1; float* du2_orig = d_array_uniform2; float* ds_orig = d_array_step; for (int i = 0; i < numSteps; i++) { FUNC(metropolis_rwpop_step, TYPE)<<<nb,nt>>>(d_array_init, d_array_step + i*tt, d_array_uniform1 + i*tt, d_temps, d_array_out + i*tt, log); cudaThreadSynchronize(); FUNC(metropolis_rwpop_exchange, TYPE)<<<nb,nt>>>(d_array_out + i*tt, array_types[i], d_temps, d_array_uniform2 + i * tt, log); cudaThreadSynchronize(); cudaMemcpy(d_array_init, d_array_out + i * tt, tt * sizeof(float), cudaMemcpyDeviceToDevice); } cudaFree(du1_orig); cudaFree(du2_orig); cudaFree(ds_orig); free(array_types); }
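The CUDA side launches the macro-named kernel as FUNC(metropolis_rw, TYPE)<<<nb,nt>>>(...), whereas in the hipified copy the hipLaunchKernelGGL macro has been spliced into the middle of the FUNC(...) invocation. A sketch of the conventional HIP spelling of the same launch, assuming FUNC simply pastes its two tokens into the kernel symbol:

    // Sketch only: the whole FUNC(metropolis_rw, TYPE) expression names the
    // kernel, so it is passed as the first argument of hipLaunchKernelGGL;
    // grid, block, dynamic shared-memory bytes and stream follow, then the
    // kernel arguments in their original order.
    hipLaunchKernelGGL(FUNC(metropolis_rw, TYPE), dim3(nb), dim3(nt), 0, 0,
                       N, d_array_init, d_array_step, d_array_uniform,
                       d_array_out, log);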
9f2e5090e86cd3f94bb24c13ad10653e1d944e85.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pointwise_hist2_one_byte_templ.cuh" #include "split_properties_helpers.cuh" #include "compute_point_hist2_loop.cuh" #include <hip/hip_cooperative_groups.h> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> using namespace cooperative_groups; namespace NKernel { template <> struct TLoadEntriesTrait<0, false> { constexpr static ELoadType LoadType() { #if __CUDA_ARCH__ < 700 return ELoadType::OneElement; #else return ELoadType::FourElements; #endif } }; template <> struct TLoadEntriesTrait<0, true> { constexpr static ELoadType LoadType() { #if __CUDA_ARCH__ < 520 return ELoadType::OneElement; #elif __CUDA_ARCH__ < 700 return ELoadType::TwoElements; #else return ELoadType::FourElements; #endif } }; template <> struct TUnrollsTrait<0, ELoadType::FourElements> { constexpr static int Outer() { return 1; } }; template <> struct TDeclarePassInnerOuterBitsTrait<0> { constexpr static int Inner() { return 0; } constexpr static int Outer() { return 0; } }; template<int BLOCK_SIZE> struct TPointHist<0, 0, BLOCK_SIZE> { float* __restrict__ Buffer; __forceinline__ __device__ int SliceOffset() { const int warpId = (threadIdx.x / 32); const int warpOffset = 1024 * warpId; const int blocks = 4; const int innerHistStart = (threadIdx.x & ((blocks - 1) << 3)); return warpOffset + innerHistStart; } __forceinline__ __device__ TPointHist(float* buff) { const int HIST_SIZE = 32 * BLOCK_SIZE; #pragma unroll 8 for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) { buff[i] = 0; } Buffer = buff + SliceOffset(); __syncthreads(); } __forceinline__ __device__ void Add(float val, float* dst) { dst[0] += val; } __forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w) { thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block()); const bool flag = threadIdx.x & 1; const float stat1 = flag ? t : w; const float stat2 = flag ? w : t; #pragma unroll for (int i = 0; i < 4; i++) { const int f = ((2 * i + threadIdx.x) & 6); const int bin = (ci >> (24 - (f << 2))) & 255; const bool pass = bin != 32; int offset = f + 32 * (bin & 31); const int offset1 = offset + flag; const float add1 = pass ? stat1 : 0.0f; const int offset2 = offset + !flag; const float add2 = pass ? stat2 : 0.0f; syncTile.sync(); Buffer[offset1] += add1; syncTile.sync(); Buffer[offset2] += add2; } } __forceinline__ __device__ void AddPoint2(uint2 ci, const float2 t, const float2 w) { thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block()); const bool flag = threadIdx.x & 1; const float2 stat1 = flag ? t : w; const float2 stat2 = flag ? w : t; #pragma unroll for (int i = 0; i < 4; i++) { int f = ((2 * i + threadIdx.x) & 6); const int bin1 = (ci.x >> (24 - (f << 2))) & 255; const int bin2 = (ci.y >> (24 - (f << 2))) & 255; const float passx = bin1 != 32 ? 1.0f : 0.0f; const float passy = bin2 != 32 ? 1.0f : 0.0f; int offsetx = f + 32 * (bin1 & 31) + flag; int offsety = f + 32 * (bin2 & 31) + flag; syncTile.sync(); Buffer[offsetx] += passx * stat1.x; Buffer[offsety] += passy * stat1.y; offsetx += flag ? -1 : 1; offsety += flag ? 
-1 : 1; syncTile.sync(); Buffer[offsetx] += passx * stat2.x; Buffer[offsety] += passy * stat2.y; } } __forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) { //don't change anything without performance tests, nvcc is so awesome, that little change of code could slow everything by 5-10% thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block()); const bool flag = threadIdx.x & 1; const float4 stat1 = flag ? t : w; const float4 stat2 = flag ? w : t; #pragma unroll for (int i = 0; i < 4; i++) { int f = ((2 * i + threadIdx.x) & 6); const ui32 shift = static_cast<ui32>(24 - (f << 2)); f += flag; const int binx = (ci.x >> shift) & 255; const int biny = (ci.y >> shift) & 255; const int binz = (ci.z >> shift) & 255; const int binw = (ci.w >> shift) & 255; const float passx = binx != 32 ? 1.0f : 0.0f; const float passy = biny != 32 ? 1.0f : 0.0f; const float passz = binz != 32 ? 1.0f : 0.0f; const float passw = binw != 32 ? 1.0f : 0.0f; float* buffer = Buffer + f; int offsetx = (binx & 31) << 5; int offsety = (biny & 31) << 5; int offsetz = (binz & 31) << 5; int offsetw = (binw & 31) << 5; syncTile.sync(); buffer[offsetx] += passx * stat1.x; buffer[offsety] += passy * stat1.y; buffer[offsetz] += passz * stat1.z; buffer[offsetw] += passw * stat1.w; offsetx += flag ? -1 : 1; offsety += flag ? -1 : 1; offsetz += flag ? -1 : 1; offsetw += flag ? -1 : 1; syncTile.sync(); buffer[offsetx] += passx * stat2.x; buffer[offsety] += passy * stat2.y; buffer[offsetz] += passz * stat2.z; buffer[offsetw] += passw * stat2.w; } } //After reduce we store histograms by blocks: 256 floats (4 x 2 x 32) // for first 32 bins; than 256 floats for second 32 bins, etc __forceinline__ __device__ void Reduce() { Buffer -= SliceOffset(); __syncthreads(); { const int warpHistSize = 1024; for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) { float sum = 0; // 12 iterations at 32-bin #pragma unroll 12 for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) { sum += Buffer[i]; } Buffer[warpHistSize + start] = sum; } } __syncthreads(); if (threadIdx.x < 256) { const int w = threadIdx.x & 1; const int f = threadIdx.x / 64; float sum = 0.0f; const int fold = (threadIdx.x >> 1) & 31; const int maxFoldCount = 32; if (fold < maxFoldCount) { const int innerHistCount = 4; const volatile float* __restrict__ src = Buffer + 1024 //warpHistSize + 32 * fold + w; #pragma unroll for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) { sum += src[2 * f + (inWarpHist << 3)]; } Buffer[2 * (maxFoldCount * f + fold) + w] = sum; } } __syncthreads(); } }; DEFINE_NON_BINARY(5) }
9f2e5090e86cd3f94bb24c13ad10653e1d944e85.cu
#include "pointwise_hist2_one_byte_templ.cuh" #include "split_properties_helpers.cuh" #include "compute_point_hist2_loop.cuh" #include <cooperative_groups.h> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> using namespace cooperative_groups; namespace NKernel { template <> struct TLoadEntriesTrait<0, false> { constexpr static ELoadType LoadType() { #if __CUDA_ARCH__ < 700 return ELoadType::OneElement; #else return ELoadType::FourElements; #endif } }; template <> struct TLoadEntriesTrait<0, true> { constexpr static ELoadType LoadType() { #if __CUDA_ARCH__ < 520 return ELoadType::OneElement; #elif __CUDA_ARCH__ < 700 return ELoadType::TwoElements; #else return ELoadType::FourElements; #endif } }; template <> struct TUnrollsTrait<0, ELoadType::FourElements> { constexpr static int Outer() { return 1; } }; template <> struct TDeclarePassInnerOuterBitsTrait<0> { constexpr static int Inner() { return 0; } constexpr static int Outer() { return 0; } }; template<int BLOCK_SIZE> struct TPointHist<0, 0, BLOCK_SIZE> { float* __restrict__ Buffer; __forceinline__ __device__ int SliceOffset() { const int warpId = (threadIdx.x / 32); const int warpOffset = 1024 * warpId; const int blocks = 4; const int innerHistStart = (threadIdx.x & ((blocks - 1) << 3)); return warpOffset + innerHistStart; } __forceinline__ __device__ TPointHist(float* buff) { const int HIST_SIZE = 32 * BLOCK_SIZE; #pragma unroll 8 for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) { buff[i] = 0; } Buffer = buff + SliceOffset(); __syncthreads(); } __forceinline__ __device__ void Add(float val, float* dst) { dst[0] += val; } __forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w) { thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block()); const bool flag = threadIdx.x & 1; const float stat1 = flag ? t : w; const float stat2 = flag ? w : t; #pragma unroll for (int i = 0; i < 4; i++) { const int f = ((2 * i + threadIdx.x) & 6); const int bin = (ci >> (24 - (f << 2))) & 255; const bool pass = bin != 32; int offset = f + 32 * (bin & 31); const int offset1 = offset + flag; const float add1 = pass ? stat1 : 0.0f; const int offset2 = offset + !flag; const float add2 = pass ? stat2 : 0.0f; syncTile.sync(); Buffer[offset1] += add1; syncTile.sync(); Buffer[offset2] += add2; } } __forceinline__ __device__ void AddPoint2(uint2 ci, const float2 t, const float2 w) { thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block()); const bool flag = threadIdx.x & 1; const float2 stat1 = flag ? t : w; const float2 stat2 = flag ? w : t; #pragma unroll for (int i = 0; i < 4; i++) { int f = ((2 * i + threadIdx.x) & 6); const int bin1 = (ci.x >> (24 - (f << 2))) & 255; const int bin2 = (ci.y >> (24 - (f << 2))) & 255; const float passx = bin1 != 32 ? 1.0f : 0.0f; const float passy = bin2 != 32 ? 1.0f : 0.0f; int offsetx = f + 32 * (bin1 & 31) + flag; int offsety = f + 32 * (bin2 & 31) + flag; syncTile.sync(); Buffer[offsetx] += passx * stat1.x; Buffer[offsety] += passy * stat1.y; offsetx += flag ? -1 : 1; offsety += flag ? 
-1 : 1; syncTile.sync(); Buffer[offsetx] += passx * stat2.x; Buffer[offsety] += passy * stat2.y; } } __forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) { //don't change anything without performance tests, nvcc is so awesome, that little change of code could slow everything by 5-10% thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block()); const bool flag = threadIdx.x & 1; const float4 stat1 = flag ? t : w; const float4 stat2 = flag ? w : t; #pragma unroll for (int i = 0; i < 4; i++) { int f = ((2 * i + threadIdx.x) & 6); const ui32 shift = static_cast<ui32>(24 - (f << 2)); f += flag; const int binx = (ci.x >> shift) & 255; const int biny = (ci.y >> shift) & 255; const int binz = (ci.z >> shift) & 255; const int binw = (ci.w >> shift) & 255; const float passx = binx != 32 ? 1.0f : 0.0f; const float passy = biny != 32 ? 1.0f : 0.0f; const float passz = binz != 32 ? 1.0f : 0.0f; const float passw = binw != 32 ? 1.0f : 0.0f; float* buffer = Buffer + f; int offsetx = (binx & 31) << 5; int offsety = (biny & 31) << 5; int offsetz = (binz & 31) << 5; int offsetw = (binw & 31) << 5; syncTile.sync(); buffer[offsetx] += passx * stat1.x; buffer[offsety] += passy * stat1.y; buffer[offsetz] += passz * stat1.z; buffer[offsetw] += passw * stat1.w; offsetx += flag ? -1 : 1; offsety += flag ? -1 : 1; offsetz += flag ? -1 : 1; offsetw += flag ? -1 : 1; syncTile.sync(); buffer[offsetx] += passx * stat2.x; buffer[offsety] += passy * stat2.y; buffer[offsetz] += passz * stat2.z; buffer[offsetw] += passw * stat2.w; } } //After reduce we store histograms by blocks: 256 floats (4 x 2 x 32) // for first 32 bins; than 256 floats for second 32 bins, etc __forceinline__ __device__ void Reduce() { Buffer -= SliceOffset(); __syncthreads(); { const int warpHistSize = 1024; for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) { float sum = 0; // 12 iterations at 32-bin #pragma unroll 12 for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) { sum += Buffer[i]; } Buffer[warpHistSize + start] = sum; } } __syncthreads(); if (threadIdx.x < 256) { const int w = threadIdx.x & 1; const int f = threadIdx.x / 64; float sum = 0.0f; const int fold = (threadIdx.x >> 1) & 31; const int maxFoldCount = 32; if (fold < maxFoldCount) { const int innerHistCount = 4; const volatile float* __restrict__ src = Buffer + 1024 //warpHistSize + 32 * fold + w; #pragma unroll for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) { sum += src[2 * f + (inWarpHist << 3)]; } Buffer[2 * (maxFoldCount * f + fold) + w] = sum; } } __syncthreads(); } }; DEFINE_NON_BINARY(5) }
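Apart from the hipify banner, the hip_runtime include, and hip_cooperative_groups.h replacing cooperative_groups.h, the two files in this pair are identical. The TLoadEntriesTrait specializations at the top choose how many elements each thread loads per pass from __CUDA_ARCH__; a stripped-down sketch of that dispatch pattern, with hypothetical names rather than CatBoost's:

    // Minimal sketch of compute-capability-based load-width selection,
    // mirroring TLoadEntriesTrait above: pre-Volta parts read one element
    // per thread per pass, Volta and newer read four.
    enum class ELoadWidth { One = 1, Four = 4 };

    struct TLoadWidthTrait {
        constexpr static ELoadWidth Pick() {
    #if __CUDA_ARCH__ < 700
            return ELoadWidth::One;
    #else
            return ELoadWidth::Four;
    #endif
        }
    };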
7ba4f61c66c928aa9ea5675ed228b5823ae6eb17.hip
// !!! This is a file automatically generated by hipify!!! //bondsEngine.cu //Scott Grauer-Gray [email protected] //Contains main function for running bonds application on a GPU #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include "bondsStructs.h" #include "bondsKernelsGpu.hip" #include "bondsKernelsCpu.cu" #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) int monthLengthCpu(int month, bool leapYear) { int MonthLength[] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; int MonthLeapLength[] = { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; return (leapYear? MonthLeapLength[month-1] : MonthLength[month-1]); } int monthOffsetCpu(int m, bool leapYear) { int MonthOffset[] = { 0, 31, 59, 90, 120, 151, // Jan - Jun 181, 212, 243, 273, 304, 334, // Jun - Dec 365 // used in dayOfMonth to bracket day }; int MonthLeapOffset[] = { 0, 31, 60, 91, 121, 152, // Jan - Jun 182, 213, 244, 274, 305, 335, // Jun - Dec 366 // used in dayOfMonth to bracket day }; return (leapYear? MonthLeapOffset[m-1] : MonthOffset[m-1]); } int yearOffsetCpu(int y) { // the list of all December 31st in the preceding year // e.g. for 1901 yearOffset[1] is 366, that is, December 31 1900 int YearOffset[] = { // 1900-1909 0, 366, 731, 1096, 1461, 1827, 2192, 2557, 2922, 3288, // 1910-1919 3653, 4018, 4383, 4749, 5114, 5479, 5844, 6210, 6575, 6940, // 1920-1929 7305, 7671, 8036, 8401, 8766, 9132, 9497, 9862,10227,10593, // 1930-1939 10958,11323,11688,12054,12419,12784,13149,13515,13880,14245, // 1940-1949 14610,14976,15341,15706,16071,16437,16802,17167,17532,17898, // 1950-1959 18263,18628,18993,19359,19724,20089,20454,20820,21185,21550, // 1960-1969 21915,22281,22646,23011,23376,23742,24107,24472,24837,25203, // 1970-1979 25568,25933,26298,26664,27029,27394,27759,28125,28490,28855, // 1980-1989 29220,29586,29951,30316,30681,31047,31412,31777,32142,32508, // 1990-1999 32873,33238,33603,33969,34334,34699,35064,35430,35795,36160, // 2000-2009 36525,36891,37256,37621,37986,38352,38717,39082,39447,39813, // 2010-2019 40178,40543,40908,41274,41639,42004,42369,42735,43100,43465, // 2020-2029 43830,44196,44561,44926,45291,45657,46022,46387,46752,47118, // 2030-2039 47483,47848,48213,48579,48944,49309,49674,50040,50405,50770, // 2040-2049 51135,51501,51866,52231,52596,52962,53327,53692,54057,54423, // 2050-2059 54788,55153,55518,55884,56249,56614,56979,57345,57710,58075, // 2060-2069 58440,58806,59171,59536,59901,60267,60632,60997,61362,61728, // 2070-2079 62093,62458,62823,63189,63554,63919,64284,64650,65015,65380, // 2080-2089 65745,66111,66476,66841,67206,67572,67937,68302,68667,69033, // 2090-2099 69398,69763,70128,70494,70859,71224,71589,71955,72320,72685, // 2100-2109 73050,73415,73780,74145,74510,74876,75241,75606,75971,76337, // 2110-2119 76702,77067,77432,77798,78163,78528,78893,79259,79624,79989, // 2120-2129 80354,80720,81085,81450,81815,82181,82546,82911,83276,83642, // 2130-2139 84007,84372,84737,85103,85468,85833,86198,86564,86929,87294, // 2140-2149 87659,88025,88390,88755,89120,89486,89851,90216,90581,90947, // 2150-2159 91312,91677,92042,92408,92773,93138,93503,93869,94234,94599, // 2160-2169 94964,95330,95695,96060,96425,96791,97156,97521,97886,98252, // 2170-2179 98617,98982,99347,99713,100078,100443,100808,101174,101539,101904, // 2180-2189 102269,102635,103000,103365,103730,104096,104461,104826,105191,105557, // 2190-2199 105922,106287,106652,107018,107383,107748,108113,108479,108844,109209, // 2200 109574 }; return 
YearOffset[y-1900]; } bool isLeapCpu(int y) { bool YearIsLeap[] = { // 1900 is leap in agreement with Excel's bug // 1900 is out of valid date range anyway // 1900-1909 true,false,false,false, true,false,false,false, true,false, // 1910-1919 false,false, true,false,false,false, true,false,false,false, // 1920-1929 true,false,false,false, true,false,false,false, true,false, // 1930-1939 false,false, true,false,false,false, true,false,false,false, // 1940-1949 true,false,false,false, true,false,false,false, true,false, // 1950-1959 false,false, true,false,false,false, true,false,false,false, // 1960-1969 true,false,false,false, true,false,false,false, true,false, // 1970-1979 false,false, true,false,false,false, true,false,false,false, // 1980-1989 true,false,false,false, true,false,false,false, true,false, // 1990-1999 false,false, true,false,false,false, true,false,false,false, // 2000-2009 true,false,false,false, true,false,false,false, true,false, // 2010-2019 false,false, true,false,false,false, true,false,false,false, // 2020-2029 true,false,false,false, true,false,false,false, true,false, // 2030-2039 false,false, true,false,false,false, true,false,false,false, // 2040-2049 true,false,false,false, true,false,false,false, true,false, // 2050-2059 false,false, true,false,false,false, true,false,false,false, // 2060-2069 true,false,false,false, true,false,false,false, true,false, // 2070-2079 false,false, true,false,false,false, true,false,false,false, // 2080-2089 true,false,false,false, true,false,false,false, true,false, // 2090-2099 false,false, true,false,false,false, true,false,false,false, // 2100-2109 false,false,false,false, true,false,false,false, true,false, // 2110-2119 false,false, true,false,false,false, true,false,false,false, // 2120-2129 true,false,false,false, true,false,false,false, true,false, // 2130-2139 false,false, true,false,false,false, true,false,false,false, // 2140-2149 true,false,false,false, true,false,false,false, true,false, // 2150-2159 false,false, true,false,false,false, true,false,false,false, // 2160-2169 true,false,false,false, true,false,false,false, true,false, // 2170-2179 false,false, true,false,false,false, true,false,false,false, // 2180-2189 true,false,false,false, true,false,false,false, true,false, // 2190-2199 false,false, true,false,false,false, true,false,false,false, // 2200 false }; return YearIsLeap[y-1900]; } bondsDateStruct intializeDateCpu(int d, int m, int y) { bondsDateStruct currDate; currDate.day = d; currDate.month = m; currDate.year = y; bool leap = isLeapCpu(y); int offset = monthOffsetCpu(m,leap); currDate.dateSerialNum = d + offset + yearOffsetCpu(y); return currDate; } void runBoundsEngine() { //can run multiple times with different number of bonds by uncommenting these lines int nBondsArray[] = {1000000}; for (int numTime=0; numTime < 1; numTime++) { int numBonds = nBondsArray[numTime]; printf("\nNumber of Bonds: %d\n\n", numBonds); inArgsStruct inArgsHost; inArgsHost.discountCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct)); inArgsHost.repoCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct)); inArgsHost.currDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct)); inArgsHost.maturityDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct)); inArgsHost.bondCleanPrice = (dataType*)malloc(numBonds*sizeof(dataType)); inArgsHost.bond = (bondStruct*)malloc(numBonds*sizeof(bondStruct)); inArgsHost.dummyStrike = (dataType*)malloc(numBonds*sizeof(dataType)); 
srand (123); int numBond; for (numBond = 0; numBond < numBonds; numBond++) { dataType repoRate = 0.07; //int repoSettlementDays = 0; int repoCompounding = SIMPLE_INTEREST; dataType repoCompoundFreq = 1; // assume a ten year bond- this is irrelevant bondsDateStruct bondIssueDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 1999 - (rand() % 2)); bondsDateStruct bondMaturityDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 2000 + (rand() % 2)); bondsDateStruct todaysDate = intializeDateCpu(bondMaturityDate.day-1,bondMaturityDate.month,bondMaturityDate.year); bondStruct bond; bond.startDate = bondIssueDate; bond.maturityDate = bondMaturityDate; bond.rate = 0.08 + ((float)rand()/(float)RAND_MAX - 0.5)*0.1; dataType bondCouponFrequency = 2; dataType bondCleanPrice = 89.97693786; bondsYieldTermStruct bondCurve; bondCurve.refDate = todaysDate; bondCurve.calDate = todaysDate; bondCurve.forward = -0.1f; // dummy rate bondCurve.compounding = COMPOUNDED_INTEREST; bondCurve.frequency = bondCouponFrequency; bondCurve.dayCounter = USE_EXACT_DAY; bondCurve.refDate = todaysDate; bondCurve.calDate = todaysDate; bondCurve.compounding = COMPOUNDED_INTEREST; bondCurve.frequency = bondCouponFrequency; dataType dummyStrike = 91.5745; bondsYieldTermStruct repoCurve; repoCurve.refDate = todaysDate; repoCurve.calDate = todaysDate; repoCurve.forward = repoRate; repoCurve.compounding = repoCompounding; repoCurve.frequency = repoCompoundFreq; repoCurve.dayCounter = USE_SERIAL_NUMS; inArgsHost.discountCurve[numBond] = bondCurve; inArgsHost.repoCurve[numBond] = repoCurve; inArgsHost.currDate[numBond] = todaysDate; inArgsHost.maturityDate[numBond] = bondMaturityDate; inArgsHost.bondCleanPrice[numBond] = bondCleanPrice; inArgsHost.bond[numBond] = bond; inArgsHost.dummyStrike[numBond] = dummyStrike; } printf("Inputs for bond with index %d\n", numBonds/2); printf("Bond Issue Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].startDate.month, inArgsHost.bond[numBonds/2].startDate.day, inArgsHost.bond[numBonds/2].startDate.year); printf("Bond Maturity Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].maturityDate.month, inArgsHost.bond[numBonds/2].maturityDate.day, inArgsHost.bond[numBonds/2].maturityDate.year); printf("Bond rate: %f\n\n", inArgsHost.bond[numBonds/2].rate); resultsStruct resultsHost; resultsStruct resultsFromGpu; resultsHost.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType)); resultsHost.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));; resultsHost.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));; resultsHost.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));; resultsFromGpu.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType)); resultsFromGpu.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));; resultsFromGpu.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));; resultsFromGpu.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));; bondsYieldTermStruct* discountCurveGpu; bondsYieldTermStruct* repoCurveGpu; bondsDateStruct* currDateGpu; bondsDateStruct* maturityDateGpu; dataType* bondCleanPriceGpu; bondStruct* bondGpu; dataType* dummyStrikeGpu; dataType* dirtyPriceGpu; dataType* accruedAmountCurrDateGpu; dataType* cleanPriceGpu; dataType* bondForwardValGpu; hipMalloc((void**)&discountCurveGpu, numBonds*sizeof(bondsYieldTermStruct)); hipMalloc((void**)&repoCurveGpu, numBonds*sizeof(bondsYieldTermStruct)); hipMalloc((void**)&currDateGpu, numBonds*sizeof(bondsDateStruct)); hipMalloc((void**)&maturityDateGpu, 
numBonds*sizeof(bondsDateStruct)); hipMalloc((void**)&bondCleanPriceGpu, numBonds*sizeof(dataType)); hipMalloc((void**)&bondGpu, numBonds*sizeof(bondStruct)); hipMalloc((void**)&dummyStrikeGpu, numBonds*sizeof(dataType)); hipMalloc((void**)&dirtyPriceGpu, numBonds*sizeof(dataType)); hipMalloc((void**)&accruedAmountCurrDateGpu, numBonds*sizeof(dataType)); hipMalloc((void**)&cleanPriceGpu, numBonds*sizeof(dataType)); hipMalloc((void**)&bondForwardValGpu, numBonds*sizeof(dataType)); hipMemcpy(discountCurveGpu, inArgsHost.discountCurve, numBonds*sizeof(bondsYieldTermStruct), hipMemcpyHostToDevice); hipMemcpy(repoCurveGpu, inArgsHost.repoCurve, numBonds*sizeof(bondsYieldTermStruct), hipMemcpyHostToDevice); hipMemcpy(currDateGpu, inArgsHost.currDate, numBonds*sizeof(bondsDateStruct), hipMemcpyHostToDevice); hipMemcpy(maturityDateGpu, inArgsHost.maturityDate, numBonds*sizeof(bondsDateStruct), hipMemcpyHostToDevice); hipMemcpy(bondCleanPriceGpu, inArgsHost.bondCleanPrice, numBonds*sizeof(dataType), hipMemcpyHostToDevice); hipMemcpy(bondGpu, inArgsHost.bond, numBonds*sizeof(bondStruct), hipMemcpyHostToDevice); hipMemcpy(dummyStrikeGpu, inArgsHost.dummyStrike, numBonds*sizeof(dataType), hipMemcpyHostToDevice); long seconds, useconds; float mtimeCpu; float mtimeGpu; struct timeval start; struct timeval end; inArgsStruct inArgs; inArgs.discountCurve = discountCurveGpu; inArgs.repoCurve = repoCurveGpu; inArgs.currDate = currDateGpu; inArgs.maturityDate = maturityDateGpu; inArgs.bondCleanPrice = bondCleanPriceGpu; inArgs.bond = bondGpu; inArgs.dummyStrike = dummyStrikeGpu; resultsStruct results; results.dirtyPrice = dirtyPriceGpu; results.accruedAmountCurrDate = accruedAmountCurrDateGpu; results.cleanPrice = cleanPriceGpu; results.bondForwardVal = bondForwardValGpu; dim3 grid((ceil(((float)numBonds)/((float)256.0f))), 1, 1); dim3 threads(256, 1, 1); hipDeviceSynchronize(); gettimeofday(&start, NULL); hipLaunchKernelGGL(( getBondsResultsGpu) , dim3(dim3(grid)), dim3(dim3(threads )) , 0, 0, inArgs, results, numBonds); hipDeviceSynchronize(); gettimeofday(&end, NULL); hipMemcpy(resultsFromGpu.dirtyPrice, dirtyPriceGpu, numBonds*sizeof(dataType), hipMemcpyDeviceToHost); hipMemcpy(resultsFromGpu.accruedAmountCurrDate, accruedAmountCurrDateGpu, numBonds*sizeof(dataType), hipMemcpyDeviceToHost); hipMemcpy(resultsFromGpu.cleanPrice, cleanPriceGpu, numBonds*sizeof(dataType), hipMemcpyDeviceToHost); hipMemcpy(resultsFromGpu.bondForwardVal, bondForwardValGpu, numBonds*sizeof(dataType), hipMemcpyDeviceToHost); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtimeGpu = ((seconds) * 1000 + ((float)useconds)/1000.0) + 0.5f; printf("Run on GPU\n"); printf("Processing time on GPU: %f (ms) \n\n", mtimeGpu); double totPrice = 0.0; int numBond1; for (numBond1= 0; numBond1< numBonds; numBond1++) { totPrice += resultsFromGpu.dirtyPrice[numBond1]; } printf("Sum of output dirty prices on GPU: %f\n", totPrice); printf("Outputs on GPU for bond with index %d: \n", numBonds/2); printf("Dirty Price: %f\n", resultsFromGpu.dirtyPrice[numBonds/2]); printf("Accrued Amount: %f\n", resultsFromGpu.accruedAmountCurrDate[numBonds/2]); printf("Clean Price: %f\n", resultsFromGpu.cleanPrice[numBonds/2]); printf("Bond Forward Val: %f\n\n", resultsFromGpu.bondForwardVal[numBonds/2]); gettimeofday(&start, NULL); getBondsResultsCpu(inArgsHost, resultsHost, numBonds); gettimeofday(&end, NULL); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtimeCpu = ((seconds) * 1000 + 
((float)useconds)/1000.0) + 0.5f; printf("Run on CPU\n"); printf("Processing time on CPU: %f (ms) \n\n", mtimeCpu); totPrice = 0.0; for (numBond1= 0; numBond1< numBonds; numBond1++) { totPrice += resultsHost.dirtyPrice[numBond1]; } printf("Sum of output dirty prices on CPU: %f\n", totPrice); printf("Outputs on CPU for bond with index %d: \n", numBonds/2); printf("Dirty Price: %f\n", resultsHost.dirtyPrice[numBonds/2]); printf("Accrued Amount: %f\n", resultsHost.accruedAmountCurrDate[numBonds/2]); printf("Clean Price: %f\n", resultsHost.cleanPrice[numBonds/2]); printf("Bond Forward Val: %f\n\n", resultsHost.bondForwardVal[numBonds/2]); printf("Speedup using GPU: %f\n", mtimeCpu/mtimeGpu); hipFree(discountCurveGpu); hipFree(repoCurveGpu); hipFree(currDateGpu); hipFree(maturityDateGpu); hipFree(bondCleanPriceGpu); hipFree(bondGpu); hipFree(dummyStrikeGpu); hipFree(dirtyPriceGpu); hipFree(accruedAmountCurrDateGpu); hipFree(cleanPriceGpu); hipFree(bondForwardValGpu); free(resultsHost.dirtyPrice); free(resultsHost.accruedAmountCurrDate);; free(resultsHost.cleanPrice);; free(resultsHost.bondForwardVal);; free(resultsFromGpu.dirtyPrice); free(resultsFromGpu.accruedAmountCurrDate);; free(resultsFromGpu.cleanPrice);; free(resultsFromGpu.bondForwardVal); free(inArgsHost.discountCurve); free(inArgsHost.repoCurve); free(inArgsHost.currDate); free(inArgsHost.maturityDate); free(inArgsHost.bondCleanPrice); free(inArgsHost.bond); free(inArgsHost.dummyStrike); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runBoundsEngine(); return 0; }
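The launch configuration above rounds numBonds / 256 up by going through a float ceil; an integer form of the same computation, shown as a sketch (equivalent for the bond counts used here):

    // Sketch: integer ceiling division for the launch configuration,
    // avoiding the float round-trip used above.
    const int threadsPerBlock = 256;
    const int numBlocks = (numBonds + threadsPerBlock - 1) / threadsPerBlock;
    dim3 grid(numBlocks, 1, 1);
    dim3 threads(threadsPerBlock, 1, 1);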
7ba4f61c66c928aa9ea5675ed228b5823ae6eb17.cu
//bondsEngine.cu //Scott Grauer-Gray [email protected] //Contains main function for running bonds application on a GPU #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <cuda.h> #include "bondsStructs.h" #include "bondsKernelsGpu.cu" #include "bondsKernelsCpu.cu" #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) int monthLengthCpu(int month, bool leapYear) { int MonthLength[] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; int MonthLeapLength[] = { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; return (leapYear? MonthLeapLength[month-1] : MonthLength[month-1]); } int monthOffsetCpu(int m, bool leapYear) { int MonthOffset[] = { 0, 31, 59, 90, 120, 151, // Jan - Jun 181, 212, 243, 273, 304, 334, // Jun - Dec 365 // used in dayOfMonth to bracket day }; int MonthLeapOffset[] = { 0, 31, 60, 91, 121, 152, // Jan - Jun 182, 213, 244, 274, 305, 335, // Jun - Dec 366 // used in dayOfMonth to bracket day }; return (leapYear? MonthLeapOffset[m-1] : MonthOffset[m-1]); } int yearOffsetCpu(int y) { // the list of all December 31st in the preceding year // e.g. for 1901 yearOffset[1] is 366, that is, December 31 1900 int YearOffset[] = { // 1900-1909 0, 366, 731, 1096, 1461, 1827, 2192, 2557, 2922, 3288, // 1910-1919 3653, 4018, 4383, 4749, 5114, 5479, 5844, 6210, 6575, 6940, // 1920-1929 7305, 7671, 8036, 8401, 8766, 9132, 9497, 9862,10227,10593, // 1930-1939 10958,11323,11688,12054,12419,12784,13149,13515,13880,14245, // 1940-1949 14610,14976,15341,15706,16071,16437,16802,17167,17532,17898, // 1950-1959 18263,18628,18993,19359,19724,20089,20454,20820,21185,21550, // 1960-1969 21915,22281,22646,23011,23376,23742,24107,24472,24837,25203, // 1970-1979 25568,25933,26298,26664,27029,27394,27759,28125,28490,28855, // 1980-1989 29220,29586,29951,30316,30681,31047,31412,31777,32142,32508, // 1990-1999 32873,33238,33603,33969,34334,34699,35064,35430,35795,36160, // 2000-2009 36525,36891,37256,37621,37986,38352,38717,39082,39447,39813, // 2010-2019 40178,40543,40908,41274,41639,42004,42369,42735,43100,43465, // 2020-2029 43830,44196,44561,44926,45291,45657,46022,46387,46752,47118, // 2030-2039 47483,47848,48213,48579,48944,49309,49674,50040,50405,50770, // 2040-2049 51135,51501,51866,52231,52596,52962,53327,53692,54057,54423, // 2050-2059 54788,55153,55518,55884,56249,56614,56979,57345,57710,58075, // 2060-2069 58440,58806,59171,59536,59901,60267,60632,60997,61362,61728, // 2070-2079 62093,62458,62823,63189,63554,63919,64284,64650,65015,65380, // 2080-2089 65745,66111,66476,66841,67206,67572,67937,68302,68667,69033, // 2090-2099 69398,69763,70128,70494,70859,71224,71589,71955,72320,72685, // 2100-2109 73050,73415,73780,74145,74510,74876,75241,75606,75971,76337, // 2110-2119 76702,77067,77432,77798,78163,78528,78893,79259,79624,79989, // 2120-2129 80354,80720,81085,81450,81815,82181,82546,82911,83276,83642, // 2130-2139 84007,84372,84737,85103,85468,85833,86198,86564,86929,87294, // 2140-2149 87659,88025,88390,88755,89120,89486,89851,90216,90581,90947, // 2150-2159 91312,91677,92042,92408,92773,93138,93503,93869,94234,94599, // 2160-2169 94964,95330,95695,96060,96425,96791,97156,97521,97886,98252, // 2170-2179 98617,98982,99347,99713,100078,100443,100808,101174,101539,101904, // 2180-2189 102269,102635,103000,103365,103730,104096,104461,104826,105191,105557, // 2190-2199 105922,106287,106652,107018,107383,107748,108113,108479,108844,109209, // 2200 109574 }; return YearOffset[y-1900]; } bool isLeapCpu(int y) { bool YearIsLeap[] = { // 1900 is leap 
in agreement with Excel's bug // 1900 is out of valid date range anyway // 1900-1909 true,false,false,false, true,false,false,false, true,false, // 1910-1919 false,false, true,false,false,false, true,false,false,false, // 1920-1929 true,false,false,false, true,false,false,false, true,false, // 1930-1939 false,false, true,false,false,false, true,false,false,false, // 1940-1949 true,false,false,false, true,false,false,false, true,false, // 1950-1959 false,false, true,false,false,false, true,false,false,false, // 1960-1969 true,false,false,false, true,false,false,false, true,false, // 1970-1979 false,false, true,false,false,false, true,false,false,false, // 1980-1989 true,false,false,false, true,false,false,false, true,false, // 1990-1999 false,false, true,false,false,false, true,false,false,false, // 2000-2009 true,false,false,false, true,false,false,false, true,false, // 2010-2019 false,false, true,false,false,false, true,false,false,false, // 2020-2029 true,false,false,false, true,false,false,false, true,false, // 2030-2039 false,false, true,false,false,false, true,false,false,false, // 2040-2049 true,false,false,false, true,false,false,false, true,false, // 2050-2059 false,false, true,false,false,false, true,false,false,false, // 2060-2069 true,false,false,false, true,false,false,false, true,false, // 2070-2079 false,false, true,false,false,false, true,false,false,false, // 2080-2089 true,false,false,false, true,false,false,false, true,false, // 2090-2099 false,false, true,false,false,false, true,false,false,false, // 2100-2109 false,false,false,false, true,false,false,false, true,false, // 2110-2119 false,false, true,false,false,false, true,false,false,false, // 2120-2129 true,false,false,false, true,false,false,false, true,false, // 2130-2139 false,false, true,false,false,false, true,false,false,false, // 2140-2149 true,false,false,false, true,false,false,false, true,false, // 2150-2159 false,false, true,false,false,false, true,false,false,false, // 2160-2169 true,false,false,false, true,false,false,false, true,false, // 2170-2179 false,false, true,false,false,false, true,false,false,false, // 2180-2189 true,false,false,false, true,false,false,false, true,false, // 2190-2199 false,false, true,false,false,false, true,false,false,false, // 2200 false }; return YearIsLeap[y-1900]; } bondsDateStruct intializeDateCpu(int d, int m, int y) { bondsDateStruct currDate; currDate.day = d; currDate.month = m; currDate.year = y; bool leap = isLeapCpu(y); int offset = monthOffsetCpu(m,leap); currDate.dateSerialNum = d + offset + yearOffsetCpu(y); return currDate; } void runBoundsEngine() { //can run multiple times with different number of bonds by uncommenting these lines int nBondsArray[] = {1000000}; for (int numTime=0; numTime < 1; numTime++) { int numBonds = nBondsArray[numTime]; printf("\nNumber of Bonds: %d\n\n", numBonds); inArgsStruct inArgsHost; inArgsHost.discountCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct)); inArgsHost.repoCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct)); inArgsHost.currDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct)); inArgsHost.maturityDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct)); inArgsHost.bondCleanPrice = (dataType*)malloc(numBonds*sizeof(dataType)); inArgsHost.bond = (bondStruct*)malloc(numBonds*sizeof(bondStruct)); inArgsHost.dummyStrike = (dataType*)malloc(numBonds*sizeof(dataType)); srand (123); int numBond; for (numBond = 0; numBond < numBonds; numBond++) { dataType 
repoRate = 0.07; //int repoSettlementDays = 0; int repoCompounding = SIMPLE_INTEREST; dataType repoCompoundFreq = 1; // assume a ten year bond- this is irrelevant bondsDateStruct bondIssueDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 1999 - (rand() % 2)); bondsDateStruct bondMaturityDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 2000 + (rand() % 2)); bondsDateStruct todaysDate = intializeDateCpu(bondMaturityDate.day-1,bondMaturityDate.month,bondMaturityDate.year); bondStruct bond; bond.startDate = bondIssueDate; bond.maturityDate = bondMaturityDate; bond.rate = 0.08 + ((float)rand()/(float)RAND_MAX - 0.5)*0.1; dataType bondCouponFrequency = 2; dataType bondCleanPrice = 89.97693786; bondsYieldTermStruct bondCurve; bondCurve.refDate = todaysDate; bondCurve.calDate = todaysDate; bondCurve.forward = -0.1f; // dummy rate bondCurve.compounding = COMPOUNDED_INTEREST; bondCurve.frequency = bondCouponFrequency; bondCurve.dayCounter = USE_EXACT_DAY; bondCurve.refDate = todaysDate; bondCurve.calDate = todaysDate; bondCurve.compounding = COMPOUNDED_INTEREST; bondCurve.frequency = bondCouponFrequency; dataType dummyStrike = 91.5745; bondsYieldTermStruct repoCurve; repoCurve.refDate = todaysDate; repoCurve.calDate = todaysDate; repoCurve.forward = repoRate; repoCurve.compounding = repoCompounding; repoCurve.frequency = repoCompoundFreq; repoCurve.dayCounter = USE_SERIAL_NUMS; inArgsHost.discountCurve[numBond] = bondCurve; inArgsHost.repoCurve[numBond] = repoCurve; inArgsHost.currDate[numBond] = todaysDate; inArgsHost.maturityDate[numBond] = bondMaturityDate; inArgsHost.bondCleanPrice[numBond] = bondCleanPrice; inArgsHost.bond[numBond] = bond; inArgsHost.dummyStrike[numBond] = dummyStrike; } printf("Inputs for bond with index %d\n", numBonds/2); printf("Bond Issue Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].startDate.month, inArgsHost.bond[numBonds/2].startDate.day, inArgsHost.bond[numBonds/2].startDate.year); printf("Bond Maturity Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].maturityDate.month, inArgsHost.bond[numBonds/2].maturityDate.day, inArgsHost.bond[numBonds/2].maturityDate.year); printf("Bond rate: %f\n\n", inArgsHost.bond[numBonds/2].rate); resultsStruct resultsHost; resultsStruct resultsFromGpu; resultsHost.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType)); resultsHost.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));; resultsHost.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));; resultsHost.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));; resultsFromGpu.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType)); resultsFromGpu.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));; resultsFromGpu.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));; resultsFromGpu.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));; bondsYieldTermStruct* discountCurveGpu; bondsYieldTermStruct* repoCurveGpu; bondsDateStruct* currDateGpu; bondsDateStruct* maturityDateGpu; dataType* bondCleanPriceGpu; bondStruct* bondGpu; dataType* dummyStrikeGpu; dataType* dirtyPriceGpu; dataType* accruedAmountCurrDateGpu; dataType* cleanPriceGpu; dataType* bondForwardValGpu; cudaMalloc((void**)&discountCurveGpu, numBonds*sizeof(bondsYieldTermStruct)); cudaMalloc((void**)&repoCurveGpu, numBonds*sizeof(bondsYieldTermStruct)); cudaMalloc((void**)&currDateGpu, numBonds*sizeof(bondsDateStruct)); cudaMalloc((void**)&maturityDateGpu, numBonds*sizeof(bondsDateStruct)); cudaMalloc((void**)&bondCleanPriceGpu, 
numBonds*sizeof(dataType)); cudaMalloc((void**)&bondGpu, numBonds*sizeof(bondStruct)); cudaMalloc((void**)&dummyStrikeGpu, numBonds*sizeof(dataType)); cudaMalloc((void**)&dirtyPriceGpu, numBonds*sizeof(dataType)); cudaMalloc((void**)&accruedAmountCurrDateGpu, numBonds*sizeof(dataType)); cudaMalloc((void**)&cleanPriceGpu, numBonds*sizeof(dataType)); cudaMalloc((void**)&bondForwardValGpu, numBonds*sizeof(dataType)); cudaMemcpy(discountCurveGpu, inArgsHost.discountCurve, numBonds*sizeof(bondsYieldTermStruct), cudaMemcpyHostToDevice); cudaMemcpy(repoCurveGpu, inArgsHost.repoCurve, numBonds*sizeof(bondsYieldTermStruct), cudaMemcpyHostToDevice); cudaMemcpy(currDateGpu, inArgsHost.currDate, numBonds*sizeof(bondsDateStruct), cudaMemcpyHostToDevice); cudaMemcpy(maturityDateGpu, inArgsHost.maturityDate, numBonds*sizeof(bondsDateStruct), cudaMemcpyHostToDevice); cudaMemcpy(bondCleanPriceGpu, inArgsHost.bondCleanPrice, numBonds*sizeof(dataType), cudaMemcpyHostToDevice); cudaMemcpy(bondGpu, inArgsHost.bond, numBonds*sizeof(bondStruct), cudaMemcpyHostToDevice); cudaMemcpy(dummyStrikeGpu, inArgsHost.dummyStrike, numBonds*sizeof(dataType), cudaMemcpyHostToDevice); long seconds, useconds; float mtimeCpu; float mtimeGpu; struct timeval start; struct timeval end; inArgsStruct inArgs; inArgs.discountCurve = discountCurveGpu; inArgs.repoCurve = repoCurveGpu; inArgs.currDate = currDateGpu; inArgs.maturityDate = maturityDateGpu; inArgs.bondCleanPrice = bondCleanPriceGpu; inArgs.bond = bondGpu; inArgs.dummyStrike = dummyStrikeGpu; resultsStruct results; results.dirtyPrice = dirtyPriceGpu; results.accruedAmountCurrDate = accruedAmountCurrDateGpu; results.cleanPrice = cleanPriceGpu; results.bondForwardVal = bondForwardValGpu; dim3 grid((ceil(((float)numBonds)/((float)256.0f))), 1, 1); dim3 threads(256, 1, 1); cudaDeviceSynchronize(); gettimeofday(&start, NULL); getBondsResultsGpu <<< dim3(grid), dim3(threads ) >>> (inArgs, results, numBonds); cudaDeviceSynchronize(); gettimeofday(&end, NULL); cudaMemcpy(resultsFromGpu.dirtyPrice, dirtyPriceGpu, numBonds*sizeof(dataType), cudaMemcpyDeviceToHost); cudaMemcpy(resultsFromGpu.accruedAmountCurrDate, accruedAmountCurrDateGpu, numBonds*sizeof(dataType), cudaMemcpyDeviceToHost); cudaMemcpy(resultsFromGpu.cleanPrice, cleanPriceGpu, numBonds*sizeof(dataType), cudaMemcpyDeviceToHost); cudaMemcpy(resultsFromGpu.bondForwardVal, bondForwardValGpu, numBonds*sizeof(dataType), cudaMemcpyDeviceToHost); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtimeGpu = ((seconds) * 1000 + ((float)useconds)/1000.0) + 0.5f; printf("Run on GPU\n"); printf("Processing time on GPU: %f (ms) \n\n", mtimeGpu); double totPrice = 0.0; int numBond1; for (numBond1= 0; numBond1< numBonds; numBond1++) { totPrice += resultsFromGpu.dirtyPrice[numBond1]; } printf("Sum of output dirty prices on GPU: %f\n", totPrice); printf("Outputs on GPU for bond with index %d: \n", numBonds/2); printf("Dirty Price: %f\n", resultsFromGpu.dirtyPrice[numBonds/2]); printf("Accrued Amount: %f\n", resultsFromGpu.accruedAmountCurrDate[numBonds/2]); printf("Clean Price: %f\n", resultsFromGpu.cleanPrice[numBonds/2]); printf("Bond Forward Val: %f\n\n", resultsFromGpu.bondForwardVal[numBonds/2]); gettimeofday(&start, NULL); getBondsResultsCpu(inArgsHost, resultsHost, numBonds); gettimeofday(&end, NULL); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtimeCpu = ((seconds) * 1000 + ((float)useconds)/1000.0) + 0.5f; printf("Run on CPU\n"); printf("Processing time on CPU: %f (ms) 
\n\n", mtimeCpu); totPrice = 0.0; for (numBond1= 0; numBond1< numBonds; numBond1++) { totPrice += resultsHost.dirtyPrice[numBond1]; } printf("Sum of output dirty prices on CPU: %f\n", totPrice); printf("Outputs on CPU for bond with index %d: \n", numBonds/2); printf("Dirty Price: %f\n", resultsHost.dirtyPrice[numBonds/2]); printf("Accrued Amount: %f\n", resultsHost.accruedAmountCurrDate[numBonds/2]); printf("Clean Price: %f\n", resultsHost.cleanPrice[numBonds/2]); printf("Bond Forward Val: %f\n\n", resultsHost.bondForwardVal[numBonds/2]); printf("Speedup using GPU: %f\n", mtimeCpu/mtimeGpu); cudaFree(discountCurveGpu); cudaFree(repoCurveGpu); cudaFree(currDateGpu); cudaFree(maturityDateGpu); cudaFree(bondCleanPriceGpu); cudaFree(bondGpu); cudaFree(dummyStrikeGpu); cudaFree(dirtyPriceGpu); cudaFree(accruedAmountCurrDateGpu); cudaFree(cleanPriceGpu); cudaFree(bondForwardValGpu); free(resultsHost.dirtyPrice); free(resultsHost.accruedAmountCurrDate);; free(resultsHost.cleanPrice);; free(resultsHost.bondForwardVal);; free(resultsFromGpu.dirtyPrice); free(resultsFromGpu.accruedAmountCurrDate);; free(resultsFromGpu.cleanPrice);; free(resultsFromGpu.bondForwardVal); free(inArgsHost.discountCurve); free(inArgsHost.repoCurve); free(inArgsHost.currDate); free(inArgsHost.maturityDate); free(inArgsHost.bondCleanPrice); free(inArgsHost.bond); free(inArgsHost.dummyStrike); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runBoundsEngine(); return 0; }
6814be34cdf2be772caabc8804a0c16100d52c75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cstdio> #include<omp.h> #include"globals.h" #define sq(x) ((x)*(x)) //tutaj tez zrobic te optymalizacje zeby 2x nie czytac najlepiej wzgledem wymiaru Xowego __global__ void __launch_bounds__(128) kernelGenerate(int const rSize, int const nPiiTau, real const * const pii, real const * const tau, real const * const aReal, real const * const aImag, real const * const bReal, real const * const bImag, int const * const NmaxTable, real * const II) { volatile __shared__ real realis[128]; volatile __shared__ real imaginalis[128]; const int Nmax = NmaxTable[blockIdx.y]; int index = blockIdx.x * nPiiTau + threadIdx.x; int indexY = blockIdx.y * nPiiTau + threadIdx.x; real r; real i; if(threadIdx.x < Nmax) { const real pi = pii[index]; const real ta = tau[index]; r = aReal[indexY] * pi + bReal[indexY] * ta; i = aImag[indexY] * pi + bImag[indexY] * ta; for(int id = 128 ; id < Nmax ; id+=128) { if(threadIdx.x + id < Nmax) { index += 128; indexY += 128; const real pi = pii[index]; const real ta = tau[index]; r += aReal[indexY] * pi + bReal[indexY] * ta; i += aImag[indexY] * pi + bImag[indexY] * ta; } } } if ( threadIdx.x < Nmax ) { realis[threadIdx.x] = r; imaginalis[threadIdx.x] = i; } else { realis[threadIdx.x]=0.0f; imaginalis[threadIdx.x]=0.0f; } __syncthreads(); if(threadIdx.x < 64 ) { realis[threadIdx.x]+=realis[threadIdx.x+64]; imaginalis[threadIdx.x]+=imaginalis[threadIdx.x+64]; } __syncthreads(); if(threadIdx.x < 32) { realis[threadIdx.x]+=realis[threadIdx.x+32]; realis[threadIdx.x]+=realis[threadIdx.x+16]; realis[threadIdx.x]+=realis[threadIdx.x+8]; realis[threadIdx.x]+=realis[threadIdx.x+4]; realis[threadIdx.x]+=realis[threadIdx.x+2]; realis[threadIdx.x]+=realis[threadIdx.x+1]; imaginalis[threadIdx.x]+=imaginalis[threadIdx.x+32]; imaginalis[threadIdx.x]+=imaginalis[threadIdx.x+16]; imaginalis[threadIdx.x]+=imaginalis[threadIdx.x+8]; imaginalis[threadIdx.x]+=imaginalis[threadIdx.x+4]; imaginalis[threadIdx.x]+=imaginalis[threadIdx.x+2]; imaginalis[threadIdx.x]+=imaginalis[threadIdx.x+1]; if(threadIdx.x==0) II[blockIdx.x + blockIdx.y*gridDim.x] = sq(realis[0]) + sq(imaginalis[0]); } } void cudaGenerate(int rSize, int pattern_length, int * Nmax, real * pii, int nPiiTau, real * tau, real * aReal, real * aImag, real * bReal, real * bImag, real * II ) { //double init = omp_get_wtime(); real * devPii; real * devTau; real * devAReal; real * devAImag; real * devBReal; real * devBImag; real * devII; int * devNmax; hipError_t(hipSetDevice(0)); hipError_t(hipMalloc((void**)&devPii, nPiiTau*pattern_length*sizeof(real))); hipError_t(hipMalloc((void**)&devTau, nPiiTau*pattern_length*sizeof(real))); hipError_t(hipMalloc((void**)&devAReal, rSize*nPiiTau*sizeof(real))); hipError_t(hipMalloc((void**)&devBReal, rSize*nPiiTau*sizeof(real))); hipError_t(hipMalloc((void**)&devAImag, rSize*nPiiTau*sizeof(real))); hipError_t(hipMalloc((void**)&devBImag, rSize*nPiiTau*sizeof(real))); hipError_t(hipMalloc((void**)&devII, rSize*pattern_length*sizeof(real))); hipError_t(hipMalloc((void**)&devNmax, rSize*sizeof(int))); hipError_t(hipMemcpy(devPii, pii, nPiiTau*pattern_length*sizeof(real), hipMemcpyHostToDevice)); hipError_t(hipMemcpy(devTau, tau, nPiiTau*pattern_length*sizeof(real), hipMemcpyHostToDevice)); hipError_t(hipMemcpy(devNmax, Nmax, rSize*sizeof(int), hipMemcpyHostToDevice)); hipError_t(hipMemcpy(devAReal, aReal, rSize*nPiiTau*sizeof(real), hipMemcpyHostToDevice)); hipError_t(hipMemcpy(devBReal, bReal, 
rSize*nPiiTau*sizeof(real), hipMemcpyHostToDevice)); hipError_t(hipMemcpy(devAImag, aImag, rSize*nPiiTau*sizeof(real), hipMemcpyHostToDevice)); hipError_t(hipMemcpy(devBImag, bImag, rSize*nPiiTau*sizeof(real), hipMemcpyHostToDevice)); hipFuncSetCacheConfig(kernelGenerate, hipFuncCachePreferL1); //TODO lepiej L1, nie wiem czemu //printf("%d %d\n", pattern_length, rSize); hipLaunchKernelGGL(( kernelGenerate), dim3(dim3(pattern_length,rSize,1)), dim3(128), 0, 0, rSize, nPiiTau, devPii, devTau, devAReal, devAImag, devBReal, devBImag, devNmax, devII); hipError_t(hipMemcpy(II, devII, rSize*pattern_length*sizeof(real), hipMemcpyDeviceToHost)); hipError_t(hipFree(devPii)); hipError_t(hipFree(devTau)); hipError_t(hipFree(devNmax)); hipError_t(hipFree(devAReal)); hipError_t(hipFree(devAImag)); hipError_t(hipFree(devBReal)); hipError_t(hipFree(devBImag)); hipError_t(hipFree(devII)); //printf("%f C Z A S CUDA\n", omp_get_wtime()-init); } #undef sq
6814be34cdf2be772caabc8804a0c16100d52c75.cu
#include <cstdio>
#include <omp.h>
#include "globals.h"

#define sq(x) ((x)*(x))

// TODO: apply the same read-reuse optimization here as well, so the data is not read twice; preferably along the X dimension
__global__ void __launch_bounds__(128) kernelGenerate(int const rSize, int const nPiiTau,
                                                       real const * const pii, real const * const tau,
                                                       real const * const aReal, real const * const aImag,
                                                       real const * const bReal, real const * const bImag,
                                                       int const * const NmaxTable, real * const II)
{
    // Each block accumulates the real and imaginary parts of its series over Nmax terms,
    // reduces them in shared memory, and stores the squared magnitude into II.
    volatile __shared__ real realis[128];
    volatile __shared__ real imaginalis[128];

    const int Nmax = NmaxTable[blockIdx.y];
    int index  = blockIdx.x * nPiiTau + threadIdx.x;
    int indexY = blockIdx.y * nPiiTau + threadIdx.x;

    real r;
    real i;

    if(threadIdx.x < Nmax)
    {
        const real pi = pii[index];
        const real ta = tau[index];

        r = aReal[indexY] * pi + bReal[indexY] * ta;
        i = aImag[indexY] * pi + bImag[indexY] * ta;

        // Strided loop: each thread accumulates every 128th term of the series.
        for(int id = 128; id < Nmax; id += 128)
        {
            if(threadIdx.x + id < Nmax)
            {
                index  += 128;
                indexY += 128;

                const real pi = pii[index];
                const real ta = tau[index];

                r += aReal[indexY] * pi + bReal[indexY] * ta;
                i += aImag[indexY] * pi + bImag[indexY] * ta;
            }
        }
    }

    if(threadIdx.x < Nmax)
    {
        realis[threadIdx.x] = r;
        imaginalis[threadIdx.x] = i;
    }
    else
    {
        realis[threadIdx.x] = 0.0f;
        imaginalis[threadIdx.x] = 0.0f;
    }
    __syncthreads();

    if(threadIdx.x < 64)
    {
        realis[threadIdx.x] += realis[threadIdx.x + 64];
        imaginalis[threadIdx.x] += imaginalis[threadIdx.x + 64];
    }
    __syncthreads();

    // Warp-level reduction; the shared arrays are volatile, so no further barriers are used.
    if(threadIdx.x < 32)
    {
        realis[threadIdx.x] += realis[threadIdx.x + 32];
        realis[threadIdx.x] += realis[threadIdx.x + 16];
        realis[threadIdx.x] += realis[threadIdx.x + 8];
        realis[threadIdx.x] += realis[threadIdx.x + 4];
        realis[threadIdx.x] += realis[threadIdx.x + 2];
        realis[threadIdx.x] += realis[threadIdx.x + 1];

        imaginalis[threadIdx.x] += imaginalis[threadIdx.x + 32];
        imaginalis[threadIdx.x] += imaginalis[threadIdx.x + 16];
        imaginalis[threadIdx.x] += imaginalis[threadIdx.x + 8];
        imaginalis[threadIdx.x] += imaginalis[threadIdx.x + 4];
        imaginalis[threadIdx.x] += imaginalis[threadIdx.x + 2];
        imaginalis[threadIdx.x] += imaginalis[threadIdx.x + 1];

        if(threadIdx.x == 0)
            II[blockIdx.x + blockIdx.y * gridDim.x] = sq(realis[0]) + sq(imaginalis[0]);
    }
}

void cudaGenerate(int rSize, int pattern_length, int * Nmax, real * pii, int nPiiTau, real * tau,
                  real * aReal, real * aImag, real * bReal, real * bImag, real * II)
{
    //double init = omp_get_wtime();
    real * devPii;
    real * devTau;
    real * devAReal;
    real * devAImag;
    real * devBReal;
    real * devBImag;
    real * devII;
    int * devNmax;

    cudaError(cudaSetDevice(0));

    cudaError(cudaMalloc((void**)&devPii, nPiiTau*pattern_length*sizeof(real)));
    cudaError(cudaMalloc((void**)&devTau, nPiiTau*pattern_length*sizeof(real)));
    cudaError(cudaMalloc((void**)&devAReal, rSize*nPiiTau*sizeof(real)));
    cudaError(cudaMalloc((void**)&devBReal, rSize*nPiiTau*sizeof(real)));
    cudaError(cudaMalloc((void**)&devAImag, rSize*nPiiTau*sizeof(real)));
    cudaError(cudaMalloc((void**)&devBImag, rSize*nPiiTau*sizeof(real)));
    cudaError(cudaMalloc((void**)&devII, rSize*pattern_length*sizeof(real)));
    cudaError(cudaMalloc((void**)&devNmax, rSize*sizeof(int)));

    cudaError(cudaMemcpy(devPii, pii, nPiiTau*pattern_length*sizeof(real), cudaMemcpyHostToDevice));
    cudaError(cudaMemcpy(devTau, tau, nPiiTau*pattern_length*sizeof(real), cudaMemcpyHostToDevice));
    cudaError(cudaMemcpy(devNmax, Nmax, rSize*sizeof(int), cudaMemcpyHostToDevice));
    cudaError(cudaMemcpy(devAReal, aReal, rSize*nPiiTau*sizeof(real), cudaMemcpyHostToDevice));
    cudaError(cudaMemcpy(devBReal, bReal, rSize*nPiiTau*sizeof(real), cudaMemcpyHostToDevice));
    cudaError(cudaMemcpy(devAImag, aImag, rSize*nPiiTau*sizeof(real), cudaMemcpyHostToDevice));
    cudaError(cudaMemcpy(devBImag, bImag, rSize*nPiiTau*sizeof(real), cudaMemcpyHostToDevice));

    cudaFuncSetCacheConfig(kernelGenerate, cudaFuncCachePreferL1); // TODO: preferring L1 performs better here, reason unclear
    //printf("%d %d\n", pattern_length, rSize);

    kernelGenerate<<<dim3(pattern_length, rSize, 1), 128>>>(rSize, nPiiTau, devPii, devTau, devAReal, devAImag, devBReal, devBImag, devNmax, devII);

    cudaError(cudaMemcpy(II, devII, rSize*pattern_length*sizeof(real), cudaMemcpyDeviceToHost));

    cudaError(cudaFree(devPii));
    cudaError(cudaFree(devTau));
    cudaError(cudaFree(devNmax));
    cudaError(cudaFree(devAReal));
    cudaError(cudaFree(devAImag));
    cudaError(cudaFree(devBReal));
    cudaError(cudaFree(devBImag));
    cudaError(cudaFree(devII));

    //printf("%f CUDA TIME\n", omp_get_wtime()-init);
}
#undef sq
75452b6221f414a3b5c81de1b813bb93394d6a58.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <torch/extension.h> #include <hipcub/hipcub.hpp> #include "block_reduce.h" template <typename T, int block_size, int pack_size> __device__ void moe_dpch_one_fwd(T *src_row, T *dst_row, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(src_row + idx, pack); BlockStore(ts_store).Store(dst_row + idx, pack); } } template <typename T, int block_size, int pack_size> __device__ void moe_dpch_one_bwd(T *src_row, T *dst_row, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(dst_row + idx, pack); BlockStore(ts_store).Store(src_row + idx, pack); } } template <typename T, int block_size, int pack_size> __device__ void moe_dpch_two_fwd(T *src_row, T *dst_row1, T *dst_row2, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(src_row + idx, pack); BlockStore(ts_store).Store(dst_row1 + idx, pack); BlockStore(ts_store).Store(dst_row2 + idx, pack); } } template <typename T, int block_size, int pack_size> __device__ void moe_dpch_two_bwd(T *src_row, T *dst_row1, T *dst_row2, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack1[pack_size], pack2[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(dst_row1 + idx, pack1); BlockLoad(ts_load).Load(dst_row2 + idx, pack2); #pragma unroll for (int i = 0; i < pack_size; ++i) { pack1[i] += pack2[i]; } BlockStore(ts_store).Store(src_row + idx, pack1); } } template <typename T, int block_size, int pack_size> __device__ void moe_cb_one_fwd(T *src_row, T *dst_row, const T weight, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename 
BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(src_row + idx, pack); #pragma unroll for (int i = 0; i < pack_size; ++i) { pack[i] *= weight; } BlockStore(ts_store).Store(dst_row + idx, pack); } } template <typename T, int block_size, int pack_size> __device__ void moe_cb_one_bwd(T *src_row, T *dst_row, T *tks_row, T *weight_grad, const T weight, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T grad[pack_size], tokens[pack_size]; float thread_sum = 0; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(dst_row + idx, grad); BlockLoad(ts_load).Load(tks_row + idx, tokens); #pragma unroll for (int i = 0; i < pack_size; ++i) { thread_sum += grad[i] * tokens[i]; grad[i] *= weight; } BlockStore(ts_store).Store(src_row + idx, grad); } blockReduce<ReduceType::kSum, 1>(&thread_sum); if (threadIdx.x == 0) *weight_grad = static_cast<T>(thread_sum); } template <typename T, int block_size, int pack_size> __device__ void moe_cb_two_fwd(T *src_row1, T *src_row2, T *dst_row, const T weight1, const T weight2, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack1[pack_size], pack2[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(src_row1 + idx, pack1); BlockLoad(ts_load).Load(src_row2 + idx, pack2); #pragma unroll for (int i = 0; i < pack_size; ++i) { pack1[i] = pack1[i] * weight1 + pack2[i] * weight2; } BlockStore(ts_store).Store(dst_row + idx, pack1); } } template <typename T, int block_size, int pack_size> __device__ void moe_cb_two_bwd(T *src_row1, T *src_row2, T *dst_row, T *tks_row1, T *tks_row2, T *weight_grad1, T *weight_grad2, const T weight1, const T weight2, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T grad[pack_size], tokens1[pack_size], tokens2[pack_size], sgrad1[pack_size], sgrad2[pack_size]; float thread_sum[2] = {0, 0}; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(dst_row + idx, grad); BlockLoad(ts_load).Load(tks_row1 + idx, tokens1); BlockLoad(ts_load).Load(tks_row2 + idx, tokens2); #pragma unroll for (int i = 0; i < pack_size; ++i) { thread_sum[0] += grad[i] * tokens1[i]; thread_sum[1] += grad[i] * tokens2[i]; sgrad1[i] = weight1 * grad[i]; 
sgrad2[i] = weight2 * grad[i]; } BlockStore(ts_store).Store(src_row1 + idx, sgrad1); BlockStore(ts_store).Store(src_row2 + idx, sgrad2); } blockReduce<ReduceType::kSum, 2>(thread_sum); if (threadIdx.x == 0) *weight_grad1 = static_cast<T>(thread_sum[0]); else if (threadIdx.x == 1) *weight_grad2 = static_cast<T>(thread_sum[1]); } // DISPATCH KERNELS -------------------------------- template <typename T, int block_size, int pack_size> __device__ void moe_dpch_fwd_selector(T *src_row, T *dst_row1, T *dst_row2, const int cols, const int indicator1, const int indicator2) { if (indicator1 != 0 && indicator2 != 0) moe_dpch_two_fwd<T, block_size, pack_size>(src_row, dst_row1, dst_row2, cols); else if (indicator1 != 0) moe_dpch_one_fwd<T, block_size, pack_size>(src_row, dst_row1, cols); else if (indicator2 != 0) moe_dpch_one_fwd<T, block_size, pack_size>(src_row, dst_row2, cols); else return; } template <typename T, int block_size, int pack_size> __device__ void moe_dpch_bwd_selector(T *src_row, T *dst_row1, T *dst_row2, const int cols, const int indicator1, const int indicator2) { if (indicator1 != 0 && indicator2 != 0) moe_dpch_two_bwd<T, block_size, pack_size>(src_row, dst_row1, dst_row2, cols); else if (indicator1 != 0) moe_dpch_one_bwd<T, block_size, pack_size>(src_row, dst_row1, cols); else if (indicator2 != 0) moe_dpch_one_bwd<T, block_size, pack_size>(src_row, dst_row2, cols); else return; } template <typename T, int block_size, int pack_size> __global__ void moe_dpch_fwd_kernel(T *batch_tokens, T *expert_input, int *mask1, int *mask2, int *dest1, int *dest2, const int h) { int row = blockIdx.x; int indicator2 = mask2 == nullptr ? 0 : mask2[row]; moe_dpch_fwd_selector<T, block_size, pack_size>( batch_tokens + (row * h), expert_input + (dest1[row] * h), expert_input + (dest2[row] * h), h, mask1[row], indicator2); } template <typename T, int block_size, int pack_size> __global__ void moe_dpch_bwd_kernel(T *tokens_grad, T *expert_grad, int *mask1, int *mask2, int *dest1, int *dest2, const int h) { int row = blockIdx.x; int indicator2 = mask2 == nullptr ? 
0 : mask2[row]; moe_dpch_bwd_selector<T, block_size, pack_size>( tokens_grad + (row * h), expert_grad + (dest1[row] * h), expert_grad + (dest2[row] * h), h, mask1[row], indicator2); } // COMBINE KERNELS -------------------------------- template <typename T, int block_size, int pack_size> __device__ void moe_cb_fwd_selector(T *src_row1, T *src_row2, T *dst_row, const int cols, const T weight1, const T weight2, const int indicator1, const int indicator2) { if (indicator1 != 0 && indicator2 != 0) moe_cb_two_fwd<T, block_size, pack_size>(src_row1, src_row2, dst_row, weight1, weight2, cols); else if (indicator1 != 0) moe_cb_one_fwd<T, block_size, pack_size>(src_row1, dst_row, weight1, cols); else if (indicator2 != 0) moe_cb_one_fwd<T, block_size, pack_size>(src_row2, dst_row, weight2, cols); else return; } template <typename T, int block_size, int pack_size> __device__ void moe_cb_bwd_selector(T *src_row1, T *src_row2, T *dst_row, const int cols, T *tks_row1, T *tks_row2, T *wt_grad1, T *wt_grad2, const T weight1, const T weight2, const int indicator1, const int indicator2) { if (indicator1 != 0 && indicator2 != 0) moe_cb_two_bwd<T, block_size, pack_size>(src_row1, src_row2, dst_row, tks_row1, tks_row2, wt_grad1, wt_grad2, weight1, weight2, cols); else if (indicator1 != 0) moe_cb_one_bwd<T, block_size, pack_size>(src_row1, dst_row, tks_row1, wt_grad1, weight1, cols); else if (indicator2 != 0) moe_cb_one_bwd<T, block_size, pack_size>(src_row2, dst_row, tks_row2, wt_grad2, weight2, cols); else return; } template <typename T, int block_size, int pack_size> __global__ void moe_cb_fwd_kernel(T *expert_tokens, T *combine_tokens, T *logits, int *mask1, int *mask2, int *dest1, int *dest2, const int e, const int c, const int h) { int row = blockIdx.x, eid1 = dest1[row] / c, eid2 = dest2[row] / c; int indicator2 = mask2 == nullptr ? 0 : mask2[row]; T *row_log = logits + (row * e); moe_cb_fwd_selector<T, block_size, pack_size>( expert_tokens + (dest1[row] * h), expert_tokens + (dest2[row] * h), combine_tokens + (row * h), h, row_log[eid1], row_log[eid2], mask1[row], indicator2); } template <typename T, int block_size, int pack_size> __global__ void moe_cb_bwd_kernel(T *tokens_grad, T *expert_grad, T *tks, T *logits, T *logits_grad, int *mask1, int *mask2, int *dest1, int *dest2, const int e, const int c, const int h) { int row = blockIdx.x, eid1 = dest1[row] / c, eid2 = dest2[row] / c; int indicator2 = mask2 == nullptr ? 
0 : mask2[row]; T *row_log = logits + (row * e), *row_grad = logits_grad + (row * e); moe_cb_bwd_selector<T, block_size, pack_size>( expert_grad + (dest1[row] * h), expert_grad + (dest2[row] * h), tokens_grad + (row * h), h, tks + (dest1[row] * h), tks + (dest2[row] * h), row_grad + eid1, row_grad + eid2, row_log[eid1], row_log[eid2], mask1[row], indicator2); } // CUMSUM KERNEL -------------------------------- template <int block_size, int pack_size> __global__ void cumsum_kernel(int *inputs, int *outputs, const int s, const int e) { assert(s % pack_size == 0); constexpr int bpack_size = block_size * pack_size; int tid = threadIdx.x, bid = blockIdx.x, tps = tid * pack_size, last_sum = -1; __shared__ int temp[block_size + 1]; int pack[pack_size]; for (int idx = 0; idx < s; idx += bpack_size) { int offset = 1; if (idx + tps < s) { temp[tid] = inputs[tps * e + bid]; #pragma unroll for (int i = 1; i < pack_size; ++i) { pack[i] = inputs[(tps + i) * e + bid]; } #pragma unroll for (int i = 1; i < pack_size; ++i) { temp[tid] += pack[i]; } } for (int i = block_size >> 1; i > 0; i >>= 1) { __syncthreads(); if (tid < i) { int j = offset * (2 * tid + 1) - 1; temp[j + offset] += temp[j]; } offset <<= 1; } if (tid == 0) { temp[block_size] = temp[block_size - 1]; temp[block_size - 1] = 0; } for (int i = 1; i < block_size; i <<= 1) { offset >>= 1; __syncthreads(); if (tid < i) { int j = offset * (2 * tid + 1) - 1, k = j + offset, ts = temp[j]; temp[j] = temp[k]; temp[k] += ts; } } __syncthreads(); if (tid == 0) temp[0] = temp[block_size]; __syncthreads(); if (idx + tps < s) { temp[tid + 1] += last_sum; #pragma unroll for (int i = pack_size - 1; i > 0; --i) { outputs[(tps + i) * e + bid] = temp[tid + 1]; temp[tid + 1] -= pack[i]; } outputs[tps * e + bid] = temp[tid + 1]; } __syncthreads(); last_sum += temp[0]; inputs += bpack_size * e; outputs += bpack_size * e; } } // LAUNCH FUNCTIONS -------------------------------- template <typename T> void moe_dpch_fwd_launch(T *batch_tokens, T *expert_input, int *mask1, int *mask2, int *dest1, int *dest2, const int s, const int h) { if (h < 256) hipLaunchKernelGGL(( moe_dpch_fwd_kernel<T, 32, 4>) , dim3(s), dim3(32), 0, 0, batch_tokens, expert_input, mask1, mask2, dest1, dest2, h); else if (h < 512) hipLaunchKernelGGL(( moe_dpch_fwd_kernel<T, 32, 8>) , dim3(s), dim3(32), 0, 0, batch_tokens, expert_input, mask1, mask2, dest1, dest2, h); else if (h < 1024) hipLaunchKernelGGL(( moe_dpch_fwd_kernel<T, 32, 16>) , dim3(s), dim3(32), 0, 0, batch_tokens, expert_input, mask1, mask2, dest1, dest2, h); else if (h < 2048) hipLaunchKernelGGL(( moe_dpch_fwd_kernel<T, 64, 16>) , dim3(s), dim3(64), 0, 0, batch_tokens, expert_input, mask1, mask2, dest1, dest2, h); else hipLaunchKernelGGL(( moe_dpch_fwd_kernel<T, 128, 16>) , dim3(s), dim3(128), 0, 0, batch_tokens, expert_input, mask1, mask2, dest1, dest2, h); } template <typename T> void moe_dpch_bwd_launch(T *tokens_grad, T *expert_grad, int *mask1, int *mask2, int *dest1, int *dest2, const int s, const int h) { if (h < 256) hipLaunchKernelGGL(( moe_dpch_bwd_kernel<T, 32, 4>) , dim3(s), dim3(32), 0, 0, tokens_grad, expert_grad, mask1, mask2, dest1, dest2, h); else if (h < 512) hipLaunchKernelGGL(( moe_dpch_bwd_kernel<T, 32, 8>) , dim3(s), dim3(32), 0, 0, tokens_grad, expert_grad, mask1, mask2, dest1, dest2, h); else if (h < 1024) hipLaunchKernelGGL(( moe_dpch_bwd_kernel<T, 32, 16>) , dim3(s), dim3(32), 0, 0, tokens_grad, expert_grad, mask1, mask2, dest1, dest2, h); else if (h < 2048) hipLaunchKernelGGL(( moe_dpch_bwd_kernel<T, 64, 
16>) , dim3(s), dim3(64), 0, 0, tokens_grad, expert_grad, mask1, mask2, dest1, dest2, h); else hipLaunchKernelGGL(( moe_dpch_bwd_kernel<T, 128, 16>) , dim3(s), dim3(128), 0, 0, tokens_grad, expert_grad, mask1, mask2, dest1, dest2, h); } template <typename T> void moe_cb_fwd_launch(T *expert_tokens, T *combine_tokens, T *logits, int *mask1, int *mask2, int *dest1, int *dest2, const int s, const int e, const int c, const int h) { if (h < 256) hipLaunchKernelGGL(( moe_cb_fwd_kernel<T, 32, 4>), dim3(s), dim3(32), 0, 0, expert_tokens, combine_tokens, logits, mask1, mask2, dest1, dest2, e, c, h); else if (h < 512) hipLaunchKernelGGL(( moe_cb_fwd_kernel<T, 32, 8>), dim3(s), dim3(32), 0, 0, expert_tokens, combine_tokens, logits, mask1, mask2, dest1, dest2, e, c, h); else if (h < 1024) hipLaunchKernelGGL(( moe_cb_fwd_kernel<T, 32, 16>), dim3(s), dim3(32), 0, 0, expert_tokens, combine_tokens, logits, mask1, mask2, dest1, dest2, e, c, h); else if (h < 2048) hipLaunchKernelGGL(( moe_cb_fwd_kernel<T, 64, 16>), dim3(s), dim3(64), 0, 0, expert_tokens, combine_tokens, logits, mask1, mask2, dest1, dest2, e, c, h); else hipLaunchKernelGGL(( moe_cb_fwd_kernel<T, 128, 16>), dim3(s), dim3(128), 0, 0, expert_tokens, combine_tokens, logits, mask1, mask2, dest1, dest2, e, c, h); } template <typename T> void moe_cb_bwd_launch(T *tokens_grad, T *expert_grad, T *tks, T *logits, T *logits_grad, int *mask1, int *mask2, int *dest1, int *dest2, const int s, const int e, const int c, const int h) { if (h < 256) hipLaunchKernelGGL(( moe_cb_bwd_kernel<T, 32, 4>), dim3(s), dim3(32), 0, 0, tokens_grad, expert_grad, tks, logits, logits_grad, mask1, mask2, dest1, dest2, e, c, h); else // if (h < 512) hipLaunchKernelGGL(( moe_cb_bwd_kernel<T, 64, 4>), dim3(s), dim3(64), 0, 0, tokens_grad, expert_grad, tks, logits, logits_grad, mask1, mask2, dest1, dest2, e, c, h); // else if (h < 1024) // moe_cb_bwd_kernel<T, 128, 4><<<s, 128>>> // (tokens_grad, expert_grad, tks, logits, logits_grad, mask1, mask2, // dest1, dest2, e, c, h); // else // moe_cb_bwd_kernel<T, 256, 4><<<s, 256>>> // (tokens_grad, expert_grad, tks, logits, logits_grad, mask1, mask2, // dest1, dest2, e, c, h); } void cumsum_launch(int *inputs, int *outputs, const int s, const int e) { if (s <= 256) hipLaunchKernelGGL(( cumsum_kernel<256, 1>), dim3(e), dim3(256), 0, 0, inputs, outputs, s, e); else if (s <= 512) hipLaunchKernelGGL(( cumsum_kernel<512, 1>), dim3(e), dim3(512), 0, 0, inputs, outputs, s, e); else if (s <= 1024) hipLaunchKernelGGL(( cumsum_kernel<1024, 1>), dim3(e), dim3(1024), 0, 0, inputs, outputs, s, e); else if (s <= 2048) hipLaunchKernelGGL(( cumsum_kernel<1024, 2>), dim3(e), dim3(1024), 0, 0, inputs, outputs, s, e); else hipLaunchKernelGGL(( cumsum_kernel<1024, 4>), dim3(e), dim3(1024), 0, 0, inputs, outputs, s, e); } // API FUNCTIONS -------------------------------- #define DISPATCH_FLOAT_AND_HALF(TYPE, NAME, ...) 
\ switch (TYPE) { \ case at::ScalarType::Float: { \ using scalar_t = float; \ __VA_ARGS__; \ break; \ } \ case at::ScalarType::Half: { \ using scalar_t = at::Half; \ __VA_ARGS__; \ break; \ } \ default: \ AT_ERROR(#NAME, " not implemented yet for specific data type."); \ } torch::Tensor moe_dispatch_cuda_forward(int s, int ec, int h, torch::Tensor batch_tokens, torch::Tensor mask, torch::Tensor dest_idx) { assert(h % 16 == 0); auto res = torch::zeros( {ec, h}, torch::dtype(batch_tokens.dtype()).device(batch_tokens.device())); auto k = mask.size(0); DISPATCH_FLOAT_AND_HALF( batch_tokens.scalar_type(), "moe dispatch forward", moe_dpch_fwd_launch<scalar_t>( batch_tokens.data<scalar_t>(), res.data<scalar_t>(), mask[0].data<int>(), k == 1 ? nullptr : mask[1].data<int>(), dest_idx[0].data<int>(), k == 1 ? dest_idx[0].data<int>() : dest_idx[1].data<int>(), s, h)); return res; } torch::Tensor moe_dispatch_cuda_backward(int s, int ec, int h, torch::Tensor expert_grad, torch::Tensor mask, torch::Tensor dest_idx) { assert(h % 16 == 0); auto res = torch::zeros( {s, h}, torch::dtype(expert_grad.dtype()).device(expert_grad.device())); auto k = mask.size(0); DISPATCH_FLOAT_AND_HALF( expert_grad.scalar_type(), "moe dispatch backward", moe_dpch_bwd_launch<scalar_t>( res.data<scalar_t>(), expert_grad.data<scalar_t>(), mask[0].data<int>(), k == 1 ? nullptr : mask[1].data<int>(), dest_idx[0].data<int>(), k == 1 ? dest_idx[0].data<int>() : dest_idx[1].data<int>(), s, h)); return res; } torch::Tensor moe_combine_cuda_forward(int s, int e, int c, int h, torch::Tensor expert_tokens, torch::Tensor logits, torch::Tensor mask, torch::Tensor dest_idx) { assert(h % 16 == 0); assert(expert_tokens.dtype() == logits.dtype()); auto res = torch::zeros( {s, h}, torch::dtype(expert_tokens.dtype()).device(expert_tokens.device())); auto k = mask.size(0); DISPATCH_FLOAT_AND_HALF( expert_tokens.scalar_type(), "moe combine forward", moe_cb_fwd_launch<scalar_t>( expert_tokens.data<scalar_t>(), res.data<scalar_t>(), logits.data<scalar_t>(), mask[0].data<int>(), k == 1 ? nullptr : mask[1].data<int>(), dest_idx[0].data<int>(), k == 1 ? dest_idx[0].data<int>() : dest_idx[1].data<int>(), s, e, c, h)); return res; } std::vector<torch::Tensor> moe_combine_cuda_backward( int s, int e, int c, int h, torch::Tensor tokens_grad, torch::Tensor expert_tokens, torch::Tensor logits, torch::Tensor mask, torch::Tensor dest_idx) { assert(h % 16 == 0); assert(tokens_grad.dtype() == expert_tokens.dtype()); assert(expert_tokens.dtype() == logits.dtype()); auto egrad = torch::zeros( {e * c, h}, torch::dtype(tokens_grad.dtype()).device(tokens_grad.device())), wgrad = torch::zeros( {s, e}, torch::dtype(logits.dtype()).device(logits.device())); auto k = mask.size(0); DISPATCH_FLOAT_AND_HALF( tokens_grad.scalar_type(), "moe combine backward", moe_cb_bwd_launch<scalar_t>( tokens_grad.data<scalar_t>(), egrad.data<scalar_t>(), expert_tokens.data<scalar_t>(), logits.data<scalar_t>(), wgrad.data<scalar_t>(), mask[0].data<int>(), k == 1 ? nullptr : mask[1].data<int>(), dest_idx[0].data<int>(), k == 1 ? dest_idx[0].data<int>() : dest_idx[1].data<int>(), s, e, c, h)); return {egrad, wgrad}; } torch::Tensor cumsum_sub_one_in_dim0(torch::Tensor mask) { assert(mask.dim() == 2); assert(mask.dtype() == torch::kInt32); const int s = mask.size(0), e = mask.size(1); auto res = torch::empty({s, e}, torch::dtype(torch::kInt32).device(mask.device())); cumsum_launch(mask.data<int>(), res.data<int>(), s, e); return res; }
75452b6221f414a3b5c81de1b813bb93394d6a58.cu
#include <cuda.h> #include <cuda_fp16.h> #include <torch/extension.h> #include <cub/cub.cuh> #include "block_reduce.h" template <typename T, int block_size, int pack_size> __device__ void moe_dpch_one_fwd(T *src_row, T *dst_row, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(src_row + idx, pack); BlockStore(ts_store).Store(dst_row + idx, pack); } } template <typename T, int block_size, int pack_size> __device__ void moe_dpch_one_bwd(T *src_row, T *dst_row, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(dst_row + idx, pack); BlockStore(ts_store).Store(src_row + idx, pack); } } template <typename T, int block_size, int pack_size> __device__ void moe_dpch_two_fwd(T *src_row, T *dst_row1, T *dst_row2, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(src_row + idx, pack); BlockStore(ts_store).Store(dst_row1 + idx, pack); BlockStore(ts_store).Store(dst_row2 + idx, pack); } } template <typename T, int block_size, int pack_size> __device__ void moe_dpch_two_bwd(T *src_row, T *dst_row1, T *dst_row2, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack1[pack_size], pack2[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(dst_row1 + idx, pack1); BlockLoad(ts_load).Load(dst_row2 + idx, pack2); #pragma unroll for (int i = 0; i < pack_size; ++i) { pack1[i] += pack2[i]; } BlockStore(ts_store).Store(src_row + idx, pack1); } } template <typename T, int block_size, int pack_size> __device__ void moe_cb_one_fwd(T *src_row, T *dst_row, const T weight, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, 
cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(src_row + idx, pack); #pragma unroll for (int i = 0; i < pack_size; ++i) { pack[i] *= weight; } BlockStore(ts_store).Store(dst_row + idx, pack); } } template <typename T, int block_size, int pack_size> __device__ void moe_cb_one_bwd(T *src_row, T *dst_row, T *tks_row, T *weight_grad, const T weight, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T grad[pack_size], tokens[pack_size]; float thread_sum = 0; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(dst_row + idx, grad); BlockLoad(ts_load).Load(tks_row + idx, tokens); #pragma unroll for (int i = 0; i < pack_size; ++i) { thread_sum += grad[i] * tokens[i]; grad[i] *= weight; } BlockStore(ts_store).Store(src_row + idx, grad); } blockReduce<ReduceType::kSum, 1>(&thread_sum); if (threadIdx.x == 0) *weight_grad = static_cast<T>(thread_sum); } template <typename T, int block_size, int pack_size> __device__ void moe_cb_two_fwd(T *src_row1, T *src_row2, T *dst_row, const T weight1, const T weight2, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T pack1[pack_size], pack2[pack_size]; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(src_row1 + idx, pack1); BlockLoad(ts_load).Load(src_row2 + idx, pack2); #pragma unroll for (int i = 0; i < pack_size; ++i) { pack1[i] = pack1[i] * weight1 + pack2[i] * weight2; } BlockStore(ts_store).Store(dst_row + idx, pack1); } } template <typename T, int block_size, int pack_size> __device__ void moe_cb_two_bwd(T *src_row1, T *src_row2, T *dst_row, T *tks_row1, T *tks_row2, T *weight_grad1, T *weight_grad2, const T weight1, const T weight2, const int cols) { assert(cols % pack_size == 0); const int bpack_size = block_size * pack_size; typedef cub::BlockLoad<T, block_size, pack_size, cub::BLOCK_LOAD_VECTORIZE> BlockLoad; __shared__ typename BlockLoad::TempStorage ts_load; typedef cub::BlockStore<T, block_size, pack_size, cub::BLOCK_STORE_VECTORIZE> BlockStore; __shared__ typename BlockStore::TempStorage ts_store; int tps = threadIdx.x * pack_size; T grad[pack_size], tokens1[pack_size], tokens2[pack_size], sgrad1[pack_size], sgrad2[pack_size]; float thread_sum[2] = {0, 0}; for (int idx = 0; idx + tps < cols; idx += bpack_size) { BlockLoad(ts_load).Load(dst_row + idx, grad); BlockLoad(ts_load).Load(tks_row1 + idx, tokens1); BlockLoad(ts_load).Load(tks_row2 + idx, tokens2); #pragma unroll for (int i = 0; i < pack_size; ++i) { thread_sum[0] += grad[i] * tokens1[i]; thread_sum[1] += grad[i] * tokens2[i]; sgrad1[i] = weight1 * grad[i]; sgrad2[i] = weight2 * grad[i]; } BlockStore(ts_store).Store(src_row1 + idx, 
sgrad1); BlockStore(ts_store).Store(src_row2 + idx, sgrad2); } blockReduce<ReduceType::kSum, 2>(thread_sum); if (threadIdx.x == 0) *weight_grad1 = static_cast<T>(thread_sum[0]); else if (threadIdx.x == 1) *weight_grad2 = static_cast<T>(thread_sum[1]); } // DISPATCH KERNELS -------------------------------- template <typename T, int block_size, int pack_size> __device__ void moe_dpch_fwd_selector(T *src_row, T *dst_row1, T *dst_row2, const int cols, const int indicator1, const int indicator2) { if (indicator1 != 0 && indicator2 != 0) moe_dpch_two_fwd<T, block_size, pack_size>(src_row, dst_row1, dst_row2, cols); else if (indicator1 != 0) moe_dpch_one_fwd<T, block_size, pack_size>(src_row, dst_row1, cols); else if (indicator2 != 0) moe_dpch_one_fwd<T, block_size, pack_size>(src_row, dst_row2, cols); else return; } template <typename T, int block_size, int pack_size> __device__ void moe_dpch_bwd_selector(T *src_row, T *dst_row1, T *dst_row2, const int cols, const int indicator1, const int indicator2) { if (indicator1 != 0 && indicator2 != 0) moe_dpch_two_bwd<T, block_size, pack_size>(src_row, dst_row1, dst_row2, cols); else if (indicator1 != 0) moe_dpch_one_bwd<T, block_size, pack_size>(src_row, dst_row1, cols); else if (indicator2 != 0) moe_dpch_one_bwd<T, block_size, pack_size>(src_row, dst_row2, cols); else return; } template <typename T, int block_size, int pack_size> __global__ void moe_dpch_fwd_kernel(T *batch_tokens, T *expert_input, int *mask1, int *mask2, int *dest1, int *dest2, const int h) { int row = blockIdx.x; int indicator2 = mask2 == nullptr ? 0 : mask2[row]; moe_dpch_fwd_selector<T, block_size, pack_size>( batch_tokens + (row * h), expert_input + (dest1[row] * h), expert_input + (dest2[row] * h), h, mask1[row], indicator2); } template <typename T, int block_size, int pack_size> __global__ void moe_dpch_bwd_kernel(T *tokens_grad, T *expert_grad, int *mask1, int *mask2, int *dest1, int *dest2, const int h) { int row = blockIdx.x; int indicator2 = mask2 == nullptr ? 
0 : mask2[row]; moe_dpch_bwd_selector<T, block_size, pack_size>( tokens_grad + (row * h), expert_grad + (dest1[row] * h), expert_grad + (dest2[row] * h), h, mask1[row], indicator2); } // COMBINE KERNELS -------------------------------- template <typename T, int block_size, int pack_size> __device__ void moe_cb_fwd_selector(T *src_row1, T *src_row2, T *dst_row, const int cols, const T weight1, const T weight2, const int indicator1, const int indicator2) { if (indicator1 != 0 && indicator2 != 0) moe_cb_two_fwd<T, block_size, pack_size>(src_row1, src_row2, dst_row, weight1, weight2, cols); else if (indicator1 != 0) moe_cb_one_fwd<T, block_size, pack_size>(src_row1, dst_row, weight1, cols); else if (indicator2 != 0) moe_cb_one_fwd<T, block_size, pack_size>(src_row2, dst_row, weight2, cols); else return; } template <typename T, int block_size, int pack_size> __device__ void moe_cb_bwd_selector(T *src_row1, T *src_row2, T *dst_row, const int cols, T *tks_row1, T *tks_row2, T *wt_grad1, T *wt_grad2, const T weight1, const T weight2, const int indicator1, const int indicator2) { if (indicator1 != 0 && indicator2 != 0) moe_cb_two_bwd<T, block_size, pack_size>(src_row1, src_row2, dst_row, tks_row1, tks_row2, wt_grad1, wt_grad2, weight1, weight2, cols); else if (indicator1 != 0) moe_cb_one_bwd<T, block_size, pack_size>(src_row1, dst_row, tks_row1, wt_grad1, weight1, cols); else if (indicator2 != 0) moe_cb_one_bwd<T, block_size, pack_size>(src_row2, dst_row, tks_row2, wt_grad2, weight2, cols); else return; } template <typename T, int block_size, int pack_size> __global__ void moe_cb_fwd_kernel(T *expert_tokens, T *combine_tokens, T *logits, int *mask1, int *mask2, int *dest1, int *dest2, const int e, const int c, const int h) { int row = blockIdx.x, eid1 = dest1[row] / c, eid2 = dest2[row] / c; int indicator2 = mask2 == nullptr ? 0 : mask2[row]; T *row_log = logits + (row * e); moe_cb_fwd_selector<T, block_size, pack_size>( expert_tokens + (dest1[row] * h), expert_tokens + (dest2[row] * h), combine_tokens + (row * h), h, row_log[eid1], row_log[eid2], mask1[row], indicator2); } template <typename T, int block_size, int pack_size> __global__ void moe_cb_bwd_kernel(T *tokens_grad, T *expert_grad, T *tks, T *logits, T *logits_grad, int *mask1, int *mask2, int *dest1, int *dest2, const int e, const int c, const int h) { int row = blockIdx.x, eid1 = dest1[row] / c, eid2 = dest2[row] / c; int indicator2 = mask2 == nullptr ? 
0 : mask2[row]; T *row_log = logits + (row * e), *row_grad = logits_grad + (row * e); moe_cb_bwd_selector<T, block_size, pack_size>( expert_grad + (dest1[row] * h), expert_grad + (dest2[row] * h), tokens_grad + (row * h), h, tks + (dest1[row] * h), tks + (dest2[row] * h), row_grad + eid1, row_grad + eid2, row_log[eid1], row_log[eid2], mask1[row], indicator2); } // CUMSUM KERNEL -------------------------------- template <int block_size, int pack_size> __global__ void cumsum_kernel(int *inputs, int *outputs, const int s, const int e) { assert(s % pack_size == 0); constexpr int bpack_size = block_size * pack_size; int tid = threadIdx.x, bid = blockIdx.x, tps = tid * pack_size, last_sum = -1; __shared__ int temp[block_size + 1]; int pack[pack_size]; for (int idx = 0; idx < s; idx += bpack_size) { int offset = 1; if (idx + tps < s) { temp[tid] = inputs[tps * e + bid]; #pragma unroll for (int i = 1; i < pack_size; ++i) { pack[i] = inputs[(tps + i) * e + bid]; } #pragma unroll for (int i = 1; i < pack_size; ++i) { temp[tid] += pack[i]; } } for (int i = block_size >> 1; i > 0; i >>= 1) { __syncthreads(); if (tid < i) { int j = offset * (2 * tid + 1) - 1; temp[j + offset] += temp[j]; } offset <<= 1; } if (tid == 0) { temp[block_size] = temp[block_size - 1]; temp[block_size - 1] = 0; } for (int i = 1; i < block_size; i <<= 1) { offset >>= 1; __syncthreads(); if (tid < i) { int j = offset * (2 * tid + 1) - 1, k = j + offset, ts = temp[j]; temp[j] = temp[k]; temp[k] += ts; } } __syncthreads(); if (tid == 0) temp[0] = temp[block_size]; __syncthreads(); if (idx + tps < s) { temp[tid + 1] += last_sum; #pragma unroll for (int i = pack_size - 1; i > 0; --i) { outputs[(tps + i) * e + bid] = temp[tid + 1]; temp[tid + 1] -= pack[i]; } outputs[tps * e + bid] = temp[tid + 1]; } __syncthreads(); last_sum += temp[0]; inputs += bpack_size * e; outputs += bpack_size * e; } } // LAUNCH FUNCTIONS -------------------------------- template <typename T> void moe_dpch_fwd_launch(T *batch_tokens, T *expert_input, int *mask1, int *mask2, int *dest1, int *dest2, const int s, const int h) { if (h < 256) moe_dpch_fwd_kernel<T, 32, 4> <<<s, 32>>>(batch_tokens, expert_input, mask1, mask2, dest1, dest2, h); else if (h < 512) moe_dpch_fwd_kernel<T, 32, 8> <<<s, 32>>>(batch_tokens, expert_input, mask1, mask2, dest1, dest2, h); else if (h < 1024) moe_dpch_fwd_kernel<T, 32, 16> <<<s, 32>>>(batch_tokens, expert_input, mask1, mask2, dest1, dest2, h); else if (h < 2048) moe_dpch_fwd_kernel<T, 64, 16> <<<s, 64>>>(batch_tokens, expert_input, mask1, mask2, dest1, dest2, h); else moe_dpch_fwd_kernel<T, 128, 16> <<<s, 128>>>(batch_tokens, expert_input, mask1, mask2, dest1, dest2, h); } template <typename T> void moe_dpch_bwd_launch(T *tokens_grad, T *expert_grad, int *mask1, int *mask2, int *dest1, int *dest2, const int s, const int h) { if (h < 256) moe_dpch_bwd_kernel<T, 32, 4> <<<s, 32>>>(tokens_grad, expert_grad, mask1, mask2, dest1, dest2, h); else if (h < 512) moe_dpch_bwd_kernel<T, 32, 8> <<<s, 32>>>(tokens_grad, expert_grad, mask1, mask2, dest1, dest2, h); else if (h < 1024) moe_dpch_bwd_kernel<T, 32, 16> <<<s, 32>>>(tokens_grad, expert_grad, mask1, mask2, dest1, dest2, h); else if (h < 2048) moe_dpch_bwd_kernel<T, 64, 16> <<<s, 64>>>(tokens_grad, expert_grad, mask1, mask2, dest1, dest2, h); else moe_dpch_bwd_kernel<T, 128, 16> <<<s, 128>>>(tokens_grad, expert_grad, mask1, mask2, dest1, dest2, h); } template <typename T> void moe_cb_fwd_launch(T *expert_tokens, T *combine_tokens, T *logits, int *mask1, int *mask2, int *dest1, int 
*dest2, const int s, const int e, const int c, const int h) { if (h < 256) moe_cb_fwd_kernel<T, 32, 4><<<s, 32>>>(expert_tokens, combine_tokens, logits, mask1, mask2, dest1, dest2, e, c, h); else if (h < 512) moe_cb_fwd_kernel<T, 32, 8><<<s, 32>>>(expert_tokens, combine_tokens, logits, mask1, mask2, dest1, dest2, e, c, h); else if (h < 1024) moe_cb_fwd_kernel<T, 32, 16><<<s, 32>>>(expert_tokens, combine_tokens, logits, mask1, mask2, dest1, dest2, e, c, h); else if (h < 2048) moe_cb_fwd_kernel<T, 64, 16><<<s, 64>>>(expert_tokens, combine_tokens, logits, mask1, mask2, dest1, dest2, e, c, h); else moe_cb_fwd_kernel<T, 128, 16><<<s, 128>>>(expert_tokens, combine_tokens, logits, mask1, mask2, dest1, dest2, e, c, h); } template <typename T> void moe_cb_bwd_launch(T *tokens_grad, T *expert_grad, T *tks, T *logits, T *logits_grad, int *mask1, int *mask2, int *dest1, int *dest2, const int s, const int e, const int c, const int h) { if (h < 256) moe_cb_bwd_kernel<T, 32, 4><<<s, 32>>>(tokens_grad, expert_grad, tks, logits, logits_grad, mask1, mask2, dest1, dest2, e, c, h); else // if (h < 512) moe_cb_bwd_kernel<T, 64, 4><<<s, 64>>>(tokens_grad, expert_grad, tks, logits, logits_grad, mask1, mask2, dest1, dest2, e, c, h); // else if (h < 1024) // moe_cb_bwd_kernel<T, 128, 4><<<s, 128>>> // (tokens_grad, expert_grad, tks, logits, logits_grad, mask1, mask2, // dest1, dest2, e, c, h); // else // moe_cb_bwd_kernel<T, 256, 4><<<s, 256>>> // (tokens_grad, expert_grad, tks, logits, logits_grad, mask1, mask2, // dest1, dest2, e, c, h); } void cumsum_launch(int *inputs, int *outputs, const int s, const int e) { if (s <= 256) cumsum_kernel<256, 1><<<e, 256>>>(inputs, outputs, s, e); else if (s <= 512) cumsum_kernel<512, 1><<<e, 512>>>(inputs, outputs, s, e); else if (s <= 1024) cumsum_kernel<1024, 1><<<e, 1024>>>(inputs, outputs, s, e); else if (s <= 2048) cumsum_kernel<1024, 2><<<e, 1024>>>(inputs, outputs, s, e); else cumsum_kernel<1024, 4><<<e, 1024>>>(inputs, outputs, s, e); } // API FUNCTIONS -------------------------------- #define DISPATCH_FLOAT_AND_HALF(TYPE, NAME, ...) \ switch (TYPE) { \ case at::ScalarType::Float: { \ using scalar_t = float; \ __VA_ARGS__; \ break; \ } \ case at::ScalarType::Half: { \ using scalar_t = at::Half; \ __VA_ARGS__; \ break; \ } \ default: \ AT_ERROR(#NAME, " not implemented yet for specific data type."); \ } torch::Tensor moe_dispatch_cuda_forward(int s, int ec, int h, torch::Tensor batch_tokens, torch::Tensor mask, torch::Tensor dest_idx) { assert(h % 16 == 0); auto res = torch::zeros( {ec, h}, torch::dtype(batch_tokens.dtype()).device(batch_tokens.device())); auto k = mask.size(0); DISPATCH_FLOAT_AND_HALF( batch_tokens.scalar_type(), "moe dispatch forward", moe_dpch_fwd_launch<scalar_t>( batch_tokens.data<scalar_t>(), res.data<scalar_t>(), mask[0].data<int>(), k == 1 ? nullptr : mask[1].data<int>(), dest_idx[0].data<int>(), k == 1 ? dest_idx[0].data<int>() : dest_idx[1].data<int>(), s, h)); return res; } torch::Tensor moe_dispatch_cuda_backward(int s, int ec, int h, torch::Tensor expert_grad, torch::Tensor mask, torch::Tensor dest_idx) { assert(h % 16 == 0); auto res = torch::zeros( {s, h}, torch::dtype(expert_grad.dtype()).device(expert_grad.device())); auto k = mask.size(0); DISPATCH_FLOAT_AND_HALF( expert_grad.scalar_type(), "moe dispatch backward", moe_dpch_bwd_launch<scalar_t>( res.data<scalar_t>(), expert_grad.data<scalar_t>(), mask[0].data<int>(), k == 1 ? nullptr : mask[1].data<int>(), dest_idx[0].data<int>(), k == 1 ? 
dest_idx[0].data<int>() : dest_idx[1].data<int>(), s, h)); return res; } torch::Tensor moe_combine_cuda_forward(int s, int e, int c, int h, torch::Tensor expert_tokens, torch::Tensor logits, torch::Tensor mask, torch::Tensor dest_idx) { assert(h % 16 == 0); assert(expert_tokens.dtype() == logits.dtype()); auto res = torch::zeros( {s, h}, torch::dtype(expert_tokens.dtype()).device(expert_tokens.device())); auto k = mask.size(0); DISPATCH_FLOAT_AND_HALF( expert_tokens.scalar_type(), "moe combine forward", moe_cb_fwd_launch<scalar_t>( expert_tokens.data<scalar_t>(), res.data<scalar_t>(), logits.data<scalar_t>(), mask[0].data<int>(), k == 1 ? nullptr : mask[1].data<int>(), dest_idx[0].data<int>(), k == 1 ? dest_idx[0].data<int>() : dest_idx[1].data<int>(), s, e, c, h)); return res; } std::vector<torch::Tensor> moe_combine_cuda_backward( int s, int e, int c, int h, torch::Tensor tokens_grad, torch::Tensor expert_tokens, torch::Tensor logits, torch::Tensor mask, torch::Tensor dest_idx) { assert(h % 16 == 0); assert(tokens_grad.dtype() == expert_tokens.dtype()); assert(expert_tokens.dtype() == logits.dtype()); auto egrad = torch::zeros( {e * c, h}, torch::dtype(tokens_grad.dtype()).device(tokens_grad.device())), wgrad = torch::zeros( {s, e}, torch::dtype(logits.dtype()).device(logits.device())); auto k = mask.size(0); DISPATCH_FLOAT_AND_HALF( tokens_grad.scalar_type(), "moe combine backward", moe_cb_bwd_launch<scalar_t>( tokens_grad.data<scalar_t>(), egrad.data<scalar_t>(), expert_tokens.data<scalar_t>(), logits.data<scalar_t>(), wgrad.data<scalar_t>(), mask[0].data<int>(), k == 1 ? nullptr : mask[1].data<int>(), dest_idx[0].data<int>(), k == 1 ? dest_idx[0].data<int>() : dest_idx[1].data<int>(), s, e, c, h)); return {egrad, wgrad}; } torch::Tensor cumsum_sub_one_in_dim0(torch::Tensor mask) { assert(mask.dim() == 2); assert(mask.dtype() == torch::kInt32); const int s = mask.size(0), e = mask.size(1); auto res = torch::empty({s, e}, torch::dtype(torch::kInt32).device(mask.device())); cumsum_launch(mask.data<int>(), res.data<int>(), s, e); return res; }
dca4ff71aa8b88ee02a7f8a6f775b84aef40faa5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp" #include "HugeCTR/include/utils.hpp" namespace HugeCTR { template <typename TypeHashKey> void SparseEmbeddingFunctors::get_update_params_results( size_t embedding_vec_size, size_t vocabulary_size, const Tensors2<float> &hash_table_value_tensors, const std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables, Tensor2<TypeHashKey> &hash_table_key, Tensor2<float> &hash_table_value, const ResourceManager &resource_manager) { CudaDeviceContext context; size_t local_gpu_count = resource_manager.get_local_gpu_count(); // memory allocation std::unique_ptr<size_t[]> count(new size_t[local_gpu_count]); size_t total_count = 0; for (size_t id = 0; id < local_gpu_count; id++) { const auto &local_gpu = resource_manager.get_local_gpu(id); context.set_device(local_gpu->get_device_id()); if ((count[id] = hash_tables[id]->get_value_head(local_gpu->get_stream())) != hash_tables[id]->get_size(local_gpu->get_stream())) { std::cout << "hashtable: get_value_head()=" << hash_tables[id]->get_value_head(local_gpu->get_stream()) << ", get_size()=" << hash_tables[id]->get_size(local_gpu->get_stream()) << std::endl; CK_THROW_(Error_t::WrongInput, "Error: hash_table get_value_head() size not equal to get_size()"); } total_count += count[id]; #ifndef NDEBUG std::cout << "GPU[" << id << "]: number of <key,value> pairs:" << count[id] << std::endl; #endif } #ifndef NDEBUG std::cout << "Total number of <key,value> pairs:" << total_count << std::endl; #endif if (total_count > (size_t)vocabulary_size) { CK_THROW_(Error_t::WrongInput, "Error: required download size is larger than hash table vocabulary_size"); } std::unique_ptr<TypeHashKey *[]> d_hash_table_key(new TypeHashKey *[local_gpu_count]); std::unique_ptr<size_t *[]> d_hash_table_value_index(new size_t *[local_gpu_count]); std::unique_ptr<float *[]> d_hash_table_value(new float *[local_gpu_count]); std::unique_ptr<size_t *[]> d_dump_counter(new size_t *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(resource_manager.get_local_gpu(id)->get_device_id()); hipMalloc(&d_hash_table_key[id], count[id] * sizeof(TypeHashKey)); hipMalloc(&d_hash_table_value_index[id], count[id] * sizeof(size_t)); hipMalloc(&d_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float)); hipMalloc(&d_dump_counter[id], count[id] * sizeof(size_t)); } // dump hash table on GPU for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } const auto &local_gpu = resource_manager.get_local_gpu(id); context.set_device(local_gpu->get_device_id()); hash_tables[id]->dump(d_hash_table_key[id], d_hash_table_value_index[id], d_dump_counter[id], local_gpu->get_stream()); get_hash_value(count[id], embedding_vec_size, d_hash_table_value_index[id], hash_table_value_tensors[id].get_ptr(), 
d_hash_table_value[id], local_gpu->get_stream()); } // sync wait sync_all_gpus(resource_manager); // memcpy from GPU to CPU memory size_t key_offset = 0; size_t value_offset = 0; for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(resource_manager.get_local_gpu(id)->get_device_id()); CK_CUDA_THROW_(hipMemcpy(hash_table_key.get_ptr() + key_offset, d_hash_table_key[id], count[id] * sizeof(TypeHashKey), hipMemcpyDeviceToHost)); key_offset += count[id]; CK_CUDA_THROW_(hipMemcpy(hash_table_value.get_ptr() + value_offset, d_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float), hipMemcpyDeviceToHost)); value_offset += count[id] * embedding_vec_size; } for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(resource_manager.get_local_gpu(id)->get_device_id()); CK_CUDA_THROW_(hipFree(d_hash_table_key[id])); CK_CUDA_THROW_(hipFree(d_hash_table_value_index[id])); CK_CUDA_THROW_(hipFree(d_hash_table_value[id])); CK_CUDA_THROW_(hipFree(d_dump_counter[id])); } #ifdef ENABLE_MPI int my_rank = 0; int n_ranks = 1; CK_MPI_THROW_(MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)); CK_MPI_THROW_(MPI_Comm_size(MPI_COMM_WORLD, &n_ranks)); if (n_ranks > 1) { std::unique_ptr<int> displs(new int(n_ranks)); std::unique_ptr<int> recv_count(new int(n_ranks)); MPI_Gather(&total_count, 1, MPI_INT, recv_count.get(), 1, MPI_INT, 0, MPI_COMM_WORLD); if (my_rank == 0) { displs.get()[0] = 0; for (int i = 1; i < n_ranks; i++) { displs.get()[i] = displs.get()[i - 1] + recv_count.get()[i - 1]; } } std::unique_ptr<int> displs_key(new int(n_ranks)); std::unique_ptr<int> recv_count_key(new int(n_ranks)); if (my_rank == 0) { for (int i = 0; i < n_ranks; i++) { recv_count_key.get()[i] = recv_count.get()[i] * sizeof(TypeHashKey); displs_key.get()[i] = displs.get()[i] * sizeof(TypeHashKey); } } MPI_Gatherv(hash_table_key.get_ptr(), total_count * sizeof(TypeHashKey), MPI_CHAR, hash_table_key.get_ptr(), recv_count_key.get(), displs_key.get(), MPI_CHAR, 0, MPI_COMM_WORLD); std::unique_ptr<int> displs_value(new int(n_ranks)); std::unique_ptr<int> recv_count_value(new int(n_ranks)); if (my_rank == 0) { for (int i = 0; i < n_ranks; i++) { recv_count_value.get()[i] = recv_count.get()[i] * embedding_vec_size * sizeof(float); displs_value.get()[i] = displs.get()[i] * embedding_vec_size * sizeof(float); } } MPI_Gatherv(hash_table_value.get_ptr(), total_count * embedding_vec_size * sizeof(float), MPI_CHAR, hash_table_value.get_ptr(), recv_count_value.get(), displs_value.get(), MPI_CHAR, 0, MPI_COMM_WORLD); } #endif return; } template void SparseEmbeddingFunctors::get_update_params_results<unsigned int>( size_t embedding_vec_size, size_t vocabulary_size, const Tensors2<float> &hash_table_value_tensors, const std::vector<std::shared_ptr<HashTable<unsigned int, size_t>>> &hash_tables, Tensor2<unsigned int> &hash_table_key, Tensor2<float> &hash_table_value, const ResourceManager &resource_manager); template void SparseEmbeddingFunctors::get_update_params_results<long long>( size_t embedding_vec_size, size_t vocabulary_size, const Tensors2<float> &hash_table_value_tensors, const std::vector<std::shared_ptr<HashTable<long long, size_t>>> &hash_tables, Tensor2<long long> &hash_table_key, Tensor2<float> &hash_table_value, const ResourceManager &resource_manager); } // namespace HugeCTR
dca4ff71aa8b88ee02a7f8a6f775b84aef40faa5.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp" #include "HugeCTR/include/utils.hpp" namespace HugeCTR { template <typename TypeHashKey> void SparseEmbeddingFunctors::get_update_params_results( size_t embedding_vec_size, size_t vocabulary_size, const Tensors2<float> &hash_table_value_tensors, const std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables, Tensor2<TypeHashKey> &hash_table_key, Tensor2<float> &hash_table_value, const ResourceManager &resource_manager) { CudaDeviceContext context; size_t local_gpu_count = resource_manager.get_local_gpu_count(); // memory allocation std::unique_ptr<size_t[]> count(new size_t[local_gpu_count]); size_t total_count = 0; for (size_t id = 0; id < local_gpu_count; id++) { const auto &local_gpu = resource_manager.get_local_gpu(id); context.set_device(local_gpu->get_device_id()); if ((count[id] = hash_tables[id]->get_value_head(local_gpu->get_stream())) != hash_tables[id]->get_size(local_gpu->get_stream())) { std::cout << "hashtable: get_value_head()=" << hash_tables[id]->get_value_head(local_gpu->get_stream()) << ", get_size()=" << hash_tables[id]->get_size(local_gpu->get_stream()) << std::endl; CK_THROW_(Error_t::WrongInput, "Error: hash_table get_value_head() size not equal to get_size()"); } total_count += count[id]; #ifndef NDEBUG std::cout << "GPU[" << id << "]: number of <key,value> pairs:" << count[id] << std::endl; #endif } #ifndef NDEBUG std::cout << "Total number of <key,value> pairs:" << total_count << std::endl; #endif if (total_count > (size_t)vocabulary_size) { CK_THROW_(Error_t::WrongInput, "Error: required download size is larger than hash table vocabulary_size"); } std::unique_ptr<TypeHashKey *[]> d_hash_table_key(new TypeHashKey *[local_gpu_count]); std::unique_ptr<size_t *[]> d_hash_table_value_index(new size_t *[local_gpu_count]); std::unique_ptr<float *[]> d_hash_table_value(new float *[local_gpu_count]); std::unique_ptr<size_t *[]> d_dump_counter(new size_t *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(resource_manager.get_local_gpu(id)->get_device_id()); cudaMalloc(&d_hash_table_key[id], count[id] * sizeof(TypeHashKey)); cudaMalloc(&d_hash_table_value_index[id], count[id] * sizeof(size_t)); cudaMalloc(&d_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float)); cudaMalloc(&d_dump_counter[id], count[id] * sizeof(size_t)); } // dump hash table on GPU for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } const auto &local_gpu = resource_manager.get_local_gpu(id); context.set_device(local_gpu->get_device_id()); hash_tables[id]->dump(d_hash_table_key[id], d_hash_table_value_index[id], d_dump_counter[id], local_gpu->get_stream()); get_hash_value(count[id], embedding_vec_size, d_hash_table_value_index[id], hash_table_value_tensors[id].get_ptr(), d_hash_table_value[id], local_gpu->get_stream()); } // 
sync wait sync_all_gpus(resource_manager); // memcpy from GPU to CPU memory size_t key_offset = 0; size_t value_offset = 0; for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(resource_manager.get_local_gpu(id)->get_device_id()); CK_CUDA_THROW_(cudaMemcpy(hash_table_key.get_ptr() + key_offset, d_hash_table_key[id], count[id] * sizeof(TypeHashKey), cudaMemcpyDeviceToHost)); key_offset += count[id]; CK_CUDA_THROW_(cudaMemcpy(hash_table_value.get_ptr() + value_offset, d_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float), cudaMemcpyDeviceToHost)); value_offset += count[id] * embedding_vec_size; } for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(resource_manager.get_local_gpu(id)->get_device_id()); CK_CUDA_THROW_(cudaFree(d_hash_table_key[id])); CK_CUDA_THROW_(cudaFree(d_hash_table_value_index[id])); CK_CUDA_THROW_(cudaFree(d_hash_table_value[id])); CK_CUDA_THROW_(cudaFree(d_dump_counter[id])); } #ifdef ENABLE_MPI int my_rank = 0; int n_ranks = 1; CK_MPI_THROW_(MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)); CK_MPI_THROW_(MPI_Comm_size(MPI_COMM_WORLD, &n_ranks)); if (n_ranks > 1) { std::unique_ptr<int> displs(new int(n_ranks)); std::unique_ptr<int> recv_count(new int(n_ranks)); MPI_Gather(&total_count, 1, MPI_INT, recv_count.get(), 1, MPI_INT, 0, MPI_COMM_WORLD); if (my_rank == 0) { displs.get()[0] = 0; for (int i = 1; i < n_ranks; i++) { displs.get()[i] = displs.get()[i - 1] + recv_count.get()[i - 1]; } } std::unique_ptr<int> displs_key(new int(n_ranks)); std::unique_ptr<int> recv_count_key(new int(n_ranks)); if (my_rank == 0) { for (int i = 0; i < n_ranks; i++) { recv_count_key.get()[i] = recv_count.get()[i] * sizeof(TypeHashKey); displs_key.get()[i] = displs.get()[i] * sizeof(TypeHashKey); } } MPI_Gatherv(hash_table_key.get_ptr(), total_count * sizeof(TypeHashKey), MPI_CHAR, hash_table_key.get_ptr(), recv_count_key.get(), displs_key.get(), MPI_CHAR, 0, MPI_COMM_WORLD); std::unique_ptr<int> displs_value(new int(n_ranks)); std::unique_ptr<int> recv_count_value(new int(n_ranks)); if (my_rank == 0) { for (int i = 0; i < n_ranks; i++) { recv_count_value.get()[i] = recv_count.get()[i] * embedding_vec_size * sizeof(float); displs_value.get()[i] = displs.get()[i] * embedding_vec_size * sizeof(float); } } MPI_Gatherv(hash_table_value.get_ptr(), total_count * embedding_vec_size * sizeof(float), MPI_CHAR, hash_table_value.get_ptr(), recv_count_value.get(), displs_value.get(), MPI_CHAR, 0, MPI_COMM_WORLD); } #endif return; } template void SparseEmbeddingFunctors::get_update_params_results<unsigned int>( size_t embedding_vec_size, size_t vocabulary_size, const Tensors2<float> &hash_table_value_tensors, const std::vector<std::shared_ptr<HashTable<unsigned int, size_t>>> &hash_tables, Tensor2<unsigned int> &hash_table_key, Tensor2<float> &hash_table_value, const ResourceManager &resource_manager); template void SparseEmbeddingFunctors::get_update_params_results<long long>( size_t embedding_vec_size, size_t vocabulary_size, const Tensors2<float> &hash_table_value_tensors, const std::vector<std::shared_ptr<HashTable<long long, size_t>>> &hash_tables, Tensor2<long long> &hash_table_key, Tensor2<float> &hash_table_value, const ResourceManager &resource_manager); } // namespace HugeCTR
cecb73147578206e64fca1928f43958f8ff600dc.hip
// !!! This is a file automatically generated by hipify!!! // // main.cpp // // // Created by Elijah Afanasiev on 25.09.2018. // // // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #ifndef MAX #define MAX(a,b) (a > b ? a : b) #endif __global__ void vectorAddGPU(float *a, float *b, float *c, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } void unified_samle(int size = 1048576) { int n = size; int bytes = size * sizeof(float); float *h_a, *h_b, *h_c; dim3 block(256); dim3 grid((unsigned int)ceil(n/(float)block.x)); printf("Allocating device unified memory on host and device\n"); hipMallocManaged(&h_a, bytes); hipMallocManaged(&h_b, bytes); hipMallocManaged(&h_c, bytes); for(int i=0;i<n;i++) { h_a[i] = rand() / (float)RAND_MAX; h_b[i] = rand() / (float)RAND_MAX; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); printf("Doing GPU Vector add\n"); hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, 0, h_a, h_b, h_c, n); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Unified memory time: %f ms\n", milliseconds); hipDeviceSynchronize(); } void pinned_samle(int size = 1048576) { int n = size; int bytes = size * sizeof(float); float *h_a, *h_b, *h_c; float *d_a, *d_b, *d_c; dim3 block(256); dim3 grid((unsigned int)ceil(n/(float)block.x)); printf("Allocating device pinned memory on host..\n"); hipHostMalloc(&h_a, bytes); hipHostMalloc(&h_b, bytes); hipHostMalloc(&h_c, bytes); hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); for(int i=0;i<n;i++) { h_a[i] = rand() / (float)RAND_MAX; h_b[i] = rand() / (float)RAND_MAX; h_c[i] = 0; } printf("Copying to device..\n"); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipMemcpy(d_a, h_a, n*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, n*sizeof(float), hipMemcpyHostToDevice); printf("Doing GPU Vector add\n"); hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, n); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Pinned memory time: %f ms\n", milliseconds); hipDeviceSynchronize(); hipFree(d_a); hipFree(d_b); hipFree(d_c); } void usual_sample(int size = 1048576) { int n = size; int nBytes = n*sizeof(float); float *a, *b; // host data float *c; // results a = (float *)malloc(nBytes); b = (float *)malloc(nBytes); c = (float *)malloc(nBytes); float *a_d,*b_d,*c_d; dim3 block(256); dim3 grid((unsigned int)ceil(n/(float)block.x)); for(int i=0;i<n;i++) { a[i] = rand() / (float)RAND_MAX; b[i] = rand() / (float)RAND_MAX; c[i] = 0; } printf("Allocating device memory on host..\n"); hipMalloc((void **)&a_d,n*sizeof(float)); hipMalloc((void **)&b_d,n*sizeof(float)); hipMalloc((void **)&c_d,n*sizeof(float)); printf("Copying to device..\n"); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipMemcpy(a_d,a,n*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(b_d,b,n*sizeof(float), hipMemcpyHostToDevice); printf("Doing GPU Vector add\n"); hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, n); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("time: %f ms\n", 
milliseconds); hipDeviceSynchronize(); hipFree(a_d); hipFree(b_d); hipFree(c_d); } int main(int argc, char **argv) { if (argc < 2) { printf("usage: %s <vector_size>\n", argv[0]); return 1; } /* avoid reading argv[1] when no size argument was passed */ int size = atoi(argv[1]); usual_sample(size); pinned_samle(size); unified_samle(size); return 0; }
cecb73147578206e64fca1928f43958f8ff600dc.cu
// // main.cpp // // // Created by Elijah Afanasiev on 25.09.2018. // // // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda.h> #include <cuda_runtime.h> #ifndef MAX #define MAX(a,b) (a > b ? a : b) #endif __global__ void vectorAddGPU(float *a, float *b, float *c, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } void unified_samle(int size = 1048576) { int n = size; int bytes = size * sizeof(float); float *h_a, *h_b, *h_c; dim3 block(256); dim3 grid((unsigned int)ceil(n/(float)block.x)); printf("Allocating device unified memory on host and device\n"); cudaMallocManaged(&h_a, bytes); cudaMallocManaged(&h_b, bytes); cudaMallocManaged(&h_c, bytes); for(int i=0;i<n;i++) { h_a[i] = rand() / (float)RAND_MAX; h_b[i] = rand() / (float)RAND_MAX; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); printf("Doing GPU Vector add\n"); vectorAddGPU<<<grid, block>>>(h_a, h_b, h_c, n); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Unified memory time: %f ms\n", milliseconds); cudaThreadSynchronize(); } void pinned_samle(int size = 1048576) { int n = size; int bytes = size * sizeof(float); float *h_a, *h_b, *h_c; float *d_a, *d_b, *d_c; dim3 block(256); dim3 grid((unsigned int)ceil(n/(float)block.x)); printf("Allocating device pinned memory on host..\n"); cudaMallocHost(&h_a, bytes); cudaMallocHost(&h_b, bytes); cudaMallocHost(&h_c, bytes); cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); for(int i=0;i<n;i++) { h_a[i] = rand() / (float)RAND_MAX; h_b[i] = rand() / (float)RAND_MAX; h_c[i] = 0; } printf("Copying to device..\n"); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudaMemcpy(d_a, h_a, n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, n*sizeof(float), cudaMemcpyHostToDevice); printf("Doing GPU Vector add\n"); vectorAddGPU<<<grid, block>>>(d_a, d_b, d_c, n); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Pinned memory time: %f ms\n", milliseconds); cudaThreadSynchronize(); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } void usual_sample(int size = 1048576) { int n = size; int nBytes = n*sizeof(float); float *a, *b; // host data float *c; // results a = (float *)malloc(nBytes); b = (float *)malloc(nBytes); c = (float *)malloc(nBytes); float *a_d,*b_d,*c_d; dim3 block(256); dim3 grid((unsigned int)ceil(n/(float)block.x)); for(int i=0;i<n;i++) { a[i] = rand() / (float)RAND_MAX; b[i] = rand() / (float)RAND_MAX; c[i] = 0; } printf("Allocating device memory on host..\n"); cudaMalloc((void **)&a_d,n*sizeof(float)); cudaMalloc((void **)&b_d,n*sizeof(float)); cudaMalloc((void **)&c_d,n*sizeof(float)); printf("Copying to device..\n"); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudaMemcpy(a_d,a,n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(b_d,b,n*sizeof(float), cudaMemcpyHostToDevice); printf("Doing GPU Vector add\n"); vectorAddGPU<<<grid, block>>>(a_d, b_d, c_d, n); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("time: %f ms\n", milliseconds); cudaThreadSynchronize(); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); } int main(int argc, char **argv) { 
if (argc < 2) { printf("usage: %s <vector_size>\n", argv[0]); return 1; } /* avoid reading argv[1] when no size argument was passed */ int size = atoi(argv[1]); usual_sample(size); pinned_samle(size); unified_samle(size); return 0; }
5d54c7dc0d5f3ce998c7afc0be1d7d35cfea55e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "FedTree/Encryption/paillier_gpu.h" #include "FedTree/util/device_lambda.cuh" #include "gmp.h" //#include "perf_tests/gpu_support.h" void to_mpz(mpz_t r, uint32_t *x, uint32_t count) { mpz_import(r, count, -1, sizeof(uint32_t), 0, 0, x); } void from_mpz(mpz_t s, uint32_t *x, uint32_t count) { size_t words; if(mpz_sizeinbase(s, 2)>count*32) { fprintf(stderr, "from_mpz failed -- result does not fit\n"); exit(1); } mpz_export(x, &words, -1, sizeof(uint32_t), 0, 0, s); while(words<count) x[words++]=0; } void cgbn_check(cgbn_error_report_t *report, const char *file=NULL, int32_t line=0) { // check for cgbn errors if(cgbn_error_report_check(report)) { printf("\n"); printf("CGBN error occurred: %s\n", cgbn_error_string(report)); if(report->_instance!=0xFFFFFFFF) { printf("Error reported by instance %d", report->_instance); if(report->_blockIdx.x!=0xFFFFFFFF || report->_threadIdx.x!=0xFFFFFFFF) printf(", "); if(report->_blockIdx.x!=0xFFFFFFFF) printf("blockIdx=(%d, %d, %d) ", report->_blockIdx.x, report->_blockIdx.y, report->_blockIdx.z); if(report->_threadIdx.x!=0xFFFFFFFF) printf("threadIdx=(%d, %d, %d)", report->_threadIdx.x, report->_threadIdx.y, report->_threadIdx.z); printf("\n"); } else { printf("Error reported by blockIdx=(%d %d %d)", report->_blockIdx.x, report->_blockIdx.y, report->_blockIdx.z); printf("threadIdx=(%d %d %d)\n", report->_threadIdx.x, report->_threadIdx.y, report->_threadIdx.z); } if(file!=NULL) printf("file %s, line %d\n", file, line); exit(1); } } #define CGBN_CHECK(report) cgbn_check(report, __FILE__, __LINE__) #define TPI 32 #define ENV_BITS 1024 typedef cgbn_context_t<TPI> context_t; typedef cgbn_env_t<context_t, ENV_BITS> env_t; //template<uint32_t BITS> void Paillier_GPU::add(mpz_t &result, mpz_t &x, mpz_t &y){ mpz_mul(result, x, y); mpz_mod(result, result, paillier_cpu.n_square); return; } //template<uint32_t BITS> void Paillier_GPU::mul(mpz_t result, mpz_t &x, mpz_t &y){ mpz_powm(result, x, y, paillier_cpu.n_square); return ; } //template<uint32_t BITS> void Paillier_GPU::L_function(mpz_t result, mpz_t input, mpz_t N){ mpz_sub_ui(result, input, 1); mpz_tdiv_q(result, result, N); } void Paillier_GPU::parameters_cpu_to_gpu(){ cgbn_mem_t<BITS> *n_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); cgbn_mem_t<BITS> *n_square_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); cgbn_mem_t<BITS> *generator_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); cgbn_mem_t<BITS> *lambda_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); cgbn_mem_t<BITS> *mu_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); from_mpz(paillier_cpu.n, n_cpu->_limbs, BITS/32); from_mpz(paillier_cpu.n_square, n_square_cpu->_limbs, BITS/32); from_mpz(paillier_cpu.generator, generator_cpu->_limbs, BITS/32); from_mpz(paillier_cpu.lambda, lambda_cpu->_limbs, BITS/32); from_mpz(paillier_cpu.mu, mu_cpu->_limbs, BITS/32); // cgbn_mem_t<BITS> *n_gpu; CUDA_CHECK(hipMalloc((void**)&n_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(hipMemcpy(n_gpu, n_cpu, sizeof(cgbn_mem_t<BITS>), hipMemcpyHostToDevice)); // cgbn_mem_t<BITS> *n_square_gpu; CUDA_CHECK(hipMalloc((void**)&n_square_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(hipMemcpy(n_square_gpu, n_square_cpu, sizeof(cgbn_mem_t<BITS>), hipMemcpyHostToDevice)); // cgbn_mem_t<BITS> *generator_gpu; CUDA_CHECK(hipMalloc((void**)&generator_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(hipMemcpy(generator_gpu, generator_cpu, 
sizeof(cgbn_mem_t<BITS>), hipMemcpyHostToDevice)); // cgbn_mem_t<BITS> *lambda_gpu; CUDA_CHECK(hipMalloc((void**)&lambda_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(hipMemcpy(lambda_gpu, lambda_cpu, sizeof(cgbn_mem_t<BITS>), hipMemcpyHostToDevice)); // cgbn_mem_t<BITS> *mu_gpu; CUDA_CHECK(hipMalloc((void**)&mu_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(hipMemcpy(mu_gpu, mu_cpu, sizeof(cgbn_mem_t<BITS>), hipMemcpyHostToDevice)); CUDA_CHECK(hipDeviceSynchronize()); free(n_cpu); free(n_square_cpu); free(generator_cpu); free(lambda_cpu); free(mu_cpu); // cgbn_mem_t<BITS> *r_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); // from_mpz(paillier_cpu.r, r_cpu->_limbs, BITS/32); // CUDA_CHECK(hipMalloc((void**)&random_gpu, sizeof(cgbn_mem_t<BITS>))); // CUDA_CHECK(hipMemcpy(random_gpu, r_cpu, sizeof(cgbn_mem_t<BITS>), hipMemcpyHostToDevice)); // free(r_cpu); } //template<uint32_t BITS> void Paillier_GPU::keygen(){ paillier_cpu.keyGen(BITS); parameters_cpu_to_gpu(); // gmp_randstate_t state = new gmp_randstate_t(); // gmp_randinit_mt(state); //// gmp_randseed_ui(state, 1000U); // mpz tmp1, tmp2, tmp3, tmp4; // mpz_init(tmp1); // mpz_init(tmp2); // mpz_init(tmp3); // mpz_init(tmp4); // while (true){ // mpz_urandomb(p, gpc_randstate, BITS/4); // mpz_urandomb(q, gpc_randstate, BITS/4); // mpz_nextprime(p, p); // mpz_nextprime(q, q); // mpz_sub_ui(tmp1, p, 1); // mpz_sub_ui(tmp2, q, 1); // mpz_mul(tmp3, tmp1, tmp2); // tmp3 = (p-1)(q-1) // mpz_mul(tmp4, p, q); // tmp4 = p*q // mpz_gcd(tmp3, tmp3, tmp4); // tmp = gcd(pq, (p-1)(q-1)) // if(mpz_cmp_ui(tmp3, 1) == 0) // gcd(pq, (p-1)(q-1)) == 1 // break; // } // // n = tmp4; // n = p * q // mpz_add_ui(generator, n, 1); // generator = modulus + 1 // mpz_lcm(lambda, tmp1, tmp2); // lamda = lcm(p-1, q-1) // mpz_mul(n_square, n, n); // mpz_t lambda_power; // mpz_init(lambda_power); // mpz_powm(lambda_power, generator, lambda, n_square); // L_function(mu, lambda_power, n); // mpz_invert(mu, mu, N); // u = L((generator^lambda) mod n ^ 2) ) ^ -1 mod modulus // mpz_clear(tmp1); // mpz_clear(tmp2); // mpz_clear(tmp3); // mpz_clear(tmp4); // mpz_clear(lambda_power); } __global__ void kernel_encrypt(cgbn_gh<BITS> *gh_gpu, cgbn_gh<BITS> *gh_results_gpu, cgbn_mem_t<BITS> *generator_gpu, cgbn_mem_t<BITS> *random_gpu, cgbn_mem_t<BITS> *n_gpu, cgbn_mem_t<BITS> *n_square_gpu, int n_instances){ // cgbm_mem_t<BITS> *g_gpu_test = new cgbm_mem_t<BITS>(); int32_t idx; idx = (blockIdx.x*blockDim.x + threadIdx.x)/TPI; if(idx >= n_instances) return; context_t bn_context(cgbn_report_monitor); // context_t bn_context(cgbn_report_monitor); env_t bn_env(bn_context.env<env_t>()); env_t::cgbn_t g, m, r, n, n_square, re1, re2, result, result2; env_t::cgbn_wide_t w; //todo: check whether g is in gpu or not. 
compare with another way: convert g to cgbn_mem_t before kernel // cgbn_set_ui32(bn_env, m, (uint32_t) (message_device_data[idx].g * 1e6)); cgbn_load(bn_env, m, &(gh_gpu[idx].g)); // cgbn_rem(bn_env, re1, m, m); cgbn_load(bn_env, g, generator_gpu); // cgbn_rem(bn_env, re1, g, g); cgbn_load(bn_env, r, random_gpu); cgbn_load(bn_env, n, n_gpu); cgbn_load(bn_env, n_square, n_square_gpu); // cgbn_rem(bn_env, g_mod, r, m); // cgbn_rem(bn_env, g_mod, g, g); // compute g_enc cgbn_modular_power(bn_env, re1, g, m, n_square); cgbn_modular_power(bn_env, re2, r, n, n_square); cgbn_mul_wide(bn_env, w, re1, re2); cgbn_rem_wide(bn_env, result, w, n_square); cgbn_store(bn_env, &(gh_results_gpu[idx].g), result); // // compute h_enc //// cgbn_set_ui32(bn_env, m, (uint32_t) (message_device_data[idx].h * 1e6)); cgbn_load(bn_env, m, &(gh_gpu[idx].h)); cgbn_modular_power(bn_env, re1, g, m, n_square); cgbn_mul_wide(bn_env, w, re1, re2); cgbn_rem_wide(bn_env, result2, w, n_square); cgbn_store(bn_env, &(gh_results_gpu[idx].h), result2); } //template<uint32_t BITS> void Paillier_GPU::encrypt(SyncArray<GHPair> &message){ // auto message_device_data = message.device_data(); // cgbn_error_report_t *report; // CUDA_CHECK(cgbn_error_report_alloc(&report)); int n_instances = message.size(); auto message_host_data = message.host_data(); cgbn_gh<BITS> *gh_cpu = (cgbn_gh<BITS> *)malloc(sizeof(cgbn_gh<BITS>) * n_instances); mpz_t g_mpz, h_mpz; mpz_init(g_mpz); mpz_init(h_mpz); // std::cout<<"test import and export"<<std::endl; // float a = -0.2; // long a_ul = (long)(a * 1e6); // std::cout<<"import a_ul:"<<a_ul<<std::endl; // mpz_t tmp; // mpz_init(tmp); // mpz_import(tmp, 1, -1, sizeof(a_ul), 0, 0, &a_ul); // std::cout<<"import mpz:"<<tmp<<std::endl; // long a_l; // mpz_export(&a_l, (size_t*)0, -1, sizeof(a_l), 0, 0, tmp); // std::cout<<"export a_l:"<<a_l<<std::endl; // float a_res = (float) a_l/1e6; // std::cout<<"a_res:"<<a_res<<std::endl; for(int i = 0; i < n_instances; i++){ long g_ul = (long) (message_host_data[i].g * 1e6); // if(i == 0) // std::cout<<"g_ul:"<<g_ul<<std::endl; long h_ul = (long) (message_host_data[i].h * 1e6); mpz_import(g_mpz, 1, -1, sizeof(g_ul), 0, 0, &g_ul); // if(i == 0) // std::cout<<"g_mpz:"<<g_mpz<<std::endl; mpz_import(h_mpz, 1, -1, sizeof(h_ul), 0, 0, &h_ul); from_mpz(g_mpz, gh_cpu[i].g._limbs, BITS / 32); from_mpz(h_mpz, gh_cpu[i].h._limbs, BITS / 32); } mpz_clear(g_mpz); mpz_clear(h_mpz); cgbn_gh<BITS> *gh_gpu; CUDA_CHECK(hipMalloc((void**)&gh_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); CUDA_CHECK(hipMemcpy(gh_gpu, gh_cpu, sizeof(cgbn_gh<BITS>) * n_instances, hipMemcpyHostToDevice)); free(gh_cpu); gmp_randstate_t state; gmp_randinit_mt(state); // gmp_randseed_ui(state, 1000U); mpz_t r; mpz_init(r); while(true) { mpz_urandomm(r, state, paillier_cpu.n); if(mpz_cmp_ui(r, 0)) break; } // mpz_urandomb(r, state, BITS); // mpz_add_ui(r, r, 1); //ensure r > 0 // mpz_mod(r, r, paillier_cpu.n); cgbn_mem_t<BITS> *random_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); from_mpz(r, random_cpu->_limbs, BITS/32); cgbn_mem_t<BITS> *random_gpu; CUDA_CHECK(hipMalloc((void**)&random_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(hipMemcpy(random_gpu, random_cpu, sizeof(cgbn_mem_t<BITS>), hipMemcpyHostToDevice)); free(random_cpu); mpz_clear(r); cgbn_gh<BITS>* gh_results_gpu; CUDA_CHECK(hipMalloc((void **)&gh_results_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); // hipMemcpy(&(gpuInstances->n), &n,, sizeof(n), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_encrypt), dim3((n_instances+3)/4), dim3(128), 
0, 0, gh_gpu, gh_results_gpu, generator_gpu, random_gpu, n_gpu, n_square_gpu, n_instances); CUDA_CHECK(hipDeviceSynchronize()); // CGBN_CHECK(report); // CUDA_CHECK(hipFree(gpu_parameters)); cgbn_gh<BITS>* gh_results = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); CUDA_CHECK(hipMemcpy(gh_results, gh_results_gpu, sizeof(cgbn_gh<BITS>)*n_instances, hipMemcpyDeviceToHost)); CUDA_CHECK(hipFree(gh_results_gpu)); // auto message_host_data = message.host_data(); for(int i = 0; i < n_instances; i++){ mpz_init(message_host_data[i].g_enc); mpz_init(message_host_data[i].h_enc); // todo: another way: directly copy in GPU. to_mpz(message_host_data[i].g_enc, gh_results[i].g._limbs, BITS / 32); to_mpz(message_host_data[i].h_enc, gh_results[i].h._limbs, BITS / 32); // message_host_data[i].encrypted=true; } free(gh_results); // CUDA_CHECK(cgbn_error_report_free(report)); } //template<uint32_t BITS> //void Paillier_GPU::encrypt(GHPair &message){ //// cgbn_error_report_t *report; //// CUDA_CHECK(cgbn_error_report_alloc(&report)); // // // cgbn_gh<BITS> gh_cpu; // mpz_t g_mpz, h_mpz; // mpz_init(g_mpz); // mpz_init(h_mpz); // unsigned long g_ul = (unsigned long) (message.g * 1e6); // unsigned long h_ul = (unsigned long) (message.h * 1e6); // mpz_import(g_mpz, 1, -1, sizeof(g_ul), 0, 0, &g_ul); // mpz_import(h_mpz, 1, -1, sizeof(h_ul), 0, 0, &h_ul); // from_mpz(g_mpz, gh_cpu.g._limbs, BITS/32); // from_mpz(h_mpz, gh_cpu.h._limbs, BITS/32); // mpz_clear(g_mpz); // mpz_clear(h_mpz); // // cgbn_gh<BITS> *gh_gpu; // CUDA_CHECK(hipMalloc((void**)&gh_gpu, sizeof(cgbn_gh<BITS>))); // CUDA_CHECK(hipMemcpy(gh_gpu, &gh_cpu, sizeof(cgbn_gh<BITS>), hipMemcpyHostToDevice)); // // gmp_randstate_t state; // gmp_randinit_mt(state); // gmp_randseed_ui(state, 1000U); // mpz_t r; // mpz_init(r); // mpz_urandomb(r, state, BITS); // mpz_mod(r, r, paillier_cpu.n); // // cgbn_mem_t<BITS> *random_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); // // from_mpz(r, random_cpu->_limbs, BITS/32); // cgbn_mem_t<BITS> *random_gpu; // CUDA_CHECK(hipMalloc((void**)&random_gpu, sizeof(cgbn_mem_t<BITS>))); // CUDA_CHECK(hipMemcpy(random_gpu, random_cpu, sizeof(cgbn_mem_t<BITS>), hipMemcpyHostToDevice)); // // int n_instances = 1; // cgbn_gh<BITS>* gh_results_gpu; // CUDA_CHECK(hipMalloc((void **)&gh_results_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); // //// hipMemcpy(&(gpuInstances->n), &n,, sizeof(n), hipMemcpyHostToDevice); // // // todo: move values to gpu first // device_loop(1, [=] __device__(int idx){ //// context_t bn_context(cgbn_report_monitor, report, idx); // context_t bn_context(cgbn_report_monitor); // env_t bn_env(bn_context.env<env_t>()); // env_t::cgbn_t g, m, r, n, n_square, re1, re2, result; // env_t::cgbn_wide_t w; // // // todo: wrong! 
gh_cpu->g is cgbn_mem_t, in cpu, and cannot directly be multiplied with 1e6, check whether the computation is allowed //// cgbn_set_ui32(bn_env, m, (uint32_t) (gh_cpu->g * 1e6)); // cgbn_load(bn_env, m, &(gh_gpu->g)); // // cgbn_load(bn_env, g, generator_gpu); // cgbn_load(bn_env, r, random_gpu); // cgbn_load(bn_env, n, n_gpu); // cgbn_load(bn_env, n_square, n_square_gpu); // // // compute g_enc // cgbn_modular_power(bn_env, re1, g, m, n_square); // cgbn_modular_power(bn_env, re2, r, n, n_square); // cgbn_mul_wide(bn_env, w, re1, re2); // cgbn_rem_wide(bn_env, result, w, n_square); // cgbn_store(bn_env, &(gh_results_gpu[idx].g), result); // // // compute h_enc //// cgbn_set_ui32(bn_env, m, (uint32_t) (gh_cpu.h * 1e6)); // cgbn_load(bn_env, m, &(gh_gpu->h)); // cgbn_modular_power(bn_env, re1, g, m, n_square); // cgbn_mul_wide(bn_env, w, re1, re2); // cgbn_rem_wide(bn_env, result, w, n_square); // cgbn_store(bn_env, &(gh_results_gpu[idx].h), result); // }); // //// CGBN_CHECK(report); // //// CUDA_CHECK(hipFree(gpu_parameters)); // cgbn_gh<BITS>* gh_results = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); // CUDA_CHECK(hipMemcpy(gh_results, gh_results_gpu, sizeof(cgbn_gh<BITS>)*n_instances, hipMemcpyDeviceToHost)); // CUDA_CHECK(hipFree(gh_results_gpu)); // // for(int i = 0; i < n_instances; i++){ // mpz_init(message.g_enc); // mpz_init(message.h_enc); // // todo: another way: directly copy in GPU. // to_mpz(message.g_enc, gh_results->g._limbs, BITS / 32); // to_mpz(message.h_enc, gh_results->h._limbs, BITS / 32); // } // free(gh_results); //} __global__ void kernel_decrypt(cgbn_gh<BITS>* gh_enc_gpu, cgbn_mem_t<BITS> *lambda_gpu, cgbn_mem_t<BITS> *mu_gpu, cgbn_mem_t<BITS> *n_gpu, cgbn_mem_t<BITS> *n_square_gpu, cgbn_gh<BITS> *gh_results_gpu, int n_instances){ int idx; idx = (blockIdx.x*blockDim.x + threadIdx.x)/TPI; if(idx >= n_instances) return; context_t bn_context(cgbn_report_monitor); env_t bn_env(bn_context.env<env_t>()); env_t::cgbn_t c, lambda, n, mu, n_square, re1, re2, re3, re4, result, result2; cgbn_load(bn_env, c, &(gh_enc_gpu[idx].g)); cgbn_load(bn_env, lambda, lambda_gpu); cgbn_load(bn_env, n, n_gpu); cgbn_load(bn_env, n_square, n_square_gpu); cgbn_load(bn_env, mu, mu_gpu); cgbn_modular_power(bn_env, re1, c, lambda, n_square); cgbn_sub_ui32(bn_env, re2, re1, 1); cgbn_div(bn_env, re3, re2, n); cgbn_mul(bn_env, re4, re3, mu); cgbn_rem(bn_env, result, re4, n); // todo: uint32 may not be enough to store g and h // message_device_data[idx].g = (float_type) cgbn_get_ui32(bn_env, result) / 1e6; cgbn_store(bn_env, &(gh_results_gpu[idx].g), result); // todo: check whether cpu mem data is syncrhonized or not cgbn_load(bn_env, c, &(gh_enc_gpu[idx].h)); cgbn_modular_power(bn_env, re1, c, lambda, n_square); cgbn_sub_ui32(bn_env, re2, re1, 1); cgbn_div(bn_env, re3, re2, n); cgbn_mul(bn_env, re4, re3, mu); cgbn_rem(bn_env, result2, re4, n); // message_device_data[idx].h = (float_type) cgbn_get_ui32(bn_env, result) / 1e6; cgbn_store(bn_env, &(gh_results_gpu[idx].h), result2); } //template<uint32_t BITS> void Paillier_GPU::decrypt(SyncArray<GHPair> &message){ // auto message_device_data = message.device_data(); auto message_host_data = message.host_data(); // cgbn_error_report_t *report; // CUDA_CHECK(cgbn_error_report_alloc(&report)); int n_instances = message.size(); cgbn_gh<BITS>* gh_enc_cpu = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); for(int i = 0; i < n_instances; i++){ if(message_host_data[i].encrypted) { from_mpz(message_host_data[i].g_enc, 
gh_enc_cpu[i].g._limbs, BITS / 32); from_mpz(message_host_data[i].h_enc, gh_enc_cpu[i].h._limbs, BITS / 32); } } cgbn_gh<BITS>* gh_enc_gpu; CUDA_CHECK(hipMalloc((void **)&gh_enc_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); CUDA_CHECK(hipMemcpy(gh_enc_gpu, gh_enc_cpu, sizeof(cgbn_gh<BITS>) * n_instances, hipMemcpyHostToDevice)); cgbn_gh<BITS>* gh_results_gpu; CUDA_CHECK(hipMalloc((void **)&gh_results_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); // std::cout<<"n_instances:"<<n_instances<<std::endl; hipLaunchKernelGGL(( kernel_decrypt), dim3((n_instances+3)/4), dim3(128), 0, 0, gh_enc_gpu, lambda_gpu, mu_gpu, n_gpu, n_square_gpu, gh_results_gpu, n_instances); CUDA_CHECK(hipDeviceSynchronize()); cgbn_gh<BITS>* gh_results = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); CUDA_CHECK(hipMemcpy(gh_results, gh_results_gpu, sizeof(cgbn_gh<BITS>)*n_instances, hipMemcpyDeviceToHost)); CUDA_CHECK(hipFree(gh_results_gpu)); // auto message_host_data = message.host_data(); mpz_t g_result, h_result; mpz_init(g_result); mpz_init(h_result); for(int i = 0; i < n_instances; i++){ if(message_host_data[i].encrypted) { long g_ul = 0, h_ul = 0; to_mpz(g_result, gh_results[i].g._limbs, BITS / 32); to_mpz(h_result, gh_results[i].h._limbs, BITS / 32); mpz_export(&g_ul, 0, -1, sizeof(g_ul), 0, 0, g_result); mpz_export(&h_ul, 0, -1, sizeof(h_ul), 0, 0, h_result); message_host_data[i].g = (float_type) g_ul / 1e6; message_host_data[i].h = (float_type) h_ul / 1e6; } } free(gh_results); mpz_clear(g_result); mpz_clear(h_result); } void Paillier_GPU::decrypt(GHPair &message){ // auto message_device_data = message.device_data(); auto message_host_data = message; // cgbn_error_report_t *report; // CUDA_CHECK(cgbn_error_report_alloc(&report)); int n_instances = 1; cgbn_gh<BITS>* gh_enc_cpu = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); if(message.encrypted) { from_mpz(message.g_enc, gh_enc_cpu[0].g._limbs, BITS / 32); from_mpz(message.h_enc, gh_enc_cpu[0].h._limbs, BITS / 32); } cgbn_gh<BITS>* gh_enc_gpu; CUDA_CHECK(hipMalloc((void **)&gh_enc_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); CUDA_CHECK(hipMemcpy(gh_enc_gpu, gh_enc_cpu, sizeof(cgbn_gh<BITS>) * n_instances, hipMemcpyHostToDevice)); cgbn_gh<BITS>* gh_results_gpu; CUDA_CHECK(hipMalloc((void **)&gh_results_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); // std::cout<<"n_instances:"<<n_instances<<std::endl; hipLaunchKernelGGL(( kernel_decrypt), dim3((n_instances+3)/4), dim3(128), 0, 0, gh_enc_gpu, lambda_gpu, mu_gpu, n_gpu, n_square_gpu, gh_results_gpu, n_instances); CUDA_CHECK(hipDeviceSynchronize()); cgbn_gh<BITS>* gh_results = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); CUDA_CHECK(hipMemcpy(gh_results, gh_results_gpu, sizeof(cgbn_gh<BITS>)*n_instances, hipMemcpyDeviceToHost)); CUDA_CHECK(hipFree(gh_results_gpu)); // auto message_host_data = message.host_data(); mpz_t g_result, h_result; mpz_init(g_result); mpz_init(h_result); if(message.encrypted) { long g_ul = 0, h_ul = 0; to_mpz(g_result, gh_results[0].g._limbs, BITS / 32); to_mpz(h_result, gh_results[0].h._limbs, BITS / 32); mpz_export(&g_ul, 0, -1, sizeof(g_ul), 0, 0, g_result); mpz_export(&h_ul, 0, -1, sizeof(h_ul), 0, 0, h_result); message.g = (float_type) g_ul / 1e6; message.h = (float_type) h_ul / 1e6; } free(gh_results); mpz_clear(g_result); mpz_clear(h_result); } //template<uint32_t BITS> //void Paillier_GPU::decrypt(GHPair &message){ //// auto message_device_data = message.device_data(); //// cgbn_error_report_t *report; //// CUDA_CHECK(cgbn_error_report_alloc(&report)); // 
// cgbn_gh<BITS>* gh_enc_cpu = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)); // from_mpz(message.g_enc, gh_enc_cpu->g._limbs, BITS / 32); // from_mpz(message.h_enc, gh_enc_cpu->h._limbs, BITS / 32); // cgbn_gh<BITS>* gh_enc_gpu; // CUDA_CHECK(hipMalloc((void **)&gh_enc_gpu, sizeof(cgbn_gh<BITS>))); // CUDA_CHECK(hipMemcpy(gh_enc_gpu, gh_enc_cpu, sizeof(cgbn_gh<BITS>), hipMemcpyHostToDevice)); // // int n_instances = 1; // cgbn_gh<BITS>* gh_results_gpu; // CUDA_CHECK(hipMalloc((void **)&gh_results_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); // // //// float_type* g_gpu, h_gpu; //// CUDA_CHECK(hipMalloc((void **)&g_gpu, sizeof(float_type))); //// CUDA_CHECK(hipMalloc((void **)&h_gpu, sizeof(float_type))); // // device_loop(1, [=] __device__(int idx){ //// context_t bn_context(cgbn_report_monitor, report, idx); // context_t bn_context(cgbn_report_monitor); // env_t bn_env(bn_context.env<env_t>()); // env_t::cgbn_t c, lambda, n, mu, n_square, re1, re2, re3, re4, result; // // cgbn_load(bn_env, c, &(gh_enc_gpu->g)); // cgbn_load(bn_env, lambda, this->lambda_gpu); // cgbn_load(bn_env, n, this->n_gpu); // cgbn_load(bn_env, n_square, this->n_square_gpu); // // cgbn_modular_power(bn_env, re1, c, lambda, n_square); // cgbn_sub_ui32(bn_env, re2, re1, 1); // cgbn_div(bn_env, re3, re2, n); // // cgbn_mul(bn_env, re4, re3, mu); // cgbn_rem(bn_env, result, re4, n); // //// *g_gpu = (float_type) cgbn_get_ui32(bn_env, result) / 1e6; // cgbn_store(bn_env, &(gh_results_gpu[idx].g), result); // // todo: check whether cpu mem data is syncrhonized or not // // cgbn_load(bn_env, c, &(gh_enc_gpu->h)); // cgbn_modular_power(bn_env, re1, c, lambda, n_square); // cgbn_sub_ui32(bn_env, re2, re1, 1); // cgbn_div(bn_env, re3, re2, n); // cgbn_mul(bn_env, re4, re3, mu); // cgbn_rem(bn_env, result, re4, n); //// *h_gpu = (float_type) cgbn_get_ui32(bn_env, result) / 1e6; // cgbn_store(bn_env, &(gh_results_gpu[idx].h), result); // }); // // cgbn_gh<BITS>* gh_results = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); // CUDA_CHECK(hipMemcpy(gh_results, gh_results_gpu, sizeof(cgbn_gh<BITS>)*n_instances, hipMemcpyDeviceToHost)); // CUDA_CHECK(hipFree(gh_results_gpu)); // // mpz_t g_result, h_result; // mpz_init(g_result); // mpz_init(h_result); // unsigned long g_ul, h_ul; // // to_mpz(g_result, gh_results->g._limbs, BITS/32); // to_mpz(h_result, gh_results->h._limbs, BITS/32); // mpz_export(&g_ul, 0, -1, sizeof(g_ul), 0, 0, g_result); // mpz_export(&h_ul, 0, -1, sizeof(h_ul), 0, 0, h_result); // message.g = (float_type) g_ul/1e6; // message.h = (float_type) h_ul/1e6; // // free(gh_results); // mpz_clear(g_result); // mpz_clear(h_result); //// CUDA_CHECK(hipMemcpy(message.g, g_gpu, sizeof(float_type), hipMemcpyDeviceToHost)); //// CUDA_CHECK(hipMemcpy(message.h, h_gpu, sizeof(float_type), hipMemcpyDeviceToHost)); //}
5d54c7dc0d5f3ce998c7afc0be1d7d35cfea55e1.cu
#include "FedTree/Encryption/paillier_gpu.h" #include "FedTree/util/device_lambda.cuh" #include "gmp.h" //#include "perf_tests/gpu_support.h" void to_mpz(mpz_t r, uint32_t *x, uint32_t count) { mpz_import(r, count, -1, sizeof(uint32_t), 0, 0, x); } void from_mpz(mpz_t s, uint32_t *x, uint32_t count) { size_t words; if(mpz_sizeinbase(s, 2)>count*32) { fprintf(stderr, "from_mpz failed -- result does not fit\n"); exit(1); } mpz_export(x, &words, -1, sizeof(uint32_t), 0, 0, s); while(words<count) x[words++]=0; } void cgbn_check(cgbn_error_report_t *report, const char *file=NULL, int32_t line=0) { // check for cgbn errors if(cgbn_error_report_check(report)) { printf("\n"); printf("CGBN error occurred: %s\n", cgbn_error_string(report)); if(report->_instance!=0xFFFFFFFF) { printf("Error reported by instance %d", report->_instance); if(report->_blockIdx.x!=0xFFFFFFFF || report->_threadIdx.x!=0xFFFFFFFF) printf(", "); if(report->_blockIdx.x!=0xFFFFFFFF) printf("blockIdx=(%d, %d, %d) ", report->_blockIdx.x, report->_blockIdx.y, report->_blockIdx.z); if(report->_threadIdx.x!=0xFFFFFFFF) printf("threadIdx=(%d, %d, %d)", report->_threadIdx.x, report->_threadIdx.y, report->_threadIdx.z); printf("\n"); } else { printf("Error reported by blockIdx=(%d %d %d)", report->_blockIdx.x, report->_blockIdx.y, report->_blockIdx.z); printf("threadIdx=(%d %d %d)\n", report->_threadIdx.x, report->_threadIdx.y, report->_threadIdx.z); } if(file!=NULL) printf("file %s, line %d\n", file, line); exit(1); } } #define CGBN_CHECK(report) cgbn_check(report, __FILE__, __LINE__) #define TPI 32 #define ENV_BITS 1024 typedef cgbn_context_t<TPI> context_t; typedef cgbn_env_t<context_t, ENV_BITS> env_t; //template<uint32_t BITS> void Paillier_GPU::add(mpz_t &result, mpz_t &x, mpz_t &y){ mpz_mul(result, x, y); mpz_mod(result, result, paillier_cpu.n_square); return; } //template<uint32_t BITS> void Paillier_GPU::mul(mpz_t result, mpz_t &x, mpz_t &y){ mpz_powm(result, x, y, paillier_cpu.n_square); return ; } //template<uint32_t BITS> void Paillier_GPU::L_function(mpz_t result, mpz_t input, mpz_t N){ mpz_sub_ui(result, input, 1); mpz_tdiv_q(result, result, N); } void Paillier_GPU::parameters_cpu_to_gpu(){ cgbn_mem_t<BITS> *n_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); cgbn_mem_t<BITS> *n_square_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); cgbn_mem_t<BITS> *generator_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); cgbn_mem_t<BITS> *lambda_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); cgbn_mem_t<BITS> *mu_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); from_mpz(paillier_cpu.n, n_cpu->_limbs, BITS/32); from_mpz(paillier_cpu.n_square, n_square_cpu->_limbs, BITS/32); from_mpz(paillier_cpu.generator, generator_cpu->_limbs, BITS/32); from_mpz(paillier_cpu.lambda, lambda_cpu->_limbs, BITS/32); from_mpz(paillier_cpu.mu, mu_cpu->_limbs, BITS/32); // cgbn_mem_t<BITS> *n_gpu; CUDA_CHECK(cudaMalloc((void**)&n_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(cudaMemcpy(n_gpu, n_cpu, sizeof(cgbn_mem_t<BITS>), cudaMemcpyHostToDevice)); // cgbn_mem_t<BITS> *n_square_gpu; CUDA_CHECK(cudaMalloc((void**)&n_square_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(cudaMemcpy(n_square_gpu, n_square_cpu, sizeof(cgbn_mem_t<BITS>), cudaMemcpyHostToDevice)); // cgbn_mem_t<BITS> *generator_gpu; CUDA_CHECK(cudaMalloc((void**)&generator_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(cudaMemcpy(generator_gpu, generator_cpu, sizeof(cgbn_mem_t<BITS>), cudaMemcpyHostToDevice)); // cgbn_mem_t<BITS> *lambda_gpu; 
CUDA_CHECK(cudaMalloc((void**)&lambda_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(cudaMemcpy(lambda_gpu, lambda_cpu, sizeof(cgbn_mem_t<BITS>), cudaMemcpyHostToDevice)); // cgbn_mem_t<BITS> *mu_gpu; CUDA_CHECK(cudaMalloc((void**)&mu_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(cudaMemcpy(mu_gpu, mu_cpu, sizeof(cgbn_mem_t<BITS>), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaDeviceSynchronize()); free(n_cpu); free(n_square_cpu); free(generator_cpu); free(lambda_cpu); free(mu_cpu); // cgbn_mem_t<BITS> *r_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); // from_mpz(paillier_cpu.r, r_cpu->_limbs, BITS/32); // CUDA_CHECK(cudaMalloc((void**)&random_gpu, sizeof(cgbn_mem_t<BITS>))); // CUDA_CHECK(cudaMemcpy(random_gpu, r_cpu, sizeof(cgbn_mem_t<BITS>), cudaMemcpyHostToDevice)); // free(r_cpu); } //template<uint32_t BITS> void Paillier_GPU::keygen(){ paillier_cpu.keyGen(BITS); parameters_cpu_to_gpu(); // gmp_randstate_t state = new gmp_randstate_t(); // gmp_randinit_mt(state); //// gmp_randseed_ui(state, 1000U); // mpz tmp1, tmp2, tmp3, tmp4; // mpz_init(tmp1); // mpz_init(tmp2); // mpz_init(tmp3); // mpz_init(tmp4); // while (true){ // mpz_urandomb(p, gpc_randstate, BITS/4); // mpz_urandomb(q, gpc_randstate, BITS/4); // mpz_nextprime(p, p); // mpz_nextprime(q, q); // mpz_sub_ui(tmp1, p, 1); // mpz_sub_ui(tmp2, q, 1); // mpz_mul(tmp3, tmp1, tmp2); // tmp3 = (p-1)(q-1) // mpz_mul(tmp4, p, q); // tmp4 = p*q // mpz_gcd(tmp3, tmp3, tmp4); // tmp = gcd(pq, (p-1)(q-1)) // if(mpz_cmp_ui(tmp3, 1) == 0) // gcd(pq, (p-1)(q-1)) == 1 // break; // } // // n = tmp4; // n = p * q // mpz_add_ui(generator, n, 1); // generator = modulus + 1 // mpz_lcm(lambda, tmp1, tmp2); // lamda = lcm(p-1, q-1) // mpz_mul(n_square, n, n); // mpz_t lambda_power; // mpz_init(lambda_power); // mpz_powm(lambda_power, generator, lambda, n_square); // L_function(mu, lambda_power, n); // mpz_invert(mu, mu, N); // u = L((generator^lambda) mod n ^ 2) ) ^ -1 mod modulus // mpz_clear(tmp1); // mpz_clear(tmp2); // mpz_clear(tmp3); // mpz_clear(tmp4); // mpz_clear(lambda_power); } __global__ void kernel_encrypt(cgbn_gh<BITS> *gh_gpu, cgbn_gh<BITS> *gh_results_gpu, cgbn_mem_t<BITS> *generator_gpu, cgbn_mem_t<BITS> *random_gpu, cgbn_mem_t<BITS> *n_gpu, cgbn_mem_t<BITS> *n_square_gpu, int n_instances){ // cgbm_mem_t<BITS> *g_gpu_test = new cgbm_mem_t<BITS>(); int32_t idx; idx = (blockIdx.x*blockDim.x + threadIdx.x)/TPI; if(idx >= n_instances) return; context_t bn_context(cgbn_report_monitor); // context_t bn_context(cgbn_report_monitor); env_t bn_env(bn_context.env<env_t>()); env_t::cgbn_t g, m, r, n, n_square, re1, re2, result, result2; env_t::cgbn_wide_t w; //todo: check whether g is in gpu or not. 
compare with another way: convert g to cgbn_mem_t before kernel // cgbn_set_ui32(bn_env, m, (uint32_t) (message_device_data[idx].g * 1e6)); cgbn_load(bn_env, m, &(gh_gpu[idx].g)); // cgbn_rem(bn_env, re1, m, m); cgbn_load(bn_env, g, generator_gpu); // cgbn_rem(bn_env, re1, g, g); cgbn_load(bn_env, r, random_gpu); cgbn_load(bn_env, n, n_gpu); cgbn_load(bn_env, n_square, n_square_gpu); // cgbn_rem(bn_env, g_mod, r, m); // cgbn_rem(bn_env, g_mod, g, g); // compute g_enc cgbn_modular_power(bn_env, re1, g, m, n_square); cgbn_modular_power(bn_env, re2, r, n, n_square); cgbn_mul_wide(bn_env, w, re1, re2); cgbn_rem_wide(bn_env, result, w, n_square); cgbn_store(bn_env, &(gh_results_gpu[idx].g), result); // // compute h_enc //// cgbn_set_ui32(bn_env, m, (uint32_t) (message_device_data[idx].h * 1e6)); cgbn_load(bn_env, m, &(gh_gpu[idx].h)); cgbn_modular_power(bn_env, re1, g, m, n_square); cgbn_mul_wide(bn_env, w, re1, re2); cgbn_rem_wide(bn_env, result2, w, n_square); cgbn_store(bn_env, &(gh_results_gpu[idx].h), result2); } //template<uint32_t BITS> void Paillier_GPU::encrypt(SyncArray<GHPair> &message){ // auto message_device_data = message.device_data(); // cgbn_error_report_t *report; // CUDA_CHECK(cgbn_error_report_alloc(&report)); int n_instances = message.size(); auto message_host_data = message.host_data(); cgbn_gh<BITS> *gh_cpu = (cgbn_gh<BITS> *)malloc(sizeof(cgbn_gh<BITS>) * n_instances); mpz_t g_mpz, h_mpz; mpz_init(g_mpz); mpz_init(h_mpz); // std::cout<<"test import and export"<<std::endl; // float a = -0.2; // long a_ul = (long)(a * 1e6); // std::cout<<"import a_ul:"<<a_ul<<std::endl; // mpz_t tmp; // mpz_init(tmp); // mpz_import(tmp, 1, -1, sizeof(a_ul), 0, 0, &a_ul); // std::cout<<"import mpz:"<<tmp<<std::endl; // long a_l; // mpz_export(&a_l, (size_t*)0, -1, sizeof(a_l), 0, 0, tmp); // std::cout<<"export a_l:"<<a_l<<std::endl; // float a_res = (float) a_l/1e6; // std::cout<<"a_res:"<<a_res<<std::endl; for(int i = 0; i < n_instances; i++){ long g_ul = (long) (message_host_data[i].g * 1e6); // if(i == 0) // std::cout<<"g_ul:"<<g_ul<<std::endl; long h_ul = (long) (message_host_data[i].h * 1e6); mpz_import(g_mpz, 1, -1, sizeof(g_ul), 0, 0, &g_ul); // if(i == 0) // std::cout<<"g_mpz:"<<g_mpz<<std::endl; mpz_import(h_mpz, 1, -1, sizeof(h_ul), 0, 0, &h_ul); from_mpz(g_mpz, gh_cpu[i].g._limbs, BITS / 32); from_mpz(h_mpz, gh_cpu[i].h._limbs, BITS / 32); } mpz_clear(g_mpz); mpz_clear(h_mpz); cgbn_gh<BITS> *gh_gpu; CUDA_CHECK(cudaMalloc((void**)&gh_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); CUDA_CHECK(cudaMemcpy(gh_gpu, gh_cpu, sizeof(cgbn_gh<BITS>) * n_instances, cudaMemcpyHostToDevice)); free(gh_cpu); gmp_randstate_t state; gmp_randinit_mt(state); // gmp_randseed_ui(state, 1000U); mpz_t r; mpz_init(r); while(true) { mpz_urandomm(r, state, paillier_cpu.n); if(mpz_cmp_ui(r, 0)) break; } // mpz_urandomb(r, state, BITS); // mpz_add_ui(r, r, 1); //ensure r > 0 // mpz_mod(r, r, paillier_cpu.n); cgbn_mem_t<BITS> *random_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); from_mpz(r, random_cpu->_limbs, BITS/32); cgbn_mem_t<BITS> *random_gpu; CUDA_CHECK(cudaMalloc((void**)&random_gpu, sizeof(cgbn_mem_t<BITS>))); CUDA_CHECK(cudaMemcpy(random_gpu, random_cpu, sizeof(cgbn_mem_t<BITS>), cudaMemcpyHostToDevice)); free(random_cpu); mpz_clear(r); cgbn_gh<BITS>* gh_results_gpu; CUDA_CHECK(cudaMalloc((void **)&gh_results_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); // cudaMemcpy(&(gpuInstances->n), &n,, sizeof(n), cudaMemcpyHostToDevice); kernel_encrypt<<<(n_instances+3)/4, 128>>>(gh_gpu, 
gh_results_gpu, generator_gpu, random_gpu, n_gpu, n_square_gpu, n_instances); CUDA_CHECK(cudaDeviceSynchronize()); // CGBN_CHECK(report); // CUDA_CHECK(cudaFree(gpu_parameters)); cgbn_gh<BITS>* gh_results = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); CUDA_CHECK(cudaMemcpy(gh_results, gh_results_gpu, sizeof(cgbn_gh<BITS>)*n_instances, cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaFree(gh_results_gpu)); // auto message_host_data = message.host_data(); for(int i = 0; i < n_instances; i++){ mpz_init(message_host_data[i].g_enc); mpz_init(message_host_data[i].h_enc); // todo: another way: directly copy in GPU. to_mpz(message_host_data[i].g_enc, gh_results[i].g._limbs, BITS / 32); to_mpz(message_host_data[i].h_enc, gh_results[i].h._limbs, BITS / 32); // message_host_data[i].encrypted=true; } free(gh_results); // CUDA_CHECK(cgbn_error_report_free(report)); } //template<uint32_t BITS> //void Paillier_GPU::encrypt(GHPair &message){ //// cgbn_error_report_t *report; //// CUDA_CHECK(cgbn_error_report_alloc(&report)); // // // cgbn_gh<BITS> gh_cpu; // mpz_t g_mpz, h_mpz; // mpz_init(g_mpz); // mpz_init(h_mpz); // unsigned long g_ul = (unsigned long) (message.g * 1e6); // unsigned long h_ul = (unsigned long) (message.h * 1e6); // mpz_import(g_mpz, 1, -1, sizeof(g_ul), 0, 0, &g_ul); // mpz_import(h_mpz, 1, -1, sizeof(h_ul), 0, 0, &h_ul); // from_mpz(g_mpz, gh_cpu.g._limbs, BITS/32); // from_mpz(h_mpz, gh_cpu.h._limbs, BITS/32); // mpz_clear(g_mpz); // mpz_clear(h_mpz); // // cgbn_gh<BITS> *gh_gpu; // CUDA_CHECK(cudaMalloc((void**)&gh_gpu, sizeof(cgbn_gh<BITS>))); // CUDA_CHECK(cudaMemcpy(gh_gpu, &gh_cpu, sizeof(cgbn_gh<BITS>), cudaMemcpyHostToDevice)); // // gmp_randstate_t state; // gmp_randinit_mt(state); // gmp_randseed_ui(state, 1000U); // mpz_t r; // mpz_init(r); // mpz_urandomb(r, state, BITS); // mpz_mod(r, r, paillier_cpu.n); // // cgbn_mem_t<BITS> *random_cpu = (cgbn_mem_t<BITS> *)malloc(sizeof(cgbn_mem_t<BITS>)); // // from_mpz(r, random_cpu->_limbs, BITS/32); // cgbn_mem_t<BITS> *random_gpu; // CUDA_CHECK(cudaMalloc((void**)&random_gpu, sizeof(cgbn_mem_t<BITS>))); // CUDA_CHECK(cudaMemcpy(random_gpu, random_cpu, sizeof(cgbn_mem_t<BITS>), cudaMemcpyHostToDevice)); // // int n_instances = 1; // cgbn_gh<BITS>* gh_results_gpu; // CUDA_CHECK(cudaMalloc((void **)&gh_results_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); // //// cudaMemcpy(&(gpuInstances->n), &n,, sizeof(n), cudaMemcpyHostToDevice); // // // todo: move values to gpu first // device_loop(1, [=] __device__(int idx){ //// context_t bn_context(cgbn_report_monitor, report, idx); // context_t bn_context(cgbn_report_monitor); // env_t bn_env(bn_context.env<env_t>()); // env_t::cgbn_t g, m, r, n, n_square, re1, re2, result; // env_t::cgbn_wide_t w; // // // todo: wrong! 
gh_cpu->g is cgbn_mem_t, in cpu, and cannot directly be multiplied with 1e6, check whether the computation is allowed //// cgbn_set_ui32(bn_env, m, (uint32_t) (gh_cpu->g * 1e6)); // cgbn_load(bn_env, m, &(gh_gpu->g)); // // cgbn_load(bn_env, g, generator_gpu); // cgbn_load(bn_env, r, random_gpu); // cgbn_load(bn_env, n, n_gpu); // cgbn_load(bn_env, n_square, n_square_gpu); // // // compute g_enc // cgbn_modular_power(bn_env, re1, g, m, n_square); // cgbn_modular_power(bn_env, re2, r, n, n_square); // cgbn_mul_wide(bn_env, w, re1, re2); // cgbn_rem_wide(bn_env, result, w, n_square); // cgbn_store(bn_env, &(gh_results_gpu[idx].g), result); // // // compute h_enc //// cgbn_set_ui32(bn_env, m, (uint32_t) (gh_cpu.h * 1e6)); // cgbn_load(bn_env, m, &(gh_gpu->h)); // cgbn_modular_power(bn_env, re1, g, m, n_square); // cgbn_mul_wide(bn_env, w, re1, re2); // cgbn_rem_wide(bn_env, result, w, n_square); // cgbn_store(bn_env, &(gh_results_gpu[idx].h), result); // }); // //// CGBN_CHECK(report); // //// CUDA_CHECK(cudaFree(gpu_parameters)); // cgbn_gh<BITS>* gh_results = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); // CUDA_CHECK(cudaMemcpy(gh_results, gh_results_gpu, sizeof(cgbn_gh<BITS>)*n_instances, cudaMemcpyDeviceToHost)); // CUDA_CHECK(cudaFree(gh_results_gpu)); // // for(int i = 0; i < n_instances; i++){ // mpz_init(message.g_enc); // mpz_init(message.h_enc); // // todo: another way: directly copy in GPU. // to_mpz(message.g_enc, gh_results->g._limbs, BITS / 32); // to_mpz(message.h_enc, gh_results->h._limbs, BITS / 32); // } // free(gh_results); //} __global__ void kernel_decrypt(cgbn_gh<BITS>* gh_enc_gpu, cgbn_mem_t<BITS> *lambda_gpu, cgbn_mem_t<BITS> *mu_gpu, cgbn_mem_t<BITS> *n_gpu, cgbn_mem_t<BITS> *n_square_gpu, cgbn_gh<BITS> *gh_results_gpu, int n_instances){ int idx; idx = (blockIdx.x*blockDim.x + threadIdx.x)/TPI; if(idx >= n_instances) return; context_t bn_context(cgbn_report_monitor); env_t bn_env(bn_context.env<env_t>()); env_t::cgbn_t c, lambda, n, mu, n_square, re1, re2, re3, re4, result, result2; cgbn_load(bn_env, c, &(gh_enc_gpu[idx].g)); cgbn_load(bn_env, lambda, lambda_gpu); cgbn_load(bn_env, n, n_gpu); cgbn_load(bn_env, n_square, n_square_gpu); cgbn_load(bn_env, mu, mu_gpu); cgbn_modular_power(bn_env, re1, c, lambda, n_square); cgbn_sub_ui32(bn_env, re2, re1, 1); cgbn_div(bn_env, re3, re2, n); cgbn_mul(bn_env, re4, re3, mu); cgbn_rem(bn_env, result, re4, n); // todo: uint32 may not be enough to store g and h // message_device_data[idx].g = (float_type) cgbn_get_ui32(bn_env, result) / 1e6; cgbn_store(bn_env, &(gh_results_gpu[idx].g), result); // todo: check whether cpu mem data is syncrhonized or not cgbn_load(bn_env, c, &(gh_enc_gpu[idx].h)); cgbn_modular_power(bn_env, re1, c, lambda, n_square); cgbn_sub_ui32(bn_env, re2, re1, 1); cgbn_div(bn_env, re3, re2, n); cgbn_mul(bn_env, re4, re3, mu); cgbn_rem(bn_env, result2, re4, n); // message_device_data[idx].h = (float_type) cgbn_get_ui32(bn_env, result) / 1e6; cgbn_store(bn_env, &(gh_results_gpu[idx].h), result2); } //template<uint32_t BITS> void Paillier_GPU::decrypt(SyncArray<GHPair> &message){ // auto message_device_data = message.device_data(); auto message_host_data = message.host_data(); // cgbn_error_report_t *report; // CUDA_CHECK(cgbn_error_report_alloc(&report)); int n_instances = message.size(); cgbn_gh<BITS>* gh_enc_cpu = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); for(int i = 0; i < n_instances; i++){ if(message_host_data[i].encrypted) { from_mpz(message_host_data[i].g_enc, 
gh_enc_cpu[i].g._limbs, BITS / 32); from_mpz(message_host_data[i].h_enc, gh_enc_cpu[i].h._limbs, BITS / 32); } } cgbn_gh<BITS>* gh_enc_gpu; CUDA_CHECK(cudaMalloc((void **)&gh_enc_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); CUDA_CHECK(cudaMemcpy(gh_enc_gpu, gh_enc_cpu, sizeof(cgbn_gh<BITS>) * n_instances, cudaMemcpyHostToDevice)); cgbn_gh<BITS>* gh_results_gpu; CUDA_CHECK(cudaMalloc((void **)&gh_results_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); // std::cout<<"n_instances:"<<n_instances<<std::endl; kernel_decrypt<<<(n_instances+3)/4, 128>>>(gh_enc_gpu, lambda_gpu, mu_gpu, n_gpu, n_square_gpu, gh_results_gpu, n_instances); CUDA_CHECK(cudaDeviceSynchronize()); cgbn_gh<BITS>* gh_results = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); CUDA_CHECK(cudaMemcpy(gh_results, gh_results_gpu, sizeof(cgbn_gh<BITS>)*n_instances, cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaFree(gh_results_gpu)); // auto message_host_data = message.host_data(); mpz_t g_result, h_result; mpz_init(g_result); mpz_init(h_result); for(int i = 0; i < n_instances; i++){ if(message_host_data[i].encrypted) { long g_ul = 0, h_ul = 0; to_mpz(g_result, gh_results[i].g._limbs, BITS / 32); to_mpz(h_result, gh_results[i].h._limbs, BITS / 32); mpz_export(&g_ul, 0, -1, sizeof(g_ul), 0, 0, g_result); mpz_export(&h_ul, 0, -1, sizeof(h_ul), 0, 0, h_result); message_host_data[i].g = (float_type) g_ul / 1e6; message_host_data[i].h = (float_type) h_ul / 1e6; } } free(gh_results); mpz_clear(g_result); mpz_clear(h_result); } void Paillier_GPU::decrypt(GHPair &message){ // auto message_device_data = message.device_data(); auto message_host_data = message; // cgbn_error_report_t *report; // CUDA_CHECK(cgbn_error_report_alloc(&report)); int n_instances = 1; cgbn_gh<BITS>* gh_enc_cpu = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); if(message.encrypted) { from_mpz(message.g_enc, gh_enc_cpu[0].g._limbs, BITS / 32); from_mpz(message.h_enc, gh_enc_cpu[0].h._limbs, BITS / 32); } cgbn_gh<BITS>* gh_enc_gpu; CUDA_CHECK(cudaMalloc((void **)&gh_enc_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); CUDA_CHECK(cudaMemcpy(gh_enc_gpu, gh_enc_cpu, sizeof(cgbn_gh<BITS>) * n_instances, cudaMemcpyHostToDevice)); cgbn_gh<BITS>* gh_results_gpu; CUDA_CHECK(cudaMalloc((void **)&gh_results_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); // std::cout<<"n_instances:"<<n_instances<<std::endl; kernel_decrypt<<<(n_instances+3)/4, 128>>>(gh_enc_gpu, lambda_gpu, mu_gpu, n_gpu, n_square_gpu, gh_results_gpu, n_instances); CUDA_CHECK(cudaDeviceSynchronize()); cgbn_gh<BITS>* gh_results = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); CUDA_CHECK(cudaMemcpy(gh_results, gh_results_gpu, sizeof(cgbn_gh<BITS>)*n_instances, cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaFree(gh_results_gpu)); // auto message_host_data = message.host_data(); mpz_t g_result, h_result; mpz_init(g_result); mpz_init(h_result); if(message.encrypted) { long g_ul = 0, h_ul = 0; to_mpz(g_result, gh_results[0].g._limbs, BITS / 32); to_mpz(h_result, gh_results[0].h._limbs, BITS / 32); mpz_export(&g_ul, 0, -1, sizeof(g_ul), 0, 0, g_result); mpz_export(&h_ul, 0, -1, sizeof(h_ul), 0, 0, h_result); message.g = (float_type) g_ul / 1e6; message.h = (float_type) h_ul / 1e6; } free(gh_results); mpz_clear(g_result); mpz_clear(h_result); } //template<uint32_t BITS> //void Paillier_GPU::decrypt(GHPair &message){ //// auto message_device_data = message.device_data(); //// cgbn_error_report_t *report; //// CUDA_CHECK(cgbn_error_report_alloc(&report)); // // cgbn_gh<BITS>* gh_enc_cpu = 
(cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)); // from_mpz(message.g_enc, gh_enc_cpu->g._limbs, BITS / 32); // from_mpz(message.h_enc, gh_enc_cpu->h._limbs, BITS / 32); // cgbn_gh<BITS>* gh_enc_gpu; // CUDA_CHECK(cudaMalloc((void **)&gh_enc_gpu, sizeof(cgbn_gh<BITS>))); // CUDA_CHECK(cudaMemcpy(gh_enc_gpu, gh_enc_cpu, sizeof(cgbn_gh<BITS>), cudaMemcpyHostToDevice)); // // int n_instances = 1; // cgbn_gh<BITS>* gh_results_gpu; // CUDA_CHECK(cudaMalloc((void **)&gh_results_gpu, sizeof(cgbn_gh<BITS>) * n_instances)); // // //// float_type* g_gpu, h_gpu; //// CUDA_CHECK(cudaMalloc((void **)&g_gpu, sizeof(float_type))); //// CUDA_CHECK(cudaMalloc((void **)&h_gpu, sizeof(float_type))); // // device_loop(1, [=] __device__(int idx){ //// context_t bn_context(cgbn_report_monitor, report, idx); // context_t bn_context(cgbn_report_monitor); // env_t bn_env(bn_context.env<env_t>()); // env_t::cgbn_t c, lambda, n, mu, n_square, re1, re2, re3, re4, result; // // cgbn_load(bn_env, c, &(gh_enc_gpu->g)); // cgbn_load(bn_env, lambda, this->lambda_gpu); // cgbn_load(bn_env, n, this->n_gpu); // cgbn_load(bn_env, n_square, this->n_square_gpu); // // cgbn_modular_power(bn_env, re1, c, lambda, n_square); // cgbn_sub_ui32(bn_env, re2, re1, 1); // cgbn_div(bn_env, re3, re2, n); // // cgbn_mul(bn_env, re4, re3, mu); // cgbn_rem(bn_env, result, re4, n); // //// *g_gpu = (float_type) cgbn_get_ui32(bn_env, result) / 1e6; // cgbn_store(bn_env, &(gh_results_gpu[idx].g), result); // // todo: check whether cpu mem data is syncrhonized or not // // cgbn_load(bn_env, c, &(gh_enc_gpu->h)); // cgbn_modular_power(bn_env, re1, c, lambda, n_square); // cgbn_sub_ui32(bn_env, re2, re1, 1); // cgbn_div(bn_env, re3, re2, n); // cgbn_mul(bn_env, re4, re3, mu); // cgbn_rem(bn_env, result, re4, n); //// *h_gpu = (float_type) cgbn_get_ui32(bn_env, result) / 1e6; // cgbn_store(bn_env, &(gh_results_gpu[idx].h), result); // }); // // cgbn_gh<BITS>* gh_results = (cgbn_gh<BITS>*)malloc(sizeof(cgbn_gh<BITS>)*n_instances); // CUDA_CHECK(cudaMemcpy(gh_results, gh_results_gpu, sizeof(cgbn_gh<BITS>)*n_instances, cudaMemcpyDeviceToHost)); // CUDA_CHECK(cudaFree(gh_results_gpu)); // // mpz_t g_result, h_result; // mpz_init(g_result); // mpz_init(h_result); // unsigned long g_ul, h_ul; // // to_mpz(g_result, gh_results->g._limbs, BITS/32); // to_mpz(h_result, gh_results->h._limbs, BITS/32); // mpz_export(&g_ul, 0, -1, sizeof(g_ul), 0, 0, g_result); // mpz_export(&h_ul, 0, -1, sizeof(h_ul), 0, 0, h_result); // message.g = (float_type) g_ul/1e6; // message.h = (float_type) h_ul/1e6; // // free(gh_results); // mpz_clear(g_result); // mpz_clear(h_result); //// CUDA_CHECK(cudaMemcpy(message.g, g_gpu, sizeof(float_type), cudaMemcpyDeviceToHost)); //// CUDA_CHECK(cudaMemcpy(message.h, h_gpu, sizeof(float_type), cudaMemcpyDeviceToHost)); //}
09b1d04b348bf65afbbd4a972607f4c807380f79.hip
// !!! This is a file automatically generated by hipify!!! #include "Managed.hpp" namespace mm_ray { using namespace std; void* Managed::operator new(std::size_t count) throw(std::bad_alloc){ void* dev_ptr; gpuErrchk(hipMallocManaged((void**)&dev_ptr, count, hipMemAttachGlobal)); return dev_ptr; } void* Managed::operator new[](std::size_t count) throw(std::bad_alloc){ void* dev_ptr; gpuErrchk(hipMallocManaged((void**)&dev_ptr, count, hipMemAttachGlobal)); return dev_ptr; } void Managed::operator delete(void* ptr) throw() { gpuErrchk(hipFree(ptr)); } void Managed::operator delete[](void* ptr) throw() { gpuErrchk(hipFree(ptr)); } void* Cuda_Malloc(size_t d_size){ //A convenience function that allocates a raw buffer of unified (managed) memory void* dev_ptr; gpuErrchk(hipMallocManaged(&dev_ptr, d_size, hipMemAttachGlobal)); //Callers construct objects in this buffer themselves (e.g. via placement new) return dev_ptr; } void Cuda_Free(void* mem){ gpuErrchk(hipFree(mem)); } }
09b1d04b348bf65afbbd4a972607f4c807380f79.cu
#include "Managed.hpp" namespace mm_ray { using namespace std; void* Managed::operator new(std::size_t count) throw(std::bad_alloc){ void* dev_ptr; gpuErrchk(cudaMallocManaged((void**)&dev_ptr, count, cudaMemAttachGlobal)); return dev_ptr; } void* Managed::operator new[](std::size_t count) throw(std::bad_alloc){ void* dev_ptr; gpuErrchk(cudaMallocManaged((void**)&dev_ptr, count, cudaMemAttachGlobal)); return dev_ptr; } void Managed::operator delete(void* ptr) throw() { gpuErrchk(cudaFree(ptr)); } void Managed::operator delete[](void* ptr) throw() { gpuErrchk(cudaFree(ptr)); } void* Cuda_Malloc(size_t d_size){ //A convenience function that void* dev_ptr; gpuErrchk(cudaMallocManaged(&dev_ptr, d_size, cudaMemAttachGlobal)); //Call a placement copy constructor return dev_ptr; } void Cuda_Free(void* mem){ gpuErrchk(cudaFree(mem)); } }
9b1522690a994c1b6ac7bc6c9811d253b42cc483.hip
// !!! This is a file automatically generated by hipify!!! /* * env_vars.cu * * Created on: 01/dic/2014 * Author: Edoardo Mondoni */ #include "samples.h" #include "err.h" #include <stdio.h> /*** CONSTANT MEMORY DECLARATIONS ***/ __device__ double d_gamma[N_SAMPLES + 1]; __device__ double d_vo[N_SAMPLES + 1]; __device__ double d_instants[N_SAMPLES]; /*** CONSTANT DEFINITIONS ***/ const double h_gamma[N_SAMPLES] = {150.9118531981026, 151.5285590578920, 151.8096606907299, 151.7562930050505, 151.4642346584513, 150.9581207918112, 150.2606280519593, 149.3934013670099, 148.3777904516479, 147.2353439294383, 145.9881194285274, 144.6528821601795, 143.2526577745959, 141.9058189578238, 140.5866109614942, 139.2692067856598, 137.9287115410390, 136.5593413120525, 135.0980616219118, 133.5028360448642, 131.7747384613797, 129.9150382483862, 127.9251395674573, 125.8065791239034, 123.5610407395057, 121.1903777933693, 118.6966304688798, 116.0820267158657, 113.3489628150171, 110.4999684313524, 107.5376682808701, 104.4647548825510, 101.2839831403584, 97.99818900750454, 94.61032457258823, 91.12349462991806, 87.54097827766026, 83.86622422913365, 80.10281856964643, 76.25443482782526, 72.32478400313934, 68.31758326248365, 64.23655557683860, 60.08546080250227, 55.86814608186654, 51.58859498474102, 47.25095414298436, 42.85952391867009, 38.41871322204285, 33.93297285627299, 29.40673107018239, 24.84435537694043, 20.25015551856541, 15.62842687465883, 10.98351750219837, 6.319891735692119, 1.642163242589056, -3.044918860776084, -7.736529482757397, -12.42785316559739, -17.11416506263525, -21.79087681825568, -26.45352927187255, -31.09773407279362, -35.71908662104847, -40.31308518180270, -44.87509024555831, -49.40034389840219, -53.88404614753406, -58.32146244725330, -62.70802308992435, -67.03937649509434, -71.31137493454654, -75.51999704360846, -79.66123675893489, -83.73100298241140, -87.72507194878320, -91.63911510080199, -95.46879611789336, -99.20990279184402, -102.8584639564917, -106.4108054928136, -109.8635219711709, -113.2133735776256, -116.4571486785438, -119.5915480219573, -122.6131399130156, -125.5184082431288, -128.3038771933860, -130.9662630471705, -133.5025892697328, -135.9102134091697, -138.1867503520988, -140.3299223779285, -142.3374022054308, -144.2067227560494, -145.9504404355676, -147.5469187332408, -148.9318335048445, -150.0820637453334, -150.9725285997426, -151.5772172830993, -151.8462223321152, -151.7807136525794, -151.4765159927838, -150.9582910072513, -150.2487406212101, -149.3695313730091, -148.3420277565196, -147.1877838555826, -145.9288530790558, -144.5819925950053, -143.1702082599511, -141.8118167003158, -140.4810708399161, -139.1521500814942, -137.8001644308767, -136.4193187050986, -134.9466322470249, -133.3401256065954, -131.6008954161345, -129.7302337289814, -127.7295594101187, -125.6004122532571, -123.3444698293929, -120.9635771720460, -118.4597729788949, -115.8352971263997, -113.0925721992880, -110.2341630646940, -107.2627285378647, -104.1809836101987, -100.9916874544385, -97.69766253318762, -94.30183746445621, -90.80729598260224, -87.21731073743987, -83.53534559595926, -79.76502197843072, -75.91005912638852, -71.97420937853271, -67.96121281375949, -63.87478923300315, -59.71867144059283, -55.49666739381114, -51.21272648995492, -46.87098220133188, -42.47575115196885, -38.03148497909444, -33.54268990413045, -29.01384257752727, -24.44933376257186, -19.85346202038811, -15.23048084265765, -10.58468162030908, -5.920479695973544, -1.242467837461407, 3.444587448506927, 8.135809965506729, 
12.82631771973166, 17.51132934222521, 22.18623286753558, 26.84658960786979, 31.48807021863084, 36.10634675491476, 40.69698308800641, 45.25536872816330, 49.77672606420251, 54.25619339123547, 58.68895619857150, 63.07037876832002, 67.39608575307216, 71.66196088481411, 75.86406133062852, 79.99847963246236, 84.06120742240232, 88.04805678502025, 91.95467460834715, 95.77664968231936, 99.50967518736837, 103.1497053675946, 106.6930448384480, 110.1363334510258, 113.4764302697344, 116.7102413120443, 119.8345604648115, 122.8459903727553, 125.7409796465810, 128.5159654876024, 131.1675655075404, 133.6927384286541, 136.0888424081005, 138.3535608604175, 140.4847237771999, 142.4801028996183, 144.3372772439026, 146.0687849570104, 147.6529107598514, 149.0252538985235, 150.0094608580336}; // experimental samples of Gamma(t) const double h_vo[N_SAMPLES] = {0.6760973943249335, 0.5717467299267627, 0.4665442143697862, 0.3604905683435304, 0.2536292701964937, 0.1460213986096215, 0.03774337014700224, -0.07111352756116007, -0.1804428687231168, -0.2901222890285782, -0.4000123667547668, -0.5099552327627888, -0.6197547873667815, -0.7291687114502992, -0.8379668650874113, -0.9459389847234885, -1.052895220667863, -1.158666587964081, -1.263120922024231, -1.366156636774651, -1.467675352583971, -1.567580372447689, -1.665776581376069, -1.762170395246174, -1.856669808406348, -1.949184551830339, -2.039626332071301, -2.127909092216705, -2.213949230041704, -2.297665726820814, -2.378980174805175, -2.457816728532791, -2.534102030957383, -2.607765170840508, -2.678737711978820, -2.746953804658915, -2.812350357243830, -2.874867223243498, -2.934447354520114, -2.991036885277661, -3.044585138005190, -3.095044571039522, -3.142370707125508, -3.186522086339126, -3.227460274282618, -3.265149933034065, -3.299558937233582, -3.330658500419298, -3.358423273339567, -3.382831387095663, -3.403864434716708, -3.421507406911287, -3.435748612912533, -3.446579620186737, -3.453995236832424, -3.457993542098095, -3.458575950894605, -3.455747284858068, -3.449515820128567, -3.439893290924596, -3.426894844332465, -3.410538959014104, -3.390847352239840, -3.367844901652289, -3.341559600176355, -3.312022547982817, -3.279267970142625, -3.243333238342549, -3.204258873384214, -3.162088512366561, -3.116868837338004, -3.068649475700950, -3.017482891639262, -2.963424289191908, -2.906531541141616, -2.846865146442169, -2.784488207001688, -2.719466406765988, -2.651867974965727, -2.581763621176315, -2.509226439977398, -2.434331793459436, -2.357157186651176, -2.277782151733637, -2.196288151649543, -2.112758504672146, -2.027278322237886, -1.939934446349340, -1.850815372193087, -1.760011146254415, -1.667613238124885, -1.573714392259209, -1.478408471171280, -1.381790302308604, -1.283955537391881, -1.185000527419725, -1.085021785420186, -0.9841002767201421, -0.8822856275055089, -0.7796122029434081, -0.6760990286935233, -0.5717483884597057, -0.4665459018415614, -0.3604922831247461, -0.2536310074603434, -0.1460231535934189, -0.03774514011326513, 0.07111174322054969, 0.1804410699868377, 0.2901204773415658, 0.4000105464747890, 0.5099534110970247, 0.6197529732081124, 0.7291669115943678, 0.8379650812657480, 0.9459372132451951, 1.052893454820229, 1.158664822325769, 1.263119157012880, 1.366154879239600, 1.467673613441027, 1.567578662953196, 1.665774909481146, 1.762168763395699, 1.856668213524275, 1.949182987370973, 2.039624791249993, 2.127907571142381, 2.213947729581562, 2.297664252566135, 2.378978735252114, 2.457815332254131, 2.534100683878019, 2.607763874632337, 2.678736464118774, 
2.746952600042936, 2.812349190672184, 2.874866091801151, 2.934446258964438, 2.991035829962230, 3.044584129440531, 3.095043615697531, 3.142369809347757, 3.186521247128072, 3.227459491385597, 3.265149202225357, 3.299558254255332, 3.330657862821280, 3.358422681532558, 3.382830844261123, 3.403863945661545, 3.421506976347718, 3.435748243835595, 3.446579312953126, 3.453994989252438, 3.457993350457681, 3.458575811479713, 3.455747195385631, 3.449515780551918, 3.439893303341161, 3.426894912071254, 3.410539085284444, 3.390847538869141, 3.367845148382046, 3.341559904753850, 3.312022906976164, 3.279268380139023, 3.243333697062127, 3.204259380296025, 3.162089068599831, 3.116869444956136, 3.068650136641577, 3.017483606734288, 2.963425057631516, 2.906532360561980, 2.846866013573900, 2.784489118608731, 2.719467360508919, 2.651868969869754, 2.581764657548397, 2.509227518831352, 2.434332915689696, 2.357158352278182, 2.277783359500837, 2.196289399105813, 2.112759788684429, 2.027279639720124, 1.939935794923556, 1.850816750538731, 1.760012554031996, 1.667614675525918, 1.573715859372168, 1.478409967412222, 1.381791826127216, 1.283957086348587, 1.185002098584672, 1.085023375977398, 0.9841018850593342, 0.8822872531983896, 0.7796138463523725}; // experimental samples of Vo(t) const double h_instants[N_SAMPLES] = {4.872634209637256e-12, 9.745268419274511e-12, 1.461790262891177e-11, 1.949053683854902e-11, 2.436317104818628e-11, 2.923580525782353e-11, 3.410843946746079e-11, 3.898107367709804e-11, 4.385370788673530e-11, 4.872634209637256e-11, 5.359897630600981e-11, 5.847161051564707e-11, 6.334424472528432e-11, 6.821687893492158e-11, 7.308951314455883e-11, 7.796214735419609e-11, 8.283478156383334e-11, 8.770741577347060e-11, 9.258004998310786e-11, 9.745268419274511e-11, 1.023253184023824e-10, 1.071979526120196e-10, 1.120705868216569e-10, 1.169432210312941e-10, 1.218158552409314e-10, 1.266884894505686e-10, 1.315611236602059e-10, 1.364337578698432e-10, 1.413063920794804e-10, 1.461790262891177e-10, 1.510516604987549e-10, 1.559242947083922e-10, 1.607969289180294e-10, 1.656695631276667e-10, 1.705421973373039e-10, 1.754148315469412e-10, 1.802874657565785e-10, 1.851600999662157e-10, 1.900327341758530e-10, 1.949053683854902e-10, 1.997780025951275e-10, 2.046506368047647e-10, 2.095232710144020e-10, 2.143959052240392e-10, 2.192685394336765e-10, 2.241411736433138e-10, 2.290138078529510e-10, 2.338864420625883e-10, 2.387590762722255e-10, 2.436317104818628e-10, 2.485043446915000e-10, 2.533769789011373e-10, 2.582496131107745e-10, 2.631222473204118e-10, 2.679948815300491e-10, 2.728675157396863e-10, 2.777401499493236e-10, 2.826127841589608e-10, 2.874854183685981e-10, 2.923580525782353e-10, 2.972306867878726e-10, 3.021033209975098e-10, 3.069759552071471e-10, 3.118485894167844e-10, 3.167212236264216e-10, 3.215938578360589e-10, 3.264664920456961e-10, 3.313391262553334e-10, 3.362117604649706e-10, 3.410843946746079e-10, 3.459570288842451e-10, 3.508296630938824e-10, 3.557022973035197e-10, 3.605749315131569e-10, 3.654475657227942e-10, 3.703201999324314e-10, 3.751928341420687e-10, 3.800654683517059e-10, 3.849381025613432e-10, 3.898107367709804e-10, 3.946833709806177e-10, 3.995560051902550e-10, 4.044286393998922e-10, 4.093012736095295e-10, 4.141739078191667e-10, 4.190465420288040e-10, 4.239191762384412e-10, 4.287918104480785e-10, 4.336644446577157e-10, 4.385370788673530e-10, 4.434097130769903e-10, 4.482823472866275e-10, 4.531549814962648e-10, 4.580276157059020e-10, 4.629002499155393e-10, 4.677728841251765e-10, 4.726455183348138e-10, 
4.775181525444510e-10, 4.823907867540883e-10, 4.872634209637256e-10, 4.921360551733628e-10, 4.970086893830001e-10, 5.018813235926373e-10, 5.067539578022746e-10, 5.116265920119118e-10, 5.164992262215491e-10, 5.213718604311863e-10, 5.262444946408236e-10, 5.311171288504609e-10, 5.359897630600981e-10, 5.408623972697354e-10, 5.457350314793726e-10, 5.506076656890099e-10, 5.554802998986471e-10, 5.603529341082844e-10, 5.652255683179216e-10, 5.700982025275589e-10, 5.749708367371962e-10, 5.798434709468334e-10, 5.847161051564707e-10, 5.895887393661079e-10, 5.944613735757452e-10, 5.993340077853824e-10, 6.042066419950197e-10, 6.090792762046569e-10, 6.139519104142942e-10, 6.188245446239315e-10, 6.236971788335687e-10, 6.285698130432060e-10, 6.334424472528432e-10, 6.383150814624805e-10, 6.431877156721177e-10, 6.480603498817550e-10, 6.529329840913922e-10, 6.578056183010295e-10, 6.626782525106668e-10, 6.675508867203040e-10, 6.724235209299413e-10, 6.772961551395785e-10, 6.821687893492158e-10, 6.870414235588530e-10, 6.919140577684903e-10, 6.967866919781275e-10, 7.016593261877648e-10, 7.065319603974021e-10, 7.114045946070393e-10, 7.162772288166766e-10, 7.211498630263138e-10, 7.260224972359511e-10, 7.308951314455883e-10, 7.357677656552256e-10, 7.406403998648628e-10, 7.455130340745001e-10, 7.503856682841374e-10, 7.552583024937746e-10, 7.601309367034119e-10, 7.650035709130491e-10, 7.698762051226864e-10, 7.747488393323236e-10, 7.796214735419609e-10, 7.844941077515981e-10, 7.893667419612354e-10, 7.942393761708727e-10, 7.991120103805099e-10, 8.039846445901472e-10, 8.088572787997844e-10, 8.137299130094217e-10, 8.186025472190589e-10, 8.234751814286962e-10, 8.283478156383334e-10, 8.332204498479707e-10, 8.380930840576080e-10, 8.429657182672452e-10, 8.478383524768825e-10, 8.527109866865197e-10, 8.575836208961570e-10, 8.624562551057942e-10, 8.673288893154315e-10, 8.722015235250687e-10, 8.770741577347060e-10, 8.819467919443433e-10, 8.868194261539805e-10, 8.916920603636178e-10, 8.965646945732550e-10, 9.014373287828923e-10, 9.063099629925295e-10, 9.111825972021668e-10, 9.160552314118040e-10, 9.209278656214413e-10, 9.258004998310786e-10, 9.306731340407158e-10, 9.355457682503531e-10, 9.404184024599903e-10, 9.452910366696276e-10, 9.501636708792648e-10, 9.550363050889021e-10, 9.599089392985393e-10, 9.647815735081766e-10, 9.696542077178139e-10, 9.745268419274511e-10}; // instants of time when Gamma and Vo were sampled /*** FUNCTION IMPLEMENTATIONS ***/ /* * Allocates the samples of the Gamma and Vo functions in device memory. * d_gamma is the pointer to the Gamma function samples; * d_vo is the pointer to the Vo function samples; * d_instants is the pointer to the sampling instants of the Gamma and Vo functions * version is a flag deciding which version of the function to use: */ void allocate_samples(const unsigned int version) { if(!version) { //The first cell of the d_gamma and d_vo arrays is intentionally left empty: //it will then be filled with the last value of the h_gamma and h_vo arrays. hipMemcpyToSymbol(d_gamma, h_gamma, sizeof(double) * N_SAMPLES, sizeof(double)); //cells 1-200 hipMemcpyToSymbol(d_vo, h_vo, sizeof(double) * N_SAMPLES, sizeof(double)); //cells 1-200 hipMemcpyToSymbol(d_gamma, &h_gamma[N_SAMPLES - 1], sizeof(double)); //cell 0 hipMemcpyToSymbol(d_vo, &h_vo[N_SAMPLES - 1], sizeof(double)); //cell 0 hipMemcpyToSymbol(d_instants, h_instants, sizeof(double) * N_SAMPLES); } else { fprintf(stderr, "Forbidden allocate_samples version number! Valid values are 0 and 1."); exit(INVALID_VERSION_NUM); } }
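Note: in both the HIP and CUDA variants of this file, allocate_samples() leans on the offset argument of *MemcpyToSymbol: the first copy writes the N_SAMPLES host values starting at byte offset sizeof(double) (cells 1..N_SAMPLES), and the second copy, with the default offset of 0, drops the last sample into cell 0. The host-side check below is a hypothetical sketch, not part of either file; it is written against the CUDA names, the extern redeclaration of d_gamma is mine, and it assumes samples.h defines N_SAMPLES and that this unit is linked with the file defining the symbol (relocatable device code if compiled separately).

// Hypothetical layout check (illustrative only): reads the symbol back with
// cudaMemcpyFromSymbol and verifies the wrap-around cell allocate_samples(0)
// sets up: cell 0 == last host sample, cells 1..N_SAMPLES == h_gamma[0..N-1].
#include <cstdio>
#include "samples.h"

extern __device__ double d_gamma[N_SAMPLES + 1];

void check_gamma_layout(const double *h_gamma_host)
{
    double back[N_SAMPLES + 1];
    // Default offset (0) and default kind (cudaMemcpyDeviceToHost).
    cudaMemcpyFromSymbol(back, d_gamma, sizeof(back));
    bool ok = (back[0] == h_gamma_host[N_SAMPLES - 1]);   // cell 0 = last sample
    for (int i = 0; i < N_SAMPLES && ok; ++i)
        ok = (back[i + 1] == h_gamma_host[i]);            // cells 1..N = samples
    printf("d_gamma layout %s\n", ok ? "as expected" : "unexpected");
}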
9b1522690a994c1b6ac7bc6c9811d253b42cc483.cu
/* * env_vars.cu * * Created on: 01/dic/2014 * Author: Edoardo Mondoni */ #include "samples.h" #include "err.h" #include <stdio.h> /*** CONSTANT MEMORY DECLARATIONS ***/ __device__ double d_gamma[N_SAMPLES + 1]; __device__ double d_vo[N_SAMPLES + 1]; __device__ double d_instants[N_SAMPLES]; /*** CONSTANT DEFINITIONS ***/ const double h_gamma[N_SAMPLES] = {150.9118531981026, 151.5285590578920, 151.8096606907299, 151.7562930050505, 151.4642346584513, 150.9581207918112, 150.2606280519593, 149.3934013670099, 148.3777904516479, 147.2353439294383, 145.9881194285274, 144.6528821601795, 143.2526577745959, 141.9058189578238, 140.5866109614942, 139.2692067856598, 137.9287115410390, 136.5593413120525, 135.0980616219118, 133.5028360448642, 131.7747384613797, 129.9150382483862, 127.9251395674573, 125.8065791239034, 123.5610407395057, 121.1903777933693, 118.6966304688798, 116.0820267158657, 113.3489628150171, 110.4999684313524, 107.5376682808701, 104.4647548825510, 101.2839831403584, 97.99818900750454, 94.61032457258823, 91.12349462991806, 87.54097827766026, 83.86622422913365, 80.10281856964643, 76.25443482782526, 72.32478400313934, 68.31758326248365, 64.23655557683860, 60.08546080250227, 55.86814608186654, 51.58859498474102, 47.25095414298436, 42.85952391867009, 38.41871322204285, 33.93297285627299, 29.40673107018239, 24.84435537694043, 20.25015551856541, 15.62842687465883, 10.98351750219837, 6.319891735692119, 1.642163242589056, -3.044918860776084, -7.736529482757397, -12.42785316559739, -17.11416506263525, -21.79087681825568, -26.45352927187255, -31.09773407279362, -35.71908662104847, -40.31308518180270, -44.87509024555831, -49.40034389840219, -53.88404614753406, -58.32146244725330, -62.70802308992435, -67.03937649509434, -71.31137493454654, -75.51999704360846, -79.66123675893489, -83.73100298241140, -87.72507194878320, -91.63911510080199, -95.46879611789336, -99.20990279184402, -102.8584639564917, -106.4108054928136, -109.8635219711709, -113.2133735776256, -116.4571486785438, -119.5915480219573, -122.6131399130156, -125.5184082431288, -128.3038771933860, -130.9662630471705, -133.5025892697328, -135.9102134091697, -138.1867503520988, -140.3299223779285, -142.3374022054308, -144.2067227560494, -145.9504404355676, -147.5469187332408, -148.9318335048445, -150.0820637453334, -150.9725285997426, -151.5772172830993, -151.8462223321152, -151.7807136525794, -151.4765159927838, -150.9582910072513, -150.2487406212101, -149.3695313730091, -148.3420277565196, -147.1877838555826, -145.9288530790558, -144.5819925950053, -143.1702082599511, -141.8118167003158, -140.4810708399161, -139.1521500814942, -137.8001644308767, -136.4193187050986, -134.9466322470249, -133.3401256065954, -131.6008954161345, -129.7302337289814, -127.7295594101187, -125.6004122532571, -123.3444698293929, -120.9635771720460, -118.4597729788949, -115.8352971263997, -113.0925721992880, -110.2341630646940, -107.2627285378647, -104.1809836101987, -100.9916874544385, -97.69766253318762, -94.30183746445621, -90.80729598260224, -87.21731073743987, -83.53534559595926, -79.76502197843072, -75.91005912638852, -71.97420937853271, -67.96121281375949, -63.87478923300315, -59.71867144059283, -55.49666739381114, -51.21272648995492, -46.87098220133188, -42.47575115196885, -38.03148497909444, -33.54268990413045, -29.01384257752727, -24.44933376257186, -19.85346202038811, -15.23048084265765, -10.58468162030908, -5.920479695973544, -1.242467837461407, 3.444587448506927, 8.135809965506729, 12.82631771973166, 17.51132934222521, 22.18623286753558, 26.84658960786979, 
31.48807021863084, 36.10634675491476, 40.69698308800641, 45.25536872816330, 49.77672606420251, 54.25619339123547, 58.68895619857150, 63.07037876832002, 67.39608575307216, 71.66196088481411, 75.86406133062852, 79.99847963246236, 84.06120742240232, 88.04805678502025, 91.95467460834715, 95.77664968231936, 99.50967518736837, 103.1497053675946, 106.6930448384480, 110.1363334510258, 113.4764302697344, 116.7102413120443, 119.8345604648115, 122.8459903727553, 125.7409796465810, 128.5159654876024, 131.1675655075404, 133.6927384286541, 136.0888424081005, 138.3535608604175, 140.4847237771999, 142.4801028996183, 144.3372772439026, 146.0687849570104, 147.6529107598514, 149.0252538985235, 150.0094608580336}; // experimental samples of Gamma(t) const double h_vo[N_SAMPLES] = {0.6760973943249335, 0.5717467299267627, 0.4665442143697862, 0.3604905683435304, 0.2536292701964937, 0.1460213986096215, 0.03774337014700224, -0.07111352756116007, -0.1804428687231168, -0.2901222890285782, -0.4000123667547668, -0.5099552327627888, -0.6197547873667815, -0.7291687114502992, -0.8379668650874113, -0.9459389847234885, -1.052895220667863, -1.158666587964081, -1.263120922024231, -1.366156636774651, -1.467675352583971, -1.567580372447689, -1.665776581376069, -1.762170395246174, -1.856669808406348, -1.949184551830339, -2.039626332071301, -2.127909092216705, -2.213949230041704, -2.297665726820814, -2.378980174805175, -2.457816728532791, -2.534102030957383, -2.607765170840508, -2.678737711978820, -2.746953804658915, -2.812350357243830, -2.874867223243498, -2.934447354520114, -2.991036885277661, -3.044585138005190, -3.095044571039522, -3.142370707125508, -3.186522086339126, -3.227460274282618, -3.265149933034065, -3.299558937233582, -3.330658500419298, -3.358423273339567, -3.382831387095663, -3.403864434716708, -3.421507406911287, -3.435748612912533, -3.446579620186737, -3.453995236832424, -3.457993542098095, -3.458575950894605, -3.455747284858068, -3.449515820128567, -3.439893290924596, -3.426894844332465, -3.410538959014104, -3.390847352239840, -3.367844901652289, -3.341559600176355, -3.312022547982817, -3.279267970142625, -3.243333238342549, -3.204258873384214, -3.162088512366561, -3.116868837338004, -3.068649475700950, -3.017482891639262, -2.963424289191908, -2.906531541141616, -2.846865146442169, -2.784488207001688, -2.719466406765988, -2.651867974965727, -2.581763621176315, -2.509226439977398, -2.434331793459436, -2.357157186651176, -2.277782151733637, -2.196288151649543, -2.112758504672146, -2.027278322237886, -1.939934446349340, -1.850815372193087, -1.760011146254415, -1.667613238124885, -1.573714392259209, -1.478408471171280, -1.381790302308604, -1.283955537391881, -1.185000527419725, -1.085021785420186, -0.9841002767201421, -0.8822856275055089, -0.7796122029434081, -0.6760990286935233, -0.5717483884597057, -0.4665459018415614, -0.3604922831247461, -0.2536310074603434, -0.1460231535934189, -0.03774514011326513, 0.07111174322054969, 0.1804410699868377, 0.2901204773415658, 0.4000105464747890, 0.5099534110970247, 0.6197529732081124, 0.7291669115943678, 0.8379650812657480, 0.9459372132451951, 1.052893454820229, 1.158664822325769, 1.263119157012880, 1.366154879239600, 1.467673613441027, 1.567578662953196, 1.665774909481146, 1.762168763395699, 1.856668213524275, 1.949182987370973, 2.039624791249993, 2.127907571142381, 2.213947729581562, 2.297664252566135, 2.378978735252114, 2.457815332254131, 2.534100683878019, 2.607763874632337, 2.678736464118774, 2.746952600042936, 2.812349190672184, 2.874866091801151, 2.934446258964438, 
2.991035829962230, 3.044584129440531, 3.095043615697531, 3.142369809347757, 3.186521247128072, 3.227459491385597, 3.265149202225357, 3.299558254255332, 3.330657862821280, 3.358422681532558, 3.382830844261123, 3.403863945661545, 3.421506976347718, 3.435748243835595, 3.446579312953126, 3.453994989252438, 3.457993350457681, 3.458575811479713, 3.455747195385631, 3.449515780551918, 3.439893303341161, 3.426894912071254, 3.410539085284444, 3.390847538869141, 3.367845148382046, 3.341559904753850, 3.312022906976164, 3.279268380139023, 3.243333697062127, 3.204259380296025, 3.162089068599831, 3.116869444956136, 3.068650136641577, 3.017483606734288, 2.963425057631516, 2.906532360561980, 2.846866013573900, 2.784489118608731, 2.719467360508919, 2.651868969869754, 2.581764657548397, 2.509227518831352, 2.434332915689696, 2.357158352278182, 2.277783359500837, 2.196289399105813, 2.112759788684429, 2.027279639720124, 1.939935794923556, 1.850816750538731, 1.760012554031996, 1.667614675525918, 1.573715859372168, 1.478409967412222, 1.381791826127216, 1.283957086348587, 1.185002098584672, 1.085023375977398, 0.9841018850593342, 0.8822872531983896, 0.7796138463523725}; // experimental samples of Vo(t) const double h_instants[N_SAMPLES] = {4.872634209637256e-12, 9.745268419274511e-12, 1.461790262891177e-11, 1.949053683854902e-11, 2.436317104818628e-11, 2.923580525782353e-11, 3.410843946746079e-11, 3.898107367709804e-11, 4.385370788673530e-11, 4.872634209637256e-11, 5.359897630600981e-11, 5.847161051564707e-11, 6.334424472528432e-11, 6.821687893492158e-11, 7.308951314455883e-11, 7.796214735419609e-11, 8.283478156383334e-11, 8.770741577347060e-11, 9.258004998310786e-11, 9.745268419274511e-11, 1.023253184023824e-10, 1.071979526120196e-10, 1.120705868216569e-10, 1.169432210312941e-10, 1.218158552409314e-10, 1.266884894505686e-10, 1.315611236602059e-10, 1.364337578698432e-10, 1.413063920794804e-10, 1.461790262891177e-10, 1.510516604987549e-10, 1.559242947083922e-10, 1.607969289180294e-10, 1.656695631276667e-10, 1.705421973373039e-10, 1.754148315469412e-10, 1.802874657565785e-10, 1.851600999662157e-10, 1.900327341758530e-10, 1.949053683854902e-10, 1.997780025951275e-10, 2.046506368047647e-10, 2.095232710144020e-10, 2.143959052240392e-10, 2.192685394336765e-10, 2.241411736433138e-10, 2.290138078529510e-10, 2.338864420625883e-10, 2.387590762722255e-10, 2.436317104818628e-10, 2.485043446915000e-10, 2.533769789011373e-10, 2.582496131107745e-10, 2.631222473204118e-10, 2.679948815300491e-10, 2.728675157396863e-10, 2.777401499493236e-10, 2.826127841589608e-10, 2.874854183685981e-10, 2.923580525782353e-10, 2.972306867878726e-10, 3.021033209975098e-10, 3.069759552071471e-10, 3.118485894167844e-10, 3.167212236264216e-10, 3.215938578360589e-10, 3.264664920456961e-10, 3.313391262553334e-10, 3.362117604649706e-10, 3.410843946746079e-10, 3.459570288842451e-10, 3.508296630938824e-10, 3.557022973035197e-10, 3.605749315131569e-10, 3.654475657227942e-10, 3.703201999324314e-10, 3.751928341420687e-10, 3.800654683517059e-10, 3.849381025613432e-10, 3.898107367709804e-10, 3.946833709806177e-10, 3.995560051902550e-10, 4.044286393998922e-10, 4.093012736095295e-10, 4.141739078191667e-10, 4.190465420288040e-10, 4.239191762384412e-10, 4.287918104480785e-10, 4.336644446577157e-10, 4.385370788673530e-10, 4.434097130769903e-10, 4.482823472866275e-10, 4.531549814962648e-10, 4.580276157059020e-10, 4.629002499155393e-10, 4.677728841251765e-10, 4.726455183348138e-10, 4.775181525444510e-10, 4.823907867540883e-10, 4.872634209637256e-10, 
4.921360551733628e-10, 4.970086893830001e-10, 5.018813235926373e-10, 5.067539578022746e-10, 5.116265920119118e-10, 5.164992262215491e-10, 5.213718604311863e-10, 5.262444946408236e-10, 5.311171288504609e-10, 5.359897630600981e-10, 5.408623972697354e-10, 5.457350314793726e-10, 5.506076656890099e-10, 5.554802998986471e-10, 5.603529341082844e-10, 5.652255683179216e-10, 5.700982025275589e-10, 5.749708367371962e-10, 5.798434709468334e-10, 5.847161051564707e-10, 5.895887393661079e-10, 5.944613735757452e-10, 5.993340077853824e-10, 6.042066419950197e-10, 6.090792762046569e-10, 6.139519104142942e-10, 6.188245446239315e-10, 6.236971788335687e-10, 6.285698130432060e-10, 6.334424472528432e-10, 6.383150814624805e-10, 6.431877156721177e-10, 6.480603498817550e-10, 6.529329840913922e-10, 6.578056183010295e-10, 6.626782525106668e-10, 6.675508867203040e-10, 6.724235209299413e-10, 6.772961551395785e-10, 6.821687893492158e-10, 6.870414235588530e-10, 6.919140577684903e-10, 6.967866919781275e-10, 7.016593261877648e-10, 7.065319603974021e-10, 7.114045946070393e-10, 7.162772288166766e-10, 7.211498630263138e-10, 7.260224972359511e-10, 7.308951314455883e-10, 7.357677656552256e-10, 7.406403998648628e-10, 7.455130340745001e-10, 7.503856682841374e-10, 7.552583024937746e-10, 7.601309367034119e-10, 7.650035709130491e-10, 7.698762051226864e-10, 7.747488393323236e-10, 7.796214735419609e-10, 7.844941077515981e-10, 7.893667419612354e-10, 7.942393761708727e-10, 7.991120103805099e-10, 8.039846445901472e-10, 8.088572787997844e-10, 8.137299130094217e-10, 8.186025472190589e-10, 8.234751814286962e-10, 8.283478156383334e-10, 8.332204498479707e-10, 8.380930840576080e-10, 8.429657182672452e-10, 8.478383524768825e-10, 8.527109866865197e-10, 8.575836208961570e-10, 8.624562551057942e-10, 8.673288893154315e-10, 8.722015235250687e-10, 8.770741577347060e-10, 8.819467919443433e-10, 8.868194261539805e-10, 8.916920603636178e-10, 8.965646945732550e-10, 9.014373287828923e-10, 9.063099629925295e-10, 9.111825972021668e-10, 9.160552314118040e-10, 9.209278656214413e-10, 9.258004998310786e-10, 9.306731340407158e-10, 9.355457682503531e-10, 9.404184024599903e-10, 9.452910366696276e-10, 9.501636708792648e-10, 9.550363050889021e-10, 9.599089392985393e-10, 9.647815735081766e-10, 9.696542077178139e-10, 9.745268419274511e-10}; // instants of time when Gamma and Vo were sampled /*** FUNCTION IMPLEMENTATIONS ***/ /* * Allocates the samples of the Gamma and Vo functions in device memory. * d_gamma is the pointer to the Gamma function samples; * d_vo is the pointer to the Vo function samples; * d_instants is the pointer to the sampling instants of the Gamma and Vo functions * version is a flag deciding which version of the function to use: */ void allocate_samples(const unsigned int version) { if(!version) { //The first cell of the d_gamma and d_vo arrays is intentionally left empty: //it will then be filled with the last value of the h_gamma and h_vo arrays. cudaMemcpyToSymbol(d_gamma, h_gamma, sizeof(double) * N_SAMPLES, sizeof(double)); //cells 1-200 cudaMemcpyToSymbol(d_vo, h_vo, sizeof(double) * N_SAMPLES, sizeof(double)); //cells 1-200 cudaMemcpyToSymbol(d_gamma, &h_gamma[N_SAMPLES - 1], sizeof(double)); //cell 0 cudaMemcpyToSymbol(d_vo, &h_vo[N_SAMPLES - 1], sizeof(double)); //cell 0 cudaMemcpyToSymbol(d_instants, h_instants, sizeof(double) * N_SAMPLES); } else { fprintf(stderr, "Forbidden allocate_samples version number! Valid values are 0 and 1."); exit(INVALID_VERSION_NUM); } }
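The N_SAMPLES+1 layout pays off when the samples are read back on the device: with cell 0 holding the value at t = 0 (equal to the last sample of the period) and d_instants listing a uniform step, a kernel can interpolate Gamma(t) anywhere in the period without a special case near t = 0. The device function below is only an illustrative sketch of such a lookup; it assumes uniform sampling and a periodic waveform, and the name sample_gamma is mine, not something from these files.

// Illustrative device-side lookup (assumptions: samples are uniformly spaced
// and periodic with period d_instants[N_SAMPLES - 1]). d_gamma[k] holds the
// value at time k * step because cell 0 was filled with the t = 0 value.
__device__ double sample_gamma(double t)
{
    const double period = d_instants[N_SAMPLES - 1];
    const double step   = d_instants[0];
    t = fmod(t, period);                        // wrap into one period
    int k = (int)(t / step);                    // sample just below t
    if (k > N_SAMPLES - 1) k = N_SAMPLES - 1;   // guard against rounding
    double w = (t - k * step) / step;           // linear interpolation weight
    return (1.0 - w) * d_gamma[k] + w * d_gamma[k + 1];
}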
eff64437431a90c3c71b97097ecb87a4230757f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../include/evolution.h" #include "../include/vortex_3d.h" // 3D void apply_gauge(Grid &par, double2 *wfc, double2 *Ax, double2 *Ay, double2 *Az, double renorm_factor_x, double renorm_factor_y, double renorm_factor_z, bool flip, hipfftHandle plan_1d, hipfftHandle plan_dim2, hipfftHandle plan_dim3, double dx, double dy, double dz, double time, int yDim, int size){ dim3 grid = par.grid; dim3 threads = par.threads; if (flip){ // 1d forward / mult by Ax cufftHandleError( hipfftExecZ2Z(plan_1d, wfc, wfc, HIPFFT_FORWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_x, wfc); cudaCheckError(); if(par.bval("Ax_time")){ EqnNode_gpu* Ax_eqn = par.astval("Ax"); int e_num = par.ival("Ax_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, wfc, wfc, Ax_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, wfc, (hipfftDoubleComplex*) Ax, wfc); cudaCheckError(); } cufftHandleError( hipfftExecZ2Z(plan_1d, wfc, wfc, HIPFFT_BACKWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_x, wfc); cudaCheckError(); // loop to multiply by Ay for (int i = 0; i < yDim; i++){ cufftHandleError( hipfftExecZ2Z(plan_dim2, &wfc[i*size], &wfc[i*size], HIPFFT_FORWARD) ); } hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_y, wfc); cudaCheckError(); if(par.bval("Ay_time")){ EqnNode_gpu* Ay_eqn = par.astval("Ay"); int e_num = par.ival("Ay_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, wfc, wfc, Ay_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, wfc, (hipfftDoubleComplex*) Ay, wfc); cudaCheckError(); } for (int i = 0; i < yDim; i++){ //size = xDim * zDim; cufftHandleError( hipfftExecZ2Z(plan_dim2, &wfc[i*size], &wfc[i*size], HIPFFT_BACKWARD) ); } hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_y, wfc); cudaCheckError(); // 1D FFT to Az cufftHandleError( hipfftExecZ2Z(plan_dim3, wfc, wfc, HIPFFT_FORWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_z, wfc); cudaCheckError(); if(par.bval("Az_time")){ EqnNode_gpu* Az_eqn = par.astval("Az"); int e_num = par.ival("Az_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, wfc, wfc, Az_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, wfc, (hipfftDoubleComplex*) Az, wfc); cudaCheckError(); } cufftHandleError( hipfftExecZ2Z(plan_dim3, wfc, wfc, HIPFFT_BACKWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_z, wfc); cudaCheckError(); } else{ // 1D FFT to Az cufftHandleError( hipfftExecZ2Z(plan_dim3, wfc, wfc, HIPFFT_FORWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_z, wfc); cudaCheckError(); if(par.bval("Az_time")){ EqnNode_gpu* Az_eqn = par.astval("Az"); int e_num = par.ival("Az_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, wfc, wfc, Az_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, wfc, (hipfftDoubleComplex*) Az, wfc); cudaCheckError(); } cufftHandleError( hipfftExecZ2Z(plan_dim3, wfc, wfc, HIPFFT_BACKWARD) ); hipLaunchKernelGGL(( scalarMult), 
dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_z, wfc); cudaCheckError(); // loop to multiply by Ay for (int i = 0; i < yDim; i++){ cufftHandleError( hipfftExecZ2Z(plan_dim2, &wfc[i*size], &wfc[i*size], HIPFFT_FORWARD) ); } hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_y, wfc); cudaCheckError(); if(par.bval("Ay_time")){ EqnNode_gpu* Ay_eqn = par.astval("Ay"); int e_num = par.ival("Ay_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, wfc, wfc, Ay_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, wfc, (hipfftDoubleComplex*) Ay, wfc); cudaCheckError(); } for (int i = 0; i < yDim; i++){ //size = xDim * zDim; cufftHandleError( hipfftExecZ2Z(plan_dim2, &wfc[i*size], &wfc[i*size], HIPFFT_BACKWARD) ); } hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_y, wfc); cudaCheckError(); // 1d forward / mult by Ax cufftHandleError( hipfftExecZ2Z(plan_1d, wfc, wfc, HIPFFT_FORWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_x, wfc); cudaCheckError(); if(par.bval("Ax_time")){ EqnNode_gpu* Ax_eqn = par.astval("Ax"); int e_num = par.ival("Ax_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, wfc, wfc, Ax_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, wfc, (hipfftDoubleComplex*) Ax, wfc); cudaCheckError(); } cufftHandleError( hipfftExecZ2Z(plan_1d, wfc, wfc, HIPFFT_BACKWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_x, wfc); cudaCheckError(); } } // 2D void apply_gauge(Grid &par, double2 *wfc, double2 *Ax, double2 *Ay, double renorm_factor_x, double renorm_factor_y, bool flip, hipfftHandle plan_1d, hipfftHandle plan_dim2, double dx, double dy, double dz, double time){ dim3 grid = par.grid; dim3 threads = par.threads; if (flip){ // 1d forward / mult by Ax cufftHandleError( hipfftExecZ2Z(plan_1d, wfc, wfc, HIPFFT_FORWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_y ,wfc); cudaCheckError(); if(par.bval("Ax_time")){ EqnNode_gpu* Ax_eqn = par.astval("Ax"); int e_num = par.ival("Ax_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, wfc, wfc, Ax_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, wfc, (hipfftDoubleComplex*) Ax, wfc); cudaCheckError(); } cufftHandleError( hipfftExecZ2Z(plan_1d, wfc, wfc, HIPFFT_BACKWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_x, wfc); cudaCheckError(); // 1D FFT to wfc_pAy cufftHandleError( hipfftExecZ2Z(plan_dim2, wfc, wfc, HIPFFT_FORWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_x, wfc); cudaCheckError(); if(par.bval("Ay_time")){ EqnNode_gpu* Ay_eqn = par.astval("Ay"); int e_num = par.ival("Ay_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, wfc, wfc, Ay_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, wfc, (hipfftDoubleComplex*) Ay, wfc); cudaCheckError(); } cufftHandleError( hipfftExecZ2Z(plan_dim2, wfc, wfc, HIPFFT_BACKWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_y, wfc); cudaCheckError(); } else{ // 1D FFT to wfc_pAy cufftHandleError( hipfftExecZ2Z(plan_dim2, wfc, wfc, 
HIPFFT_FORWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_x, wfc); cudaCheckError(); if(par.bval("Ay_time")){ EqnNode_gpu* Ay_eqn = par.astval("Ay"); int e_num = par.ival("Ay_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, wfc, wfc, Ay_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, wfc, (hipfftDoubleComplex*) Ay, wfc); cudaCheckError(); } cufftHandleError( hipfftExecZ2Z(plan_dim2, wfc, wfc, HIPFFT_BACKWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_y, wfc); cudaCheckError(); // 1d forward / mult by Ax cufftHandleError( hipfftExecZ2Z(plan_1d, wfc, wfc, HIPFFT_FORWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_y ,wfc); cudaCheckError(); if(par.bval("Ax_time")){ EqnNode_gpu* Ax_eqn = par.astval("Ax"); int e_num = par.ival("Ax_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, wfc, wfc, Ax_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, wfc, (hipfftDoubleComplex*) Ax, wfc); cudaCheckError(); } cufftHandleError( hipfftExecZ2Z(plan_1d, wfc, wfc, HIPFFT_BACKWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, wfc, renorm_factor_x, wfc); cudaCheckError(); } } void evolve(Grid &par, int numSteps, unsigned int gstate, std::string buffer){ // Re-establishing variables from parsed Grid class std::string data_dir = par.sval("data_dir"); int dimnum = par.ival("dimnum"); double omega = par.dval("omega"); double angle_sweep = par.dval("angle_sweep"); double gdt = par.dval("gdt"); double dt = par.dval("dt"); double omegaX = par.dval("omegaX"); double omegaY = par.dval("omegaY"); double mass = par.dval("mass"); double dx = par.dval("dx"); double dy = 1; double dz = 1; double interaction = par.dval("interaction"); double laser_power = par.dval("laser_power"); double gDenConst = par.dval("gDenConst"); double thresh_const = par.dval("thresh_const"); double *x = par.dsval("x"); double *y; double *V = par.dsval("V"); double *Phi = par.dsval("Phi"); double2 *gpu1dpAx = par.cufftDoubleComplexval("pAx_gpu"); double2 *gpu1dpAy; double2 *gpu1dpAz; double *Phi_gpu = par.dsval("Phi_gpu"); bool write_it = par.bval("write_it"); bool graph = par.bval("graph"); int N = par.ival("atoms"); int printSteps = par.ival("printSteps"); int energy_calc_steps = par.ival("energy_calc_steps"); double energy_calc_threshold = par.dval("energy_calc_threshold"); bool nonlin = par.bval("gpe"); bool lz = par.bval("corotating"); bool ramp = par.bval("ramp"); int ramp_type = par.ival("ramp_type"); int xDim = par.ival("xDim"); int yDim = 1; int zDim = 1; hipfftDoubleComplex *wfc = par.cufftDoubleComplexval("wfc"); hipfftDoubleComplex *gpuWfc = par.cufftDoubleComplexval("wfc_gpu"); hipfftDoubleComplex *K_gpu = par.cufftDoubleComplexval("K_gpu"); hipfftDoubleComplex *V_gpu = par.cufftDoubleComplexval("V_gpu"); if (dimnum > 1){ dy = par.dval("dy"); y = par.dsval("y"); gpu1dpAy = par.cufftDoubleComplexval("pAy_gpu"); yDim = par.ival("yDim"); } if (dimnum > 2){ dz = par.dval("dz"); gpu1dpAz = par.cufftDoubleComplexval("pAz_gpu"); zDim = par.ival("zDim"); } int gridSize = xDim * yDim * zDim; // getting data from Cuda class hipfftHandle plan_1d = par.ival("plan_1d"); hipfftHandle plan_2d = par.ival("plan_2d"); hipfftHandle plan_other2d = par.ival("plan_other2d"); hipfftHandle plan_3d = par.ival("plan_3d"); 
hipfftHandle plan_dim2 = par.ival("plan_dim2"); hipfftHandle plan_dim3 = par.ival("plan_dim3"); dim3 threads = par.threads; dim3 grid = par.grid; // Because no two operations are created equally. // Multiplication is faster than divisions. double renorm_factor_nd=1.0/pow(gridSize,0.5); double renorm_factor_x=1.0/pow(xDim,0.5); double renorm_factor_y=1.0/pow(yDim,0.5); double renorm_factor_z=1.0/pow(zDim,0.5); clock_t begin, end; double time_spent; double Dt; if(gstate==0){ Dt = gdt; printf("Timestep for groundstate solver set as: %E\n",Dt); } else{ Dt = dt; printf("Timestep for evolution set as: %E\n",Dt); } begin = clock(); double omega_0=omega*omegaX; // 2D VORTEX TRACKING double mask_2d = par.dval("mask_2d"); int x0_shift = par.dval("x0_shift"); int y0_shift = par.dval("y0_shift"); int charge = par.ival("charge"); int kill_idx = par.ival("kill_idx"); hipfftDoubleComplex *EV_opt = par.cufftDoubleComplexval("EV_opt"); int kick_it = par.ival("kick_it"); double *V_opt = par.dsval("V_opt"); // Double buffering and will attempt to thread free and calloc operations to // hide time penalty. Or may not bother. int num_vortices[2] = {0,0}; // binary matrix of size xDim*yDim, // 1 for vortex at specified index, 0 otherwise int* vortexLocation; //int* olMaxLocation = (int*) calloc(xDim*yDim,sizeof(int)); std::shared_ptr<Vtx::Vortex> central_vortex; //vortex closest to the central position /* central_vortex.coords.x = -1; central_vortex.coords.y = -1; central_vortex.coordsD.x = -1.; central_vortex.coordsD.y = -1.; central_vortex.wind = 0; */ // Angle of vortex lattice. Add to optical lattice for alignment. double vort_angle; // array of vortex coordinates from vortexLocation 1's //struct Vtx::Vortex *vortCoords = NULL; std::shared_ptr<Vtx::VtxList> vortCoords = std::make_shared<Vtx::VtxList>(7); //std::vector<std::shared_ptr<Vtx::Vortex> vortCoords; //Previous array of vortex coordinates from vortexLocation 1's //struct Vtx::Vortex *vortCoordsP = NULL; //std::vector<struct Vtx::Vortex> vortCoordsP; std::shared_ptr<Vtx::VtxList> vortCoordsP = std::make_shared<Vtx::VtxList>(7); LatticeGraph::Lattice lattice; //Vortex lattice graph. double* adjMat; // Assuming triangular lattice at rotatio //std::cout << "numSteps is: " << numSteps << '\n'; // Iterating through all of the steps in either g or esteps. for(int i=0; i < numSteps; ++i){ double time = Dt*i; if (ramp){ //Adjusts omega for the appropriate trap frequency. if (ramp_type == 1){ if (i == 0){ omega_0 = (double)omega; } else{ omega_0 = (double)i / (double)(i+1); } } else{ if (i == 0){ omega_0=(double)omega/(double)(numSteps); } else{ omega_0 = (double)(i+1) / (double)i; } } } cudaHandleError( hipMemcpy(wfc, gpuWfc, sizeof(hipfftDoubleComplex)*xDim*yDim*zDim, hipMemcpyDeviceToHost) ); // Print-out at pre-determined rate. // Vortex & wfc analysis performed here also. if(i % printSteps == 0) { // If the unit_test flag is on, we need a special case printf("Step: %d Omega: %lf\n", i, omega_0); // Printing out time of iteration end = clock(); time_spent = (double) (end - begin) / CLOCKS_PER_SEC; printf("Time spent: %lf\n", time_spent); std::string fileName = ""; //printf("ramp=%d gstate=%d rg=%d \n", // ramp, gstate, ramp | (gstate << 1)); switch (ramp | (gstate << 1)) { case 0: //Groundstate solver, constant Omega value. { fileName = "wfc_0_const"; break; } case 1: //Groundstate solver, ramped Omega value. { fileName = "wfc_0_ramp"; break; } case 2: //Real-time evolution, constant Omega value. 
{ if (dimnum == 3){ // Note: In the case of 3d, we need to think about // vortex tracking in a new way. // It may be as simple as splitting the problem // into 2D elements and working from there, but // let's look into it when we need it in the // future. std::cout << "commencing 3d vortex tracking" << '\n'; // Creating the necessary double* values double* edges = (double *)malloc(sizeof(double) *gridSize); find_edges(par, wfc, edges); double* edges_gpu = par.dsval("edges_gpu"); // Now we need to output everything if (write_it){ FileIO::writeOutDouble(buffer, data_dir+"Edges", edges, gridSize, i); } free(edges); } else if (dimnum == 2){ vortexLocation = (int *) calloc(xDim * yDim, sizeof(int)); num_vortices[0] = Tracker::findVortex(vortexLocation, wfc, mask_2d, xDim, x, i); // If initial step, locate vortices, least-squares to find // exact centre, calculate lattice angle, generate optical // lattice. if (i == 0) { if(num_vortices[0] > 0){ //Reserve enough space for the vortices //reserve(num_vortices[0]); vortCoords = std::make_shared<Vtx::VtxList> (num_vortices[0]); vortCoordsP = std::make_shared<Vtx::VtxList> (num_vortices[0]); //Locate the vortex positions to the nearest grid, then //perform a least-squares fit to determine the location //to sub-grid reolution. Tracker::vortPos(vortexLocation, vortCoords->getVortices(), xDim, wfc); Tracker::lsFit(vortCoords->getVortices(),wfc,xDim); //Find the centre-most vortex in the lattice central_vortex = Tracker::vortCentre(vortCoords-> getVortices(), xDim); //Determine the Angle formed by the lattice relative to //the x-axis vort_angle = Tracker::vortAngle(vortCoords-> getVortices(), central_vortex); //Store the vortex angle in the parameter file par.store("Vort_angle", vort_angle); //Determine average lattice spacing. double sepAvg = Tracker::vortSepAvg(vortCoords-> getVortices(), central_vortex); par.store("Central_vort_x", (double) central_vortex->getCoords().x); par.store("Central_vort_y", (double) central_vortex->getCoords().y); par.store("Central_vort_winding", (double) central_vortex->getWinding()); par.store("Num_vort", (double) vortCoords-> getVortices().size()); //Setup the optical lattice to match the spacing and // angle+angle_sweep of the vortex lattice. // Amplitude matched by setting laser_power // parameter switch. optLatSetup(central_vortex, V, vortCoords->getVortices(), vort_angle + PI * angle_sweep / 180.0, laser_power * HBAR * sqrt(omegaX * omegaY), V_opt, x, y, par); } // If kick_it param is 2, perform a single kick of the // optical lattice for the first timestep only. // This is performed by loading the // EV_opt exp(V + V_opt) array into GPU memory // for the potential. if (kick_it == 2) { printf("Kicked it 1\n"); cudaHandleError( hipMemcpy(V_gpu, EV_opt, sizeof(hipfftDoubleComplex) * xDim * yDim, hipMemcpyHostToDevice) ); } // Write out the newly specified potential // and exp potential to files if(write_it){ FileIO::writeOutDouble(buffer, data_dir + "V_opt_1", V_opt, xDim * yDim, 0); FileIO::writeOut(buffer, data_dir + "EV_opt_1", EV_opt, xDim * yDim, 0); //Store necessary parameters to Params.dat file. FileIO::writeOutParam(buffer, par, data_dir + "Params.dat"); } } //If i!=0 and the number of vortices changes // if num_vortices[1] < num_vortices[0] ... 
Fewer vortices else { if (num_vortices[0] > 0){ Tracker::vortPos(vortexLocation, vortCoords->getVortices(), xDim, wfc); Tracker::lsFit(vortCoords->getVortices(), wfc, xDim); Tracker::vortArrange(vortCoords->getVortices(), vortCoordsP->getVortices()); if(write_it){ FileIO::writeOutInt(buffer, data_dir + "vLoc_", vortexLocation, xDim * yDim, i); } } } // The following will eventually be modified and moved into // a new library that works closely wy0_shiftUE. Used to // also defined for vortex elimination using graph positions // and UID numbers. if (graph && num_vortices[0] > 0) { for (int ii = 0; ii < vortCoords->getVortices().size(); ++ii) { std::shared_ptr<LatticeGraph::Node> n(new LatticeGraph::Node( *vortCoords->getVortices().at(ii).get())); lattice.addVortex(std::move(n)); } unsigned int *uids = (unsigned int *) malloc( sizeof(unsigned int) * lattice.getVortices().size()); for (size_t a=0; a < lattice.getVortices().size(); ++a){ uids[a] = lattice.getVortexIdx(a)->getUid(); } if(i==0) { //Lambda for vortex annihilation/creation. auto killIt=[&](int idx, int winding, double delta_x, double delta_y) { if (abs(delta_x) > 0 || abs(delta_y) > 0){ // Killing initial vortex and then // imprinting new one WFC::phaseWinding(Phi, 1, x,y, dx,dy, lattice.getVortexUid(idx)-> getData().getCoordsD().x, lattice.getVortexUid(idx)-> getData().getCoordsD().y, xDim); cudaHandleError( hipMemcpy(Phi_gpu, Phi, sizeof(double) * xDim * yDim, hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( cMultPhi) , dim3(grid), dim3(threads), 0, 0, gpuWfc,Phi_gpu, gpuWfc); cudaCheckError(); // Imprinting new one int cval = -winding; WFC::phaseWinding(Phi, cval, x,y, dx,dy, lattice.getVortexUid(idx)-> getData().getCoordsD().x + delta_x, lattice.getVortexUid(idx)-> getData().getCoordsD().y + delta_y, xDim); // Sending to device for imprinting cudaHandleError( hipMemcpy(Phi_gpu, Phi, sizeof(double) * xDim * yDim, hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( cMultPhi) , dim3(grid), dim3(threads), 0, 0, gpuWfc,Phi_gpu, gpuWfc); cudaCheckError(); } else{ int cval = -(winding-1); WFC::phaseWinding(Phi, cval, x,y,dx,dy, lattice.getVortexUid(idx)-> getData().getCoordsD().x, lattice.getVortexUid(idx)-> getData().getCoordsD().y, xDim); cudaHandleError( hipMemcpy(Phi_gpu, Phi, sizeof(double) * xDim * yDim, hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( cMultPhi) , dim3(grid), dim3(threads), 0, 0, gpuWfc,Phi_gpu, gpuWfc); cudaCheckError(); } }; if (kill_idx > 0){ killIt(kill_idx, charge, x0_shift, y0_shift); } } lattice.createEdges(1.5 * 2e-5 / dx); //Assumes that vortices //only form edges when the distance is upto 1.5*2e-5. //Replace with delaunay triangulation determined edges //for better computational scaling (and sanity) //O(n^2) -> terrible implementation. It works for now. //Generates the adjacency matrix from the graph, and //outputs to a Mathematica compatible format. adjMat = (double *)calloc(lattice.getVortices().size() * lattice.getVortices().size(), sizeof(double)); lattice.genAdjMat(adjMat); if (write_it){ FileIO::writeOutAdjMat(buffer, data_dir + "graph", adjMat, uids, lattice.getVortices().size(), i); } //Free and clear all memory blocks free(adjMat); free(uids); lattice.getVortices().clear(); lattice.getEdges().clear(); //exit(0); } //Write out the vortex locations if(write_it){ FileIO::writeOutVortex(buffer, data_dir + "vort_arr", vortCoords->getVortices(), i); } printf("Located %lu vortices\n", vortCoords->getVortices().size()); //Free memory block for now. free(vortexLocation); //Current values become previous values. 
num_vortices[1] = num_vortices[0]; vortCoords->getVortices().swap(vortCoordsP->getVortices()); vortCoords->getVortices().clear(); } fileName = "wfc_ev"; break; } case 3: { fileName = "wfc_ev_ramp"; break; } default: { break; } } //std::cout << "writing" << '\n'; if (write_it) { FileIO::writeOut(buffer, data_dir + fileName, wfc, xDim*yDim*zDim, i); } //std::cout << "written" << '\n'; } // No longer writing out // U_r(dt/2)*wfc if(nonlin == 1){ if(par.bval("V_time")){ EqnNode_gpu* V_eqn = par.astval("V"); int e_num = par.ival("V_num"); hipLaunchKernelGGL(( cMultDensity_ast), dim3(grid),dim3(threads), 0, 0, V_eqn,gpuWfc,gpuWfc, dx, dy, dz, time, e_num, 0.5*Dt, gstate,interaction*gDenConst); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMultDensity), dim3(grid),dim3(threads), 0, 0, V_gpu,gpuWfc,gpuWfc,0.5*Dt, gstate,interaction*gDenConst); cudaCheckError(); } } else { if(par.bval("V_time")){ EqnNode_gpu* V_eqn = par.astval("V"); int e_num = par.ival("V_num"); hipLaunchKernelGGL(( ast_op_mult), dim3(grid),dim3(threads), 0, 0, gpuWfc,gpuWfc, V_eqn, dx, dy, dz, time, e_num, gstate+1, Dt); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, V_gpu,gpuWfc,gpuWfc); cudaCheckError(); } } // U_p(dt)*fft2(wfc) cufftHandleError( hipfftExecZ2Z(plan_3d,gpuWfc,gpuWfc,HIPFFT_FORWARD) ); // Normalise hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, gpuWfc,renorm_factor_nd,gpuWfc); cudaCheckError(); if (par.bval("K_time")){ EqnNode_gpu* k_eqn = par.astval("k"); int e_num = par.ival("k_num"); hipLaunchKernelGGL(( ast_op_mult), dim3(grid),dim3(threads), 0, 0, gpuWfc,gpuWfc, k_eqn, dx, dy, dz, time, e_num, gstate+1, Dt); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, K_gpu,gpuWfc,gpuWfc); cudaCheckError(); } cufftHandleError( hipfftExecZ2Z(plan_3d,gpuWfc,gpuWfc,HIPFFT_BACKWARD) ); // Normalise hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, gpuWfc,renorm_factor_nd,gpuWfc); cudaCheckError(); // U_r(dt/2)*wfc if(nonlin == 1){ if(par.bval("V_time")){ EqnNode_gpu* V_eqn = par.astval("V"); int e_num = par.ival("V_num"); hipLaunchKernelGGL(( cMultDensity_ast), dim3(grid),dim3(threads), 0, 0, V_eqn,gpuWfc,gpuWfc, dx, dy, dz, time, e_num, 0.5*Dt, gstate,interaction*gDenConst); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMultDensity), dim3(grid),dim3(threads), 0, 0, V_gpu,gpuWfc,gpuWfc,0.5*Dt, gstate,interaction*gDenConst); cudaCheckError(); } } else { if(par.bval("V_time")){ EqnNode_gpu* V_eqn = par.astval("V"); int e_num = par.ival("V_num"); hipLaunchKernelGGL(( ast_op_mult), dim3(grid),dim3(threads), 0, 0, gpuWfc,gpuWfc, V_eqn, dx, dy, dz, time, e_num, gstate+1, Dt); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, V_gpu,gpuWfc,gpuWfc); cudaCheckError(); } } // Angular momentum pAy-pAx (if engaged) // if(lz == true){ // Multiplying by ramping factor if necessary // Note: using scalarPow to do the scaling inside of the exp if (ramp){ hipLaunchKernelGGL(( scalarPow), dim3(grid),dim3(threads), 0, 0, (hipfftDoubleComplex*) gpu1dpAy, omega_0, (hipfftDoubleComplex*) gpu1dpAy); cudaCheckError(); if (dimnum > 1){ hipLaunchKernelGGL(( scalarPow), dim3(grid),dim3(threads), 0, 0, (hipfftDoubleComplex*) gpu1dpAx, omega_0, (hipfftDoubleComplex*) gpu1dpAx); cudaCheckError(); } if (dimnum > 2){ hipLaunchKernelGGL(( scalarPow), dim3(grid),dim3(threads), 0, 0, (hipfftDoubleComplex*) gpu1dpAz, omega_0, (hipfftDoubleComplex*) gpu1dpAz); cudaCheckError(); } } int size = xDim*zDim; if (dimnum == 3){ 
apply_gauge(par, gpuWfc, gpu1dpAx, gpu1dpAy, gpu1dpAz, renorm_factor_x, renorm_factor_y, renorm_factor_z, i%2, plan_1d, plan_dim2, plan_dim3, dx, dy, dz, time, yDim, size); } else if (dimnum == 2){ apply_gauge(par, gpuWfc, gpu1dpAx, gpu1dpAy, renorm_factor_x, renorm_factor_y, i%2, plan_1d, plan_other2d, dx, dy, dz, time); } else if (dimnum == 1){ cufftHandleError( hipfftExecZ2Z(plan_1d,gpuWfc,gpuWfc,HIPFFT_FORWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, gpuWfc,renorm_factor_x,gpuWfc); cudaCheckError(); if(par.bval("Ax_time")){ EqnNode_gpu* Ax_eqn = par.astval("Ax"); int e_num = par.ival("Ax_num"); hipLaunchKernelGGL(( ast_cmult), dim3(grid),dim3(threads), 0, 0, gpuWfc, gpuWfc, Ax_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ hipLaunchKernelGGL(( cMult), dim3(grid),dim3(threads), 0, 0, gpuWfc, (hipfftDoubleComplex*) gpu1dpAx, gpuWfc); cudaCheckError(); } cufftHandleError( hipfftExecZ2Z(plan_1d,gpuWfc,gpuWfc, HIPFFT_BACKWARD) ); hipLaunchKernelGGL(( scalarMult), dim3(grid),dim3(threads), 0, 0, gpuWfc, renorm_factor_x, gpuWfc); cudaCheckError(); } } if(gstate==0){ parSum(gpuWfc, par); } if (par.bval("energy_calc") && (i % (energy_calc_steps == 0 ? printSteps : energy_calc_steps) == 0)) { double energy = energy_calc(par, gpuWfc); printf("Energy[t@%d]=%E\n",i,energy); std::ofstream energy_out; std::string mode = "energyi.dat"; if (gstate == 1){ mode = "energy.dat"; } if (i == 0){ energy_out.open(data_dir + mode); } else{ energy_out.open(data_dir + mode, std::ios::out | std::ios::app); } energy_out << energy << '\n'; energy_out.close(); double oldEnergy; if (i != 0) { oldEnergy = par.dval("energy"); } else { oldEnergy = 0; } par.store("energy", energy); if (i != 0 && fabs(oldEnergy - energy) < energy_calc_threshold * oldEnergy && gstate == 0) { printf("Stopping early at step %d with energy %E\n", i, energy); break; } } } par.store("wfc", wfc); par.store("wfc_gpu", gpuWfc); }
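apply_gauge() above applies the vector-potential factors one axis at a time: a 1-D FFT along the axis, a rescale, a pointwise multiply by the Ax/Ay/Az operator (or its time-dependent AST), the inverse FFT, and a second rescale; the flip flag reverses the axis order on alternate calls to symmetrize the splitting. Because cuFFT/hipFFT transforms are unnormalized, each forward/inverse pair costs two multiplications by 1/sqrt(N). The sketch below condenses that per-axis pattern into a single CUDA function; the names pointwise_mult, scale and apply_axis are illustrative stand-ins for the cMult and scalarMult kernels used in the real code, and the launch configuration is an assumption.

#include <cufft.h>
#include <cmath>

// Complex pointwise multiply: w[i] *= op[i] (stand-in for cMult).
__global__ void pointwise_mult(cufftDoubleComplex *w,
                               const cufftDoubleComplex *op, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        cufftDoubleComplex a = w[i], b = op[i];
        w[i].x = a.x * b.x - a.y * b.y;
        w[i].y = a.x * b.y + a.y * b.x;
    }
}

// Real scalar multiply: w[i] *= s (stand-in for scalarMult).
__global__ void scale(cufftDoubleComplex *w, double s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) { w[i].x *= s; w[i].y *= s; }
}

// One axis of the gauge step: FFT -> renorm -> multiply by A -> iFFT -> renorm.
void apply_axis(cufftHandle plan, cufftDoubleComplex *wfc,
                const cufftDoubleComplex *op, int n)
{
    double renorm = 1.0 / sqrt((double)n);
    int threads = 256, blocks = (n + threads - 1) / threads;
    cufftExecZ2Z(plan, wfc, wfc, CUFFT_FORWARD);
    scale<<<blocks, threads>>>(wfc, renorm, n);
    pointwise_mult<<<blocks, threads>>>(wfc, op, n);
    cufftExecZ2Z(plan, wfc, wfc, CUFFT_INVERSE);
    scale<<<blocks, threads>>>(wfc, renorm, n);
}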
eff64437431a90c3c71b97097ecb87a4230757f8.cu
#include "../include/evolution.h" #include "../include/vortex_3d.h" // 3D void apply_gauge(Grid &par, double2 *wfc, double2 *Ax, double2 *Ay, double2 *Az, double renorm_factor_x, double renorm_factor_y, double renorm_factor_z, bool flip, cufftHandle plan_1d, cufftHandle plan_dim2, cufftHandle plan_dim3, double dx, double dy, double dz, double time, int yDim, int size){ dim3 grid = par.grid; dim3 threads = par.threads; if (flip){ // 1d forward / mult by Ax cufftHandleError( cufftExecZ2Z(plan_1d, wfc, wfc, CUFFT_FORWARD) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_x, wfc); cudaCheckError(); if(par.bval("Ax_time")){ EqnNode_gpu* Ax_eqn = par.astval("Ax"); int e_num = par.ival("Ax_num"); ast_cmult<<<grid,threads>>>(wfc, wfc, Ax_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(wfc, (cufftDoubleComplex*) Ax, wfc); cudaCheckError(); } cufftHandleError( cufftExecZ2Z(plan_1d, wfc, wfc, CUFFT_INVERSE) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_x, wfc); cudaCheckError(); // loop to multiply by Ay for (int i = 0; i < yDim; i++){ cufftHandleError( cufftExecZ2Z(plan_dim2, &wfc[i*size], &wfc[i*size], CUFFT_FORWARD) ); } scalarMult<<<grid,threads>>>(wfc, renorm_factor_y, wfc); cudaCheckError(); if(par.bval("Ay_time")){ EqnNode_gpu* Ay_eqn = par.astval("Ay"); int e_num = par.ival("Ay_num"); ast_cmult<<<grid,threads>>>(wfc, wfc, Ay_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(wfc, (cufftDoubleComplex*) Ay, wfc); cudaCheckError(); } for (int i = 0; i < yDim; i++){ //size = xDim * zDim; cufftHandleError( cufftExecZ2Z(plan_dim2, &wfc[i*size], &wfc[i*size], CUFFT_INVERSE) ); } scalarMult<<<grid,threads>>>(wfc, renorm_factor_y, wfc); cudaCheckError(); // 1D FFT to Az cufftHandleError( cufftExecZ2Z(plan_dim3, wfc, wfc, CUFFT_FORWARD) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_z, wfc); cudaCheckError(); if(par.bval("Az_time")){ EqnNode_gpu* Az_eqn = par.astval("Az"); int e_num = par.ival("Az_num"); ast_cmult<<<grid,threads>>>(wfc, wfc, Az_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(wfc, (cufftDoubleComplex*) Az, wfc); cudaCheckError(); } cufftHandleError( cufftExecZ2Z(plan_dim3, wfc, wfc, CUFFT_INVERSE) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_z, wfc); cudaCheckError(); } else{ // 1D FFT to Az cufftHandleError( cufftExecZ2Z(plan_dim3, wfc, wfc, CUFFT_FORWARD) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_z, wfc); cudaCheckError(); if(par.bval("Az_time")){ EqnNode_gpu* Az_eqn = par.astval("Az"); int e_num = par.ival("Az_num"); ast_cmult<<<grid,threads>>>(wfc, wfc, Az_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(wfc, (cufftDoubleComplex*) Az, wfc); cudaCheckError(); } cufftHandleError( cufftExecZ2Z(plan_dim3, wfc, wfc, CUFFT_INVERSE) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_z, wfc); cudaCheckError(); // loop to multiply by Ay for (int i = 0; i < yDim; i++){ cufftHandleError( cufftExecZ2Z(plan_dim2, &wfc[i*size], &wfc[i*size], CUFFT_FORWARD) ); } scalarMult<<<grid,threads>>>(wfc, renorm_factor_y, wfc); cudaCheckError(); if(par.bval("Ay_time")){ EqnNode_gpu* Ay_eqn = par.astval("Ay"); int e_num = par.ival("Ay_num"); ast_cmult<<<grid,threads>>>(wfc, wfc, Ay_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(wfc, (cufftDoubleComplex*) Ay, wfc); cudaCheckError(); } for (int i = 0; i < yDim; i++){ //size = xDim * zDim; cufftHandleError( cufftExecZ2Z(plan_dim2, &wfc[i*size], &wfc[i*size], CUFFT_INVERSE) ); } 
scalarMult<<<grid,threads>>>(wfc, renorm_factor_y, wfc); cudaCheckError(); // 1d forward / mult by Ax cufftHandleError( cufftExecZ2Z(plan_1d, wfc, wfc, CUFFT_FORWARD) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_x, wfc); cudaCheckError(); if(par.bval("Ax_time")){ EqnNode_gpu* Ax_eqn = par.astval("Ax"); int e_num = par.ival("Ax_num"); ast_cmult<<<grid,threads>>>(wfc, wfc, Ax_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(wfc, (cufftDoubleComplex*) Ax, wfc); cudaCheckError(); } cufftHandleError( cufftExecZ2Z(plan_1d, wfc, wfc, CUFFT_INVERSE) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_x, wfc); cudaCheckError(); } } // 2D void apply_gauge(Grid &par, double2 *wfc, double2 *Ax, double2 *Ay, double renorm_factor_x, double renorm_factor_y, bool flip, cufftHandle plan_1d, cufftHandle plan_dim2, double dx, double dy, double dz, double time){ dim3 grid = par.grid; dim3 threads = par.threads; if (flip){ // 1d forward / mult by Ax cufftHandleError( cufftExecZ2Z(plan_1d, wfc, wfc, CUFFT_FORWARD) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_y ,wfc); cudaCheckError(); if(par.bval("Ax_time")){ EqnNode_gpu* Ax_eqn = par.astval("Ax"); int e_num = par.ival("Ax_num"); ast_cmult<<<grid,threads>>>(wfc, wfc, Ax_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(wfc, (cufftDoubleComplex*) Ax, wfc); cudaCheckError(); } cufftHandleError( cufftExecZ2Z(plan_1d, wfc, wfc, CUFFT_INVERSE) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_x, wfc); cudaCheckError(); // 1D FFT to wfc_pAy cufftHandleError( cufftExecZ2Z(plan_dim2, wfc, wfc, CUFFT_FORWARD) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_x, wfc); cudaCheckError(); if(par.bval("Ay_time")){ EqnNode_gpu* Ay_eqn = par.astval("Ay"); int e_num = par.ival("Ay_num"); ast_cmult<<<grid,threads>>>(wfc, wfc, Ay_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(wfc, (cufftDoubleComplex*) Ay, wfc); cudaCheckError(); } cufftHandleError( cufftExecZ2Z(plan_dim2, wfc, wfc, CUFFT_INVERSE) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_y, wfc); cudaCheckError(); } else{ // 1D FFT to wfc_pAy cufftHandleError( cufftExecZ2Z(plan_dim2, wfc, wfc, CUFFT_FORWARD) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_x, wfc); cudaCheckError(); if(par.bval("Ay_time")){ EqnNode_gpu* Ay_eqn = par.astval("Ay"); int e_num = par.ival("Ay_num"); ast_cmult<<<grid,threads>>>(wfc, wfc, Ay_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(wfc, (cufftDoubleComplex*) Ay, wfc); cudaCheckError(); } cufftHandleError( cufftExecZ2Z(plan_dim2, wfc, wfc, CUFFT_INVERSE) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_y, wfc); cudaCheckError(); // 1d forward / mult by Ax cufftHandleError( cufftExecZ2Z(plan_1d, wfc, wfc, CUFFT_FORWARD) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_y ,wfc); cudaCheckError(); if(par.bval("Ax_time")){ EqnNode_gpu* Ax_eqn = par.astval("Ax"); int e_num = par.ival("Ax_num"); ast_cmult<<<grid,threads>>>(wfc, wfc, Ax_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(wfc, (cufftDoubleComplex*) Ax, wfc); cudaCheckError(); } cufftHandleError( cufftExecZ2Z(plan_1d, wfc, wfc, CUFFT_INVERSE) ); scalarMult<<<grid,threads>>>(wfc, renorm_factor_x, wfc); cudaCheckError(); } } void evolve(Grid &par, int numSteps, unsigned int gstate, std::string buffer){ // Re-establishing variables from parsed Grid class std::string data_dir = par.sval("data_dir"); int dimnum = par.ival("dimnum"); double omega = 
par.dval("omega"); double angle_sweep = par.dval("angle_sweep"); double gdt = par.dval("gdt"); double dt = par.dval("dt"); double omegaX = par.dval("omegaX"); double omegaY = par.dval("omegaY"); double mass = par.dval("mass"); double dx = par.dval("dx"); double dy = 1; double dz = 1; double interaction = par.dval("interaction"); double laser_power = par.dval("laser_power"); double gDenConst = par.dval("gDenConst"); double thresh_const = par.dval("thresh_const"); double *x = par.dsval("x"); double *y; double *V = par.dsval("V"); double *Phi = par.dsval("Phi"); double2 *gpu1dpAx = par.cufftDoubleComplexval("pAx_gpu"); double2 *gpu1dpAy; double2 *gpu1dpAz; double *Phi_gpu = par.dsval("Phi_gpu"); bool write_it = par.bval("write_it"); bool graph = par.bval("graph"); int N = par.ival("atoms"); int printSteps = par.ival("printSteps"); int energy_calc_steps = par.ival("energy_calc_steps"); double energy_calc_threshold = par.dval("energy_calc_threshold"); bool nonlin = par.bval("gpe"); bool lz = par.bval("corotating"); bool ramp = par.bval("ramp"); int ramp_type = par.ival("ramp_type"); int xDim = par.ival("xDim"); int yDim = 1; int zDim = 1; cufftDoubleComplex *wfc = par.cufftDoubleComplexval("wfc"); cufftDoubleComplex *gpuWfc = par.cufftDoubleComplexval("wfc_gpu"); cufftDoubleComplex *K_gpu = par.cufftDoubleComplexval("K_gpu"); cufftDoubleComplex *V_gpu = par.cufftDoubleComplexval("V_gpu"); if (dimnum > 1){ dy = par.dval("dy"); y = par.dsval("y"); gpu1dpAy = par.cufftDoubleComplexval("pAy_gpu"); yDim = par.ival("yDim"); } if (dimnum > 2){ dz = par.dval("dz"); gpu1dpAz = par.cufftDoubleComplexval("pAz_gpu"); zDim = par.ival("zDim"); } int gridSize = xDim * yDim * zDim; // getting data from Cuda class cufftHandle plan_1d = par.ival("plan_1d"); cufftHandle plan_2d = par.ival("plan_2d"); cufftHandle plan_other2d = par.ival("plan_other2d"); cufftHandle plan_3d = par.ival("plan_3d"); cufftHandle plan_dim2 = par.ival("plan_dim2"); cufftHandle plan_dim3 = par.ival("plan_dim3"); dim3 threads = par.threads; dim3 grid = par.grid; // Because no two operations are created equally. // Multiplication is faster than divisions. double renorm_factor_nd=1.0/pow(gridSize,0.5); double renorm_factor_x=1.0/pow(xDim,0.5); double renorm_factor_y=1.0/pow(yDim,0.5); double renorm_factor_z=1.0/pow(zDim,0.5); clock_t begin, end; double time_spent; double Dt; if(gstate==0){ Dt = gdt; printf("Timestep for groundstate solver set as: %E\n",Dt); } else{ Dt = dt; printf("Timestep for evolution set as: %E\n",Dt); } begin = clock(); double omega_0=omega*omegaX; // 2D VORTEX TRACKING double mask_2d = par.dval("mask_2d"); int x0_shift = par.dval("x0_shift"); int y0_shift = par.dval("y0_shift"); int charge = par.ival("charge"); int kill_idx = par.ival("kill_idx"); cufftDoubleComplex *EV_opt = par.cufftDoubleComplexval("EV_opt"); int kick_it = par.ival("kick_it"); double *V_opt = par.dsval("V_opt"); // Double buffering and will attempt to thread free and calloc operations to // hide time penalty. Or may not bother. int num_vortices[2] = {0,0}; // binary matrix of size xDim*yDim, // 1 for vortex at specified index, 0 otherwise int* vortexLocation; //int* olMaxLocation = (int*) calloc(xDim*yDim,sizeof(int)); std::shared_ptr<Vtx::Vortex> central_vortex; //vortex closest to the central position /* central_vortex.coords.x = -1; central_vortex.coords.y = -1; central_vortex.coordsD.x = -1.; central_vortex.coordsD.y = -1.; central_vortex.wind = 0; */ // Angle of vortex lattice. Add to optical lattice for alignment. 
double vort_angle; // array of vortex coordinates from vortexLocation 1's //struct Vtx::Vortex *vortCoords = NULL; std::shared_ptr<Vtx::VtxList> vortCoords = std::make_shared<Vtx::VtxList>(7); //std::vector<std::shared_ptr<Vtx::Vortex> vortCoords; //Previous array of vortex coordinates from vortexLocation 1's //struct Vtx::Vortex *vortCoordsP = NULL; //std::vector<struct Vtx::Vortex> vortCoordsP; std::shared_ptr<Vtx::VtxList> vortCoordsP = std::make_shared<Vtx::VtxList>(7); LatticeGraph::Lattice lattice; //Vortex lattice graph. double* adjMat; // Assuming triangular lattice at rotatio //std::cout << "numSteps is: " << numSteps << '\n'; // Iterating through all of the steps in either g or esteps. for(int i=0; i < numSteps; ++i){ double time = Dt*i; if (ramp){ //Adjusts omega for the appropriate trap frequency. if (ramp_type == 1){ if (i == 0){ omega_0 = (double)omega; } else{ omega_0 = (double)i / (double)(i+1); } } else{ if (i == 0){ omega_0=(double)omega/(double)(numSteps); } else{ omega_0 = (double)(i+1) / (double)i; } } } cudaHandleError( cudaMemcpy(wfc, gpuWfc, sizeof(cufftDoubleComplex)*xDim*yDim*zDim, cudaMemcpyDeviceToHost) ); // Print-out at pre-determined rate. // Vortex & wfc analysis performed here also. if(i % printSteps == 0) { // If the unit_test flag is on, we need a special case printf("Step: %d Omega: %lf\n", i, omega_0); // Printing out time of iteration end = clock(); time_spent = (double) (end - begin) / CLOCKS_PER_SEC; printf("Time spent: %lf\n", time_spent); std::string fileName = ""; //printf("ramp=%d gstate=%d rg=%d \n", // ramp, gstate, ramp | (gstate << 1)); switch (ramp | (gstate << 1)) { case 0: //Groundstate solver, constant Omega value. { fileName = "wfc_0_const"; break; } case 1: //Groundstate solver, ramped Omega value. { fileName = "wfc_0_ramp"; break; } case 2: //Real-time evolution, constant Omega value. { if (dimnum == 3){ // Note: In the case of 3d, we need to think about // vortex tracking in a new way. // It may be as simple as splitting the problem // into 2D elements and working from there, but // let's look into it when we need it in the // future. std::cout << "commencing 3d vortex tracking" << '\n'; // Creating the necessary double* values double* edges = (double *)malloc(sizeof(double) *gridSize); find_edges(par, wfc, edges); double* edges_gpu = par.dsval("edges_gpu"); // Now we need to output everything if (write_it){ FileIO::writeOutDouble(buffer, data_dir+"Edges", edges, gridSize, i); } free(edges); } else if (dimnum == 2){ vortexLocation = (int *) calloc(xDim * yDim, sizeof(int)); num_vortices[0] = Tracker::findVortex(vortexLocation, wfc, mask_2d, xDim, x, i); // If initial step, locate vortices, least-squares to find // exact centre, calculate lattice angle, generate optical // lattice. if (i == 0) { if(num_vortices[0] > 0){ //Reserve enough space for the vortices //reserve(num_vortices[0]); vortCoords = std::make_shared<Vtx::VtxList> (num_vortices[0]); vortCoordsP = std::make_shared<Vtx::VtxList> (num_vortices[0]); //Locate the vortex positions to the nearest grid, then //perform a least-squares fit to determine the location //to sub-grid reolution. 
Tracker::vortPos(vortexLocation, vortCoords->getVortices(), xDim, wfc); Tracker::lsFit(vortCoords->getVortices(),wfc,xDim); //Find the centre-most vortex in the lattice central_vortex = Tracker::vortCentre(vortCoords-> getVortices(), xDim); //Determine the Angle formed by the lattice relative to //the x-axis vort_angle = Tracker::vortAngle(vortCoords-> getVortices(), central_vortex); //Store the vortex angle in the parameter file par.store("Vort_angle", vort_angle); //Determine average lattice spacing. double sepAvg = Tracker::vortSepAvg(vortCoords-> getVortices(), central_vortex); par.store("Central_vort_x", (double) central_vortex->getCoords().x); par.store("Central_vort_y", (double) central_vortex->getCoords().y); par.store("Central_vort_winding", (double) central_vortex->getWinding()); par.store("Num_vort", (double) vortCoords-> getVortices().size()); //Setup the optical lattice to match the spacing and // angle+angle_sweep of the vortex lattice. // Amplitude matched by setting laser_power // parameter switch. optLatSetup(central_vortex, V, vortCoords->getVortices(), vort_angle + PI * angle_sweep / 180.0, laser_power * HBAR * sqrt(omegaX * omegaY), V_opt, x, y, par); } // If kick_it param is 2, perform a single kick of the // optical lattice for the first timestep only. // This is performed by loading the // EV_opt exp(V + V_opt) array into GPU memory // for the potential. if (kick_it == 2) { printf("Kicked it 1\n"); cudaHandleError( cudaMemcpy(V_gpu, EV_opt, sizeof(cufftDoubleComplex) * xDim * yDim, cudaMemcpyHostToDevice) ); } // Write out the newly specified potential // and exp potential to files if(write_it){ FileIO::writeOutDouble(buffer, data_dir + "V_opt_1", V_opt, xDim * yDim, 0); FileIO::writeOut(buffer, data_dir + "EV_opt_1", EV_opt, xDim * yDim, 0); //Store necessary parameters to Params.dat file. FileIO::writeOutParam(buffer, par, data_dir + "Params.dat"); } } //If i!=0 and the number of vortices changes // if num_vortices[1] < num_vortices[0] ... Fewer vortices else { if (num_vortices[0] > 0){ Tracker::vortPos(vortexLocation, vortCoords->getVortices(), xDim, wfc); Tracker::lsFit(vortCoords->getVortices(), wfc, xDim); Tracker::vortArrange(vortCoords->getVortices(), vortCoordsP->getVortices()); if(write_it){ FileIO::writeOutInt(buffer, data_dir + "vLoc_", vortexLocation, xDim * yDim, i); } } } // The following will eventually be modified and moved into // a new library that works closely wy0_shiftUE. Used to // also defined for vortex elimination using graph positions // and UID numbers. if (graph && num_vortices[0] > 0) { for (int ii = 0; ii < vortCoords->getVortices().size(); ++ii) { std::shared_ptr<LatticeGraph::Node> n(new LatticeGraph::Node( *vortCoords->getVortices().at(ii).get())); lattice.addVortex(std::move(n)); } unsigned int *uids = (unsigned int *) malloc( sizeof(unsigned int) * lattice.getVortices().size()); for (size_t a=0; a < lattice.getVortices().size(); ++a){ uids[a] = lattice.getVortexIdx(a)->getUid(); } if(i==0) { //Lambda for vortex annihilation/creation. 
auto killIt=[&](int idx, int winding, double delta_x, double delta_y) { if (abs(delta_x) > 0 || abs(delta_y) > 0){ // Killing initial vortex and then // imprinting new one WFC::phaseWinding(Phi, 1, x,y, dx,dy, lattice.getVortexUid(idx)-> getData().getCoordsD().x, lattice.getVortexUid(idx)-> getData().getCoordsD().y, xDim); cudaHandleError( cudaMemcpy(Phi_gpu, Phi, sizeof(double) * xDim * yDim, cudaMemcpyHostToDevice) ); cMultPhi <<<grid, threads>>>(gpuWfc,Phi_gpu, gpuWfc); cudaCheckError(); // Imprinting new one int cval = -winding; WFC::phaseWinding(Phi, cval, x,y, dx,dy, lattice.getVortexUid(idx)-> getData().getCoordsD().x + delta_x, lattice.getVortexUid(idx)-> getData().getCoordsD().y + delta_y, xDim); // Sending to device for imprinting cudaHandleError( cudaMemcpy(Phi_gpu, Phi, sizeof(double) * xDim * yDim, cudaMemcpyHostToDevice) ); cMultPhi <<<grid, threads>>>(gpuWfc,Phi_gpu, gpuWfc); cudaCheckError(); } else{ int cval = -(winding-1); WFC::phaseWinding(Phi, cval, x,y,dx,dy, lattice.getVortexUid(idx)-> getData().getCoordsD().x, lattice.getVortexUid(idx)-> getData().getCoordsD().y, xDim); cudaHandleError( cudaMemcpy(Phi_gpu, Phi, sizeof(double) * xDim * yDim, cudaMemcpyHostToDevice) ); cMultPhi <<<grid, threads>>>(gpuWfc,Phi_gpu, gpuWfc); cudaCheckError(); } }; if (kill_idx > 0){ killIt(kill_idx, charge, x0_shift, y0_shift); } } lattice.createEdges(1.5 * 2e-5 / dx); //Assumes that vortices //only form edges when the distance is upto 1.5*2e-5. //Replace with delaunay triangulation determined edges //for better computational scaling (and sanity) //O(n^2) -> terrible implementation. It works for now. //Generates the adjacency matrix from the graph, and //outputs to a Mathematica compatible format. adjMat = (double *)calloc(lattice.getVortices().size() * lattice.getVortices().size(), sizeof(double)); lattice.genAdjMat(adjMat); if (write_it){ FileIO::writeOutAdjMat(buffer, data_dir + "graph", adjMat, uids, lattice.getVortices().size(), i); } //Free and clear all memory blocks free(adjMat); free(uids); lattice.getVortices().clear(); lattice.getEdges().clear(); //exit(0); } //Write out the vortex locations if(write_it){ FileIO::writeOutVortex(buffer, data_dir + "vort_arr", vortCoords->getVortices(), i); } printf("Located %lu vortices\n", vortCoords->getVortices().size()); //Free memory block for now. free(vortexLocation); //Current values become previous values. 
num_vortices[1] = num_vortices[0]; vortCoords->getVortices().swap(vortCoordsP->getVortices()); vortCoords->getVortices().clear(); } fileName = "wfc_ev"; break; } case 3: { fileName = "wfc_ev_ramp"; break; } default: { break; } } //std::cout << "writing" << '\n'; if (write_it) { FileIO::writeOut(buffer, data_dir + fileName, wfc, xDim*yDim*zDim, i); } //std::cout << "written" << '\n'; } // No longer writing out // U_r(dt/2)*wfc if(nonlin == 1){ if(par.bval("V_time")){ EqnNode_gpu* V_eqn = par.astval("V"); int e_num = par.ival("V_num"); cMultDensity_ast<<<grid,threads>>>(V_eqn,gpuWfc,gpuWfc, dx, dy, dz, time, e_num, 0.5*Dt, gstate,interaction*gDenConst); cudaCheckError(); } else{ cMultDensity<<<grid,threads>>>(V_gpu,gpuWfc,gpuWfc,0.5*Dt, gstate,interaction*gDenConst); cudaCheckError(); } } else { if(par.bval("V_time")){ EqnNode_gpu* V_eqn = par.astval("V"); int e_num = par.ival("V_num"); ast_op_mult<<<grid,threads>>>(gpuWfc,gpuWfc, V_eqn, dx, dy, dz, time, e_num, gstate+1, Dt); cudaCheckError(); } else{ cMult<<<grid,threads>>>(V_gpu,gpuWfc,gpuWfc); cudaCheckError(); } } // U_p(dt)*fft2(wfc) cufftHandleError( cufftExecZ2Z(plan_3d,gpuWfc,gpuWfc,CUFFT_FORWARD) ); // Normalise scalarMult<<<grid,threads>>>(gpuWfc,renorm_factor_nd,gpuWfc); cudaCheckError(); if (par.bval("K_time")){ EqnNode_gpu* k_eqn = par.astval("k"); int e_num = par.ival("k_num"); ast_op_mult<<<grid,threads>>>(gpuWfc,gpuWfc, k_eqn, dx, dy, dz, time, e_num, gstate+1, Dt); } else{ cMult<<<grid,threads>>>(K_gpu,gpuWfc,gpuWfc); cudaCheckError(); } cufftHandleError( cufftExecZ2Z(plan_3d,gpuWfc,gpuWfc,CUFFT_INVERSE) ); // Normalise scalarMult<<<grid,threads>>>(gpuWfc,renorm_factor_nd,gpuWfc); cudaCheckError(); // U_r(dt/2)*wfc if(nonlin == 1){ if(par.bval("V_time")){ EqnNode_gpu* V_eqn = par.astval("V"); int e_num = par.ival("V_num"); cMultDensity_ast<<<grid,threads>>>(V_eqn,gpuWfc,gpuWfc, dx, dy, dz, time, e_num, 0.5*Dt, gstate,interaction*gDenConst); cudaCheckError(); } else{ cMultDensity<<<grid,threads>>>(V_gpu,gpuWfc,gpuWfc,0.5*Dt, gstate,interaction*gDenConst); cudaCheckError(); } } else { if(par.bval("V_time")){ EqnNode_gpu* V_eqn = par.astval("V"); int e_num = par.ival("V_num"); ast_op_mult<<<grid,threads>>>(gpuWfc,gpuWfc, V_eqn, dx, dy, dz, time, e_num, gstate+1, Dt); cudaCheckError(); } else{ cMult<<<grid,threads>>>(V_gpu,gpuWfc,gpuWfc); cudaCheckError(); } } // Angular momentum pAy-pAx (if engaged) // if(lz == true){ // Multiplying by ramping factor if necessary // Note: using scalarPow to do the scaling inside of the exp if (ramp){ scalarPow<<<grid,threads>>>((cufftDoubleComplex*) gpu1dpAy, omega_0, (cufftDoubleComplex*) gpu1dpAy); cudaCheckError(); if (dimnum > 1){ scalarPow<<<grid,threads>>>((cufftDoubleComplex*) gpu1dpAx, omega_0, (cufftDoubleComplex*) gpu1dpAx); cudaCheckError(); } if (dimnum > 2){ scalarPow<<<grid,threads>>>((cufftDoubleComplex*) gpu1dpAz, omega_0, (cufftDoubleComplex*) gpu1dpAz); cudaCheckError(); } } int size = xDim*zDim; if (dimnum == 3){ apply_gauge(par, gpuWfc, gpu1dpAx, gpu1dpAy, gpu1dpAz, renorm_factor_x, renorm_factor_y, renorm_factor_z, i%2, plan_1d, plan_dim2, plan_dim3, dx, dy, dz, time, yDim, size); } else if (dimnum == 2){ apply_gauge(par, gpuWfc, gpu1dpAx, gpu1dpAy, renorm_factor_x, renorm_factor_y, i%2, plan_1d, plan_other2d, dx, dy, dz, time); } else if (dimnum == 1){ cufftHandleError( cufftExecZ2Z(plan_1d,gpuWfc,gpuWfc,CUFFT_FORWARD) ); scalarMult<<<grid,threads>>>(gpuWfc,renorm_factor_x,gpuWfc); cudaCheckError(); if(par.bval("Ax_time")){ EqnNode_gpu* Ax_eqn = par.astval("Ax"); int 
e_num = par.ival("Ax_num"); ast_cmult<<<grid,threads>>>(gpuWfc, gpuWfc, Ax_eqn, dx, dy, dz, time, e_num); cudaCheckError(); } else{ cMult<<<grid,threads>>>(gpuWfc, (cufftDoubleComplex*) gpu1dpAx, gpuWfc); cudaCheckError(); } cufftHandleError( cufftExecZ2Z(plan_1d,gpuWfc,gpuWfc, CUFFT_INVERSE) ); scalarMult<<<grid,threads>>>(gpuWfc, renorm_factor_x, gpuWfc); cudaCheckError(); } } if(gstate==0){ parSum(gpuWfc, par); } if (par.bval("energy_calc") && (i % (energy_calc_steps == 0 ? printSteps : energy_calc_steps) == 0)) { double energy = energy_calc(par, gpuWfc); printf("Energy[t@%d]=%E\n",i,energy); std::ofstream energy_out; std::string mode = "energyi.dat"; if (gstate == 1){ mode = "energy.dat"; } if (i == 0){ energy_out.open(data_dir + mode); } else{ energy_out.open(data_dir + mode, std::ios::out | std::ios::app); } energy_out << energy << '\n'; energy_out.close(); double oldEnergy; if (i != 0) { oldEnergy = par.dval("energy"); } else { oldEnergy = 0; } par.store("energy", energy); if (i != 0 && fabs(oldEnergy - energy) < energy_calc_threshold * oldEnergy && gstate == 0) { printf("Stopping early at step %d with energy %E\n", i, energy); break; } } } par.store("wfc", wfc); par.store("wfc_gpu", gpuWfc); }
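// Note on the renorm_factor_* multiplications above: cuFFT transforms are
// unnormalized, so a forward followed by an inverse transform scales the data
// by the transform length. Multiplying by 1/sqrt(N) after each of the two
// transforms restores the original norm. A minimal sketch of that idiom, with
// a hypothetical kernel and plan (not this project's own code):

#include <cufft.h>
#include <math.h>

// Scales every complex element by `factor`.
__global__ void renormalize(cufftDoubleComplex *data, double factor, int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n){
        data[i].x *= factor;
        data[i].y *= factor;
    }
}

// Forward transform, operator applied in momentum space, inverse transform.
// The round trip alone would scale the data by n; the two multiplications by
// 1/sqrt(n) cancel that.
void fft_roundtrip(cufftHandle plan, cufftDoubleComplex *d_data, int n){
    double renorm = 1.0 / sqrt((double) n);
    cufftExecZ2Z(plan, d_data, d_data, CUFFT_FORWARD);
    renormalize<<<(n + 255) / 256, 256>>>(d_data, renorm, n);
    // ... multiply by the momentum-space operator here ...
    cufftExecZ2Z(plan, d_data, d_data, CUFFT_INVERSE);
    renormalize<<<(n + 255) / 256, 256>>>(d_data, renorm, n);
}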
908c6037211cbeb30501dae8f5e2bbaf1a797d60.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2023 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. /*! * \file mpcd/SlitGeometryFillerGPU.cu * \brief Defines GPU functions and kernels used by mpcd::SlitGeometryFillerGPU */ #include "ParticleDataUtilities.h" #include "SlitGeometryFillerGPU.cuh" #include "hoomd/RNGIdentifiers.h" #include "hoomd/RandomNumbers.h" namespace hoomd { namespace mpcd { namespace gpu { namespace kernel { /*! * \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param geom Slit geometry to fill * \param z_min Lower bound to lower fill region * \param z_max Upper bound to upper fill region * \param box Local simulation box * \param type Type of fill particles * \param N_lo Number of particles to fill in lower region * \param N_hi Number of particles to fill in upper region * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param vel_factor Scale factor for uniform normal velocities consistent with particle mass / * temperature \param timestep Current timestep \param seed User seed to PRNG for drawing velocities * * \b Implementation: * * Using one thread per particle (in both slabs), the thread is assigned to fill either the lower * or upper region. This defines a local cuboid of volume to fill. The thread index is translated * into a particle tag and local particle index. A random position is drawn within the cuboid. A * random velocity is drawn consistent with the speed of the moving wall. */ __global__ void slit_draw_particles(Scalar4* d_pos, Scalar4* d_vel, unsigned int* d_tag, const mpcd::detail::SlitGeometry geom, const Scalar z_min, const Scalar z_max, const BoxDim box, const unsigned int type, const unsigned int N_lo, const unsigned int N_tot, const unsigned int first_tag, const unsigned int first_idx, const Scalar vel_factor, const uint64_t timestep, const uint16_t seed) { // one thread per particle const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N_tot) return; // determine the fill region based on current index signed char sign = (idx >= N_lo) - (idx < N_lo); Scalar3 lo = box.getLo(); Scalar3 hi = box.getHi(); if (sign == -1) // bottom { lo.z = z_min; hi.z = -geom.getH(); } else // top { lo.z = geom.getH(); hi.z = z_max; } // particle tag and index const unsigned int tag = first_tag + idx; const unsigned int pidx = first_idx + idx; d_tag[pidx] = tag; // initialize random number generator for positions and velocity hoomd::RandomGenerator rng( hoomd::Seed(hoomd::RNGIdentifier::SlitGeometryFiller, timestep, seed), hoomd::Counter(tag)); d_pos[pidx] = make_scalar4(hoomd::UniformDistribution<Scalar>(lo.x, hi.x)(rng), hoomd::UniformDistribution<Scalar>(lo.y, hi.y)(rng), hoomd::UniformDistribution<Scalar>(lo.z, hi.z)(rng), __int_as_scalar(type)); hoomd::NormalDistribution<Scalar> gen(vel_factor, 0.0); Scalar3 vel; gen(vel.x, vel.y, rng); vel.z = gen(rng); // TODO: should these be given zero net-momentum contribution (relative to the frame of // reference?) d_vel[pidx] = make_scalar4(vel.x + sign * geom.getVelocity(), vel.y, vel.z, __int_as_scalar(mpcd::detail::NO_CELL)); } } // end namespace kernel /*! 
* \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param geom Slit geometry to fill * \param z_min Lower bound to lower fill region * \param z_max Upper bound to upper fill region * \param box Local simulation box * \param mass Mass of fill particles * \param type Type of fill particles * \param N_lo Number of particles to fill in lower region * \param N_hi Number of particles to fill in upper region * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param kT Temperature for fill particles * \param timestep Current timestep * \param seed User seed to PRNG for drawing velocities * \param block_size Number of threads per block * * \sa kernel::slit_draw_particles */ hipError_t slit_draw_particles(Scalar4* d_pos, Scalar4* d_vel, unsigned int* d_tag, const mpcd::detail::SlitGeometry& geom, const Scalar z_min, const Scalar z_max, const BoxDim& box, const Scalar mass, const unsigned int type, const unsigned int N_lo, const unsigned int N_hi, const unsigned int first_tag, const unsigned int first_idx, const Scalar kT, const uint64_t timestep, const uint16_t seed, const unsigned int block_size) { const unsigned int N_tot = N_lo + N_hi; if (N_tot == 0) return hipSuccess; unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)kernel::slit_draw_particles); max_block_size = attr.maxThreadsPerBlock; // precompute factor for rescaling the velocities since it is the same for all particles const Scalar vel_factor = fast::sqrt(kT / mass); unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N_tot / run_block_size + 1); hipLaunchKernelGGL(( kernel::slit_draw_particles), dim3(grid), dim3(run_block_size), 0, 0, d_pos, d_vel, d_tag, geom, z_min, z_max, box, type, N_lo, N_tot, first_tag, first_idx, vel_factor, timestep, seed); return hipSuccess; } } // end namespace gpu } // end namespace mpcd } // end namespace hoomd
908c6037211cbeb30501dae8f5e2bbaf1a797d60.cu
// Copyright (c) 2009-2023 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. /*! * \file mpcd/SlitGeometryFillerGPU.cu * \brief Defines GPU functions and kernels used by mpcd::SlitGeometryFillerGPU */ #include "ParticleDataUtilities.h" #include "SlitGeometryFillerGPU.cuh" #include "hoomd/RNGIdentifiers.h" #include "hoomd/RandomNumbers.h" namespace hoomd { namespace mpcd { namespace gpu { namespace kernel { /*! * \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param geom Slit geometry to fill * \param z_min Lower bound to lower fill region * \param z_max Upper bound to upper fill region * \param box Local simulation box * \param type Type of fill particles * \param N_lo Number of particles to fill in lower region * \param N_hi Number of particles to fill in upper region * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param vel_factor Scale factor for uniform normal velocities consistent with particle mass / * temperature \param timestep Current timestep \param seed User seed to PRNG for drawing velocities * * \b Implementation: * * Using one thread per particle (in both slabs), the thread is assigned to fill either the lower * or upper region. This defines a local cuboid of volume to fill. The thread index is translated * into a particle tag and local particle index. A random position is drawn within the cuboid. A * random velocity is drawn consistent with the speed of the moving wall. */ __global__ void slit_draw_particles(Scalar4* d_pos, Scalar4* d_vel, unsigned int* d_tag, const mpcd::detail::SlitGeometry geom, const Scalar z_min, const Scalar z_max, const BoxDim box, const unsigned int type, const unsigned int N_lo, const unsigned int N_tot, const unsigned int first_tag, const unsigned int first_idx, const Scalar vel_factor, const uint64_t timestep, const uint16_t seed) { // one thread per particle const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N_tot) return; // determine the fill region based on current index signed char sign = (idx >= N_lo) - (idx < N_lo); Scalar3 lo = box.getLo(); Scalar3 hi = box.getHi(); if (sign == -1) // bottom { lo.z = z_min; hi.z = -geom.getH(); } else // top { lo.z = geom.getH(); hi.z = z_max; } // particle tag and index const unsigned int tag = first_tag + idx; const unsigned int pidx = first_idx + idx; d_tag[pidx] = tag; // initialize random number generator for positions and velocity hoomd::RandomGenerator rng( hoomd::Seed(hoomd::RNGIdentifier::SlitGeometryFiller, timestep, seed), hoomd::Counter(tag)); d_pos[pidx] = make_scalar4(hoomd::UniformDistribution<Scalar>(lo.x, hi.x)(rng), hoomd::UniformDistribution<Scalar>(lo.y, hi.y)(rng), hoomd::UniformDistribution<Scalar>(lo.z, hi.z)(rng), __int_as_scalar(type)); hoomd::NormalDistribution<Scalar> gen(vel_factor, 0.0); Scalar3 vel; gen(vel.x, vel.y, rng); vel.z = gen(rng); // TODO: should these be given zero net-momentum contribution (relative to the frame of // reference?) d_vel[pidx] = make_scalar4(vel.x + sign * geom.getVelocity(), vel.y, vel.z, __int_as_scalar(mpcd::detail::NO_CELL)); } } // end namespace kernel /*! 
* \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param geom Slit geometry to fill * \param z_min Lower bound to lower fill region * \param z_max Upper bound to upper fill region * \param box Local simulation box * \param mass Mass of fill particles * \param type Type of fill particles * \param N_lo Number of particles to fill in lower region * \param N_hi Number of particles to fill in upper region * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param kT Temperature for fill particles * \param timestep Current timestep * \param seed User seed to PRNG for drawing velocities * \param block_size Number of threads per block * * \sa kernel::slit_draw_particles */ cudaError_t slit_draw_particles(Scalar4* d_pos, Scalar4* d_vel, unsigned int* d_tag, const mpcd::detail::SlitGeometry& geom, const Scalar z_min, const Scalar z_max, const BoxDim& box, const Scalar mass, const unsigned int type, const unsigned int N_lo, const unsigned int N_hi, const unsigned int first_tag, const unsigned int first_idx, const Scalar kT, const uint64_t timestep, const uint16_t seed, const unsigned int block_size) { const unsigned int N_tot = N_lo + N_hi; if (N_tot == 0) return cudaSuccess; unsigned int max_block_size; cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)kernel::slit_draw_particles); max_block_size = attr.maxThreadsPerBlock; // precompute factor for rescaling the velocities since it is the same for all particles const Scalar vel_factor = fast::sqrt(kT / mass); unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N_tot / run_block_size + 1); kernel::slit_draw_particles<<<grid, run_block_size>>>(d_pos, d_vel, d_tag, geom, z_min, z_max, box, type, N_lo, N_tot, first_tag, first_idx, vel_factor, timestep, seed); return cudaSuccess; } } // end namespace gpu } // end namespace mpcd } // end namespace hoomd
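// The fill kernel above selects the slab branchlessly:
// (idx >= N_lo) - (idx < N_lo) evaluates to -1 for threads filling the lower
// region and +1 for threads filling the upper region, which then picks the
// z-bounds and the sign of the wall velocity. A small standalone check of that
// idiom (hypothetical values, not part of HOOMD-blue):

#include <cstdio>

int main(){
    const unsigned int N_lo = 4, N_tot = 10;
    for (unsigned int idx = 0; idx < N_tot; ++idx){
        // -1 for the lower slab (idx < N_lo), +1 for the upper slab.
        signed char sign = (idx >= N_lo) - (idx < N_lo);
        printf("idx=%u sign=%d region=%s\n", idx, (int) sign,
               sign == -1 ? "lower" : "upper");
    }
    return 0;
}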
1e2142898b890ec052c4a7d279f47838de57a5ab.hip
// !!! This is a file automatically generated by hipify!!! /* * model.c * * */ #include <math.h> #include <hip/hip_runtime.h> struct model_data_ { double mygamma; double *theta; int N_samples; int N_sensors; } model_data; extern "C" __global__ void GPU_model(double *g, double *d,double *theta,double mygamma,int N_samples,int N_sensors) { int ix= blockDim.x*blockIdx.x+threadIdx.x; int iy= blockDim.y*blockIdx.y+threadIdx.y; if(ix<N_samples && iy<N_sensors) { g[ix*N_sensors+iy] = 0.0; g[ix*N_sensors+iy] = mygamma*theta[ix*2+1]/(2*M_PI*(pow((d[0]+iy*d[1])-theta[ix*2+0],2.0) + pow(theta[ix*2+1],2))); } } extern "C" void model(double *g, double *d) { double mygamma = model_data.mygamma; double *theta = model_data.theta; int N_samples = model_data.N_samples; int N_sensors = model_data.N_sensors; ////////////////////////////////// // model.m // This function provides the model function (= velocity measurements) for the // vortex optimal sensor placement problem. // Input: // mygamma : vortex strength // theta : samples of model parameters // d : vector of design parameters, d = [x_s, h] // N_sensors : number of sensors to be placed // // Author: Franziska Krummenacher, [email protected] // Spring/Summer 2016 ////////////////////////////////// //extract number of samples //N_samples = size(theta,1); //initialize output matrix //g = zeros(N_samples,N_sensors); ////allocate GPU memory double *d_g; double *d_d; double *d_theta; hipMalloc(&d_g,N_samples*N_sensors*sizeof(double)); hipMalloc(&d_d,2*sizeof(double)); hipMalloc(&d_theta,2*N_samples*sizeof(double)); //GPU memory copy hipMemcpy(d_g,g,N_samples*N_sensors*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_d,d,2*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_theta,theta,2*N_samples*sizeof(double),hipMemcpyHostToDevice); //kernel call dim3 blocks(40,1); dim3 threads(100,5); hipLaunchKernelGGL(( GPU_model), dim3(blocks),dim3(threads), 0, 0, d_g,d_d,d_theta,mygamma,N_samples,N_sensors); hipMemcpy(g,d_g,N_samples*N_sensors*sizeof(double),hipMemcpyDeviceToHost); hipFree(d_g); hipFree(d_d); hipFree(d_theta); //for (int i = 0; i < N_samples; i++) // for (int j = 0; j < N_sensors; j++) // g[i*N_sensors+j] = 0.0; //fill in model predictions //for (int i = 0; i < N_samples; i++) // for (int k = 0; k < N_sensors; k++) // { // g[i*N_sensors+k] = mygamma*theta[i*2+1]/(2*M_PI*(pow((d[0]+k*d[1])-theta[i*2+0],2.0) + pow(theta[i*2+1],2))); // } return; }
1e2142898b890ec052c4a7d279f47838de57a5ab.cu
/* * model.c * * */ #include <math.h> #include <cuda.h> struct model_data_ { double mygamma; double *theta; int N_samples; int N_sensors; } model_data; extern "C" __global__ void GPU_model(double *g, double *d,double *theta,double mygamma,int N_samples,int N_sensors) { int ix= blockDim.x*blockIdx.x+threadIdx.x; int iy= blockDim.y*blockIdx.y+threadIdx.y; if(ix<N_samples && iy<N_sensors) { g[ix*N_sensors+iy] = 0.0; g[ix*N_sensors+iy] = mygamma*theta[ix*2+1]/(2*M_PI*(pow((d[0]+iy*d[1])-theta[ix*2+0],2.0) + pow(theta[ix*2+1],2))); } } extern "C" void model(double *g, double *d) { double mygamma = model_data.mygamma; double *theta = model_data.theta; int N_samples = model_data.N_samples; int N_sensors = model_data.N_sensors; ////////////////////////////////// // model.m // This function provides the model function (= velocity measurements) for the // vortex optimal sensor placement problem. // Input: // mygamma : vortex strength // theta : samples of model parameters // d : vector of design parameters, d = [x_s, h] // N_sensors : number of sensors to be placed // // Author: Franziska Krummenacher, [email protected] // Spring/Summer 2016 ////////////////////////////////// //extract number of samples //N_samples = size(theta,1); //initialize output matrix //g = zeros(N_samples,N_sensors); ////allocate GPU memory double *d_g; double *d_d; double *d_theta; cudaMalloc(&d_g,N_samples*N_sensors*sizeof(double)); cudaMalloc(&d_d,2*sizeof(double)); cudaMalloc(&d_theta,2*N_samples*sizeof(double)); //GPU memory copy cudaMemcpy(d_g,g,N_samples*N_sensors*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_d,d,2*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_theta,theta,2*N_samples*sizeof(double),cudaMemcpyHostToDevice); //kernel call dim3 blocks(40,1); dim3 threads(100,5); GPU_model<<<blocks,threads>>>(d_g,d_d,d_theta,mygamma,N_samples,N_sensors); cudaMemcpy(g,d_g,N_samples*N_sensors*sizeof(double),cudaMemcpyDeviceToHost); cudaFree(d_g); cudaFree(d_d); cudaFree(d_theta); //for (int i = 0; i < N_samples; i++) // for (int j = 0; j < N_sensors; j++) // g[i*N_sensors+j] = 0.0; //fill in model predictions //for (int i = 0; i < N_samples; i++) // for (int k = 0; k < N_sensors; k++) // { // g[i*N_sensors+k] = mygamma*theta[i*2+1]/(2*M_PI*(pow((d[0]+k*d[1])-theta[i*2+0],2.0) + pow(theta[i*2+1],2))); // } return; }
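// The launch above hard-codes dim3 blocks(40,1) and dim3 threads(100,5), which
// covers at most 40*100 = 4000 samples and 1*5 = 5 sensors; larger problems
// would silently leave elements of g untouched. A sketch of sizing the grid
// from the problem dimensions instead (hypothetical helper, same GPU_model
// kernel assumed):

void launch_model(double *d_g, double *d_d, double *d_theta,
                  double mygamma, int N_samples, int N_sensors){
    dim3 threads(128, 4);
    dim3 blocks((N_samples + threads.x - 1) / threads.x,
                (N_sensors + threads.y - 1) / threads.y);
    GPU_model<<<blocks, threads>>>(d_g, d_d, d_theta, mygamma,
                                   N_samples, N_sensors);
}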
2ea22bd293ec37c22e3e535a718ee9404b430065.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime_api.h> #include <math_functions.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <omp.h> #include "../headers/serialKnn.h" #include "../headers/pointsHelper.h" void printPoints(int* pts, int num , int dim); void printTime(char* text, struct timeval end , struct timeval start); __global__ void devKnnShared(float* points, float* queries, int* points_per_block, int* queries_per_block, int* res_indexes , float* res_dists, int number_of_queries , int max_points, int* dev_integral_points , int* dev_integral_queries) { int b = blockIdx.x*gridDim.y*gridDim.z + blockIdx.y*gridDim.z + blockIdx.z; int num_of_queries = queries_per_block[b]; int mulq = 0; int integral_queries = 0; int integral_points = 0; int qrs_shifter; int num_of_points ; float nbrs_dist; int nbrs_indx; int mulp ; int grid_d = gridDim.x; __shared__ float sh_pts[2000][3]; int flag = 0; float myQuery[3]; // for(int i = 0; i < b; i++){ // integral_points += points_per_block[i]; // integral_queries += queries_per_block[i]; // } integral_points = dev_integral_points[b]; integral_queries = dev_integral_queries[b]; while(mulq*blockDim.x < num_of_queries){ int q = mulq*blockDim.x + threadIdx.x; qrs_shifter = integral_queries + q; num_of_points = points_per_block[b]; nbrs_dist = 100; nbrs_indx = 1; mulp = 0; if(q < num_of_queries){ myQuery[0] = queries[qrs_shifter*3 + 0]; myQuery[1] = queries[qrs_shifter*3 + 1]; myQuery[2] = queries[qrs_shifter*3 + 2]; } while(mulp*blockDim.x < num_of_points){ int p2 = mulp*blockDim.x + threadIdx.x; int pts_shifter2 = integral_points + p2; __syncthreads(); if(p2 < num_of_points && pts_shifter2 < max_points){ for(int d = 0; d < 3; d++){ sh_pts[threadIdx.x][d] = points[pts_shifter2*3 + d]; } } __syncthreads(); if(q < num_of_queries){ int limit = min(num_of_points,(mulp+1)*blockDim.x); for(int p = mulp*blockDim.x; p < limit; p++){ int pts_shifter = integral_points + p; float dist = 0; for(int d = 0; d < 3; d++){ dist += powf(myQuery[d]- sh_pts[p - mulp*blockDim.x][d] ,2); //points[pts_shifter*3+d],2); // } dist = sqrtf(dist); if(dist < nbrs_dist){ nbrs_dist = dist; nbrs_indx = pts_shifter; } } } mulp++; } if(q < num_of_queries){ if(nbrs_dist < res_dists[qrs_shifter]){ res_dists[qrs_shifter] = nbrs_dist; res_indexes[qrs_shifter] = nbrs_indx; } } mulq++; } // Search neighbour blocks int nbrs_blocks[27]; int number_of_nbrs_blocks = 0; for(int i = -1; i <= 1; i++){ for(int j = -1; j <= 1; j++){ for(int k = -1; k <= 1; k++){ if(i != 0 | j != 0 | k != 0){ int nx = blockIdx.x + i; int ny = blockIdx.y + j; int nz = blockIdx.z + k; if(!(nx<0 | ny<0 | nz<0 | nx >=grid_d | ny >= grid_d | nz>=grid_d)){ nbrs_blocks[number_of_nbrs_blocks] = nx*grid_d*grid_d + ny*grid_d + nz; number_of_nbrs_blocks++; } } } } } for(int nb = 0; nb < number_of_nbrs_blocks; nb++){ integral_points = 0; // for(int i = 0; i < nbrs_blocks[nb]; i++){ // integral_points += points_per_block[i]; // } integral_points = dev_integral_points[nbrs_blocks[nb]]; mulq = 0; while(mulq*blockDim.x < num_of_queries){ int q = mulq*blockDim.x + threadIdx.x; qrs_shifter = integral_queries + q; num_of_points = points_per_block[nbrs_blocks[nb]]; nbrs_dist = 100; nbrs_indx = 1; mulp = 0; if(q < num_of_queries){ myQuery[0] = queries[qrs_shifter*3 + 0]; myQuery[1] = queries[qrs_shifter*3 + 1]; myQuery[2] = queries[qrs_shifter*3 + 2]; } while(mulp*blockDim.x < num_of_points){ int p2 = 
mulp*blockDim.x + threadIdx.x; int pts_shifter2 = integral_points + p2; __syncthreads(); if(p2 < num_of_points && pts_shifter2 < max_points){ for(int d = 0; d < 3; d++){ sh_pts[threadIdx.x][d] = points[pts_shifter2*3 + d]; } } __syncthreads(); if(q < num_of_queries){ int limit = min(num_of_points,(mulp+1)*blockDim.x); for(int p = mulp*blockDim.x; p < limit; p++){ int pts_shifter = integral_points + p; float dist = 0; for(int d = 0; d < 3; d++){ dist += powf(myQuery[d]- sh_pts[p - mulp*blockDim.x][d] ,2); //points[pts_shifter*3+d],2); // } dist = sqrtf(dist); if(dist < nbrs_dist){ nbrs_dist = dist; nbrs_indx = pts_shifter; } } } mulp++; } if(q < num_of_queries){ if(nbrs_dist < res_dists[qrs_shifter]){ res_dists[qrs_shifter] = nbrs_dist; res_indexes[qrs_shifter] = nbrs_indx; } } mulq++; } } } /******************** INPUT ********************* * 1st param -> number of points (default 2^5) * 2nd param -> grid dimensions (default 2^1) * 3rd param -> seed (default 1,2) *************************************************/ int main(int argc, char** argv){ long MeasurementsRes[10]; hipDeviceReset(); struct timeval totalProgramStart,totalProgramEnd,tstart,tend,knnStart,knnEnd,cumallocStart,cumallocEnd; gettimeofday(&totalProgramStart,NULL); int *cudaInit; //----------------------------------------------// int number_of_points = 5; int grid_d = 1; int k_num = 1; int seed = 1; if(argc > 1){ number_of_points = atoi(argv[1]); } number_of_points = pow(2,number_of_points); if(argc > 2){ grid_d = atoi(argv[2]); } grid_d = pow(2,grid_d); if(argc > 3){ k_num = atoi(argv[3]); } if(argc > 4){ seed = atoi(argv[4]); } int number_of_queries = number_of_points; int dimensions = 3; float side_block_length = ((float)1)/((float)grid_d); printf("Number of points:%d\nNumber of queries:%d\nDimensions:%d\nGrid Dimensions:%d\nK for k-nn:%d\nSideBlock Length%f\n", number_of_points,number_of_queries,dimensions,grid_d,k_num,side_block_length); gettimeofday(&tstart,NULL); float* points = (float*)malloc(number_of_points*dimensions*sizeof(float)); float* queries = (float*)malloc(number_of_queries*dimensions*sizeof(float)); float* grid_arranged_points = (float*)malloc(number_of_points*dimensions*sizeof(float)); //float grid_arranged_points[(int)pow(2,19)*3]; int* block_of_point = (int*)malloc(number_of_points*dimensions*sizeof(int)); int* block_of_query = (int*)malloc(number_of_queries*dimensions*sizeof(int)); int* points_per_block = (int*)malloc(grid_d*grid_d*grid_d*sizeof(int)); int* queries_per_block = (int*)malloc(grid_d*grid_d*grid_d*sizeof(int)); int* integral_points_per_block = (int*)malloc(grid_d*grid_d*grid_d*sizeof(int)); int* integral_queries_per_block = (int*)malloc(grid_d*grid_d*grid_d*sizeof(int)); gettimeofday(&tend,NULL); printTime("CPU MALLOC TIME ",tend,tstart); gettimeofday(&tstart,NULL); generatePoints(points, number_of_points, dimensions, 0, 1, 1); generatePoints(queries, number_of_queries, dimensions, 0, 1, 2); gettimeofday(&tend,NULL); printTime("GENERATION TIME ",tend,tstart); gettimeofday(&tstart,NULL); assignPointsToBlocks(points, block_of_point , points_per_block , side_block_length , number_of_points, grid_d , dimensions); rearrangePointsToGrid(points,grid_arranged_points, block_of_point , points_per_block , side_block_length , number_of_points, grid_d , dimensions); assignPointsToBlocks(grid_arranged_points, block_of_point , points_per_block , side_block_length , number_of_points, grid_d , dimensions); printf("assigned points\n"); free(points); float* grid_arranged_queries = 
(float*)malloc(number_of_queries*dimensions*sizeof(float)); assignPointsToBlocks(queries, block_of_query , queries_per_block , side_block_length , number_of_queries, grid_d , dimensions); rearrangePointsToGrid(queries,grid_arranged_queries, block_of_query , queries_per_block , side_block_length , number_of_queries, grid_d , dimensions); printf("assigned queries\n"); assignPointsToBlocks(grid_arranged_queries, block_of_query , queries_per_block , side_block_length , number_of_queries, grid_d , dimensions); printf("assigned queries\n"); for(int i = 0; i < grid_d*grid_d*grid_d; i++){ integral_points_per_block[i] = 0; integral_queries_per_block[i] = 0; for(int j = 0; j < i; j++){ integral_points_per_block[i] += points_per_block[j]; integral_queries_per_block[i] += queries_per_block[j]; } } printf("rearranged points\n"); free(queries); //float* knns = (float*) malloc(number_of_queries*dimensions*sizeof(int)); float* knns_gpu = (float*) malloc(number_of_queries*dimensions*sizeof(int)); float* knns_dists = (float*)malloc(number_of_queries*sizeof(float)); for(int i = 0; i < number_of_queries; i++){ knns_dists[i] = 100; } gettimeofday(&tend,NULL); printTime("CPU BINNING TIME ",tend,tstart); gettimeofday(&tstart,NULL); gettimeofday(&cumallocStart,NULL); float* dev_points; hipError_t cuer; cuer = hipMalloc(&dev_points,number_of_points*3*sizeof(float)); printf("%s\n",hipGetErrorName(cuer)); cuer = hipMemcpy(dev_points, grid_arranged_points, number_of_points*3*sizeof(float),hipMemcpyHostToDevice); printf("%s\n",hipGetErrorName(cuer)); float* dev_queries; cuer = hipMalloc(&dev_queries, number_of_queries*3*sizeof(float)); printf("%s\n",hipGetErrorName(cuer)); cuer = hipMemcpy(dev_queries,grid_arranged_queries,number_of_queries*3*sizeof(float),hipMemcpyHostToDevice); printf("%s\n",hipGetErrorName(cuer)); int* dev_points_per_block; cuer = hipMalloc(&dev_points_per_block, grid_d*grid_d*grid_d*sizeof(int)); printf("%s\n",hipGetErrorName(cuer)); cuer = hipMemcpy(dev_points_per_block , points_per_block, grid_d*grid_d*grid_d*sizeof(int),hipMemcpyHostToDevice); printf("%s\n",hipGetErrorName(cuer)); int* dev_queries_per_blcok; cuer = hipMalloc(&dev_queries_per_blcok, grid_d*grid_d*grid_d*sizeof(int)); printf("%s\n",hipGetErrorName(cuer)); cuer = hipMemcpy(dev_queries_per_blcok, queries_per_block ,grid_d*grid_d*grid_d*sizeof(int),hipMemcpyHostToDevice); printf("%s\n",hipGetErrorName(cuer)); int *dev_integral_points; hipMalloc(&dev_integral_points,grid_d*grid_d*grid_d*sizeof(int)); hipMemcpy(dev_integral_points , integral_points_per_block , grid_d*grid_d*grid_d*sizeof(int) , hipMemcpyHostToDevice); int* dev_integral_queries; hipMalloc(&dev_integral_queries , grid_d*grid_d*grid_d*sizeof(int)); hipMemcpy(dev_integral_queries, integral_queries_per_block, grid_d*grid_d*grid_d*sizeof(int),hipMemcpyHostToDevice); free(knns_dists); float* res_dists = (float*)malloc(number_of_queries*sizeof(float)); int* res_indexes = (int*)malloc(number_of_queries*sizeof(int)); // float* res_dists2 = (float*)malloc(number_of_queries*sizeof(float)); // int* res_indexes2 = (int*)malloc(number_of_queries*sizeof(int)); for(int i = 0; i < number_of_queries; i++){ res_dists[i] = 100; res_indexes[i] = 19; } int* dev_res_indexes; cuer = hipMalloc(&dev_res_indexes,number_of_queries*sizeof(int)); printf("%s\n",hipGetErrorName(cuer)); cuer = hipMemcpy(dev_res_indexes,res_indexes, number_of_queries*sizeof(int),hipMemcpyHostToDevice); printf("%s\n",hipGetErrorName(cuer)); float *dev_res_dists; cuer = 
hipMalloc(&dev_res_dists,number_of_queries*sizeof(float)); printf("%s\n",hipGetErrorName(cuer)); cuer = hipMemcpy(dev_res_dists,res_dists,number_of_queries*sizeof(float) , hipMemcpyHostToDevice); printf("%s\n",hipGetErrorName(cuer)); gettimeofday(&tend,NULL); printTime("GPU MALLOC",tend,tstart); gettimeofday(&cumallocEnd,NULL); gettimeofday(&knnStart,NULL); //dbgKnn<<<1000,500>>>(dev_res_dists,dev_res_indexes,number_of_queries); hipLaunchKernelGGL(( devKnnShared), dim3(dim3(grid_d,grid_d,grid_d)),dim3(128), 0, 0, dev_points,dev_queries,dev_points_per_block , dev_queries_per_blcok,dev_res_indexes , dev_res_dists , number_of_queries,number_of_points, dev_integral_points,dev_integral_queries); cuer = hipGetLastError(); printf("%s\n",hipGetErrorName(cuer)); cuer = hipMemcpy(res_dists,dev_res_dists,number_of_queries*sizeof(float) , hipMemcpyDeviceToHost); printf("%s\n",hipGetErrorName(cuer)); cuer = hipMemcpy(res_indexes,dev_res_indexes, number_of_queries*sizeof(int),hipMemcpyDeviceToHost); printf("%s\n",hipGetErrorName(cuer)); for(int i = 0; i < number_of_queries; i++){ memcpy(&knns_gpu[i*3], &grid_arranged_points[res_indexes[i]*3], 3*sizeof(float)); } gettimeofday(&knnEnd,NULL); //printPointsToCsv("knn2.csv" , "w" , knns_gpu , number_of_queries , dimensions); gettimeofday(&tstart,NULL); printTime("GPU KNN ",tstart,tend); printPointsToCsv("knn.csv" , "w" , knns_gpu , number_of_queries*k_num , dimensions); // printPointsToCsv("points.csv" , "w" , points , number_of_points , dimensions); // printPointsToCsv("queries.csv" , "w" , queries , number_of_queries , dimensions); printPointsToCsv("points_arranged.csv" ,"w" , grid_arranged_points , number_of_points , dimensions); printPointsToCsv("queries_arranged.csv" , "w" , grid_arranged_queries , number_of_queries , dimensions); MeasurementsRes[0] = number_of_points; MeasurementsRes[1] = grid_d; MeasurementsRes[2] = cumallocEnd.tv_sec - cumallocStart.tv_sec; MeasurementsRes[3] = cumallocEnd.tv_usec = cumallocStart.tv_usec; if(MeasurementsRes[3] < 0){ MeasurementsRes[3] += 1000000; MeasurementsRes[2]--; } MeasurementsRes[4] = knnEnd.tv_sec - knnStart.tv_sec; MeasurementsRes[5] = knnEnd.tv_usec - knnStart.tv_usec; if(MeasurementsRes[5] < 0){ MeasurementsRes[5] += 1000000; MeasurementsRes[4]--; } //debugGPUKnnGlobal(grid_arranged_points,0,0,grid_arranged_queries,0,0,points_per_block,queries_per_block,grid_d,num_of_threads,1,indxs,dsts); FILE* file = fopen("timesGPU.csv", "a"); fprintf(file,"%ld,%ld,%ld,%ld,%ld,%ld\n",MeasurementsRes[0],MeasurementsRes[1],MeasurementsRes[2],MeasurementsRes[3],MeasurementsRes[4],MeasurementsRes[5]); fclose(file); free(res_dists); free(res_indexes); free(knns_gpu); free(grid_arranged_points); free(grid_arranged_queries); free(block_of_point); free(block_of_query); free(points_per_block); free(queries_per_block); //free(knn_res); //----------------------------------------------------------------// gettimeofday(&totalProgramEnd,NULL); printTime("total program time ", totalProgramEnd,totalProgramStart); hipProfilerStop(); hipDeviceReset(); return 0; } void printPoints(int* pts, int num, int dim){ for(int i = 0; i < num; i++){ printf("Points%d:\t",i); for(int j = 0; j < dim; j++){ printf("x%d %d\t",j,pts[i*dim + j]); } printf("\n"); } } void printTime(char* text, struct timeval end , struct timeval start){ printf("%s ",text); long s=end.tv_sec-start.tv_sec; long us=end.tv_usec - start.tv_usec; if(us < 0){ us = 1000000+us; s = s-1; } printf("%ld s, %ld us\n",s,us); }
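// devKnnShared above streams the candidate points through shared memory: each
// iteration the block cooperatively loads blockDim.x points, calls
// __syncthreads(), and every query thread then scans that tile. Note that with
// the <<<..., 128>>> launch only the first 128 rows of the 2000-row sh_pts
// array are ever used per tile. A stripped-down sketch of the same tiling
// pattern (hypothetical names, single nearest neighbour only):

#define TILE 128  // must equal the launch's blockDim.x

__global__ void nn_tiled(const float3 *points, int n_points,
                         const float3 *queries, int n_queries, int *nn_idx){
    __shared__ float3 tile[TILE];
    int q = blockIdx.x * blockDim.x + threadIdx.x;
    float3 my = (q < n_queries) ? queries[q] : make_float3(0.f, 0.f, 0.f);
    float best = 1e30f;
    int best_i = -1;

    for (int base = 0; base < n_points; base += TILE){
        // Cooperative load of one tile of candidate points.
        if (base + threadIdx.x < n_points)
            tile[threadIdx.x] = points[base + threadIdx.x];
        __syncthreads();

        int limit = min(TILE, n_points - base);
        if (q < n_queries){
            for (int j = 0; j < limit; ++j){
                float dx = my.x - tile[j].x;
                float dy = my.y - tile[j].y;
                float dz = my.z - tile[j].z;
                float d2 = dx*dx + dy*dy + dz*dz;   // squared distance
                if (d2 < best){ best = d2; best_i = base + j; }
            }
        }
        __syncthreads();  // tile is overwritten in the next iteration
    }
    if (q < n_queries) nn_idx[q] = best_i;
}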
2ea22bd293ec37c22e3e535a718ee9404b430065.cu
#include <cuda.h> #include <cuda_runtime_api.h> #include <cuda_profiler_api.h> #include <math_functions.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <omp.h> #include "../headers/serialKnn.h" #include "../headers/pointsHelper.h" void printPoints(int* pts, int num , int dim); void printTime(char* text, struct timeval end , struct timeval start); __global__ void devKnnShared(float* points, float* queries, int* points_per_block, int* queries_per_block, int* res_indexes , float* res_dists, int number_of_queries , int max_points, int* dev_integral_points , int* dev_integral_queries) { int b = blockIdx.x*gridDim.y*gridDim.z + blockIdx.y*gridDim.z + blockIdx.z; int num_of_queries = queries_per_block[b]; int mulq = 0; int integral_queries = 0; int integral_points = 0; int qrs_shifter; int num_of_points ; float nbrs_dist; int nbrs_indx; int mulp ; int grid_d = gridDim.x; __shared__ float sh_pts[2000][3]; int flag = 0; float myQuery[3]; // for(int i = 0; i < b; i++){ // integral_points += points_per_block[i]; // integral_queries += queries_per_block[i]; // } integral_points = dev_integral_points[b]; integral_queries = dev_integral_queries[b]; while(mulq*blockDim.x < num_of_queries){ int q = mulq*blockDim.x + threadIdx.x; qrs_shifter = integral_queries + q; num_of_points = points_per_block[b]; nbrs_dist = 100; nbrs_indx = 1; mulp = 0; if(q < num_of_queries){ myQuery[0] = queries[qrs_shifter*3 + 0]; myQuery[1] = queries[qrs_shifter*3 + 1]; myQuery[2] = queries[qrs_shifter*3 + 2]; } while(mulp*blockDim.x < num_of_points){ int p2 = mulp*blockDim.x + threadIdx.x; int pts_shifter2 = integral_points + p2; __syncthreads(); if(p2 < num_of_points && pts_shifter2 < max_points){ for(int d = 0; d < 3; d++){ sh_pts[threadIdx.x][d] = points[pts_shifter2*3 + d]; } } __syncthreads(); if(q < num_of_queries){ int limit = min(num_of_points,(mulp+1)*blockDim.x); for(int p = mulp*blockDim.x; p < limit; p++){ int pts_shifter = integral_points + p; float dist = 0; for(int d = 0; d < 3; d++){ dist += powf(myQuery[d]- sh_pts[p - mulp*blockDim.x][d] ,2); //points[pts_shifter*3+d],2); // } dist = sqrtf(dist); if(dist < nbrs_dist){ nbrs_dist = dist; nbrs_indx = pts_shifter; } } } mulp++; } if(q < num_of_queries){ if(nbrs_dist < res_dists[qrs_shifter]){ res_dists[qrs_shifter] = nbrs_dist; res_indexes[qrs_shifter] = nbrs_indx; } } mulq++; } // Search neighbour blocks int nbrs_blocks[27]; int number_of_nbrs_blocks = 0; for(int i = -1; i <= 1; i++){ for(int j = -1; j <= 1; j++){ for(int k = -1; k <= 1; k++){ if(i != 0 | j != 0 | k != 0){ int nx = blockIdx.x + i; int ny = blockIdx.y + j; int nz = blockIdx.z + k; if(!(nx<0 | ny<0 | nz<0 | nx >=grid_d | ny >= grid_d | nz>=grid_d)){ nbrs_blocks[number_of_nbrs_blocks] = nx*grid_d*grid_d + ny*grid_d + nz; number_of_nbrs_blocks++; } } } } } for(int nb = 0; nb < number_of_nbrs_blocks; nb++){ integral_points = 0; // for(int i = 0; i < nbrs_blocks[nb]; i++){ // integral_points += points_per_block[i]; // } integral_points = dev_integral_points[nbrs_blocks[nb]]; mulq = 0; while(mulq*blockDim.x < num_of_queries){ int q = mulq*blockDim.x + threadIdx.x; qrs_shifter = integral_queries + q; num_of_points = points_per_block[nbrs_blocks[nb]]; nbrs_dist = 100; nbrs_indx = 1; mulp = 0; if(q < num_of_queries){ myQuery[0] = queries[qrs_shifter*3 + 0]; myQuery[1] = queries[qrs_shifter*3 + 1]; myQuery[2] = queries[qrs_shifter*3 + 2]; } while(mulp*blockDim.x < num_of_points){ int p2 = mulp*blockDim.x + threadIdx.x; int pts_shifter2 = integral_points + p2; 
        __syncthreads();
        if(p2 < num_of_points && pts_shifter2 < max_points){
            for(int d = 0; d < 3; d++){
                sh_pts[threadIdx.x][d] = points[pts_shifter2*3 + d];
            }
        }
        __syncthreads();
        if(q < num_of_queries){
            int limit = min(num_of_points,(mulp+1)*blockDim.x);
            for(int p = mulp*blockDim.x; p < limit; p++){
                int pts_shifter = integral_points + p;
                float dist = 0;
                for(int d = 0; d < 3; d++){
                    dist += powf(myQuery[d]- sh_pts[p - mulp*blockDim.x][d] ,2); //points[pts_shifter*3+d],2); //
                }
                dist = sqrtf(dist);
                if(dist < nbrs_dist){
                    nbrs_dist = dist;
                    nbrs_indx = pts_shifter;
                }
            }
        }
        mulp++;
        }
        if(q < num_of_queries){
            if(nbrs_dist < res_dists[qrs_shifter]){
                res_dists[qrs_shifter] = nbrs_dist;
                res_indexes[qrs_shifter] = nbrs_indx;
            }
        }
        mulq++;
    }
    }
}

/******************** INPUT *********************
 * 1st param -> exponent for number of points (default 2^5)
 * 2nd param -> exponent for grid dimensions  (default 2^1)
 * 3rd param -> k for k-nn                    (default 1)
 * 4th param -> seed                          (default 1)
 *************************************************/
int main(int argc, char** argv){

    long MeasurementsRes[10];
    cudaDeviceReset();
    struct timeval totalProgramStart,totalProgramEnd,tstart,tend,knnStart,knnEnd,cumallocStart,cumallocEnd;
    gettimeofday(&totalProgramStart,NULL);
    int *cudaInit;
    //----------------------------------------------//
    int number_of_points = 5;
    int grid_d = 1;
    int k_num = 1;
    int seed = 1;
    if(argc > 1){
        number_of_points = atoi(argv[1]);
    }
    number_of_points = pow(2,number_of_points);
    if(argc > 2){
        grid_d = atoi(argv[2]);
    }
    grid_d = pow(2,grid_d);
    if(argc > 3){
        k_num = atoi(argv[3]);
    }
    if(argc > 4){
        seed = atoi(argv[4]);
    }
    int number_of_queries = number_of_points;
    int dimensions = 3;
    float side_block_length = ((float)1)/((float)grid_d);
    printf("Number of points:%d\nNumber of queries:%d\nDimensions:%d\nGrid Dimensions:%d\nK for k-nn:%d\nSideBlock Length:%f\n",
           number_of_points,number_of_queries,dimensions,grid_d,k_num,side_block_length);
    gettimeofday(&tstart,NULL);
    float* points = (float*)malloc(number_of_points*dimensions*sizeof(float));
    float* queries = (float*)malloc(number_of_queries*dimensions*sizeof(float));
    float* grid_arranged_points = (float*)malloc(number_of_points*dimensions*sizeof(float));
    //float grid_arranged_points[(int)pow(2,19)*3];
    int* block_of_point = (int*)malloc(number_of_points*dimensions*sizeof(int));
    int* block_of_query = (int*)malloc(number_of_queries*dimensions*sizeof(int));
    int* points_per_block = (int*)malloc(grid_d*grid_d*grid_d*sizeof(int));
    int* queries_per_block = (int*)malloc(grid_d*grid_d*grid_d*sizeof(int));
    int* integral_points_per_block = (int*)malloc(grid_d*grid_d*grid_d*sizeof(int));
    int* integral_queries_per_block = (int*)malloc(grid_d*grid_d*grid_d*sizeof(int));
    gettimeofday(&tend,NULL);
    printTime("CPU MALLOC TIME ",tend,tstart);
    gettimeofday(&tstart,NULL);
    generatePoints(points, number_of_points, dimensions, 0, 1, 1);
    generatePoints(queries, number_of_queries, dimensions, 0, 1, 2);
    gettimeofday(&tend,NULL);
    printTime("GENERATION TIME ",tend,tstart);
    gettimeofday(&tstart,NULL);
    assignPointsToBlocks(points, block_of_point , points_per_block , side_block_length , number_of_points, grid_d , dimensions);
    rearrangePointsToGrid(points,grid_arranged_points, block_of_point , points_per_block , side_block_length , number_of_points, grid_d , dimensions);
    assignPointsToBlocks(grid_arranged_points, block_of_point , points_per_block , side_block_length , number_of_points, grid_d , dimensions);
    printf("assigned points\n");
    free(points);
    float* grid_arranged_queries = (float*)malloc(number_of_queries*dimensions*sizeof(float));
    assignPointsToBlocks(queries, block_of_query , queries_per_block , side_block_length , number_of_queries, grid_d , dimensions);
    rearrangePointsToGrid(queries,grid_arranged_queries, block_of_query , queries_per_block , side_block_length , number_of_queries, grid_d , dimensions);
    printf("assigned queries\n");
    assignPointsToBlocks(grid_arranged_queries, block_of_query , queries_per_block , side_block_length , number_of_queries, grid_d , dimensions);
    printf("assigned queries\n");
    for(int i = 0; i < grid_d*grid_d*grid_d; i++){
        integral_points_per_block[i] = 0;
        integral_queries_per_block[i] = 0;
        for(int j = 0; j < i; j++){
            integral_points_per_block[i] += points_per_block[j];
            integral_queries_per_block[i] += queries_per_block[j];
        }
    }
    printf("rearranged points\n");
    free(queries);
    //float* knns = (float*) malloc(number_of_queries*dimensions*sizeof(int));
    float* knns_gpu = (float*) malloc(number_of_queries*dimensions*sizeof(float));
    float* knns_dists = (float*)malloc(number_of_queries*sizeof(float));
    for(int i = 0; i < number_of_queries; i++){
        knns_dists[i] = 100;
    }
    gettimeofday(&tend,NULL);
    printTime("CPU BINNING TIME ",tend,tstart);
    gettimeofday(&tstart,NULL);
    gettimeofday(&cumallocStart,NULL);
    float* dev_points;
    cudaError_t cuer;
    cuer = cudaMalloc(&dev_points,number_of_points*3*sizeof(float));
    printf("%s\n",cudaGetErrorName(cuer));
    cuer = cudaMemcpy(dev_points, grid_arranged_points, number_of_points*3*sizeof(float),cudaMemcpyHostToDevice);
    printf("%s\n",cudaGetErrorName(cuer));
    float* dev_queries;
    cuer = cudaMalloc(&dev_queries, number_of_queries*3*sizeof(float));
    printf("%s\n",cudaGetErrorName(cuer));
    cuer = cudaMemcpy(dev_queries,grid_arranged_queries,number_of_queries*3*sizeof(float),cudaMemcpyHostToDevice);
    printf("%s\n",cudaGetErrorName(cuer));
    int* dev_points_per_block;
    cuer = cudaMalloc(&dev_points_per_block, grid_d*grid_d*grid_d*sizeof(int));
    printf("%s\n",cudaGetErrorName(cuer));
    cuer = cudaMemcpy(dev_points_per_block , points_per_block, grid_d*grid_d*grid_d*sizeof(int),cudaMemcpyHostToDevice);
    printf("%s\n",cudaGetErrorName(cuer));
    int* dev_queries_per_block;
    cuer = cudaMalloc(&dev_queries_per_block, grid_d*grid_d*grid_d*sizeof(int));
    printf("%s\n",cudaGetErrorName(cuer));
    cuer = cudaMemcpy(dev_queries_per_block, queries_per_block ,grid_d*grid_d*grid_d*sizeof(int),cudaMemcpyHostToDevice);
    printf("%s\n",cudaGetErrorName(cuer));
    int *dev_integral_points;
    cudaMalloc(&dev_integral_points,grid_d*grid_d*grid_d*sizeof(int));
    cudaMemcpy(dev_integral_points , integral_points_per_block , grid_d*grid_d*grid_d*sizeof(int) , cudaMemcpyHostToDevice);
    int* dev_integral_queries;
    cudaMalloc(&dev_integral_queries , grid_d*grid_d*grid_d*sizeof(int));
    cudaMemcpy(dev_integral_queries, integral_queries_per_block, grid_d*grid_d*grid_d*sizeof(int),cudaMemcpyHostToDevice);
    free(knns_dists);
    float* res_dists = (float*)malloc(number_of_queries*sizeof(float));
    int* res_indexes = (int*)malloc(number_of_queries*sizeof(int));
    // float* res_dists2 = (float*)malloc(number_of_queries*sizeof(float));
    // int* res_indexes2 = (int*)malloc(number_of_queries*sizeof(int));
    for(int i = 0; i < number_of_queries; i++){
        res_dists[i] = 100;
        res_indexes[i] = 19;
    }
    int* dev_res_indexes;
    cuer = cudaMalloc(&dev_res_indexes,number_of_queries*sizeof(int));
    printf("%s\n",cudaGetErrorName(cuer));
    cuer = cudaMemcpy(dev_res_indexes,res_indexes, number_of_queries*sizeof(int),cudaMemcpyHostToDevice);
    printf("%s\n",cudaGetErrorName(cuer));
    float *dev_res_dists;
    cuer = cudaMalloc(&dev_res_dists,number_of_queries*sizeof(float));
    printf("%s\n",cudaGetErrorName(cuer));
    cuer = cudaMemcpy(dev_res_dists,res_dists,number_of_queries*sizeof(float) , cudaMemcpyHostToDevice);
    printf("%s\n",cudaGetErrorName(cuer));
    gettimeofday(&tend,NULL);
    printTime("GPU MALLOC",tend,tstart);
    gettimeofday(&cumallocEnd,NULL);
    gettimeofday(&knnStart,NULL);
    //dbgKnn<<<1000,500>>>(dev_res_dists,dev_res_indexes,number_of_queries);
    devKnnShared<<<dim3(grid_d,grid_d,grid_d),128>>>(dev_points,dev_queries,dev_points_per_block , dev_queries_per_block,dev_res_indexes , dev_res_dists , number_of_queries,number_of_points, dev_integral_points,dev_integral_queries);
    cuer = cudaGetLastError();
    printf("%s\n",cudaGetErrorName(cuer));
    cuer = cudaMemcpy(res_dists,dev_res_dists,number_of_queries*sizeof(float) , cudaMemcpyDeviceToHost);
    printf("%s\n",cudaGetErrorName(cuer));
    cuer = cudaMemcpy(res_indexes,dev_res_indexes, number_of_queries*sizeof(int),cudaMemcpyDeviceToHost);
    printf("%s\n",cudaGetErrorName(cuer));
    for(int i = 0; i < number_of_queries; i++){
        memcpy(&knns_gpu[i*3], &grid_arranged_points[res_indexes[i]*3], 3*sizeof(float));
    }
    gettimeofday(&knnEnd,NULL);
    //printPointsToCsv("knn2.csv" , "w" , knns_gpu , number_of_queries , dimensions);
    gettimeofday(&tstart,NULL);
    printTime("GPU KNN ",tstart,tend);
    printPointsToCsv("knn.csv" , "w" , knns_gpu , number_of_queries*k_num , dimensions);
    // printPointsToCsv("points.csv" , "w" , points , number_of_points , dimensions);
    // printPointsToCsv("queries.csv" , "w" , queries , number_of_queries , dimensions);
    printPointsToCsv("points_arranged.csv" ,"w" , grid_arranged_points , number_of_points , dimensions);
    printPointsToCsv("queries_arranged.csv" , "w" , grid_arranged_queries , number_of_queries , dimensions);
    MeasurementsRes[0] = number_of_points;
    MeasurementsRes[1] = grid_d;
    MeasurementsRes[2] = cumallocEnd.tv_sec - cumallocStart.tv_sec;
    MeasurementsRes[3] = cumallocEnd.tv_usec - cumallocStart.tv_usec;
    if(MeasurementsRes[3] < 0){
        MeasurementsRes[3] += 1000000;
        MeasurementsRes[2]--;
    }
    MeasurementsRes[4] = knnEnd.tv_sec - knnStart.tv_sec;
    MeasurementsRes[5] = knnEnd.tv_usec - knnStart.tv_usec;
    if(MeasurementsRes[5] < 0){
        MeasurementsRes[5] += 1000000;
        MeasurementsRes[4]--;
    }
    //debugGPUKnnGlobal(grid_arranged_points,0,0,grid_arranged_queries,0,0,points_per_block,queries_per_block,grid_d,num_of_threads,1,indxs,dsts);
    FILE* file = fopen("timesGPU.csv", "a");
    fprintf(file,"%ld,%ld,%ld,%ld,%ld,%ld\n",MeasurementsRes[0],MeasurementsRes[1],MeasurementsRes[2],MeasurementsRes[3],MeasurementsRes[4],MeasurementsRes[5]);
    fclose(file);
    free(res_dists);
    free(res_indexes);
    free(knns_gpu);
    free(grid_arranged_points);
    free(grid_arranged_queries);
    free(block_of_point);
    free(block_of_query);
    free(points_per_block);
    free(queries_per_block);
    //free(knn_res);
    //----------------------------------------------------------------//
    gettimeofday(&totalProgramEnd,NULL);
    printTime("total program time ", totalProgramEnd,totalProgramStart);
    cudaProfilerStop();
    cudaDeviceReset();
    return 0;
}

void printPoints(int* pts, int num, int dim){
    for(int i = 0; i < num; i++){
        printf("Points%d:\t",i);
        for(int j = 0; j < dim; j++){
            printf("x%d %d\t",j,pts[i*dim + j]);
        }
        printf("\n");
    }
}

void printTime(char* text, struct timeval end , struct timeval start){
    printf("%s ",text);
    long s=end.tv_sec-start.tv_sec;
    long us=end.tv_usec - start.tv_usec;
    if(us < 0){
        us = 1000000+us;
        s = s-1;
    }
    printf("%ld s, %ld us\n",s,us);
}
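/* Illustrative sketch (not part of the file above): a brute-force CPU nearest-neighbour
 * reference that can be used to validate the res_indexes / res_dists produced by
 * devKnnShared. It only assumes the layout used by the program above (row-major float
 * arrays with three coordinates per point); the helper itself and its name are not part
 * of the original code. */
#include <math.h>

static void bruteForceNearestNeighbour(const float* points, int num_of_points,
                                       const float* queries, int num_of_queries,
                                       int* nn_index, float* nn_dist){
    for(int q = 0; q < num_of_queries; q++){
        float best = INFINITY;
        int best_idx = -1;
        for(int p = 0; p < num_of_points; p++){
            // squared Euclidean distance in 3D, then the root, as in the kernel
            float dist = 0;
            for(int d = 0; d < 3; d++){
                float diff = queries[q*3 + d] - points[p*3 + d];
                dist += diff * diff;
            }
            dist = sqrtf(dist);
            if(dist < best){
                best = dist;
                best_idx = p;
            }
        }
        nn_dist[q] = best;
        nn_index[q] = best_idx;
    }
}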
57d7add4e529c04b094a67b1fd6f05ee0db24511.hip
// !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"

namespace StreamCompaction {
    namespace Naive {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }

        // TODO:
        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        __global__ void kernAdvanceScan(int n, int offset, int* a, int* b) {
            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
            if (idx >= n) {
                return;
            }
            if (idx >= offset) {
                b[idx] = a[idx - offset] + a[idx];
            } else {
                b[idx] = a[idx];
            }
        }

        void scan(int n, int *odata, const int *idata, int *cudaA, int *cudaB) {
            dim3 fullBlocksPerGrid((n + blocksize - 1) / blocksize);
            int kmax = ilog2ceil(n);
            timer().startGpuTimer();
            for (int k = 1; k <= kmax; ++k) {
                // invoke kernel
                int offset = (int)pow(2, k - 1);
                hipLaunchKernelGGL(( kernAdvanceScan), dim3(fullBlocksPerGrid), dim3(blocksize), 0, 0, n - 1, offset, cudaA, cudaB);
                // pointer swap
                int *temp = cudaA;
                cudaA = cudaB;
                cudaB = temp;
            }
            timer().endGpuTimer();
            hipMemcpy(odata + 1, cudaA, (n - 1) * sizeof(int), hipMemcpyDeviceToHost);
            odata[0] = 0;
        }
    }
}
57d7add4e529c04b094a67b1fd6f05ee0db24511.cu
#define GLM_FORCE_CUDA
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"

namespace StreamCompaction {
    namespace Naive {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }

        // TODO:
        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        __global__ void kernAdvanceScan(int n, int offset, int* a, int* b) {
            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
            if (idx >= n) {
                return;
            }
            if (idx >= offset) {
                b[idx] = a[idx - offset] + a[idx];
            } else {
                b[idx] = a[idx];
            }
        }

        void scan(int n, int *odata, const int *idata, int *cudaA, int *cudaB) {
            dim3 fullBlocksPerGrid((n + blocksize - 1) / blocksize);
            int kmax = ilog2ceil(n);
            timer().startGpuTimer();
            for (int k = 1; k <= kmax; ++k) {
                // invoke kernel
                int offset = (int)pow(2, k - 1);
                kernAdvanceScan<<<fullBlocksPerGrid, blocksize>>>(n - 1, offset, cudaA, cudaB);
                // pointer swap
                int *temp = cudaA;
                cudaA = cudaB;
                cudaB = temp;
            }
            timer().endGpuTimer();
            cudaMemcpy(odata + 1, cudaA, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
            odata[0] = 0;
        }
    }
}
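/* Illustrative sketch (not part of the file pair above): one plausible way to drive
 * Naive::scan from the host. scan() never uploads idata itself, so the caller is
 * expected to pre-load the input into the first device buffer; this wrapper, its name
 * and the buffer handling are assumptions for illustration, not part of the project. */
#include <cuda_runtime.h>
#include "naive.h"

void runNaiveScan(int n, int *odata, const int *idata) {
    int *cudaA = nullptr;
    int *cudaB = nullptr;
    cudaMalloc(&cudaA, n * sizeof(int));
    cudaMalloc(&cudaB, n * sizeof(int));
    // The kernel ping-pongs between the two buffers and reads the input from cudaA.
    cudaMemcpy(cudaA, idata, n * sizeof(int), cudaMemcpyHostToDevice);
    // scan() times the GPU loop and copies the exclusive-scan result into odata.
    StreamCompaction::Naive::scan(n, odata, idata, cudaA, cudaB);
    // scan() swaps its local copies of the pointers, so the original allocations
    // are still the ones to free here.
    cudaFree(cudaA);
    cudaFree(cudaB);
}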
4fe80861b2e7e3e362962e9ad4e253bd6b987964.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// matmultKernel00.cu /// For CSU CS575 Spring 2011 /// Instructor: Wim Bohm /// Based on code from the CUDA Programming Guide /// Modified by Wim Bohm and David Newman /// Created: 2011-01-27 /// Last Modified: 2011-02-23 DVN /// /// Multiplies two matrices using CUDA: A x B = C /// /// Copy this file and modify the MatMultKernel device function for /// each of your experiments. /// #include "matmultKernel.h" // Define a gpu kernel to perform matrix multiplication // of A x B = C. __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){ // matrix blocks float *Asub, *Bsub, *Csub; // Putting these into registers speeds access. int thread_row = threadIdx.y; int thread_col = threadIdx.x; int block_row = blockIdx.y; int block_col = blockIdx.x; // Each THREAD BLOCK computes one sub matrix Csub of C // EACH THREAD creates its own matrix descriptor Csub Csub = &C.elements[C.stride * BLOCK_SIZE * block_row + BLOCK_SIZE * block_col]; // Each thread computes one element of Csub in its copy of CValue float Cvalue = 0; // Loop over all sub matrices in block_row of A and block_col of B // required to compute Csub. Block multiply each pair of sub matrices // and accumulate results for (int m = 0; m < (A.width / BLOCK_SIZE); ++m){ // Get Asub and Bsub descriptors Asub = &A.elements[A.stride * BLOCK_SIZE * block_row + BLOCK_SIZE * m]; Bsub = &B.elements[B.stride * BLOCK_SIZE * m + BLOCK_SIZE * block_col]; // Copy ELEMENTS OF ASub and Bsub into shared memory // EACH THREAD loads ONE ELEMENT of ASub and ONE of Bsub // Notice: it does not need to be the element it requires to // compute its Cvalue, as long as all elements are // collaboratively read. // Notice: every thread declares shared_A and shared_B in shared memory // even though a thread block has only one shared_A and one shared_B __shared__ float shared_A[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float shared_B[BLOCK_SIZE][BLOCK_SIZE]; // Each thread copies just one element of shared_A and one element of shared_B shared_A[thread_row][thread_col] = Asub[thread_row * A.stride + thread_col]; shared_B[thread_row][thread_col] = Bsub[thread_row * B.stride + thread_col]; // Synchronize to ensure all elements are read __syncthreads(); // Do an inproduct of one row of shared_A and one col of shared_B // computing one Cvalue by accumulation #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) Cvalue += shared_A[thread_row][e] * shared_B[e][thread_col]; // Synchronize to ensure all Cvalues have been incremented // before reading in the next shared_A AND shared_B BLOCKS __syncthreads(); } // Write Csub to GLOBAL memory. // Each thread writes its own cell value. Csub[thread_row * C.stride + thread_col] = Cvalue; }
4fe80861b2e7e3e362962e9ad4e253bd6b987964.cu
/// /// matmultKernel00.cu /// For CSU CS575 Spring 2011 /// Instructor: Wim Bohm /// Based on code from the CUDA Programming Guide /// Modified by Wim Bohm and David Newman /// Created: 2011-01-27 /// Last Modified: 2011-02-23 DVN /// /// Multiplies two matrices using CUDA: A x B = C /// /// Copy this file and modify the MatMultKernel device function for /// each of your experiments. /// #include "matmultKernel.h" // Define a gpu kernel to perform matrix multiplication // of A x B = C. __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){ // matrix blocks float *Asub, *Bsub, *Csub; // Putting these into registers speeds access. int thread_row = threadIdx.y; int thread_col = threadIdx.x; int block_row = blockIdx.y; int block_col = blockIdx.x; // Each THREAD BLOCK computes one sub matrix Csub of C // EACH THREAD creates its own matrix descriptor Csub Csub = &C.elements[C.stride * BLOCK_SIZE * block_row + BLOCK_SIZE * block_col]; // Each thread computes one element of Csub in its copy of CValue float Cvalue = 0; // Loop over all sub matrices in block_row of A and block_col of B // required to compute Csub. Block multiply each pair of sub matrices // and accumulate results for (int m = 0; m < (A.width / BLOCK_SIZE); ++m){ // Get Asub and Bsub descriptors Asub = &A.elements[A.stride * BLOCK_SIZE * block_row + BLOCK_SIZE * m]; Bsub = &B.elements[B.stride * BLOCK_SIZE * m + BLOCK_SIZE * block_col]; // Copy ELEMENTS OF ASub and Bsub into shared memory // EACH THREAD loads ONE ELEMENT of ASub and ONE of Bsub // Notice: it does not need to be the element it requires to // compute its Cvalue, as long as all elements are // collaboratively read. // Notice: every thread declares shared_A and shared_B in shared memory // even though a thread block has only one shared_A and one shared_B __shared__ float shared_A[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float shared_B[BLOCK_SIZE][BLOCK_SIZE]; // Each thread copies just one element of shared_A and one element of shared_B shared_A[thread_row][thread_col] = Asub[thread_row * A.stride + thread_col]; shared_B[thread_row][thread_col] = Bsub[thread_row * B.stride + thread_col]; // Synchronize to ensure all elements are read __syncthreads(); // Do an inproduct of one row of shared_A and one col of shared_B // computing one Cvalue by accumulation #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) Cvalue += shared_A[thread_row][e] * shared_B[e][thread_col]; // Synchronize to ensure all Cvalues have been incremented // before reading in the next shared_A AND shared_B BLOCKS __syncthreads(); } // Write Csub to GLOBAL memory. // Each thread writes its own cell value. Csub[thread_row * C.stride + thread_col] = Cvalue; }
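/* Illustrative sketch (not part of the file pair above): a host-side launch consistent
 * with the tiling in MatMulKernel. It assumes the Matrix type from matmultKernel.h also
 * carries a height field (as in the CUDA Programming Guide example this kernel is based
 * on), that A, B and C already hold device pointers in .elements, and that every
 * dimension is a multiple of BLOCK_SIZE, since the kernel performs no bounds checking. */
#include "matmultKernel.h"

void LaunchMatMul(const Matrix A, const Matrix B, Matrix C) {
    // One BLOCK_SIZE x BLOCK_SIZE thread block computes one output tile of C.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / BLOCK_SIZE, A.height / BLOCK_SIZE);
    MatMulKernel<<<dimGrid, dimBlock>>>(A, B, C);
    // Block until the kernel finishes before the host reads C.elements.
    cudaDeviceSynchronize();
}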
a2a2a4b1f0e7af7faf1b8bc983122c8576640dc0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_initialise_chunk_kernel_celly; int xdim0_initialise_chunk_kernel_celly_h = -1; __constant__ int xdim1_initialise_chunk_kernel_celly; int xdim1_initialise_chunk_kernel_celly_h = -1; __constant__ int xdim2_initialise_chunk_kernel_celly; int xdim2_initialise_chunk_kernel_celly_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #define OPS_ACC0(x, y) (x + xdim0_initialise_chunk_kernel_celly * (y)) #define OPS_ACC1(x, y) (x + xdim1_initialise_chunk_kernel_celly * (y)) #define OPS_ACC2(x, y) (x + xdim2_initialise_chunk_kernel_celly * (y)) // user function __device__ void initialise_chunk_kernel_celly_gpu(const double *vertexy, double *celly, double *celldy) { double d_y; d_y = (grid.ymax - grid.ymin) / (double)grid.y_cells; celly[OPS_ACC1(0, 0)] = 0.5 * (vertexy[OPS_ACC0(0, 0)] + vertexy[OPS_ACC0(0, 1)]); celldy[OPS_ACC2(0, 0)] = d_y; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 __global__ void ops_initialise_chunk_kernel_celly(const double *__restrict arg0, double *__restrict arg1, double *__restrict arg2, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_celly; arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_initialise_chunk_kernel_celly; arg2 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim2_initialise_chunk_kernel_celly; if (idx_x < size0 && idx_y < size1) { initialise_chunk_kernel_celly_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_celly(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_initialise_chunk_kernel_celly_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 13)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(13, "initialise_chunk_kernel_celly"); OPS_kernels[13].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; if (xdim0 != xdim0_initialise_chunk_kernel_celly_h || xdim1 != xdim1_initialise_chunk_kernel_celly_h || xdim2 != xdim2_initialise_chunk_kernel_celly_h) { 
hipMemcpyToSymbol(xdim0_initialise_chunk_kernel_celly, &xdim0, sizeof(int)); xdim0_initialise_chunk_kernel_celly_h = xdim0; hipMemcpyToSymbol(xdim1_initialise_chunk_kernel_celly, &xdim1, sizeof(int)); xdim1_initialise_chunk_kernel_celly_h = xdim1; hipMemcpyToSymbol(xdim2_initialise_chunk_kernel_celly, &xdim2, sizeof(int)); xdim2_initialise_chunk_kernel_celly_h = xdim2; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); p_a[2] = (char *)args[2].data_d + base2; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[13].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_initialise_chunk_kernel_celly), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], x_size, y_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[13].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[2], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[13].mpi_time += t2 - t1; OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg2); } } #ifdef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_celly(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 13; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 13; for (int i = 0; i < 4; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->function = ops_par_loop_initialise_chunk_kernel_celly_execute; if (OPS_diags > 1) { ops_timing_realloc(13, "initialise_chunk_kernel_celly"); } ops_enqueue_kernel(desc); } #endif
a2a2a4b1f0e7af7faf1b8bc983122c8576640dc0.cu
// // auto-generated by ops.py // __constant__ int xdim0_initialise_chunk_kernel_celly; int xdim0_initialise_chunk_kernel_celly_h = -1; __constant__ int xdim1_initialise_chunk_kernel_celly; int xdim1_initialise_chunk_kernel_celly_h = -1; __constant__ int xdim2_initialise_chunk_kernel_celly; int xdim2_initialise_chunk_kernel_celly_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #define OPS_ACC0(x, y) (x + xdim0_initialise_chunk_kernel_celly * (y)) #define OPS_ACC1(x, y) (x + xdim1_initialise_chunk_kernel_celly * (y)) #define OPS_ACC2(x, y) (x + xdim2_initialise_chunk_kernel_celly * (y)) // user function __device__ void initialise_chunk_kernel_celly_gpu(const double *vertexy, double *celly, double *celldy) { double d_y; d_y = (grid.ymax - grid.ymin) / (double)grid.y_cells; celly[OPS_ACC1(0, 0)] = 0.5 * (vertexy[OPS_ACC0(0, 0)] + vertexy[OPS_ACC0(0, 1)]); celldy[OPS_ACC2(0, 0)] = d_y; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 __global__ void ops_initialise_chunk_kernel_celly(const double *__restrict arg0, double *__restrict arg1, double *__restrict arg2, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_celly; arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_initialise_chunk_kernel_celly; arg2 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim2_initialise_chunk_kernel_celly; if (idx_x < size0 && idx_y < size1) { initialise_chunk_kernel_celly_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_celly(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_initialise_chunk_kernel_celly_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 13)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(13, "initialise_chunk_kernel_celly"); OPS_kernels[13].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; if (xdim0 != xdim0_initialise_chunk_kernel_celly_h || xdim1 != xdim1_initialise_chunk_kernel_celly_h || xdim2 != xdim2_initialise_chunk_kernel_celly_h) { cudaMemcpyToSymbol(xdim0_initialise_chunk_kernel_celly, &xdim0, sizeof(int)); 
xdim0_initialise_chunk_kernel_celly_h = xdim0; cudaMemcpyToSymbol(xdim1_initialise_chunk_kernel_celly, &xdim1, sizeof(int)); xdim1_initialise_chunk_kernel_celly_h = xdim1; cudaMemcpyToSymbol(xdim2_initialise_chunk_kernel_celly, &xdim2, sizeof(int)); xdim2_initialise_chunk_kernel_celly_h = xdim2; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); p_a[2] = (char *)args[2].data_d + base2; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[13].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_initialise_chunk_kernel_celly<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], x_size, y_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[13].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[2], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[13].mpi_time += t2 - t1; OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg2); } } #ifdef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_celly(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 13; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 13; for (int i = 0; i < 4; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->function = ops_par_loop_initialise_chunk_kernel_celly_execute; if (OPS_diags > 1) { ops_timing_realloc(13, "initialise_chunk_kernel_celly"); } ops_enqueue_kernel(desc); } #endif
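/* Illustrative sketch (not part of the generated file above): the xdim handling from the
 * host stub shown in isolation. A __constant__ symbol feeds the row pitch used by the
 * OPS_ACC macros, and a host-side shadow copy ensures cudaMemcpyToSymbol only runs when
 * the dat size actually changed between launches. Names here are illustrative only. */
#include <cuda_runtime.h>

__constant__ int xdim_example;
static int xdim_example_h = -1;

void setRowPitch(int xdim) {
    // Skip the symbol copy when the cached host-side value is still up to date.
    if (xdim != xdim_example_h) {
        cudaMemcpyToSymbol(xdim_example, &xdim, sizeof(int));
        xdim_example_h = xdim;
    }
}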
f46fbc68fe6ad9aa65216f9dadfdec1ae03e7177.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "LongShortTermMemoryKernelCaller.h" #include "GPUArray.cu" #include "LongShortTermMemoryKernel.cu" #include "LongShortTermMemoryGradientCollectionKernel.cu" #include "LongShortTermMemoryFiringRateKernel.cu" #include "utils.h" using namespace SNN; using namespace Kernels; using namespace GPU; using std::vector; /* constructor */ LongShortTermMemoryKernelCaller::LongShortTermMemoryKernelCaller( unsigned batchSize, unsigned numInputs, unsigned numHidden, unsigned numStandartHidden, unsigned numAdaptiveHidden, unsigned numOutputs, unsigned numSimulationTimesteps, unsigned errorMode, FloatType timeStepLength, FloatType spikeThreshold, FloatType refactoryPeriod, FloatType hiddenDecayFactor, FloatType readoutDecayFactor, FloatType adaptationDecayFactor, FloatType thresholdIncreaseConstant, FloatType targetFiringRate, FloatType firingRateScallingFactor, FloatType derivativeDumpingFactor, vector<FloatType *> inputSpikesOverTime, std::vector<FloatType *> spikesOverTime, FloatType *firingRates, FloatType *inputWeights, FloatType *hiddenWeights, FloatType *outputWeights, FloatType *feedbackWeights, FloatType *targetWeights, vector<FloatType *> targetsOverTime, vector<FloatType *> outputsOverTime, vector<FloatType *> outputErrorsOverTime, vector<FloatType *> errorMaskOverTime, vector<FloatType *> outputErrorFactorOverTime, FloatType *inputGradients, FloatType *inputFiringRateGradients, FloatType *hiddenGradients, FloatType *hiddenFiringRateGradients, FloatType *leakyReadoutGradients, FloatType *inputErrorsOverTime, vector<FloatType *> allInputErrorsOverTime ) : backPropagation( new int[1]), starttime( new int[1]), endtime( new int[1]), numBlocks(batchSize), numThreads(::max(::max(numHidden, numInputs), numOutputs)), numHiddenNeurons(numHidden), numInputNeurons(numInputs), numOutputNeurons(numOutputs), numTimeSteps(numSimulationTimesteps), batchErrors( new FloatType[batchSize]), summedTargets( new FloatType[batchSize]), squaredSummedTargets( new FloatType[batchSize]), numSummedValues( new FloatType[batchSize]), classificationAccuracyCPU( new FloatType[batchSize]), classificationSamplesCPU( new FloatType[batchSize]), useBackPropagation( new GPUArray<int>( backPropagation, 1)), numInputs( new GPUArray<unsigned>( numInputs)), numStandartHidden( new GPUArray<unsigned>( numStandartHidden)), numAdaptiveHidden( new GPUArray<unsigned>( numAdaptiveHidden)), numOutputs( new GPUArray<unsigned>( numOutputs)), batchSize( new GPUArray<unsigned>( batchSize)), numSimulationTimesteps( new GPUArray<unsigned>( numSimulationTimesteps)), startTime( new GPUArray<int>( starttime, 1)), endTime( new GPUArray<int>( endtime, 1)), errorMode( new GPUArray<unsigned>( errorMode)), timeStepLength( new GPUArray<FloatType>(timeStepLength)), spikeThreshold( new GPUArray<FloatType>(spikeThreshold)), refactoryPeriod( new GPUArray<FloatType>(refactoryPeriod)), hiddenDecayFactor( new GPUArray<FloatType>(hiddenDecayFactor)), readoutDecayFactor( new GPUArray<FloatType>(readoutDecayFactor)), adaptationDecayFactor( new GPUArray<FloatType>(adaptationDecayFactor)), thresholdIncreaseConstant( new GPUArray<FloatType>(thresholdIncreaseConstant)), targetFiringRate( new GPUArray<FloatType>(targetFiringRate)), firingRateScallingFactor( new GPUArray<FloatType>(firingRateScallingFactor)), derivativeDumpingFactor( new GPUArray<FloatType>(derivativeDumpingFactor)), inputSpikesOverTime( new GPUArray<FloatType>(inputSpikesOverTime, numInputs * 
numSimulationTimesteps)), spikesOverTime( new GPUArray<FloatType>(spikesOverTime, (numInputs + numHidden) * numSimulationTimesteps)), firingRates( new GPUArray<FloatType>(firingRates, numHidden)), numSpikes( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden + numInputs)), inputWeights( new GPUArray<FloatType>(inputWeights, numInputs * numHidden)), hiddenWeights( new GPUArray<FloatType>(hiddenWeights, numHidden * numHidden)), outputWeights( new GPUArray<FloatType>(outputWeights, numHidden * numOutputs)), feedbackWeights( new GPUArray<FloatType>(feedbackWeights, numHidden * numOutputs)), targetWeights( new GPUArray<FloatType>(targetWeights, numOutputs)), targetsOverTime( new GPUArray<FloatType>(targetsOverTime, numOutputs * numSimulationTimesteps)), outputsOverTime( new GPUArray<FloatType>(outputsOverTime, numOutputs * numSimulationTimesteps)), outputErrorsOverTime( new GPUArray<FloatType>(outputErrorsOverTime, numOutputs * numSimulationTimesteps)), derivativesOverTime( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden * numSimulationTimesteps)), oldDerivativesOverTime( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden * numSimulationTimesteps)), voltageOverTime( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden * numSimulationTimesteps)), timeStepsSinceLastSpikeOverTime(new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden * numSimulationTimesteps)), thresholdAdaptationOverTime( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numAdaptiveHidden * numSimulationTimesteps)), errorMaskOverTime( new GPUArray<FloatType>(errorMaskOverTime, numSimulationTimesteps)), outputErrorFactorOverTime( new GPUArray<FloatType>(outputErrorFactorOverTime, numOutputs * numSimulationTimesteps)), inputGradients( new GPUArray<FloatType>(vector<FloatType *>(batchSize, inputGradients), numInputs * numHidden)), inputFiringRateGradients( new GPUArray<FloatType>(vector<FloatType *>(batchSize, inputFiringRateGradients), numInputs * numHidden)), hiddenGradients( new GPUArray<FloatType>(vector<FloatType *>(batchSize, hiddenGradients), numHidden * numHidden)), hiddenFiringRateGradients( new GPUArray<FloatType>(vector<FloatType *>(batchSize, hiddenFiringRateGradients), numHidden * numHidden)), leakyReadoutGradients( new GPUArray<FloatType>(vector<FloatType *>(batchSize, leakyReadoutGradients), numHidden * numOutputs)), networkError( new GPUArray<FloatType>(batchErrors, batchSize)), networkTargets( new GPUArray<FloatType>(summedTargets, batchSize)), networkSquaredTargets( new GPUArray<FloatType>(squaredSummedTargets, batchSize)), summedValues( new GPUArray<FloatType>(numSummedValues, batchSize)), classificationAccuracy( new GPUArray<FloatType>(classificationAccuracyCPU, batchSize)), classificationSamples( new GPUArray<FloatType>(classificationSamplesCPU, batchSize)), filteredEligibilityTraces( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), (numInputs + numHidden) * numHidden)), filteredSpikes( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numInputs + numHidden)), readoutDecayFilteredSpikes( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), thresholdAdaptation( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), adaptionEligibility( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), (numInputs + numHidden) * numAdaptiveHidden)), derivatives( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), I( new 
GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden + numOutputs)), v( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden + numOutputs)), hiddenSpikes( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), timeStepsSinceLastSpike( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), learnSignals( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), deltaErrorsVoltage( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), deltaErrorsAdaption( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numAdaptiveHidden)), inputErrorsOverTime( new GPUArray<FloatType>(inputErrorsOverTime, numInputs * numSimulationTimesteps)), allInputErrorsOverTime( new GPUArray<FloatType>(allInputErrorsOverTime, numInputs * numSimulationTimesteps)), filteredOutputErrors( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numOutputs)), summedActivation( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numOutputs)) { this->numInputs->copyToDevice(); this->numStandartHidden->copyToDevice(); this->numAdaptiveHidden->copyToDevice(); this->numOutputs->copyToDevice(); this->batchSize->copyToDevice(); this->numSimulationTimesteps->copyToDevice(); this->errorMode->copyToDevice(); this->timeStepLength->copyToDevice(); this->spikeThreshold->copyToDevice(); this->refactoryPeriod->copyToDevice(); this->adaptationDecayFactor->copyToDevice(); this->thresholdIncreaseConstant->copyToDevice(); this->hiddenDecayFactor->copyToDevice(); this->readoutDecayFactor->copyToDevice(); this->targetFiringRate->copyToDevice(); this->firingRateScallingFactor->copyToDevice(); this->derivativeDumpingFactor->copyToDevice(); this->feedbackWeights->copyToDevice(); this->firingRates->copyToDevice(); unsigned memory = 0; memory += this->useBackPropagation->globalMemoryConsumption(); memory += this->numInputs->globalMemoryConsumption(); memory += this->numStandartHidden->globalMemoryConsumption(); memory += this->numAdaptiveHidden->globalMemoryConsumption(); memory += this->numOutputs->globalMemoryConsumption(); memory += this->batchSize->globalMemoryConsumption(); memory += this->numSimulationTimesteps->globalMemoryConsumption(); memory += this->startTime->globalMemoryConsumption(); memory += this->endTime->globalMemoryConsumption(); memory += this->errorMode->globalMemoryConsumption(); memory += this->timeStepLength->globalMemoryConsumption(); memory += this->spikeThreshold->globalMemoryConsumption(); memory += this->refactoryPeriod->globalMemoryConsumption(); memory += this->adaptationDecayFactor->globalMemoryConsumption(); memory += this->thresholdIncreaseConstant->globalMemoryConsumption(); memory += this->hiddenDecayFactor->globalMemoryConsumption(); memory += this->readoutDecayFactor->globalMemoryConsumption(); memory += this->targetFiringRate->globalMemoryConsumption(); memory += this->firingRateScallingFactor->globalMemoryConsumption(); memory += this->derivativeDumpingFactor->globalMemoryConsumption(); memory += this->inputSpikesOverTime->globalMemoryConsumption(); memory += this->spikesOverTime->globalMemoryConsumption(); memory += this->firingRates->globalMemoryConsumption(); memory += this->numSpikes->globalMemoryConsumption(); memory += this->inputWeights->globalMemoryConsumption(); memory += this->hiddenWeights->globalMemoryConsumption(); memory += this->outputWeights->globalMemoryConsumption(); memory += this->feedbackWeights->globalMemoryConsumption(); memory += 
this->targetWeights->globalMemoryConsumption(); memory += this->targetsOverTime->globalMemoryConsumption(); memory += this->outputsOverTime->globalMemoryConsumption(); memory += this->outputErrorsOverTime->globalMemoryConsumption(); memory += this->derivativesOverTime->globalMemoryConsumption(); memory += this->oldDerivativesOverTime->globalMemoryConsumption(); memory += this->voltageOverTime->globalMemoryConsumption(); memory += this->timeStepsSinceLastSpikeOverTime->globalMemoryConsumption(); memory += this->thresholdAdaptationOverTime->globalMemoryConsumption(); memory += this->errorMaskOverTime->globalMemoryConsumption(); memory += this->outputErrorFactorOverTime->globalMemoryConsumption(); memory += this->inputGradients->globalMemoryConsumption(); memory += this->inputFiringRateGradients->globalMemoryConsumption(); memory += this->hiddenGradients->globalMemoryConsumption(); memory += this->hiddenFiringRateGradients->globalMemoryConsumption(); memory += this->leakyReadoutGradients->globalMemoryConsumption(); memory += this->networkError->globalMemoryConsumption(); memory += this->networkTargets->globalMemoryConsumption(); memory += this->networkSquaredTargets->globalMemoryConsumption(); memory += this->summedValues->globalMemoryConsumption(); memory += this->classificationAccuracy->globalMemoryConsumption(); memory += this->classificationSamples->globalMemoryConsumption(); memory += this->filteredEligibilityTraces->globalMemoryConsumption(); memory += this->filteredSpikes->globalMemoryConsumption(); memory += this->readoutDecayFilteredSpikes->globalMemoryConsumption(); memory += this->thresholdAdaptation->globalMemoryConsumption(); memory += this->adaptionEligibility->globalMemoryConsumption(); memory += this->derivatives->globalMemoryConsumption(); memory += this->I->globalMemoryConsumption(); memory += this->v->globalMemoryConsumption(); memory += this->hiddenSpikes->globalMemoryConsumption(); memory += this->timeStepsSinceLastSpike->globalMemoryConsumption(); memory += this->learnSignals->globalMemoryConsumption(); memory += this->deltaErrorsVoltage->globalMemoryConsumption(); memory += this->deltaErrorsAdaption->globalMemoryConsumption(); memory += this->inputErrorsOverTime->globalMemoryConsumption(); memory += this->allInputErrorsOverTime->globalMemoryConsumption(); memory += this->filteredOutputErrors->globalMemoryConsumption(); memory += this->summedActivation->globalMemoryConsumption(); log_str("globalMemoryConsumption: " + itoa(memory), LOG_I); } /* destructor */ LongShortTermMemoryKernelCaller::~LongShortTermMemoryKernelCaller() { delete[] backPropagation; delete[] starttime; delete[] endtime; delete[] batchErrors; delete[] classificationAccuracyCPU; delete[] classificationSamplesCPU; delete useBackPropagation; delete numInputs; delete numStandartHidden; delete numAdaptiveHidden; delete numOutputs; delete batchSize; delete numSimulationTimesteps; delete startTime; delete endTime; delete errorMode; delete timeStepLength; delete spikeThreshold; delete refactoryPeriod; delete hiddenDecayFactor; delete readoutDecayFactor; delete adaptationDecayFactor; delete thresholdIncreaseConstant; delete targetFiringRate; delete firingRateScallingFactor; delete derivativeDumpingFactor; delete inputSpikesOverTime; delete spikesOverTime; delete firingRates; delete numSpikes; delete inputWeights; delete hiddenWeights; delete outputWeights; delete feedbackWeights; delete targetWeights; delete targetsOverTime; delete outputsOverTime; delete outputErrorsOverTime; delete derivativesOverTime; delete 
oldDerivativesOverTime; delete voltageOverTime; delete timeStepsSinceLastSpikeOverTime; delete thresholdAdaptationOverTime; delete errorMaskOverTime; delete outputErrorFactorOverTime; delete inputGradients; delete inputFiringRateGradients; delete hiddenGradients; delete hiddenFiringRateGradients; delete leakyReadoutGradients; delete networkError; delete networkTargets; delete networkSquaredTargets; delete summedValues; delete classificationAccuracy; delete classificationSamples; delete filteredEligibilityTraces; delete filteredSpikes; delete readoutDecayFilteredSpikes; delete thresholdAdaptation; delete adaptionEligibility; delete derivatives; delete I; delete v; delete hiddenSpikes; delete timeStepsSinceLastSpike; delete learnSignals; delete deltaErrorsVoltage; delete deltaErrorsAdaption; delete inputErrorsOverTime; delete allInputErrorsOverTime; delete filteredOutputErrors; delete summedActivation; } /* runs the kernel of this (blocks untill finished) */ void LongShortTermMemoryKernelCaller::runAndWait( int backPropagation, bool inputErrors, int starttime, int endtime ) { static uint64_t cpuTime = 0; static uint64_t gpuTime = 0; static uint64_t t = gettime_usec(); this->starttime[0] = starttime; this->endtime[0] = endtime; cpuTime += gettime_usec() - t; t = gettime_usec(); this->backPropagation[0] = backPropagation; useBackPropagation->copyToDevice(); targetWeights->copyToDevice(); inputWeights->copyToDevice(); hiddenWeights->copyToDevice(); outputWeights->copyToDevice(); startTime->copyToDevice(); endTime->copyToDevice(); if (starttime == 0 && endtime == int(numTimeSteps)) { inputSpikesOverTime->copyToDevice(); targetsOverTime->copyToDevice(); errorMaskOverTime->copyToDevice(); outputErrorFactorOverTime->copyToDevice(); outputErrorsOverTime->copyToDevice(); } else { inputSpikesOverTime->copyToDevice( (starttime % numTimeSteps) * numInputNeurons, (endtime - starttime) * numInputNeurons ); targetsOverTime->copyToDevice( (starttime % numTimeSteps) * numOutputNeurons, (endtime - starttime) * numOutputNeurons ); errorMaskOverTime->copyToDevice( starttime % numTimeSteps, endtime - starttime ); outputErrorFactorOverTime->copyToDevice( (starttime % numTimeSteps) * numOutputNeurons, (endtime - starttime) * numOutputNeurons ); outputErrorsOverTime->copyToDevice( (starttime % numTimeSteps) * numOutputNeurons, (endtime - starttime) * numOutputNeurons ); } log_str("launching longShortTermMemoryKernel<<<" + itoa(numBlocks) + ", " + itoa(numThreads) + ">>>", LOG_DD); hipLaunchKernelGGL(( longShortTermMemoryKernel), dim3(numBlocks), dim3(numThreads), 0, 0, (int *) useBackPropagation->d_ptr(), (unsigned *) numInputs->d_ptr(), (unsigned *) numStandartHidden->d_ptr(), (unsigned *) numAdaptiveHidden->d_ptr(), (unsigned *) numOutputs->d_ptr(), (unsigned *) numSimulationTimesteps->d_ptr(), (int *) startTime->d_ptr(), (int *) endTime->d_ptr(), (unsigned *) errorMode->d_ptr(), (FloatType *) timeStepLength->d_ptr(), (FloatType *) spikeThreshold->d_ptr(), (FloatType *) refactoryPeriod->d_ptr(), (FloatType *) hiddenDecayFactor->d_ptr(), (FloatType *) readoutDecayFactor->d_ptr(), (FloatType *) adaptationDecayFactor->d_ptr(), (FloatType *) thresholdIncreaseConstant->d_ptr(), (FloatType *) targetFiringRate->d_ptr(), (FloatType *) firingRateScallingFactor->d_ptr(), (FloatType *) derivativeDumpingFactor->d_ptr(), (FloatType *) inputSpikesOverTime->d_ptr(), (FloatType *) spikesOverTime->d_ptr(), (FloatType *) firingRates->d_ptr(), (FloatType *) numSpikes->d_ptr(), (FloatType *) inputWeights->d_ptr(), (FloatType *) 
hiddenWeights->d_ptr(), (FloatType *) outputWeights->d_ptr(), (FloatType *) feedbackWeights->d_ptr(), (FloatType *) targetWeights->d_ptr(), (FloatType *) targetsOverTime->d_ptr(), (FloatType *) outputsOverTime->d_ptr(), (FloatType *) outputErrorsOverTime->d_ptr(), (FloatType *) derivativesOverTime->d_ptr(), (FloatType *) oldDerivativesOverTime->d_ptr(), (FloatType *) voltageOverTime->d_ptr(), (FloatType *) timeStepsSinceLastSpikeOverTime->d_ptr(), (FloatType *) thresholdAdaptationOverTime->d_ptr(), (FloatType *) errorMaskOverTime->d_ptr(), (FloatType *) outputErrorFactorOverTime->d_ptr(), (FloatType *) inputGradients->d_ptr(), (FloatType *) inputFiringRateGradients->d_ptr(), (FloatType *) hiddenGradients->d_ptr(), (FloatType *) hiddenFiringRateGradients->d_ptr(), (FloatType *) leakyReadoutGradients->d_ptr(), (FloatType *) networkError->d_ptr(), (FloatType *) networkTargets->d_ptr(), (FloatType *) networkSquaredTargets->d_ptr(), (FloatType *) summedValues->d_ptr(), (FloatType *) classificationAccuracy->d_ptr(), (FloatType *) classificationSamples->d_ptr(), (FloatType *) filteredEligibilityTraces->d_ptr(), (FloatType *) filteredSpikes->d_ptr(), (FloatType *) readoutDecayFilteredSpikes->d_ptr(), (FloatType *) thresholdAdaptation->d_ptr(), (FloatType *) adaptionEligibility->d_ptr(), (FloatType *) derivatives->d_ptr(), (FloatType *) I->d_ptr(), (FloatType *) v->d_ptr(), (FloatType *) hiddenSpikes->d_ptr(), (FloatType *) timeStepsSinceLastSpike->d_ptr(), (FloatType *) learnSignals->d_ptr(), (FloatType *) deltaErrorsVoltage->d_ptr(), (FloatType *) deltaErrorsAdaption->d_ptr(), (FloatType *) inputErrorsOverTime->d_ptr(), (FloatType *) allInputErrorsOverTime->d_ptr(), (FloatType *) filteredOutputErrors->d_ptr(), (FloatType *) summedActivation->d_ptr() ); if (backPropagation == BACKPROPAGATION_OFF || backPropagation == BACKPROPAGATION_FULL || backPropagation == BACKPROPAGATION_BACKWARD) { // TODO parametrisize blocks and threads if (numBlocks > 1 || inputErrors) { hipLaunchKernelGGL(( longShortTermMemoryGradientCollectionKernel), dim3(16), dim3(1024), 0, 0, (unsigned *) numInputs->d_ptr(), (unsigned *) numStandartHidden->d_ptr(), (unsigned *) numAdaptiveHidden->d_ptr(), (unsigned *) numOutputs->d_ptr(), (unsigned *) batchSize->d_ptr(), (unsigned *) numSimulationTimesteps->d_ptr(), (FloatType *) inputGradients->d_ptr(), (FloatType *) inputFiringRateGradients->d_ptr(), (FloatType *) hiddenGradients->d_ptr(), (FloatType *) hiddenFiringRateGradients->d_ptr(), (FloatType *) leakyReadoutGradients->d_ptr(), (FloatType *) inputErrorsOverTime->d_ptr(), (FloatType *) allInputErrorsOverTime->d_ptr() ); } hipLaunchKernelGGL(( longShortTermMemoryFiringRateKernel), dim3(1), dim3(numHiddenNeurons), 0, 0, (unsigned *) numInputs->d_ptr(), (unsigned *) numStandartHidden->d_ptr(), (unsigned *) numAdaptiveHidden->d_ptr(), (unsigned *) batchSize->d_ptr(), (unsigned *) numSimulationTimesteps->d_ptr(), (FloatType *) timeStepLength->d_ptr(), (FloatType *) firingRates->d_ptr(), (FloatType *) numSpikes->d_ptr() ); } if (backPropagation != BACKPROPAGATION_FORWARD) { inputGradients->copyToHost(0); inputFiringRateGradients->copyToHost(0); hiddenGradients->copyToHost(0); hiddenFiringRateGradients->copyToHost(0); leakyReadoutGradients->copyToHost(0); } firingRates->copyToHost(); networkError->copyToHost(); networkTargets->copyToHost(); networkSquaredTargets->copyToHost(); summedValues->copyToHost(); classificationAccuracy->copyToHost(); classificationSamples->copyToHost(); if (starttime == 0 && endtime == int(numTimeSteps)) { 
outputsOverTime->copyToHost(); spikesOverTime->copyToHost(); } else { outputsOverTime->copyToHost( (starttime % numTimeSteps) * numOutputNeurons, (endtime - starttime) * numOutputNeurons ); spikesOverTime->copyToHost( (starttime % numTimeSteps) * (numInputNeurons + numHiddenNeurons), (endtime - starttime) * (numInputNeurons + numHiddenNeurons) ); } if (inputErrors) { if (starttime == 0 && endtime == int(numTimeSteps)) { inputErrorsOverTime->copyToHost(); allInputErrorsOverTime->copyToHost(); } else { inputErrorsOverTime->copyToHost( (starttime % numTimeSteps) * numInputNeurons, (endtime - starttime) * numInputNeurons ); allInputErrorsOverTime->copyToHost( (starttime % numTimeSteps) * numInputNeurons, (endtime - starttime) * numInputNeurons ); } } int error = hipGetLastError(); if (error) log_err("LongShortTermMemoryKernel failed: " + itoa(error), LOG_EE); gpuTime += gettime_usec() - t; t = gettime_usec(); log_str("CPU time: " + ftoa(cpuTime / 1000000.0) + ", GPU time: " + ftoa(gpuTime / 1000000.0), LOG_D); } /* returns the networks spuared error for the last run*/ FloatType LongShortTermMemoryKernelCaller::getSampleSquaredSummedError(unsigned batch) { return batchErrors[batch]; } FloatType LongShortTermMemoryKernelCaller::getSquaredSummedError() { FloatType error = 0; for (unsigned i = 0; i < numBlocks; i++) error += batchErrors[i]; return error; } /* returns the networks summed target for the last run */ FloatType LongShortTermMemoryKernelCaller::getSampleSummedTarget(unsigned batch) { return summedTargets[batch]; } FloatType LongShortTermMemoryKernelCaller::getSummedTarget() { FloatType error = 0; for (unsigned i = 0; i < numBlocks; i++) error += summedTargets[i]; return error; } /* returns the networks squared summed target for the last run */ FloatType LongShortTermMemoryKernelCaller::getSquaredSummedTarget() { FloatType error = 0; for (unsigned i = 0; i < numBlocks; i++) error += squaredSummedTargets[i]; return error; } /* returns the the number of summed values for the last run */ FloatType LongShortTermMemoryKernelCaller::getSampleNumSummedValues(unsigned batch) { return numSummedValues[batch]; } FloatType LongShortTermMemoryKernelCaller::getNumSummedValues() { FloatType error = 0; for (unsigned i = 0; i < numBlocks; i++) error += numSummedValues[i]; return error; } /* returns the networks classification accuracy */ FloatType LongShortTermMemoryKernelCaller::getAccuracy() { FloatType accuracy = 0; FloatType samples = 0; for (unsigned i = 0; i < numBlocks; i++) { accuracy += classificationAccuracyCPU[i]; samples += classificationSamplesCPU[i]; } return accuracy / samples; } /* reload feedback weights and other "not changing" values into device */ void LongShortTermMemoryKernelCaller::reload() { firingRates->copyToDevice(); feedbackWeights->copyToDevice(); } /* sets the current active device */ void LongShortTermMemoryKernelCaller::setDevice(int device) { hipSetDevice(device); }
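/* Illustrative sketch (not part of the file pair): a possible caller-side loop over
 * runAndWait() in fixed-size windows, which is what the starttime/endtime parameters and
 * the partial copyToDevice/copyToHost offsets above are built for. The class name, the
 * runAndWait signature and the BACKPROPAGATION_FORWARD mode are taken from the
 * surrounding code; the chunking loop itself is an assumption about how a caller might
 * use it, not part of the original project. */
#include "LongShortTermMemoryKernelCaller.h"

using namespace SNN;
using namespace Kernels;
using namespace GPU;

void runEpisodeInChunks(LongShortTermMemoryKernelCaller &caller,
                        unsigned numTimeSteps, unsigned chunkLength) {
    for (unsigned t = 0; t < numTimeSteps; t += chunkLength) {
        unsigned end = t + chunkLength;
        if (end > numTimeSteps) end = numTimeSteps;
        // Forward-only window over [t, end); gradients are only collected and copied
        // back to the host in the non-forward modes handled above.
        caller.runAndWait(BACKPROPAGATION_FORWARD, false, (int)t, (int)end);
    }
}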
f46fbc68fe6ad9aa65216f9dadfdec1ae03e7177.cu
#include "LongShortTermMemoryKernelCaller.h" #include "GPUArray.cu" #include "LongShortTermMemoryKernel.cu" #include "LongShortTermMemoryGradientCollectionKernel.cu" #include "LongShortTermMemoryFiringRateKernel.cu" #include "utils.h" using namespace SNN; using namespace Kernels; using namespace GPU; using std::vector; /* constructor */ LongShortTermMemoryKernelCaller::LongShortTermMemoryKernelCaller( unsigned batchSize, unsigned numInputs, unsigned numHidden, unsigned numStandartHidden, unsigned numAdaptiveHidden, unsigned numOutputs, unsigned numSimulationTimesteps, unsigned errorMode, FloatType timeStepLength, FloatType spikeThreshold, FloatType refactoryPeriod, FloatType hiddenDecayFactor, FloatType readoutDecayFactor, FloatType adaptationDecayFactor, FloatType thresholdIncreaseConstant, FloatType targetFiringRate, FloatType firingRateScallingFactor, FloatType derivativeDumpingFactor, vector<FloatType *> inputSpikesOverTime, std::vector<FloatType *> spikesOverTime, FloatType *firingRates, FloatType *inputWeights, FloatType *hiddenWeights, FloatType *outputWeights, FloatType *feedbackWeights, FloatType *targetWeights, vector<FloatType *> targetsOverTime, vector<FloatType *> outputsOverTime, vector<FloatType *> outputErrorsOverTime, vector<FloatType *> errorMaskOverTime, vector<FloatType *> outputErrorFactorOverTime, FloatType *inputGradients, FloatType *inputFiringRateGradients, FloatType *hiddenGradients, FloatType *hiddenFiringRateGradients, FloatType *leakyReadoutGradients, FloatType *inputErrorsOverTime, vector<FloatType *> allInputErrorsOverTime ) : backPropagation( new int[1]), starttime( new int[1]), endtime( new int[1]), numBlocks(batchSize), numThreads(std::max(std::max(numHidden, numInputs), numOutputs)), numHiddenNeurons(numHidden), numInputNeurons(numInputs), numOutputNeurons(numOutputs), numTimeSteps(numSimulationTimesteps), batchErrors( new FloatType[batchSize]), summedTargets( new FloatType[batchSize]), squaredSummedTargets( new FloatType[batchSize]), numSummedValues( new FloatType[batchSize]), classificationAccuracyCPU( new FloatType[batchSize]), classificationSamplesCPU( new FloatType[batchSize]), useBackPropagation( new GPUArray<int>( backPropagation, 1)), numInputs( new GPUArray<unsigned>( numInputs)), numStandartHidden( new GPUArray<unsigned>( numStandartHidden)), numAdaptiveHidden( new GPUArray<unsigned>( numAdaptiveHidden)), numOutputs( new GPUArray<unsigned>( numOutputs)), batchSize( new GPUArray<unsigned>( batchSize)), numSimulationTimesteps( new GPUArray<unsigned>( numSimulationTimesteps)), startTime( new GPUArray<int>( starttime, 1)), endTime( new GPUArray<int>( endtime, 1)), errorMode( new GPUArray<unsigned>( errorMode)), timeStepLength( new GPUArray<FloatType>(timeStepLength)), spikeThreshold( new GPUArray<FloatType>(spikeThreshold)), refactoryPeriod( new GPUArray<FloatType>(refactoryPeriod)), hiddenDecayFactor( new GPUArray<FloatType>(hiddenDecayFactor)), readoutDecayFactor( new GPUArray<FloatType>(readoutDecayFactor)), adaptationDecayFactor( new GPUArray<FloatType>(adaptationDecayFactor)), thresholdIncreaseConstant( new GPUArray<FloatType>(thresholdIncreaseConstant)), targetFiringRate( new GPUArray<FloatType>(targetFiringRate)), firingRateScallingFactor( new GPUArray<FloatType>(firingRateScallingFactor)), derivativeDumpingFactor( new GPUArray<FloatType>(derivativeDumpingFactor)), inputSpikesOverTime( new GPUArray<FloatType>(inputSpikesOverTime, numInputs * numSimulationTimesteps)), spikesOverTime( new GPUArray<FloatType>(spikesOverTime, (numInputs + 
numHidden) * numSimulationTimesteps)), firingRates( new GPUArray<FloatType>(firingRates, numHidden)), numSpikes( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden + numInputs)), inputWeights( new GPUArray<FloatType>(inputWeights, numInputs * numHidden)), hiddenWeights( new GPUArray<FloatType>(hiddenWeights, numHidden * numHidden)), outputWeights( new GPUArray<FloatType>(outputWeights, numHidden * numOutputs)), feedbackWeights( new GPUArray<FloatType>(feedbackWeights, numHidden * numOutputs)), targetWeights( new GPUArray<FloatType>(targetWeights, numOutputs)), targetsOverTime( new GPUArray<FloatType>(targetsOverTime, numOutputs * numSimulationTimesteps)), outputsOverTime( new GPUArray<FloatType>(outputsOverTime, numOutputs * numSimulationTimesteps)), outputErrorsOverTime( new GPUArray<FloatType>(outputErrorsOverTime, numOutputs * numSimulationTimesteps)), derivativesOverTime( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden * numSimulationTimesteps)), oldDerivativesOverTime( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden * numSimulationTimesteps)), voltageOverTime( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden * numSimulationTimesteps)), timeStepsSinceLastSpikeOverTime(new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden * numSimulationTimesteps)), thresholdAdaptationOverTime( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numAdaptiveHidden * numSimulationTimesteps)), errorMaskOverTime( new GPUArray<FloatType>(errorMaskOverTime, numSimulationTimesteps)), outputErrorFactorOverTime( new GPUArray<FloatType>(outputErrorFactorOverTime, numOutputs * numSimulationTimesteps)), inputGradients( new GPUArray<FloatType>(vector<FloatType *>(batchSize, inputGradients), numInputs * numHidden)), inputFiringRateGradients( new GPUArray<FloatType>(vector<FloatType *>(batchSize, inputFiringRateGradients), numInputs * numHidden)), hiddenGradients( new GPUArray<FloatType>(vector<FloatType *>(batchSize, hiddenGradients), numHidden * numHidden)), hiddenFiringRateGradients( new GPUArray<FloatType>(vector<FloatType *>(batchSize, hiddenFiringRateGradients), numHidden * numHidden)), leakyReadoutGradients( new GPUArray<FloatType>(vector<FloatType *>(batchSize, leakyReadoutGradients), numHidden * numOutputs)), networkError( new GPUArray<FloatType>(batchErrors, batchSize)), networkTargets( new GPUArray<FloatType>(summedTargets, batchSize)), networkSquaredTargets( new GPUArray<FloatType>(squaredSummedTargets, batchSize)), summedValues( new GPUArray<FloatType>(numSummedValues, batchSize)), classificationAccuracy( new GPUArray<FloatType>(classificationAccuracyCPU, batchSize)), classificationSamples( new GPUArray<FloatType>(classificationSamplesCPU, batchSize)), filteredEligibilityTraces( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), (numInputs + numHidden) * numHidden)), filteredSpikes( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numInputs + numHidden)), readoutDecayFilteredSpikes( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), thresholdAdaptation( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), adaptionEligibility( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), (numInputs + numHidden) * numAdaptiveHidden)), derivatives( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), I( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden + numOutputs)), v( new 
GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden + numOutputs)), hiddenSpikes( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), timeStepsSinceLastSpike( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), learnSignals( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), deltaErrorsVoltage( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numHidden)), deltaErrorsAdaption( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numAdaptiveHidden)), inputErrorsOverTime( new GPUArray<FloatType>(inputErrorsOverTime, numInputs * numSimulationTimesteps)), allInputErrorsOverTime( new GPUArray<FloatType>(allInputErrorsOverTime, numInputs * numSimulationTimesteps)), filteredOutputErrors( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numOutputs)), summedActivation( new GPUArray<FloatType>(vector<FloatType *>(batchSize, NULL), numOutputs)) { this->numInputs->copyToDevice(); this->numStandartHidden->copyToDevice(); this->numAdaptiveHidden->copyToDevice(); this->numOutputs->copyToDevice(); this->batchSize->copyToDevice(); this->numSimulationTimesteps->copyToDevice(); this->errorMode->copyToDevice(); this->timeStepLength->copyToDevice(); this->spikeThreshold->copyToDevice(); this->refactoryPeriod->copyToDevice(); this->adaptationDecayFactor->copyToDevice(); this->thresholdIncreaseConstant->copyToDevice(); this->hiddenDecayFactor->copyToDevice(); this->readoutDecayFactor->copyToDevice(); this->targetFiringRate->copyToDevice(); this->firingRateScallingFactor->copyToDevice(); this->derivativeDumpingFactor->copyToDevice(); this->feedbackWeights->copyToDevice(); this->firingRates->copyToDevice(); unsigned memory = 0; memory += this->useBackPropagation->globalMemoryConsumption(); memory += this->numInputs->globalMemoryConsumption(); memory += this->numStandartHidden->globalMemoryConsumption(); memory += this->numAdaptiveHidden->globalMemoryConsumption(); memory += this->numOutputs->globalMemoryConsumption(); memory += this->batchSize->globalMemoryConsumption(); memory += this->numSimulationTimesteps->globalMemoryConsumption(); memory += this->startTime->globalMemoryConsumption(); memory += this->endTime->globalMemoryConsumption(); memory += this->errorMode->globalMemoryConsumption(); memory += this->timeStepLength->globalMemoryConsumption(); memory += this->spikeThreshold->globalMemoryConsumption(); memory += this->refactoryPeriod->globalMemoryConsumption(); memory += this->adaptationDecayFactor->globalMemoryConsumption(); memory += this->thresholdIncreaseConstant->globalMemoryConsumption(); memory += this->hiddenDecayFactor->globalMemoryConsumption(); memory += this->readoutDecayFactor->globalMemoryConsumption(); memory += this->targetFiringRate->globalMemoryConsumption(); memory += this->firingRateScallingFactor->globalMemoryConsumption(); memory += this->derivativeDumpingFactor->globalMemoryConsumption(); memory += this->inputSpikesOverTime->globalMemoryConsumption(); memory += this->spikesOverTime->globalMemoryConsumption(); memory += this->firingRates->globalMemoryConsumption(); memory += this->numSpikes->globalMemoryConsumption(); memory += this->inputWeights->globalMemoryConsumption(); memory += this->hiddenWeights->globalMemoryConsumption(); memory += this->outputWeights->globalMemoryConsumption(); memory += this->feedbackWeights->globalMemoryConsumption(); memory += this->targetWeights->globalMemoryConsumption(); memory += this->targetsOverTime->globalMemoryConsumption(); 
memory += this->outputsOverTime->globalMemoryConsumption(); memory += this->outputErrorsOverTime->globalMemoryConsumption(); memory += this->derivativesOverTime->globalMemoryConsumption(); memory += this->oldDerivativesOverTime->globalMemoryConsumption(); memory += this->voltageOverTime->globalMemoryConsumption(); memory += this->timeStepsSinceLastSpikeOverTime->globalMemoryConsumption(); memory += this->thresholdAdaptationOverTime->globalMemoryConsumption(); memory += this->errorMaskOverTime->globalMemoryConsumption(); memory += this->outputErrorFactorOverTime->globalMemoryConsumption(); memory += this->inputGradients->globalMemoryConsumption(); memory += this->inputFiringRateGradients->globalMemoryConsumption(); memory += this->hiddenGradients->globalMemoryConsumption(); memory += this->hiddenFiringRateGradients->globalMemoryConsumption(); memory += this->leakyReadoutGradients->globalMemoryConsumption(); memory += this->networkError->globalMemoryConsumption(); memory += this->networkTargets->globalMemoryConsumption(); memory += this->networkSquaredTargets->globalMemoryConsumption(); memory += this->summedValues->globalMemoryConsumption(); memory += this->classificationAccuracy->globalMemoryConsumption(); memory += this->classificationSamples->globalMemoryConsumption(); memory += this->filteredEligibilityTraces->globalMemoryConsumption(); memory += this->filteredSpikes->globalMemoryConsumption(); memory += this->readoutDecayFilteredSpikes->globalMemoryConsumption(); memory += this->thresholdAdaptation->globalMemoryConsumption(); memory += this->adaptionEligibility->globalMemoryConsumption(); memory += this->derivatives->globalMemoryConsumption(); memory += this->I->globalMemoryConsumption(); memory += this->v->globalMemoryConsumption(); memory += this->hiddenSpikes->globalMemoryConsumption(); memory += this->timeStepsSinceLastSpike->globalMemoryConsumption(); memory += this->learnSignals->globalMemoryConsumption(); memory += this->deltaErrorsVoltage->globalMemoryConsumption(); memory += this->deltaErrorsAdaption->globalMemoryConsumption(); memory += this->inputErrorsOverTime->globalMemoryConsumption(); memory += this->allInputErrorsOverTime->globalMemoryConsumption(); memory += this->filteredOutputErrors->globalMemoryConsumption(); memory += this->summedActivation->globalMemoryConsumption(); log_str("globalMemoryConsumption: " + itoa(memory), LOG_I); } /* destructor */ LongShortTermMemoryKernelCaller::~LongShortTermMemoryKernelCaller() { delete[] backPropagation; delete[] starttime; delete[] endtime; delete[] batchErrors; delete[] classificationAccuracyCPU; delete[] classificationSamplesCPU; delete useBackPropagation; delete numInputs; delete numStandartHidden; delete numAdaptiveHidden; delete numOutputs; delete batchSize; delete numSimulationTimesteps; delete startTime; delete endTime; delete errorMode; delete timeStepLength; delete spikeThreshold; delete refactoryPeriod; delete hiddenDecayFactor; delete readoutDecayFactor; delete adaptationDecayFactor; delete thresholdIncreaseConstant; delete targetFiringRate; delete firingRateScallingFactor; delete derivativeDumpingFactor; delete inputSpikesOverTime; delete spikesOverTime; delete firingRates; delete numSpikes; delete inputWeights; delete hiddenWeights; delete outputWeights; delete feedbackWeights; delete targetWeights; delete targetsOverTime; delete outputsOverTime; delete outputErrorsOverTime; delete derivativesOverTime; delete oldDerivativesOverTime; delete voltageOverTime; delete timeStepsSinceLastSpikeOverTime; delete 
thresholdAdaptationOverTime; delete errorMaskOverTime; delete outputErrorFactorOverTime; delete inputGradients; delete inputFiringRateGradients; delete hiddenGradients; delete hiddenFiringRateGradients; delete leakyReadoutGradients; delete networkError; delete networkTargets; delete networkSquaredTargets; delete summedValues; delete classificationAccuracy; delete classificationSamples; delete filteredEligibilityTraces; delete filteredSpikes; delete readoutDecayFilteredSpikes; delete thresholdAdaptation; delete adaptionEligibility; delete derivatives; delete I; delete v; delete hiddenSpikes; delete timeStepsSinceLastSpike; delete learnSignals; delete deltaErrorsVoltage; delete deltaErrorsAdaption; delete inputErrorsOverTime; delete allInputErrorsOverTime; delete filteredOutputErrors; delete summedActivation; } /* runs the kernel of this (blocks untill finished) */ void LongShortTermMemoryKernelCaller::runAndWait( int backPropagation, bool inputErrors, int starttime, int endtime ) { static uint64_t cpuTime = 0; static uint64_t gpuTime = 0; static uint64_t t = gettime_usec(); this->starttime[0] = starttime; this->endtime[0] = endtime; cpuTime += gettime_usec() - t; t = gettime_usec(); this->backPropagation[0] = backPropagation; useBackPropagation->copyToDevice(); targetWeights->copyToDevice(); inputWeights->copyToDevice(); hiddenWeights->copyToDevice(); outputWeights->copyToDevice(); startTime->copyToDevice(); endTime->copyToDevice(); if (starttime == 0 && endtime == int(numTimeSteps)) { inputSpikesOverTime->copyToDevice(); targetsOverTime->copyToDevice(); errorMaskOverTime->copyToDevice(); outputErrorFactorOverTime->copyToDevice(); outputErrorsOverTime->copyToDevice(); } else { inputSpikesOverTime->copyToDevice( (starttime % numTimeSteps) * numInputNeurons, (endtime - starttime) * numInputNeurons ); targetsOverTime->copyToDevice( (starttime % numTimeSteps) * numOutputNeurons, (endtime - starttime) * numOutputNeurons ); errorMaskOverTime->copyToDevice( starttime % numTimeSteps, endtime - starttime ); outputErrorFactorOverTime->copyToDevice( (starttime % numTimeSteps) * numOutputNeurons, (endtime - starttime) * numOutputNeurons ); outputErrorsOverTime->copyToDevice( (starttime % numTimeSteps) * numOutputNeurons, (endtime - starttime) * numOutputNeurons ); } log_str("launching longShortTermMemoryKernel<<<" + itoa(numBlocks) + ", " + itoa(numThreads) + ">>>", LOG_DD); longShortTermMemoryKernel<<<numBlocks, numThreads>>>( (int *) useBackPropagation->d_ptr(), (unsigned *) numInputs->d_ptr(), (unsigned *) numStandartHidden->d_ptr(), (unsigned *) numAdaptiveHidden->d_ptr(), (unsigned *) numOutputs->d_ptr(), (unsigned *) numSimulationTimesteps->d_ptr(), (int *) startTime->d_ptr(), (int *) endTime->d_ptr(), (unsigned *) errorMode->d_ptr(), (FloatType *) timeStepLength->d_ptr(), (FloatType *) spikeThreshold->d_ptr(), (FloatType *) refactoryPeriod->d_ptr(), (FloatType *) hiddenDecayFactor->d_ptr(), (FloatType *) readoutDecayFactor->d_ptr(), (FloatType *) adaptationDecayFactor->d_ptr(), (FloatType *) thresholdIncreaseConstant->d_ptr(), (FloatType *) targetFiringRate->d_ptr(), (FloatType *) firingRateScallingFactor->d_ptr(), (FloatType *) derivativeDumpingFactor->d_ptr(), (FloatType *) inputSpikesOverTime->d_ptr(), (FloatType *) spikesOverTime->d_ptr(), (FloatType *) firingRates->d_ptr(), (FloatType *) numSpikes->d_ptr(), (FloatType *) inputWeights->d_ptr(), (FloatType *) hiddenWeights->d_ptr(), (FloatType *) outputWeights->d_ptr(), (FloatType *) feedbackWeights->d_ptr(), (FloatType *) 
targetWeights->d_ptr(), (FloatType *) targetsOverTime->d_ptr(), (FloatType *) outputsOverTime->d_ptr(), (FloatType *) outputErrorsOverTime->d_ptr(), (FloatType *) derivativesOverTime->d_ptr(), (FloatType *) oldDerivativesOverTime->d_ptr(), (FloatType *) voltageOverTime->d_ptr(), (FloatType *) timeStepsSinceLastSpikeOverTime->d_ptr(), (FloatType *) thresholdAdaptationOverTime->d_ptr(), (FloatType *) errorMaskOverTime->d_ptr(), (FloatType *) outputErrorFactorOverTime->d_ptr(), (FloatType *) inputGradients->d_ptr(), (FloatType *) inputFiringRateGradients->d_ptr(), (FloatType *) hiddenGradients->d_ptr(), (FloatType *) hiddenFiringRateGradients->d_ptr(), (FloatType *) leakyReadoutGradients->d_ptr(), (FloatType *) networkError->d_ptr(), (FloatType *) networkTargets->d_ptr(), (FloatType *) networkSquaredTargets->d_ptr(), (FloatType *) summedValues->d_ptr(), (FloatType *) classificationAccuracy->d_ptr(), (FloatType *) classificationSamples->d_ptr(), (FloatType *) filteredEligibilityTraces->d_ptr(), (FloatType *) filteredSpikes->d_ptr(), (FloatType *) readoutDecayFilteredSpikes->d_ptr(), (FloatType *) thresholdAdaptation->d_ptr(), (FloatType *) adaptionEligibility->d_ptr(), (FloatType *) derivatives->d_ptr(), (FloatType *) I->d_ptr(), (FloatType *) v->d_ptr(), (FloatType *) hiddenSpikes->d_ptr(), (FloatType *) timeStepsSinceLastSpike->d_ptr(), (FloatType *) learnSignals->d_ptr(), (FloatType *) deltaErrorsVoltage->d_ptr(), (FloatType *) deltaErrorsAdaption->d_ptr(), (FloatType *) inputErrorsOverTime->d_ptr(), (FloatType *) allInputErrorsOverTime->d_ptr(), (FloatType *) filteredOutputErrors->d_ptr(), (FloatType *) summedActivation->d_ptr() ); if (backPropagation == BACKPROPAGATION_OFF || backPropagation == BACKPROPAGATION_FULL || backPropagation == BACKPROPAGATION_BACKWARD) { // TODO parametrisize blocks and threads if (numBlocks > 1 || inputErrors) { longShortTermMemoryGradientCollectionKernel<<<16, 1024>>>( (unsigned *) numInputs->d_ptr(), (unsigned *) numStandartHidden->d_ptr(), (unsigned *) numAdaptiveHidden->d_ptr(), (unsigned *) numOutputs->d_ptr(), (unsigned *) batchSize->d_ptr(), (unsigned *) numSimulationTimesteps->d_ptr(), (FloatType *) inputGradients->d_ptr(), (FloatType *) inputFiringRateGradients->d_ptr(), (FloatType *) hiddenGradients->d_ptr(), (FloatType *) hiddenFiringRateGradients->d_ptr(), (FloatType *) leakyReadoutGradients->d_ptr(), (FloatType *) inputErrorsOverTime->d_ptr(), (FloatType *) allInputErrorsOverTime->d_ptr() ); } longShortTermMemoryFiringRateKernel<<<1, numHiddenNeurons>>>( (unsigned *) numInputs->d_ptr(), (unsigned *) numStandartHidden->d_ptr(), (unsigned *) numAdaptiveHidden->d_ptr(), (unsigned *) batchSize->d_ptr(), (unsigned *) numSimulationTimesteps->d_ptr(), (FloatType *) timeStepLength->d_ptr(), (FloatType *) firingRates->d_ptr(), (FloatType *) numSpikes->d_ptr() ); } if (backPropagation != BACKPROPAGATION_FORWARD) { inputGradients->copyToHost(0); inputFiringRateGradients->copyToHost(0); hiddenGradients->copyToHost(0); hiddenFiringRateGradients->copyToHost(0); leakyReadoutGradients->copyToHost(0); } firingRates->copyToHost(); networkError->copyToHost(); networkTargets->copyToHost(); networkSquaredTargets->copyToHost(); summedValues->copyToHost(); classificationAccuracy->copyToHost(); classificationSamples->copyToHost(); if (starttime == 0 && endtime == int(numTimeSteps)) { outputsOverTime->copyToHost(); spikesOverTime->copyToHost(); } else { outputsOverTime->copyToHost( (starttime % numTimeSteps) * numOutputNeurons, (endtime - starttime) * numOutputNeurons ); 
spikesOverTime->copyToHost( (starttime % numTimeSteps) * (numInputNeurons + numHiddenNeurons), (endtime - starttime) * (numInputNeurons + numHiddenNeurons) ); } if (inputErrors) { if (starttime == 0 && endtime == int(numTimeSteps)) { inputErrorsOverTime->copyToHost(); allInputErrorsOverTime->copyToHost(); } else { inputErrorsOverTime->copyToHost( (starttime % numTimeSteps) * numInputNeurons, (endtime - starttime) * numInputNeurons ); allInputErrorsOverTime->copyToHost( (starttime % numTimeSteps) * numInputNeurons, (endtime - starttime) * numInputNeurons ); } } int error = cudaGetLastError(); if (error) log_err("LongShortTermMemoryKernel failed: " + itoa(error), LOG_EE); gpuTime += gettime_usec() - t; t = gettime_usec(); log_str("CPU time: " + ftoa(cpuTime / 1000000.0) + ", GPU time: " + ftoa(gpuTime / 1000000.0), LOG_D); } /* returns the networks spuared error for the last run*/ FloatType LongShortTermMemoryKernelCaller::getSampleSquaredSummedError(unsigned batch) { return batchErrors[batch]; } FloatType LongShortTermMemoryKernelCaller::getSquaredSummedError() { FloatType error = 0; for (unsigned i = 0; i < numBlocks; i++) error += batchErrors[i]; return error; } /* returns the networks summed target for the last run */ FloatType LongShortTermMemoryKernelCaller::getSampleSummedTarget(unsigned batch) { return summedTargets[batch]; } FloatType LongShortTermMemoryKernelCaller::getSummedTarget() { FloatType error = 0; for (unsigned i = 0; i < numBlocks; i++) error += summedTargets[i]; return error; } /* returns the networks squared summed target for the last run */ FloatType LongShortTermMemoryKernelCaller::getSquaredSummedTarget() { FloatType error = 0; for (unsigned i = 0; i < numBlocks; i++) error += squaredSummedTargets[i]; return error; } /* returns the the number of summed values for the last run */ FloatType LongShortTermMemoryKernelCaller::getSampleNumSummedValues(unsigned batch) { return numSummedValues[batch]; } FloatType LongShortTermMemoryKernelCaller::getNumSummedValues() { FloatType error = 0; for (unsigned i = 0; i < numBlocks; i++) error += numSummedValues[i]; return error; } /* returns the networks classification accuracy */ FloatType LongShortTermMemoryKernelCaller::getAccuracy() { FloatType accuracy = 0; FloatType samples = 0; for (unsigned i = 0; i < numBlocks; i++) { accuracy += classificationAccuracyCPU[i]; samples += classificationSamplesCPU[i]; } return accuracy / samples; } /* reload feedback weights and other "not changing" values into device */ void LongShortTermMemoryKernelCaller::reload() { firingRates->copyToDevice(); feedbackWeights->copyToDevice(); } /* sets the current active device */ void LongShortTermMemoryKernelCaller::setDevice(int device) { cudaSetDevice(device); }
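The partial-window copies in runAndWait above all use the same offset/count arithmetic: the element offset is the wrapped start timestep times the per-timestep element count, and the length is the window size times that same count. Below is a minimal standalone sketch of that arithmetic; the helper name and the example numbers are illustrative and not part of the original source.

#include <cstdio>

// Illustrative helper: compute (offset, count) in elements for copying the
// sub-window [starttime, endtime) of a buffer laid out as
// numTimeSteps * elementsPerStep consecutive values.
static void windowCopyRange(int starttime, int endtime, unsigned numTimeSteps,
                            unsigned elementsPerStep,
                            unsigned *offset, unsigned *count) {
    *offset = (starttime % numTimeSteps) * elementsPerStep;
    *count  = (endtime - starttime) * elementsPerStep;
}

int main() {
    unsigned offset, count;
    // Example: 1000 simulation timesteps, 10 input neurons, window [200, 300).
    windowCopyRange(200, 300, 1000, 10, &offset, &count);
    printf("offset = %u, count = %u\n", offset, count);  // offset = 2000, count = 1000
    return 0;
}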
9c491fe079b603c9d57c52a4778146c5e3120079.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstring> #include <cstdlib> #include <fstream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <omp.h> #include "mpi.h" #include "timer.h" #define NODES 2 #ifdef ENABLE_MULTI_MSG #define MAX_COMM 3000 #else #define MAX_COMM 1 #endif #ifdef ENABLE_HEAVY #define MSG_SIZE 5500000 //! 22MB #else #define MSG_SIZE 100 //! 25xINT #endif #define BLOCKING_COMM_MODE 0 #define NONBLOCKING_COMM_MODE 1 #define NONBLOCKING_SYNC_COMM_MODE 2 /** * Verify both gpu and cpu buffers. * Each element is initialized with the corresponding index. */ __global__ void verifyGPUBuffers(int *send_buffer, int *reduce) { for (int i = 0; i < MSG_SIZE; i++) { if (send_buffer[i] != i) { printf("%d is failed to verified; %d\n", i, send_buffer[i]); *reduce += 1; break; } } if (*reduce > 0) { printf("verifying failed\n"); } } void verifyRecvedBuffers(int *send_buffer, int *reduce) { for (int i = 0; i < MSG_SIZE; i++) { if (send_buffer[i] != i) { printf("%d is failed to verified; %d\n", i, send_buffer[i]); *reduce += 1; break; } } if (*reduce > 0) { printf("verifying failed\n"); } } //! Initialize buffer. Each element is initialized with its index. void initializeBuffers(int *send_buffer) { for (int i = 0; i < MSG_SIZE; i++) { send_buffer[i] = i; } } //! Print buffer. void printBuffer(int *buffer) { for (int i = 0; i < MSG_SIZE; i++) { printf("\tbuffer[%d] = %d ", i, buffer[i]); } printf("\n"); } void run(int *buffer, int rank) { if (rank == 0) { ///< Rank0 node. printf("RANK 0: Initialize msg..\n"); initializeBuffers(buffer); printf("RANK 0: Send msg..\n"); #ifdef PRINT_BUFFER printf("RANK 0: Printing send-msg %d-th\n", i); printBuffer(buffer); #endif //! Send the msg one by one. for (int neigh = 1; neigh < NODES; neigh++) { #ifdef PARALLEL_MSG_MODE #pragma omp parallel for #endif for (int i = 0; i < MAX_COMM; i++) { printf("RANK 0: Sending msg %d-th\n", i); #if (COMM_MODE != BLOCKING_COMM_MODE) MPI_Request req; MPI_Status stat; #endif startTimer(); #if (COMM_MODE == BLOCKING_COMM_MODE) MPI_Send(buffer, MSG_SIZE, MPI_INT, neigh, i, MPI_COMM_WORLD); #elif (COMM_MODE == NONBLOCKING_COMM_MODE) MPI_Isend(buffer, MSG_SIZE, MPI_INT, neigh, i, MPI_COMM_WORLD, &req); #elif (COMM_MODE == NONBLOCKING_SYNC_COMM_MODE) MPI_Issend(buffer, MSG_SIZE, MPI_INT, neigh, i, MPI_COMM_WORLD, &req); #endif printf("RANK 0: Sending msg %d-th to %d: done\n", i, neigh); #if (COMM_MODE != BLOCKING_COMM_MODE) MPI_Wait(&req, &stat); #endif stopTimer(); } } printf("RANK 0: Rank 0 is done\n"); } else { ///< Not rank0 nodes. //! Initialize receiver-side buffers. memset(buffer, 0, sizeof(int)*MSG_SIZE); #ifdef PRINT_BUFFER printf("RANK %d: Print recv buffer before recving\n", rank); printBuffer(buffer); #endif int reduce; //! Receive the msg one by one. for (int i = 0; i < MAX_COMM; i++) { reduce = 0; printf("RANK %d: Tries to recv %d-th msg (size: %d)\n", rank, i, MSG_SIZE); startTimer(); #if COMM_MODE == BLOCKING_COMM_MODE MPI_Recv(buffer, MSG_SIZE, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); #else MPI_Request req; MPI_Status stat; MPI_Irecv(buffer, MSG_SIZE, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &req); MPI_Wait(&req, &stat); #endif stopTimer(); #ifdef PRINT_BUFFER printf("RANK %d: Print recv %d-th msg\n", rank, i); printBuffer(buffer); #endif printf("Starts to verifying.. %d-th msg\n", i); verifyRecvedBuffers(buffer, &reduce); printf("Verified done.. 
%d-th msg\n", i);
        }
        printf("RANK %d: Received msg\n", rank);

        //! Copy to GPU.
        int* gpu_buffer;
        hipMalloc((void **)&gpu_buffer, sizeof(int)*MSG_SIZE);
        hipMemcpy(gpu_buffer, buffer, sizeof(int)*MSG_SIZE, hipMemcpyHostToDevice);

        printf("RANK %d: Verify the copied data from cpu to gpu..\n", rank);
        //! The verification counter must live in device memory: passing the address of a
        //! host stack variable to the kernel would make the device dereference host memory.
        int* gpu_reduce;
        reduce = 0;
        hipMalloc((void **)&gpu_reduce, sizeof(int));
        hipMemcpy(gpu_reduce, &reduce, sizeof(int), hipMemcpyHostToDevice);
        hipLaunchKernelGGL(verifyGPUBuffers, dim3(1), dim3(1), 0, 0, gpu_buffer, gpu_reduce);
        hipDeviceSynchronize();
        hipMemcpy(&reduce, gpu_reduce, sizeof(int), hipMemcpyDeviceToHost);
        hipFree(gpu_reduce);
        hipFree(gpu_buffer);
        printf("RANK %d: All jobs are done\n", rank);
    }

    double elapsedTime = getTimer();
    printf("Elapsed time: %lf\n", elapsedTime);
}

int main(int argc, char** argv) {
    int rank, p;
    int *buffer;
    int supportProvided;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &supportProvided);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    printf("Welcome to MPI world. %d out of %d processors\n", rank, p);
    printf("Number of nodes: %d, Number of msgs: %d, Msg size: %d\n", NODES, MAX_COMM, MSG_SIZE);

#ifdef PARALLEL_MSG_MODE
    omp_set_num_threads(56);
    printf("Parallel mode is enabled. Used number of threads is 56\n");
#endif

    //! Initialize buffers.
    buffer = (int *) malloc(MSG_SIZE*sizeof(int));

    run(buffer, rank);

    MPI_Finalize();
    free(buffer);
    return 0;
}
9c491fe079b603c9d57c52a4778146c5e3120079.cu
#include <iostream> #include <cstring> #include <cstdlib> #include <fstream> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_profiler_api.h> #include <omp.h> #include "mpi.h" #include "timer.h" #define NODES 2 #ifdef ENABLE_MULTI_MSG #define MAX_COMM 3000 #else #define MAX_COMM 1 #endif #ifdef ENABLE_HEAVY #define MSG_SIZE 5500000 //! 22MB #else #define MSG_SIZE 100 //! 25xINT #endif #define BLOCKING_COMM_MODE 0 #define NONBLOCKING_COMM_MODE 1 #define NONBLOCKING_SYNC_COMM_MODE 2 /** * Verify both gpu and cpu buffers. * Each element is initialized with the corresponding index. */ __global__ void verifyGPUBuffers(int *send_buffer, int *reduce) { for (int i = 0; i < MSG_SIZE; i++) { if (send_buffer[i] != i) { printf("%d is failed to verified; %d\n", i, send_buffer[i]); *reduce += 1; break; } } if (*reduce > 0) { printf("verifying failed\n"); } } void verifyRecvedBuffers(int *send_buffer, int *reduce) { for (int i = 0; i < MSG_SIZE; i++) { if (send_buffer[i] != i) { printf("%d is failed to verified; %d\n", i, send_buffer[i]); *reduce += 1; break; } } if (*reduce > 0) { printf("verifying failed\n"); } } //! Initialize buffer. Each element is initialized with its index. void initializeBuffers(int *send_buffer) { for (int i = 0; i < MSG_SIZE; i++) { send_buffer[i] = i; } } //! Print buffer. void printBuffer(int *buffer) { for (int i = 0; i < MSG_SIZE; i++) { printf("\tbuffer[%d] = %d ", i, buffer[i]); } printf("\n"); } void run(int *buffer, int rank) { if (rank == 0) { ///< Rank0 node. printf("RANK 0: Initialize msg..\n"); initializeBuffers(buffer); printf("RANK 0: Send msg..\n"); #ifdef PRINT_BUFFER printf("RANK 0: Printing send-msg %d-th\n", i); printBuffer(buffer); #endif //! Send the msg one by one. for (int neigh = 1; neigh < NODES; neigh++) { #ifdef PARALLEL_MSG_MODE #pragma omp parallel for #endif for (int i = 0; i < MAX_COMM; i++) { printf("RANK 0: Sending msg %d-th\n", i); #if (COMM_MODE != BLOCKING_COMM_MODE) MPI_Request req; MPI_Status stat; #endif startTimer(); #if (COMM_MODE == BLOCKING_COMM_MODE) MPI_Send(buffer, MSG_SIZE, MPI_INT, neigh, i, MPI_COMM_WORLD); #elif (COMM_MODE == NONBLOCKING_COMM_MODE) MPI_Isend(buffer, MSG_SIZE, MPI_INT, neigh, i, MPI_COMM_WORLD, &req); #elif (COMM_MODE == NONBLOCKING_SYNC_COMM_MODE) MPI_Issend(buffer, MSG_SIZE, MPI_INT, neigh, i, MPI_COMM_WORLD, &req); #endif printf("RANK 0: Sending msg %d-th to %d: done\n", i, neigh); #if (COMM_MODE != BLOCKING_COMM_MODE) MPI_Wait(&req, &stat); #endif stopTimer(); } } printf("RANK 0: Rank 0 is done\n"); } else { ///< Not rank0 nodes. //! Initialize receiver-side buffers. memset(buffer, 0, sizeof(int)*MSG_SIZE); #ifdef PRINT_BUFFER printf("RANK %d: Print recv buffer before recving\n", rank); printBuffer(buffer); #endif int reduce; //! Receive the msg one by one. for (int i = 0; i < MAX_COMM; i++) { reduce = 0; printf("RANK %d: Tries to recv %d-th msg (size: %d)\n", rank, i, MSG_SIZE); startTimer(); #if COMM_MODE == BLOCKING_COMM_MODE MPI_Recv(buffer, MSG_SIZE, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); #else MPI_Request req; MPI_Status stat; MPI_Irecv(buffer, MSG_SIZE, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &req); MPI_Wait(&req, &stat); #endif stopTimer(); #ifdef PRINT_BUFFER printf("RANK %d: Print recv %d-th msg\n", rank, i); printBuffer(buffer); #endif printf("Starts to verifying.. %d-th msg\n", i); verifyRecvedBuffers(buffer, &reduce); printf("Verified done.. %d-th msg\n", i); } printf("RANK %d: Received msg\n", rank); //! Copy to GPU. 
int* gpu_buffer;
        cudaMalloc((void **)&gpu_buffer, sizeof(int)*MSG_SIZE);
        cudaMemcpy(gpu_buffer, buffer, sizeof(int)*MSG_SIZE, cudaMemcpyHostToDevice);

        printf("RANK %d: Verify the copied data from cpu to gpu..\n", rank);
        //! The verification counter must live in device memory: passing the address of a
        //! host stack variable to the kernel would make the device dereference host memory.
        int* gpu_reduce;
        reduce = 0;
        cudaMalloc((void **)&gpu_reduce, sizeof(int));
        cudaMemcpy(gpu_reduce, &reduce, sizeof(int), cudaMemcpyHostToDevice);
        verifyGPUBuffers<<<1,1>>>(gpu_buffer, gpu_reduce);
        cudaDeviceSynchronize();
        cudaMemcpy(&reduce, gpu_reduce, sizeof(int), cudaMemcpyDeviceToHost);
        cudaFree(gpu_reduce);
        cudaFree(gpu_buffer);
        printf("RANK %d: All jobs are done\n", rank);
    }

    double elapsedTime = getTimer();
    printf("Elapsed time: %lf\n", elapsedTime);
}

int main(int argc, char** argv) {
    int rank, p;
    int *buffer;
    int supportProvided;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &supportProvided);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    printf("Welcome to MPI world. %d out of %d processors\n", rank, p);
    printf("Number of nodes: %d, Number of msgs: %d, Msg size: %d\n", NODES, MAX_COMM, MSG_SIZE);

#ifdef PARALLEL_MSG_MODE
    omp_set_num_threads(56);
    printf("Parallel mode is enabled. Used number of threads is 56\n");
#endif

    //! Initialize buffers.
    buffer = (int *) malloc(MSG_SIZE*sizeof(int));

    run(buffer, rank);

    MPI_Finalize();
    free(buffer);
    return 0;
}
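The CUDA allocations and copies in this test are otherwise unchecked; a small wrapper macro, sketched below, is one common way to surface failures. The macro name is illustrative and not part of the original source.

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative error-checking wrapper for CUDA runtime calls.
#define CHECK_CUDA(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                 \
                    cudaGetErrorString(err_), __FILE__, __LINE__);        \
        }                                                                 \
    } while (0)

// Example usage with the allocations made in run():
//   CHECK_CUDA(cudaMalloc((void **)&gpu_buffer, sizeof(int)*MSG_SIZE));
//   CHECK_CUDA(cudaMemcpy(gpu_buffer, buffer, sizeof(int)*MSG_SIZE, cudaMemcpyHostToDevice));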
96a79cec913044b5e5de48488a643ae2a251d994.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 16
#define WIDTH_A (2048+16)

__global__ void conv(float * A, float * B, float * C, int width, int height, int w, int h)
{
    __shared__ float shared_1[16];
    __shared__ float shared_0[528];
    int j;
    float sum = 0;
    for (j=0; j<h; j=(j+1))
    {
        int it_2;
        if ((tidx<16))
        {
            shared_0[(tidx+0)]=A(((idy+(( - 1)*j))+h), (idx+(( - 1)*0)));
        }
        shared_0[(tidx+16)]=A(((idy+(( - 1)*j))+h), ((idx+(( - 1)*0))+16));
        __syncthreads();
        if ((tidx<16))
        {
            shared_1[(tidx+0)]=B(j, (0+tidx));
        }
        __syncthreads();
        #pragma unroll
        for (it_2=0; it_2<16; it_2=(it_2+1))
        {
            float a;
            float b;
            a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
            b=shared_1[it_2];
            sum+=(a*b);
        }
        __syncthreads();
        __syncthreads();
    }
    {
        C(idy, idx)=sum;
    }
}
96a79cec913044b5e5de48488a643ae2a251d994.cu
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 16
#define WIDTH_A (2048+16)

__global__ void conv(float * A, float * B, float * C, int width, int height, int w, int h)
{
    __shared__ float shared_1[16];
    __shared__ float shared_0[528];
    int j;
    float sum = 0;
    for (j=0; j<h; j=(j+1))
    {
        int it_2;
        if ((tidx<16))
        {
            shared_0[(tidx+0)]=A(((idy+(( - 1)*j))+h), (idx+(( - 1)*0)));
        }
        shared_0[(tidx+16)]=A(((idy+(( - 1)*j))+h), ((idx+(( - 1)*0))+16));
        __syncthreads();
        if ((tidx<16))
        {
            shared_1[(tidx+0)]=B(j, (0+tidx));
        }
        __syncthreads();
        #pragma unroll
        for (it_2=0; it_2<16; it_2=(it_2+1))
        {
            float a;
            float b;
            a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
            b=shared_1[it_2];
            sum+=(a*b);
        }
        __syncthreads();
        __syncthreads();
    }
    {
        C(idy, idx)=sum;
    }
}
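The conv kernel above hard-codes its geometry through the WIDTH_A/WIDTH_B/WIDTH_C and blockDimX macros. Below is a hedged host-side launch sketch that assumes the kernel is in the same translation unit; the output size (2048 x 2048), the 16 x 16 filter, and the all-ones initialization are assumptions for illustration only, since the original host code is not part of this file.

#include <cuda_runtime.h>
#include <vector>

int main() {
    // Assumed geometry, matching the macros in the kernel:
    //   C: height x 2048, B: 16 x 16, A: (height + 16) rows x (2048 + 16) columns.
    const int width = 2048, height = 2048, w = 16, h = 16;
    const size_t sizeA = (size_t)(height + h) * (2048 + 16);
    const size_t sizeB = (size_t)16 * 16;
    const size_t sizeC = (size_t)height * 2048;

    std::vector<float> hA(sizeA, 1.0f), hB(sizeB, 1.0f), hC(sizeC, 0.0f);

    float *dA, *dB, *dC;
    cudaMalloc(&dA, sizeA * sizeof(float));
    cudaMalloc(&dB, sizeB * sizeof(float));
    cudaMalloc(&dC, sizeC * sizeof(float));
    cudaMemcpy(dA, hA.data(), sizeA * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB.data(), sizeB * sizeof(float), cudaMemcpyHostToDevice);

    // One thread per output element: 512 x 1 threads per block (blockDimX x blockDimY).
    dim3 block(512, 1);
    dim3 grid(2048 / 512, height);
    conv<<<grid, block>>>(dA, dB, dC, width, height, w, h);
    cudaDeviceSynchronize();

    cudaMemcpy(hC.data(), dC, sizeC * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}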
39bfc457eea706155df9791b38e4117ee3f87370.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" #include <iostream> #define BLOCK_SIZE 32 __device__ int d_min(int a, int b) { return a > b?b:a; } __device__ int d_max(int a, int b) { return a > b?a:b; } __global__ void naive_gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < numCols && y < numRows) { float out_val = 0.0f; for (int f_idx = 0; f_idx < filterWidth*filterWidth; ++f_idx) { // clamp the boundaries, initially i tried to set them to zero, but reference does the clamp int neighbor_pix_x = d_min(d_max(x - (filterWidth/2) + (f_idx % filterWidth),0),numCols-1); int neighbor_pix_y = d_min(d_max(y - (filterWidth/2) + (f_idx / filterWidth),0),numRows-1); out_val += filter[f_idx] * inputChannel[neighbor_pix_x + numCols*neighbor_pix_y]; } outputChannel[x + numCols*y] = (unsigned char)out_val; } } __global__ void shared_mem_gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < numCols && y < numRows) { extern __shared__ unsigned char image_blk[]; image_blk[threadIdx.x + threadIdx.y * blockDim.x] = inputChannel[x + numCols*y]; __syncthreads(); float out_val = 0.0f; int left_x = blockIdx.x * blockDim.x; int top_y = blockIdx.y * blockDim.y; int right_x = (blockIdx.x + 1) * blockDim.x; int bottom_y = (blockIdx.y + 1) * blockDim.y; for (int f_idx = 0; f_idx < filterWidth*filterWidth; ++f_idx) { // clamp the boundaries, initially i tried to set them to zero, but reference does the clamp int neighbor_pix_x = d_min(d_max(x - (filterWidth/2) + (f_idx % filterWidth),0),numCols-1); int neighbor_pix_y = d_min(d_max(y - (filterWidth/2) + (f_idx / filterWidth),0),numRows-1); if (neighbor_pix_x >= left_x && neighbor_pix_x < right_x && neighbor_pix_y >= top_y && neighbor_pix_y < bottom_y) { out_val += filter[f_idx] * image_blk[neighbor_pix_x - left_x + BLOCK_SIZE*(neighbor_pix_y - top_y)]; } else { out_val += filter[f_idx] * inputChannel[neighbor_pix_x + numCols*neighbor_pix_y]; } } outputChannel[x + numCols*y] = (unsigned char)out_val; } } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < numCols) && (y < numRows)) { int pix = y*numCols + x; //printf("pixel %d\n", pix); redChannel[pix] = inputImageRGBA[pix].x; greenChannel[pix] = inputImageRGBA[pix].y; blueChannel[pix] = inputImageRGBA[pix].z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { // Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE,1); //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize((numCols + blockSize.x - 1)/blockSize.x,(numRows + blockSize.y - 1)/blockSize.y,1); // Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Call your convolution kernel here 3 times, once for each color channel. 
hipLaunchKernelGGL(( shared_mem_gaussian_blur), dim3(gridSize),dim3(blockSize),sizeof(unsigned char)*BLOCK_SIZE * BLOCK_SIZE, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( shared_mem_gaussian_blur), dim3(gridSize),dim3(blockSize),sizeof(unsigned char)*BLOCK_SIZE * BLOCK_SIZE, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( shared_mem_gaussian_blur), dim3(gridSize),dim3(blockSize),sizeof(unsigned char)*BLOCK_SIZE * BLOCK_SIZE, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } // Free all the memory that we allocated // make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
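A minimal CPU reference of the clamped, weighted blur described in the assignment notes above, for a single separated channel. The function and variable names are illustrative; this mirrors the clamp-to-edge semantics the comments describe, not the course's reference solution verbatim.

#include <algorithm>

// Blur one channel on the CPU with clamp-to-edge boundary handling.
void referenceBlurChannel(const unsigned char* in, unsigned char* out,
                          int numRows, int numCols,
                          const float* filter, int filterWidth) {
    for (int y = 0; y < numRows; ++y) {
        for (int x = 0; x < numCols; ++x) {
            float result = 0.0f;
            for (int fy = 0; fy < filterWidth; ++fy) {
                for (int fx = 0; fx < filterWidth; ++fx) {
                    // Clamp neighbor coordinates to the image bounds.
                    int ny = std::min(std::max(y + fy - filterWidth / 2, 0), numRows - 1);
                    int nx = std::min(std::max(x + fx - filterWidth / 2, 0), numCols - 1);
                    result += filter[fy * filterWidth + fx] * in[ny * numCols + nx];
                }
            }
            out[y * numCols + x] = (unsigned char)result;
        }
    }
}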
39bfc457eea706155df9791b38e4117ee3f87370.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" #include <iostream> #define BLOCK_SIZE 32 __device__ int d_min(int a, int b) { return a > b?b:a; } __device__ int d_max(int a, int b) { return a > b?a:b; } __global__ void naive_gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < numCols && y < numRows) { float out_val = 0.0f; for (int f_idx = 0; f_idx < filterWidth*filterWidth; ++f_idx) { // clamp the boundaries, initially i tried to set them to zero, but reference does the clamp int neighbor_pix_x = d_min(d_max(x - (filterWidth/2) + (f_idx % filterWidth),0),numCols-1); int neighbor_pix_y = d_min(d_max(y - (filterWidth/2) + (f_idx / filterWidth),0),numRows-1); out_val += filter[f_idx] * inputChannel[neighbor_pix_x + numCols*neighbor_pix_y]; } outputChannel[x + numCols*y] = (unsigned char)out_val; } } __global__ void shared_mem_gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < numCols && y < numRows) { extern __shared__ unsigned char image_blk[]; image_blk[threadIdx.x + threadIdx.y * blockDim.x] = inputChannel[x + numCols*y]; __syncthreads(); float out_val = 0.0f; int left_x = blockIdx.x * blockDim.x; int top_y = blockIdx.y * blockDim.y; int right_x = (blockIdx.x + 1) * blockDim.x; int bottom_y = (blockIdx.y + 1) * blockDim.y; for (int f_idx = 0; f_idx < filterWidth*filterWidth; ++f_idx) { // clamp the boundaries, initially i tried to set them to zero, but reference does the clamp int neighbor_pix_x = d_min(d_max(x - (filterWidth/2) + (f_idx % filterWidth),0),numCols-1); int neighbor_pix_y = d_min(d_max(y - (filterWidth/2) + (f_idx / filterWidth),0),numRows-1); if (neighbor_pix_x >= left_x && neighbor_pix_x < right_x && neighbor_pix_y >= top_y && neighbor_pix_y < bottom_y) { out_val += filter[f_idx] * image_blk[neighbor_pix_x - left_x + BLOCK_SIZE*(neighbor_pix_y - top_y)]; } else { out_val += filter[f_idx] * inputChannel[neighbor_pix_x + numCols*neighbor_pix_y]; } } outputChannel[x + numCols*y] = (unsigned char)out_val; } } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < numCols) && (y < numRows)) { int pix = y*numCols + x; //printf("pixel %d\n", pix); redChannel[pix] = inputImageRGBA[pix].x; greenChannel[pix] = inputImageRGBA[pix].y; blueChannel[pix] = inputImageRGBA[pix].z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { // Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE,1); //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize((numCols + blockSize.x - 1)/blockSize.x,(numRows + blockSize.y - 1)/blockSize.y,1); // Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Call your convolution kernel here 3 times, once for each color channel. 
shared_mem_gaussian_blur<<<gridSize,blockSize,sizeof(unsigned char)*BLOCK_SIZE * BLOCK_SIZE>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); shared_mem_gaussian_blur<<<gridSize,blockSize,sizeof(unsigned char)*BLOCK_SIZE * BLOCK_SIZE>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); shared_mem_gaussian_blur<<<gridSize,blockSize,sizeof(unsigned char)*BLOCK_SIZE * BLOCK_SIZE>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } // Free all the memory that we allocated // make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
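As a worked check of the grid-size formula used above (the image size here is an assumed example): for a 1000 x 700 image with BLOCK_SIZE 32, gridSize.x = (1000 + 31) / 32 = 32 and gridSize.y = (700 + 31) / 32 = 22, so the launch covers 1024 x 704 threads and the in-bounds checks inside the kernels discard the excess threads.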
c3bba4ba26c97e7ba130df21c4a52b467af65771.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>

__global__ void dkernel(unsigned *vector, unsigned vectorsize, int N, int v2) {
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < vectorsize)
        vector[id] += N;
    int flag = 0;
    for (int g = 1; g <= v2; g++) {
        if (g % 2 == 0) flag++;
        else flag--;
        __syncthreads(); //barrier here
    }
}

#define BLOCKSIZE 1024

int main(int nn, char *str[]) {
    unsigned long long N = 1024;
    unsigned *vector, *hvector;
    unsigned vec[N];
    for (int i = 0; i < N; i++) {
        vec[i] = i;
    }
    hipMalloc(&vector, N * sizeof(unsigned));
    hipMemcpy(vector, vec, N * sizeof(unsigned), hipMemcpyHostToDevice);
    hvector = (unsigned *)malloc(N * sizeof(unsigned));
    unsigned nblocks = ceil((float)N / BLOCKSIZE);
    // printf("nblocks = %d\n", nblocks);

    //here we run the kernel in a loop which runs 1024 times.
    for (int v1 = 10000; v1 >= 1; v1 -= 1000) {
        for (int j = 1000000; j >= 1; j -= 10000) {
            struct timeval tv1, tv2;
            gettimeofday(&tv1, NULL);
            for (int i = 0; i < j; i++) {
                hipLaunchKernelGGL(dkernel, dim3(nblocks), dim3(BLOCKSIZE), 0, 0, vector, N, i, v1);
                hipMemcpy(hvector, vector, N * sizeof(unsigned), hipMemcpyDeviceToHost);
            }
            gettimeofday(&tv2, NULL);
            printf("%d %d %f\n", j, v1,
                   (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
                   (double) (tv2.tv_sec - tv1.tv_sec));
            hipDeviceSynchronize();
        }
    }
    /*for (unsigned ii = 0; ii < N; ++ii) {
        printf("%4d ", hvector[ii]);
    }*/
    return 0;
}
c3bba4ba26c97e7ba130df21c4a52b467af65771.cu
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>

__global__ void dkernel(unsigned *vector, unsigned vectorsize, int N, int v2)
{
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < vectorsize)
        vector[id] += N;
    int flag = 0;
    for (int g = 1; g <= v2; g++) {
        if (g % 2 == 0)
            flag++;
        else
            flag--;
        __syncthreads(); //barrier here
    }
}

#define BLOCKSIZE 1024

int main(int nn, char *str[])
{
    unsigned long long N = 1024;
    unsigned *vector, *hvector;
    unsigned vec[N];
    for (int i = 0; i < N; i++) {
        vec[i] = i;
    }
    cudaMalloc(&vector, N * sizeof(unsigned));
    cudaMemcpy(vector, vec, N * sizeof(unsigned), cudaMemcpyHostToDevice);
    hvector = (unsigned *)malloc(N * sizeof(unsigned));
    unsigned nblocks = ceil((float)N / BLOCKSIZE);
    // printf("nblocks = %d\n", nblocks);

    //here we run the kernel in a loop which runs 1024 times.
    for (int v1 = 10000; v1 >= 1; v1 -= 1000) {
        for (int j = 1000000; j >= 1; j -= 10000) {
            struct timeval tv1, tv2;
            gettimeofday(&tv1, NULL);
            for (int i = 0; i < j; i++) {
                dkernel<<<nblocks, BLOCKSIZE>>>(vector, N, i, v1);
                cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
            }
            gettimeofday(&tv2, NULL);
            printf("%d %d %f\n", j, v1,
                   (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 +
                   (double)(tv2.tv_sec - tv1.tv_sec));
            cudaDeviceSynchronize();
        }
    }
    /*for (unsigned ii = 0; ii < N; ++ii) {
        printf("%4d ", hvector[ii]);
    }*/
    return 0;
}
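The pair above shows the mechanical translation hipify performs on the CUDA original. As a minimal sketch of the mapping, using a hypothetical kernel k and buffer ptr (names are placeholders, not taken from the files):

// CUDA original (as written in the .cu file):
//   cudaMalloc(&ptr, bytes);
//   cudaMemcpy(ptr, host, bytes, cudaMemcpyHostToDevice);
//   k<<<grid, block>>>(ptr, n);
//   cudaDeviceSynchronize();
//
// HIP translation (as emitted into the .hip file):
//   hipMalloc(&ptr, bytes);
//   hipMemcpy(ptr, host, bytes, hipMemcpyHostToDevice);
//   hipLaunchKernelGGL(k, dim3(grid), dim3(block), 0 /*sharedMemBytes*/, 0 /*stream*/, ptr, n);
//   hipDeviceSynchronize();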
7b05453bd8408b23dc1cba206c21fc2983732c96.hip
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "column_filter.h"

#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
    template void linearColumn<float, unsigned short>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif

#endif /* CUDA_DISABLER */
7b05453bd8408b23dc1cba206c21fc2983732c96.cu
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "column_filter.h"

#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
    template void linearColumn<float, unsigned short>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif

#endif /* CUDA_DISABLER */
5dbb27a073787a9d86f39766875d327ffb0e7b4c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "flexflow/utils/cuda_helper.h"
#include "moe.h"

void DataLoader::load_input(Task const *task,
                            std::vector<PhysicalRegion> const &regions,
                            Context ctx,
                            Runtime *runtime) {
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  SampleIdxs *meta = (SampleIdxs *)task->local_args;
  TensorAccessorR<float, 3> acc_full_input(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  TensorAccessorW<float, 3> acc_batch_input(regions[1],
                                            task->regions[1],
                                            FID_DATA,
                                            ctx,
                                            runtime,
                                            false /*readOutput*/);
  coord_t batch_size =
      acc_batch_input.rect.hi[1] - acc_batch_input.rect.lo[1] + 1;
  coord_t sample_dim =
      acc_batch_input.rect.hi[0] - acc_batch_input.rect.lo[0] + 1;
  // FIXME: currently assume continuous indices
  assert(batch_size == meta->num_samples);
  for (int i = 1; i < batch_size; i++) {
    assert(meta->idxs[i] == meta->idxs[0] + i);
  }
  coord_t start_idx = meta->idxs[0];
  float const *input_zc = acc_full_input.ptr + start_idx * sample_dim;
  hipLaunchKernelGGL(( copy_kernel), dim3(GET_BLOCKS(acc_batch_input.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
      acc_batch_input.ptr, input_zc, acc_batch_input.rect.volume());
  checkCUDA(hipDeviceSynchronize());
}

void DataLoader::load_label(Task const *task,
                            std::vector<PhysicalRegion> const &regions,
                            Context ctx,
                            Runtime *runtime) {
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  SampleIdxs *meta = (SampleIdxs *)task->local_args;
  TensorAccessorR<int, LABEL_DIM + 2> acc_full_label(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  TensorAccessorW<int, LABEL_DIM + 2> acc_batch_label(regions[1],
                                                      task->regions[1],
                                                      FID_DATA,
                                                      ctx,
                                                      runtime,
                                                      false /*readOutput*/);
  coord_t batch_size =
      acc_batch_label.rect.hi[1] - acc_batch_label.rect.lo[1] + 1;
  // FIXME: currently assume continuous indices
  assert(batch_size == meta->num_samples);
  for (int i = 1; i < meta->num_samples; i++) {
    assert(meta->idxs[i] == meta->idxs[0] + i);
  }
  int const *input_zc = acc_full_label.ptr + meta->idxs[0];
  hipLaunchKernelGGL(( copy_kernel), dim3(GET_BLOCKS(acc_batch_label.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
      acc_batch_label.ptr, input_zc, acc_batch_label.rect.volume());
  checkCUDA(hipDeviceSynchronize());
}
5dbb27a073787a9d86f39766875d327ffb0e7b4c.cu
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "flexflow/utils/cuda_helper.h"
#include "moe.h"

void DataLoader::load_input(Task const *task,
                            std::vector<PhysicalRegion> const &regions,
                            Context ctx,
                            Runtime *runtime) {
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  SampleIdxs *meta = (SampleIdxs *)task->local_args;
  TensorAccessorR<float, 3> acc_full_input(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  TensorAccessorW<float, 3> acc_batch_input(regions[1],
                                            task->regions[1],
                                            FID_DATA,
                                            ctx,
                                            runtime,
                                            false /*readOutput*/);
  coord_t batch_size =
      acc_batch_input.rect.hi[1] - acc_batch_input.rect.lo[1] + 1;
  coord_t sample_dim =
      acc_batch_input.rect.hi[0] - acc_batch_input.rect.lo[0] + 1;
  // FIXME: currently assume continuous indices
  assert(batch_size == meta->num_samples);
  for (int i = 1; i < batch_size; i++) {
    assert(meta->idxs[i] == meta->idxs[0] + i);
  }
  coord_t start_idx = meta->idxs[0];
  float const *input_zc = acc_full_input.ptr + start_idx * sample_dim;
  copy_kernel<<<GET_BLOCKS(acc_batch_input.rect.volume()), CUDA_NUM_THREADS>>>(
      acc_batch_input.ptr, input_zc, acc_batch_input.rect.volume());
  checkCUDA(cudaDeviceSynchronize());
}

void DataLoader::load_label(Task const *task,
                            std::vector<PhysicalRegion> const &regions,
                            Context ctx,
                            Runtime *runtime) {
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  SampleIdxs *meta = (SampleIdxs *)task->local_args;
  TensorAccessorR<int, LABEL_DIM + 2> acc_full_label(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  TensorAccessorW<int, LABEL_DIM + 2> acc_batch_label(regions[1],
                                                      task->regions[1],
                                                      FID_DATA,
                                                      ctx,
                                                      runtime,
                                                      false /*readOutput*/);
  coord_t batch_size =
      acc_batch_label.rect.hi[1] - acc_batch_label.rect.lo[1] + 1;
  // FIXME: currently assume continuous indices
  assert(batch_size == meta->num_samples);
  for (int i = 1; i < meta->num_samples; i++) {
    assert(meta->idxs[i] == meta->idxs[0] + i);
  }
  int const *input_zc = acc_full_label.ptr + meta->idxs[0];
  copy_kernel<<<GET_BLOCKS(acc_batch_label.rect.volume()), CUDA_NUM_THREADS>>>(
      acc_batch_label.ptr, input_zc, acc_batch_label.rect.volume());
  checkCUDA(cudaDeviceSynchronize());
}
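GET_BLOCKS, CUDA_NUM_THREADS, and copy_kernel come from flexflow/utils/cuda_helper.h, which is not shown here. A hedged sketch of what such helpers typically look like follows; the exact thread count and any block-count cap FlexFlow uses may differ from what is assumed below.

// Hedged sketch of typical cuda_helper.h-style definitions (assumed values, not FlexFlow's exact ones).
#define CUDA_NUM_THREADS 1024
inline int GET_BLOCKS(size_t n) {
  return (int)((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);  // ceiling division
}

// Grid-stride element-wise copy, matching the shape of the copy_kernel launches above.
template <typename T>
__global__ void copy_kernel(T *dst, const T *src, size_t size) {
  for (size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x; i < size;
       i += (size_t)blockDim.x * gridDim.x) {
    dst[i] = src[i];
  }
}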
7582903d5d7e879f46f907b5883105ed7b9b9e36.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstring>
#include <cstdint>
#include "include/SHA1_cuda_cracker.cuh"

struct block {
    uint32_t a;
    uint32_t b;
    uint32_t c;
    uint32_t d;
    uint32_t e;
};

__constant__ block DEFAULT_DIGEST_BUFFER = {
        0x67452301,
        0xEFCDAB89,
        0x98BADCFE,
        0x10325476,
        0xC3D2E1F0
};

__device__ uint32_t leftRotate(uint32_t x, uint32_t n) {
    return (x << n) | (x >> (32 - n));
}

__device__ uint32_t funI(const uint32_t b, const uint32_t c, const uint32_t d) {
    return b ^ c ^ d;
}

__device__ uint32_t funH(const uint32_t b, const uint32_t c, const uint32_t d) {
    return (b & c) | (b & d) | (c & d);
}

__device__ uint32_t funG(const uint32_t b, const uint32_t c, const uint32_t d) {
    return b ^ c ^ d;
}

__device__ uint32_t funF(const uint32_t b, const uint32_t c, const uint32_t d) {
    return (b & c) | ((~b) & d);
}

__device__ uint32_t swap_bits(uint32_t x) {
    uint8_t *ptr = reinterpret_cast<uint8_t *>(&x);
    return (ptr[3] << 0) | (ptr[2] << 8) | (ptr[1] << 16) | (ptr[0] << 24);
}

#define MAX_WORD_SIZE 10
#define MAX_WORKING_BUFFER_SIZE MAX_WORD_SIZE + 128

__global__ void calculateHashSum(unsigned char *digest_g, char *message, int workingBufferLength, int lenght,
                                 volatile bool *kernel_end) {

    __shared__ uint32_t digest[DIGEST_LENGTH / 4];
    for (int i = threadIdx.x; i < DIGEST_LENGTH / 4; i += blockDim.x)
        digest[i] = reinterpret_cast<uint32_t *>(digest_g)[i];
    __syncthreads();

    __shared__ bool done;
    __shared__ unsigned char workingBuffer[MAX_WORKING_BUFFER_SIZE];

    if (threadIdx.x == 0) {
        memset(workingBuffer, 0, workingBufferLength * 4);

        //init working buffer
        workingBuffer[lenght] = 0b10000000;
        uint64_t tmp = lenght * 8;
        uint32_t l = swap_bits(((uint32_t *) &tmp)[0]);
        uint32_t h = swap_bits(((uint32_t *) &tmp)[1]);
        memcpy(workingBuffer + workingBufferLength * 4 - 8, &h, sizeof(uint32_t));
        memcpy(workingBuffer + workingBufferLength * 4 - 4, &l, sizeof(uint32_t));

        done = false;
    }
    __syncthreads();

    unsigned int numberOfChunks = workingBufferLength / 16;

    do {
        uint32_t w[80];
        block mdBuffer = DEFAULT_DIGEST_BUFFER;
        block stepBuffer;
        uint32_t temp;

        for (unsigned int chunkNum = 0; chunkNum < numberOfChunks; chunkNum++) {
            if (chunkNum == 0) {
                uint32_t X0 = threadIdx.x + (256 * blockIdx.x) +
                              (uint32_t)(reinterpret_cast<uint32_t *>(&workingBuffer)[0]);
                w[0] = swap_bits(X0);
#pragma unroll
                for (int i = 1; i < 16; i++)
                    w[i] = swap_bits(reinterpret_cast<uint32_t *>(&workingBuffer + chunkNum * 16)[i]);
            } else {
#pragma unroll
                for (int i = 0; i < 16; i++)
                    w[i] = swap_bits(reinterpret_cast<uint32_t *>(&workingBuffer + chunkNum * 16)[i]);
            }

#pragma unroll
            for (int i = 16; i <= 79; i++)
                w[i] = leftRotate(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1);

            stepBuffer = mdBuffer;

#pragma unroll
            for (int i = 0; i <= 79; i++) {
                if (i <= 19)
                    temp = leftRotate(stepBuffer.a, 5) + funF(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
                           stepBuffer.e + w[i] + 0x5A827999;
                else if (i <= 39)
                    temp = leftRotate(stepBuffer.a, 5) + funG(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
                           stepBuffer.e + w[i] + 0x6ED9EBA1;
                else if (i <= 59)
                    temp = leftRotate(stepBuffer.a, 5) + funH(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
                           stepBuffer.e + w[i] + 0x8F1BBCDC;
                else
                    temp = leftRotate(stepBuffer.a, 5) + funI(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
                           stepBuffer.e + w[i] + 0xCA62C1D6;

                stepBuffer.e = stepBuffer.d;
                stepBuffer.d = stepBuffer.c;
                stepBuffer.c = leftRotate(stepBuffer.b, 30);
                stepBuffer.b = stepBuffer.a;
                stepBuffer.a = temp;
            }

            mdBuffer.a += stepBuffer.a;
            mdBuffer.b += stepBuffer.b;
            mdBuffer.c += stepBuffer.c;
            mdBuffer.d += stepBuffer.d;
            mdBuffer.e += stepBuffer.e;
        }

        if (mdBuffer.a == reinterpret_cast<uint32_t *>(digest)[0] &&
            mdBuffer.b == reinterpret_cast<uint32_t *>(digest)[1] &&
            mdBuffer.c == reinterpret_cast<uint32_t *>(digest)[2] &&
            mdBuffer.d == reinterpret_cast<uint32_t *>(digest)[3] &&
            mdBuffer.e == reinterpret_cast<uint32_t *>(digest)[4]) {
            memcpy(message, &workingBuffer, lenght * sizeof(char));
            reinterpret_cast<uint32_t *>(message)[0] += (blockIdx.x * 256) | threadIdx.x;
            *kernel_end = true;
        }
        __syncthreads();

        if (!done && threadIdx.x == 0) {
            int i = 2;
            while (i < lenght)
                workingBuffer[i++]++;
            done = true;
            for (int i = 2; i < lenght; i++) {
                if (workingBuffer[i] != 0) {
                    done = false;
                }
            }
        }
        __syncthreads();
    } while (!(done || *kernel_end));
}
7582903d5d7e879f46f907b5883105ed7b9b9e36.cu
#include <cstring>
#include <cstdint>
#include "include/SHA1_cuda_cracker.cuh"

struct block {
    uint32_t a;
    uint32_t b;
    uint32_t c;
    uint32_t d;
    uint32_t e;
};

__constant__ block DEFAULT_DIGEST_BUFFER = {
        0x67452301,
        0xEFCDAB89,
        0x98BADCFE,
        0x10325476,
        0xC3D2E1F0
};

__device__ uint32_t leftRotate(uint32_t x, uint32_t n) {
    return (x << n) | (x >> (32 - n));
}

__device__ uint32_t funI(const uint32_t b, const uint32_t c, const uint32_t d) {
    return b ^ c ^ d;
}

__device__ uint32_t funH(const uint32_t b, const uint32_t c, const uint32_t d) {
    return (b & c) | (b & d) | (c & d);
}

__device__ uint32_t funG(const uint32_t b, const uint32_t c, const uint32_t d) {
    return b ^ c ^ d;
}

__device__ uint32_t funF(const uint32_t b, const uint32_t c, const uint32_t d) {
    return (b & c) | ((~b) & d);
}

__device__ uint32_t swap_bits(uint32_t x) {
    uint8_t *ptr = reinterpret_cast<uint8_t *>(&x);
    return (ptr[3] << 0) | (ptr[2] << 8) | (ptr[1] << 16) | (ptr[0] << 24);
}

#define MAX_WORD_SIZE 10
#define MAX_WORKING_BUFFER_SIZE MAX_WORD_SIZE + 128

__global__ void calculateHashSum(unsigned char *digest_g, char *message, int workingBufferLength, int lenght,
                                 volatile bool *kernel_end) {

    __shared__ uint32_t digest[DIGEST_LENGTH / 4];
    for (int i = threadIdx.x; i < DIGEST_LENGTH / 4; i += blockDim.x)
        digest[i] = reinterpret_cast<uint32_t *>(digest_g)[i];
    __syncthreads();

    __shared__ bool done;
    __shared__ unsigned char workingBuffer[MAX_WORKING_BUFFER_SIZE];

    if (threadIdx.x == 0) {
        memset(workingBuffer, 0, workingBufferLength * 4);

        //init working buffer
        workingBuffer[lenght] = 0b10000000;
        uint64_t tmp = lenght * 8;
        uint32_t l = swap_bits(((uint32_t *) &tmp)[0]);
        uint32_t h = swap_bits(((uint32_t *) &tmp)[1]);
        memcpy(workingBuffer + workingBufferLength * 4 - 8, &h, sizeof(uint32_t));
        memcpy(workingBuffer + workingBufferLength * 4 - 4, &l, sizeof(uint32_t));

        done = false;
    }
    __syncthreads();

    unsigned int numberOfChunks = workingBufferLength / 16;

    do {
        uint32_t w[80];
        block mdBuffer = DEFAULT_DIGEST_BUFFER;
        block stepBuffer;
        uint32_t temp;

        for (unsigned int chunkNum = 0; chunkNum < numberOfChunks; chunkNum++) {
            if (chunkNum == 0) {
                uint32_t X0 = threadIdx.x + (256 * blockIdx.x) +
                              (uint32_t)(reinterpret_cast<uint32_t *>(&workingBuffer)[0]);
                w[0] = swap_bits(X0);
#pragma unroll
                for (int i = 1; i < 16; i++)
                    w[i] = swap_bits(reinterpret_cast<uint32_t *>(&workingBuffer + chunkNum * 16)[i]);
            } else {
#pragma unroll
                for (int i = 0; i < 16; i++)
                    w[i] = swap_bits(reinterpret_cast<uint32_t *>(&workingBuffer + chunkNum * 16)[i]);
            }

#pragma unroll
            for (int i = 16; i <= 79; i++)
                w[i] = leftRotate(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1);

            stepBuffer = mdBuffer;

#pragma unroll
            for (int i = 0; i <= 79; i++) {
                if (i <= 19)
                    temp = leftRotate(stepBuffer.a, 5) + funF(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
                           stepBuffer.e + w[i] + 0x5A827999;
                else if (i <= 39)
                    temp = leftRotate(stepBuffer.a, 5) + funG(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
                           stepBuffer.e + w[i] + 0x6ED9EBA1;
                else if (i <= 59)
                    temp = leftRotate(stepBuffer.a, 5) + funH(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
                           stepBuffer.e + w[i] + 0x8F1BBCDC;
                else
                    temp = leftRotate(stepBuffer.a, 5) + funI(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
                           stepBuffer.e + w[i] + 0xCA62C1D6;

                stepBuffer.e = stepBuffer.d;
                stepBuffer.d = stepBuffer.c;
                stepBuffer.c = leftRotate(stepBuffer.b, 30);
                stepBuffer.b = stepBuffer.a;
                stepBuffer.a = temp;
            }

            mdBuffer.a += stepBuffer.a;
            mdBuffer.b += stepBuffer.b;
            mdBuffer.c += stepBuffer.c;
            mdBuffer.d += stepBuffer.d;
            mdBuffer.e += stepBuffer.e;
        }

        if (mdBuffer.a == reinterpret_cast<uint32_t *>(digest)[0] &&
            mdBuffer.b == reinterpret_cast<uint32_t *>(digest)[1] &&
            mdBuffer.c == reinterpret_cast<uint32_t *>(digest)[2] &&
            mdBuffer.d == reinterpret_cast<uint32_t *>(digest)[3] &&
            mdBuffer.e == reinterpret_cast<uint32_t *>(digest)[4]) {
            memcpy(message, &workingBuffer, lenght * sizeof(char));
            reinterpret_cast<uint32_t *>(message)[0] += (blockIdx.x * 256) | threadIdx.x;
            *kernel_end = true;
        }
        __syncthreads();

        if (!done && threadIdx.x == 0) {
            int i = 2;
            while (i < lenght)
                workingBuffer[i++]++;
            done = true;
            for (int i = 2; i < lenght; i++) {
                if (workingBuffer[i] != 0) {
                    done = false;
                }
            }
        }
        __syncthreads();
    } while (!(done || *kernel_end));
}
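The project's host-side launcher for calculateHashSum is not part of this excerpt. A minimal sketch of how such a launch could look is below; it assumes DIGEST_LENGTH is 20 bytes (SHA-1), a single 512-bit chunk (workingBufferLength of 16 words), and a 256x256 launch, because the kernel derives candidate bytes from (blockIdx.x * 256) | threadIdx.x. The function name, grid size, and buffer sizes are assumptions, not the author's code.

// Hedged host-side sketch (not the project's actual launcher); sizes and names are assumptions.
#include <cuda_runtime.h>

void crack_sketch(const unsigned char h_digest[20], int wordLen) {
    int workingBufferLength = 16;             // one 512-bit chunk, in 32-bit words
    unsigned char *d_digest;
    char *d_message;
    bool *d_done;
    cudaMalloc(&d_digest, 20);
    cudaMalloc(&d_message, wordLen);
    cudaMalloc(&d_done, sizeof(bool));
    cudaMemcpy(d_digest, h_digest, 20, cudaMemcpyHostToDevice);
    cudaMemset(d_done, 0, sizeof(bool));

    // 256 threads per block and 256 blocks cover the first two candidate bytes.
    calculateHashSum<<<256, 256>>>(d_digest, d_message, workingBufferLength, wordLen,
                                   (volatile bool *)d_done);
    cudaDeviceSynchronize();

    char h_message[16] = {0};                 // MAX_WORD_SIZE is 10, so 16 bytes is enough
    cudaMemcpy(h_message, d_message, wordLen, cudaMemcpyDeviceToHost);

    cudaFree(d_digest);
    cudaFree(d_message);
    cudaFree(d_done);
}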