hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
03b017934a73fc8375c257b31d3826e6173d66db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <chrono>
#include <cfloat>
#include <iostream>
#include <string>
#include <vector>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE 100
#endif
#ifndef BLOCK_NUM
#define BLOCK_NUM 10
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 512
#endif
#ifndef LOOP_TIME
#define LOOP_TIME 100000
#endif
#ifndef NTIMES
#define NTIMES 10
#endif
#define cudaErrorCheck(call) \
do { \
hipError_t cuErr = call; \
if (hipSuccess != cuErr) { \
printf("CUDA Error - %s:%d: '%s, %s'\n", __FILE__, __LINE__, hipGetErrorName(cuErr), hipGetErrorString(cuErr)); \
exit(1); \
} \
} while (0)
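// Each kernel below hammers the tail element(s) of `array` for LOOP_TIME iterations
// with a different memory primitive (volatile load/store, atomicExch, or hand-written
// PTX relaxed loads/stores) so the host can compare their timings.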
__global__ void testLDVolatile(unsigned long long int *array, unsigned size, unsigned long long int *array2) {
for (unsigned i = 0; i < LOOP_TIME; i++) {
unsigned long long int element = *(volatile unsigned long long int*)(&array[size - 2]);
unsigned threadID = blockIdx.x * blockDim.x + threadIdx.x;
array2[threadID] = element;
}
}
__global__ void testSTVolatile(unsigned long long int *array, unsigned size) {
for (unsigned i = 0; i < LOOP_TIME; i++) {
*(volatile unsigned long long int*)(&array[size -1]) = threadIdx.x;
}
}
__global__ void testAtomicExch(unsigned long long int *array, unsigned size) {
for (unsigned i = 0; i < LOOP_TIME; i++) {
atomicExch(&array[size -1], threadIdx.x);
}
}
__global__ void testLDSTVolatile(unsigned long long int *array, unsigned size, unsigned long long int *array2) {
for (unsigned i = 0; i < LOOP_TIME; i++) {
unsigned long long int element = *(volatile unsigned long long int*)(&array[size - 1]);
unsigned threadID = blockIdx.x * blockDim.x + threadIdx.x;
array2[threadID] = element;
*(volatile unsigned long long int*)(&array[size -1]) = threadIdx.x;
}
}
__global__ void testLDAtomic(unsigned long long int *array, unsigned size, unsigned long long int *array2) {
for (unsigned i = 0; i < LOOP_TIME; i++) {
unsigned long long int element = *(volatile unsigned long long int*)(&array[size - 1]);
unsigned threadID = blockIdx.x * blockDim.x + threadIdx.x;
array2[threadID] = element;
atomicExch(&array[size -1], threadIdx.x);
}
}
__global__ void testSTRelaxed(unsigned long long int *array, unsigned size) {
#if __CUDA_ARCH__ < 700
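// Pre-Volta path: 50 unrolled st.volatile.global stores to the same address per
// loop trip; r7 counts up from -100000 in steps of 50, giving LOOP_TIME stores total.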
asm(
"{\n\t"
" .reg .pred p<2>;\n\t"
" .reg .b32 r<8>;\n\t"
" .reg .b64 rd<6>;\n\t"
" mov.u64 rd3, %0;\n\t"
" mov.u32 r4, %1;\n\t"
" cvta.to.global.u64 rd4, rd3;\n\t"
" mov.u32 r5, %tid.x;\n\t"
" cvt.u64.u32 rd1, r5;\n\t"
" add.s32 r6, r4, -1;\n\t"
" mul.wide.u32 rd5, r6, 8;\n\t"
" add.s64 rd2, rd4, rd5;\n\t"
" mov.u32 r7, -100000;\n\t"
" BB1_1:\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" add.s32 r7, r7, 50;\n\t"
" setp.ne.s32 p1, r7, 0;\n\t"
" @p1 bra BB1_1;\n\t"
" ret;\n\t"
"}"
:: "l"(array), "r"(size) : "memory");
#else
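// sm_70+ path: the same unrolled loop, but using relaxed GPU-scope stores
// (st.global.relaxed.gpu) instead of volatile stores.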
asm(
"{\n\t"
" .reg .pred p<2>;\n\t"
" .reg .b32 r<8>;\n\t"
" .reg .b64 rd<6>;\n\t"
" mov.u64 rd3, %0;\n\t"
" mov.u32 r4, %1;\n\t"
" cvta.to.global.u64 rd4, rd3;\n\t"
" mov.u32 r5, %tid.x;\n\t"
" cvt.u64.u32 rd1, r5;\n\t"
" add.s32 r6, r4, -1;\n\t"
" mul.wide.u32 rd5, r6, 8;\n\t"
" add.s64 rd2, rd4, rd5;\n\t"
" mov.u32 r7, -100000;\n\t"
" BB1_1:\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" add.s32 r7, r7, 50;\n\t"
" setp.ne.s32 p1, r7, 0;\n\t"
" @p1 bra BB1_1;\n\t"
" ret;\n\t"
"}"
:: "l"(array), "r"(size) : "memory");
#endif
}
__global__ void testLDSTRelaxed(unsigned long long int *array, unsigned size, unsigned long long int *array2) {
#if __CUDA_ARCH__ < 700
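// Pre-Volta path: 32 unrolled triples of (volatile load, plain store to array2,
// volatile store) per loop trip; r10 counts up from -100000 in steps of 32.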
asm(
"{\n\t"
" .reg .pred p<2>;\n\t"
" .reg .b32 r<11>;\n\t"
" .reg .b64 rd<42>;\n\t"
" mov.u64 rd4, %0;\n\t"
" mov.u32 r4, %1;\n\t"
" mov.u64 rd5, %2;\n\t"
" cvta.to.global.u64 rd6, rd5;\n\t"
" add.s32 r5, r4, -1;\n\t"
" cvta.to.global.u64 rd7, rd4;\n\t"
" mul.wide.u32 rd8, r5, 8;\n\t"
" add.s64 rd1, rd7, rd8;\n\t"
" mov.u32 r6, %ntid.x;\n\t"
" mov.u32 r7, %ctaid.x;\n\t"
" mov.u32 r8, %tid.x;\n\t"
" mad.lo.s32 r9, r6, r7, r8;\n\t"
" mul.wide.u32 rd9, r9, 8;\n\t"
" add.s64 rd2, rd6, rd9;\n\t"
" cvt.u64.u32 rd3, r8;\n\t"
" mov.u32 r10, -100000;\n\t"
" BB3_1:\n\t"
" ld.volatile.global.u64 rd10, [rd1];\n\t"
" st.global.u64 [rd2], rd10;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd11, [rd1];\n\t"
" st.global.u64 [rd2], rd11;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd12, [rd1];\n\t"
" st.global.u64 [rd2], rd12;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd13, [rd1];\n\t"
" st.global.u64 [rd2], rd13;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd14, [rd1];\n\t"
" st.global.u64 [rd2], rd14;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd15, [rd1];\n\t"
" st.global.u64 [rd2], rd15;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd16, [rd1];\n\t"
" st.global.u64 [rd2], rd16;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd17, [rd1];\n\t"
" st.global.u64 [rd2], rd17;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd18, [rd1];\n\t"
" st.global.u64 [rd2], rd18;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd19, [rd1];\n\t"
" st.global.u64 [rd2], rd19;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd20, [rd1];\n\t"
" st.global.u64 [rd2], rd20;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd21, [rd1];\n\t"
" st.global.u64 [rd2], rd21;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd22, [rd1];\n\t"
" st.global.u64 [rd2], rd22;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd23, [rd1];\n\t"
" st.global.u64 [rd2], rd23;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd24, [rd1];\n\t"
" st.global.u64 [rd2], rd24;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd25, [rd1];\n\t"
" st.global.u64 [rd2], rd25;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd26, [rd1];\n\t"
" st.global.u64 [rd2], rd26;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd27, [rd1];\n\t"
" st.global.u64 [rd2], rd27;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd28, [rd1];\n\t"
" st.global.u64 [rd2], rd28;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd29, [rd1];\n\t"
" st.global.u64 [rd2], rd29;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd30, [rd1];\n\t"
" st.global.u64 [rd2], rd30;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd31, [rd1];\n\t"
" st.global.u64 [rd2], rd31;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd32, [rd1];\n\t"
" st.global.u64 [rd2], rd32;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd33, [rd1];\n\t"
" st.global.u64 [rd2], rd33;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd34, [rd1];\n\t"
" st.global.u64 [rd2], rd34;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd35, [rd1];\n\t"
" st.global.u64 [rd2], rd35;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd36, [rd1];\n\t"
" st.global.u64 [rd2], rd36;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd37, [rd1];\n\t"
" st.global.u64 [rd2], rd37;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd38, [rd1];\n\t"
" st.global.u64 [rd2], rd38;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd39, [rd1];\n\t"
" st.global.u64 [rd2], rd39;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd40, [rd1];\n\t"
" st.global.u64 [rd2], rd40;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd41, [rd1];\n\t"
" st.global.u64 [rd2], rd41;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" add.s32 r10, r10, 32;\n\t"
" setp.ne.s32 p1, r10, 0;\n\t"
" @p1 bra BB3_1;\n\t"
" ret;\n\t"
"}"
:: "l"(array), "r"(size), "l"(array2) : "memory");
#else
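// sm_70+ path: the same 32 unrolled triples using relaxed GPU-scope loads and
// stores (ld/st.global.relaxed.gpu).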
asm(
"{\n\t"
" .reg .pred p<2>;\n\t"
" .reg .b32 r<11>;\n\t"
" .reg .b64 rd<42>;\n\t"
" mov.u64 rd4, %0;\n\t"
" mov.u32 r4, %1;\n\t"
" mov.u64 rd5, %2;\n\t"
" cvta.to.global.u64 rd6, rd5;\n\t"
" add.s32 r5, r4, -1;\n\t"
" cvta.to.global.u64 rd7, rd4;\n\t"
" mul.wide.u32 rd8, r5, 8;\n\t"
" add.s64 rd1, rd7, rd8;\n\t"
" mov.u32 r6, %ntid.x;\n\t"
" mov.u32 r7, %ctaid.x;\n\t"
" mov.u32 r8, %tid.x;\n\t"
" mad.lo.s32 r9, r6, r7, r8;\n\t"
" mul.wide.u32 rd9, r9, 8;\n\t"
" add.s64 rd2, rd6, rd9;\n\t"
" cvt.u64.u32 rd3, r8;\n\t"
" mov.u32 r10, -100000;\n\t"
" BB3_1:\n\t"
" ld.global.relaxed.gpu.u64 rd10, [rd1];\n\t"
" st.global.u64 [rd2], rd10;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd11, [rd1];\n\t"
" st.global.u64 [rd2], rd11;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd12, [rd1];\n\t"
" st.global.u64 [rd2], rd12;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd13, [rd1];\n\t"
" st.global.u64 [rd2], rd13;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd14, [rd1];\n\t"
" st.global.u64 [rd2], rd14;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd15, [rd1];\n\t"
" st.global.u64 [rd2], rd15;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd16, [rd1];\n\t"
" st.global.u64 [rd2], rd16;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd17, [rd1];\n\t"
" st.global.u64 [rd2], rd17;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd18, [rd1];\n\t"
" st.global.u64 [rd2], rd18;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd19, [rd1];\n\t"
" st.global.u64 [rd2], rd19;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd20, [rd1];\n\t"
" st.global.u64 [rd2], rd20;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd21, [rd1];\n\t"
" st.global.u64 [rd2], rd21;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd22, [rd1];\n\t"
" st.global.u64 [rd2], rd22;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd23, [rd1];\n\t"
" st.global.u64 [rd2], rd23;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd24, [rd1];\n\t"
" st.global.u64 [rd2], rd24;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd25, [rd1];\n\t"
" st.global.u64 [rd2], rd25;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd26, [rd1];\n\t"
" st.global.u64 [rd2], rd26;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd27, [rd1];\n\t"
" st.global.u64 [rd2], rd27;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd28, [rd1];\n\t"
" st.global.u64 [rd2], rd28;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd29, [rd1];\n\t"
" st.global.u64 [rd2], rd29;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd30, [rd1];\n\t"
" st.global.u64 [rd2], rd30;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd31, [rd1];\n\t"
" st.global.u64 [rd2], rd31;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd32, [rd1];\n\t"
" st.global.u64 [rd2], rd32;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd33, [rd1];\n\t"
" st.global.u64 [rd2], rd33;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd34, [rd1];\n\t"
" st.global.u64 [rd2], rd34;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd35, [rd1];\n\t"
" st.global.u64 [rd2], rd35;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd36, [rd1];\n\t"
" st.global.u64 [rd2], rd36;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd37, [rd1];\n\t"
" st.global.u64 [rd2], rd37;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd38, [rd1];\n\t"
" st.global.u64 [rd2], rd38;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd39, [rd1];\n\t"
" st.global.u64 [rd2], rd39;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd40, [rd1];\n\t"
" st.global.u64 [rd2], rd40;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd41, [rd1];\n\t"
" st.global.u64 [rd2], rd41;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" add.s32 r10, r10, 32;\n\t"
" setp.ne.s32 p1, r10, 0;\n\t"
" @p1 bra BB3_1;\n\t"
" ret;\n\t"
"}"
:: "l"(array), "r"(size), "l"(array2) : "memory");
#endif
}
int main(int argc, char *argv[]) {
// Output Device Info
int num_devices;
int device_num;
int device_major;
hipGetDeviceCount(&num_devices);
if (argc < 2) {
for (unsigned i = 0; i < num_devices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
std::cout << "Device Number: " << i << std::endl;
std::cout << " Device Name: " << prop.name << std::endl;
std::cout << " Compute Capacity: " << prop.major << "." << prop.minor << std::endl;
}
exit(0);
} else {
device_num = std::stoi(argv[1], nullptr, 10);
if (device_num >= num_devices) {
std::cout << "Only " << num_devices << " devices is available on this machine" << std::endl;
exit(1);
} else {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, device_num);
std::cout << "Device Number: " << device_num << std::endl;
std::cout << " Device Name: " << prop.name << std::endl;
std::cout << " Compute Capacity: " << prop.major << "." << prop.minor << std::endl;
device_major = prop.major;
hipSetDevice(device_num);
}
}
std::vector<std::string> names = {"ld.volatile", "st.volatile", "atomicExch", "ldst.volatile", "ldatomic"};
if (device_major >= 7) {
names.push_back("st.relaxed");
names.push_back("ldst.relaxed");
}
double** execution_times = new double*[names.size()]();
for (unsigned i = 0; i < names.size(); i++) {
execution_times[i] = new double[NTIMES + 1]();
}
unsigned long long int *array1_host, *array2_host, *array1_device, *array2_device;
unsigned thread_num = BLOCK_NUM * BLOCK_SIZE;
unsigned array1_size_in_byte = sizeof(unsigned long long int) * ARRAY_SIZE;
unsigned array2_size_in_byte = sizeof(unsigned long long int) * thread_num;
array1_host = (unsigned long long int*)malloc(array1_size_in_byte);
array2_host = (unsigned long long int*)malloc(array2_size_in_byte);
for (unsigned i = 0; i < ARRAY_SIZE; i++) {
array1_host[i] = i;
}
cudaErrorCheck(hipMalloc(&array1_device, array1_size_in_byte));
cudaErrorCheck(hipMalloc(&array2_device, array2_size_in_byte));
cudaErrorCheck(hipMemcpy(array1_device, array1_host, array1_size_in_byte, hipMemcpyHostToDevice));
for (unsigned i = 0; i <= NTIMES; i++) {
auto start_ld_volatile = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( testLDVolatile), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, array1_device, ARRAY_SIZE, array2_device);
cudaErrorCheck(hipGetLastError());
cudaErrorCheck(hipDeviceSynchronize());
auto end_ld_volatile = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_ld_volatile = end_ld_volatile - start_ld_volatile;
execution_times[0][i] = elapsed_time_ld_volatile.count();
auto start_st_volatile = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( testSTVolatile), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, array1_device, ARRAY_SIZE);
cudaErrorCheck(hipGetLastError());
cudaErrorCheck(hipDeviceSynchronize());
auto end_st_volatile = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_st_volatile = end_st_volatile - start_st_volatile;
execution_times[1][i] = elapsed_time_st_volatile.count();
auto start_atomic = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( testAtomicExch), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, array1_device, ARRAY_SIZE);
cudaErrorCheck(hipGetLastError());
cudaErrorCheck(hipDeviceSynchronize());
auto end_atomic = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_atomic = end_atomic - start_atomic;
execution_times[2][i] = elapsed_time_atomic.count();
auto start_ldst_volatile = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( testLDSTVolatile), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, array1_device, ARRAY_SIZE, array2_device);
cudaErrorCheck(hipGetLastError());
cudaErrorCheck(hipDeviceSynchronize());
auto end_ldst_volatile = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_ldst_volatile = end_ldst_volatile - start_ldst_volatile;
execution_times[3][i] = elapsed_time_ldst_volatile.count();
auto start_ldatomic = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( testLDAtomic), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, array1_device, ARRAY_SIZE, array2_device);
cudaErrorCheck(hipGetLastError());
cudaErrorCheck(hipDeviceSynchronize());
auto end_ldatomic = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_ldatomic = end_ldatomic - start_ldatomic;
execution_times[4][i] = elapsed_time_ldatomic.count();
if (device_major >= 7) {
auto start_st_relaxed = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( testSTRelaxed), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, array1_device, ARRAY_SIZE);
cudaErrorCheck(hipGetLastError());
cudaErrorCheck(hipDeviceSynchronize());
auto end_st_relaxed = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_st_relaxed = end_st_relaxed - start_st_relaxed;
execution_times[5][i] = elapsed_time_st_relaxed.count();
auto start_ldst_relaxed = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( testLDSTRelaxed), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, array1_device, ARRAY_SIZE, array2_device);
cudaErrorCheck(hipGetLastError());
cudaErrorCheck(hipDeviceSynchronize());
auto end_ldst_relaxed = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_ldst_relaxed = end_ldst_relaxed - start_ldst_relaxed;
execution_times[6][i] = elapsed_time_ldst_relaxed.count();
}
}
cudaErrorCheck(hipMemcpy(array1_host, array1_device, array1_size_in_byte, hipMemcpyDeviceToHost));
cudaErrorCheck(hipMemcpy(array2_host, array2_device, array2_size_in_byte, hipMemcpyDeviceToHost));
//verify
//for (unsigned i = 0; i < thread_num; i++) {
//if (array2_host[i] != array1_host[ARRAY_SIZE - 2]) {
//std::cout << "Mismatch at " << i << ", value is " << array2_host[i] << " " << array1_host[ARRAY_SIZE - 2] << std::endl;
//}
//}
for (unsigned i = 0; i < names.size(); i++) {
std::cout << names[i];
double average = 0;
double min = DBL_MAX;
double max = 0;
for (unsigned j = 1; j <= NTIMES; j++) {
if (min > execution_times[i][j]) {
min = execution_times[i][j];
}
if (max < execution_times[i][j]) { // check max independently so the largest sample is never skipped
max = execution_times[i][j];
}
average += execution_times[i][j];
}
average /= NTIMES;
std::cout << " Average " << average << " Min " << min << " Max " << max << std::endl;
}
return 0;
}
| 03b017934a73fc8375c257b31d3826e6173d66db.cu | #include <cstdio>
#include <cstdlib>
#include <chrono>
#include <cfloat>
#include <iostream>
#include <string>
#include <vector>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE 100
#endif
#ifndef BLOCK_NUM
#define BLOCK_NUM 10
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 512
#endif
#ifndef LOOP_TIME
#define LOOP_TIME 100000
#endif
#ifndef NTIMES
#define NTIMES 10
#endif
#define cudaErrorCheck(call) \
do { \
cudaError_t cuErr = call; \
if (cudaSuccess != cuErr) { \
printf("CUDA Error - %s:%d: '%s, %s'\n", __FILE__, __LINE__, cudaGetErrorName(cuErr), cudaGetErrorString(cuErr)); \
exit(1); \
} \
} while (0)
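// Each kernel below hammers the tail element(s) of `array` for LOOP_TIME iterations
// with a different memory primitive (volatile load/store, atomicExch, or hand-written
// PTX relaxed loads/stores) so the host can compare their timings.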
__global__ void testLDVolatile(unsigned long long int *array, unsigned size, unsigned long long int *array2) {
for (unsigned i = 0; i < LOOP_TIME; i++) {
unsigned long long int element = *(volatile unsigned long long int*)(&array[size - 2]);
unsigned threadID = blockIdx.x * blockDim.x + threadIdx.x;
array2[threadID] = element;
}
}
__global__ void testSTVolatile(unsigned long long int *array, unsigned size) {
for (unsigned i = 0; i < LOOP_TIME; i++) {
*(volatile unsigned long long int*)(&array[size -1]) = threadIdx.x;
}
}
__global__ void testAtomicExch(unsigned long long int *array, unsigned size) {
for (unsigned i = 0; i < LOOP_TIME; i++) {
atomicExch(&array[size -1], threadIdx.x);
}
}
__global__ void testLDSTVolatile(unsigned long long int *array, unsigned size, unsigned long long int *array2) {
for (unsigned i = 0; i < LOOP_TIME; i++) {
unsigned long long int element = *(volatile unsigned long long int*)(&array[size - 1]);
unsigned threadID = blockIdx.x * blockDim.x + threadIdx.x;
array2[threadID] = element;
*(volatile unsigned long long int*)(&array[size -1]) = threadIdx.x;
}
}
__global__ void testLDAtomic(unsigned long long int *array, unsigned size, unsigned long long int *array2) {
for (unsigned i = 0; i < LOOP_TIME; i++) {
unsigned long long int element = *(volatile unsigned long long int*)(&array[size - 1]);
unsigned threadID = blockIdx.x * blockDim.x + threadIdx.x;
array2[threadID] = element;
atomicExch(&array[size -1], threadIdx.x);
}
}
__global__ void testSTRelaxed(unsigned long long int *array, unsigned size) {
#if __CUDA_ARCH__ < 700
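// Pre-Volta path: 50 unrolled st.volatile.global stores to the same address per
// loop trip; r7 counts up from -100000 in steps of 50, giving LOOP_TIME stores total.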
asm(
"{\n\t"
" .reg .pred p<2>;\n\t"
" .reg .b32 r<8>;\n\t"
" .reg .b64 rd<6>;\n\t"
" mov.u64 rd3, %0;\n\t"
" mov.u32 r4, %1;\n\t"
" cvta.to.global.u64 rd4, rd3;\n\t"
" mov.u32 r5, %tid.x;\n\t"
" cvt.u64.u32 rd1, r5;\n\t"
" add.s32 r6, r4, -1;\n\t"
" mul.wide.u32 rd5, r6, 8;\n\t"
" add.s64 rd2, rd4, rd5;\n\t"
" mov.u32 r7, -100000;\n\t"
" BB1_1:\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" st.volatile.global.u64 [rd2], rd1;\n\t"
" add.s32 r7, r7, 50;\n\t"
" setp.ne.s32 p1, r7, 0;\n\t"
" @p1 bra BB1_1;\n\t"
" ret;\n\t"
"}"
:: "l"(array), "r"(size) : "memory");
#else
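// sm_70+ path: the same unrolled loop, but using relaxed GPU-scope stores
// (st.global.relaxed.gpu) instead of volatile stores.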
asm(
"{\n\t"
" .reg .pred p<2>;\n\t"
" .reg .b32 r<8>;\n\t"
" .reg .b64 rd<6>;\n\t"
" mov.u64 rd3, %0;\n\t"
" mov.u32 r4, %1;\n\t"
" cvta.to.global.u64 rd4, rd3;\n\t"
" mov.u32 r5, %tid.x;\n\t"
" cvt.u64.u32 rd1, r5;\n\t"
" add.s32 r6, r4, -1;\n\t"
" mul.wide.u32 rd5, r6, 8;\n\t"
" add.s64 rd2, rd4, rd5;\n\t"
" mov.u32 r7, -100000;\n\t"
" BB1_1:\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" st.global.relaxed.gpu.u64 [rd2], rd1;\n\t"
" add.s32 r7, r7, 50;\n\t"
" setp.ne.s32 p1, r7, 0;\n\t"
" @p1 bra BB1_1;\n\t"
" ret;\n\t"
"}"
:: "l"(array), "r"(size) : "memory");
#endif
}
__global__ void testLDSTRelaxed(unsigned long long int *array, unsigned size, unsigned long long int *array2) {
#if __CUDA_ARCH__ < 700
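// Pre-Volta path: 32 unrolled triples of (volatile load, plain store to array2,
// volatile store) per loop trip; r10 counts up from -100000 in steps of 32.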
asm(
"{\n\t"
" .reg .pred p<2>;\n\t"
" .reg .b32 r<11>;\n\t"
" .reg .b64 rd<42>;\n\t"
" mov.u64 rd4, %0;\n\t"
" mov.u32 r4, %1;\n\t"
" mov.u64 rd5, %2;\n\t"
" cvta.to.global.u64 rd6, rd5;\n\t"
" add.s32 r5, r4, -1;\n\t"
" cvta.to.global.u64 rd7, rd4;\n\t"
" mul.wide.u32 rd8, r5, 8;\n\t"
" add.s64 rd1, rd7, rd8;\n\t"
" mov.u32 r6, %ntid.x;\n\t"
" mov.u32 r7, %ctaid.x;\n\t"
" mov.u32 r8, %tid.x;\n\t"
" mad.lo.s32 r9, r6, r7, r8;\n\t"
" mul.wide.u32 rd9, r9, 8;\n\t"
" add.s64 rd2, rd6, rd9;\n\t"
" cvt.u64.u32 rd3, r8;\n\t"
" mov.u32 r10, -100000;\n\t"
" BB3_1:\n\t"
" ld.volatile.global.u64 rd10, [rd1];\n\t"
" st.global.u64 [rd2], rd10;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd11, [rd1];\n\t"
" st.global.u64 [rd2], rd11;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd12, [rd1];\n\t"
" st.global.u64 [rd2], rd12;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd13, [rd1];\n\t"
" st.global.u64 [rd2], rd13;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd14, [rd1];\n\t"
" st.global.u64 [rd2], rd14;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd15, [rd1];\n\t"
" st.global.u64 [rd2], rd15;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd16, [rd1];\n\t"
" st.global.u64 [rd2], rd16;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd17, [rd1];\n\t"
" st.global.u64 [rd2], rd17;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd18, [rd1];\n\t"
" st.global.u64 [rd2], rd18;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd19, [rd1];\n\t"
" st.global.u64 [rd2], rd19;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd20, [rd1];\n\t"
" st.global.u64 [rd2], rd20;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd21, [rd1];\n\t"
" st.global.u64 [rd2], rd21;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd22, [rd1];\n\t"
" st.global.u64 [rd2], rd22;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd23, [rd1];\n\t"
" st.global.u64 [rd2], rd23;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd24, [rd1];\n\t"
" st.global.u64 [rd2], rd24;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd25, [rd1];\n\t"
" st.global.u64 [rd2], rd25;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd26, [rd1];\n\t"
" st.global.u64 [rd2], rd26;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd27, [rd1];\n\t"
" st.global.u64 [rd2], rd27;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd28, [rd1];\n\t"
" st.global.u64 [rd2], rd28;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd29, [rd1];\n\t"
" st.global.u64 [rd2], rd29;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd30, [rd1];\n\t"
" st.global.u64 [rd2], rd30;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd31, [rd1];\n\t"
" st.global.u64 [rd2], rd31;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd32, [rd1];\n\t"
" st.global.u64 [rd2], rd32;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd33, [rd1];\n\t"
" st.global.u64 [rd2], rd33;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd34, [rd1];\n\t"
" st.global.u64 [rd2], rd34;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd35, [rd1];\n\t"
" st.global.u64 [rd2], rd35;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd36, [rd1];\n\t"
" st.global.u64 [rd2], rd36;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd37, [rd1];\n\t"
" st.global.u64 [rd2], rd37;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd38, [rd1];\n\t"
" st.global.u64 [rd2], rd38;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd39, [rd1];\n\t"
" st.global.u64 [rd2], rd39;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd40, [rd1];\n\t"
" st.global.u64 [rd2], rd40;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" ld.volatile.global.u64 rd41, [rd1];\n\t"
" st.global.u64 [rd2], rd41;\n\t"
" st.volatile.global.u64 [rd1], rd3;\n\t"
" add.s32 r10, r10, 32;\n\t"
" setp.ne.s32 p1, r10, 0;\n\t"
" @p1 bra BB3_1;\n\t"
" ret;\n\t"
"}"
:: "l"(array), "r"(size), "l"(array2) : "memory");
#else
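// sm_70+ path: the same 32 unrolled triples using relaxed GPU-scope loads and
// stores (ld/st.global.relaxed.gpu).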
asm(
"{\n\t"
" .reg .pred p<2>;\n\t"
" .reg .b32 r<11>;\n\t"
" .reg .b64 rd<42>;\n\t"
" mov.u64 rd4, %0;\n\t"
" mov.u32 r4, %1;\n\t"
" mov.u64 rd5, %2;\n\t"
" cvta.to.global.u64 rd6, rd5;\n\t"
" add.s32 r5, r4, -1;\n\t"
" cvta.to.global.u64 rd7, rd4;\n\t"
" mul.wide.u32 rd8, r5, 8;\n\t"
" add.s64 rd1, rd7, rd8;\n\t"
" mov.u32 r6, %ntid.x;\n\t"
" mov.u32 r7, %ctaid.x;\n\t"
" mov.u32 r8, %tid.x;\n\t"
" mad.lo.s32 r9, r6, r7, r8;\n\t"
" mul.wide.u32 rd9, r9, 8;\n\t"
" add.s64 rd2, rd6, rd9;\n\t"
" cvt.u64.u32 rd3, r8;\n\t"
" mov.u32 r10, -100000;\n\t"
" BB3_1:\n\t"
" ld.global.relaxed.gpu.u64 rd10, [rd1];\n\t"
" st.global.u64 [rd2], rd10;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd11, [rd1];\n\t"
" st.global.u64 [rd2], rd11;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd12, [rd1];\n\t"
" st.global.u64 [rd2], rd12;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd13, [rd1];\n\t"
" st.global.u64 [rd2], rd13;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd14, [rd1];\n\t"
" st.global.u64 [rd2], rd14;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd15, [rd1];\n\t"
" st.global.u64 [rd2], rd15;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd16, [rd1];\n\t"
" st.global.u64 [rd2], rd16;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd17, [rd1];\n\t"
" st.global.u64 [rd2], rd17;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd18, [rd1];\n\t"
" st.global.u64 [rd2], rd18;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd19, [rd1];\n\t"
" st.global.u64 [rd2], rd19;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd20, [rd1];\n\t"
" st.global.u64 [rd2], rd20;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd21, [rd1];\n\t"
" st.global.u64 [rd2], rd21;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd22, [rd1];\n\t"
" st.global.u64 [rd2], rd22;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd23, [rd1];\n\t"
" st.global.u64 [rd2], rd23;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd24, [rd1];\n\t"
" st.global.u64 [rd2], rd24;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd25, [rd1];\n\t"
" st.global.u64 [rd2], rd25;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd26, [rd1];\n\t"
" st.global.u64 [rd2], rd26;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd27, [rd1];\n\t"
" st.global.u64 [rd2], rd27;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd28, [rd1];\n\t"
" st.global.u64 [rd2], rd28;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd29, [rd1];\n\t"
" st.global.u64 [rd2], rd29;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd30, [rd1];\n\t"
" st.global.u64 [rd2], rd30;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd31, [rd1];\n\t"
" st.global.u64 [rd2], rd31;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd32, [rd1];\n\t"
" st.global.u64 [rd2], rd32;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd33, [rd1];\n\t"
" st.global.u64 [rd2], rd33;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd34, [rd1];\n\t"
" st.global.u64 [rd2], rd34;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd35, [rd1];\n\t"
" st.global.u64 [rd2], rd35;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd36, [rd1];\n\t"
" st.global.u64 [rd2], rd36;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd37, [rd1];\n\t"
" st.global.u64 [rd2], rd37;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd38, [rd1];\n\t"
" st.global.u64 [rd2], rd38;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd39, [rd1];\n\t"
" st.global.u64 [rd2], rd39;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd40, [rd1];\n\t"
" st.global.u64 [rd2], rd40;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" ld.global.relaxed.gpu.u64 rd41, [rd1];\n\t"
" st.global.u64 [rd2], rd41;\n\t"
" st.global.relaxed.gpu.u64 [rd1], rd3;\n\t"
" add.s32 r10, r10, 32;\n\t"
" setp.ne.s32 p1, r10, 0;\n\t"
" @p1 bra BB3_1;\n\t"
" ret;\n\t"
"}"
:: "l"(array), "r"(size), "l"(array2) : "memory");
#endif
}
int main(int argc, char *argv[]) {
// Output Device Info
int num_devices;
int device_num;
int device_major;
cudaGetDeviceCount(&num_devices);
if (argc < 2) {
for (unsigned i = 0; i < num_devices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
std::cout << "Device Number: " << i << std::endl;
std::cout << " Device Name: " << prop.name << std::endl;
std::cout << " Compute Capacity: " << prop.major << "." << prop.minor << std::endl;
}
exit(0);
} else {
device_num = std::stoi(argv[1], nullptr, 10);
if (device_num >= num_devices) {
std::cout << "Only " << num_devices << " devices is available on this machine" << std::endl;
exit(1);
} else {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device_num);
std::cout << "Device Number: " << device_num << std::endl;
std::cout << " Device Name: " << prop.name << std::endl;
std::cout << " Compute Capacity: " << prop.major << "." << prop.minor << std::endl;
device_major = prop.major;
cudaSetDevice(device_num);
}
}
std::vector<std::string> names = {"ld.volatile", "st.volatile", "atomicExch", "ldst.volatile", "ldatomic"};
if (device_major >= 7) {
names.push_back("st.relaxed");
names.push_back("ldst.relaxed");
}
double** execution_times = new double*[names.size()]();
for (unsigned i = 0; i < names.size(); i++) {
execution_times[i] = new double[NTIMES + 1]();
}
unsigned long long int *array1_host, *array2_host, *array1_device, *array2_device;
unsigned thread_num = BLOCK_NUM * BLOCK_SIZE;
unsigned array1_size_in_byte = sizeof(unsigned long long int) * ARRAY_SIZE;
unsigned array2_size_in_byte = sizeof(unsigned long long int) * thread_num;
array1_host = (unsigned long long int*)malloc(array1_size_in_byte);
array2_host = (unsigned long long int*)malloc(array2_size_in_byte);
for (unsigned i = 0; i < ARRAY_SIZE; i++) {
array1_host[i] = i;
}
cudaErrorCheck(cudaMalloc(&array1_device, array1_size_in_byte));
cudaErrorCheck(cudaMalloc(&array2_device, array2_size_in_byte));
cudaErrorCheck(cudaMemcpy(array1_device, array1_host, array1_size_in_byte, cudaMemcpyHostToDevice));
for (unsigned i = 0; i <= NTIMES; i++) {
auto start_ld_volatile = std::chrono::high_resolution_clock::now();
testLDVolatile<<<BLOCK_NUM, BLOCK_SIZE>>>(array1_device, ARRAY_SIZE, array2_device);
cudaErrorCheck(cudaGetLastError());
cudaErrorCheck(cudaDeviceSynchronize());
auto end_ld_volatile = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_ld_volatile = end_ld_volatile - start_ld_volatile;
execution_times[0][i] = elapsed_time_ld_volatile.count();
auto start_st_volatile = std::chrono::high_resolution_clock::now();
testSTVolatile<<<BLOCK_NUM, BLOCK_SIZE>>>(array1_device, ARRAY_SIZE);
cudaErrorCheck(cudaGetLastError());
cudaErrorCheck(cudaDeviceSynchronize());
auto end_st_volatile = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_st_volatile = end_st_volatile - start_st_volatile;
execution_times[1][i] = elapsed_time_st_volatile.count();
auto start_atomic = std::chrono::high_resolution_clock::now();
testAtomicExch<<<BLOCK_NUM, BLOCK_SIZE>>>(array1_device, ARRAY_SIZE);
cudaErrorCheck(cudaGetLastError());
cudaErrorCheck(cudaDeviceSynchronize());
auto end_atomic = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_atomic = end_atomic - start_atomic;
execution_times[2][i] = elapsed_time_atomic.count();
auto start_ldst_volatile = std::chrono::high_resolution_clock::now();
testLDSTVolatile<<<BLOCK_NUM, BLOCK_SIZE>>>(array1_device, ARRAY_SIZE, array2_device);
cudaErrorCheck(cudaGetLastError());
cudaErrorCheck(cudaDeviceSynchronize());
auto end_ldst_volatile = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_ldst_volatile = end_ldst_volatile - start_ldst_volatile;
execution_times[3][i] = elapsed_time_ldst_volatile.count();
auto start_ldatomic = std::chrono::high_resolution_clock::now();
testLDAtomic<<<BLOCK_NUM, BLOCK_SIZE>>>(array1_device, ARRAY_SIZE, array2_device);
cudaErrorCheck(cudaGetLastError());
cudaErrorCheck(cudaDeviceSynchronize());
auto end_ldatomic = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_ldatomic = end_ldatomic - start_ldatomic;
execution_times[4][i] = elapsed_time_ldatomic.count();
if (device_major >= 7) {
auto start_st_relaxed = std::chrono::high_resolution_clock::now();
testSTRelaxed<<<BLOCK_NUM, BLOCK_SIZE>>>(array1_device, ARRAY_SIZE);
cudaErrorCheck(cudaGetLastError());
cudaErrorCheck(cudaDeviceSynchronize());
auto end_st_relaxed = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_st_relaxed = end_st_relaxed - start_st_relaxed;
execution_times[5][i] = elapsed_time_st_relaxed.count();
auto start_ldst_relaxed = std::chrono::high_resolution_clock::now();
testLDSTRelaxed<<<BLOCK_NUM, BLOCK_SIZE>>>(array1_device, ARRAY_SIZE, array2_device);
cudaErrorCheck(cudaGetLastError());
cudaErrorCheck(cudaDeviceSynchronize());
auto end_ldst_relaxed = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_time_ldst_relaxed = end_ldst_relaxed - start_ldst_relaxed;
execution_times[6][i] = elapsed_time_ldst_relaxed.count();
}
}
cudaErrorCheck(cudaMemcpy(array1_host, array1_device, array1_size_in_byte, cudaMemcpyDeviceToHost));
cudaErrorCheck(cudaMemcpy(array2_host, array2_device, array2_size_in_byte, cudaMemcpyDeviceToHost));
//verify
//for (unsigned i = 0; i < thread_num; i++) {
//if (array2_host[i] != array1_host[ARRAY_SIZE - 2]) {
//std::cout << "Mismatch at " << i << ", value is " << array2_host[i] << " " << array1_host[ARRAY_SIZE - 2] << std::endl;
//}
//}
for (unsigned i = 0; i < names.size(); i++) {
std::cout << names[i];
double average = 0;
double min = DBL_MAX;
double max = 0;
for (unsigned j = 1; j <= NTIMES; j++) {
if (min > execution_times[i][j]) {
min = execution_times[i][j];
}
if (max < execution_times[i][j]) { // check max independently so the largest sample is never skipped
max = execution_times[i][j];
}
average += execution_times[i][j];
}
average /= NTIMES;
std::cout << " Average " << average << " Min " << min << " Max " << max << std::endl;
}
return 0;
}
|
1dea1dd0450ed7a815a97e899d663b7ab511994b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// 3-pt stencil kernel
__global__ void
zge3pt_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex beta,
magmaDoubleComplex * dx,
magmaDoubleComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if( row >= num_rows ){
return;
} else {
for( int i=0; i<num_cols; i++ ){
if (row == num_rows-1) {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows-1 ])
+ beta * dy[ row+i*num_rows ] ;
} else if(row == 0) {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows+1 ])
+ beta * dy[ row+i*num_rows ] ;
} else {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows-1 ] + dx[ row+i*num_rows+1 ])
+ beta * dy[ row+i*num_rows ] ;
}
}
}
}
/**
Purpose
-------
This routine is a 3-pt-stencil operator derived from an FD scheme in 2D
with Dirichlet boundary conditions.
It computes y_i = alpha * ( -2 x_i + x_{i-1} + x_{i+1} ) + beta * y_i,
where the out-of-range neighbor is dropped at the first and last row.
Arguments
---------
@param[in]
m magma_int_t
number of rows in x and y
@param[in]
n magma_int_t
number of columns in x and y
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[out]
dy magmaDoubleComplex_ptr
output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zge3pt(
magma_int_t m,
magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zge3pt_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, beta, dx, dy );
return MAGMA_SUCCESS;
}
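/*
Minimal usage sketch (a sketch only; it assumes d_x and d_y are device
arrays of m*n magmaDoubleComplex values and that `queue` was created with
magma_queue_create beforehand):

magmaDoubleComplex alpha = MAGMA_Z_MAKE( 1.0, 0.0 );
magmaDoubleComplex beta  = MAGMA_Z_MAKE( 0.0, 0.0 );
magma_zge3pt( m, n, alpha, beta, d_x, d_y, queue );
*/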
| 1dea1dd0450ed7a815a97e899d663b7ab511994b.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// 3-pt stencil kernel
__global__ void
zge3pt_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex beta,
magmaDoubleComplex * dx,
magmaDoubleComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if( row >= num_rows ){
return;
} else {
for( int i=0; i<num_cols; i++ ){
if (row == num_rows-1) {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows-1 ])
+ beta * dy[ row+i*num_rows ] ;
} else if(row == 0) {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows+1 ])
+ beta * dy[ row+i*num_rows ] ;
} else {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows-1 ] + dx[ row+i*num_rows+1 ])
+ beta * dy[ row+i*num_rows ] ;
}
}
}
}
/**
Purpose
-------
This routine is a 3-pt-stencil operator derived from an FD scheme in 2D
with Dirichlet boundary conditions.
It computes y_i = alpha * ( -2 x_i + x_{i-1} + x_{i+1} ) + beta * y_i,
where the out-of-range neighbor is dropped at the first and last row.
Arguments
---------
@param[in]
m magma_int_t
number of rows in x and y
@param[in]
n magma_int_t
number of columns in x and y
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[out]
dy magmaDoubleComplex_ptr
output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zge3pt(
magma_int_t m,
magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
zge3pt_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, beta, dx, dy );
return MAGMA_SUCCESS;
}
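/*
Minimal usage sketch (a sketch only; it assumes d_x and d_y are device
arrays of m*n magmaDoubleComplex values and that `queue` was created with
magma_queue_create beforehand):

magmaDoubleComplex alpha = MAGMA_Z_MAKE( 1.0, 0.0 );
magmaDoubleComplex beta  = MAGMA_Z_MAKE( 0.0, 0.0 );
magma_zge3pt( m, n, alpha, beta, d_x, d_y, queue );
*/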
|
fc77730abb9391b990faac06980e03a9094dad47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio> // printf
#include <cstdlib> // rand
#include <iostream>
#include <numeric>
#define SIZE 16
using namespace std;
__global__ void sum(int* input)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while (number_of_threads > 0)
{
if (tid < number_of_threads) // still alive?
{
const int fst = tid * step_size * 2;
const int snd = fst + step_size;
input[fst] += input[snd];
}
__syncthreads(); // make partial sums visible before the next halving step
step_size <<= 1;
number_of_threads >>= 1;
}
}
int main()
{
int count = SIZE;
const int size = count*sizeof(int);
int h[SIZE];
for(int i=0;i<count;i++)
{
h[i] = rand() % 20 + 1;
}
for(int i=0;i<count;i++)
{
printf("%d ",h[i]);
}
int* d;
hipMalloc(&d, size);
hipMemcpy(d, h, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sum) , dim3(1), dim3(count), 0, 0, d);
int result;
hipMemcpy(&result, d, sizeof(int), hipMemcpyDeviceToHost);
cout << "Sum is " << result << endl;
//getchar();
hipFree(d);
return 0;
}
| fc77730abb9391b990faac06980e03a9094dad47.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
#define SIZE 16
using namespace std;
__global__ void sum(int* input)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while (number_of_threads > 0)
{
if (tid < number_of_threads) // still alive?
{
const int fst = tid * step_size * 2;
const int snd = fst + step_size;
input[fst] += input[snd];
}
__syncthreads(); // make partial sums visible before the next halving step
step_size <<= 1;
number_of_threads >>= 1;
}
}
int main()
{
int count = SIZE;
const int size = count*sizeof(int);
int h[SIZE];
for(int i=0;i<count;i++)
{
h[i] = rand() % 20 + 1;
}
for(int i=0;i<count;i++)
{
printf("%d ",h[i]);
}
int* d;
cudaMalloc(&d, size);
cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
sum <<<1, count>>>(d);
int result;
cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
cout << "Sum is " << result << endl;
//getchar();
cudaFree(d);
return 0;
}
|
6ba5497d9802858693f08f3c8ac07757c815aacc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "reference.cpp"
__global__ void smoothingFilter(
int Lx, int Ly,
int Threshold, int MaxRad,
const float*__restrict__ Img,
int*__restrict__ Box,
float*__restrict__ Norm)
{
int tid = threadIdx.x;
int tjd = threadIdx.y;
int i = blockIdx.x * blockDim.x + tid;
int j = blockIdx.y * blockDim.y + tjd;
int stid = tjd * blockDim.x + tid;
int gtid = j * Lx + i;
// part of shared memory may be unused
__shared__ float s_Img[1024];
if ( i < Lx && j < Ly )
s_Img[stid] = Img[gtid];
__syncthreads();
if ( i < Lx && j < Ly )
{
// Smoothing parameters
float sum = 0.f;
int q = 1;
int s = q;
int ksum = 0;
// Continue until parameters are met
while (sum < Threshold && q < MaxRad)
{
s = q;
sum = 0.f;
ksum = 0;
// Normal adaptive smoothing
for (int ii = -s; ii < s+1; ii++)
for (int jj = -s; jj < s+1; jj++)
if ( (i-s >= 0) && (i+s < Ly) && (j-s >= 0) && (j+s < Lx) )
{
ksum++;
// Compute within bounds of block dimensions
if( tid-s >= 0 && tid+s < blockDim.x && tjd-s >= 0 && tjd+s < blockDim.y )
sum += s_Img[stid + ii*blockDim.x + jj];
// Compute block borders with global memory
else
sum += Img[gtid + ii*Lx + jj];
}
q++;
}
Box[gtid] = s;
// Normalization for each box
for (int ii = -s; ii < s+1; ii++)
for (int jj = -s; jj < s+1; jj++)
if (ksum != 0)
atomicAdd(&Norm[gtid + ii*Lx + jj], __fdividef(1.f, (float)ksum));
}
}
__global__ void normalizeFilter(
int Lx, int Ly,
float*__restrict__ Img,
const float*__restrict__ Norm)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if ( i < Lx && j < Ly ) {
int gtid = j * Lx + i;
const float norm = Norm[gtid];
if (norm != 0) Img[gtid] = __fdividef(Img[gtid], norm);
}
}
__global__ void outFilter(
int Lx, int Ly,
const float*__restrict__ Img,
const int*__restrict__ Box,
float*__restrict__ Out )
{
int tid = threadIdx.x;
int tjd = threadIdx.y;
int i = blockIdx.x * blockDim.x + tid;
int j = blockIdx.y * blockDim.y + tjd;
int stid = tjd * blockDim.x + tid;
int gtid = j * Lx + i;
// part of shared memory may be unused
__shared__ float s_Img[1024];
if ( i < Lx && j < Ly )
s_Img[stid] = Img[gtid];
__syncthreads();
if ( i < Lx && j < Ly )
{
const int s = Box[gtid];
float sum = 0.f;
int ksum = 0;
for (int ii = -s; ii < s+1; ii++)
for (int jj = -s; jj < s+1; jj++)
if ( (i-s >= 0) && (i+s < Lx) && (j-s >= 0) && (j+s < Ly) )
{
ksum++;
if( tid-s >= 0 && tid+s < blockDim.x && tjd-s >= 0 && tjd+s < blockDim.y )
sum += s_Img[stid + ii*blockDim.y + jj];
else
sum += Img[gtid + ii*Ly + jj];
}
if ( ksum != 0 ) Out[gtid] = __fdividef(sum , (float)ksum);
}
}
int main(int argc, char* argv[]) {
if (argc != 5) {
printf("./%s <image dimension> <threshold> <max box size> <iterations>\n", argv[0]);
exit(1);
}
// only a square image is supported
const int Lx = atoi(argv[1]);
const int Ly = Lx;
const int size = Lx * Ly;
const int Threshold = atoi(argv[2]);
const int MaxRad = atoi(argv[3]);
const int repeat = atoi(argv[4]);
// input image
float *img = (float*) malloc (sizeof(float) * size);
// host and device results
float *norm = (float*) malloc (sizeof(float) * size);
float *h_norm = (float*) malloc (sizeof(float) * size);
int *box = (int*) malloc (sizeof(int) * size);
int *h_box = (int*) malloc (sizeof(int) * size);
float *out = (float*) malloc (sizeof(float) * size);
float *h_out = (float*) malloc (sizeof(float) * size);
srand(123);
for (int i = 0; i < size; i++) {
img[i] = rand() % 256;
norm[i] = box[i] = out[i] = 0;
}
float *d_img;
hipMalloc((void**)&d_img, sizeof(float) * size);
float *d_norm;
hipMalloc((void**)&d_norm, sizeof(float) * size);
int *d_box;
hipMalloc((void**)&d_box, sizeof(int) * size);
float *d_out;
hipMalloc((void**)&d_out, sizeof(float) * size);
dim3 grids ((Lx+15)/16, (Ly+15)/16);
dim3 blocks (16, 16);
// reset output
hipMemcpy(d_out, out, sizeof(float) * size, hipMemcpyHostToDevice);
double time = 0;
for (int i = 0; i < repeat; i++) {
// restore input image
hipMemcpy(d_img, img, sizeof(float) * size, hipMemcpyHostToDevice);
// reset norm
hipMemcpy(d_norm, norm, sizeof(float) * size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// launch three kernels
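// smoothingFilter picks a per-pixel box radius and accumulates coverage weights,
// normalizeFilter rescales the image by those weights, and outFilter writes the
// final smoothed result using the stored radii.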
hipLaunchKernelGGL(( smoothingFilter), dim3(grids), dim3(blocks), 0, 0, Lx, Ly, Threshold, MaxRad, d_img, d_box, d_norm);
hipLaunchKernelGGL(( normalizeFilter), dim3(grids), dim3(blocks), 0, 0, Lx, Ly, d_img, d_norm);
hipLaunchKernelGGL(( outFilter), dim3(grids), dim3(blocks), 0, 0, Lx, Ly, d_img, d_box, d_out);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
}
printf("Average filtering time %lf (s)\n", (time * 1e-9) / repeat);
hipMemcpy(out, d_out, sizeof(float) * size, hipMemcpyDeviceToHost);
hipMemcpy(box, d_box, sizeof(int) * size, hipMemcpyDeviceToHost);
hipMemcpy(norm, d_norm, sizeof(float) * size, hipMemcpyDeviceToHost);
// verify
reference (Lx, Ly, Threshold, MaxRad, img, h_box, h_norm, h_out);
verify(size, MaxRad, norm, h_norm, out, h_out, box, h_box);
hipFree(d_img);
hipFree(d_norm);
hipFree(d_box);
hipFree(d_out);
free(img);
free(norm);
free(h_norm);
free(box);
free(h_box);
free(out);
free(h_out);
return 0;
}
| 6ba5497d9802858693f08f3c8ac07757c815aacc.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <cuda.h>
#include "reference.cpp"
__global__ void smoothingFilter(
int Lx, int Ly,
int Threshold, int MaxRad,
const float*__restrict__ Img,
int*__restrict__ Box,
float*__restrict__ Norm)
{
int tid = threadIdx.x;
int tjd = threadIdx.y;
int i = blockIdx.x * blockDim.x + tid;
int j = blockIdx.y * blockDim.y + tjd;
int stid = tjd * blockDim.x + tid;
int gtid = j * Lx + i;
// part of shared memory may be unused
__shared__ float s_Img[1024];
if ( i < Lx && j < Ly )
s_Img[stid] = Img[gtid];
__syncthreads();
if ( i < Lx && j < Ly )
{
// Smoothing parameters
float sum = 0.f;
int q = 1;
int s = q;
int ksum = 0;
// Continue until parameters are met
while (sum < Threshold && q < MaxRad)
{
s = q;
sum = 0.f;
ksum = 0;
// Normal adaptive smoothing
for (int ii = -s; ii < s+1; ii++)
for (int jj = -s; jj < s+1; jj++)
if ( (i-s >= 0) && (i+s < Ly) && (j-s >= 0) && (j+s < Lx) )
{
ksum++;
// Compute within bounds of block dimensions
if( tid-s >= 0 && tid+s < blockDim.x && tjd-s >= 0 && tjd+s < blockDim.y )
sum += s_Img[stid + ii*blockDim.x + jj];
// Compute block borders with global memory
else
sum += Img[gtid + ii*Lx + jj];
}
q++;
}
Box[gtid] = s;
// Normalization for each box
for (int ii = -s; ii < s+1; ii++)
for (int jj = -s; jj < s+1; jj++)
if (ksum != 0)
atomicAdd(&Norm[gtid + ii*Lx + jj], __fdividef(1.f, (float)ksum));
}
}
__global__ void normalizeFilter(
int Lx, int Ly,
float*__restrict__ Img,
const float*__restrict__ Norm)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if ( i < Lx && j < Ly ) {
int gtid = j * Lx + i;
const float norm = Norm[gtid];
if (norm != 0) Img[gtid] = __fdividef(Img[gtid], norm);
}
}
__global__ void outFilter(
int Lx, int Ly,
const float*__restrict__ Img,
const int*__restrict__ Box,
float*__restrict__ Out )
{
int tid = threadIdx.x;
int tjd = threadIdx.y;
int i = blockIdx.x * blockDim.x + tid;
int j = blockIdx.y * blockDim.y + tjd;
int stid = tjd * blockDim.x + tid;
int gtid = j * Lx + i;
// part of shared memory may be unused
__shared__ float s_Img[1024];
if ( i < Lx && j < Ly )
s_Img[stid] = Img[gtid];
__syncthreads();
if ( i < Lx && j < Ly )
{
const int s = Box[gtid];
float sum = 0.f;
int ksum = 0;
for (int ii = -s; ii < s+1; ii++)
for (int jj = -s; jj < s+1; jj++)
if ( (i-s >= 0) && (i+s < Lx) && (j-s >= 0) && (j+s < Ly) )
{
ksum++;
if( tid-s >= 0 && tid+s < blockDim.x && tjd-s >= 0 && tjd+s < blockDim.y )
sum += s_Img[stid + ii*blockDim.y + jj];
else
sum += Img[gtid + ii*Ly + jj];
}
if ( ksum != 0 ) Out[gtid] = __fdividef(sum , (float)ksum);
}
}
int main(int argc, char* argv[]) {
if (argc != 5) {
printf("./%s <image dimension> <threshold> <max box size> <iterations>\n", argv[0]);
exit(1);
}
// only a square image is supported
const int Lx = atoi(argv[1]);
const int Ly = Lx;
const int size = Lx * Ly;
const int Threshold = atoi(argv[2]);
const int MaxRad = atoi(argv[3]);
const int repeat = atoi(argv[4]);
// input image
float *img = (float*) malloc (sizeof(float) * size);
// host and device results
float *norm = (float*) malloc (sizeof(float) * size);
float *h_norm = (float*) malloc (sizeof(float) * size);
int *box = (int*) malloc (sizeof(int) * size);
int *h_box = (int*) malloc (sizeof(int) * size);
float *out = (float*) malloc (sizeof(float) * size);
float *h_out = (float*) malloc (sizeof(float) * size);
srand(123);
for (int i = 0; i < size; i++) {
img[i] = rand() % 256;
norm[i] = box[i] = out[i] = 0;
}
float *d_img;
cudaMalloc((void**)&d_img, sizeof(float) * size);
float *d_norm;
cudaMalloc((void**)&d_norm, sizeof(float) * size);
int *d_box;
cudaMalloc((void**)&d_box, sizeof(int) * size);
float *d_out;
cudaMalloc((void**)&d_out, sizeof(float) * size);
dim3 grids ((Lx+15)/16, (Ly+15)/16);
dim3 blocks (16, 16);
// reset output
cudaMemcpy(d_out, out, sizeof(float) * size, cudaMemcpyHostToDevice);
double time = 0;
for (int i = 0; i < repeat; i++) {
// restore input image
cudaMemcpy(d_img, img, sizeof(float) * size, cudaMemcpyHostToDevice);
// reset norm
cudaMemcpy(d_norm, norm, sizeof(float) * size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// launch three kernels
smoothingFilter<<<grids, blocks>>>(Lx, Ly, Threshold, MaxRad, d_img, d_box, d_norm);
normalizeFilter<<<grids, blocks>>>(Lx, Ly, d_img, d_norm);
outFilter<<<grids, blocks>>>(Lx, Ly, d_img, d_box, d_out);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
}
printf("Average filtering time %lf (s)\n", (time * 1e-9) / repeat);
cudaMemcpy(out, d_out, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(box, d_box, sizeof(int) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(norm, d_norm, sizeof(float) * size, cudaMemcpyDeviceToHost);
// verify
reference (Lx, Ly, Threshold, MaxRad, img, h_box, h_norm, h_out);
verify(size, MaxRad, norm, h_norm, out, h_out, box, h_box);
cudaFree(d_img);
cudaFree(d_norm);
cudaFree(d_box);
cudaFree(d_out);
free(img);
free(norm);
free(h_norm);
free(box);
free(h_box);
free(out);
free(h_out);
return 0;
}
|
0c7a69e4e70b5a72793e80ab34a4d32210b22123.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
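// (N+255)/256 rounds the grid size up so every element is covered by a 256-thread
// block; the i < n guard in the kernel discards the extra threads.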
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
} | 0c7a69e4e70b5a72793e80ab34a4d32210b22123.cu | #include <stdio.h>
#include <cuda.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
} |
03c65aa3de5bc7732d6535cc7163ff5d50a88256.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<cmath>
#include <WINDOWS.H>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<time.h>
// includes, cuda
#include <hip/hip_vector_types.h>
#include <driver_functions.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
// CUDA utilities and system includes
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_functions.h>
#include <hip/hip_vector_types.h>
#include <math.h>
using namespace std;
using namespace cv;
int image[200000000];
int f_image[200000000];
/*__global__ void gradient_x(int x, int y,int width)
{
int gx;
gx = image[(x - 1)*width + (y - 1)] * 1 +
image[(x - 1)*width + y] * 2 +
image[(x - 1)*width + (y + 1)] * 1 +
image[(x + 1)*width + (y - 1)] * (-1) +
image[(x + 1)*width + y] * (-2) +
image[(x + 1)*width + (y + 1)] * (-1);
}
__global__ void gradient_y(int x, int y,int width)
{
int gy;
gy = image[(x - 1)*width + (y - 1)] * 1 +
image[x*width + (y - 1)] * 2 +
image[(x + 1)*width + (y - 1)] * 1 +
image[(x - 1)*width + (y + 1)] * (-1) +
image[x*width + (y + 1)] * (-2) +
image[(x + 1)*width + (y + 1)] * (-1);
}*/
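// Note: despite its name, this kernel applies a 5x5 smoothing template whose
// weights sum to 273 (a Gaussian-like blur); the Sobel gradient code is commented out.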
__global__ void gradient(int* image, int* f_image, int length, int width)
{
//int a = gradient_x(x, y,width);
//int b = gradient_y(x, y,width);
int templates[25] = {
1, 4, 7, 4, 1,
4, 16, 26, 16, 4,
7, 26, 41, 26, 7,
4, 16, 26, 16, 4,
1, 4, 7, 4, 1 };
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id >= length*width)
{
return;
}
int y = id / width;
int x = id % width;
if (y < 2 || y >= length - 2 || x < 2 || x >= width - 2)
{
return;
}
/*
int a = image[(y - 1)*width + (x - 1)] * 1 +
image[(y - 1)*width + x] * 2 +
image[(y - 1)*width + (x + 1)] * 1 +
image[(y + 1)*width + (x - 1)] * (-1) +
image[(y + 1)*width + x] * (-2) +
image[(y + 1)*width + (x + 1)] * (-1);
int b = image[(y - 1)*width + (x - 1)] * 1 +
image[y*width + (x - 1)] * 2 +
image[(y + 1)*width + (x - 1)] * 1 +
image[(y - 1)*width + (x + 1)] * (-1) +
image[y*width + (x + 1)] * (-2) +
image[(y + 1)*width + (x + 1)] * (-1);
int g = sqrt(float(a*a + b*b));
g = g > 100 ? 255 : 0;
f_image[y*width + x] = g;*/
int g = 0;
int index = 0;
for (int m = y - 2; m<y + 3; m++)
{
for (int n = x - 2; n<x + 3; n++)
{
g += image[m*width + n] * templates[index++];
}
}
g /= 273;
if (g > 255)
g = 255;
f_image[id] = g;
}
int main()
{
//double a = clock();
Mat src;
src = imread("D:\\testpic\\12800x10240.bmp", CV_LOAD_IMAGE_GRAYSCALE);
Mat dst = src.clone();
for (int y = 0; y < src.rows; y++)
for (int x = 0; x < src.cols; x++)
dst.at<uchar>(y, x) = 0.0;
/////////////////////////////////////////////
for (int i = 0; i < src.rows; i++)
{
for (int j = 0; j < src.cols; j++)
{
image[i*src.cols + j] = src.at<uchar>(i, j);
}
}
int image_size = src.rows*src.cols;
int* cuda_image;
int* cuda_f_image;
hipMalloc((void**)&cuda_image, sizeof(int) * src.cols * src.rows);
hipMalloc((void**)&cuda_f_image, sizeof(int) * src.cols * src.rows);
double a = clock();
hipMemcpy(cuda_image, image, sizeof(int) * src.rows * src.cols, hipMemcpyHostToDevice);
gradient << <(image_size / 32) + 1, 32 >> > (cuda_image, cuda_f_image, src.rows, src.cols);
hipMemcpy(f_image, cuda_f_image, sizeof(int) * src.rows * src.cols, hipMemcpyDeviceToHost);
double b = clock();
double diff = (b - a) / CLOCKS_PER_SEC;
//cout << diff << endl;
cout << diff<<endl;
for (int y = 0; y < src.rows ; y++) {
for (int x = 0; x < src.cols ; x++) {
dst.at<uchar>(y, x) = f_image[y*(src.cols) + x];
}
}
cout << src.rows << endl;
cout << src.cols << endl;
namedWindow("initial", WINDOW_NORMAL);
imshow("initial", src);
namedWindow("final", WINDOW_NORMAL);
imshow("final", dst);
//double b = clock();
//double diff = (b - a) / CLOCKS_PER_SEC;
//cout << diff << endl;
waitKey();
}
| 03c65aa3de5bc7732d6535cc7163ff5d50a88256.cu | #include<iostream>
#include<cmath>
#include <WINDOWS.H>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<time.h>
// includes, cuda
#include <vector_types.h>
#include <driver_functions.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// CUDA utilities and system includes
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_functions.h>
#include <vector_types.h>
#include <math.h>
using namespace std;
using namespace cv;
int image[200000000];
int f_image[200000000];
/*__global__ void gradient_x(int x, int y,int width)
{
int gx;
gx = image[(x - 1)*width + (y - 1)] * 1 +
image[(x - 1)*width + y] * 2 +
image[(x - 1)*width + (y + 1)] * 1 +
image[(x + 1)*width + (y - 1)] * (-1) +
image[(x + 1)*width + y] * (-2) +
image[(x + 1)*width + (y + 1)] * (-1);
}
__global__ void gradient_y(int x, int y,int width)
{
int gy;
gy = image[(x - 1)*width + (y - 1)] * 1 +
image[x*width + (y - 1)] * 2 +
image[(x + 1)*width + (y - 1)] * 1 +
image[(x - 1)*width + (y + 1)] * (-1) +
image[x*width + (y + 1)] * (-2) +
image[(x + 1)*width + (y + 1)] * (-1);
}*/
__global__ void gradient(int* image, int* f_image, int length, int width)
{
//int a = gradient_x(x, y,width);
//int b = gradient_y(x, y,width);
int templates[25] = {
1, 4, 7, 4, 1,
4, 16, 26, 16, 4,
7, 26, 41, 26, 7,
4, 16, 26, 16, 4,
1, 4, 7, 4, 1 };
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id >= length*width)
{
return;
}
int y = id / width;
int x = id % width;
if (y < 2 || y >= length - 2 || x < 2 || x >= width - 2)
{
return;
}
/*
int a = image[(y - 1)*width + (x - 1)] * 1 +
image[(y - 1)*width + x] * 2 +
image[(y - 1)*width + (x + 1)] * 1 +
image[(y + 1)*width + (x - 1)] * (-1) +
image[(y + 1)*width + x] * (-2) +
image[(y + 1)*width + (x + 1)] * (-1);
int b = image[(y - 1)*width + (x - 1)] * 1 +
image[y*width + (x - 1)] * 2 +
image[(y + 1)*width + (x - 1)] * 1 +
image[(y - 1)*width + (x + 1)] * (-1) +
image[y*width + (x + 1)] * (-2) +
image[(y + 1)*width + (x + 1)] * (-1);
int g = sqrt(float(a*a + b*b));
g = g > 100 ? 255 : 0;
f_image[y*width + x] = g;*/
int g = 0;
int index = 0;
for (int m = y - 2; m<y + 3; m++)
{
for (int n = x - 2; n<x + 3; n++)
{
g += image[m*width + n] * templates[index++];
}
}
g /= 273;
if (g > 255)
g = 255;
f_image[id] = g;
}
int main()
{
//double a = clock();
Mat src;
src = imread("D:\\testpic\\12800x10240.bmp", CV_LOAD_IMAGE_GRAYSCALE);
Mat dst = src.clone();
for (int y = 0; y < src.rows; y++)
for (int x = 0; x < src.cols; x++)
dst.at<uchar>(y, x) = 0.0;
/////////////////////////////////////////////
for (int i = 0; i < src.rows; i++)
{
for (int j = 0; j < src.cols; j++)
{
image[i*src.cols + j] = src.at<uchar>(i, j);
}
}
int image_size = src.rows*src.cols;
int* cuda_image;
int* cuda_f_image;
cudaMalloc((void**)&cuda_image, sizeof(int) * src.cols * src.rows);
cudaMalloc((void**)&cuda_f_image, sizeof(int) * src.cols * src.rows);
double a = clock();
cudaMemcpy(cuda_image, image, sizeof(int) * src.rows * src.cols, cudaMemcpyHostToDevice);
gradient << <(image_size / 32) + 1, 32 >> > (cuda_image, cuda_f_image, src.rows, src.cols);
cudaMemcpy(f_image, cuda_f_image, sizeof(int) * src.rows * src.cols, cudaMemcpyDeviceToHost);
double b = clock();
double diff = (b - a) / CLOCKS_PER_SEC;
//cout << diff << endl;
cout << diff<<endl;
for (int y = 0; y < src.rows ; y++) {
for (int x = 0; x < src.cols ; x++) {
dst.at<uchar>(y, x) = f_image[y*(src.cols) + x];
}
}
cout << src.rows << endl;
cout << src.cols << endl;
namedWindow("initial", WINDOW_NORMAL);
imshow("initial", src);
namedWindow("final", WINDOW_NORMAL);
imshow("final", dst);
//double b = clock();
//double diff = (b - a) / CLOCKS_PER_SEC;
//cout << diff << endl;
waitKey();
}
|
f42a6a316d29957d5cab00530eba8f51d491ff4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define PRECISION 0.00001
#define TAM_BLOCO 8
#define uN 5.0
#define uS 5.0
#define uW 0.0
#define uE 10.0
//Variáveis CPU
double h_h1, h_h2;
double h_denominador1, h_denominador2;
double *h_m, *d_m;
double h_parcial1, h_parcial2;
int h_dimensaoX, h_dimensaoY, laps = 0, i;
//Variáveis GPU
__constant__ double omega = 1.5;
__constant__ double d_h1, d_h2;
__constant__ double d_denominador1, d_denominador2;
__constant__ int d_dimensaoX, d_dimensaoY;
__constant__ double d_parcial1, d_parcial2;
__device__ __shared__ double subMatriz[TAM_BLOCO][TAM_BLOCO];
FILE *arquivo;
clock_t start, end;
double tempo;
//Funções da CPU
//Funcao que imprime a matriz no arquivo de saida
void printMat(){
int i, j;
for(i = 0; i < h_dimensaoX; i++){
for(j = 0; j < h_dimensaoY; j++){
fprintf(arquivo, "%lf", h_m[i * h_dimensaoY + j]);
if(j != h_dimensaoY - 1) fprintf(arquivo, " ");
}
if(i != h_dimensaoX - 1)
fprintf(arquivo, "\n");
}
}
//Funcao que inicializa a matriz com os valores de contorno especificados pelo problema
void setupM(){
int i,j;
for(i = 0; i < h_dimensaoX; i++){
for(j = 0; j < h_dimensaoY; j++){
if(i == 0){
h_m[i * h_dimensaoY + j] = uN;
}else if(i == (h_dimensaoX - 1)){
h_m[i * h_dimensaoY + j] = uS;
}else if(j == 0){
h_m[i * h_dimensaoY + j] = uW;
}else if(j == h_dimensaoY - 1){
h_m[i * h_dimensaoY + j] = uE;
}
}
}
}
//Funções da GPU
//Funcoes "a" e "b" especificada pelo problema
__device__ double a(int i, int j){
double x = i * d_h1;
double y = j * d_h2;
return 500 * x * (1 - x) * (0.5 - y);
}
__device__ double b(int i, int j){
double x = i * d_h1;
double y = j * d_h2;
return 500 * y * (1 - y) * (x - 0.5);
}
//Funcoes "n", "s", "w", "e" especificadas pelo problema
__device__ double n(int i, int j){
return (d_parcial2 - (d_h2 * b(i,j))/d_denominador2);
}
__device__ double s(int i, int j){
return (d_parcial2 + (d_h2 * b(i,j))/d_denominador2);
}
__device__ double e(int i, int j){
return (d_parcial1 - (d_h1 * a(i,j))/d_denominador1);
}
__device__ double w(int i, int j){
return (d_parcial1 + (d_h1 * a(i,j))/d_denominador1);
}
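// pontosExternos gathers the 4-neighbour stencil from global memory (block borders);
// pontosInternos gathers it from the shared-memory tile (interior threads).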
__device__ double pontosExternos(int i, int j, int x, int y, double *m){
double temp = 0;
temp += w(x,y) * m[(x - 1) * d_dimensaoY + y];
temp += e(x,y) * m[(x + 1) * d_dimensaoY + y];
temp += s(x,y) * m[x * d_dimensaoY + (y - 1)];
temp += n(x,y) * m[x * d_dimensaoY + (y + 1)];
return temp;
}
__device__ double pontosInternos(int i, int j, int x, int y, double *m){
double temp = 0;
temp += w(x,y) * subMatriz[i - 1][j];
temp += e(x,y) * subMatriz[i + 1][j];
temp += s(x,y) * subMatriz[i][j - 1];
temp += n(x,y) * subMatriz[i][j + 1];
return temp;
}
//Kernels principais do programa. Cada um trabalho em um conjunto de pontos da matriz
//fazendo uma media ponderada entre o valor atual do ponto que está sendo analisado e
//seus quatro pontos adjacentes. O quanto cada valor vai pesar é determinado pelo ômega
//da funcao que, nesse caso, é fixo
__global__ void vermelhos(double *m){
int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
if(tidx > (d_dimensaoX - 2) || tidy > (d_dimensaoY - 2)){
return;
}
//printf("%d %d\n", tidx, tidy);
if((tidx + tidy) % 2 == 1){
subMatriz[threadIdx.x][threadIdx.y] = m[tidx * d_dimensaoY + tidy];
}
__syncthreads();
if((tidx + tidy) % 2 == 0){
if(threadIdx.x == 0 || threadIdx.x == (TAM_BLOCO - 1) || threadIdx.y == 0 || threadIdx.y == (TAM_BLOCO - 1)){
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * pontosExternos(threadIdx.x, threadIdx.y, tidx, tidy, m);
}else{
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * pontosInternos(threadIdx.x, threadIdx.y, tidx, tidy, m);
}
}
}
__global__ void azuis(double *m){
int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
if(tidx > (d_dimensaoX - 2) || tidy > (d_dimensaoY - 2)){
return;
}
if((tidx + tidy) % 2 == 0){
subMatriz[threadIdx.x][threadIdx.y] = m[tidx * d_dimensaoY + tidy];
}
__syncthreads();
if((tidx + tidy) % 2 == 1){
if(threadIdx.x == 0 || threadIdx.x == (TAM_BLOCO - 1) || threadIdx.y == 0 || threadIdx.y == (TAM_BLOCO - 1)){
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * pontosExternos(threadIdx.x, threadIdx.y, tidx, tidy, m);
}else{
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * pontosInternos(threadIdx.x, threadIdx.y, tidx, tidy, m);
}
}
}
int main(int argc, char** argv){
//Especificacoes iniciais para garantir que o programa será rodado com as
//condicoes iniciais corretas
if(argc != 4){
printf("Nmero incorreto de parmetros:\n");
printf("Insira as dimensoes e a quantidade de iteraes\n");
printf("\tUtilize o formato: %s <Dimensao X> <Dimensao Y> <Iteraes>\n", argv[0]);
exit(-1);
}
//Inicializando todos os valores necessários para transferir para a GPU e para realizar
//os calculos do programa
h_dimensaoX = atoi(argv[1]);
h_dimensaoY = atoi(argv[2]);
laps = atoi(argv[3]);
h_h1 = 1.0/(h_dimensaoX + 1);
h_h2 = 1.0/(h_dimensaoY + 1);
h_dimensaoX += 2;
h_dimensaoY += 2;
h_denominador1 = 4*(1 + (pow(h_h1,2)/pow(h_h2,2)));
h_denominador2 = 4*(1 + (pow(h_h2,2)/pow(h_h1,2)));
h_parcial1 = 2/h_denominador1;
h_parcial2 = 2/h_denominador2;
//Alocando a matriz na CPU e inicializando
h_m = (double *) calloc(h_dimensaoX * h_dimensaoY, sizeof(double));
setupM();
//Alocando a matriz na GPU
hipMalloc(&d_m, h_dimensaoX * h_dimensaoY * sizeof(double));
//Transferindo as informações necessárias para a GPU
hipMemcpy(d_m, h_m, h_dimensaoX * h_dimensaoY * sizeof(double), hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_parcial1, &h_parcial1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_parcial2, &h_parcial2, sizeof(double), 0, hipMemcpyHostToDevice);
//Iniciando a contagem do tempo
start = clock();
//Calculando a quantidade de blocos e threads que serao lancados
dim3 nthreads(TAM_BLOCO,TAM_BLOCO);
dim3 nblocos(((h_dimensaoX - 2) + nthreads.x - 1)/nthreads.x, ((h_dimensaoY - 2) + nthreads.y - 1)/nthreads.y);
//Fazendo os cálculos
for(i = 0; i < laps; i++){
hipLaunchKernelGGL(( vermelhos), dim3(nblocos), dim3(nthreads), 0, 0, d_m);
hipLaunchKernelGGL(( azuis), dim3(nblocos), dim3(nthreads), 0, 0, d_m);
}
//Trazendo a matriz de volta para a CPU
hipMemcpy(h_m, d_m, h_dimensaoX * h_dimensaoY * sizeof(double), hipMemcpyDeviceToHost);
//Reseta a GPU para liberar todos os recursos
hipDeviceReset();
//Imprimindo a matriz no arquivo e fechando-o
arquivo = fopen("sample.txt", "w");
printMat();
fclose(arquivo);
//Termina de calcular o tempo que demorou o programa
end = clock();
tempo = ((double) (end - start))/CLOCKS_PER_SEC;
printf("%lf;", tempo);
return 0;
} | f42a6a316d29957d5cab00530eba8f51d491ff4a.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define PRECISION 0.00001
#define TAM_BLOCO 8
#define uN 5.0
#define uS 5.0
#define uW 0.0
#define uE 10.0
//Variáveis CPU
double h_h1, h_h2;
double h_denominador1, h_denominador2;
double *h_m, *d_m;
double h_parcial1, h_parcial2;
int h_dimensaoX, h_dimensaoY, laps = 0, i;
//Variáveis GPU
__constant__ double omega = 1.5;
__constant__ double d_h1, d_h2;
__constant__ double d_denominador1, d_denominador2;
__constant__ int d_dimensaoX, d_dimensaoY;
__constant__ double d_parcial1, d_parcial2;
__device__ __shared__ double subMatriz[TAM_BLOCO][TAM_BLOCO];
FILE *arquivo;
clock_t start, end;
double tempo;
//Funções da CPU
//Funcao que imprime a matriz no arquivo de saida
void printMat(){
int i, j;
for(i = 0; i < h_dimensaoX; i++){
for(j = 0; j < h_dimensaoY; j++){
fprintf(arquivo, "%lf", h_m[i * h_dimensaoY + j]);
if(j != h_dimensaoY - 1) fprintf(arquivo, " ");
}
if(i != h_dimensaoX - 1)
fprintf(arquivo, "\n");
}
}
//Funcao que inicializa a matriz com os valores de contorno especificados pelo problema
void setupM(){
int i,j;
for(i = 0; i < h_dimensaoX; i++){
for(j = 0; j < h_dimensaoY; j++){
if(i == 0){
h_m[i * h_dimensaoY + j] = uN;
}else if(i == (h_dimensaoX - 1)){
h_m[i * h_dimensaoY + j] = uS;
}else if(j == 0){
h_m[i * h_dimensaoY + j] = uW;
}else if(j == h_dimensaoY - 1){
h_m[i * h_dimensaoY + j] = uE;
}
}
}
}
//Funções da GPU
//Funcoes "a" e "b" especificada pelo problema
__device__ double a(int i, int j){
double x = i * d_h1;
double y = j * d_h2;
return 500 * x * (1 - x) * (0.5 - y);
}
__device__ double b(int i, int j){
double x = i * d_h1;
double y = j * d_h2;
return 500 * y * (1 - y) * (x - 0.5);
}
//Funcoes "n", "s", "w", "e" especificadas pelo problema
__device__ double n(int i, int j){
return (d_parcial2 - (d_h2 * b(i,j))/d_denominador2);
}
__device__ double s(int i, int j){
return (d_parcial2 + (d_h2 * b(i,j))/d_denominador2);
}
__device__ double e(int i, int j){
return (d_parcial1 - (d_h1 * a(i,j))/d_denominador1);
}
__device__ double w(int i, int j){
return (d_parcial1 + (d_h1 * a(i,j))/d_denominador1);
}
__device__ double pontosExternos(int i, int j, int x, int y, double *m){
double temp = 0;
temp += w(x,y) * m[(x - 1) * d_dimensaoY + y];
temp += e(x,y) * m[(x + 1) * d_dimensaoY + y];
temp += s(x,y) * m[x * d_dimensaoY + (y - 1)];
temp += n(x,y) * m[x * d_dimensaoY + (y + 1)];
return temp;
}
__device__ double pontosInternos(int i, int j, int x, int y, double *m){
double temp = 0;
temp += w(x,y) * subMatriz[i - 1][j];
temp += e(x,y) * subMatriz[i + 1][j];
temp += s(x,y) * subMatriz[i][j - 1];
temp += n(x,y) * subMatriz[i][j + 1];
return temp;
}
//Kernels principais do programa. Cada um trabalho em um conjunto de pontos da matriz
//fazendo uma media ponderada entre o valor atual do ponto que está sendo analisado e
//seus quatro pontos adjacentes. O quanto cada valor vai pesar é determinado pelo ômega
//da funcao que, nesse caso, é fixo
__global__ void vermelhos(double *m){
int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
if(tidx > (d_dimensaoX - 2) || tidy > (d_dimensaoY - 2)){
return;
}
//printf("%d %d\n", tidx, tidy);
if((tidx + tidy) % 2 == 1){
subMatriz[threadIdx.x][threadIdx.y] = m[tidx * d_dimensaoY + tidy];
}
__syncthreads();
if((tidx + tidy) % 2 == 0){
if(threadIdx.x == 0 || threadIdx.x == (TAM_BLOCO - 1) || threadIdx.y == 0 || threadIdx.y == (TAM_BLOCO - 1)){
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * pontosExternos(threadIdx.x, threadIdx.y, tidx, tidy, m);
}else{
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * pontosInternos(threadIdx.x, threadIdx.y, tidx, tidy, m);
}
}
}
__global__ void azuis(double *m){
int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
if(tidx > (d_dimensaoX - 2) || tidy > (d_dimensaoY - 2)){
return;
}
if((tidx + tidy) % 2 == 0){
subMatriz[threadIdx.x][threadIdx.y] = m[tidx * d_dimensaoY + tidy];
}
__syncthreads();
if((tidx + tidy) % 2 == 1){
if(threadIdx.x == 0 || threadIdx.x == (TAM_BLOCO - 1) || threadIdx.y == 0 || threadIdx.y == (TAM_BLOCO - 1)){
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * pontosExternos(threadIdx.x, threadIdx.y, tidx, tidy, m);
}else{
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * pontosInternos(threadIdx.x, threadIdx.y, tidx, tidy, m);
}
}
}
int main(int argc, char** argv){
//Especificacoes iniciais para garantir que o programa será rodado com as
//condicoes iniciais corretas
if(argc != 4){
printf("Número incorreto de parâmetros:\n");
printf("Insira as dimensoes e a quantidade de iterações\n");
printf("\tUtilize o formato: %s <Dimensao X> <Dimensao Y> <Iterações>\n", argv[0]);
exit(-1);
}
//Inicializando todos os valores necessários para transferir para a GPU e para realizar
//os calculos do programa
h_dimensaoX = atoi(argv[1]);
h_dimensaoY = atoi(argv[2]);
laps = atoi(argv[3]);
h_h1 = 1.0/(h_dimensaoX + 1);
h_h2 = 1.0/(h_dimensaoY + 1);
h_dimensaoX += 2;
h_dimensaoY += 2;
h_denominador1 = 4*(1 + (pow(h_h1,2)/pow(h_h2,2)));
h_denominador2 = 4*(1 + (pow(h_h2,2)/pow(h_h1,2)));
h_parcial1 = 2/h_denominador1;
h_parcial2 = 2/h_denominador2;
//Alocando a matriz na CPU e inicializando
h_m = (double *) calloc(h_dimensaoX * h_dimensaoY, sizeof(double));
setupM();
//Alocando a matriz na GPU
cudaMalloc(&d_m, h_dimensaoX * h_dimensaoY * sizeof(double));
//Transferindo as informações necessárias para a GPU
cudaMemcpy(d_m, h_m, h_dimensaoX * h_dimensaoY * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_parcial1, &h_parcial1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_parcial2, &h_parcial2, sizeof(double), 0, cudaMemcpyHostToDevice);
//Iniciando a contagem do tempo
start = clock();
//Calculando a quantidade de blocos e threads que serao lancados
dim3 nthreads(TAM_BLOCO,TAM_BLOCO);
dim3 nblocos(((h_dimensaoX - 2) + nthreads.x - 1)/nthreads.x, ((h_dimensaoY - 2) + nthreads.y - 1)/nthreads.y);
//Fazendo os cálculos
for(i = 0; i < laps; i++){
vermelhos<<<nblocos, nthreads>>>(d_m);
azuis<<<nblocos, nthreads>>>(d_m);
}
//Trazendo a matriz de volta para a CPU
cudaMemcpy(h_m, d_m, h_dimensaoX * h_dimensaoY * sizeof(double), cudaMemcpyDeviceToHost);
//Reseta a GPU para liberar todos os recursos
cudaDeviceReset();
//Imprimindo a matriz no arquivo e fechando-o
arquivo = fopen("sample.txt", "w");
printMat();
fclose(arquivo);
//Termina de calcular o tempo que demorou o programa
end = clock();
tempo = ((double) (end - start))/CLOCKS_PER_SEC;
printf("%lf;", tempo);
return 0;
} |
d88a1996f7eb0cb4c820647b34e9aa1ef9399d7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
__global__ void mallocTest()
{
size_t size = 123;
char* ptr = (char*)malloc(size);
memset(ptr, 0, size);
ptr[0] = 9;
printf("Thread %d got pointer: %p: %d\n", threadIdx.x, ptr, ptr[0]);
free(ptr);
}
// int main()
// {
// // Set a heap size of 128 megabytes. Note that this must
// // be done before any kernel is launched.
// hipDeviceSetLimit(hipLimitMallocHeapSize, 128*1024*1024);
// mallocTest<<<1, 5>>>();
// hipDeviceSynchronize();
// return 0;
// }
#include<iostream>
int main(){
int r= 2;
int arr[r];
arr[0]= 3;
std::cout<< arr[0];
} | d88a1996f7eb0cb4c820647b34e9aa1ef9399d7c.cu | #include <stdlib.h>
#include <stdio.h>
__global__ void mallocTest()
{
size_t size = 123;
char* ptr = (char*)malloc(size);
memset(ptr, 0, size);
ptr[0] = 9;
printf("Thread %d got pointer: %p: %d\n", threadIdx.x, ptr, ptr[0]);
free(ptr);
}
// int main()
// {
// // Set a heap size of 128 megabytes. Note that this must
// // be done before any kernel is launched.
// cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128*1024*1024);
// mallocTest<<<1, 5>>>();
// cudaDeviceSynchronize();
// return 0;
// }
#include<iostream>
int main(){
int r= 2;
int arr[r];
arr[0]= 3;
std::cout<< arr[0];
} |
a9b45a2c1aef79916b57b13a14812a30dddca911.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix M;
Matrix N;
Matrix P;
// Number of elements in the solution matrix
// Assuming square matrices, so the sizes of M, N and P are equal
unsigned int size_elements = WP * HP;
int errorM = 0, errorN = 0;
srand(2012);
// Check command line for input matrix files
if(argc != 3 && argc != 4)
{
// No inputs provided
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
}
else
{
// Inputs provided
// Allocate and read source matrices from disk
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
errorM = ReadFile(&M, argv[1]);
errorN = ReadFile(&N, argv[2]);
// check for read errors
if(errorM != size_elements || errorN != size_elements)
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
computeGold(reference.elements, M.elements, N.elements, HM, WM, WN);
// check if the device result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, P.elements,
size_elements, 0.0001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// output result if output file is requested
if(argc == 4)
{
WriteFile(P, argv[3]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free host matrices
free(M.elements);
M.elements = NULL;
free(N.elements);
N.elements = NULL;
free(P.elements);
P.elements = NULL;
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
//Interface host call to the device kernel code and invoke the kernel
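// One MATRIX_SIZE x MATRIX_SIZE thread block per tile; NumBlocks is rounded up
// when M.width is not an exact multiple of MATRIX_SIZE.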
Matrix d_M, d_N, d_P;
int NumBlocks = M.width/MATRIX_SIZE ;
if((M.width % MATRIX_SIZE) > 0)
{
NumBlocks++;
}
dim3 dimGrid(NumBlocks, NumBlocks);
dim3 dimBlock(MATRIX_SIZE,MATRIX_SIZE);
d_M = AllocateDeviceMatrix(M);
d_N = AllocateDeviceMatrix(N);
d_P = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(d_M, M);
CopyToDeviceMatrix(d_N, N);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_M, d_N, d_P);
CopyFromDeviceMatrix(P, d_P);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Read a 16x16 floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return data_read;
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
| a9b45a2c1aef79916b57b13a14812a30dddca911.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix M;
Matrix N;
Matrix P;
// Number of elements in the solution matrix
// Assuming square matrices, so the sizes of M, N and P are equal
unsigned int size_elements = WP * HP;
int errorM = 0, errorN = 0;
srand(2012);
// Check command line for input matrix files
if(argc != 3 && argc != 4)
{
// No inputs provided
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
}
else
{
// Inputs provided
// Allocate and read source matrices from disk
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
errorM = ReadFile(&M, argv[1]);
errorN = ReadFile(&N, argv[2]);
// check for read errors
if(errorM != size_elements || errorN != size_elements)
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
computeGold(reference.elements, M.elements, N.elements, HM, WM, WN);
// check if the device result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, P.elements,
size_elements, 0.0001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// output result if output file is requested
if(argc == 4)
{
WriteFile(P, argv[3]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free host matrices
free(M.elements);
M.elements = NULL;
free(N.elements);
N.elements = NULL;
free(P.elements);
P.elements = NULL;
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
//Interface host call to the device kernel code and invoke the kernel
Matrix d_M, d_N, d_P;
int NumBlocks = M.width/MATRIX_SIZE ;
if((M.width % MATRIX_SIZE) > 0)
{
NumBlocks++;
}
dim3 dimGrid(NumBlocks, NumBlocks);
dim3 dimBlock(MATRIX_SIZE,MATRIX_SIZE);
d_M = AllocateDeviceMatrix(M);
d_N = AllocateDeviceMatrix(N);
d_P = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(d_M, M);
CopyToDeviceMatrix(d_N, N);
MatrixMulKernel<<<dimGrid, dimBlock>>>(d_M, d_N, d_P);
CopyFromDeviceMatrix(P, d_P);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Read a 16x16 floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return data_read;
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
|
886397df018eae810174f6d056679ae612f751f4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/random.h>
#include "device_launch_parameters.h"
#include <vector>
#include <random>
#include <string>
#include <time.h>
#include <Windows.h>
#define STB_IMAGE_IMPLEMENTATION
#include <stb\stb_image.h>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#define STBI_MSC_SECURE_CRT
#include <stb\stb_image_write.h>
using namespace std;
#define HOST_DEVICE __host__ __device__
// Utility
float Clamp(float V, float Min, float Max)
{
if (V < Min) V = Min;
if (V > Max) V = Max;
return V;
}
bool SavePng(const char* Filename, int Width, int Height, int Channels, float* Input){
unsigned char* Transform = new unsigned char[Width*Height*Channels];
for (int i = 0; i < Height; ++i){
for (int j = 0; j < Width; ++j){
unsigned Pixel = i*Width + j;
for (int c = 0; c < Channels; ++c)
{
unsigned C = unsigned(Clamp(Input[Channels * Pixel + c], 0.f, 1.f)*255.f);
Transform[Channels * Pixel + c] = C;
}
}
}
stbi_write_png(Filename, Width, Height, Channels, Transform, 0);
delete[] Transform;
return true;
}
class Timer {
private:
LARGE_INTEGER start;
LARGE_INTEGER end;
public:
Timer()
:start()
, end(){}
void Start()
{
QueryPerformanceCounter(&start);
}
void End()
{
QueryPerformanceCounter(&end);
}
float GetElapsed() const
{
LARGE_INTEGER freq;
//frequency per second
QueryPerformanceFrequency(&freq);
return ((float)(end.QuadPart - start.QuadPart)) / freq.QuadPart;
}
};
//
// ---------------------White Noise----------------------
static __forceinline HOST_DEVICE unsigned int WangHash(unsigned int seed)
{
seed = (seed ^ 61) ^ (seed >> 16);
seed = seed + (seed << 3);
seed = seed ^ (seed >> 4);
seed = seed * 0x27d4eb2d;
seed = seed ^ (seed >> 15);
return seed;
}
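// Each thread seeds its own RNG by hashing the pixel index and the frame number,
// so no generator state needs to persist between kernel launches.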
__global__ void WhiteNoiseGenerator(int Channels, int Iter, float* Image)
{
int X = blockIdx.x*blockDim.x + threadIdx.x;
int Y = blockIdx.y*blockDim.y + threadIdx.y;
int Pixel = Y*gridDim.x*blockDim.x + X;
thrust::default_random_engine Rng(WangHash(Pixel) + WangHash(Iter));
thrust::uniform_real_distribution<float> Uniform(0.f, 1.f);
for (int Index = 0; Index < Channels; ++Index)
{
Image[Channels*Pixel + Index] = Uniform(Rng);
}
}
void WhiteNoiseGenerator(int Width, int Height, int Channels, int Iter, float* Output)
{
float* Image;
hipMalloc(&Image, Width*Height*Channels*sizeof(float));
dim3 BlockSize(32, 32);
dim3 GridSize(Width / BlockSize.x, Height / BlockSize.y);
WhiteNoiseGenerator << <GridSize, BlockSize >> >(Channels, Iter, Image);
hipMemcpy(Output, Image, Width*Height*Channels*sizeof(float), hipMemcpyDeviceToHost);
}
// ---------------------White Noise----------------------
// ----------------------Blue Noise----------------------
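// Distances wrap around the texture edges (toroidal metric) so the resulting
// blue-noise texture tiles seamlessly.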
HOST_DEVICE float ToroidalDistanceSq(float X1, float Y1, float X2, float Y2, float Width, float Height)
{
float Dx = abs(X2 - X1);
float Dy = abs(Y2 - Y1);
if (Dx > Width * 0.5f)
Dx = Width - Dx;
if (Dy > Height * 0.5f)
Dy = Height - Dy;
return Dx*Dx + Dy*Dy;
}
__global__ void Init(int Width, int Height, float* EnergyLut)
{
int X = blockIdx.x*blockDim.x + threadIdx.x;
int Y = blockIdx.y*blockDim.y + threadIdx.y;
int Pixel = Y*Width + X;
EnergyLut[Pixel] = 0.f;
}
__global__ void Normalize(int Width, int Height, float* Image)
{
int X = blockIdx.x*blockDim.x + threadIdx.x;
int Y = blockIdx.y*blockDim.y + threadIdx.y;
int Pixel = Y*Width + X;
Image[Pixel] /= (Width*Height);
}
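// Splat a Gaussian energy contribution from the sample at (SX, SY) onto every
// pixel: E(p) += exp(-d^2 / (2 * sigma^2)) with sigma = 1.9 and d the toroidal distance.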
__global__ void UpdateEnergyLut(int SX, int SY, int Width, int Height, float* EnergyLut)
{
int X = blockIdx.x*blockDim.x + threadIdx.x;
int Y = blockIdx.y*blockDim.y + threadIdx.y;
int Pixel = Y*Width + X;
//Calc Distance
float DistanceSq = ToroidalDistanceSq(X, Y, SX, SY, Width, Height);
float SigmaSquareTimesTwo = 2.f*1.9f*1.9f;
EnergyLut[Pixel] += exp(-DistanceSq / SigmaSquareTimesTwo);
}
const int BlockWidth = 32;
__device__ int BestXY[2];
__shared__ float MinVec[BlockWidth*BlockWidth];
__shared__ int PixelIndex[BlockWidth*BlockWidth];
__global__ void UpdateEnergyLut(int Width, int Height, float* EnergyLut)
{
int X = blockIdx.x*blockDim.x + threadIdx.x;
int Y = blockIdx.y*blockDim.y + threadIdx.y;
int Pixel = Y*Width + X;
//Calc Distance
float DistanceSq = ToroidalDistanceSq(X, Y, BestXY[0], BestXY[1], Width, Height);
float SigmaSquareTimesTwo = 2.f*1.9f*1.9f;
EnergyLut[Pixel] += exp(-DistanceSq / SigmaSquareTimesTwo);
}
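// FindLargestVoid runs as a single 32x32 block: each thread scans a contiguous
// slice of the energy map for its smallest unmarked pixel, then a shared-memory
// parallel min-reduction (unrolled below 32 threads) picks the global minimum,
// marks it in BinaryPattern and assigns it the next rank in Image.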
__global__ void FindLargestVoid(int Width, int Height, int* BinaryPattern, float* EnergyLut, float* Image, int CurrentIndex)
{
const int NumProcesses = Width*Height / (BlockWidth*BlockWidth);
const int ThreadIdx = threadIdx.y*blockDim.x + threadIdx.x;
float MinF = INFINITY;
for (int Index = 0; Index < NumProcesses; ++Index)
{
float V = EnergyLut[ThreadIdx*NumProcesses + Index];
if (V < MinF && !BinaryPattern[ThreadIdx*NumProcesses + Index])
{
PixelIndex[ThreadIdx] = ThreadIdx*NumProcesses + Index;
MinF = V;
}
}
MinVec[ThreadIdx] = MinF;
__syncthreads();
// Parallel Reduction Min Begin
auto MaySwapVolatile = [&](volatile int* Indices, volatile float* Vec, int Gap)
{
if (Vec[ThreadIdx] > Vec[ThreadIdx + Gap] && !BinaryPattern[Indices[ThreadIdx + Gap]])
{
Vec[ThreadIdx] = Vec[ThreadIdx + Gap];
Indices[ThreadIdx] = Indices[ThreadIdx + Gap];
}
};
auto MaySwap = [&](int* Indices, float* Vec, int Gap)
{
if (Vec[ThreadIdx] > Vec[ThreadIdx + Gap] && !BinaryPattern[Indices[ThreadIdx + Gap]])
{
Vec[ThreadIdx] = Vec[ThreadIdx + Gap];
Indices[ThreadIdx] = Indices[ThreadIdx + Gap];
}
};
auto WarpReductionMin = [&](volatile int* Indices, volatile float* Vec)
{
MaySwapVolatile(Indices, Vec, 32);
if (ThreadIdx < 16) MaySwapVolatile(Indices, Vec, 16);
if (ThreadIdx < 8) MaySwapVolatile(Indices, Vec, 8);
if (ThreadIdx < 4) MaySwapVolatile(Indices, Vec, 4);
if (ThreadIdx < 2) MaySwapVolatile(Indices, Vec, 2);
if (ThreadIdx < 1) MaySwapVolatile(Indices, Vec, 1);
};
auto ParallelReductionMin = [&](int* Indices, float* Vec)
{
if (ThreadIdx < 512) MaySwap(Indices, Vec, 512);
__syncthreads();
if (ThreadIdx < 256) MaySwap(Indices, Vec, 256);
__syncthreads();
if (ThreadIdx < 128) MaySwap(Indices, Vec, 128);
__syncthreads();
if (ThreadIdx < 64) MaySwap(Indices, Vec, 64);
__syncthreads();
if (ThreadIdx < 32) WarpReductionMin(Indices, Vec);
};
// Parallel Reduction Min End
ParallelReductionMin(PixelIndex, MinVec);
if (ThreadIdx == 0)
{
BestXY[0] = PixelIndex[0] % Width;
BestXY[1] = PixelIndex[0] / Width;
BinaryPattern[PixelIndex[0]] = 1;
Image[PixelIndex[0]] = CurrentIndex;
}
}
//Mitchell Best Candiate Algorithm
void MitchellBestCandiate(const int N, const int Width, const int Height, int* BinaryPattern, float* Out)
{
struct Vec2f
{
float X, Y;
Vec2f(float X, float Y) :X(X), Y(Y){}
};
srand(time(nullptr));
auto Generate2DNoise = [&]()->Vec2f
{
float X = float(rand()) / RAND_MAX;
float Y = float(rand()) / RAND_MAX;
return Vec2f(X, Y);
};
int CurrentIndex = 0;
vector<Vec2f> Samples;
if (CurrentIndex == 0)
{
Samples.push_back(Generate2DNoise());
CurrentIndex = 1;
}
while (CurrentIndex < N)
{
vector<Vec2f> Candidates;
int NumCandidates = Samples.size() + 1;
for (int Index = 0; Index < NumCandidates; ++Index)
{
Candidates.push_back(Generate2DNoise());
}
float MaxDistance = 0.f;
int BestIndex = -1;
for (int Index = 0; Index < NumCandidates; ++Index)
{
Vec2f Candiate = Candidates[Index];
float Distance = INFINITY;
for (int S = 0; S < Samples.size(); ++S)
{
Vec2f Sample = Samples[S];
float D = ToroidalDistanceSq(Sample.X, Sample.Y, Candiate.X, Candiate.Y, 1.f, 1.f);
if (D < Distance) Distance = D;
}
if (Distance > MaxDistance)
{
MaxDistance = Distance;
BestIndex = Index;
}
}
Samples.push_back(Candidates[BestIndex]);
CurrentIndex++;
printf("Mitchells Best Candiates : %f%%\r", 100.f * float(CurrentIndex) / N);
}
printf("\n");
for (int Index = 0; Index < Samples.size(); ++Index)
{
Vec2f Sample = Samples[Index];
int X = floor(Sample.X*Width); X = X == Width ? X - 1 : X;
int Y = floor(Sample.Y*Height); Y = Y == Height ? Y - 1 : Y;
Out[Y*Height + X] = Index + 1;
BinaryPattern[Y*Height + X] = 1;
}
}
//Void And Cluster Algorithm
//http://cv.ulichney.com/papers/1993-void-cluster.pdf
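// Simplified variant: seed the binary pattern with Mitchell best-candidate samples,
// then repeatedly insert a sample at the lowest-energy empty pixel (the "largest void"),
// recording the insertion order as each pixel's rank before normalizing to [0,1).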
void BlueNoiseGenerator(int Width, int Height, float* Image)
{
vector<int> BinaryPattern;
BinaryPattern.resize(Width*Height);
memset(&BinaryPattern[0], 0, Width*Height*sizeof(int));
int nSamples = 256;
MitchellBestCandiate(nSamples, Width, Height, &BinaryPattern[0], Image);
int CurrentIndex = nSamples;
float* EnergyLut, *ImageDevice;
hipMalloc(&EnergyLut, Width*Height*sizeof(float));
int* BinaryPatternDevice;
hipMalloc(&BinaryPatternDevice, Width*Height*sizeof(int));
hipMemcpy(BinaryPatternDevice, &BinaryPattern[0], Width*Height*sizeof(int), hipMemcpyHostToDevice);
hipMalloc(&ImageDevice, Width*Height*sizeof(float));
hipMemcpy(ImageDevice, Image, Width*Height*sizeof(float), hipMemcpyHostToDevice);
dim3 BlockSize(BlockWidth, BlockWidth);
dim3 GridSize(Width / BlockSize.x, Height / BlockSize.y);
Init << <GridSize, BlockSize >> >(Width, Height, EnergyLut);
for (int Index = 0; Index < Width*Height; ++Index)
{
if (!BinaryPattern[Index]) continue;
int X = Index % Width;
int Y = Index / Width;
UpdateEnergyLut << <GridSize, BlockSize >> >(X, Y, Width, Height, EnergyLut);
}
while (CurrentIndex < Width*Height)
{
CurrentIndex++;
dim3 SpecialGridSize(1, 1);
FindLargestVoid << <SpecialGridSize, BlockSize >> >(Width, Height, BinaryPatternDevice, EnergyLut, ImageDevice, CurrentIndex);
UpdateEnergyLut << <GridSize, BlockSize >> >(Width, Height, EnergyLut);
if (CurrentIndex % 10 == 0)
printf("Phase2 And Phase3 : %f%%\r", CurrentIndex * 100.f / (Width*Height));
}
printf("Phase2 And Phase3 : %f%%\r", 100.f);
// normalize
Normalize << <GridSize, BlockSize >> >(Width, Height, ImageDevice);
hipMemcpy(Image, ImageDevice, Width*Height*sizeof(float), hipMemcpyDeviceToHost);
// free resources
hipFree(EnergyLut);
hipFree(ImageDevice);
hipFree(BinaryPatternDevice);
}
// ----------------------Blue Noise----------------------
void PrintHelp()
{
printf("******************************************************************\n");
printf(R"(Options:
--size <num> Size of blue or white noise texture need be generated
--channels <num> Channels of noise texture (1,2,3,4)
--output <directory> Output directory of blue or white noise texture
--type <num> Which noise do you want to generate (0: white, 1: blue)
--frames <num> Number of textures do you want to generated
--goldratio <num> Generate multi textures using goldratio?
--help Print help
)");
printf("******************************************************************\n");
}
int main(int argc, char** argv)
{
PrintHelp();
string Base = "D:/";
int Width = 64, Height = 64;
int Type = 1;
int NumFrames = 1, GoldRatio = 1;
int NumChannels = 1;
for (int Index = 1; Index < argc; ++Index)
{
if (!strcmp(argv[Index], "--size"))
{
Width = Height = atoi(argv[++Index]);
}
else if (!strcmp(argv[Index], "--output"))
{
Base = argv[++Index];
}
else if (!strcmp(argv[Index], "--type"))
{
Type = atoi(argv[++Index]);
}
else if (!strcmp(argv[Index], "--frames"))
{
NumFrames = atoi(argv[++Index]);
}
else if (!strcmp(argv[Index], "--goldratio"))
{
GoldRatio = atoi(argv[++Index]);
}
else if (!strcmp(argv[Index], "--channels"))
{
NumChannels = atoi(argv[++Index]);
}
else if (!strcmp(argv[Index], "--help"))
{
PrintHelp();
return 0;
}
else
{
printf("Unexpected parameters\n");
return 1;
}
}
printf("Will generate %d %snoise textures(%d channels) with size[%dx%d] and output directory[%s]\n", NumFrames, Type == 0 ? "white" : "blue", NumChannels, Width, Height, Base.c_str());
float *Output;
Output = new float[Width*Height*NumChannels];
for (int Index = 0; Index < Width*Height*NumChannels; ++Index) Output[Index] = 0.f;
for (int Frames = 0; Frames < NumFrames; ++Frames)
{
Timer Record;
Record.Start();
if (Frames > 0 && GoldRatio)
{
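// Later frames are derived from the first one: adding the golden ratio to every pixel
// modulo 1 gives a cheap low-discrepancy shift instead of regenerating the noise.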
const float Ratio = 1.618033f; // Golden ratio
for (int Index = 0; Index < Width*Height*NumChannels; ++Index)
{
Output[Index] += Ratio;
Output[Index] -= floor(Output[Index]);
}
}
else
{
if (Type == 0)
{
WhiteNoiseGenerator(Width, Height, NumChannels, Frames, Output);
}
else
{
vector<float> Temp;
Temp.resize(Width*Height);
for (int Index = 0; Index < NumChannels; ++Index)
{
BlueNoiseGenerator(Width, Height, &Temp[0]);
for (int Pixel = 0; Pixel < Width*Height; ++Pixel)
{
Output[NumChannels*Pixel + Index] = Temp[Pixel];
}
}
}
}
Record.End();
string Filename = Base + "Noise" + to_string(Frames) + ".png";
SavePng(Filename.c_str(), Width, Height, NumChannels, Output);
printf("\nElapsed Time : %f\n", Record.GetElapsed());
}
delete[] Output;
return 0;
} | 886397df018eae810174f6d056679ae612f751f4.cu | #include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <thrust/random.h>
#include "device_launch_parameters.h"
#include <vector>
#include <random>
#include <string>
#include <time.h>
#include <Windows.h>
#define STB_IMAGE_IMPLEMENTATION
#include <stb\stb_image.h>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#define STBI_MSC_SECURE_CRT
#include <stb\stb_image_write.h>
using namespace std;
#define HOST_DEVICE __host__ __device__
// Utility
float Clamp(float V, float Min, float Max)
{
if (V < Min) V = Min;
if (V > Max) V = Max;
return V;
}
bool SavePng(const char* Filename, int Width, int Height, int Channels, float* Input){
unsigned char* Transform = new unsigned char[Width*Height*Channels];
for (int i = 0; i < Height; ++i){
for (int j = 0; j < Width; ++j){
unsigned Pixel = i*Width + j;
for (int c = 0; c < Channels; ++c)
{
unsigned C = unsigned(Clamp(Input[Channels * Pixel + c], 0.f, 1.f)*255.f);
Transform[Channels * Pixel + c] = C;
}
}
}
stbi_write_png(Filename, Width, Height, Channels, Transform, 0);
delete[] Transform;
return true;
}
class Timer {
private:
LARGE_INTEGER start;
LARGE_INTEGER end;
public:
Timer()
:start()
, end(){}
void Start()
{
QueryPerformanceCounter(&start);
}
void End()
{
QueryPerformanceCounter(&end);
}
float GetElapsed() const
{
LARGE_INTEGER freq;
//frequency per second
QueryPerformanceFrequency(&freq);
return ((float)(end.QuadPart - start.QuadPart)) / freq.QuadPart;
}
};
//
// ---------------------White Noise----------------------
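// One thread per pixel: the pixel index and the frame index are run through WangHash to
// seed a thrust::default_random_engine, and one uniform [0,1) value is drawn per channel.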
static __forceinline HOST_DEVICE unsigned int WangHash(unsigned int seed)
{
seed = (seed ^ 61) ^ (seed >> 16);
seed = seed + (seed << 3);
seed = seed ^ (seed >> 4);
seed = seed * 0x27d4eb2d;
seed = seed ^ (seed >> 15);
return seed;
}
__global__ void WhiteNoiseGenerator(int Channels, int Iter, float* Image)
{
int X = blockIdx.x*blockDim.x + threadIdx.x;
int Y = blockIdx.y*blockDim.y + threadIdx.y;
int Pixel = Y*gridDim.x*blockDim.x + X;
thrust::default_random_engine Rng(WangHash(Pixel) + WangHash(Iter));
thrust::uniform_real_distribution<float> Uniform(0.f, 1.f);
for (int Index = 0; Index < Channels; ++Index)
{
Image[Channels*Pixel + Index] = Uniform(Rng);
}
}
void WhiteNoiseGenerator(int Width, int Height, int Channels, int Iter, float* Output)
{
float* Image;
cudaMalloc(&Image, Width*Height*Channels*sizeof(float));
dim3 BlockSize(32, 32);
dim3 GridSize(Width / BlockSize.x, Height / BlockSize.y);
WhiteNoiseGenerator << <GridSize, BlockSize >> >(Channels, Iter, Image);
cudaMemcpy(Output, Image, Width*Height*Channels*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(Image); // release the temporary device buffer
}
// ---------------------White Noise----------------------
// ----------------------Blue Noise----------------------
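// ToroidalDistanceSq measures squared distance with wrap-around on both axes, so samples
// near opposite edges still repel each other and the finished texture tiles seamlessly.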
HOST_DEVICE float ToroidalDistanceSq(float X1, float Y1, float X2, float Y2, float Width, float Height)
{
float Dx = abs(X2 - X1);
float Dy = abs(Y2 - Y1);
if (Dx > Width * 0.5f)
Dx = Width - Dx;
if (Dy > Height * 0.5f)
Dy = Height - Dy;
return Dx*Dx + Dy*Dy;
}
__global__ void Init(int Width, int Height, float* EnergyLut)
{
int X = blockIdx.x*blockDim.x + threadIdx.x;
int Y = blockIdx.y*blockDim.y + threadIdx.y;
int Pixel = Y*Width + X;
EnergyLut[Pixel] = 0.f;
}
__global__ void Normalize(int Width, int Height, float* Image)
{
int X = blockIdx.x*blockDim.x + threadIdx.x;
int Y = blockIdx.y*blockDim.y + threadIdx.y;
int Pixel = Y*Width + X;
Image[Pixel] /= (Width*Height);
}
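// Every placed sample splats a Gaussian exp(-d^2 / (2 * sigma^2)) with sigma = 1.9 into
// the energy LUT; the unfilled pixel with the lowest accumulated energy is the "largest
// void" and is filled next. The overload further below reads the coordinates of the most
// recently placed sample from BestXY.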
__global__ void UpdateEnergyLut(int SX, int SY, int Width, int Height, float* EnergyLut)
{
int X = blockIdx.x*blockDim.x + threadIdx.x;
int Y = blockIdx.y*blockDim.y + threadIdx.y;
int Pixel = Y*Width + X;
//Calc Distance
float DistanceSq = ToroidalDistanceSq(X, Y, SX, SY, Width, Height);
float SigmaSquareTimesTwo = 2.f*1.9f*1.9f;
EnergyLut[Pixel] += exp(-DistanceSq / SigmaSquareTimesTwo);
}
const int BlockWidth = 32;
__device__ int BestXY[2];
__shared__ float MinVec[BlockWidth*BlockWidth];
__shared__ int PixelIndex[BlockWidth*BlockWidth];
__global__ void UpdateEnergyLut(int Width, int Height, float* EnergyLut)
{
int X = blockIdx.x*blockDim.x + threadIdx.x;
int Y = blockIdx.y*blockDim.y + threadIdx.y;
int Pixel = Y*Width + X;
//Calc Distance
float DistanceSq = ToroidalDistanceSq(X, Y, BestXY[0], BestXY[1], Width, Height);
float SigmaSquareTimesTwo = 2.f*1.9f*1.9f;
EnergyLut[Pixel] += exp(-DistanceSq / SigmaSquareTimesTwo);
}
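// FindLargestVoid runs as a single 32x32 block: each thread scans a contiguous slice of
// the LUT for its lowest-energy pixel that is not yet in the binary pattern, the
// shared-memory reduction picks the global minimum, and thread 0 records its coordinates
// in BestXY, flags it in the pattern and stores its rank (CurrentIndex) in the image.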
__global__ void FindLargestVoid(int Width, int Height, int* BinaryPattern, float* EnergyLut, float* Image, int CurrentIndex)
{
const int NumProcesses = Width*Height / (BlockWidth*BlockWidth);
const int ThreadIdx = threadIdx.y*blockDim.x + threadIdx.x;
float MinF = INFINITY;
for (int Index = 0; Index < NumProcesses; ++Index)
{
float V = EnergyLut[ThreadIdx*NumProcesses + Index];
if (V < MinF && !BinaryPattern[ThreadIdx*NumProcesses + Index])
{
PixelIndex[ThreadIdx] = ThreadIdx*NumProcesses + Index;
MinF = V;
}
}
MinVec[ThreadIdx] = MinF;
__syncthreads();
// Parallel Reduction Min Begin
auto MaySwapVolatile = [&](volatile int* Indices, volatile float* Vec, int Gap)
{
if (Vec[ThreadIdx] > Vec[ThreadIdx + Gap] && !BinaryPattern[Indices[ThreadIdx + Gap]])
{
Vec[ThreadIdx] = Vec[ThreadIdx + Gap];
Indices[ThreadIdx] = Indices[ThreadIdx + Gap];
}
};
auto MaySwap = [&](int* Indices, float* Vec, int Gap)
{
if (Vec[ThreadIdx] > Vec[ThreadIdx + Gap] && !BinaryPattern[Indices[ThreadIdx + Gap]])
{
Vec[ThreadIdx] = Vec[ThreadIdx + Gap];
Indices[ThreadIdx] = Indices[ThreadIdx + Gap];
}
};
auto WarpReductionMin = [&](volatile int* Indices, volatile float* Vec)
{
MaySwapVolatile(Indices, Vec, 32);
if (ThreadIdx < 16) MaySwapVolatile(Indices, Vec, 16);
if (ThreadIdx < 8) MaySwapVolatile(Indices, Vec, 8);
if (ThreadIdx < 4) MaySwapVolatile(Indices, Vec, 4);
if (ThreadIdx < 2) MaySwapVolatile(Indices, Vec, 2);
if (ThreadIdx < 1) MaySwapVolatile(Indices, Vec, 1);
};
auto ParallelReductionMin = [&](int* Indices, float* Vec)
{
if (ThreadIdx < 512) MaySwap(Indices, Vec, 512);
__syncthreads();
if (ThreadIdx < 256) MaySwap(Indices, Vec, 256);
__syncthreads();
if (ThreadIdx < 128) MaySwap(Indices, Vec, 128);
__syncthreads();
if (ThreadIdx < 64) MaySwap(Indices, Vec, 64);
__syncthreads();
if (ThreadIdx < 32) WarpReductionMin(Indices, Vec);
};
// Parallel Reduction Min End
ParallelReductionMin(PixelIndex, MinVec);
if (ThreadIdx == 0)
{
BestXY[0] = PixelIndex[0] % Width;
BestXY[1] = PixelIndex[0] / Width;
BinaryPattern[PixelIndex[0]] = 1;
Image[PixelIndex[0]] = CurrentIndex;
}
}
// Mitchell's best-candidate algorithm
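// Start from one random sample; every iteration draws Samples.size()+1 uniform
// candidates and keeps the one whose nearest existing sample (toroidal metric) is
// farthest away. The accepted samples are finally rasterized into the binary pattern,
// with their insertion order used as the initial rank.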
void MitchellBestCandiate(const int N, const int Width, const int Height, int* BinaryPattern, float* Out)
{
struct Vec2f
{
float X, Y;
Vec2f(float X, float Y) :X(X), Y(Y){}
};
srand(time(nullptr));
auto Generate2DNoise = [&]()->Vec2f
{
float X = float(rand()) / RAND_MAX;
float Y = float(rand()) / RAND_MAX;
return Vec2f(X, Y);
};
int CurrentIndex = 0;
vector<Vec2f> Samples;
if (CurrentIndex == 0)
{
Samples.push_back(Generate2DNoise());
CurrentIndex = 1;
}
while (CurrentIndex < N)
{
vector<Vec2f> Candidates;
int NumCandidates = Samples.size() + 1;
for (int Index = 0; Index < NumCandidates; ++Index)
{
Candidates.push_back(Generate2DNoise());
}
float MaxDistance = 0.f;
int BestIndex = -1;
for (int Index = 0; Index < NumCandidates; ++Index)
{
Vec2f Candiate = Candidates[Index];
float Distance = INFINITY;
for (int S = 0; S < Samples.size(); ++S)
{
Vec2f Sample = Samples[S];
float D = ToroidalDistanceSq(Sample.X, Sample.Y, Candiate.X, Candiate.Y, 1.f, 1.f);
if (D < Distance) Distance = D;
}
if (Distance > MaxDistance)
{
MaxDistance = Distance;
BestIndex = Index;
}
}
Samples.push_back(Candidates[BestIndex]);
CurrentIndex++;
printf("Mitchells Best Candiates : %f%%\r", 100.f * float(CurrentIndex) / N);
}
printf("\n");
for (int Index = 0; Index < Samples.size(); ++Index)
{
Vec2f Sample = Samples[Index];
int X = floor(Sample.X*Width); X = X == Width ? X - 1 : X;
int Y = floor(Sample.Y*Height); Y = Y == Height ? Y - 1 : Y;
Out[Y*Width + X] = Index + 1; // row-major: the row stride is Width
BinaryPattern[Y*Width + X] = 1;
}
}
//Void And Cluster Algorithm
//http://cv.ulichney.com/papers/1993-void-cluster.pdf
void BlueNoiseGenerator(int Width, int Height, float* Image)
{
vector<int> BinaryPattern;
BinaryPattern.resize(Width*Height);
memset(&BinaryPattern[0], 0, Width*Height*sizeof(int));
int nSamples = 256;
MitchellBestCandiate(nSamples, Width, Height, &BinaryPattern[0], Image);
int CurrentIndex = nSamples;
float* EnergyLut, *ImageDevice;
cudaMalloc(&EnergyLut, Width*Height*sizeof(float));
int* BinaryPatternDevice;
cudaMalloc(&BinaryPatternDevice, Width*Height*sizeof(int));
cudaMemcpy(BinaryPatternDevice, &BinaryPattern[0], Width*Height*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&ImageDevice, Width*Height*sizeof(float));
cudaMemcpy(ImageDevice, Image, Width*Height*sizeof(float), cudaMemcpyHostToDevice);
dim3 BlockSize(BlockWidth, BlockWidth);
dim3 GridSize(Width / BlockSize.x, Height / BlockSize.y);
Init << <GridSize, BlockSize >> >(Width, Height, EnergyLut);
for (int Index = 0; Index < Width*Height; ++Index)
{
if (!BinaryPattern[Index]) continue;
int X = Index % Width;
int Y = Index / Width;
UpdateEnergyLut << <GridSize, BlockSize >> >(X, Y, Width, Height, EnergyLut);
}
while (CurrentIndex < Width*Height)
{
CurrentIndex++;
dim3 SpecialGridSize(1, 1);
FindLargestVoid << <SpecialGridSize, BlockSize >> >(Width, Height, BinaryPatternDevice, EnergyLut, ImageDevice, CurrentIndex);
UpdateEnergyLut << <GridSize, BlockSize >> >(Width, Height, EnergyLut);
if (CurrentIndex % 10 == 0)
printf("Phase2 And Phase3 : %f%%\r", CurrentIndex * 100.f / (Width*Height));
}
printf("Phase2 And Phase3 : %f%%\r", 100.f);
// normalize
Normalize << <GridSize, BlockSize >> >(Width, Height, ImageDevice);
cudaMemcpy(Image, ImageDevice, Width*Height*sizeof(float), cudaMemcpyDeviceToHost);
// free resources
cudaFree(EnergyLut);
cudaFree(ImageDevice);
cudaFree(BinaryPatternDevice);
}
// ----------------------Blue Noise----------------------
void PrintHelp()
{
printf("******************************************************************\n");
printf(R"(Options:
--size <num> Size of blue or white noise texture need be generated
--channels <num> Channels of noise texture (1,2,3,4)
--output <directory> Output directory of blue or white noise texture
--type <num> Which noise do you want to generate (0: white, 1: blue)
--frames <num> Number of textures do you want to generated
--goldratio <num> Generate multi textures using goldratio?
--help Print help
)");
printf("******************************************************************\n");
}
int main(int argc, char** argv)
{
PrintHelp();
string Base = "D:/";
int Width = 64, Height = 64;
int Type = 1;
int NumFrames = 1, GoldRatio = 1;
int NumChannels = 1;
for (int Index = 1; Index < argc; ++Index)
{
if (!strcmp(argv[Index], "--size"))
{
Width = Height = atoi(argv[++Index]);
}
else if (!strcmp(argv[Index], "--output"))
{
Base = argv[++Index];
}
else if (!strcmp(argv[Index], "--type"))
{
Type = atoi(argv[++Index]);
}
else if (!strcmp(argv[Index], "--frames"))
{
NumFrames = atoi(argv[++Index]);
}
else if (!strcmp(argv[Index], "--goldratio"))
{
GoldRatio = atoi(argv[++Index]);
}
else if (!strcmp(argv[Index], "--channels"))
{
NumChannels = atoi(argv[++Index]);
}
else if (!strcmp(argv[Index], "--help"))
{
PrintHelp();
return 0;
}
else
{
printf("Unexpected parameters\n");
return 1;
}
}
printf("Will generate %d %snoise textures(%d channels) with size[%dx%d] and output directory[%s]\n", NumFrames, Type == 0 ? "white" : "blue", NumChannels, Width, Height, Base.c_str());
float *Output;
Output = new float[Width*Height*NumChannels];
for (int Index = 0; Index < Width*Height*NumChannels; ++Index) Output[Index] = 0.f;
for (int Frames = 0; Frames < NumFrames; ++Frames)
{
Timer Record;
Record.Start();
if (Frames > 0 && GoldRatio)
{
const float Ratio = 1.618033f; // Golden ratio
for (int Index = 0; Index < Width*Height*NumChannels; ++Index)
{
Output[Index] += Ratio;
Output[Index] -= floor(Output[Index]);
}
}
else
{
if (Type == 0)
{
WhiteNoiseGenerator(Width, Height, NumChannels, Frames, Output);
}
else
{
vector<float> Temp;
Temp.resize(Width*Height);
for (int Index = 0; Index < NumChannels; ++Index)
{
BlueNoiseGenerator(Width, Height, &Temp[0]);
for (int Pixel = 0; Pixel < Width*Height; ++Pixel)
{
Output[NumChannels*Pixel + Index] = Temp[Pixel];
}
}
}
}
Record.End();
string Filename = Base + "Noise" + to_string(Frames) + ".png";
SavePng(Filename.c_str(), Width, Height, NumChannels, Output);
printf("\nElapsed Time : %f\n", Record.GetElapsed());
}
delete[] Output;
return 0;
} |
67ea755aba834cfb053cb51cbe04feeaf0bb5502.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
using namespace std;
const double learning_rate = 2;
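// The code below trains a fully connected 784-16-16-10 network on MNIST with sigmoid
// activations. Each layer owns the weight matrix to its previous layer; the kernels
// implement the forward pass (element-wise multiply, per-row tree reduction, bias add,
// sigmoid) and a simplified delta-style backward update.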
__global__ void assign_weights(double *d, int size)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * size) + i;
d[index] = 0;
}
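// assign_weights above zero-initializes the whole weight matrix on the device.
// mat_mull scales each weight by the previous-layer activation it connects from
// (one block per neuron of this layer, one thread per previous-layer neuron);
// reduce_mat/rectify below then collapse each row into its dot product.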
__global__ void mat_mull(double* weights, double* prev_activation, int size)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * size) + i;
weights[index] = weights[index] * prev_activation[i];
//weights[index] = weights[index] / size;
}
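// reduce_mat adds the upper half of each partially reduced weight row onto the lower
// half; rectify folds a leftover odd column into column 0 so the pairwise reduction
// can continue until column 0 holds the full row sum.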
__global__ void reduce_mat(double* weights, int size_block, int size)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * size_block) + i;
weights[index] = weights[index] + weights[size + index];
}
__global__ void rectify(double* weights, int size_block, int i)
{
int j = threadIdx.x;
int index = (j * size_block) + i;
weights[(j * size_block)] = weights[index] + weights[(j * size_block)];
}
__global__ void add_bias(double* activation, double* weights, double* bias, int prev_neurons)
{
int i = threadIdx.x;
int index = (i * prev_neurons);
activation[i] = weights[index] + bias[i];
//activation[i] = activation[i] / prev_neurons;
}
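// sigmoid applies the logistic function: e^x / (1 + e^x) == 1 / (1 + e^-x).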
__global__ void sigmoid(double* activation)
{
int i = threadIdx.x;
activation[i] = exp(activation[i]); // exp(x) rather than pow(2.72, x), which only approximates e^x
activation[i] = activation[i] / (1 + activation[i]);
}
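// The two update kernels implement the author's simplified delta-style rule:
// cost = target - activation, B = cost * a * (1 - a) (error times sigmoid derivative),
// weights -= learning_rate * cost * B and bias -= learning_rate * B / cost.
// Note, kept as in the original: every block writes bias[i] for the same i, and in the
// output-layer call threadIdx.x spans the previous layer's neurons, which can exceed
// the size of the bias buffer allocated for this layer.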
__global__ void update_weights(double* target, double* next_neurons, double* weights, double* bias, double* cost, int learning_rate, int no_neurons, int no_next_neuron)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * no_neurons) + i;
cost[j] = target[j] - next_neurons[j];
double B = cost[j] * (1 - next_neurons[j]) * next_neurons[j];
double C = B / cost[j];
weights[index] = weights[index] - (learning_rate * cost[j] * B);
bias[i] = bias[i] - (learning_rate * C);
}
__global__ void update_weights_output (double* target, double* activation, double* weights, double* bias, double* cost, int learning_rate, int neurons, int prev_no_neurons)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * prev_no_neurons) + i;
cost[j] = target[j] - activation[j];
double B = cost[j] * (1 - activation[j]) * activation[j];
double C = B / cost[j];
weights[index] = weights[index] - (learning_rate * cost[j] * B);
bias[i] = bias[i] - (learning_rate * C);
}
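// calc_target estimates a training target for a hidden layer from the next layer's
// activations weighted by the next layer's weights; average then rescales it by the
// next layer's neuron count. Successive blocks overwrite target[i] rather than
// accumulating into it (kept as in the original).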
__global__ void calc_target (double* next_neurons, double* target, double* next_weights, int no_neurons)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * no_neurons) + i;
target[i] = next_weights[index] * next_neurons[i];
}
__global__ void average(double* target, int next_no_neurons)
{
int i = threadIdx.x;
target[i] = target[i] / next_no_neurons;
}
class neuron_layer
{
private:
int neurons;
int no_weights;
double* weights;
double* activation;
double* bias;
public:
neuron_layer(int no_neuron)
{
neurons = no_neuron;
activation = new double[neurons];
bias = new double[neurons];
for (int i = 0; i < neurons; i++)
{
bias[i] = 0;
}
}
neuron_layer(int no_neuron,neuron_layer prev_layer)
{
int prev_neurons = prev_layer.get_number_neurons();
neurons = no_neuron;
activation = new double[neurons];
bias = new double[neurons];
no_weights = neurons * prev_neurons;
weights = new double[neurons * prev_neurons];
for (int i = 0; i < neurons; i++)
{
bias[i] = 0;
}
double* dev_weights = 0;
if (hipMalloc((void**)&dev_weights, neurons * prev_neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda weight assignment failed.\n";
hipFree(dev_weights);
}
hipLaunchKernelGGL(( assign_weights), dim3(neurons),dim3(prev_neurons), 0, 0, dev_weights,prev_neurons);
if (hipMemcpy(weights, dev_weights, neurons*prev_neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from cuda to host weight assignment failed.\n";
hipFree(dev_weights);
}
//cout << "\nWeights = ";
/*for (int i = 0; i < neurons*prev_neurons; i++)
{
cout << weights[i] << ",";
}
cout << endl;*/
hipFree(dev_weights);
}
void print_layer()
{
/*cout << "Activation values = ";
for (int i = 0; i < neurons; i++)
{
cout<< activation[i] << ",";
}*/
cout << "\nBias values = ";
for (int i = 0; i < neurons; i++)
{
cout << bias[i] << ",";
}
cout << "\nWeight values = ";
for (int i = 0; i < 10; i++)
cout << weights[i] << ",";
}
void input(double* image)
{
/*double* dev_activation;
if (hipMalloc((void**)& dev_activation, neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda image assignment failed during activation.\n";
hipFree(dev_activation);
}
if (hipMemcpy(dev_activation, image, neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device image assignment failed during activation.\n";
hipFree(dev_activation);
}
if (hipMemcpy(activation, dev_activation, neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from yDevice To Host image assignment failed during activation.\n";
hipFree(dev_activation);
}
hipFree(dev_activation);*/
activation = image;
}
int get_number_neurons()
{
return neurons;
}
int get_no_weights()
{
return no_weights;
}
double* get_activation()
{
return activation;
}
double* get_weights()
{
return weights;
}
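// activate_layer runs the forward pass for this layer: it copies weights, bias and the
// previous layer's activations to the device, multiplies every weight row element-wise
// by those activations, tree-reduces each row until column 0 holds the weighted sum,
// adds the bias, applies the sigmoid and copies the new activations back to the host.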
void activate_layer(neuron_layer prev_layer)
{
int prev_neurons = prev_layer.get_number_neurons();
double* dev_weights = 0;
double* dev_activation = 0;
double* dev_bias = 0;
double* dev_prev_activation = 0;
if(hipMalloc((void**)& dev_weights, neurons * prev_neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda weight assignment failed during activation.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
if (hipMalloc((void**)& dev_activation, neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda activation assignment failed during activation.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
if (hipMalloc((void**)& dev_bias, neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda bias assignment failed during activation.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
if (hipMalloc((void**)& dev_prev_activation, prev_neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda bias assignment failed during activation.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
if (hipMemcpy(dev_weights, weights, neurons * prev_neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device weight assignment failed during activation.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
if (hipMemcpy(dev_bias, bias, neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device bias assignment failed during activation.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
if (hipMemcpy(dev_prev_activation, prev_layer.get_activation(), prev_neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device previous layer activation assignment failed during activation.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
hipLaunchKernelGGL(( mat_mull), dim3(neurons), dim3(prev_neurons), 0, 0, dev_weights, dev_prev_activation, prev_neurons);
/*if (hipMemcpy(weights, dev_weights, prev_neurons * neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from cuda to host activation assignment failed.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
cout << "\nWeights = ";
for (int i = 0; i < neurons * prev_neurons; i++)
{
cout << weights[i] << ",";
}
cout << endl;*/
if (prev_neurons % 2 == 1)
{
hipLaunchKernelGGL(( rectify) , dim3(1), dim3(neurons) , 0, 0, dev_weights, prev_neurons, (prev_neurons-1));
}
/*if (hipMemcpy(weights, dev_weights, prev_neurons * neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from cuda to host activation assignment failed.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
cout << "\nWeights = ";
for (int i = 0; i < neurons * prev_neurons; i++)
{
cout << weights[i] << ",";
}
cout << endl;*/
int i = prev_neurons / 2;
while (i >= 1)
{
hipLaunchKernelGGL(( reduce_mat) , dim3(neurons), dim3(i) , 0, 0, dev_weights, prev_neurons, i);
if (((i % 2) == 1) && i != 1)
{
hipLaunchKernelGGL(( rectify) , dim3(1), dim3(neurons) , 0, 0, dev_weights, prev_neurons, i);
}
i = i / 2;
}
/*if (hipMemcpy(weights, dev_weights, prev_neurons * neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from cuda to host activation assignment failed.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
cout << "\nWeights = ";
for (int i = 0; i < neurons * prev_neurons; i++)
{
cout << weights[i] << ",";
}
cout << endl;*/
hipLaunchKernelGGL(( add_bias) , dim3(1), dim3(neurons) , 0, 0, dev_activation, dev_weights, dev_bias, prev_neurons);
hipLaunchKernelGGL(( sigmoid) , dim3(1), dim3(neurons) , 0, 0, dev_activation);
if (hipMemcpy(activation, dev_activation, neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from cuda to host activation assignment failed.\n";
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
hipFree(dev_weights);
hipFree(dev_activation);
hipFree(dev_bias);
hipFree(dev_prev_activation);
}
void activate_input_layer()
{
double* dev_activation;
if (hipMalloc((void**)& dev_activation, neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda activation assignment failed during first layer activation.\n";
hipFree(dev_activation);
}
if (hipMemcpy(dev_activation, activation, neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device previous layer activation assignment failed during first layer activation.\n";
hipFree(dev_activation);
}
hipLaunchKernelGGL(( sigmoid) , dim3(1), dim3(neurons) , 0, 0, dev_activation);
if (hipMemcpy(activation, dev_activation, neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from cuda to host activation assignment failed during first layer activation.\n";
hipFree(dev_activation);
}
hipFree(dev_activation);
}
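// back_propagate_output updates this layer's weights and bias directly against a
// supplied target vector (used for the output layer). back_propagate, further below,
// first derives a pseudo-target from the following layer via calc_target/average and
// then applies the same style of update.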
void back_propagate_output(double* target, neuron_layer prev_layer)
{
int prev_no_neurons = prev_layer.get_number_neurons();
//double* next_neurons = new double[next_no_neurons];
//next_neurons = next_layer.get_activation();
double* cost = new double[10];
double* dev_weights;
double* dev_target;
double* dev_activation;
double* dev_cost;
double*dev_bias;
if (hipMalloc((void**)& dev_weights, neurons * prev_no_neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
if (hipMalloc((void**)& dev_bias, neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda bias assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
if (hipMalloc((void**)& dev_target, neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda target assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
if (hipMalloc((void**)& dev_activation, neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda neurons assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
if (hipMalloc((void**)& dev_cost, neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
if (hipMemcpy(dev_weights, weights, neurons * prev_no_neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
if (hipMemcpy(dev_target, target, neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device target assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
if (hipMemcpy(dev_activation, activation, neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device next neuron assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
if (hipMemcpy(dev_bias, bias,neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device bias assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
hipLaunchKernelGGL(( update_weights_output) , dim3(neurons), dim3(prev_no_neurons), 0, 0, dev_target, dev_activation, dev_weights, dev_bias, dev_cost, learning_rate, neurons, prev_no_neurons);
if (hipMemcpy(weights, dev_weights, neurons * prev_no_neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from devide to host weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
if (hipMemcpy(cost, dev_cost, neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from devide to host weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
if (hipMemcpy(bias, dev_bias, neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from device to host bias assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_activation);
hipFree(dev_cost);
hipFree(dev_bias);
}
void back_propagate(neuron_layer next_layer)
{
int next_no_neurons = next_layer.get_number_neurons();
int next_no_weights = next_layer.get_no_weights();
int next_next_neurons = next_no_weights / next_no_neurons;
double* next_neurons = new double[next_no_neurons];
next_neurons = next_layer.get_activation();
double* cost = new double[10];
double* dev_weights;
double* dev_target;
double* dev_next_neurons;
double* dev_cost;
double* dev_bias;
double* dev_next_weights;
if (hipMalloc((void**)& dev_weights, neurons * next_no_neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMalloc((void**)& dev_next_weights, next_no_weights * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda next weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMalloc((void**)& dev_bias, neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda bias assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMalloc((void**)& dev_target, next_no_neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda target assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMalloc((void**)& dev_next_neurons, next_no_neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda neurons assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMalloc((void**)& dev_cost, next_no_neurons * sizeof(double)) != hipSuccess)
{
cout << "Memory allocation for cuda weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMemcpy(dev_weights, weights, neurons * next_no_neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMemcpy(dev_next_weights, next_layer.get_weights(), next_no_weights * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device next layer weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMemcpy(dev_next_neurons, next_neurons, next_no_neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device next neuron assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMemcpy(dev_bias, bias, neurons * sizeof(double), hipMemcpyHostToDevice) != hipSuccess) {
cout << "Memory copy from host to device bias assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
hipLaunchKernelGGL(( calc_target) , dim3(next_no_neurons) , dim3(next_next_neurons), 0, 0, dev_next_neurons, dev_target, dev_next_weights, neurons);
hipLaunchKernelGGL(( average) , dim3(1), dim3(next_no_neurons) , 0, 0, dev_target, next_no_neurons);
hipLaunchKernelGGL(( update_weights) , dim3(next_no_neurons), dim3(neurons) , 0, 0, dev_target, dev_next_neurons, dev_weights, dev_bias, dev_cost, learning_rate, neurons, next_no_neurons);
if (hipMemcpy(weights, dev_weights, neurons * next_no_neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from devide to host weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMemcpy(cost, dev_cost, next_no_neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from devide to host weight assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
if (hipMemcpy(bias, dev_bias, neurons * sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Memory copy from device to host bias assignment failed during back propagation.\n";
hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);
}
/*hipFree(dev_weights);
hipFree(dev_target);
hipFree(dev_next_neurons);
hipFree(dev_cost);
hipFree(dev_bias);
hipFree(dev_next_weights);*/
}
};
class training_data
{
private:
double* image = new double[784]; // new[] takes an element count, not a byte count
double* label = new double[10];
public:
training_data()
{
for (int i = 0; i < 10; i++)
label[i] = 0;
}
void get_image(int i, double data)
{
image[i] = data;
}
void get_label(int data)
{
label[data] = 1;
}
double* give_image()
{
return image;
}
double* give_label()
{
return label;
}
void print_img()
{
for (int i = 0; i < 784; i++)
cout << image[i] << " ";
cout << endl;
for (int i = 0; i < 10; i++)
cout << label[i] << " ";
cout << endl;
}
}training_set[60000];
int reverseInt(int i)
{
unsigned char c1, c2, c3, c4;
c1 = i & 255;
c2 = (i >> 8) & 255;
c3 = (i >> 16) & 255;
c4 = (i >> 24) & 255;
return ((int)c1 << 24) + ((int)c2 << 16) + ((int)c3 << 8) + c4;
}
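// The MNIST IDX files store their 32-bit header fields big-endian, so reverseInt swaps
// the byte order before the magic number, image count and dimensions are interpreted.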
void read_mnist_images()
{
ifstream file("train-images.idx3-ubyte", ios::in | ios::binary);
if (file.is_open())
{
int magic_number = 0;
int number_of_images = 0;
int n_rows = 0;
int n_cols = 0;
file.read((char*)& magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
file.read((char*)& number_of_images, sizeof(number_of_images));
number_of_images = reverseInt(number_of_images);
file.read((char*)& n_rows, sizeof(n_rows));
n_rows = reverseInt(n_rows);
file.read((char*)& n_cols, sizeof(n_cols));
n_cols = reverseInt(n_cols);
for (int i = 0; i < number_of_images; ++i)
{
for (int r = 0; r < n_rows; ++r)
{
for (int c = 0; c < n_cols; ++c)
{
unsigned char temp = 0;
file.read((char*)& temp, sizeof(temp));
double data = temp;
training_set[i].get_image(((r * 28) + c), data);
}
}
}
}
}
void read_mnist_labels()
{
ifstream file("train-labels.idx1-ubyte", ios::in | ios::binary);
if (file.is_open())
{
int magic_number = 0;
int number_of_images = 0;
file.read((char*)& magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
file.read((char*)& number_of_images, sizeof(number_of_images));
number_of_images = reverseInt(number_of_images);
for (int i = 0; i < number_of_images; ++i)
{
unsigned char temp = 0;
file.read((char*)& temp, sizeof(temp));
int data = temp;
training_set[i].get_label(data);
}
}
}
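// main builds the 784-16-16-10 network and, when training is selected, reads the MNIST
// images and labels and runs a forward pass through all layers followed by the backward
// updates. Note that the training loop currently covers only the first example (x <= 0).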
int main()
{
cout << "Do you want to train the model or load the weights(0 for 1st and 1 for 2nd option):";
int dec;
cin >> dec;
neuron_layer input_layer(784);
neuron_layer hidden_layer_1(16, input_layer);
neuron_layer hidden_layer_2(16, hidden_layer_1);
neuron_layer output_layer(10, hidden_layer_2); // the output layer follows hidden_layer_2
output_layer.print_layer();
cout << endl << "--------------------------------------------------------";
if (dec == 0)
{
read_mnist_images();
read_mnist_labels();
double* image = new double[784];
double* target = new double[10];
for (int x = 0; x <= 0; x++)
{
image = training_set[x].give_image();
target = training_set[x].give_label();
input_layer.input(image);
input_layer.activate_input_layer();
hidden_layer_1.activate_layer(input_layer);
hidden_layer_2.activate_layer(hidden_layer_1);
output_layer.activate_layer(hidden_layer_2);
output_layer.back_propagate_output(target, hidden_layer_2);
cout << "0";
hidden_layer_2.back_propagate_output(target, output_layer);
cout << "1";
hidden_layer_1.back_propagate(hidden_layer_2);
cout << "2";
}
}
else if (dec == 1)
{
cout << "Still in progress.\nYou can try later when it is developed.";
return 0;
}
/*input_layer.print_layer();
cout << "\nEND\n";
hidden_layer_1.print_layer();
cout << "\nEND\n";
hidden_layer_2.print_layer();
cout << "\nEND\n";*/
output_layer.print_layer();
cout << "\nEND\n";
return 0;
}
| 67ea755aba834cfb053cb51cbe04feeaf0bb5502.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
using namespace std;
const double learning_rate = 2;
__global__ void assign_weights(double *d, int size)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * size) + i;
d[index] = 0;
}
__global__ void mat_mull(double* weights, double* prev_activation, int size)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * size) + i;
weights[index] = weights[index] * prev_activation[i];
//weights[index] = weights[index] / size;
}
__global__ void reduce_mat(double* weights, int size_block, int size)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * size_block) + i;
weights[index] = weights[index] + weights[size + index];
}
__global__ void rectify(double* weights, int size_block, int i)
{
int j = threadIdx.x;
int index = (j * size_block) + i;
weights[(j * size_block)] = weights[index] + weights[(j * size_block)];
}
__global__ void add_bias(double* activation, double* weights, double* bias, int prev_neurons)
{
int i = threadIdx.x;
int index = (i * prev_neurons);
activation[i] = weights[index] + bias[i];
//activation[i] = activation[i] / prev_neurons;
}
__global__ void sigmoid(double* activation)
{
int i = threadIdx.x;
activation[i] = exp(activation[i]); // exp(x) rather than pow(2.72, x), which only approximates e^x
activation[i] = activation[i] / (1 + activation[i]);
}
__global__ void update_weights(double* target, double* next_neurons, double* weights, double* bias, double* cost, int learning_rate, int no_neurons, int no_next_neuron)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * no_neurons) + i;
cost[j] = target[j] - next_neurons[j];
double B = cost[j] * (1 - next_neurons[j]) * next_neurons[j];
double C = B / cost[j];
weights[index] = weights[index] - (learning_rate * cost[j] * B);
bias[i] = bias[i] - (learning_rate * C);
}
__global__ void update_weights_output (double* target, double* activation, double* weights, double* bias, double* cost, int learning_rate, int neurons, int prev_no_neurons)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * prev_no_neurons) + i;
cost[j] = target[j] - activation[j];
double B = cost[j] * (1 - activation[j]) * activation[j];
double C = B / cost[j];
weights[index] = weights[index] - (learning_rate * cost[j] * B);
bias[i] = bias[i] - (learning_rate * C);
}
__global__ void calc_target (double* next_neurons, double* target, double* next_weights, int no_neurons)
{
int i = threadIdx.x;
int j = blockIdx.x;
int index = (j * no_neurons) + i;
target[i] = next_weights[index] * next_neurons[i];
}
__global__ void average(double* target, int next_no_neurons)
{
int i = threadIdx.x;
target[i] = target[i] / next_no_neurons;
}
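// neuron_layer stores one layer's activations, biases and the weight matrix to its
// previous layer; activate_layer / activate_input_layer implement the forward pass and
// back_propagate / back_propagate_output the weight updates, staging every buffer
// through freshly allocated device memory on each call.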
class neuron_layer
{
private:
int neurons;
int no_weights;
double* weights;
double* activation;
double* bias;
public:
neuron_layer(int no_neuron)
{
neurons = no_neuron;
activation = new double[neurons];
bias = new double[neurons];
for (int i = 0; i < neurons; i++)
{
bias[i] = 0;
}
}
neuron_layer(int no_neuron,neuron_layer prev_layer)
{
int prev_neurons = prev_layer.get_number_neurons();
neurons = no_neuron;
activation = new double[neurons];
bias = new double[neurons];
no_weights = neurons * prev_neurons;
weights = new double[neurons * prev_neurons];
for (int i = 0; i < neurons; i++)
{
bias[i] = 0;
}
double* dev_weights = 0;
if (cudaMalloc((void**)&dev_weights, neurons * prev_neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda weight assignment failed.\n";
cudaFree(dev_weights);
}
assign_weights<<<neurons,prev_neurons>>>(dev_weights,prev_neurons);
if (cudaMemcpy(weights, dev_weights, neurons*prev_neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from cuda to host weight assignment failed.\n";
cudaFree(dev_weights);
}
//cout << "\nWeights = ";
/*for (int i = 0; i < neurons*prev_neurons; i++)
{
cout << weights[i] << ",";
}
cout << endl;*/
cudaFree(dev_weights);
}
void print_layer()
{
/*cout << "Activation values = ";
for (int i = 0; i < neurons; i++)
{
cout<< activation[i] << ",";
}*/
cout << "\nBias values = ";
for (int i = 0; i < neurons; i++)
{
cout << bias[i] << ",";
}
cout << "\nWeight values = ";
for (int i = 0; i < 10; i++)
cout << weights[i] << ",";
}
void input(double* image)
{
/*double* dev_activation;
if (cudaMalloc((void**)& dev_activation, neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda image assignment failed during activation.\n";
cudaFree(dev_activation);
}
if (cudaMemcpy(dev_activation, image, neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device image assignment failed during activation.\n";
cudaFree(dev_activation);
}
if (cudaMemcpy(activation, dev_activation, neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from yDevice To Host image assignment failed during activation.\n";
cudaFree(dev_activation);
}
cudaFree(dev_activation);*/
activation = image;
}
int get_number_neurons()
{
return neurons;
}
int get_no_weights()
{
return no_weights;
}
double* get_activation()
{
return activation;
}
double* get_weights()
{
return weights;
}
void activate_layer(neuron_layer prev_layer)
{
int prev_neurons = prev_layer.get_number_neurons();
double* dev_weights = 0;
double* dev_activation = 0;
double* dev_bias = 0;
double* dev_prev_activation = 0;
if(cudaMalloc((void**)& dev_weights, neurons * prev_neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda weight assignment failed during activation.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
if (cudaMalloc((void**)& dev_activation, neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda activation assignment failed during activation.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
if (cudaMalloc((void**)& dev_bias, neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda bias assignment failed during activation.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
if (cudaMalloc((void**)& dev_prev_activation, prev_neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda bias assignment failed during activation.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
if (cudaMemcpy(dev_weights, weights, neurons * prev_neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device weight assignment failed during activation.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
if (cudaMemcpy(dev_bias, bias, neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device bias assignment failed during activation.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
if (cudaMemcpy(dev_prev_activation, prev_layer.get_activation(), prev_neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device previous layer activation assignment failed during activation.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
mat_mull<<<neurons, prev_neurons>>>(dev_weights, dev_prev_activation, prev_neurons);
/*if (cudaMemcpy(weights, dev_weights, prev_neurons * neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from cuda to host activation assignment failed.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
cout << "\nWeights = ";
for (int i = 0; i < neurons * prev_neurons; i++)
{
cout << weights[i] << ",";
}
cout << endl;*/
if (prev_neurons % 2 == 1)
{
rectify <<<1, neurons >>> (dev_weights, prev_neurons, (prev_neurons-1));
}
/*if (cudaMemcpy(weights, dev_weights, prev_neurons * neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from cuda to host activation assignment failed.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
cout << "\nWeights = ";
for (int i = 0; i < neurons * prev_neurons; i++)
{
cout << weights[i] << ",";
}
cout << endl;*/
int i = prev_neurons / 2;
while (i >= 1)
{
reduce_mat <<<neurons, i >>> (dev_weights, prev_neurons, i);
if (((i % 2) == 1) && i != 1)
{
rectify <<<1, neurons >>> (dev_weights, prev_neurons, i);
}
i = i / 2;
}
/*if (cudaMemcpy(weights, dev_weights, prev_neurons * neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from cuda to host activation assignment failed.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
cout << "\nWeights = ";
for (int i = 0; i < neurons * prev_neurons; i++)
{
cout << weights[i] << ",";
}
cout << endl;*/
add_bias <<<1, neurons >>> (dev_activation, dev_weights, dev_bias, prev_neurons);
sigmoid <<<1, neurons >>> (dev_activation);
if (cudaMemcpy(activation, dev_activation, neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from cuda to host activation assignment failed.\n";
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
cudaFree(dev_weights);
cudaFree(dev_activation);
cudaFree(dev_bias);
cudaFree(dev_prev_activation);
}
void activate_input_layer()
{
double* dev_activation;
if (cudaMalloc((void**)& dev_activation, neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda activation assignment failed during first layer activation.\n";
cudaFree(dev_activation);
}
if (cudaMemcpy(dev_activation, activation, neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device previous layer activation assignment failed during first layer activation.\n";
cudaFree(dev_activation);
}
sigmoid <<<1, neurons >>> (dev_activation);
if (cudaMemcpy(activation, dev_activation, neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from cuda to host activation assignment failed during first layer activation.\n";
cudaFree(dev_activation);
}
cudaFree(dev_activation);
}
void back_propagate_output(double* target, neuron_layer prev_layer)
{
int prev_no_neurons = prev_layer.get_number_neurons();
//double* next_neurons = new double[next_no_neurons];
//next_neurons = next_layer.get_activation();
double* cost = new double[10];
double* dev_weights;
double* dev_target;
double* dev_activation;
double* dev_cost;
double*dev_bias;
if (cudaMalloc((void**)& dev_weights, neurons * prev_no_neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
if (cudaMalloc((void**)& dev_bias, neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda bias assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
if (cudaMalloc((void**)& dev_target, neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda target assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
if (cudaMalloc((void**)& dev_activation, neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda neurons assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
if (cudaMalloc((void**)& dev_cost, neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
if (cudaMemcpy(dev_weights, weights, neurons * prev_no_neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
if (cudaMemcpy(dev_target, target, neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device target assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
if (cudaMemcpy(dev_activation, activation, neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device next neuron assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
if (cudaMemcpy(dev_bias, bias,neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device bias assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
update_weights_output <<<neurons, prev_no_neurons>>> (dev_target, dev_activation, dev_weights, dev_bias, dev_cost, learning_rate, neurons, prev_no_neurons);
if (cudaMemcpy(weights, dev_weights, neurons * prev_no_neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from devide to host weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
if (cudaMemcpy(cost, dev_cost, neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from devide to host weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
if (cudaMemcpy(bias, dev_bias, neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from device to host bias assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_activation);
cudaFree(dev_cost);
cudaFree(dev_bias);
}
void back_propagate(neuron_layer next_layer)
{
int next_no_neurons = next_layer.get_number_neurons();
int next_no_weights = next_layer.get_no_weights();
int next_next_neurons = next_no_weights / next_no_neurons;
double* next_neurons = new double[next_no_neurons];
next_neurons = next_layer.get_activation();
double* cost = new double[10];
double* dev_weights;
double* dev_target;
double* dev_next_neurons;
double* dev_cost;
double* dev_bias;
double* dev_next_weights;
if (cudaMalloc((void**)& dev_weights, neurons * next_no_neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMalloc((void**)& dev_next_weights, next_no_weights * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda next weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMalloc((void**)& dev_bias, neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda bias assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMalloc((void**)& dev_target, next_no_neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda target assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMalloc((void**)& dev_next_neurons, next_no_neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda neurons assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMalloc((void**)& dev_cost, next_no_neurons * sizeof(double)) != cudaSuccess)
{
cout << "Memory allocation for cuda weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMemcpy(dev_weights, weights, neurons * next_no_neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMemcpy(dev_next_weights, next_layer.get_weights(), next_no_weights * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device next layer weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMemcpy(dev_next_neurons, next_neurons, next_no_neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device next neuron assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMemcpy(dev_bias, bias, neurons * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) {
cout << "Memory copy from host to device bias assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
calc_target <<<next_no_neurons , next_next_neurons>>> (dev_next_neurons, dev_target, dev_next_weights, neurons);
average <<<1, next_no_neurons >>> (dev_target, next_no_neurons);
update_weights <<<next_no_neurons, neurons >>> (dev_target, dev_next_neurons, dev_weights, dev_bias, dev_cost, learning_rate, neurons, next_no_neurons);
if (cudaMemcpy(weights, dev_weights, neurons * next_no_neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from devide to host weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMemcpy(cost, dev_cost, next_no_neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from devide to host weight assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
if (cudaMemcpy(bias, dev_bias, neurons * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Memory copy from device to host bias assignment failed during back propagation.\n";
cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);
}
/*cudaFree(dev_weights);
cudaFree(dev_target);
cudaFree(dev_next_neurons);
cudaFree(dev_cost);
cudaFree(dev_bias);
cudaFree(dev_next_weights);*/
}
};
class training_data
{
private:
double* image = new double[784]; // 28x28 pixel intensities
double* label = new double[10];  // one-hot encoded digit
public:
training_data()
{
for (int i = 0; i < 10; i++)
label[i] = 0;
}
void get_image(int i, double data)
{
image[i] = data;
}
void get_label(int data)
{
label[data] = 1;
}
double* give_image()
{
return image;
}
double* give_label()
{
return label;
}
void print_img()
{
for (int i = 0; i < 784; i++)
cout << image[i] << " ";
cout << endl;
for (int i = 0; i < 10; i++)
cout << label[i] << " ";
cout << endl;
}
}training_set[60000];
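// MNIST IDX files store their 32-bit header fields in big-endian byte order;
// reverseInt swaps the bytes so the headers can be read on little-endian hosts.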
int reverseInt(int i)
{
unsigned char c1, c2, c3, c4;
c1 = i & 255;
c2 = (i >> 8) & 255;
c3 = (i >> 16) & 255;
c4 = (i >> 24) & 255;
return ((int)c1 << 24) + ((int)c2 << 16) + ((int)c3 << 8) + c4;
}
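// Reads "train-images.idx3-ubyte": a big-endian header (magic number, image
// count, row count, column count) followed by one unsigned byte per pixel,
// copied row-major into the corresponding training_set entry.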
void read_mnist_images()
{
ifstream file("train-images.idx3-ubyte", ios::in | ios::binary);
if (file.is_open())
{
int magic_number = 0;
int number_of_images = 0;
int n_rows = 0;
int n_cols = 0;
file.read((char*)& magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
file.read((char*)& number_of_images, sizeof(number_of_images));
number_of_images = reverseInt(number_of_images);
file.read((char*)& n_rows, sizeof(n_rows));
n_rows = reverseInt(n_rows);
file.read((char*)& n_cols, sizeof(n_cols));
n_cols = reverseInt(n_cols);
for (int i = 0; i < number_of_images; ++i)
{
for (int r = 0; r < n_rows; ++r)
{
for (int c = 0; c < n_cols; ++c)
{
unsigned char temp = 0;
file.read((char*)& temp, sizeof(temp));
double data = temp;
training_set[i].get_image(((r * 28) + c), data);
}
}
}
}
}
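// Reads "train-labels.idx1-ubyte": a big-endian header (magic number, label
// count) followed by one unsigned byte per label, used to set the one-hot
// target vector of each training_set entry.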
void read_mnist_labels()
{
ifstream file("train-labels.idx1-ubyte", ios::in | ios::binary);
if (file.is_open())
{
int magic_number = 0;
int number_of_images = 0;
file.read((char*)& magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
file.read((char*)& number_of_images, sizeof(number_of_images));
number_of_images = reverseInt(number_of_images);
for (int i = 0; i < number_of_images; ++i)
{
unsigned char temp = 0;
file.read((char*)& temp, sizeof(temp));
int data = temp;
training_set[i].get_label(data);
}
}
}
int main()
{
cout << "Do you want to train the model or load the weights(0 for 1st and 1 for 2nd option):";
int dec;
cin >> dec;
neuron_layer input_layer(784);
neuron_layer hidden_layer_1(16, input_layer);
neuron_layer hidden_layer_2(16, hidden_layer_1);
neuron_layer output_layer(10, hidden_layer_2);
output_layer.print_layer();
cout << endl << "--------------------------------------------------------";
if (dec == 0)
{
read_mnist_images();
read_mnist_labels();
double* image = nullptr;
double* target = nullptr;
for (int x = 0; x <= 0; x++)
{
image = training_set[x].give_image();
target = training_set[x].give_label();
input_layer.input(image);
input_layer.activate_input_layer();
hidden_layer_1.activate_layer(input_layer);
hidden_layer_2.activate_layer(hidden_layer_1);
output_layer.activate_layer(hidden_layer_2);
output_layer.back_propagate_output(target, hidden_layer_2);
cout << "0";
hidden_layer_2.back_propagate_output(target, output_layer);
cout << "1";
hidden_layer_1.back_propagate(hidden_layer_2);
cout << "2";
}
}
else if (dec == 1)
{
cout << "Still in progress.\nYou can try later when it is developed.";
return 0;
}
/*input_layer.print_layer();
cout << "\nEND\n";
hidden_layer_1.print_layer();
cout << "\nEND\n";
hidden_layer_2.print_layer();
cout << "\nEND\n";*/
output_layer.print_layer();
cout << "\nEND\n";
return 0;
}
|
c8b64aa71c98b6e17ca4c9d4623d26d1731a7616.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2010.
Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory.
LLNL-CODE-461231
All rights reserved.
This file is part of LULESH, Version 1.0.
Please also read this link -- http://www.opensource.org/licenses/index.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Additional BSD Notice
1. This notice is required to be provided under our contract with the U.S.
Department of Energy (DOE). This work was produced at Lawrence Livermore
National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
2. Neither the United States Government nor Lawrence Livermore National
Security, LLC nor any of their employees, makes any warranty, express
or implied, or assumes any liability or responsibility for the accuracy,
completeness, or usefulness of any information, apparatus, product, or
process disclosed, or represents that its use would not infringe
privately-owned rights.
3. Also, reference herein to any specific commercial products, process, or
services by trade name, trademark, manufacturer or otherwise does not
necessarily constitute or imply its endorsement, recommendation, or
favoring by the United States Government or Lawrence Livermore National
Security, LLC. The views and opinions of authors expressed herein do not
necessarily state or reflect those of the United States Government or
Lawrence Livermore National Security, LLC, and shall not be used for
advertising or product endorsement purposes.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <util.h>
#include <sm_utils.inl>
#include <hip/hip_runtime.h>
#include <allocator.h>
#include "hip/hip_runtime_api.h"
#ifdef USE_MPI
#include <mpi.h>
#include "comm.h"
#endif
#include <sys/time.h>
#include <unistd.h>
#include "lulesh.h"
static int globalRank=0;
static int totIters=0;
static int currIter=0;
static int gpuID=0;
/****************************************************/
/* Allow flexibility for arithmetic representations */
/****************************************************/
__device__ inline real4 SQRT(real4 arg) { return sqrtf(arg) ; }
__device__ inline real8 SQRT(real8 arg) { return sqrt(arg) ; }
__device__ inline real4 CBRT(real4 arg) { return cbrtf(arg) ; }
__device__ inline real8 CBRT(real8 arg) { return cbrt(arg) ; }
__device__ __host__ inline real4 FABS(real4 arg) { return fabsf(arg) ; }
__device__ __host__ inline real8 FABS(real8 arg) { return fabs(arg) ; }
__device__ inline real4 FMAX(real4 arg1,real4 arg2) { return fmaxf(arg1,arg2) ; }
__device__ inline real8 FMAX(real8 arg1,real8 arg2) { return fmax(arg1,arg2) ; }
#define MAX(a, b) ( ((a) > (b)) ? (a) : (b))
/* Stuff needed for boundary conditions */
/* 2 BCs on each of 6 hexahedral faces (12 bits) */
#define XI_M 0x00007
#define XI_M_SYMM 0x00001
#define XI_M_FREE 0x00002
#define XI_M_COMM 0x00004
#define XI_P 0x00038
#define XI_P_SYMM 0x00008
#define XI_P_FREE 0x00010
#define XI_P_COMM 0x00020
#define ETA_M 0x001c0
#define ETA_M_SYMM 0x00040
#define ETA_M_FREE 0x00080
#define ETA_M_COMM 0x00100
#define ETA_P 0x00e00
#define ETA_P_SYMM 0x00200
#define ETA_P_FREE 0x00400
#define ETA_P_COMM 0x00800
#define ZETA_M 0x07000
#define ZETA_M_SYMM 0x01000
#define ZETA_M_FREE 0x02000
#define ZETA_M_COMM 0x04000
#define ZETA_P 0x38000
#define ZETA_P_SYMM 0x08000
#define ZETA_P_FREE 0x10000
#define ZETA_P_COMM 0x20000
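/* VOLUDER evaluates one component of the derivative of a hexahedron's volume
   with respect to a nodal coordinate (result in dvdc), built from two sets of
   six node coordinates; the 1/12 factor matches the volume formula below. */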
#define VOLUDER(a0,a1,a2,a3,a4,a5,b0,b1,b2,b3,b4,b5,dvdc) \
{ \
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ; \
\
dvdc= \
((a1) + (a2)) * ((b0) + (b1)) - ((a0) + (a1)) * ((b1) + (b2)) + \
((a0) + (a4)) * ((b3) + (b4)) - ((a3) + (a4)) * ((b0) + (b4)) - \
((a2) + (a5)) * ((b3) + (b5)) + ((a3) + (a5)) * ((b2) + (b5)); \
dvdc *= twelfth; \
}
/*
__device__
static
__forceinline__
void SumOverNodes(Real_t& val, volatile Real_t* smem, int cta_elem, int node) {
int tid = (cta_elem << 3) + node;
smem[tid] = val;
if (node < 4)
{
smem[tid] += smem[tid+4];
smem[tid] += smem[tid+2];
smem[tid] += smem[tid+1];
}
val = smem[(cta_elem << 3)];
}
*/
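// Warp-shuffle butterfly reduction over groups of 8 lanes (one lane per
// hexahedron node): after the three xor steps every lane in a group holds the
// group sum, replacing the shared-memory version commented out above.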
__device__
static
__forceinline__
void SumOverNodesShfl(Real_t& val) {
val += utils::shfl_xor( val, 4, 8);
val += utils::shfl_xor( val, 2, 8);
val += utils::shfl_xor( val, 1, 8);
}
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t x4, const Real_t x5,
const Real_t x6, const Real_t x7,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t y4, const Real_t y5,
const Real_t y6, const Real_t y7,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3,
const Real_t z4, const Real_t z5,
const Real_t z6, const Real_t z7 )
{
Real_t twelveth = Real_t(1.0)/Real_t(12.0);
Real_t dx61 = x6 - x1;
Real_t dy61 = y6 - y1;
Real_t dz61 = z6 - z1;
Real_t dx70 = x7 - x0;
Real_t dy70 = y7 - y0;
Real_t dz70 = z7 - z0;
Real_t dx63 = x6 - x3;
Real_t dy63 = y6 - y3;
Real_t dz63 = z6 - z3;
Real_t dx20 = x2 - x0;
Real_t dy20 = y2 - y0;
Real_t dz20 = z2 - z0;
Real_t dx50 = x5 - x0;
Real_t dy50 = y5 - y0;
Real_t dz50 = z5 - z0;
Real_t dx64 = x6 - x4;
Real_t dy64 = y6 - y4;
Real_t dz64 = z6 - z4;
Real_t dx31 = x3 - x1;
Real_t dy31 = y3 - y1;
Real_t dz31 = z3 - z1;
Real_t dx72 = x7 - x2;
Real_t dy72 = y7 - y2;
Real_t dz72 = z7 - z2;
Real_t dx43 = x4 - x3;
Real_t dy43 = y4 - y3;
Real_t dz43 = z4 - z3;
Real_t dx57 = x5 - x7;
Real_t dy57 = y5 - y7;
Real_t dz57 = z5 - z7;
Real_t dx14 = x1 - x4;
Real_t dy14 = y1 - y4;
Real_t dz14 = z1 - z4;
Real_t dx25 = x2 - x5;
Real_t dy25 = y2 - y5;
Real_t dz25 = z2 - z5;
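// The volume is the sum of three scalar triple products of edge-difference
// vectors, scaled by 1/12 below.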
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
// 11 + 3*14
Real_t volume =
TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
dy31 + dy72, dy63, dy20,
dz31 + dz72, dz63, dz20) +
TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
dy43 + dy57, dy64, dy70,
dz43 + dz57, dz64, dz70) +
TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
dy14 + dy25, dy61, dy50,
dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
volume *= twelveth;
return volume ;
}
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{
return CalcElemVolume( x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7],
z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]);
}
void cuda_init(int rank)
{
Int_t deviceCount, dev;
hipDeviceProp_t cuda_deviceProp;
cudaSafeCall( hipGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
fprintf(stderr, "cuda_init(): no devices supporting CUDA.\n");
exit(1);
}
//dev = rank % deviceCount;
dev = comm_select_device(rank);
if ((dev < 0) || (dev > deviceCount-1)) {
fprintf(stderr, "cuda_init(): requested device (%d) out of range [%d,%d]\n",
dev, 0, deviceCount-1);
exit(1);
}
cudaSafeCall( hipSetDevice(dev) );
struct hipDeviceProp_t props;
hipGetDeviceProperties(&props, dev);
char hostname[256];
gethostname(hostname, sizeof(hostname));
printf("Host %s using GPU %i: %s\n", hostname, dev, props.name);
cudaSafeCall( hipGetDeviceProperties(&cuda_deviceProp, dev) );
if (cuda_deviceProp.major < 3) {
fprintf(stderr, "cuda_init(): This implementation of Lulesh requires device SM 3.0+.\n", dev);
exit(1);
}
#if CUDART_VERSION < 5000
fprintf(stderr,"cuda_init(): This implementation of Lulesh uses texture objects, which is requires Cuda 5.0+.\n");
exit(1);
#endif
gpuID = dev;
}
void AllocateNodalPersistent(Domain* domain, size_t domNodes)
{
domain->x.resize(domNodes) ; /* coordinates */
domain->y.resize(domNodes) ;
domain->z.resize(domNodes) ;
domain->xd.resize(domNodes) ; /* velocities */
domain->yd.resize(domNodes) ;
domain->zd.resize(domNodes) ;
domain->xdd.resize(domNodes) ; /* accelerations */
domain->ydd.resize(domNodes) ;
domain->zdd.resize(domNodes) ;
domain->fx.resize(domNodes) ; /* forces */
domain->fy.resize(domNodes) ;
domain->fz.resize(domNodes) ;
domain->nodalMass.resize(domNodes) ; /* mass */
}
void AllocateElemPersistent(Domain* domain, size_t domElems, size_t padded_domElems)
{
domain->matElemlist.resize(domElems) ; /* material indexset */
domain->nodelist.resize(8*padded_domElems) ; /* elemToNode connectivity */
domain->lxim.resize(domElems) ; /* elem connectivity through face */
domain->lxip.resize(domElems) ;
domain->letam.resize(domElems) ;
domain->letap.resize(domElems) ;
domain->lzetam.resize(domElems) ;
domain->lzetap.resize(domElems) ;
domain->elemBC.resize(domElems) ; /* elem face symm/free-surf flag */
domain->e.resize(domElems) ; /* energy */
domain->p.resize(domElems) ; /* pressure */
domain->q.resize(domElems) ; /* q */
domain->ql.resize(domElems) ; /* linear term for q */
domain->qq.resize(domElems) ; /* quadratic term for q */
domain->v.resize(domElems) ; /* relative volume */
domain->volo.resize(domElems) ; /* reference volume */
domain->delv.resize(domElems) ; /* m_vnew - m_v */
domain->vdov.resize(domElems) ; /* volume derivative over volume */
domain->arealg.resize(domElems) ; /* elem characteristic length */
domain->ss.resize(domElems) ; /* "sound speed" */
domain->elemMass.resize(domElems) ; /* mass */
}
void AllocateSymmX(Domain* domain, size_t size)
{
domain->symmX.resize(size) ;
}
void AllocateSymmY(Domain* domain, size_t size)
{
domain->symmY.resize(size) ;
}
void AllocateSymmZ(Domain* domain, size_t size)
{
domain->symmZ.resize(size) ;
}
void InitializeFields(Domain* domain)
{
/* Basic Field Initialization */
thrust::fill(domain->ss.begin(),domain->ss.end(),0.);
thrust::fill(domain->e.begin(),domain->e.end(),0.);
thrust::fill(domain->p.begin(),domain->p.end(),0.);
thrust::fill(domain->q.begin(),domain->q.end(),0.);
thrust::fill(domain->v.begin(),domain->v.end(),1.);
thrust::fill(domain->xd.begin(),domain->xd.end(),0.);
thrust::fill(domain->yd.begin(),domain->yd.end(),0.);
thrust::fill(domain->zd.begin(),domain->zd.end(),0.);
thrust::fill(domain->xdd.begin(),domain->xdd.end(),0.);
thrust::fill(domain->ydd.begin(),domain->ydd.end(),0.);
thrust::fill(domain->zdd.begin(),domain->zdd.end(),0.);
thrust::fill(domain->nodalMass.begin(),domain->nodalMass.end(),0.);
}
////////////////////////////////////////////////////////////////////////////////
void
Domain::SetupCommBuffers(Int_t edgeNodes)
{
// allocate a buffer large enough for nodal ghost data
maxEdgeSize = MAX(this->sizeX, MAX(this->sizeY, this->sizeZ))+1 ;
maxPlaneSize = CACHE_ALIGN_REAL(maxEdgeSize*maxEdgeSize) ;
maxEdgeSize = CACHE_ALIGN_REAL(maxEdgeSize) ;
// assume communication to 6 neighbors by default
m_rowMin = (m_rowLoc == 0) ? 0 : 1;
m_rowMax = (m_rowLoc == m_tp-1) ? 0 : 1;
m_colMin = (m_colLoc == 0) ? 0 : 1;
m_colMax = (m_colLoc == m_tp-1) ? 0 : 1;
m_planeMin = (m_planeLoc == 0) ? 0 : 1;
m_planeMax = (m_planeLoc == m_tp-1) ? 0 : 1;
#if USE_MPI
// account for face communication
Index_t comBufSize =
(m_rowMin + m_rowMax + m_colMin + m_colMax + m_planeMin + m_planeMax) *
maxPlaneSize * MAX_FIELDS_PER_MPI_COMM ;
// account for edge communication
comBufSize +=
((m_rowMin & m_colMin) + (m_rowMin & m_planeMin) + (m_colMin & m_planeMin) +
(m_rowMax & m_colMax) + (m_rowMax & m_planeMax) + (m_colMax & m_planeMax) +
(m_rowMax & m_colMin) + (m_rowMin & m_planeMax) + (m_colMin & m_planeMax) +
(m_rowMin & m_colMax) + (m_rowMax & m_planeMin) + (m_colMax & m_planeMin)) *
maxPlaneSize * MAX_FIELDS_PER_MPI_COMM ;
// account for corner communication
// factor of 16 is so each buffer has its own cache line
comBufSize += ((m_rowMin & m_colMin & m_planeMin) +
(m_rowMin & m_colMin & m_planeMax) +
(m_rowMin & m_colMax & m_planeMin) +
(m_rowMin & m_colMax & m_planeMax) +
(m_rowMax & m_colMin & m_planeMin) +
(m_rowMax & m_colMin & m_planeMax) +
(m_rowMax & m_colMax & m_planeMin) +
(m_rowMax & m_colMax & m_planeMax)) * CACHE_COHERENCE_PAD_REAL ;
if(comm_use_comm())
{
int myRank=0, numPeers=0;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
MPI_Comm_size(MPI_COMM_WORLD, &numPeers);
if(myRank == 0)
printf("\n***comBufSize: %d totSize: %d***\n", comBufSize, comBufSize*sizeof(Real_t));
comm_regions_setup(26*3, SEND_REGION);
comm_regions_setup(26*3, SEND_STREAM_REGION);
comm_regions_setup(26*3, RECV_REGION);
for(int typeBuf=0; typeBuf < 3; typeBuf++)
{
for(int ind=0; ind<26; ind++)
{
hipHostMalloc((void **)&(this->commDataSend_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
comm_register_index(this->commDataSend_multi[ind+(typeBuf*26)], comBufSize*sizeof(Real_t), SEND_REGION, ind+(typeBuf*26));
memset(this->commDataSend_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
hipHostMalloc((void **)&(this->commDataSendStream_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
comm_register_index(this->commDataSendStream_multi[ind+(typeBuf*26)], comBufSize*sizeof(Real_t), SEND_STREAM_REGION, ind+(typeBuf*26));
memset(this->commDataSendStream_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
hipHostMalloc((void **)&(this->commDataRecv_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
comm_register_index(this->commDataRecv_multi[ind+(typeBuf*26)], comBufSize*sizeof(Real_t),RECV_REGION, ind+(typeBuf*26));
memset(this->commDataRecv_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
}
}
comm_regions_setup(0, TIMER_RECV_REGION);
comm_regions_setup(0, TIMER_SEND_REGION);
hipHostMalloc((void **)&(this->timerRecvBuf), (numPeers+1)*sizeof(Real_t));
hipHostMalloc((void **)&(this->timerSendBuf), (numPeers+1)*sizeof(Real_t));
comm_register_index(this->timerRecvBuf, (numPeers+1)*sizeof(Real_t), TIMER_RECV_REGION, 0);
comm_register_index(this->timerSendBuf, (numPeers+1)*sizeof(Real_t), TIMER_SEND_REGION, 0);
}
else
{
for(int typeBuf=0; typeBuf < 3; typeBuf++)
{
for(int ind=0; ind<26; ind++)
{
hipHostMalloc((void **)&(this->commDataSend_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
memset(this->commDataSend_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
hipHostMalloc((void **)&(this->commDataSendStream_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
memset(this->commDataSendStream_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
hipHostMalloc((void **)&(this->commDataRecv_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
memset(this->commDataRecv_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
}
}
#if 0
this->commDataSend = new Real_t[comBufSize] ;
this->commDataRecv = new Real_t[comBufSize] ;
// pin buffers
hipHostRegister(this->commDataSend, comBufSize*sizeof(Real_t), 0);
hipHostRegister(this->commDataRecv, comBufSize*sizeof(Real_t), 0);
// prevent floating point exceptions
memset(this->commDataSend, 0, comBufSize*sizeof(Real_t)) ;
memset(this->commDataRecv, 0, comBufSize*sizeof(Real_t)) ;
#endif
}
// allocate shadow GPU buffers
hipMalloc(&this->d_commDataSend, comBufSize*sizeof(Real_t));
hipMalloc(&this->d_commDataRecv, comBufSize*sizeof(Real_t));
// prevent floating point exceptions
hipMemset(this->d_commDataSend, 0, comBufSize*sizeof(Real_t));
hipMemset(this->d_commDataRecv, 0, comBufSize*sizeof(Real_t));
#endif
}
void SetupConnectivityBC(Domain *domain, int edgeElems)
{
int domElems = domain->numElem;
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
/* set up elemement connectivity information */
lxim_h[0] = 0 ;
for (Index_t i=1; i<domElems; ++i) {
lxim_h[i] = i-1 ;
lxip_h[i-1] = i ;
}
lxip_h[domElems-1] = domElems-1 ;
for (Index_t i=0; i<edgeElems; ++i) {
letam_h[i] = i ;
letap_h[domElems-edgeElems+i] = domElems-edgeElems+i ;
}
for (Index_t i=edgeElems; i<domElems; ++i) {
letam_h[i] = i-edgeElems ;
letap_h[i-edgeElems] = i ;
}
for (Index_t i=0; i<edgeElems*edgeElems; ++i) {
lzetam_h[i] = i ;
lzetap_h[domElems-edgeElems*edgeElems+i] = domElems-edgeElems*edgeElems+i ;
}
for (Index_t i=edgeElems*edgeElems; i<domElems; ++i) {
lzetam_h[i] = i - edgeElems*edgeElems ;
lzetap_h[i-edgeElems*edgeElems] = i ;
}
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
elemBC_h[i] = 0 ; /* clear BCs by default */
}
Index_t ghostIdx[6] ; // offsets to ghost locations
for (Index_t i=0; i<6; ++i) {
ghostIdx[i] = INT_MIN ;
}
Int_t pidx = domElems ;
if (domain->m_planeMin != 0) {
ghostIdx[0] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_planeMax != 0) {
ghostIdx[1] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_rowMin != 0) {
ghostIdx[2] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_rowMax != 0) {
ghostIdx[3] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_colMin != 0) {
ghostIdx[4] = pidx ;
pidx += domain->sizeY*domain->sizeZ ;
}
if (domain->m_colMax != 0) {
ghostIdx[5] = pidx ;
}
/* symmetry plane or free surface BCs */
for (Index_t i=0; i<edgeElems; ++i) {
Index_t planeInc = i*edgeElems*edgeElems ;
Index_t rowInc = i*edgeElems ;
for (Index_t j=0; j<edgeElems; ++j) {
if (domain->m_planeLoc == 0) {
elemBC_h[rowInc+j] |= ZETA_M_SYMM ;
}
else {
elemBC_h[rowInc+j] |= ZETA_M_COMM ;
lzetam_h[rowInc+j] = ghostIdx[0] + rowInc + j ;
}
if (domain->m_planeLoc == domain->m_tp-1) {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_FREE;
}
else {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_COMM ;
lzetap_h[rowInc+j+domElems-edgeElems*edgeElems] =
ghostIdx[1] + rowInc + j ;
}
if (domain->m_rowLoc == 0) {
elemBC_h[planeInc+j] |= ETA_M_SYMM ;
}
else {
elemBC_h[planeInc+j] |= ETA_M_COMM ;
letam_h[planeInc+j] = ghostIdx[2] + rowInc + j ;
}
if (domain->m_rowLoc == domain->m_tp-1) {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_FREE ;
}
else {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_COMM ;
letap_h[planeInc+j+edgeElems*edgeElems-edgeElems] =
ghostIdx[3] + rowInc + j ;
}
if (domain->m_colLoc == 0) {
elemBC_h[planeInc+j*edgeElems] |= XI_M_SYMM ;
}
else {
elemBC_h[planeInc+j*edgeElems] |= XI_M_COMM ;
lxim_h[planeInc+j*edgeElems] = ghostIdx[4] + rowInc + j ;
}
if (domain->m_colLoc == domain->m_tp-1) {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_FREE ;
}
else {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_COMM ;
lxip_h[planeInc+j*edgeElems+edgeElems-1] =
ghostIdx[5] + rowInc + j ;
}
}
}
domain->elemBC = elemBC_h;
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
}
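// BuildMesh fills this rank's nodal coordinates on the regular lattice of the
// global 1.125-unit cube and builds the 8-node hexahedral connectivity list in
// a node-major layout strided by padded_domElems.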
void Domain::BuildMesh(Int_t nx, Int_t edgeNodes, Int_t edgeElems, Int_t domNodes, Int_t padded_domElems, Vector_h<Real_t> &x_h, Vector_h<Real_t> &y_h, Vector_h<Real_t> &z_h, Vector_h<Int_t> &nodelist_h)
{
Index_t meshEdgeElems = m_tp*nx ;
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
// initialize nodal coordinates
Index_t nidx = 0 ;
Real_t tz = Real_t(1.125)*Real_t(m_planeLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t plane=0; plane<edgeNodes; ++plane) {
Real_t ty = Real_t(1.125)*Real_t(m_rowLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t row=0; row<edgeNodes; ++row) {
Real_t tx = Real_t(1.125)*Real_t(m_colLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t col=0; col<edgeNodes; ++col) {
x_h[nidx] = tx ;
y_h[nidx] = ty ;
z_h[nidx] = tz ;
++nidx ;
// tx += ds ; // may accumulate roundoff...
tx = Real_t(1.125)*Real_t(m_colLoc*nx+col+1)/Real_t(meshEdgeElems) ;
}
// ty += ds ; // may accumulate roundoff...
ty = Real_t(1.125)*Real_t(m_rowLoc*nx+row+1)/Real_t(meshEdgeElems) ;
}
// tz += ds ; // may accumulate roundoff...
tz = Real_t(1.125)*Real_t(m_planeLoc*nx+plane+1)/Real_t(meshEdgeElems) ;
}
x = x_h;
y = y_h;
z = z_h;
nodelist_h.resize(padded_domElems*8);
// embed hexahedral elements in nodal point lattice
Index_t zidx = 0 ;
nidx = 0 ;
for (Index_t plane=0; plane<edgeElems; ++plane) {
for (Index_t row=0; row<edgeElems; ++row) {
for (Index_t col=0; col<edgeElems; ++col) {
nodelist_h[0*padded_domElems+zidx] = nidx ;
nodelist_h[1*padded_domElems+zidx] = nidx + 1 ;
nodelist_h[2*padded_domElems+zidx] = nidx + edgeNodes + 1 ;
nodelist_h[3*padded_domElems+zidx] = nidx + edgeNodes ;
nodelist_h[4*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes ;
nodelist_h[5*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + 1 ;
nodelist_h[6*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + edgeNodes + 1 ;
nodelist_h[7*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + edgeNodes ;
++zidx ;
++nidx ;
}
++nidx ;
}
nidx += edgeNodes ;
}
nodelist = nodelist_h;
}
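// Allocates the per-element temporaries used within a Lagrange step; the
// delv_* gradient arrays are sized for local elements plus plane/row/column
// ghost elements so neighbor contributions can be stored contiguously.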
void elenagoAllocateElems(Domain *locDom)
{
int allElem = locDom->numElem + /* local elem */
2*locDom->sizeX*locDom->sizeY + /* plane ghosts */
2*locDom->sizeX*locDom->sizeZ + /* row ghosts */
2*locDom->sizeY*locDom->sizeZ ; /* col ghosts */
locDom->vnew = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->dxx = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->dyy = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->dzz = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->delx_xi = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->delx_eta = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->delx_zeta = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->delv_xi = Allocator< Vector_d<Real_t> >::allocate(allElem);
locDom->delv_eta = Allocator< Vector_d<Real_t> >::allocate(allElem);
locDom->delv_zeta = Allocator< Vector_d<Real_t> >::allocate(allElem);
locDom->d_fx = locDom->fx.raw();
locDom->d_fy = locDom->fy.raw();
locDom->d_fz = locDom->fz.raw();
locDom->d_x = locDom->x.raw();
locDom->d_y = locDom->y.raw();
locDom->d_z = locDom->z.raw();
locDom->d_xd = locDom->xd.raw();
locDom->d_yd = locDom->yd.raw();
locDom->d_zd = locDom->zd.raw();
locDom->d_delv_xi = locDom->delv_xi->raw();
locDom->d_delv_eta = locDom->delv_eta->raw();
locDom->d_delv_zeta = locDom->delv_zeta->raw();
#ifdef DOUBLE_PRECISION
locDom->fx_elem = Allocator< Vector_d<Real_t> >::allocate(locDom->padded_numElem*8);
locDom->fy_elem = Allocator< Vector_d<Real_t> >::allocate(locDom->padded_numElem*8);
locDom->fz_elem = Allocator< Vector_d<Real_t> >::allocate(locDom->padded_numElem*8);
#else
thrust::fill(locDom->fx.begin(),locDom->fx.end(),0.);
thrust::fill(locDom->fy.begin(),locDom->fy.end(),0.);
thrust::fill(locDom->fz.begin(),locDom->fz.end(),0.);
#endif
int dimGrid=::min(1024,PAD_DIV(locDom->numElem,128));
locDom->dev_mindtcourant= Allocator< Vector_d<Real_t> >::allocate(dimGrid);
locDom->dev_mindthydro = Allocator< Vector_d<Real_t> >::allocate(dimGrid);
}
void elenagoDellocateElems(Domain *locDom)
{
int dimGrid=::min(1024,PAD_DIV(locDom->numElem,128));
int allElem = locDom->numElem + /* local elem */
2*locDom->sizeX*locDom->sizeY + /* plane ghosts */
2*locDom->sizeX*locDom->sizeZ + /* row ghosts */
2*locDom->sizeY*locDom->sizeZ ; /* col ghosts */
Allocator<Vector_d<Real_t> >::free(locDom->dev_mindtcourant,dimGrid);
Allocator<Vector_d<Real_t> >::free(locDom->dev_mindthydro,dimGrid);
Allocator<Vector_d<Real_t> >::free(locDom->fx_elem,locDom->padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(locDom->fy_elem,locDom->padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(locDom->fz_elem,locDom->padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(locDom->dxx,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->dyy,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->dzz,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->delx_xi,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->delx_eta,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->delx_zeta,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->delv_xi,allElem);
Allocator<Vector_d<Real_t> >::free(locDom->delv_eta,allElem);
Allocator<Vector_d<Real_t> >::free(locDom->delv_zeta,allElem);
Allocator<Vector_d<Real_t> >::free(locDom->vnew,locDom->numElem);
}
Domain *ResetDomain(Domain *domain, char* argv[], Int_t numRanks, Index_t colLoc,
Index_t rowLoc, Index_t planeLoc,
Index_t nx, int tp, bool structured, Int_t nr, Int_t balance, Int_t cost)
{
domain->max_streams = 32;
domain->streams.resize(domain->max_streams);
for (Int_t i=0;i<domain->max_streams;i++)
domain->streams[i] = NULL;
//hipStreamCreate(&(domain->streams[i]));
//elenagoevent
assert(totIters > 0);
assert(totIters < 512);
for (Int_t i=0;i<(totIters+1);i++)
{
hipEventDestroy(domain->time_constraint_computed[i]);
hipEventDestroy(domain->time_constraint_reduced[i]);
hipEventCreateWithFlags(&domain->time_constraint_computed[i],hipEventDisableTiming);
hipEventCreateWithFlags(&domain->time_constraint_reduced[i],hipEventDisableTiming);
}
Index_t domElems;
Index_t domNodes;
Index_t padded_domElems;
Vector_h<Index_t> nodelist_h;
Vector_h<Real_t> x_h;
Vector_h<Real_t> y_h;
Vector_h<Real_t> z_h;
if (structured)
{
domain->m_tp = tp ;
domain->m_numRanks = numRanks ;
domain->m_colLoc = colLoc ;
domain->m_rowLoc = rowLoc ;
domain->m_planeLoc = planeLoc ;
Index_t edgeElems = nx ;
Index_t edgeNodes = edgeElems+1 ;
domain->sizeX = edgeElems ;
domain->sizeY = edgeElems ;
domain->sizeZ = edgeElems ;
domain->numElem = domain->sizeX*domain->sizeY*domain->sizeZ ;
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = (domain->sizeX+1)*(domain->sizeY+1)*(domain->sizeZ+1) ;
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
//domain->SetupCommBuffers(edgeNodes);
InitializeFields(domain);
domain->BuildMesh(nx, edgeNodes, edgeElems, domNodes, padded_domElems, x_h, y_h, z_h, nodelist_h);
domain->numSymmX = domain->numSymmY = domain->numSymmZ = 0;
if (domain->m_colLoc == 0)
domain->numSymmX = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_rowLoc == 0)
domain->numSymmY = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_planeLoc == 0)
domain->numSymmZ = (edgeElems+1)*(edgeElems+1) ;
AllocateSymmX(domain,edgeNodes*edgeNodes);
AllocateSymmY(domain,edgeNodes*edgeNodes);
AllocateSymmZ(domain,edgeNodes*edgeNodes);
/* set up symmetry nodesets */
Vector_h<Index_t> symmX_h(domain->symmX.size());
Vector_h<Index_t> symmY_h(domain->symmY.size());
Vector_h<Index_t> symmZ_h(domain->symmZ.size());
Int_t nidx = 0 ;
for (Index_t i=0; i<edgeNodes; ++i) {
Index_t planeInc = i*edgeNodes*edgeNodes ;
Index_t rowInc = i*edgeNodes ;
for (Index_t j=0; j<edgeNodes; ++j) {
if (domain->m_planeLoc == 0) {
symmZ_h[nidx] = rowInc + j ;
}
if (domain->m_rowLoc == 0) {
symmY_h[nidx] = planeInc + j ;
}
if (domain->m_colLoc == 0) {
symmX_h[nidx] = planeInc + j*edgeNodes ;
}
++nidx ;
}
}
if (domain->m_planeLoc == 0)
domain->symmZ = symmZ_h;
if (domain->m_rowLoc == 0)
domain->symmY = symmY_h;
if (domain->m_colLoc == 0)
domain->symmX = symmX_h;
SetupConnectivityBC(domain, edgeElems);
}
else
{
FILE *fp;
int ee, en;
if ((fp = fopen(argv[2], "r")) == 0) {
printf("could not open file %s\n", argv[2]) ;
exit( LFileError ) ;
}
bool fsuccess;
fsuccess = fscanf(fp, "%d %d", &ee, &en) ;
domain->numElem = Index_t(ee);
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = Index_t(en);
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
InitializeFields(domain);
/* initialize nodal coordinates */
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
double px, py, pz ;
fsuccess = fscanf(fp, "%lf %lf %lf", &px, &py, &pz) ;
x_h[i] = Real_t(px) ;
y_h[i] = Real_t(py) ;
z_h[i] = Real_t(pz) ;
}
domain->x = x_h;
domain->y = y_h;
domain->z = z_h;
/* embed hexahedral elements in nodal point lattice */
nodelist_h.resize(padded_domElems*8);
for (Index_t zidx=0; zidx<domElems; ++zidx) {
for (Index_t ni=0; ni<Index_t(8); ++ni) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
nodelist_h[ni*padded_domElems+zidx] = Index_t(n);
}
}
domain->nodelist = nodelist_h;
/* set up face-based element neighbors */
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
int xi_m, xi_p, eta_m, eta_p, zeta_m, zeta_p ;
fsuccess = fscanf(fp, "%d %d %d %d %d %d",
&xi_m, &xi_p, &eta_m, &eta_p, &zeta_m, &zeta_p) ;
lxim_h[i] = Index_t(xi_m) ;
lxip_h[i] = Index_t(xi_p) ;
letam_h[i] = Index_t(eta_m) ;
letap_h[i] = Index_t(eta_p) ;
lzetam_h[i] = Index_t(zeta_m) ;
lzetap_h[i] = Index_t(zeta_p) ;
}
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
/* set up X symmetry nodeset */
fsuccess = fscanf(fp, "%d", &domain->numSymmX) ;
Vector_h<Index_t> symmX_h(domain->numSymmX);
for (Index_t i=0; i<domain->numSymmX; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmX_h[i] = Index_t(n) ;
}
domain->symmX = symmX_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmY) ;
Vector_h<Index_t> symmY_h(domain->numSymmY);
for (Index_t i=0; i<domain->numSymmY; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmY_h[i] = Index_t(n) ;
}
domain->symmY = symmY_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmZ) ;
Vector_h<Index_t> symmZ_h(domain->numSymmZ);
for (Index_t i=0; i<domain->numSymmZ; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmZ_h[i] = Index_t(n) ;
}
domain->symmZ = symmZ_h;
/* set up free surface nodeset */
Index_t numFreeSurf;
fsuccess = fscanf(fp, "%d", &numFreeSurf) ;
Vector_h<Index_t> freeSurf_h(numFreeSurf);
for (Index_t i=0; i<numFreeSurf; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
freeSurf_h[i] = Index_t(n) ;
}
printf("%c\n",fsuccess);//nothing
fclose(fp);
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
Vector_h<Index_t> surfaceNode_h(domNodes);
for (Index_t i=0; i<domain->numElem; ++i) {
elemBC_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numSymmX; ++i) {
surfaceNode_h[symmX_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmY; ++i) {
surfaceNode_h[symmY_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmZ; ++i) {
surfaceNode_h[symmZ_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
if (elemBC_h[zidx] == (XI_M_SYMM | ETA_M_SYMM | ZETA_M_SYMM)) {
domain->octantCorner = zidx ;
break ;
}
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<numFreeSurf; ++i) {
surfaceNode_h[freeSurf_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
domain->elemBC = elemBC_h;
/* deposit energy */
domain->e[domain->octantCorner] = Real_t(3.948746e+7) ;
}
/* set up node-centered indexing of elements */
Vector_h<Index_t> nodeElemCount_h(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
nodeElemCount_h[i] = 0 ;
}
for (Index_t i=0; i<domElems; ++i) {
for (Index_t j=0; j < 8; ++j) {
++(nodeElemCount_h[nodelist_h[j*padded_domElems+i]]);
}
}
Vector_h<Index_t> nodeElemStart_h(domNodes);
nodeElemStart_h[0] = 0;
for (Index_t i=1; i < domNodes; ++i) {
nodeElemStart_h[i] =
nodeElemStart_h[i-1] + nodeElemCount_h[i-1] ;
}
Vector_h<Index_t> nodeElemCornerList_h(nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] );
for (Index_t i=0; i < domNodes; ++i) {
nodeElemCount_h[i] = 0;
}
for (Index_t j=0; j < 8; ++j) {
for (Index_t i=0; i < domElems; ++i) {
Index_t m = nodelist_h[padded_domElems*j+i];
Index_t k = padded_domElems*j + i ;
Index_t offset = nodeElemStart_h[m] +
nodeElemCount_h[m] ;
nodeElemCornerList_h[offset] = k;
++(nodeElemCount_h[m]) ;
}
}
Index_t clSize = nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] ;
for (Index_t i=0; i < clSize; ++i) {
Index_t clv = nodeElemCornerList_h[i] ;
if ((clv < 0) || (clv > padded_domElems*8)) {
fprintf(stderr,
"AllocateNodeElemIndexes(): nodeElemCornerList entry out of range!\n");
exit(1);
}
}
domain->nodeElemStart = nodeElemStart_h;
domain->nodeElemCount = nodeElemCount_h;
domain->nodeElemCornerList = nodeElemCornerList_h;
/* Create a material IndexSet (entire domain same material for now) */
Vector_h<Index_t> matElemlist_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
matElemlist_h[i] = i ;
}
domain->matElemlist = matElemlist_h;
hipHostMalloc(&domain->dtcourant_h,sizeof(Real_t),0);
hipHostMalloc(&domain->dthydro_h,sizeof(Real_t),0);
hipHostMalloc(&domain->bad_vol_h,sizeof(Index_t),0);
hipHostMalloc(&domain->bad_q_h,sizeof(Index_t),0);
*(domain->bad_vol_h)=-1;
*(domain->bad_q_h)=-1;
*(domain->dthydro_h)=1e20;
*(domain->dtcourant_h)=1e20;
/* initialize material parameters */
domain->time_h = Real_t(0.) ;
//elenago
hipHostMalloc((void **)&(domain->time_h_async), 1*sizeof(Real_t));
domain->time_h_async[0] = domain->time_h;
domain->dtfixed = Real_t(-1.0e-6) ;
domain->deltatimemultlb = Real_t(1.1) ;
domain->deltatimemultub = Real_t(1.2) ;
domain->stoptime = Real_t(1.0e-2) ;
domain->dtmax = Real_t(1.0e-2) ;
domain->cycle = 0 ;
domain->e_cut = Real_t(1.0e-7) ;
domain->p_cut = Real_t(1.0e-7) ;
domain->q_cut = Real_t(1.0e-7) ;
domain->u_cut = Real_t(1.0e-7) ;
domain->v_cut = Real_t(1.0e-10) ;
domain->hgcoef = Real_t(3.0) ;
domain->ss4o3 = Real_t(4.0)/Real_t(3.0) ;
domain->qstop = Real_t(1.0e+12) ;
domain->monoq_max_slope = Real_t(1.0) ;
domain->monoq_limiter_mult = Real_t(2.0) ;
domain->qlc_monoq = Real_t(0.5) ;
domain->qqc_monoq = Real_t(2.0)/Real_t(3.0) ;
domain->qqc = Real_t(2.0) ;
domain->pmin = Real_t(0.) ;
domain->emin = Real_t(-1.0e+15) ;
domain->dvovmax = Real_t(0.1) ;
domain->eosvmax = Real_t(1.0e+9) ;
domain->eosvmin = Real_t(1.0e-9) ;
domain->refdens = Real_t(1.0) ;
/* initialize field data */
Vector_h<Real_t> nodalMass_h(domNodes);
Vector_h<Real_t> volo_h(domElems);
Vector_h<Real_t> elemMass_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
Real_t x_local[8], y_local[8], z_local[8] ;
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist_h[lnode*padded_domElems+i];
x_local[lnode] = x_h[gnode];
y_local[lnode] = y_h[gnode];
z_local[lnode] = z_h[gnode];
}
// volume calculations
Real_t volume = CalcElemVolume(x_local, y_local, z_local );
volo_h[i] = volume ;
elemMass_h[i] = volume ;
for (Index_t j=0; j<8; ++j) {
Index_t gnode = nodelist_h[j*padded_domElems+i];
nodalMass_h[gnode] += volume / Real_t(8.0) ;
}
}
domain->nodalMass = nodalMass_h;
domain->volo = volo_h;
domain->elemMass= elemMass_h;
/* deposit energy */
domain->octantCorner = 0;
// deposit initial energy
// An energy of 3.948746e+7 is correct for a problem with
// 45 zones along a side - we need to scale it
const Real_t ebase = 3.948746e+7;
Real_t scale = (nx*domain->m_tp)/45.0;
Real_t einit = ebase*scale*scale*scale;
//Real_t einit = ebase;
if (domain->m_rowLoc + domain->m_colLoc + domain->m_planeLoc == 0) {
// Dump into the first zone (which we know is in the corner)
// of the domain that sits at the origin
domain->e[0] = einit;
}
//set initial deltatime base on analytic CFL calculation
//elenago async time
hipHostMalloc((void **)&(domain->deltatime_h_async), 1*sizeof(Real_t));
domain->deltatime_h_async[0] = (.5*cbrt(domain->volo[0]))/sqrt(2*einit);
hipMalloc((void **)&(domain->deltatime_d_async), 1*sizeof(Real_t));
hipMemcpy(domain->deltatime_d_async, domain->deltatime_h_async, 1*sizeof(Real_t), hipMemcpyHostToDevice);
domain->cost = cost;
domain->regNumList.resize(domain->numElem) ; // material indexset
domain->regElemlist.resize(domain->numElem) ; // material indexset
domain->regCSR.resize(nr);
domain->regReps.resize(nr);
domain->regSorted.resize(nr);
// Setup region index sets. For now, these are constant sized
// throughout the run, but could be changed every cycle to
// simulate effects of ALE on the lagrange solver
domain->CreateRegionIndexSets(nr, balance);
return domain ;
}
Domain *NewDomain(char* argv[], Int_t numRanks, Index_t colLoc,
Index_t rowLoc, Index_t planeLoc,
Index_t nx, int tp, bool structured, Int_t nr, Int_t balance, Int_t cost)
{
Domain *domain = new Domain ;
domain->max_streams = 32;
domain->streams.resize(domain->max_streams);
for (Int_t i=0;i<domain->max_streams;i++)
domain->streams[i] = NULL;
//hipStreamCreate(&(domain->streams[i]));
//elenagoevent
assert(totIters > 0);
assert(totIters < 512);
for (Int_t i=0;i<(totIters+1);i++)
{
hipEventCreateWithFlags(&domain->time_constraint_computed[i],hipEventDisableTiming);
hipEventCreateWithFlags(&domain->time_constraint_reduced[i],hipEventDisableTiming);
}
Index_t domElems;
Index_t domNodes;
Index_t padded_domElems;
Vector_h<Index_t> nodelist_h;
Vector_h<Real_t> x_h;
Vector_h<Real_t> y_h;
Vector_h<Real_t> z_h;
if (structured)
{
domain->m_tp = tp ;
domain->m_numRanks = numRanks ;
domain->m_colLoc = colLoc ;
domain->m_rowLoc = rowLoc ;
domain->m_planeLoc = planeLoc ;
Index_t edgeElems = nx ;
Index_t edgeNodes = edgeElems+1 ;
domain->sizeX = edgeElems ;
domain->sizeY = edgeElems ;
domain->sizeZ = edgeElems ;
domain->numElem = domain->sizeX*domain->sizeY*domain->sizeZ ;
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = (domain->sizeX+1)*(domain->sizeY+1)*(domain->sizeZ+1) ;
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
domain->SetupCommBuffers(edgeNodes);
InitializeFields(domain);
domain->BuildMesh(nx, edgeNodes, edgeElems, domNodes, padded_domElems, x_h, y_h, z_h, nodelist_h);
domain->numSymmX = domain->numSymmY = domain->numSymmZ = 0;
if (domain->m_colLoc == 0)
domain->numSymmX = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_rowLoc == 0)
domain->numSymmY = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_planeLoc == 0)
domain->numSymmZ = (edgeElems+1)*(edgeElems+1) ;
AllocateSymmX(domain,edgeNodes*edgeNodes);
AllocateSymmY(domain,edgeNodes*edgeNodes);
AllocateSymmZ(domain,edgeNodes*edgeNodes);
/* set up symmetry nodesets */
Vector_h<Index_t> symmX_h(domain->symmX.size());
Vector_h<Index_t> symmY_h(domain->symmY.size());
Vector_h<Index_t> symmZ_h(domain->symmZ.size());
Int_t nidx = 0 ;
for (Index_t i=0; i<edgeNodes; ++i) {
Index_t planeInc = i*edgeNodes*edgeNodes ;
Index_t rowInc = i*edgeNodes ;
for (Index_t j=0; j<edgeNodes; ++j) {
if (domain->m_planeLoc == 0) {
symmZ_h[nidx] = rowInc + j ;
}
if (domain->m_rowLoc == 0) {
symmY_h[nidx] = planeInc + j ;
}
if (domain->m_colLoc == 0) {
symmX_h[nidx] = planeInc + j*edgeNodes ;
}
++nidx ;
}
}
if (domain->m_planeLoc == 0)
domain->symmZ = symmZ_h;
if (domain->m_rowLoc == 0)
domain->symmY = symmY_h;
if (domain->m_colLoc == 0)
domain->symmX = symmX_h;
SetupConnectivityBC(domain, edgeElems);
}
else
{
FILE *fp;
int ee, en;
if ((fp = fopen(argv[2], "r")) == 0) {
printf("could not open file %s\n", argv[2]) ;
exit( LFileError ) ;
}
bool fsuccess;
fsuccess = fscanf(fp, "%d %d", &ee, &en) ;
domain->numElem = Index_t(ee);
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = Index_t(en);
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
InitializeFields(domain);
/* initialize nodal coordinates */
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
double px, py, pz ;
fsuccess = fscanf(fp, "%lf %lf %lf", &px, &py, &pz) ;
x_h[i] = Real_t(px) ;
y_h[i] = Real_t(py) ;
z_h[i] = Real_t(pz) ;
}
domain->x = x_h;
domain->y = y_h;
domain->z = z_h;
/* embed hexahedral elements in nodal point lattice */
nodelist_h.resize(padded_domElems*8);
for (Index_t zidx=0; zidx<domElems; ++zidx) {
for (Index_t ni=0; ni<Index_t(8); ++ni) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
nodelist_h[ni*padded_domElems+zidx] = Index_t(n);
}
}
domain->nodelist = nodelist_h;
/* set up face-based element neighbors */
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
int xi_m, xi_p, eta_m, eta_p, zeta_m, zeta_p ;
fsuccess = fscanf(fp, "%d %d %d %d %d %d",
&xi_m, &xi_p, &eta_m, &eta_p, &zeta_m, &zeta_p) ;
lxim_h[i] = Index_t(xi_m) ;
lxip_h[i] = Index_t(xi_p) ;
letam_h[i] = Index_t(eta_m) ;
letap_h[i] = Index_t(eta_p) ;
lzetam_h[i] = Index_t(zeta_m) ;
lzetap_h[i] = Index_t(zeta_p) ;
}
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
/* set up X symmetry nodeset */
fsuccess = fscanf(fp, "%d", &domain->numSymmX) ;
Vector_h<Index_t> symmX_h(domain->numSymmX);
for (Index_t i=0; i<domain->numSymmX; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmX_h[i] = Index_t(n) ;
}
domain->symmX = symmX_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmY) ;
Vector_h<Index_t> symmY_h(domain->numSymmY);
for (Index_t i=0; i<domain->numSymmY; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmY_h[i] = Index_t(n) ;
}
domain->symmY = symmY_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmZ) ;
Vector_h<Index_t> symmZ_h(domain->numSymmZ);
for (Index_t i=0; i<domain->numSymmZ; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmZ_h[i] = Index_t(n) ;
}
domain->symmZ = symmZ_h;
/* set up free surface nodeset */
Index_t numFreeSurf;
fsuccess = fscanf(fp, "%d", &numFreeSurf) ;
Vector_h<Index_t> freeSurf_h(numFreeSurf);
for (Index_t i=0; i<numFreeSurf; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
freeSurf_h[i] = Index_t(n) ;
}
printf("%c\n",fsuccess);//nothing
fclose(fp);
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
Vector_h<Index_t> surfaceNode_h(domNodes);
for (Index_t i=0; i<domain->numElem; ++i) {
elemBC_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numSymmX; ++i) {
surfaceNode_h[symmX_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmY; ++i) {
surfaceNode_h[symmY_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmZ; ++i) {
surfaceNode_h[symmZ_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
if (elemBC_h[zidx] == (XI_M_SYMM | ETA_M_SYMM | ZETA_M_SYMM)) {
domain->octantCorner = zidx ;
break ;
}
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<numFreeSurf; ++i) {
surfaceNode_h[freeSurf_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
domain->elemBC = elemBC_h;
/* deposit energy */
domain->e[domain->octantCorner] = Real_t(3.948746e+7) ;
}
/* set up node-centered indexing of elements */
Vector_h<Index_t> nodeElemCount_h(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
nodeElemCount_h[i] = 0 ;
}
for (Index_t i=0; i<domElems; ++i) {
for (Index_t j=0; j < 8; ++j) {
++(nodeElemCount_h[nodelist_h[j*padded_domElems+i]]);
}
}
Vector_h<Index_t> nodeElemStart_h(domNodes);
nodeElemStart_h[0] = 0;
for (Index_t i=1; i < domNodes; ++i) {
nodeElemStart_h[i] =
nodeElemStart_h[i-1] + nodeElemCount_h[i-1] ;
}
Vector_h<Index_t> nodeElemCornerList_h(nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] );
for (Index_t i=0; i < domNodes; ++i) {
nodeElemCount_h[i] = 0;
}
for (Index_t j=0; j < 8; ++j) {
for (Index_t i=0; i < domElems; ++i) {
Index_t m = nodelist_h[padded_domElems*j+i];
Index_t k = padded_domElems*j + i ;
Index_t offset = nodeElemStart_h[m] +
nodeElemCount_h[m] ;
nodeElemCornerList_h[offset] = k;
++(nodeElemCount_h[m]) ;
}
}
Index_t clSize = nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] ;
for (Index_t i=0; i < clSize; ++i) {
Index_t clv = nodeElemCornerList_h[i] ;
if ((clv < 0) || (clv > padded_domElems*8)) {
fprintf(stderr,
"AllocateNodeElemIndexes(): nodeElemCornerList entry out of range!\n");
exit(1);
}
}
domain->nodeElemStart = nodeElemStart_h;
domain->nodeElemCount = nodeElemCount_h;
domain->nodeElemCornerList = nodeElemCornerList_h;
/* Create a material IndexSet (entire domain same material for now) */
Vector_h<Index_t> matElemlist_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
matElemlist_h[i] = i ;
}
domain->matElemlist = matElemlist_h;
hipHostMalloc(&domain->dtcourant_h,sizeof(Real_t),0);
hipHostMalloc(&domain->dthydro_h,sizeof(Real_t),0);
hipHostMalloc(&domain->bad_vol_h,sizeof(Index_t),0);
hipHostMalloc(&domain->bad_q_h,sizeof(Index_t),0);
*(domain->bad_vol_h)=-1;
*(domain->bad_q_h)=-1;
*(domain->dthydro_h)=1e20;
*(domain->dtcourant_h)=1e20;
/* initialize material parameters */
domain->time_h = Real_t(0.) ;
hipHostMalloc((void **)&(domain->time_h_async), 1*sizeof(Real_t));
domain->time_h_async[0] = domain->time_h;
domain->dtfixed = Real_t(-1.0e-6) ;
domain->deltatimemultlb = Real_t(1.1) ;
domain->deltatimemultub = Real_t(1.2) ;
domain->stoptime = Real_t(1.0e-2) ;
domain->dtmax = Real_t(1.0e-2) ;
domain->cycle = 0 ;
domain->e_cut = Real_t(1.0e-7) ;
domain->p_cut = Real_t(1.0e-7) ;
domain->q_cut = Real_t(1.0e-7) ;
domain->u_cut = Real_t(1.0e-7) ;
domain->v_cut = Real_t(1.0e-10) ;
domain->hgcoef = Real_t(3.0) ;
domain->ss4o3 = Real_t(4.0)/Real_t(3.0) ;
domain->qstop = Real_t(1.0e+12) ;
domain->monoq_max_slope = Real_t(1.0) ;
domain->monoq_limiter_mult = Real_t(2.0) ;
domain->qlc_monoq = Real_t(0.5) ;
domain->qqc_monoq = Real_t(2.0)/Real_t(3.0) ;
domain->qqc = Real_t(2.0) ;
domain->pmin = Real_t(0.) ;
domain->emin = Real_t(-1.0e+15) ;
domain->dvovmax = Real_t(0.1) ;
domain->eosvmax = Real_t(1.0e+9) ;
domain->eosvmin = Real_t(1.0e-9) ;
domain->refdens = Real_t(1.0) ;
/* initialize field data */
Vector_h<Real_t> nodalMass_h(domNodes);
Vector_h<Real_t> volo_h(domElems);
Vector_h<Real_t> elemMass_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
Real_t x_local[8], y_local[8], z_local[8] ;
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist_h[lnode*padded_domElems+i];
x_local[lnode] = x_h[gnode];
y_local[lnode] = y_h[gnode];
z_local[lnode] = z_h[gnode];
}
// volume calculations
Real_t volume = CalcElemVolume(x_local, y_local, z_local );
volo_h[i] = volume ;
elemMass_h[i] = volume ;
for (Index_t j=0; j<8; ++j) {
Index_t gnode = nodelist_h[j*padded_domElems+i];
nodalMass_h[gnode] += volume / Real_t(8.0) ;
}
}
domain->nodalMass = nodalMass_h;
domain->volo = volo_h;
domain->elemMass= elemMass_h;
/* deposit energy */
domain->octantCorner = 0;
// deposit initial energy
// An energy of 3.948746e+7 is correct for a problem with
// 45 zones along a side - we need to scale it
const Real_t ebase = 3.948746e+7;
Real_t scale = (nx*domain->m_tp)/45.0;
Real_t einit = ebase*scale*scale*scale;
//Real_t einit = ebase;
if (domain->m_rowLoc + domain->m_colLoc + domain->m_planeLoc == 0) {
// Dump into the first zone (which we know is in the corner)
// of the domain that sits at the origin
domain->e[0] = einit;
}
//set initial deltatime base on analytic CFL calculation
//elenago async time
hipHostMalloc((void **)&(domain->deltatime_h_async), 1*sizeof(Real_t));
domain->deltatime_h_async[0] = (.5*cbrt(domain->volo[0]))/sqrt(2*einit);
hipMalloc((void **)&(domain->deltatime_d_async), 1*sizeof(Real_t));
hipMemcpy(domain->deltatime_d_async, domain->deltatime_h_async, 1*sizeof(Real_t), hipMemcpyHostToDevice);
domain->cost = cost;
domain->regNumList.resize(domain->numElem) ; // material indexset
domain->regElemlist.resize(domain->numElem) ; // material indexset
domain->regCSR.resize(nr);
domain->regReps.resize(nr);
domain->regSorted.resize(nr);
// Setup region index sets. For now, these are constant sized
// throughout the run, but could be changed every cycle to
// simulate effects of ALE on the lagrange solver
domain->CreateRegionIndexSets(nr, balance);
return domain ;
}
/******************* to support region *********************/
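// sortRegions: bubble-sorts regions into descending order of their rep counts
// (keeping regElemSize in step) and records each original region's new
// position in regSorted_h.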
void Domain::sortRegions(Vector_h<Int_t>& regReps_h, Vector_h<Index_t>& regSorted_h)
{
Index_t temp;
Vector_h<Index_t> regIndex;
regIndex.resize(numReg);
for(int i = 0; i < numReg; i++)
regIndex[i] = i;
for(int i = 0; i < numReg-1; i++)
for(int j = 0; j < numReg-i-1; j++)
if(regReps_h[j] < regReps_h[j+1])
{
temp = regReps_h[j];
regReps_h[j] = regReps_h[j+1];
regReps_h[j+1] = temp;
temp = regElemSize[j];
regElemSize[j] = regElemSize[j+1];
regElemSize[j+1] = temp;
temp = regIndex[j];
regIndex[j] = regIndex[j+1];
regIndex[j+1] = temp;
}
for(int i = 0; i < numReg; i++)
regSorted_h[regIndex[i]] = i;
}
// simple function for int pow x^y, y >= 0
static Int_t POW(Int_t x, Int_t y)
{
Int_t res = 1;
for (Int_t i = 0; i < y; i++)
res *= x;
return res;
}
void Domain::CreateRegionIndexSets(Int_t nr, Int_t b)
{
#if USE_MPI
Index_t myRank;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
srand(myRank);
#else
srand(0);
Index_t myRank = 0;
#endif
numReg = nr;
balance = b;
regElemSize = new Int_t[numReg];
Index_t nextIndex = 0;
Vector_h<Int_t> regCSR_h(regCSR.size()); // records the beginning and end of each region
Vector_h<Int_t> regReps_h(regReps.size()); // records the rep number per region
Vector_h<Index_t> regNumList_h(regNumList.size()); // Region number per domain element
Vector_h<Index_t> regElemlist_h(regElemlist.size()); // region indexset
Vector_h<Index_t> regSorted_h(regSorted.size()); // keeps index of sorted regions
//if we only have one region just fill it
// Fill out the regNumList with material numbers, which are always
// the region index plus one
if(numReg == 1) {
while (nextIndex < numElem) {
regNumList_h[nextIndex] = 1;
nextIndex++;
}
regElemSize[0] = 0;
}
//If we have more than one region distribute the elements.
else {
Int_t regionNum;
Int_t regionVar;
Int_t lastReg = -1;
Int_t binSize;
Int_t elements;
Index_t runto = 0;
Int_t costDenominator = 0;
Int_t* regBinEnd = new Int_t[numReg];
//Determine the relative weights of all the regions.
for (Index_t i=0 ; i<numReg ; ++i) {
regElemSize[i] = 0;
costDenominator += POW((i+1), balance); //Total cost of all regions
regBinEnd[i] = costDenominator; //Chance of hitting a given region is (regBinEnd[i] - regBinEnd[i-1])/costDenominator
}
//Until all elements are assigned
while (nextIndex < numElem) {
//pick the region
regionVar = rand() % costDenominator;
Index_t i = 0;
while(regionVar >= regBinEnd[i])
i++;
//rotate the regions based on MPI rank. Rotation is Rank % NumRegions
regionNum = ((i + myRank) % numReg) + 1;
// make sure we don't pick the same region twice in a row
while(regionNum == lastReg) {
regionVar = rand() % costDenominator;
i = 0;
while(regionVar >= regBinEnd[i])
i++;
regionNum = ((i + myRank) % numReg) + 1;
}
//Pick the bin size of the region and determine the number of elements.
binSize = rand() % 1000;
if(binSize < 773) {
elements = rand() % 15 + 1;
}
else if(binSize < 937) {
elements = rand() % 16 + 16;
}
else if(binSize < 970) {
elements = rand() % 32 + 32;
}
else if(binSize < 974) {
elements = rand() % 64 + 64;
}
else if(binSize < 978) {
elements = rand() % 128 + 128;
}
else if(binSize < 981) {
elements = rand() % 256 + 256;
}
else
elements = rand() % 1537 + 512;
runto = elements + nextIndex;
//Store the elements. If we hit the end before we run out of elements then just stop.
while (nextIndex < runto && nextIndex < numElem) {
regNumList_h[nextIndex] = regionNum;
nextIndex++;
}
lastReg = regionNum;
      }
      delete [] regBinEnd;
   }
// Convert regNumList to region index sets
// First, count size of each region
for (Index_t i=0 ; i<numElem ; ++i) {
int r = regNumList_h[i]-1; // region index == regnum-1
regElemSize[r]++;
}
Index_t rep;
// Second, assign the rep count (cost multiplier) for each region
for (Index_t r=0; r<numReg ; ++r) {
if(r < numReg/2)
rep = 1;
else if(r < (numReg - (numReg+15)/20))
rep = 1 + cost;
else
rep = 10 * (1+ cost);
regReps_h[r] = rep;
}
sortRegions(regReps_h, regSorted_h);
regCSR_h[0] = 0;
// Third, compute the starting offset (CSR) of each region
for (Index_t i=1 ; i<numReg ; ++i) {
regCSR_h[i] = regCSR_h[i-1] + regElemSize[i-1];
}
// Fourth, fill the region index sets
for (Index_t i=0 ; i<numElem ; ++i) {
Index_t r = regSorted_h[regNumList_h[i]-1]; // region index == regnum-1
regElemlist_h[regCSR_h[r]] = i;
regCSR_h[r]++;
}
// Copy to device
regCSR = regCSR_h; // records the beginning and end of each region
regReps = regReps_h; // records the rep number per region
regNumList = regNumList_h; // Region number per domain element
regElemlist = regElemlist_h; // region indexset
regSorted = regSorted_h; // keeps index of sorted regions
} // end of create function
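// Advance the simulation time: combine the courant and hydro constraints (reduced
// across MPI ranks) into a new delta time, clamp how fast it may grow or shrink,
// and push the result to the device asynchronously on streams[1].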
static inline
void TimeIncrement(Domain* domain)
{
// To make sure dtcourant and dthydro have been updated on host
hipEventSynchronize(domain->time_constraint_computed[currIter]);
Real_t targetdt = domain->stoptime - domain->time_h;
if ((domain->dtfixed <= Real_t(0.0)) && (domain->cycle != Int_t(0))) {
Real_t ratio ;
/* This will require a reduction in parallel */
Real_t gnewdt = Real_t(1.0e+20) ;
Real_t newdt;
if ( *(domain->dtcourant_h) < gnewdt) {
gnewdt = *(domain->dtcourant_h) / Real_t(2.0) ;
}
if ( *(domain->dthydro_h) < gnewdt) {
gnewdt = *(domain->dthydro_h) * Real_t(2.0) / Real_t(3.0) ;
}
#if USE_MPI
MPI_Allreduce(&gnewdt, &newdt, 1,
((sizeof(Real_t) == 4) ? MPI_FLOAT : MPI_DOUBLE),
MPI_MIN, MPI_COMM_WORLD) ;
#else
newdt = gnewdt;
#endif
Real_t olddt = domain->deltatime_h_async[0];
ratio = newdt / olddt ;
if (ratio >= Real_t(1.0)) {
if (ratio < domain->deltatimemultlb) {
newdt = olddt ;
}
else if (ratio > domain->deltatimemultub) {
newdt = olddt*domain->deltatimemultub ;
}
}
if (newdt > domain->dtmax) {
newdt = domain->dtmax ;
}
domain->deltatime_h_async[0] = newdt ;
}
/* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
if ((targetdt > domain->deltatime_h_async[0]) &&
(targetdt < (Real_t(4.0) * domain->deltatime_h_async[0] / Real_t(3.0))) ) {
targetdt = Real_t(2.0) * domain->deltatime_h_async[0] / Real_t(3.0) ;
}
if (targetdt < domain->deltatime_h_async[0]) {
domain->deltatime_h_async[0] = targetdt ;
}
domain->time_h += domain->deltatime_h_async[0] ;
++domain->cycle ;
hipMemcpyAsync(domain->deltatime_d_async, domain->deltatime_h_async, 1*sizeof(Real_t), hipMemcpyHostToDevice, domain->streams[1]);
hipEventRecord(domain->time_constraint_reduced[currIter], domain->streams[1]);
}
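// Compute the shape-function derivative matrix b[3][8] and the element volume
// (Jacobian determinant) of a hexahedral element from its eight nodal coordinates.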
__device__
static
__forceinline__
void CalcElemShapeFunctionDerivatives( const Real_t* const x,
const Real_t* const y,
const Real_t* const z,
Real_t b[][8],
Real_t* const volume )
{
const Real_t x0 = x[0] ; const Real_t x1 = x[1] ;
const Real_t x2 = x[2] ; const Real_t x3 = x[3] ;
const Real_t x4 = x[4] ; const Real_t x5 = x[5] ;
const Real_t x6 = x[6] ; const Real_t x7 = x[7] ;
const Real_t y0 = y[0] ; const Real_t y1 = y[1] ;
const Real_t y2 = y[2] ; const Real_t y3 = y[3] ;
const Real_t y4 = y[4] ; const Real_t y5 = y[5] ;
const Real_t y6 = y[6] ; const Real_t y7 = y[7] ;
const Real_t z0 = z[0] ; const Real_t z1 = z[1] ;
const Real_t z2 = z[2] ; const Real_t z3 = z[3] ;
const Real_t z4 = z[4] ; const Real_t z5 = z[5] ;
const Real_t z6 = z[6] ; const Real_t z7 = z[7] ;
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
fjxxi = Real_t(.125) * ( (x6-x0) + (x5-x3) - (x7-x1) - (x4-x2) );
fjxet = Real_t(.125) * ( (x6-x0) - (x5-x3) + (x7-x1) - (x4-x2) );
fjxze = Real_t(.125) * ( (x6-x0) + (x5-x3) + (x7-x1) + (x4-x2) );
fjyxi = Real_t(.125) * ( (y6-y0) + (y5-y3) - (y7-y1) - (y4-y2) );
fjyet = Real_t(.125) * ( (y6-y0) - (y5-y3) + (y7-y1) - (y4-y2) );
fjyze = Real_t(.125) * ( (y6-y0) + (y5-y3) + (y7-y1) + (y4-y2) );
fjzxi = Real_t(.125) * ( (z6-z0) + (z5-z3) - (z7-z1) - (z4-z2) );
fjzet = Real_t(.125) * ( (z6-z0) - (z5-z3) + (z7-z1) - (z4-z2) );
fjzze = Real_t(.125) * ( (z6-z0) + (z5-z3) + (z7-z1) + (z4-z2) );
/* compute cofactors */
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
/* calculate partials :
this need only be done for l = 0,1,2,3 since , by symmetry ,
(6,7,4,5) = - (0,1,2,3) .
*/
b[0][0] = - cjxxi - cjxet - cjxze;
b[0][1] = cjxxi - cjxet - cjxze;
b[0][2] = cjxxi + cjxet - cjxze;
b[0][3] = - cjxxi + cjxet - cjxze;
b[0][4] = -b[0][2];
b[0][5] = -b[0][3];
b[0][6] = -b[0][0];
b[0][7] = -b[0][1];
/*
b[0][4] = - cjxxi - cjxet + cjxze;
b[0][5] = + cjxxi - cjxet + cjxze;
b[0][6] = + cjxxi + cjxet + cjxze;
b[0][7] = - cjxxi + cjxet + cjxze;
*/
b[1][0] = - cjyxi - cjyet - cjyze;
b[1][1] = cjyxi - cjyet - cjyze;
b[1][2] = cjyxi + cjyet - cjyze;
b[1][3] = - cjyxi + cjyet - cjyze;
b[1][4] = -b[1][2];
b[1][5] = -b[1][3];
b[1][6] = -b[1][0];
b[1][7] = -b[1][1];
b[2][0] = - cjzxi - cjzet - cjzze;
b[2][1] = cjzxi - cjzet - cjzze;
b[2][2] = cjzxi + cjzet - cjzze;
b[2][3] = - cjzxi + cjzet - cjzze;
b[2][4] = -b[2][2];
b[2][5] = -b[2][3];
b[2][6] = -b[2][0];
b[2][7] = -b[2][1];
/* calculate jacobian determinant (volume) */
*volume = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
}
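// Compute the area vector of one quadrilateral face from its two bisectors and
// add one quarter of it to the normal of each of the four face nodes.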
static
__device__
__forceinline__
void SumElemFaceNormal(Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
Real_t *normalX1, Real_t *normalY1, Real_t *normalZ1,
Real_t *normalX2, Real_t *normalY2, Real_t *normalZ2,
Real_t *normalX3, Real_t *normalY3, Real_t *normalZ3,
const Real_t x0, const Real_t y0, const Real_t z0,
const Real_t x1, const Real_t y1, const Real_t z1,
const Real_t x2, const Real_t y2, const Real_t z2,
const Real_t x3, const Real_t y3, const Real_t z3)
{
Real_t bisectX0 = Real_t(0.5) * (x3 + x2 - x1 - x0);
Real_t bisectY0 = Real_t(0.5) * (y3 + y2 - y1 - y0);
Real_t bisectZ0 = Real_t(0.5) * (z3 + z2 - z1 - z0);
Real_t bisectX1 = Real_t(0.5) * (x2 + x1 - x3 - x0);
Real_t bisectY1 = Real_t(0.5) * (y2 + y1 - y3 - y0);
Real_t bisectZ1 = Real_t(0.5) * (z2 + z1 - z3 - z0);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
*normalX0 += areaX;
*normalX1 += areaX;
*normalX2 += areaX;
*normalX3 += areaX;
*normalY0 += areaY;
*normalY1 += areaY;
*normalY2 += areaY;
*normalY3 += areaY;
*normalZ0 += areaZ;
*normalZ1 += areaZ;
*normalZ2 += areaZ;
*normalZ3 += areaZ;
}
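// Warp-cooperative variant of SumElemFaceNormal: each thread owns one node of the
// element, the bisector contributions are reduced across the 8 node threads with
// shuffles, and only threads whose node lies on the face accumulate the result.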
static
__device__
__forceinline__
void SumElemFaceNormal_warp_per_4cell(
Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
const Real_t x, const Real_t y, const Real_t z,
int node,
int n0, int n1, int n2, int n3)
{
Real_t coef0 = Real_t(0.5);
Real_t coef1 = Real_t(0.5);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
if (node == n0 || node == n1)
coef0 = -coef0;
if (node == n0 || node == n3)
coef1 = -coef1;
}
else
{
coef0 = Real_t(0.);
coef1 = Real_t(0.);
}
Real_t bisectX0 = coef0*x;
Real_t bisectY0 = coef0*y;
Real_t bisectZ0 = coef0*z;
Real_t bisectX1 = coef1*x;
Real_t bisectY1 = coef1*y;
Real_t bisectZ1 = coef1*z;
SumOverNodesShfl(bisectX0);
SumOverNodesShfl(bisectY0);
SumOverNodesShfl(bisectZ0);
SumOverNodesShfl(bisectX1);
SumOverNodesShfl(bisectY1);
SumOverNodesShfl(bisectZ1);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
*normalX0 += areaX;
*normalY0 += areaY;
*normalZ0 += areaZ;
}
}
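// Accumulate the face normals of all six hex faces into per-node normal vectors.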
__device__
static inline
void CalcElemNodeNormals(Real_t pfx[8],
Real_t pfy[8],
Real_t pfz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
for (Index_t i = 0 ; i < 8 ; ++i) {
pfx[i] = Real_t(0.0);
pfy[i] = Real_t(0.0);
pfz[i] = Real_t(0.0);
}
/* evaluate face one: nodes 0, 1, 2, 3 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[1], &pfy[1], &pfz[1],
&pfx[2], &pfy[2], &pfz[2],
&pfx[3], &pfy[3], &pfz[3],
x[0], y[0], z[0], x[1], y[1], z[1],
x[2], y[2], z[2], x[3], y[3], z[3]);
/* evaluate face two: nodes 0, 4, 5, 1 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[4], &pfy[4], &pfz[4],
&pfx[5], &pfy[5], &pfz[5],
&pfx[1], &pfy[1], &pfz[1],
x[0], y[0], z[0], x[4], y[4], z[4],
x[5], y[5], z[5], x[1], y[1], z[1]);
/* evaluate face three: nodes 1, 5, 6, 2 */
SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1],
&pfx[5], &pfy[5], &pfz[5],
&pfx[6], &pfy[6], &pfz[6],
&pfx[2], &pfy[2], &pfz[2],
x[1], y[1], z[1], x[5], y[5], z[5],
x[6], y[6], z[6], x[2], y[2], z[2]);
/* evaluate face four: nodes 2, 6, 7, 3 */
SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2],
&pfx[6], &pfy[6], &pfz[6],
&pfx[7], &pfy[7], &pfz[7],
&pfx[3], &pfy[3], &pfz[3],
x[2], y[2], z[2], x[6], y[6], z[6],
x[7], y[7], z[7], x[3], y[3], z[3]);
/* evaluate face five: nodes 3, 7, 4, 0 */
SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3],
&pfx[7], &pfy[7], &pfz[7],
&pfx[4], &pfy[4], &pfz[4],
&pfx[0], &pfy[0], &pfz[0],
x[3], y[3], z[3], x[7], y[7], z[7],
x[4], y[4], z[4], x[0], y[0], z[0]);
/* evaluate face six: nodes 4, 7, 6, 5 */
SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4],
&pfx[7], &pfy[7], &pfz[7],
&pfx[6], &pfy[6], &pfz[6],
&pfx[5], &pfy[5], &pfz[5],
x[4], y[4], z[4], x[7], y[7], z[7],
x[6], y[6], z[6], x[5], y[5], z[5]);
}
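// One thread per node: gather the per-element-corner force contributions listed
// in nodeElemCornerList and sum them into the nodal force arrays.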
__global__
void AddNodeForcesFromElems_kernel( Index_t numNode,
Index_t padded_numNode,
const Int_t* nodeElemCount,
const Int_t* nodeElemStart,
const Index_t* nodeElemCornerList,
const Real_t* fx_elem,
const Real_t* fy_elem,
const Real_t* fz_elem,
Real_t* fx_node,
Real_t* fy_node,
Real_t* fz_node,
const Int_t num_threads)
{
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < num_threads)
{
Index_t g_i = tid;
Int_t count=nodeElemCount[g_i];
Int_t start=nodeElemStart[g_i];
Real_t fx,fy,fz;
fx=fy=fz=Real_t(0.0);
for (int j=0;j<count;j++)
{
Index_t pos=nodeElemCornerList[start+j]; // Uncoalesced access here
fx += fx_elem[pos];
fy += fy_elem[pos];
fz += fz_elem[pos];
}
fx_node[g_i]=fx;
fy_node[g_i]=fy;
fz_node[g_i]=fz;
}
}
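// Partial derivatives of the element volume with respect to one node's
// coordinates (standard hex volume-derivative stencil, scaled by 1/12).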
static
__device__
__forceinline__
void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2,
const Real_t x3, const Real_t x4, const Real_t x5,
const Real_t y0, const Real_t y1, const Real_t y2,
const Real_t y3, const Real_t y4, const Real_t y5,
const Real_t z0, const Real_t z1, const Real_t z2,
const Real_t z3, const Real_t z4, const Real_t z5,
Real_t* dvdx, Real_t* dvdy, Real_t* dvdz)
{
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
*dvdx =
(y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
(y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
(y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
*dvdy =
- (x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
(x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
(x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
*dvdz =
- (y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
(y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
(y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);
*dvdx *= twelfth;
*dvdy *= twelfth;
*dvdz *= twelfth;
}
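// Apply VoluDer once per node to obtain dV/dx, dV/dy, dV/dz for all eight nodes.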
static
__device__
__forceinline__
void CalcElemVolumeDerivative(Real_t dvdx[8],
Real_t dvdy[8],
Real_t dvdz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
VoluDer(x[1], x[2], x[3], x[4], x[5], x[7],
y[1], y[2], y[3], y[4], y[5], y[7],
z[1], z[2], z[3], z[4], z[5], z[7],
&dvdx[0], &dvdy[0], &dvdz[0]);
VoluDer(x[0], x[1], x[2], x[7], x[4], x[6],
y[0], y[1], y[2], y[7], y[4], y[6],
z[0], z[1], z[2], z[7], z[4], z[6],
&dvdx[3], &dvdy[3], &dvdz[3]);
VoluDer(x[3], x[0], x[1], x[6], x[7], x[5],
y[3], y[0], y[1], y[6], y[7], y[5],
z[3], z[0], z[1], z[6], z[7], z[5],
&dvdx[2], &dvdy[2], &dvdz[2]);
VoluDer(x[2], x[3], x[0], x[5], x[6], x[4],
y[2], y[3], y[0], y[5], y[6], y[4],
z[2], z[3], z[0], z[5], z[6], z[4],
&dvdx[1], &dvdy[1], &dvdz[1]);
VoluDer(x[7], x[6], x[5], x[0], x[3], x[1],
y[7], y[6], y[5], y[0], y[3], y[1],
z[7], z[6], z[5], z[0], z[3], z[1],
&dvdx[4], &dvdy[4], &dvdz[4]);
VoluDer(x[4], x[7], x[6], x[1], x[0], x[2],
y[4], y[7], y[6], y[1], y[0], y[2],
z[4], z[7], z[6], z[1], z[0], z[2],
&dvdx[5], &dvdy[5], &dvdz[5]);
VoluDer(x[5], x[4], x[7], x[2], x[1], x[3],
y[5], y[4], y[7], y[2], y[1], y[3],
z[5], z[4], z[7], z[2], z[1], z[3],
&dvdx[6], &dvdy[6], &dvdz[6]);
VoluDer(x[6], x[5], x[4], x[3], x[2], x[0],
y[6], y[5], y[4], y[3], y[2], y[0],
z[6], z[5], z[4], z[3], z[2], z[0],
&dvdx[7], &dvdy[7], &dvdz[7]);
}
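// Flanagan-Belytschko hourglass control: project the nodal velocities onto the
// four hourglass mode vectors and accumulate the resisting forces, scaled by
// 'coefficient', into hgfx/hgfy/hgfz.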
static
__device__
__forceinline__
void CalcElemFBHourglassForce(Real_t *xd, Real_t *yd, Real_t *zd, Real_t *hourgam0,
Real_t *hourgam1, Real_t *hourgam2, Real_t *hourgam3,
Real_t *hourgam4, Real_t *hourgam5, Real_t *hourgam6,
Real_t *hourgam7, Real_t coefficient,
Real_t *hgfx, Real_t *hgfy, Real_t *hgfz )
{
Index_t i00=0;
Index_t i01=1;
Index_t i02=2;
Index_t i03=3;
Real_t h00 =
hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] +
hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] +
hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] +
hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7];
Real_t h01 =
hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] +
hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] +
hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] +
hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7];
Real_t h02 =
hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1]+
hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3]+
hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5]+
hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7];
Real_t h03 =
hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] +
hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] +
hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] +
hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7];
hgfx[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfx[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfx[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfx[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfx[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfx[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfx[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfx[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] +
hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] +
hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] +
hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7];
h01 =
hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] +
hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] +
hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] +
hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7];
h02 =
hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1]+
hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3]+
hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5]+
hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7];
h03 =
hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] +
hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] +
hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] +
hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7];
hgfy[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfy[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfy[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfy[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfy[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfy[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfy[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfy[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] +
hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] +
hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] +
hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7];
h01 =
hourgam0[i01] * zd[0] + hourgam1[i01] * zd[1] +
hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] +
hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] +
hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7];
h02 =
hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1]+
hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3]+
hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5]+
hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7];
h03 =
hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] +
hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] +
hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] +
hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7];
hgfz[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfz[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfz[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfz[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfz[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfz[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfz[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfz[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
}
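// Compute the 8x4 hourglass mode vectors (hourgam): a +/-1 base pattern corrected
// by volinv times the projection onto the element volume derivatives.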
__device__
__forceinline__
void CalcHourglassModes(const Real_t xn[8], const Real_t yn[8], const Real_t zn[8],
const Real_t dvdxn[8], const Real_t dvdyn[8], const Real_t dvdzn[8],
Real_t hourgam[8][4], Real_t volinv)
{
Real_t hourmodx, hourmody, hourmodz;
hourmodx = xn[0] + xn[1] - xn[2] - xn[3] - xn[4] - xn[5] + xn[6] + xn[7];
hourmody = yn[0] + yn[1] - yn[2] - yn[3] - yn[4] - yn[5] + yn[6] + yn[7];
hourmodz = zn[0] + zn[1] - zn[2] - zn[3] - zn[4] - zn[5] + zn[6] + zn[7]; // 21
hourgam[0][0] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][0] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][0] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][0] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][0] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][0] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][0] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][0] = 1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz); // 60
hourmodx = xn[0] - xn[1] - xn[2] + xn[3] - xn[4] + xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] - yn[2] + yn[3] - yn[4] + yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] - zn[2] + zn[3] - zn[4] + zn[5] + zn[6] - zn[7];
hourgam[0][1] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][1] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][1] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][1] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][1] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][1] = 1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][1] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][1] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = xn[0] - xn[1] + xn[2] - xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] + yn[2] - yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] + zn[2] - zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][2] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][2] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][2] = 1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][2] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][2] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][2] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][2] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][2] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = -xn[0] + xn[1] - xn[2] + xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = -yn[0] + yn[1] - yn[2] + yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = -zn[0] + zn[1] - zn[2] + zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][3] = -1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][3] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][3] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][3] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][3] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][3] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][3] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][3] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
}
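// One thread per element: compute the stress force from sigxx and the node
// normals and, when hourg > 0, add the hourglass force; results are stored as
// per-element corner forces (double precision) or accumulated directly into the
// nodal forces with atomicAdd (single precision). Negative volumes are flagged
// through bad_vol.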
template< bool hourg_gt_zero >
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,4)
#else
__launch_bounds__(64,8)
#endif
void CalcVolumeForceForElems_kernel(
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
const Real_t* __restrict__ x, const Real_t* __restrict__ y, const Real_t* __restrict__ z,
const Real_t* __restrict__ xd, const Real_t* __restrict__ yd, const Real_t* __restrict__ zd,
//TextureObj<Real_t> x, TextureObj<Real_t> y, TextureObj<Real_t> z,
//TextureObj<Real_t> xd, TextureObj<Real_t> yd, TextureObj<Real_t> zd,
//TextureObj<Real_t>* x, TextureObj<Real_t>* y, TextureObj<Real_t>* z,
//TextureObj<Real_t>* xd, TextureObj<Real_t>* yd, TextureObj<Real_t>* zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)
{
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
Real_t xn[8],yn[8],zn[8];
Real_t xdn[8],ydn[8],zdn[8];
Real_t dvdxn[8],dvdyn[8],dvdzn[8];
Real_t hgfx[8],hgfy[8],hgfz[8];
Real_t hourgam[8][4];
Real_t coefficient;
int elem=blockDim.x*blockIdx.x+threadIdx.x;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t n[8];
#pragma unroll
for (int i=0;i<8;i++) {
n[i] = nodelist[elem+i*padded_numElem];
}
Real_t volinv = Real_t(1.0) / det;
//#pragma unroll
//for (int i=0;i<8;i++) {
// xn[i] =x[n[i]];
// yn[i] =y[n[i]];
// zn[i] =z[n[i]];
//}
#pragma unroll
for (int i=0;i<8;i++)
xn[i] =x[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
yn[i] =y[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zn[i] =z[n[i]];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
CalcElemVolumeDerivative(dvdxn, dvdyn, dvdzn, xn, yn, zn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
CalcHourglassModes(xn,yn,zn,dvdxn,dvdyn,dvdzn,hourgam,volinv);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t B[3][8];
CalcElemShapeFunctionDerivatives(xn, yn, zn, B, &det);
CalcElemNodeNormals( B[0] , B[1], B[2], xn, yn, zn);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
#pragma unroll
for (int i=0;i<8;i++)
{
hgfx[i] = -( sigxx*B[0][i] );
hgfy[i] = -( sigxx*B[1][i] );
hgfz[i] = -( sigxx*B[2][i] );
}
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
// #pragma unroll
// for (int i=0;i<8;i++) {
// xdn[i] =xd[n[i]];
// ydn[i] =yd[n[i]];
// zdn[i] =zd[n[i]];
// }
#pragma unroll
for (int i=0;i<8;i++)
xdn[i] =xd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
ydn[i] =yd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zdn[i] =zd[n[i]];
CalcElemFBHourglassForce
( &xdn[0],&ydn[0],&zdn[0],
hourgam[0],hourgam[1],hourgam[2],hourgam[3],
hourgam[4],hourgam[5],hourgam[6],hourgam[7],
coefficient,
&hgfx[0],&hgfy[0],&hgfz[0]
);
}
#ifdef DOUBLE_PRECISION
#pragma unroll
for (int node=0;node<8;node++)
{
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx[node];
fy_elem[store_loc]=hgfy[node];
fz_elem[store_loc]=hgfz[node];
}
#else
#pragma unroll
for (int i=0;i<8;i++)
{
Index_t ni= n[i];
atomicAdd(&fx_node[ni],hgfx[i]);
atomicAdd(&fy_node[ni],hgfy[i]);
atomicAdd(&fz_node[ni],hgfz[i]);
}
#endif
} // If elem < numElem
}
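// Alternative layout: one thread per element corner (8 threads per element, so a
// warp covers 4 elements); intra-element reductions use warp shuffles instead of
// per-thread arrays.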
template< bool hourg_gt_zero, int cta_size>
__global__
void CalcVolumeForceForElems_kernel_warp_per_4cell(
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
//const Real_t __restrict__ *x, const Real_t __restrict__ *y, const Real_t __restrict__ *z,
//const Real_t __restrict__ *xd, const Real_t __restrict__ *yd, const Real_t __restrict__ *zd,
const Real_t *x, const Real_t *y, const Real_t *z,
const Real_t *xd, const Real_t *yd, const Real_t *zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)
{
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
Real_t xn,yn,zn;
Real_t xdn,ydn,zdn;
Real_t dvdxn,dvdyn,dvdzn;
Real_t hgfx,hgfy,hgfz;
Real_t hourgam[4];
Real_t coefficient;
int tid=blockDim.x*blockIdx.x+threadIdx.x;
int elem = tid >> 3; // elem = tid/8
int node = tid & 7; // node = tid%8
// elem within cta
// int cta_elem = threadIdx.x/8;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t node_id;
node_id = nodelist[elem+node*padded_numElem];
Real_t volinv = Real_t(1.0) / det;
xn =x[node_id];
yn =y[node_id];
zn =z[node_id];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
unsigned int ind0,ind1,ind2,ind3,ind4,ind5;
// Use octal number to represent the indices for each node
//ind0 = 012307456;
//ind1 = 023016745;
//ind2 = 030125674;
//ind3 = 045670123;
//ind4 = 056743012;
//ind5 = 074561230;
//int mask = 7u << (3*node);
switch(node) {
case 0:
{ind0=1; ind1=2; ind2=3; ind3=4; ind4=5; ind5=7;
break;}
case 1:
{ind0=2; ind1=3; ind2=0; ind3=5; ind4=6; ind5=4;
break;}
case 2:
{ind0=3; ind1=0; ind2=1; ind3=6; ind4=7; ind5=5;
break;}
case 3:
{ind0=0; ind1=1; ind2=2; ind3=7; ind4=4; ind5=6;
break;}
case 4:
{ind0=7; ind1=6; ind2=5; ind3=0; ind4=3; ind5=1;
break;}
case 5:
{ind0=4; ind1=7; ind2=6; ind3=1; ind4=0; ind5=2;
break;}
case 6:
{ind0=5; ind1=4; ind2=7; ind3=2; ind4=1; ind5=3;
break;}
case 7:
{ind0=6; ind1=5; ind2=4; ind3=3; ind4=2; ind5=0;
break;}
}
VOLUDER(utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
dvdxn);
VOLUDER(utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
dvdyn);
VOLUDER(utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
dvdzn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
Real_t hourmodx, hourmody, hourmodz;
const Real_t posf = Real_t( 1.);
const Real_t negf = Real_t(-1.);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==2 || node==3 || node==4 || node==5) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[0] = negf;
}
else hourgam[0] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[0] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==2 || node==4 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[1] = negf;
}
else hourgam[1] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[1] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==3 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[2] = negf;
}
else hourgam[2] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[2] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==0 || node==2 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[3] = negf;
}
else hourgam[3] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[3] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t b[3];
/*************************************************/
//CalcElemShapeFunctionDerivatives_warp_per_4cell(xn, yn, zn, B, &det);
/*************************************************/
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
fjxxi = fjxet = fjxze = Real_t(0.125)*xn;
fjyxi = fjyet = fjyze = Real_t(0.125)*yn;
fjzxi = fjzet = fjzze = Real_t(0.125)*zn;
if (node==0 || node==3 || node==7 || node==4)
{
fjxxi = -fjxxi;
fjyxi = -fjyxi;
fjzxi = -fjzxi;
}
if (node==0 || node==5 || node==1 || node==4)
{
fjxet = -fjxet;
fjyet = -fjyet;
fjzet = -fjzet;
}
if (node==0 || node==3 || node==1 || node==2)
{
fjxze = -fjxze;
fjyze = -fjyze;
fjzze = -fjzze;
}
SumOverNodesShfl(fjxxi);
SumOverNodesShfl(fjxet);
SumOverNodesShfl(fjxze);
SumOverNodesShfl(fjyxi);
SumOverNodesShfl(fjyet);
SumOverNodesShfl(fjyze);
SumOverNodesShfl(fjzxi);
SumOverNodesShfl(fjzet);
SumOverNodesShfl(fjzze);
/* compute cofactors */
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
Real_t coef_xi, coef_et, coef_ze;
if (node==0 || node==3 || node==4 || node==7)
coef_xi = Real_t(-1.);
else
coef_xi = Real_t(1.);
if (node==0 || node==1 || node==4 || node==5)
coef_et = Real_t(-1.);
else
coef_et = Real_t(1.);
if (node==0 || node==1 || node==2 || node==3)
coef_ze = Real_t(-1.);
else
coef_ze = Real_t(1.);
/* calculate partials :
this need only be done for l = 0,1,2,3 since , by symmetry ,
(6,7,4,5) = - (0,1,2,3) .
*/
b[0] = coef_xi * cjxxi + coef_et * cjxet + coef_ze * cjxze;
b[1] = coef_xi * cjyxi + coef_et * cjyet + coef_ze * cjyze;
b[2] = coef_xi * cjzxi + coef_et * cjzet + coef_ze * cjzze;
/* calculate jacobian determinant (volume) */
det = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
/*************************************************/
//CalcElemNodeNormals_warp_per_4cell( B[0] , B[1], B[2], xn, yn, zn);
/*************************************************/
b[0] = Real_t(0.0);
b[1] = Real_t(0.0);
b[2] = Real_t(0.0);
// Accumulate the six face contributions; nodes not on a given face contribute zero
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,1,2,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,4,5,1);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 1,5,6,2);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 2,6,7,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 3,7,4,0);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 4,7,6,5);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
hgfx = -( sigxx*b[0] );
hgfy = -( sigxx*b[1] );
hgfz = -( sigxx*b[2] );
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
xdn = xd[node_id];
ydn = yd[node_id];
zdn = zd[node_id];
Real_t hgfx_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*xdn;
SumOverNodesShfl(h);
hgfx_temp+=hourgam[i]*h;
}
hgfx_temp *= coefficient;
hgfx += hgfx_temp;
Real_t hgfy_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*ydn;
SumOverNodesShfl(h);
hgfy_temp+=hourgam[i]*h;
}
hgfy_temp *= coefficient;
hgfy += hgfy_temp;
Real_t hgfz_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*zdn;
SumOverNodesShfl(h);
hgfz_temp+=hourgam[i]*h;
}
hgfz_temp *= coefficient;
hgfz += hgfz_temp;
}
#ifdef DOUBLE_PRECISION
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx;
fy_elem[store_loc]=hgfy;
fz_elem[store_loc]=hgfz;
#else
atomicAdd(&fx_node[node_id],hgfx);
atomicAdd(&fy_node[node_id],hgfy);
atomicAdd(&fz_node[node_id],hgfz);
#endif
} // If elem < numElem
}
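// Host-side launcher: pick the <true>/<false> hourglass specialization based on
// hgcoef and, in the double precision build, run a second kernel to gather the
// per-element corner forces onto the nodes.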
static inline
void CalcVolumeForceForElems(const Real_t hgcoef,Domain *domain)
{
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
/*
#ifdef DOUBLE_PRECISION
Vector_d<Real_t>* fx_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fy_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fz_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
#else
thrust::fill(domain->fx.begin(),domain->fx.end(),0.);
thrust::fill(domain->fy.begin(),domain->fy.end(),0.);
thrust::fill(domain->fz.begin(),domain->fz.end(),0.);
#endif
*/
int num_threads = numElem ;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
bool hourg_gt_zero = hgcoef > Real_t(0.0);
if (hourg_gt_zero)
{
hipLaunchKernelGGL(( CalcVolumeForceForElems_kernel<true>) , dim3(dimGrid),dim3(block_size), 0, 0,
domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
domain->fx_elem->raw(),
domain->fy_elem->raw(),
domain->fz_elem->raw(),
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
else
{
hipLaunchKernelGGL(( CalcVolumeForceForElems_kernel<false>) , dim3(dimGrid),dim3(block_size), 0, 0,
domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
domain->fx_elem->raw(),
domain->fy_elem->raw(),
domain->fz_elem->raw(),
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
#ifdef DOUBLE_PRECISION
num_threads = domain->numNode;
// Gather the per-element corner forces into the nodal force arrays
dimGrid= PAD_DIV(num_threads,block_size);
hipLaunchKernelGGL(( AddNodeForcesFromElems_kernel), dim3(dimGrid),dim3(block_size), 0, 0,
domain->numNode,
domain->padded_numNode,
domain->nodeElemCount.raw(),
domain->nodeElemStart.raw(),
domain->nodeElemCornerList.raw(),
domain->fx_elem->raw(),
domain->fy_elem->raw(),
domain->fz_elem->raw(),
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
num_threads
);
// hipDeviceSynchronize();
// cudaCheckError();
// Allocator<Vector_d<Real_t> >::free(fx_elem,padded_numElem*8);
// Allocator<Vector_d<Real_t> >::free(fy_elem,padded_numElem*8);
// Allocator<Vector_d<Real_t> >::free(fz_elem,padded_numElem*8);
#endif // ifdef DOUBLE_PRECISION
return ;
}
/*
static inline
void CalcVolumeForceForElems_warp_per_4cell(const Real_t hgcoef,Domain *domain)
{
// Map one warp per 4 cells, i.e. one thread per vertex
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
#ifdef DOUBLE_PRECISION
Vector_d<Real_t>* fx_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fy_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fz_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
#else
thrust::fill(domain->fx.begin(),domain->fx.end(),0.);
thrust::fill(domain->fy.begin(),domain->fy.end(),0.);
thrust::fill(domain->fz.begin(),domain->fz.end(),0.);
#endif
const int warps_per_cta = 2;
const int cta_size = warps_per_cta * 32;
int num_threads = numElem*8;
int dimGrid = PAD_DIV(num_threads,cta_size);
bool hourg_gt_zero = hgcoef > Real_t(0.0);
if (hourg_gt_zero)
{
CalcVolumeForceForElems_kernel_warp_per_4cell<true, cta_size> <<<dimGrid,cta_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(),
domain->y.raw(),
domain->z.raw(),
domain->xd.raw(),
domain->yd.raw(),
domain->zd.raw(),
//domain->tex_x, domain->tex_y, domain->tex_z, domain->tex_xd, domain->tex_yd, domain->tex_zd,
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
else
{
CalcVolumeForceForElems_kernel_warp_per_4cell<false, cta_size> <<<dimGrid,cta_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(),
domain->y.raw(),
domain->z.raw(),
domain->xd.raw(),
domain->yd.raw(),
domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
#ifdef DOUBLE_PRECISION
num_threads = domain->numNode;
// Launch boundary nodes first
dimGrid= PAD_DIV(num_threads,cta_size);
AddNodeForcesFromElems_kernel<<<dimGrid,cta_size>>>
( domain->numNode,
domain->padded_numNode,
domain->nodeElemCount.raw(),
domain->nodeElemStart.raw(),
domain->nodeElemCornerList.raw(),
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw(),
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
num_threads
);
//hipDeviceSynchronize();
//cudaCheckError();
Allocator<Vector_d<Real_t> >::free(fx_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fy_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fz_elem,padded_numElem*8);
#endif // ifdef DOUBLE_PRECISION
return ;
}
*/
static inline
void CalcVolumeForceForElems(Domain* domain)
{
const Real_t hgcoef = domain->hgcoef ;
CalcVolumeForceForElems(hgcoef,domain);
//CalcVolumeForceForElems_warp_per_4cell(hgcoef,domain);
}
static inline void checkErrors(Domain* domain,int its,int myRank)
{
if (*(domain->bad_vol_h) != -1)
{
printf("Rank %i: Volume Error in cell %d at iteration %d\n",myRank,*(domain->bad_vol_h),its);
exit(VolumeError);
}
if (*(domain->bad_q_h) != -1)
{
printf("Rank %i: Q Error in cell %d at iteration %d\n",myRank,*(domain->bad_q_h),its);
exit(QStopError);
}
}
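// Force phase: post the asynchronous MPI receive for nodal forces, compute the
// volume forces on the GPU, then send and sum the boundary forces across
// neighboring domains on streams[2].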
static inline void CalcForceForNodes(Domain *domain)
{
#if USE_MPI
CommRecv(*domain, MSG_COMM_SBN, 3,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, 0) ;
#endif
CalcVolumeForceForElems(domain);
// moved here from the main loop to allow async execution with GPU work
// TimeIncrement(domain);
#if USE_MPI
// initialize pointers
/*
domain->d_fx = domain->fx.raw();
domain->d_fy = domain->fy.raw();
domain->d_fz = domain->fz.raw();
*/
Domain_member fieldData[3] ;
fieldData[0] = &Domain::get_fx ;
fieldData[1] = &Domain::get_fy ;
fieldData[2] = &Domain::get_fz ;
CommSendGpu(*domain, MSG_COMM_SBN, 3, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, domain->streams[2], 0) ;
CommSBNGpu(*domain, 3, fieldData, &domain->streams[2], 0) ;
#endif
}
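// Nodal acceleration: a = F / nodalMass for every node.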
__global__
void CalcAccelerationForNodes_kernel(int numNode,
Real_t *xdd, Real_t *ydd, Real_t *zdd,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *nodalMass)
{
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < numNode)
{
Real_t one_over_nMass = Real_t(1.)/nodalMass[tid];
xdd[tid]=fx[tid]*one_over_nMass;
ydd[tid]=fy[tid]*one_over_nMass;
zdd[tid]=fz[tid]*one_over_nMass;
}
}
static inline
void CalcAccelerationForNodes(Domain *domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
hipLaunchKernelGGL(( CalcAccelerationForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numNode,
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw(),
domain->fx.raw(),domain->fy.raw(),domain->fz.raw(),
domain->nodalMass.raw());
//hipDeviceSynchronize();
//cudaCheckError();
}
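// Zero the acceleration component normal to a symmetry plane for the listed
// boundary nodes (launched once per plane with symmX/symmY/symmZ).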
__global__
void ApplyAccelerationBoundaryConditionsForNodes_kernel(
int numNodeBC, Real_t *xyzdd,
Index_t *symm)
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNodeBC)
{
xyzdd[symm[i]] = Real_t(0.0) ;
}
}
static inline
void ApplyAccelerationBoundaryConditionsForNodes(Domain *domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numSymmX,dimBlock);
if (domain->numSymmX > 0)
hipLaunchKernelGGL(( ApplyAccelerationBoundaryConditionsForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numSymmX,
domain->xdd.raw(),
domain->symmX.raw());
dimGrid = PAD_DIV(domain->numSymmY,dimBlock);
if (domain->numSymmY > 0)
hipLaunchKernelGGL(( ApplyAccelerationBoundaryConditionsForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numSymmY,
domain->ydd.raw(),
domain->symmY.raw());
dimGrid = PAD_DIV(domain->numSymmZ,dimBlock);
if (domain->numSymmZ > 0)
hipLaunchKernelGGL(( ApplyAccelerationBoundaryConditionsForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numSymmZ,
domain->zdd.raw(),
domain->symmZ.raw());
}
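// Integrate nodal velocities and positions over the current delta time, zeroing
// velocity components whose magnitude falls below u_cut.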
__global__
void CalcPositionAndVelocityForNodes_kernel(int numNode,
const Real_t * deltatime,
const Real_t u_cut,
Real_t* __restrict__ x, Real_t* __restrict__ y, Real_t* __restrict__ z,
Real_t* __restrict__ xd, Real_t* __restrict__ yd, Real_t* __restrict__ zd,
const Real_t* __restrict__ xdd, const Real_t* __restrict__ ydd, const Real_t* __restrict__ zdd)
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNode)
{
Real_t xdtmp, ydtmp, zdtmp, dt;
dt = deltatime[0];
xdtmp = xd[i] + xdd[i] * dt ;
ydtmp = yd[i] + ydd[i] * dt ;
zdtmp = zd[i] + zdd[i] * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = 0.0;
if( FABS(ydtmp) < u_cut ) ydtmp = 0.0;
if( FABS(zdtmp) < u_cut ) zdtmp = 0.0;
x[i] += xdtmp * dt;
y[i] += ydtmp * dt;
z[i] += zdtmp * dt;
xd[i] = xdtmp;
yd[i] = ydtmp;
zd[i] = zdtmp;
}
}
static inline
void CalcPositionAndVelocityForNodes(const Real_t u_cut, Domain* domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
// hipEventSynchronize(domain->time_constraint_reduced[currIter]);
hipStreamWaitEvent(NULL, domain->time_constraint_reduced[currIter], 0);
hipLaunchKernelGGL(( CalcPositionAndVelocityForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numNode, domain->deltatime_d_async,u_cut,
domain->x.raw(),domain->y.raw(),domain->z.raw(),
domain->xd.raw(),domain->yd.raw(),domain->zd.raw(),
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw());
//hipDeviceSynchronize();
//cudaCheckError();
}
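// Nodal phase of a Lagrange step: forces, accelerations, acceleration boundary
// conditions, and the position/velocity update, with optional early MPI sync of
// positions and velocities.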
static inline
void LagrangeNodal(Domain *domain)
{
#ifdef SEDOV_SYNC_POS_VEL_EARLY
Domain_member fieldData[6] ;
#endif
Real_t u_cut = domain->u_cut ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
CalcForceForNodes(domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
//printf("Rank %d CommRecv di LagrangeNodal\n",globalRank);
CommRecv(*domain, MSG_SYNC_POS_VEL, 6,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, 0) ;
//printf("Rank %d Fuori CommRecv di LagrangeNodal\n", globalRank);
#endif
#endif
CalcAccelerationForNodes(domain);
ApplyAccelerationBoundaryConditionsForNodes(domain);
CalcPositionAndVelocityForNodes(u_cut, domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
// initialize pointers
domain->d_x = domain->x.raw();
domain->d_y = domain->y.raw();
domain->d_z = domain->z.raw();
domain->d_xd = domain->xd.raw();
domain->d_yd = domain->yd.raw();
domain->d_zd = domain->zd.raw();
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
CommSendGpu(*domain, MSG_SYNC_POS_VEL, 6, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, domain->streams[2], 0) ;
CommSyncPosVelGpu(*domain, &domain->streams[2], 0) ;
#endif
#endif
return;
}
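// Squared area measure of a quadrilateral face; the square root is deferred to
// the caller (CalcElemCharacteristicLength), which only needs the maximum.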
__device__
static inline
Real_t AreaFace( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3)
{
Real_t fx = (x2 - x0) - (x3 - x1);
Real_t fy = (y2 - y0) - (y3 - y1);
Real_t fz = (z2 - z0) - (z3 - z1);
Real_t gx = (x2 - x0) + (x3 - x1);
Real_t gy = (y2 - y0) + (y3 - y1);
Real_t gz = (z2 - z0) + (z3 - z1);
Real_t temp = (fx * gx + fy * gy + fz * gz);
Real_t area =
(fx * fx + fy * fy + fz * fz) *
(gx * gx + gy * gy + gz * gz) -
temp * temp;
return area ;
}
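// Characteristic length of a hex element: 4*V divided by the square root of the
// largest face-area measure.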
__device__
static inline
Real_t CalcElemCharacteristicLength( const Real_t x[8],
const Real_t y[8],
const Real_t z[8],
const Real_t volume)
{
Real_t a, charLength = Real_t(0.0);
a = AreaFace(x[0],x[1],x[2],x[3],
y[0],y[1],y[2],y[3],
z[0],z[1],z[2],z[3]) ; // 38
charLength = FMAX(a,charLength) ;
a = AreaFace(x[4],x[5],x[6],x[7],
y[4],y[5],y[6],y[7],
z[4],z[5],z[6],z[7]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[0],x[1],x[5],x[4],
y[0],y[1],y[5],y[4],
z[0],z[1],z[5],z[4]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[1],x[2],x[6],x[5],
y[1],y[2],y[6],y[5],
z[1],z[2],z[6],z[5]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[2],x[3],x[7],x[6],
y[2],y[3],y[7],y[6],
z[2],z[3],z[7],z[6]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[3],x[0],x[4],x[7],
y[3],y[0],y[4],y[7],
z[3],z[0],z[4],z[7]) ;
charLength = FMAX(a,charLength) ;
charLength = Real_t(4.0) * volume / SQRT(charLength);
return charLength;
}
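// Velocity gradient / strain-rate components: d[0..2] are the diagonal terms,
// d[3..5] the symmetrized off-diagonal shear terms.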
__device__
static
__forceinline__
void CalcElemVelocityGradient( const Real_t* const xvel,
const Real_t* const yvel,
const Real_t* const zvel,
const Real_t b[][8],
const Real_t detJ,
Real_t* const d )
{
const Real_t inv_detJ = Real_t(1.0) / detJ ;
Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
const Real_t* const pfx = b[0];
const Real_t* const pfy = b[1];
const Real_t* const pfz = b[2];
Real_t tmp1 = (xvel[0]-xvel[6]);
Real_t tmp2 = (xvel[1]-xvel[7]);
Real_t tmp3 = (xvel[2]-xvel[4]);
Real_t tmp4 = (xvel[3]-xvel[5]);
d[0] = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dxddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dxddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (yvel[0]-yvel[6]);
tmp2 = (yvel[1]-yvel[7]);
tmp3 = (yvel[2]-yvel[4]);
tmp4 = (yvel[3]-yvel[5]);
d[1] = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dyddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dyddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (zvel[0]-zvel[6]);
tmp2 = (zvel[1]-zvel[7]);
tmp3 = (zvel[2]-zvel[4]);
tmp4 = (zvel[3]-zvel[5]);
d[2] = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
dzddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dzddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
d[5] = Real_t( .5) * ( dxddy + dyddx );
d[4] = Real_t( .5) * ( dxddz + dzddx );
d[3] = Real_t( .5) * ( dzddy + dyddz );
}
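// Monotonic Q gradients: characteristic lengths (delx_*) and velocity
// differences (delv_*) along the xi, eta and zeta element directions.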
static __device__ __forceinline__
void CalcMonoGradient(Real_t *x, Real_t *y, Real_t *z,
Real_t *xv, Real_t *yv, Real_t *zv,
Real_t vol,
Real_t *delx_zeta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delv_xi,
Real_t *delx_eta,
Real_t *delv_eta)
{
#define SUM4(a,b,c,d) (a + b + c + d)
const Real_t ptiny = Real_t(1.e-36) ;
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*(SUM4(x[0],x[1],x[5],x[4]) - SUM4(x[3],x[2],x[6],x[7])) ;
Real_t dyj = Real_t(-0.25)*(SUM4(y[0],y[1],y[5],y[4]) - SUM4(y[3],y[2],y[6],y[7])) ;
Real_t dzj = Real_t(-0.25)*(SUM4(z[0],z[1],z[5],z[4]) - SUM4(z[3],z[2],z[6],z[7])) ;
Real_t dxi = Real_t( 0.25)*(SUM4(x[1],x[2],x[6],x[5]) - SUM4(x[0],x[3],x[7],x[4])) ;
Real_t dyi = Real_t( 0.25)*(SUM4(y[1],y[2],y[6],y[5]) - SUM4(y[0],y[3],y[7],y[4])) ;
Real_t dzi = Real_t( 0.25)*(SUM4(z[1],z[2],z[6],z[5]) - SUM4(z[0],z[3],z[7],z[4])) ;
Real_t dxk = Real_t( 0.25)*(SUM4(x[4],x[5],x[6],x[7]) - SUM4(x[0],x[1],x[2],x[3])) ;
Real_t dyk = Real_t( 0.25)*(SUM4(y[4],y[5],y[6],y[7]) - SUM4(y[0],y[1],y[2],y[3])) ;
Real_t dzk = Real_t( 0.25)*(SUM4(z[4],z[5],z[6],z[7]) - SUM4(z[0],z[1],z[2],z[3])) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
*delx_zeta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[4],xv[5],xv[6],xv[7]) - SUM4(xv[0],xv[1],xv[2],xv[3])) ;
dyv = Real_t(0.25)*(SUM4(yv[4],yv[5],yv[6],yv[7]) - SUM4(yv[0],yv[1],yv[2],yv[3])) ;
dzv = Real_t(0.25)*(SUM4(zv[4],zv[5],zv[6],zv[7]) - SUM4(zv[0],zv[1],zv[2],zv[3])) ;
*delv_zeta = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
*delx_xi = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[1],xv[2],xv[6],xv[5]) - SUM4(xv[0],xv[3],xv[7],xv[4])) ;
dyv = Real_t(0.25)*(SUM4(yv[1],yv[2],yv[6],yv[5]) - SUM4(yv[0],yv[3],yv[7],yv[4])) ;
dzv = Real_t(0.25)*(SUM4(zv[1],zv[2],zv[6],zv[5]) - SUM4(zv[0],zv[3],zv[7],zv[4])) ;
*delv_xi = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
*delx_eta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*(SUM4(xv[0],xv[1],xv[5],xv[4]) - SUM4(xv[3],xv[2],xv[6],xv[7])) ;
dyv = Real_t(-0.25)*(SUM4(yv[0],yv[1],yv[5],yv[4]) - SUM4(yv[3],yv[2],yv[6],yv[7])) ;
dzv = Real_t(-0.25)*(SUM4(zv[0],zv[1],zv[5],zv[4]) - SUM4(zv[3],zv[2],zv[6],zv[7])) ;
*delv_eta = ax*dxv + ay*dyv + az*dzv ;
#undef SUM4
}
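// Fused kernel, one thread per element: element kinematics (new relative volume,
// characteristic length, deviatoric strain rate) plus the monotonic Q gradient
// terms; negative relative volumes are flagged through bad_vol.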
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,8) // 64-bit
#else
__launch_bounds__(64,16) // 32-bit
#endif
void CalcKinematicsAndMonotonicQGradient_kernel(
Index_t numElem, Index_t padded_numElem, const Real_t * dt,
const Index_t* __restrict__ nodelist, const Real_t* __restrict__ volo, const Real_t* __restrict__ v,
const Real_t* __restrict__ x,
const Real_t* __restrict__ y,
const Real_t* __restrict__ z,
const Real_t* __restrict__ xd,
const Real_t* __restrict__ yd,
const Real_t* __restrict__ zd,
Real_t* __restrict__ vnew,
Real_t* __restrict__ delv,
Real_t* __restrict__ arealg,
Real_t* __restrict__ dxx,
Real_t* __restrict__ dyy,
Real_t* __restrict__ dzz,
Real_t* __restrict__ vdov,
Real_t* __restrict__ delx_zeta,
Real_t* __restrict__ delv_zeta,
Real_t* __restrict__ delx_xi,
Real_t* __restrict__ delv_xi,
Real_t* __restrict__ delx_eta,
Real_t* __restrict__ delv_eta,
Index_t* __restrict__ bad_vol,
const Index_t num_threads
)
{
Real_t B[3][8] ; /** shape function derivatives */
Index_t nodes[8] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t D[6];
int k=blockDim.x*blockIdx.x+threadIdx.x;
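// One thread per element: gather the 8 nodes, update the relative volume,
// characteristic length and deviatoric strain rate, then compute the
// monotonic Q gradients for this element.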
if ( k < num_threads) {
Real_t volume ;
Real_t relativeVolume ;
// get nodal coordinates from global arrays and copy into local arrays.
//#pragma unroll
//for( Index_t lnode=0 ; lnode<8 ; ++lnode )
//{
// Index_t gnode = nodelist[k+lnode*padded_numElem];
// nodes[lnode] = gnode;
// x_local[lnode] = x[gnode];
// y_local[lnode] = y[gnode];
// z_local[lnode] = z[gnode];
//}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*padded_numElem];
nodes[lnode] = gnode;
}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
x_local[lnode] = x[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
y_local[lnode] = y[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
z_local[lnode] = z[nodes[lnode]];
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
vnew[k] = relativeVolume ;
delv[k] = relativeVolume - v[k] ;
// set characteristic length
arealg[k] = CalcElemCharacteristicLength(x_local,y_local,z_local,volume);
// get nodal velocities from global array and copy into local arrays.
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodes[lnode];
xd_local[lnode] = xd[gnode];
yd_local[lnode] = yd[gnode];
zd_local[lnode] = zd[gnode];
}
Real_t dt2 = Real_t(0.5) * dt[0];
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
Real_t detJ;
CalcElemShapeFunctionDerivatives(x_local,y_local,z_local,B,&detJ );
CalcElemVelocityGradient(xd_local,yd_local,zd_local,B,detJ,D);
// ------------------------
// CALC LAGRANGE ELEM 2
// ------------------------
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdovNew = D[0] + D[1] + D[2];
Real_t vdovthird = vdovNew/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
vdov[k] = vdovNew ;
dxx[k] = D[0] - vdovthird ;
dyy[k] = D[1] - vdovthird ;
dzz[k] = D[2] - vdovthird ;
// ------------------------
// CALC MONOTONIC Q GRADIENT
// ------------------------
Real_t vol = volo[k]*vnew[k];
// Undo x_local update
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j ) {
x_local[j] += dt2 * xd_local[j];
y_local[j] += dt2 * yd_local[j];
z_local[j] += dt2 * zd_local[j];
}
CalcMonoGradient(x_local,y_local,z_local,xd_local,yd_local,zd_local,
vol,
&delx_zeta[k],&delv_zeta[k],&delx_xi[k],
&delv_xi[k], &delx_eta[k], &delv_eta[k]);
//Check for bad volume
if (relativeVolume < 0)
*bad_vol = k;
}
}
static inline
void CalcKinematicsAndMonotonicQGradient(Domain *domain)
{
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
int num_threads = numElem;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
//elenago
#if 0
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("QGradient ---> Rank %d, dimGrid: %d numElem: %d, padded_numElem: %d domain->deltatime_h: %f bad_vol_h=%d\n",
rank, dimGrid, numElem, padded_numElem, domain->deltatime_h, *domain->bad_vol_h);
#endif
hipLaunchKernelGGL((
CalcKinematicsAndMonotonicQGradient_kernel), dim3(dimGrid),dim3(block_size), 0, 0,
numElem,padded_numElem, domain->deltatime_d_async,
domain->nodelist.raw(),
domain->volo.raw(),
domain->v.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
domain->vnew->raw(),
domain->delv.raw(),
domain->arealg.raw(),
domain->dxx->raw(),
domain->dyy->raw(),
domain->dzz->raw(),
domain->vdov.raw(),
domain->delx_zeta->raw(),
domain->delv_zeta->raw(),
domain->delx_xi->raw(),
domain->delv_xi->raw(),
domain->delx_eta->raw(),
domain->delv_eta->raw(),
domain->bad_vol_h,
num_threads
);
//hipDeviceSynchronize();
//cudaCheckError();
}
#if 0
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,8) // 64-bit
#else
__launch_bounds__(64,16) // 32-bit
#endif
void testVolume(
Index_t numElem, Index_t padded_numElem, const Real_t dt,
const Index_t* __restrict__ nodelist, const Real_t* __restrict__ volo, const Real_t* __restrict__ v,
const Real_t* __restrict__ x,
const Real_t* __restrict__ y,
const Real_t* __restrict__ z,
const Real_t* __restrict__ xd,
const Real_t* __restrict__ yd,
const Real_t* __restrict__ zd,
Real_t* __restrict__ vnew,
Real_t* __restrict__ delv,
Real_t* __restrict__ arealg,
Real_t* __restrict__ dxx,
Real_t* __restrict__ dyy,
Real_t* __restrict__ dzz,
Real_t* __restrict__ vdov,
Real_t* __restrict__ delx_zeta,
Real_t* __restrict__ delv_zeta,
Real_t* __restrict__ delx_xi,
Real_t* __restrict__ delv_xi,
Real_t* __restrict__ delx_eta,
Real_t* __restrict__ delv_eta,
Index_t* __restrict__ bad_vol,
const Index_t num_threads
)
{
Real_t B[3][8] ; /** shape function derivatives */
Index_t nodes[8] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t D[6];
int k=blockDim.x*blockIdx.x+threadIdx.x;
if ( k < num_threads) {
Real_t volume ;
Real_t relativeVolume ;
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*padded_numElem];
nodes[lnode] = gnode;
}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
x_local[lnode] = x[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
y_local[lnode] = y[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
z_local[lnode] = z[nodes[lnode]];
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
//Check for bad volume
if (relativeVolume < 0)
*bad_vol = k;
}
}
static inline
void testVolume(Domain *domain)
{
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
int num_threads = numElem;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
hipLaunchKernelGGL((
testVolume), dim3(dimGrid),dim3(block_size), 0, 0,
numElem,padded_numElem, domain->deltatime_h,
domain->nodelist.raw(),
domain->volo.raw(),
domain->v.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
domain->vnew->raw(),
domain->delv.raw(),
domain->arealg.raw(),
domain->dxx->raw(),
domain->dyy->raw(),
domain->dzz->raw(),
domain->vdov.raw(),
domain->delx_zeta->raw(),
domain->delv_zeta->raw(),
domain->delx_xi->raw(),
domain->delv_xi->raw(),
domain->delx_eta->raw(),
domain->delv_eta->raw(),
domain->bad_vol_h,
num_threads
);
//hipDeviceSynchronize();
//cudaCheckError();
}
#endif
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcMonotonicQRegionForElems_kernel(
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
Index_t* regElemlist,
// const Index_t* __restrict__ regElemlist,
Index_t *elemBC,
Index_t *lxim,
Index_t *lxip,
Index_t *letam,
Index_t *letap,
Index_t *lzetam,
Index_t *lzetap,
Real_t *delv_xi,
Real_t *delv_eta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delx_eta,
Real_t *delx_zeta,
Real_t *vdov,Real_t *elemMass,Real_t *volo,Real_t *vnew,
Real_t *qq, Real_t *ql,
Real_t *q,
Real_t qstop,
Index_t* bad_q
)
{
int ielem=blockDim.x*blockIdx.x + threadIdx.x;
if (ielem<elength) {
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Index_t i = regElemlist[ielem];
Int_t bcMask = elemBC[i] ;
Real_t delvm, delvp ;
/* phixi */
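/* delvm/delvp are the velocity gradients of the neighbours across the
   -xi/+xi faces; the boundary-condition mask selects a real neighbour
   (COMM or interior), the element itself (SYMM) or zero (FREE).  phixi is
   the resulting limited slope, clamped to [0, monoq_max_slope]. */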
Real_t norm = Real_t(1.) / ( delv_xi[i] + ptiny ) ;
switch (bcMask & XI_M) {
case XI_M_COMM: /* needs comm data */
case 0: delvm = delv_xi[lxim[i]] ; break ;
case XI_M_SYMM: delvm = delv_xi[i] ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & XI_P) {
case XI_P_COMM: /* needs comm data */
case 0: delvp = delv_xi[lxip[i]] ; break ;
case XI_P_SYMM: delvp = delv_xi[i] ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = Real_t(1.) / ( delv_eta[i] + ptiny ) ;
switch (bcMask & ETA_M) {
case ETA_M_COMM: /* needs comm data */
case 0: delvm = delv_eta[letam[i]] ; break ;
case ETA_M_SYMM: delvm = delv_eta[i] ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ETA_P) {
case ETA_P_COMM: /* needs comm data */
case 0: delvp = delv_eta[letap[i]] ; break ;
case ETA_P_SYMM: delvp = delv_eta[i] ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = Real_t(1.) / ( delv_zeta[i] + ptiny ) ;
switch (bcMask & ZETA_M) {
case ZETA_M_COMM: /* needs comm data */
case 0: delvm = delv_zeta[lzetam[i]] ; break ;
case ZETA_M_SYMM: delvm = delv_zeta[i] ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ZETA_P) {
case ZETA_P_COMM: /* needs comm data */
case 0: delvp = delv_zeta[lzetap[i]] ; break ;
case ZETA_P_SYMM: delvp = delv_zeta[i] ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( vdov[i] > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
Real_t delvxxi = delv_xi[i] * delx_xi[i] ;
Real_t delvxeta = delv_eta[i] * delx_eta[i] ;
Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
qq[i] = qquad ;
ql[i] = qlin ;
// Don't allow excessive artificial viscosity
if (q[i] > qstop)
*(bad_q) = i;
}
}
static inline
void CalcMonotonicQRegionForElems(Domain *domain)
{
const Real_t ptiny = Real_t(1.e-36) ;
Real_t monoq_max_slope = domain->monoq_max_slope ;
Real_t monoq_limiter_mult = domain->monoq_limiter_mult ;
Real_t qlc_monoq = domain->qlc_monoq;
Real_t qqc_monoq = domain->qqc_monoq;
Index_t elength = domain->numElem;
Index_t dimBlock= 128;
Index_t dimGrid = PAD_DIV(elength,dimBlock);
//elenago
#if 0
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("Regions ---> Rank %d, dimGrid: %d monoq_max_slope: %f, monoq_limiter_mult: %f qlc_monoq: %f qqc_monoq: %f elength: %d domain->qstop: %f bad_q_h=%d\n",
rank, dimGrid, monoq_max_slope, monoq_limiter_mult, qlc_monoq, qqc_monoq, elength, domain->qstop, *domain->bad_q_h);
#endif
hipLaunchKernelGGL(( CalcMonotonicQRegionForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
qlc_monoq,qqc_monoq,monoq_limiter_mult,monoq_max_slope,ptiny,elength,
domain->regElemlist.raw(),domain->elemBC.raw(),
domain->lxim.raw(),domain->lxip.raw(),
domain->letam.raw(),domain->letap.raw(),
domain->lzetam.raw(),domain->lzetap.raw(),
domain->delv_xi->raw(),domain->delv_eta->raw(),domain->delv_zeta->raw(),
domain->delx_xi->raw(),domain->delx_eta->raw(),domain->delx_zeta->raw(),
domain->vdov.raw(),domain->elemMass.raw(),domain->volo.raw(),domain->vnew->raw(),
domain->qq.raw(),domain->ql.raw(),
domain->q.raw(),
domain->qstop,
domain->bad_q_h
);
//hipDeviceSynchronize();
//cudaCheckError();
}
static
__device__ __forceinline__
void CalcPressureForElems_device(
Real_t& p_new, Real_t& bvc,
Real_t& pbvc, Real_t& e_old,
Real_t& compression, Real_t& vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax)
{
Real_t c1s = Real_t(2.0)/Real_t(3.0);
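/* Linear EOS: with compression = 1/v - 1, the pressure computed below is
   p = (2/3) * e / v, i.e. an ideal-gas-like form with gamma = 5/3. */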
Real_t p_temp = p_new;
bvc = c1s * (compression + Real_t(1.));
pbvc = c1s;
p_temp = bvc * e_old ;
if ( FABS(p_temp) < p_cut )
p_temp = Real_t(0.0) ;
if ( vnewc >= eosvmax ) /* impossible condition here? */
p_temp = Real_t(0.0) ;
if (p_temp < pmin)
p_temp = pmin ;
p_new = p_temp;
}
static
__device__ __forceinline__
void CalcSoundSpeedForElems_device(Real_t& vnewc, Real_t rho0, Real_t &enewc,
Real_t &pnewc, Real_t &pbvc,
Real_t &bvc, Real_t ss4o3, Index_t nz,
Real_t *ss, Index_t iz)
{
Real_t ssTmp = (pbvc * enewc + vnewc * vnewc *
bvc * pnewc) / rho0;
if (ssTmp <= Real_t(.1111111e-36)) {
ssTmp = Real_t(.3333333e-18);
}
else {
ssTmp = SQRT(ssTmp) ;
}
ss[iz] = ssTmp;
}
static
__device__
__forceinline__
void ApplyMaterialPropertiesForElems_device(
Real_t& eosvmin, Real_t& eosvmax,
Real_t* vnew, Real_t *v,
Real_t& vnewc, Index_t* bad_vol, Index_t zn)
{
vnewc = vnew[zn] ;
if (eosvmin != Real_t(0.)) {
if (vnewc < eosvmin)
vnewc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vnewc > eosvmax)
vnewc = eosvmax ;
}
// Now check for valid volume
Real_t vc = v[zn];
if (eosvmin != Real_t(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
if (vc <= 0.) {
*bad_vol = zn;
}
}
static
__device__
__forceinline__
void UpdateVolumesForElems_device(Index_t numElem, Real_t& v_cut,
Real_t *vnew,
Real_t *v,
int i)
{
Real_t tmpV ;
tmpV = vnew[i] ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
v[i] = tmpV ;
}
static
__device__
__forceinline__
void CalcEnergyForElems_device(Real_t& p_new, Real_t& e_new, Real_t& q_new,
Real_t& bvc, Real_t& pbvc,
Real_t& p_old, Real_t& e_old, Real_t& q_old,
Real_t& compression, Real_t& compHalfStep,
Real_t& vnewc, Real_t& work, Real_t& delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t& qq, Real_t& ql,
Real_t& rho0,
Real_t& eosvmax,
Index_t length)
{
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Real_t pHalfStep;
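/* Predictor-corrector energy update: advance e to the half step, evaluate a
   half-step pressure and artificial viscosity, correct e, then re-evaluate
   p and q at the full step with a final 1/6-weighted correction. */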
e_new = e_old - Real_t(0.5) * delvc * (p_old + q_old)
+ Real_t(0.5) * work;
if (e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax);
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep) ;
if ( delvc > Real_t(0.) ) {
q_new = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vhalf * vhalf * bvc * pHalfStep ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc =Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
}
e_new = e_new + Real_t(0.5) * delvc
* ( Real_t(3.0)*(p_old + q_old)
- Real_t(4.0)*(pHalfStep + q_new)) ;
e_new += Real_t(0.5) * work;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
Real_t q_tilde ;
if (delvc > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql + qq) ;
}
e_new = e_new - ( Real_t(7.0)*(p_old + q_old)
- Real_t(8.0)*(pHalfStep + q_new)
+ (p_new + q_tilde)) * delvc*sixth ;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
if ( delvc <= Real_t(0.) ) {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
if (FABS(q_new) < q_cut) q_new = Real_t(0.) ;
}
return ;
}
__device__ inline
Index_t giveMyRegion(const Index_t* regCSR,const Index_t i, const Index_t numReg)
{
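// regCSR holds cumulative (prefix-sum) element counts per region, so a
// linear scan over the region boundaries finds the region owning element i.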
for(Index_t reg = 0; reg < numReg-1; reg++)
if(i < regCSR[reg])
return reg;
return (numReg-1);
}
__global__
void ApplyMaterialPropertiesAndUpdateVolume_kernel(
Index_t length,
Real_t rho0,
Real_t e_cut,
Real_t emin,
Real_t* __restrict__ ql,
Real_t* __restrict__ qq,
Real_t* __restrict__ vnew,
Real_t* __restrict__ v,
Real_t pmin,
Real_t p_cut,
Real_t q_cut,
Real_t eosvmin,
Real_t eosvmax,
Index_t* __restrict__ regElemlist,
// const Index_t* __restrict__ regElemlist,
Real_t* __restrict__ e,
Real_t* __restrict__ delv,
Real_t* __restrict__ p,
Real_t* __restrict__ q,
Real_t ss4o3,
Real_t* __restrict__ ss,
Real_t v_cut,
Index_t* __restrict__ bad_vol,
const Int_t cost,
const Index_t* regCSR,
const Index_t* regReps,
const Index_t numReg
)
{
Real_t e_old, delvc, p_old, q_old, e_temp, delvc_temp, p_temp, q_temp;
Real_t compression, compHalfStep;
Real_t qq_old, ql_old, qq_temp, ql_temp, work;
Real_t p_new, e_new, q_new;
Real_t bvc, pbvc, vnewc;
Index_t i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zidx = regElemlist[i] ;
ApplyMaterialPropertiesForElems_device
(eosvmin,eosvmax,vnew,v,vnewc,bad_vol,zidx);
/********************** Start EvalEOSForElems **************************/
// Here we need to find out what region this element belongs to and what is the rep value!
Index_t region = giveMyRegion(regCSR,i,numReg);
Index_t rep = regReps[region];
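// rep is the number of times the EOS evaluation is repeated for this
// region; it emulates regions with more expensive materials without
// changing the final result, since each pass restarts from the same
// *_temp values.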
e_temp = e[zidx];
p_temp = p[zidx];
q_temp = q[zidx];
qq_temp = qq[zidx] ;
ql_temp = ql[zidx] ;
delvc_temp = delv[zidx];
for(int r=0; r < rep; r++)
{
e_old = e_temp;
p_old = p_temp;
q_old = q_temp;
qq_old = qq_temp;
ql_old = ql_temp;
delvc = delvc_temp;
work = Real_t(0.);
Real_t vchalf ;
compression = Real_t(1.) / vnewc - Real_t(1.);
vchalf = vnewc - delvc * Real_t(.5);
compHalfStep = Real_t(1.) / vchalf - Real_t(1.);
if ( eosvmin != Real_t(0.) ) {
if (vnewc <= eosvmin) { /* impossible due to calling func? */
compHalfStep = compression ;
}
}
if ( eosvmax != Real_t(0.) ) {
if (vnewc >= eosvmax) { /* impossible due to calling func? */
p_old = Real_t(0.) ;
compression = Real_t(0.) ;
compHalfStep = Real_t(0.) ;
}
}
// qq_old = qq[zidx] ;
// ql_old = ql[zidx] ;
// work = Real_t(0.) ;
CalcEnergyForElems_device(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq_old, ql_old, rho0, eosvmax, length);
}//end for rep
p[zidx] = p_new ;
e[zidx] = e_new ;
q[zidx] = q_new ;
CalcSoundSpeedForElems_device
(vnewc,rho0,e_new,p_new,pbvc,bvc,ss4o3,length,ss,zidx);
/********************** End EvalEOSForElems **************************/
UpdateVolumesForElems_device(length,v_cut,vnew,v,zidx);
}
}
static inline
void ApplyMaterialPropertiesAndUpdateVolume(Domain *domain)
{
Index_t length = domain->numElem ;
if (length != 0) {
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(length,dimBlock);
hipLaunchKernelGGL(( ApplyMaterialPropertiesAndUpdateVolume_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,
domain->refdens,
domain->e_cut,
domain->emin,
domain->ql.raw(),
domain->qq.raw(),
domain->vnew->raw(),
domain->v.raw(),
domain->pmin,
domain->p_cut,
domain->q_cut,
domain->eosvmin,
domain->eosvmax,
domain->regElemlist.raw(),
domain->e.raw(),
domain->delv.raw(),
domain->p.raw(),
domain->q.raw(),
domain->ss4o3,
domain->ss.raw(),
domain->v_cut,
domain->bad_vol_h,
domain->cost,
domain->regCSR.raw(),
domain->regReps.raw(),
domain->numReg
);
//hipDeviceSynchronize();
//cudaCheckError();
}
}
static inline
void LagrangeElements(Domain *domain)
{
int allElem = domain->numElem + /* local elem */
2*domain->sizeX*domain->sizeY + /* plane ghosts */
2*domain->sizeX*domain->sizeZ + /* row ghosts */
2*domain->sizeY*domain->sizeZ ; /* col ghosts */
domain->vnew = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dxx = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dyy = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dzz = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_xi = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_eta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_zeta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delv_xi = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_eta = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_zeta = Allocator< Vector_d<Real_t> >::allocate(allElem);
/*
#if USE_MPI
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true) ;
#endif
*/
/*********************************************/
/* Calc Kinematics and Monotonic Q Gradient */
/*********************************************/
CalcKinematicsAndMonotonicQGradient(domain);
#if USE_MPI
//printf("Rank %d prima di CommRecv LagrangeElements\n", globalRank);
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, 0) ;
Domain_member fieldData[3] ;
// initialize pointers
domain->d_delv_xi = domain->delv_xi->raw();
domain->d_delv_eta = domain->delv_eta->raw();
domain->d_delv_zeta = domain->delv_zeta->raw();
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
//printf("Rank %d prima di CommSendGpu LagrangeElements\n", globalRank);
CommSendGpu(*domain, MSG_MONOQ, 3, fieldData,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, domain->streams[2], 0) ;
//printf("Rank %d prima di CommMonoQGpu LagrangeElements\n", globalRank);
CommMonoQGpu(*domain, domain->streams[2], 0) ;
//printf("Rank %d fuori da CommMonoQGpu LagrangeElements\n", globalRank);
#endif
Allocator<Vector_d<Real_t> >::free(domain->dxx,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dyy,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dzz,domain->numElem);
/**********************************
* Calc Monotonic Q Region
**********************************/
CalcMonotonicQRegionForElems(domain);
Allocator<Vector_d<Real_t> >::free(domain->delx_xi,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_eta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_zeta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_xi,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_eta,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_zeta,allElem);
// printf("\n --Start of ApplyMaterials! \n");
ApplyMaterialPropertiesAndUpdateVolume(domain) ;
// printf("\n --End of ApplyMaterials! \n");
Allocator<Vector_d<Real_t> >::free(domain->vnew,domain->numElem);
}
template<int block_size>
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcTimeConstraintsForElems_kernel(
Index_t length,
Real_t qqc2,
Real_t dvovmax,
Index_t *matElemlist,
Real_t *ss,
Real_t *vdov,
Real_t *arealg,
Real_t *dev_mindtcourant,
Real_t *dev_mindthydro)
{
int tid = threadIdx.x;
int i=blockDim.x*blockIdx.x + tid;
__shared__ volatile Real_t s_mindthydro[block_size];
__shared__ volatile Real_t s_mindtcourant[block_size];
Real_t mindthydro = Real_t(1.0e+20) ;
Real_t mindtcourant = Real_t(1.0e+20) ;
Real_t dthydro = mindthydro;
Real_t dtcourant = mindtcourant;
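// Grid-stride loop: each thread accumulates its own minimum hydro and
// courant timesteps over the elements it visits, before the block-wide
// shared-memory reduction below.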
while (i<length) {
Index_t indx = matElemlist[i] ;
Real_t vdov_tmp = vdov[indx];
// Computing dt_hydro
if (vdov_tmp != Real_t(0.)) {
Real_t dtdvov = dvovmax / (FABS(vdov_tmp)+Real_t(1.e-20)) ;
if ( dthydro > dtdvov ) {
dthydro = dtdvov ;
}
}
if (dthydro < mindthydro)
mindthydro = dthydro;
// Computing dt_courant
Real_t ss_tmp = ss[indx];
Real_t area_tmp = arealg[indx];
Real_t dtf = ss_tmp * ss_tmp ;
dtf += ((vdov_tmp < 0.) ? qqc2*area_tmp*area_tmp*vdov_tmp*vdov_tmp : 0.);
dtf = area_tmp / SQRT(dtf) ;
/* determine minimum timestep with its corresponding elem */
if (vdov_tmp != Real_t(0.) && dtf < dtcourant) {
dtcourant = dtf ;
}
if (dtcourant< mindtcourant)
mindtcourant= dtcourant;
i += gridDim.x*blockDim.x;
}
s_mindthydro[tid] = mindthydro;
s_mindtcourant[tid] = mindtcourant;
__syncthreads();
// Do shared memory reduction
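// Pairwise tree reduction in shared memory.  The steps with stride < 32
// omit __syncthreads() and rely on the volatile shared arrays plus
// implicit warp-synchronous execution (a pre-Volta assumption; newer
// architectures would need __syncwarp() here).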
if (block_size >= 1024) {
if (tid < 512) {
s_mindthydro[tid] = min( s_mindthydro[tid] , s_mindthydro[tid + 512]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 512]) ; }
__syncthreads(); }
if (block_size >= 512) {
if (tid < 256) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 256]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 256]) ; }
__syncthreads(); }
if (block_size >= 256) {
if (tid < 128) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 128]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 128]) ; }
__syncthreads(); }
if (block_size >= 128) {
if (tid < 64) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 64]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 64]) ; }
__syncthreads(); }
if (tid < 32) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 32]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 32]) ;
}
if (tid < 16) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 16]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 16]) ;
}
if (tid < 8) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 8]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 8]) ;
}
if (tid < 4) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 4]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 4]) ;
}
if (tid < 2) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 2]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 2]) ;
}
if (tid < 1) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 1]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 1]) ;
}
// Store in global memory
if (tid==0) {
dev_mindtcourant[blockIdx.x] = s_mindtcourant[0];
dev_mindthydro[blockIdx.x] = s_mindthydro[0];
}
}
template <int block_size>
__global__
void CalcMinDtOneBlock(Real_t* dev_mindthydro, Real_t* dev_mindtcourant, Real_t* dtcourant, Real_t* dthydro, Index_t shared_array_size)
{
volatile __shared__ Real_t s_data[block_size];
int tid = threadIdx.x;
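// Launched with two blocks: block 0 reduces the per-block courant minima
// into *dtcourant, block 1 reduces the per-block hydro minima into *dthydro.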
if (blockIdx.x==0)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindtcourant[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dtcourant)= s_data[0];
}
}
else if (blockIdx.x==1)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindthydro[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dthydro) = s_data[0];
}
}
}
static inline
void CalcTimeConstraintsForElems(Domain* domain)
{
Real_t qqc = domain->qqc;
Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
Real_t dvovmax = domain->dvovmax ;
const Index_t length = domain->numElem;
const int max_dimGrid = 1024;
const int dimBlock = 128;
int dimGrid=::min(max_dimGrid,PAD_DIV(length,dimBlock));
hipFuncSetCacheConfig(CalcTimeConstraintsForElems_kernel<dimBlock>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( CalcTimeConstraintsForElems_kernel<dimBlock>) , dim3(dimGrid),dim3(dimBlock), 0, 0,
length,qqc2,dvovmax,
domain->matElemlist.raw(),domain->ss.raw(),domain->vdov.raw(),domain->arealg.raw(),
domain->dev_mindtcourant->raw(), domain->dev_mindthydro->raw());
// TODO: if dimGrid < 1024, should launch fewer threads
hipLaunchKernelGGL(( CalcMinDtOneBlock<max_dimGrid>) , dim3(2),dim3(max_dimGrid), max_dimGrid*sizeof(Real_t), domain->streams[1], domain->dev_mindthydro->raw(),
domain->dev_mindtcourant->raw(),
domain->dtcourant_h,domain->dthydro_h, dimGrid);
hipEventRecord(domain->time_constraint_computed[currIter+1], domain->streams[1]);
}
//elenago lagrange
static inline
void LagrangeLeapFrogPt1(Domain* domain)
{
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
//LagrangeNodal(domain);
// ------------------------------ LAGRANGE NODAL -------------------------------
//#ifdef SEDOV_SYNC_POS_VEL_EARLY
Domain_member fieldData[6] ;
//#endif
#if 0
#if USE_MPI
CommRecv(*domain, MSG_COMM_SBN, 3,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, 0) ;
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*domain, MSG_SYNC_POS_VEL, 6,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, 1) ;
#endif
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, 2) ;
MPI_Barrier(MPI_COMM_WORLD);
#endif
#endif
Real_t u_cut = domain->u_cut ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
//CalcForceForNodes(domain);
CalcVolumeForceForElems(domain);
// moved here from the main loop to allow async execution with GPU work
// TimeIncrement(domain);
#if USE_MPI
fieldData[0] = &Domain::get_fx ;
fieldData[1] = &Domain::get_fy ;
fieldData[2] = &Domain::get_fz ;
CommSendGpu(*domain, MSG_COMM_SBN, 3, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, domain->streams[2], 0) ;
CommSBNGpu(*domain, 3, fieldData, &domain->streams[2], 0) ;
#endif
CalcAccelerationForNodes(domain);
ApplyAccelerationBoundaryConditionsForNodes(domain);
}
static inline
void LagrangeLeapFrogPt2(Domain* domain)
{
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
//LagrangeNodal(domain);
// ------------------------------ LAGRANGE NODAL -------------------------------
Domain_member fieldData[6] ;
Real_t u_cut = domain->u_cut ;
//deltatime_h is used here
CalcPositionAndVelocityForNodes(u_cut, domain);
#if 0
hipDeviceSynchronize();
testVolume(domain);
hipDeviceSynchronize();
checkErrors(domain,1,myRank);
#endif
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
CommSendGpu(*domain, MSG_SYNC_POS_VEL, 6, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, domain->streams[2], 1) ;
CommSyncPosVelGpu(*domain, &domain->streams[2], 1) ;
#endif
#endif
// ------------------------------------------------------------------------
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
//LagrangeElements(domain);
// ------------------------------ LAGRANGE ELEMENTS -------------------------------
//deltatime_h is used here
CalcKinematicsAndMonotonicQGradient(domain);
//hipDeviceSynchronize();
// checkErrors(domain,5,myRank);
#if USE_MPI
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
CommSendGpu(*domain, MSG_MONOQ, 3, fieldData,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, domain->streams[2], 2) ;
CommMonoQGpu(*domain, domain->streams[2], 2) ;
#endif
//deltatime_h is used here
CalcMonotonicQRegionForElems(domain);
ApplyMaterialPropertiesAndUpdateVolume(domain) ;
//deltatime_h is used here
CalcTimeConstraintsForElems(domain);
}
static inline
void LagrangeLeapFrog(Domain* domain)
{
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
//LagrangeNodal(domain);
// ------------------------------ LAGRANGE NODAL -------------------------------
//#ifdef SEDOV_SYNC_POS_VEL_EARLY
Domain_member fieldData[6] ;
//#endif
#if 0
#if USE_MPI
CommRecv(*domain, MSG_COMM_SBN, 3,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, 0) ;
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*domain, MSG_SYNC_POS_VEL, 6,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, 1) ;
#endif
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, 2) ;
MPI_Barrier(MPI_COMM_WORLD);
#endif
#endif
Real_t u_cut = domain->u_cut ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
//CalcForceForNodes(domain);
CalcVolumeForceForElems(domain);
// moved here from the main loop to allow async execution with GPU work
// TimeIncrement(domain);
#if USE_MPI
fieldData[0] = &Domain::get_fx ;
fieldData[1] = &Domain::get_fy ;
fieldData[2] = &Domain::get_fz ;
CommSendGpu(*domain, MSG_COMM_SBN, 3, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, domain->streams[2], 0) ;
CommSBNGpu(*domain, 3, fieldData, &domain->streams[2], 0) ;
#endif
CalcAccelerationForNodes(domain);
ApplyAccelerationBoundaryConditionsForNodes(domain);
//deltatime_h is used here
CalcPositionAndVelocityForNodes(u_cut, domain);
#if 0
hipDeviceSynchronize();
testVolume(domain);
hipDeviceSynchronize();
checkErrors(domain,1,myRank);
#endif
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
CommSendGpu(*domain, MSG_SYNC_POS_VEL, 6, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, domain->streams[2], 1) ;
CommSyncPosVelGpu(*domain, &domain->streams[2], 1) ;
#endif
#endif
// ------------------------------------------------------------------------
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
//LagrangeElements(domain);
// ------------------------------ LAGRANGE ELEMENTS -------------------------------
//deltatime_h is used here
CalcKinematicsAndMonotonicQGradient(domain);
//hipDeviceSynchronize();
// checkErrors(domain,5,myRank);
#if USE_MPI
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
CommSendGpu(*domain, MSG_MONOQ, 3, fieldData,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, domain->streams[2], 2) ;
CommMonoQGpu(*domain, domain->streams[2], 2) ;
#endif
CalcMonotonicQRegionForElems(domain);
ApplyMaterialPropertiesAndUpdateVolume(domain) ;
// --------------------------------------------------------------------------------
//event next iteration
//deltatime_h is used here
CalcTimeConstraintsForElems(domain);
// hipDeviceSynchronize();
}
void printUsage(char* argv[])
{
printf("Usage: \n");
printf("Unstructured grid: %s -u <file.lmesh> \n", argv[0]) ;
printf("Structured grid: %s -s numEdgeElems \n", argv[0]) ;
printf("\nExamples:\n") ;
printf("%s -s 45\n", argv[0]) ;
printf("%s -u sedov15oct.lmesh\n", argv[0]) ;
}
#ifdef SAMI
#ifdef __cplusplus
extern "C" {
#endif
#include "silo.h"
#ifdef __cplusplus
}
#endif
#define MAX_LEN_SAMI_HEADER 10
#define SAMI_HDR_NUMBRICK 0
#define SAMI_HDR_NUMNODES 3
#define SAMI_HDR_NUMMATERIAL 4
#define SAMI_HDR_INDEX_START 6
#define SAMI_HDR_MESHDIM 7
#define MAX_ADJACENCY 14 /* must be 14 or greater */
void DumpSAMI(Domain *domain, char *name)
{
DBfile *fp ;
int headerLen = MAX_LEN_SAMI_HEADER ;
int headerInfo[MAX_LEN_SAMI_HEADER];
char varName[] = "brick_nd0";
char coordName[] = "x";
int version = 121 ;
int numElem = int(domain->numElem) ;
int numNode = int(domain->numNode) ;
int count ;
int *materialID ;
int *nodeConnect ;
double *nodeCoord ;
if ((fp = DBCreate(name, DB_CLOBBER, DB_LOCAL,
NULL, DB_PDB)) == NULL)
{
printf("Couldn't create file %s\n", name) ;
exit(1);
}
for (int i=0; i<MAX_LEN_SAMI_HEADER; ++i) {
headerInfo[i] = 0 ;
}
headerInfo[SAMI_HDR_NUMBRICK] = numElem ;
headerInfo[SAMI_HDR_NUMNODES] = numNode ;
headerInfo[SAMI_HDR_NUMMATERIAL] = 1 ;
headerInfo[SAMI_HDR_INDEX_START] = 1 ;
headerInfo[SAMI_HDR_MESHDIM] = 3 ;
DBWrite(fp, "mesh_data", headerInfo, &headerLen, 1, DB_INT) ;
count = 1 ;
DBWrite(fp, "version", &version, &count, 1, DB_INT) ;
nodeConnect = new int[numElem] ;
Vector_h<Index_t> nodelist_h = domain->nodelist;
for (Index_t i=0; i<8; ++i)
{
for (Index_t j=0; j<numElem; ++j) {
nodeConnect[j] = int(nodelist_h[i*domain->padded_numElem + j]) + 1 ;
}
varName[8] = '0' + i;
DBWrite(fp, varName, nodeConnect, &numElem, 1, DB_INT) ;
}
delete [] nodeConnect ;
nodeCoord = new double[numNode] ;
Vector_h<Real_t> x_h = domain->x;
Vector_h<Real_t> y_h = domain->y;
Vector_h<Real_t> z_h = domain->z;
for (Index_t i=0; i<3; ++i)
{
for (Index_t j=0; j<numNode; ++j) {
Real_t coordVal ;
switch(i) {
case 0: coordVal = double(x_h[j]) ; break ;
case 1: coordVal = double(y_h[j]) ; break ;
case 2: coordVal = double(z_h[j]) ; break ;
}
nodeCoord[j] = coordVal ;
}
coordName[0] = 'x' + i ;
DBWrite(fp, coordName, nodeCoord, &numNode, 1, DB_DOUBLE) ;
}
delete [] nodeCoord ;
materialID = new int[numElem] ;
for (Index_t i=0; i<numElem; ++i)
materialID[i] = 1 ;
DBWrite(fp, "brick_material", materialID, &numElem, 1, DB_INT) ;
delete [] materialID ;
DBClose(fp);
}
#endif
#ifdef SAMI
void DumpDomain(Domain *domain)
{
char meshName[64] ;
printf("Dumping SAMI file\n");
sprintf(meshName, "sedov_%d.sami", int(domain->cycle)) ;
DumpSAMI(domain, meshName) ;
}
#endif
void write_solution(Domain* locDom)
{
Vector_h<Real_t> x_h = locDom->x;
Vector_h<Real_t> y_h = locDom->y;
Vector_h<Real_t> z_h = locDom->z;
// printf("Writing solution to file xyz.asc\n");
std::stringstream filename;
filename << "xyz.asc";
FILE *fout = fopen(filename.str().c_str(),"wb");
for (Index_t i=0; i<locDom->numNode; i++) {
fprintf(fout,"%10d\n",i);
fprintf(fout,"%.10f\n",x_h[i]);
fprintf(fout,"%.10f\n",y_h[i]);
fprintf(fout,"%.10f\n",z_h[i]);
}
fclose(fout);
}
///////////////////////////////////////////////////////////////////////////
void InitMeshDecomp(Int_t numRanks, Int_t myRank,
Int_t *col, Int_t *row, Int_t *plane, Int_t *side)
{
Int_t testProcs;
Int_t dx, dy, dz;
Int_t myDom;
// Assume cube processor layout for now
testProcs = Int_t(cbrt(Real_t(numRanks))+0.5) ;
if (testProcs*testProcs*testProcs != numRanks) {
printf("Num processors must be a cube of an integer (1, 8, 27, ...)\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (sizeof(Real_t) != 4 && sizeof(Real_t) != 8) {
printf("MPI operations only support float and double right now...\n");
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (MAX_FIELDS_PER_MPI_COMM > CACHE_COHERENCE_PAD_REAL) {
printf("corner element comm buffers too small. Fix code.\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
dx = testProcs ;
dy = testProcs ;
dz = testProcs ;
// temporary test
if (dx*dy*dz != numRanks) {
printf("error -- must have as many domains as procs\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
Int_t remainder = dx*dy*dz % numRanks ;
if (myRank < remainder) {
myDom = myRank*( 1+ (dx*dy*dz / numRanks)) ;
}
else {
myDom = remainder*( 1+ (dx*dy*dz / numRanks)) +
(myRank - remainder)*(dx*dy*dz/numRanks) ;
}
*col = myDom % dx ;
*row = (myDom / dx) % dy ;
*plane = myDom / (dx*dy) ;
*side = testProcs;
return;
}
void VerifyAndWriteFinalOutput(Real_t elapsed_time,
Domain& locDom,
Int_t its,
Int_t nx,
Int_t numRanks)
{
size_t free_mem, total_mem, used_mem;
hipMemGetInfo(&free_mem, &total_mem);
used_mem= total_mem - free_mem;
#if LULESH_SHOW_PROGRESS == 0
printf(" Used Memory = %8.4f Mb\n", used_mem / (1024.*1024.) );
#endif
// GrindTime1 only takes a single domain into account, and is thus a good way to measure
// processor speed independent of MPI parallelism.
// GrindTime2 takes into account speedups from MPI parallelism
Real_t grindTime1 = ((elapsed_time*1e6)/its)/(nx*nx*nx);
Real_t grindTime2 = ((elapsed_time*1e6)/its)/(nx*nx*nx*numRanks);
// Copy Energy back to Host
Real_t e_zero;
Real_t* d_ezero_ptr = locDom.e.raw() + locDom.octantCorner; /* octant corner supposed to be 0 */
hipMemcpy(&e_zero, d_ezero_ptr, sizeof(Real_t), hipMemcpyDeviceToHost);
printf("Run completed: \n");
printf(" Problem size = %i \n", nx);
printf(" MPI tasks = %i \n", numRanks);
printf(" Iteration count = %i \n", its);
printf(" Final Origin Energy = %12.6e \n", e_zero);
Real_t MaxAbsDiff = Real_t(0.0);
Real_t TotalAbsDiff = Real_t(0.0);
Real_t MaxRelDiff = Real_t(0.0);
Real_t *e_all = new Real_t[nx * nx];
hipMemcpy(e_all, locDom.e.raw(), nx * nx * sizeof(Real_t), hipMemcpyDeviceToHost);
for (Index_t j=0; j<nx; ++j) {
for (Index_t k=j+1; k<nx; ++k) {
Real_t AbsDiff = FABS(e_all[j*nx+k]-e_all[k*nx+j]);
TotalAbsDiff += AbsDiff;
if (MaxAbsDiff <AbsDiff) MaxAbsDiff = AbsDiff;
Real_t RelDiff = AbsDiff / e_all[k*nx+j];
if (MaxRelDiff <RelDiff) MaxRelDiff = RelDiff;
}
}
delete [] e_all;
// Quick symmetry check
printf(" Testing Plane 0 of Energy Array on rank 0:\n");
printf(" MaxAbsDiff = %12.6e\n", MaxAbsDiff );
printf(" TotalAbsDiff = %12.6e\n", TotalAbsDiff );
printf(" MaxRelDiff = %12.6e\n\n", MaxRelDiff );
// Timing information
printf("\nElapsed time = %10.2f (s)\n", elapsed_time);
printf("Grind time (us/z/c) = %10.8g (per dom) (%10.8g overall)\n", grindTime1, grindTime2);
printf("FOM = %10.8g (z/s)\n\n", 1000.0/grindTime2); // zones per second
bool write_solution_flag=true;
if (write_solution_flag) {
write_solution(&locDom);
}
return ;
}
int main(int argc, char *argv[])
{
if (argc < 3) {
printUsage(argv);
exit( LFileError );
}
if ( strcmp(argv[1],"-u") != 0 && strcmp(argv[1],"-s") != 0 )
{
printUsage(argv);
exit( LFileError ) ;
}
int num_iters = -1;
if (argc == 5) {
num_iters = atoi(argv[4]);
}
//elenago global
totIters = num_iters;
currIter = 0;
bool structured = ( strcmp(argv[1],"-s") == 0 );
Int_t numRanks ;
Int_t myRank ;
#if USE_MPI
Domain_member fieldData ;
MPI_Init(&argc, &argv) ;
MPI_Comm_size(MPI_COMM_WORLD, &numRanks) ;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
globalRank = myRank;
#else
numRanks = 1;
myRank = 0;
#endif
cuda_init(myRank);
printf("MyRank: %d, gpuID: %d\n", myRank, gpuID);
//ASYNC
if(comm_use_comm())
{
printf("Calling comm_init\n");
comm_init(MPI_COMM_WORLD, gpuID);
}
/* assume cube subdomain geometry for now */
Index_t nx = atoi(argv[2]);
Domain *locDom ;
// Set up the mesh and decompose. Assumes regular cubes for now
Int_t col, row, plane, side;
InitMeshDecomp(numRanks, myRank, &col, &row, &plane, &side);
// TODO: change default nr to 11
Int_t nr = 11;
Int_t balance = 1;
Int_t cost = 1;
// TODO: modify this constructor to account for new fields
// TODO: setup communication buffers
locDom = NewDomain(argv, numRanks, col, row, plane, nx, side, structured, nr, balance, cost);
#if USE_MPI
if(comm_use_comm())
{
if(myRank == 0)
printf("comm_use_comm\n");
if(comm_use_async())
{
if(myRank == 0)
printf("comm_use_async\n");
}
}
// copy to the host for mpi transfer
locDom->h_nodalMass = locDom->nodalMass;
fieldData = &Domain::get_nodalMass;
printf("Primo CommRecv\n");
// Initial domain boundary communication
CommRecv(*locDom, MSG_COMM_SBN, 1,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
printf("Primo CommSend\n");
CommSend(*locDom, MSG_COMM_SBN, 1, &fieldData,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
printf("Primo CommSBN\n");
CommSBN(*locDom, 1, &fieldData, 0) ;
// copy back to the device
locDom->nodalMass = locDom->h_nodalMass;
// End initialization
MPI_Barrier(MPI_COMM_WORLD);
#endif
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
/* timestep to solution */
int its=0;
if (myRank == 0) {
if (structured)
printf("Running until t=%f, Problem size=%dx%dx%d\n",locDom->stoptime,nx,nx,nx);
else
printf("Running until t=%f, Problem size=%d \n",locDom->stoptime,locDom->numElem);
}
hipDeviceSynchronize();
elenagoAllocateElems(locDom);
MPI_Barrier(MPI_COMM_WORLD);
PUSH_RANGE("WARMUP", 1);
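// Warm-up: run 10 un-timed iterations to exercise the communication paths
// and GPU kernels, then reset the domain before the measured run below.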
for(int iterWarm=0; iterWarm < 10; iterWarm++)
{
#if USE_MPI
CommRecv(*locDom, MSG_COMM_SBN, 3,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*locDom, MSG_SYNC_POS_VEL, 6,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
false, false, 1) ;
#endif
CommRecv(*locDom, MSG_MONOQ, 3,
locDom->sizeX, locDom->sizeY, locDom->sizeZ,
true, true, 2) ;
//MPI_Barrier(MPI_COMM_WORLD);
#endif
LagrangeLeapFrogPt1(locDom) ;
// this has been moved after computation of volume forces to hide launch latencies
TimeIncrement(locDom) ;
LagrangeLeapFrogPt2(locDom) ;
// make sure GPU finished its work
// hipDeviceSynchronize();
comm_progress();
//checkErrors(locDom,its,myRank);
currIter++;
}
hipDeviceSynchronize();
POP_RANGE;
MPI_Barrier(MPI_COMM_WORLD);
currIter=0;
elenagoDellocateElems(locDom);
locDom = ResetDomain(locDom, argv, numRanks, col, row, plane, nx, side, structured, nr, balance, cost);
#if USE_MPI
// copy to the host for mpi transfer
locDom->h_nodalMass = locDom->nodalMass;
fieldData = &Domain::get_nodalMass;
// Initial domain boundary communication
CommRecv(*locDom, MSG_COMM_SBN, 1,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
CommSend(*locDom, MSG_COMM_SBN, 1, &fieldData,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
CommSBN(*locDom, 1, &fieldData, 0) ;
// copy back to the device
locDom->nodalMass = locDom->h_nodalMass;
// End initialization
MPI_Barrier(MPI_COMM_WORLD);
#endif
hipProfilerStart();
//hipProfilerStop();
hipDeviceSynchronize();
elenagoAllocateElems(locDom);
MPI_Barrier(MPI_COMM_WORLD);
#if USE_MPI
double start = MPI_Wtime();
#else
timeval start;
gettimeofday(&start, NULL) ;
#endif
while(locDom->time_h < locDom->stoptime)
{
/*
PUSH_RANGE("TIME", 7);
// this has been moved after computation of volume forces to hide launch latencies
TimeIncrementAsync(locDom) ;
POP_RANGE;
*/
#if USE_MPI
CommRecv(*locDom, MSG_COMM_SBN, 3,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*locDom, MSG_SYNC_POS_VEL, 6,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
false, false, 1) ;
#endif
CommRecv(*locDom, MSG_MONOQ, 3,
locDom->sizeX, locDom->sizeY, locDom->sizeZ,
true, true, 2) ;
//MPI_Barrier(MPI_COMM_WORLD);
#endif
PUSH_RANGE("PT1", 6);
LagrangeLeapFrogPt1(locDom) ;
POP_RANGE;
PUSH_RANGE("TIME", 7);
TimeIncrement(locDom) ;
POP_RANGE;
PUSH_RANGE("PT2", 1);
LagrangeLeapFrogPt2(locDom) ;
POP_RANGE;
// make sure GPU finished its work
// hipDeviceSynchronize();
PUSH_RANGE("PROGRESS", 2);
if((its%4) == 1)
comm_progress();
POP_RANGE;
//checkErrors(locDom,its,myRank);
#if 0
#if LULESH_SHOW_PROGRESS
hipDeviceSynchronize();
if (myRank == 0)
printf("cycle = %d, time = %e, dt=%e\n", its+1, double(locDom->time_h), double(locDom->deltatime_h_async[0]) ) ;
#endif
#endif
its++;
if (its == num_iters) break;
currIter++;
}
hipDeviceSynchronize();
// Use reduced max elapsed time
double elapsed_time;
#if USE_MPI
elapsed_time = MPI_Wtime() - start;
#else
timeval end;
gettimeofday(&end, NULL) ;
elapsed_time = (double)(end.tv_sec - start.tv_sec) + ((double)(end.tv_usec - start.tv_usec))/1000000 ;
#endif
double elapsed_timeG;
#if USE_MPI
MPI_Reduce(&elapsed_time, &elapsed_timeG, 1, MPI_DOUBLE,
MPI_MAX, 0, MPI_COMM_WORLD);
#else
elapsed_timeG = elapsed_time;
#endif
// hipProfilerStop();
elenagoDellocateElems(locDom);
if (myRank == 0)
VerifyAndWriteFinalOutput(elapsed_timeG, *locDom, its, nx, numRanks);
#ifdef SAMI
DumpDomain(locDom) ;
#endif
hipDeviceReset();
#if USE_MPI
//ASYNC
if(comm_use_comm())
comm_finalize();
MPI_Finalize() ;
#endif
return 0 ;
}
| c8b64aa71c98b6e17ca4c9d4623d26d1731a7616.cu | /*
Copyright (c) 2010.
Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory.
LLNL-CODE-461231
All rights reserved.
This file is part of LULESH, Version 1.0.
Please also read this link -- http://www.opensource.org/licenses/index.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Additional BSD Notice
1. This notice is required to be provided under our contract with the U.S.
Department of Energy (DOE). This work was produced at Lawrence Livermore
National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
2. Neither the United States Government nor Lawrence Livermore National
Security, LLC nor any of their employees, makes any warranty, express
or implied, or assumes any liability or responsibility for the accuracy,
completeness, or usefulness of any information, apparatus, product, or
process disclosed, or represents that its use would not infringe
privately-owned rights.
3. Also, reference herein to any specific commercial products, process, or
services by trade name, trademark, manufacturer or otherwise does not
necessarily constitute or imply its endorsement, recommendation, or
favoring by the United States Government or Lawrence Livermore National
Security, LLC. The views and opinions of authors expressed herein do not
necessarily state or reflect those of the United States Government or
Lawrence Livermore National Security, LLC, and shall not be used for
advertising or product endorsement purposes.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <util.h>
#include <sm_utils.inl>
#include <cuda.h>
#include <allocator.h>
#include "cuda_profiler_api.h"
#ifdef USE_MPI
#include <mpi.h>
#include "comm.h"
#endif
#include <sys/time.h>
#include <unistd.h>
#include "lulesh.h"
static int globalRank=0;
static int totIters=0;
static int currIter=0;
static int gpuID=0;
/****************************************************/
/* Allow flexibility for arithmetic representations */
/****************************************************/
__device__ inline real4 SQRT(real4 arg) { return sqrtf(arg) ; }
__device__ inline real8 SQRT(real8 arg) { return sqrt(arg) ; }
__device__ inline real4 CBRT(real4 arg) { return cbrtf(arg) ; }
__device__ inline real8 CBRT(real8 arg) { return cbrt(arg) ; }
__device__ __host__ inline real4 FABS(real4 arg) { return fabsf(arg) ; }
__device__ __host__ inline real8 FABS(real8 arg) { return fabs(arg) ; }
__device__ inline real4 FMAX(real4 arg1,real4 arg2) { return fmaxf(arg1,arg2) ; }
__device__ inline real8 FMAX(real8 arg1,real8 arg2) { return fmax(arg1,arg2) ; }
#define MAX(a, b) ( ((a) > (b)) ? (a) : (b))
/* Stuff needed for boundary conditions */
/* 2 BCs on each of 6 hexahedral faces (12 bits) */
#define XI_M 0x00007
#define XI_M_SYMM 0x00001
#define XI_M_FREE 0x00002
#define XI_M_COMM 0x00004
#define XI_P 0x00038
#define XI_P_SYMM 0x00008
#define XI_P_FREE 0x00010
#define XI_P_COMM 0x00020
#define ETA_M 0x001c0
#define ETA_M_SYMM 0x00040
#define ETA_M_FREE 0x00080
#define ETA_M_COMM 0x00100
#define ETA_P 0x00e00
#define ETA_P_SYMM 0x00200
#define ETA_P_FREE 0x00400
#define ETA_P_COMM 0x00800
#define ZETA_M 0x07000
#define ZETA_M_SYMM 0x01000
#define ZETA_M_FREE 0x02000
#define ZETA_M_COMM 0x04000
#define ZETA_P 0x38000
#define ZETA_P_SYMM 0x08000
#define ZETA_P_FREE 0x10000
#define ZETA_P_COMM 0x20000
#define VOLUDER(a0,a1,a2,a3,a4,a5,b0,b1,b2,b3,b4,b5,dvdc) \
{ \
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ; \
\
dvdc= \
((a1) + (a2)) * ((b0) + (b1)) - ((a0) + (a1)) * ((b1) + (b2)) + \
((a0) + (a4)) * ((b3) + (b4)) - ((a3) + (a4)) * ((b0) + (b4)) - \
((a2) + (a5)) * ((b3) + (b5)) + ((a3) + (a5)) * ((b2) + (b5)); \
dvdc *= twelfth; \
}
/*
__device__
static
__forceinline__
void SumOverNodes(Real_t& val, volatile Real_t* smem, int cta_elem, int node) {
int tid = (cta_elem << 3) + node;
smem[tid] = val;
if (node < 4)
{
smem[tid] += smem[tid+4];
smem[tid] += smem[tid+2];
smem[tid] += smem[tid+1];
}
val = smem[(cta_elem << 3)];
}
*/
__device__
static
__forceinline__
void SumOverNodesShfl(Real_t& val) {
val += utils::shfl_xor( val, 4, 8);
val += utils::shfl_xor( val, 2, 8);
val += utils::shfl_xor( val, 1, 8);
}
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t x4, const Real_t x5,
const Real_t x6, const Real_t x7,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t y4, const Real_t y5,
const Real_t y6, const Real_t y7,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3,
const Real_t z4, const Real_t z5,
const Real_t z6, const Real_t z7 )
{
Real_t twelveth = Real_t(1.0)/Real_t(12.0);
Real_t dx61 = x6 - x1;
Real_t dy61 = y6 - y1;
Real_t dz61 = z6 - z1;
Real_t dx70 = x7 - x0;
Real_t dy70 = y7 - y0;
Real_t dz70 = z7 - z0;
Real_t dx63 = x6 - x3;
Real_t dy63 = y6 - y3;
Real_t dz63 = z6 - z3;
Real_t dx20 = x2 - x0;
Real_t dy20 = y2 - y0;
Real_t dz20 = z2 - z0;
Real_t dx50 = x5 - x0;
Real_t dy50 = y5 - y0;
Real_t dz50 = z5 - z0;
Real_t dx64 = x6 - x4;
Real_t dy64 = y6 - y4;
Real_t dz64 = z6 - z4;
Real_t dx31 = x3 - x1;
Real_t dy31 = y3 - y1;
Real_t dz31 = z3 - z1;
Real_t dx72 = x7 - x2;
Real_t dy72 = y7 - y2;
Real_t dz72 = z7 - z2;
Real_t dx43 = x4 - x3;
Real_t dy43 = y4 - y3;
Real_t dz43 = z4 - z3;
Real_t dx57 = x5 - x7;
Real_t dy57 = y5 - y7;
Real_t dz57 = z5 - z7;
Real_t dx14 = x1 - x4;
Real_t dy14 = y1 - y4;
Real_t dz14 = z1 - z4;
Real_t dx25 = x2 - x5;
Real_t dy25 = y2 - y5;
Real_t dz25 = z2 - z5;
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
// 11 + 3*14
Real_t volume =
TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
dy31 + dy72, dy63, dy20,
dz31 + dz72, dz63, dz20) +
TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
dy43 + dy57, dy64, dy70,
dz43 + dz57, dz64, dz70) +
TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
dy14 + dy25, dy61, dy50,
dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
volume *= twelveth;
return volume ;
}
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{
return CalcElemVolume( x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7],
z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]);
}
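// Bind this MPI rank to a CUDA device and sanity-check it; aborts if no device
// is present, the requested device index is out of range, or the device
// predates SM 3.0.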
void cuda_init(int rank)
{
Int_t deviceCount, dev;
cudaDeviceProp cuda_deviceProp;
cudaSafeCall( cudaGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
fprintf(stderr, "cuda_init(): no devices supporting CUDA.\n");
exit(1);
}
//dev = rank % deviceCount;
dev = comm_select_device(rank);
if ((dev < 0) || (dev > deviceCount-1)) {
fprintf(stderr, "cuda_init(): requested device (%d) out of range [%d,%d]\n",
dev, 0, deviceCount-1);
exit(1);
}
cudaSafeCall( cudaSetDevice(dev) );
struct cudaDeviceProp props;
cudaGetDeviceProperties(&props, dev);
char hostname[256];
gethostname(hostname, sizeof(hostname));
printf("Host %s using GPU %i: %s\n", hostname, dev, props.name);
cudaSafeCall( cudaGetDeviceProperties(&cuda_deviceProp, dev) );
if (cuda_deviceProp.major < 3) {
fprintf(stderr, "cuda_init(): This implementation of Lulesh requires SM 3.0+, but device %d is older.\n", dev);
exit(1);
}
#if CUDART_VERSION < 5000
fprintf(stderr,"cuda_init(): This implementation of Lulesh uses texture objects, which require CUDA 5.0+.\n");
exit(1);
#endif
gpuID = dev;
}
void AllocateNodalPersistent(Domain* domain, size_t domNodes)
{
domain->x.resize(domNodes) ; /* coordinates */
domain->y.resize(domNodes) ;
domain->z.resize(domNodes) ;
domain->xd.resize(domNodes) ; /* velocities */
domain->yd.resize(domNodes) ;
domain->zd.resize(domNodes) ;
domain->xdd.resize(domNodes) ; /* accelerations */
domain->ydd.resize(domNodes) ;
domain->zdd.resize(domNodes) ;
domain->fx.resize(domNodes) ; /* forces */
domain->fy.resize(domNodes) ;
domain->fz.resize(domNodes) ;
domain->nodalMass.resize(domNodes) ; /* mass */
}
void AllocateElemPersistent(Domain* domain, size_t domElems, size_t padded_domElems)
{
domain->matElemlist.resize(domElems) ; /* material indexset */
domain->nodelist.resize(8*padded_domElems) ; /* elemToNode connectivity */
domain->lxim.resize(domElems) ; /* elem connectivity through face */
domain->lxip.resize(domElems) ;
domain->letam.resize(domElems) ;
domain->letap.resize(domElems) ;
domain->lzetam.resize(domElems) ;
domain->lzetap.resize(domElems) ;
domain->elemBC.resize(domElems) ; /* elem face symm/free-surf flag */
domain->e.resize(domElems) ; /* energy */
domain->p.resize(domElems) ; /* pressure */
domain->q.resize(domElems) ; /* q */
domain->ql.resize(domElems) ; /* linear term for q */
domain->qq.resize(domElems) ; /* quadratic term for q */
domain->v.resize(domElems) ; /* relative volume */
domain->volo.resize(domElems) ; /* reference volume */
domain->delv.resize(domElems) ; /* m_vnew - m_v */
domain->vdov.resize(domElems) ; /* volume derivative over volume */
domain->arealg.resize(domElems) ; /* elem characteristic length */
domain->ss.resize(domElems) ; /* "sound speed" */
domain->elemMass.resize(domElems) ; /* mass */
}
void AllocateSymmX(Domain* domain, size_t size)
{
domain->symmX.resize(size) ;
}
void AllocateSymmY(Domain* domain, size_t size)
{
domain->symmY.resize(size) ;
}
void AllocateSymmZ(Domain* domain, size_t size)
{
domain->symmZ.resize(size) ;
}
void InitializeFields(Domain* domain)
{
/* Basic Field Initialization */
thrust::fill(domain->ss.begin(),domain->ss.end(),0.);
thrust::fill(domain->e.begin(),domain->e.end(),0.);
thrust::fill(domain->p.begin(),domain->p.end(),0.);
thrust::fill(domain->q.begin(),domain->q.end(),0.);
thrust::fill(domain->v.begin(),domain->v.end(),1.);
thrust::fill(domain->xd.begin(),domain->xd.end(),0.);
thrust::fill(domain->yd.begin(),domain->yd.end(),0.);
thrust::fill(domain->zd.begin(),domain->zd.end(),0.);
thrust::fill(domain->xdd.begin(),domain->xdd.end(),0.);
thrust::fill(domain->ydd.begin(),domain->ydd.end(),0.);
thrust::fill(domain->zdd.begin(),domain->zdd.end(),0.);
thrust::fill(domain->nodalMass.begin(),domain->nodalMass.end(),0.);
}
////////////////////////////////////////////////////////////////////////////////
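// Size and pin host staging buffers for up to 26 neighbors (6 faces, 12 edges,
// 8 corners), optionally register them with the comm layer, and allocate
// device-side shadow buffers used for packing/unpacking ghost data.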
void
Domain::SetupCommBuffers(Int_t edgeNodes)
{
// allocate a buffer large enough for nodal ghost data
maxEdgeSize = MAX(this->sizeX, MAX(this->sizeY, this->sizeZ))+1 ;
maxPlaneSize = CACHE_ALIGN_REAL(maxEdgeSize*maxEdgeSize) ;
maxEdgeSize = CACHE_ALIGN_REAL(maxEdgeSize) ;
// assume communication to 6 neighbors by default
m_rowMin = (m_rowLoc == 0) ? 0 : 1;
m_rowMax = (m_rowLoc == m_tp-1) ? 0 : 1;
m_colMin = (m_colLoc == 0) ? 0 : 1;
m_colMax = (m_colLoc == m_tp-1) ? 0 : 1;
m_planeMin = (m_planeLoc == 0) ? 0 : 1;
m_planeMax = (m_planeLoc == m_tp-1) ? 0 : 1;
#if USE_MPI
// account for face communication
Index_t comBufSize =
(m_rowMin + m_rowMax + m_colMin + m_colMax + m_planeMin + m_planeMax) *
maxPlaneSize * MAX_FIELDS_PER_MPI_COMM ;
// account for edge communication
comBufSize +=
((m_rowMin & m_colMin) + (m_rowMin & m_planeMin) + (m_colMin & m_planeMin) +
(m_rowMax & m_colMax) + (m_rowMax & m_planeMax) + (m_colMax & m_planeMax) +
(m_rowMax & m_colMin) + (m_rowMin & m_planeMax) + (m_colMin & m_planeMax) +
(m_rowMin & m_colMax) + (m_rowMax & m_planeMin) + (m_colMax & m_planeMin)) *
maxPlaneSize * MAX_FIELDS_PER_MPI_COMM ;
// account for corner communication
// factor of 16 is so each buffer has its own cache line
comBufSize += ((m_rowMin & m_colMin & m_planeMin) +
(m_rowMin & m_colMin & m_planeMax) +
(m_rowMin & m_colMax & m_planeMin) +
(m_rowMin & m_colMax & m_planeMax) +
(m_rowMax & m_colMin & m_planeMin) +
(m_rowMax & m_colMin & m_planeMax) +
(m_rowMax & m_colMax & m_planeMin) +
(m_rowMax & m_colMax & m_planeMax)) * CACHE_COHERENCE_PAD_REAL ;
if(comm_use_comm())
{
int myRank=0, numPeers=0;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
MPI_Comm_size(MPI_COMM_WORLD, &numPeers);
if(myRank == 0)
printf("\n***comBufSize: %d totSize: %zu***\n", comBufSize, comBufSize*sizeof(Real_t));
comm_regions_setup(26*3, SEND_REGION);
comm_regions_setup(26*3, SEND_STREAM_REGION);
comm_regions_setup(26*3, RECV_REGION);
for(int typeBuf=0; typeBuf < 3; typeBuf++)
{
for(int ind=0; ind<26; ind++)
{
cudaMallocHost((void **)&(this->commDataSend_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
comm_register_index(this->commDataSend_multi[ind+(typeBuf*26)], comBufSize*sizeof(Real_t), SEND_REGION, ind+(typeBuf*26));
memset(this->commDataSend_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
cudaMallocHost((void **)&(this->commDataSendStream_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
comm_register_index(this->commDataSendStream_multi[ind+(typeBuf*26)], comBufSize*sizeof(Real_t), SEND_STREAM_REGION, ind+(typeBuf*26));
memset(this->commDataSendStream_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
cudaMallocHost((void **)&(this->commDataRecv_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
comm_register_index(this->commDataRecv_multi[ind+(typeBuf*26)], comBufSize*sizeof(Real_t),RECV_REGION, ind+(typeBuf*26));
memset(this->commDataRecv_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
}
}
comm_regions_setup(0, TIMER_RECV_REGION);
comm_regions_setup(0, TIMER_SEND_REGION);
cudaMallocHost((void **)&(this->timerRecvBuf), (numPeers+1)*sizeof(Real_t));
cudaMallocHost((void **)&(this->timerSendBuf), (numPeers+1)*sizeof(Real_t));
comm_register_index(this->timerRecvBuf, (numPeers+1)*sizeof(Real_t), TIMER_RECV_REGION, 0);
comm_register_index(this->timerSendBuf, (numPeers+1)*sizeof(Real_t), TIMER_SEND_REGION, 0);
}
else
{
for(int typeBuf=0; typeBuf < 3; typeBuf++)
{
for(int ind=0; ind<26; ind++)
{
cudaMallocHost((void **)&(this->commDataSend_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
memset(this->commDataSend_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
cudaMallocHost((void **)&(this->commDataSendStream_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
memset(this->commDataSendStream_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
cudaMallocHost((void **)&(this->commDataRecv_multi[ind+(typeBuf*26)]), comBufSize*sizeof(Real_t));
memset(this->commDataRecv_multi[ind+(typeBuf*26)], 0, comBufSize*sizeof(Real_t)) ;
}
}
#if 0
this->commDataSend = new Real_t[comBufSize] ;
this->commDataRecv = new Real_t[comBufSize] ;
// pin buffers
cudaHostRegister(this->commDataSend, comBufSize*sizeof(Real_t), 0);
cudaHostRegister(this->commDataRecv, comBufSize*sizeof(Real_t), 0);
// prevent floating point exceptions
memset(this->commDataSend, 0, comBufSize*sizeof(Real_t)) ;
memset(this->commDataRecv, 0, comBufSize*sizeof(Real_t)) ;
#endif
}
// allocate shadow GPU buffers
cudaMalloc(&this->d_commDataSend, comBufSize*sizeof(Real_t));
cudaMalloc(&this->d_commDataRecv, comBufSize*sizeof(Real_t));
// prevent floating point exceptions
cudaMemset(this->d_commDataSend, 0, comBufSize*sizeof(Real_t));
cudaMemset(this->d_commDataRecv, 0, comBufSize*sizeof(Real_t));
#endif
}
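// Build face-neighbor connectivity (lxim/lxip, letam/letap, lzetam/lzetap) and
// per-element boundary-condition masks. Faces on a rank boundary are flagged
// COMM and their neighbor index points into the ghost-element range appended
// after the local elements; physical boundaries get SYMM or FREE flags.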
void SetupConnectivityBC(Domain *domain, int edgeElems)
{
int domElems = domain->numElem;
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
/* set up element connectivity information */
lxim_h[0] = 0 ;
for (Index_t i=1; i<domElems; ++i) {
lxim_h[i] = i-1 ;
lxip_h[i-1] = i ;
}
lxip_h[domElems-1] = domElems-1 ;
for (Index_t i=0; i<edgeElems; ++i) {
letam_h[i] = i ;
letap_h[domElems-edgeElems+i] = domElems-edgeElems+i ;
}
for (Index_t i=edgeElems; i<domElems; ++i) {
letam_h[i] = i-edgeElems ;
letap_h[i-edgeElems] = i ;
}
for (Index_t i=0; i<edgeElems*edgeElems; ++i) {
lzetam_h[i] = i ;
lzetap_h[domElems-edgeElems*edgeElems+i] = domElems-edgeElems*edgeElems+i ;
}
for (Index_t i=edgeElems*edgeElems; i<domElems; ++i) {
lzetam_h[i] = i - edgeElems*edgeElems ;
lzetap_h[i-edgeElems*edgeElems] = i ;
}
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
elemBC_h[i] = 0 ; /* clear BCs by default */
}
Index_t ghostIdx[6] ; // offsets to ghost locations
for (Index_t i=0; i<6; ++i) {
ghostIdx[i] = INT_MIN ;
}
Int_t pidx = domElems ;
if (domain->m_planeMin != 0) {
ghostIdx[0] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_planeMax != 0) {
ghostIdx[1] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_rowMin != 0) {
ghostIdx[2] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_rowMax != 0) {
ghostIdx[3] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_colMin != 0) {
ghostIdx[4] = pidx ;
pidx += domain->sizeY*domain->sizeZ ;
}
if (domain->m_colMax != 0) {
ghostIdx[5] = pidx ;
}
/* symmetry plane or free surface BCs */
for (Index_t i=0; i<edgeElems; ++i) {
Index_t planeInc = i*edgeElems*edgeElems ;
Index_t rowInc = i*edgeElems ;
for (Index_t j=0; j<edgeElems; ++j) {
if (domain->m_planeLoc == 0) {
elemBC_h[rowInc+j] |= ZETA_M_SYMM ;
}
else {
elemBC_h[rowInc+j] |= ZETA_M_COMM ;
lzetam_h[rowInc+j] = ghostIdx[0] + rowInc + j ;
}
if (domain->m_planeLoc == domain->m_tp-1) {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_FREE;
}
else {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_COMM ;
lzetap_h[rowInc+j+domElems-edgeElems*edgeElems] =
ghostIdx[1] + rowInc + j ;
}
if (domain->m_rowLoc == 0) {
elemBC_h[planeInc+j] |= ETA_M_SYMM ;
}
else {
elemBC_h[planeInc+j] |= ETA_M_COMM ;
letam_h[planeInc+j] = ghostIdx[2] + rowInc + j ;
}
if (domain->m_rowLoc == domain->m_tp-1) {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_FREE ;
}
else {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_COMM ;
letap_h[planeInc+j+edgeElems*edgeElems-edgeElems] =
ghostIdx[3] + rowInc + j ;
}
if (domain->m_colLoc == 0) {
elemBC_h[planeInc+j*edgeElems] |= XI_M_SYMM ;
}
else {
elemBC_h[planeInc+j*edgeElems] |= XI_M_COMM ;
lxim_h[planeInc+j*edgeElems] = ghostIdx[4] + rowInc + j ;
}
if (domain->m_colLoc == domain->m_tp-1) {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_FREE ;
}
else {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_COMM ;
lxip_h[planeInc+j*edgeElems+edgeElems-1] =
ghostIdx[5] + rowInc + j ;
}
}
}
domain->elemBC = elemBC_h;
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
}
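// Generate this rank's block of nodal coordinates on a uniform lattice and the
// hexahedral element-to-node connectivity, stored node-major with stride
// padded_domElems (node n of element e lives at nodelist[n*padded_domElems+e]).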
void Domain::BuildMesh(Int_t nx, Int_t edgeNodes, Int_t edgeElems,
                       Int_t domNodes, Int_t padded_domElems,
                       Vector_h<Real_t> &x_h, Vector_h<Real_t> &y_h,
                       Vector_h<Real_t> &z_h, Vector_h<Int_t> &nodelist_h)
{
Index_t meshEdgeElems = m_tp*nx ;
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
// initialize nodal coordinates
Index_t nidx = 0 ;
Real_t tz = Real_t(1.125)*Real_t(m_planeLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t plane=0; plane<edgeNodes; ++plane) {
Real_t ty = Real_t(1.125)*Real_t(m_rowLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t row=0; row<edgeNodes; ++row) {
Real_t tx = Real_t(1.125)*Real_t(m_colLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t col=0; col<edgeNodes; ++col) {
x_h[nidx] = tx ;
y_h[nidx] = ty ;
z_h[nidx] = tz ;
++nidx ;
// tx += ds ; // may accumulate roundoff...
tx = Real_t(1.125)*Real_t(m_colLoc*nx+col+1)/Real_t(meshEdgeElems) ;
}
// ty += ds ; // may accumulate roundoff...
ty = Real_t(1.125)*Real_t(m_rowLoc*nx+row+1)/Real_t(meshEdgeElems) ;
}
// tz += ds ; // may accumulate roundoff...
tz = Real_t(1.125)*Real_t(m_planeLoc*nx+plane+1)/Real_t(meshEdgeElems) ;
}
x = x_h;
y = y_h;
z = z_h;
nodelist_h.resize(padded_domElems*8);
// embed hexahedral elements in nodal point lattice
Index_t zidx = 0 ;
nidx = 0 ;
for (Index_t plane=0; plane<edgeElems; ++plane) {
for (Index_t row=0; row<edgeElems; ++row) {
for (Index_t col=0; col<edgeElems; ++col) {
nodelist_h[0*padded_domElems+zidx] = nidx ;
nodelist_h[1*padded_domElems+zidx] = nidx + 1 ;
nodelist_h[2*padded_domElems+zidx] = nidx + edgeNodes + 1 ;
nodelist_h[3*padded_domElems+zidx] = nidx + edgeNodes ;
nodelist_h[4*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes ;
nodelist_h[5*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + 1 ;
nodelist_h[6*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + edgeNodes + 1 ;
nodelist_h[7*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + edgeNodes ;
++zidx ;
++nidx ;
}
++nidx ;
}
nidx += edgeNodes ;
}
nodelist = nodelist_h;
}
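// Allocate the per-cycle scratch arrays: new volumes, principal strain rates
// (dxx/dyy/dzz), position and velocity gradients. The delv_* arrays include
// slots for plane/row/column ghost elements so the monotonic Q calculation can
// read neighbor-rank values.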
void elenagoAllocateElems(Domain *locDom)
{
int allElem = locDom->numElem + /* local elem */
2*locDom->sizeX*locDom->sizeY + /* plane ghosts */
2*locDom->sizeX*locDom->sizeZ + /* row ghosts */
2*locDom->sizeY*locDom->sizeZ ; /* col ghosts */
locDom->vnew = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->dxx = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->dyy = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->dzz = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->delx_xi = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->delx_eta = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->delx_zeta = Allocator< Vector_d<Real_t> >::allocate(locDom->numElem);
locDom->delv_xi = Allocator< Vector_d<Real_t> >::allocate(allElem);
locDom->delv_eta = Allocator< Vector_d<Real_t> >::allocate(allElem);
locDom->delv_zeta = Allocator< Vector_d<Real_t> >::allocate(allElem);
locDom->d_fx = locDom->fx.raw();
locDom->d_fy = locDom->fy.raw();
locDom->d_fz = locDom->fz.raw();
locDom->d_x = locDom->x.raw();
locDom->d_y = locDom->y.raw();
locDom->d_z = locDom->z.raw();
locDom->d_xd = locDom->xd.raw();
locDom->d_yd = locDom->yd.raw();
locDom->d_zd = locDom->zd.raw();
locDom->d_delv_xi = locDom->delv_xi->raw();
locDom->d_delv_eta = locDom->delv_eta->raw();
locDom->d_delv_zeta = locDom->delv_zeta->raw();
#ifdef DOUBLE_PRECISION
locDom->fx_elem = Allocator< Vector_d<Real_t> >::allocate(locDom->padded_numElem*8);
locDom->fy_elem = Allocator< Vector_d<Real_t> >::allocate(locDom->padded_numElem*8);
locDom->fz_elem = Allocator< Vector_d<Real_t> >::allocate(locDom->padded_numElem*8);
#else
thrust::fill(locDom->fx.begin(),locDom->fx.end(),0.);
thrust::fill(locDom->fy.begin(),locDom->fy.end(),0.);
thrust::fill(locDom->fz.begin(),locDom->fz.end(),0.);
#endif
int dimGrid=std::min(1024,PAD_DIV(locDom->numElem,128));
locDom->dev_mindtcourant= Allocator< Vector_d<Real_t> >::allocate(dimGrid);
locDom->dev_mindthydro = Allocator< Vector_d<Real_t> >::allocate(dimGrid);
}
void elenagoDellocateElems(Domain *locDom)
{
int dimGrid=std::min(1024,PAD_DIV(locDom->numElem,128));
int allElem = locDom->numElem + /* local elem */
2*locDom->sizeX*locDom->sizeY + /* plane ghosts */
2*locDom->sizeX*locDom->sizeZ + /* row ghosts */
2*locDom->sizeY*locDom->sizeZ ; /* col ghosts */
Allocator<Vector_d<Real_t> >::free(locDom->dev_mindtcourant,dimGrid);
Allocator<Vector_d<Real_t> >::free(locDom->dev_mindthydro,dimGrid);
Allocator<Vector_d<Real_t> >::free(locDom->fx_elem,locDom->padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(locDom->fy_elem,locDom->padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(locDom->fz_elem,locDom->padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(locDom->dxx,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->dyy,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->dzz,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->delx_xi,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->delx_eta,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->delx_zeta,locDom->numElem);
Allocator<Vector_d<Real_t> >::free(locDom->delv_xi,allElem);
Allocator<Vector_d<Real_t> >::free(locDom->delv_eta,allElem);
Allocator<Vector_d<Real_t> >::free(locDom->delv_zeta,allElem);
Allocator<Vector_d<Real_t> >::free(locDom->vnew,locDom->numElem);
}
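// ResetDomain re-initializes an existing Domain in place; it mirrors NewDomain
// below, except that it destroys and recreates the per-iteration CUDA events
// and skips SetupCommBuffers.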
Domain *ResetDomain(Domain *domain, char* argv[], Int_t numRanks, Index_t colLoc,
Index_t rowLoc, Index_t planeLoc,
Index_t nx, int tp, bool structured, Int_t nr, Int_t balance, Int_t cost)
{
domain->max_streams = 32;
domain->streams.resize(domain->max_streams);
for (Int_t i=0;i<domain->max_streams;i++)
domain->streams[i] = NULL;
//cudaStreamCreate(&(domain->streams[i]));
//elenagoevent
assert(totIters > 0);
assert(totIters < 512);
for (Int_t i=0;i<(totIters+1);i++)
{
cudaEventDestroy(domain->time_constraint_computed[i]);
cudaEventDestroy(domain->time_constraint_reduced[i]);
cudaEventCreateWithFlags(&domain->time_constraint_computed[i],cudaEventDisableTiming);
cudaEventCreateWithFlags(&domain->time_constraint_reduced[i],cudaEventDisableTiming);
}
Index_t domElems;
Index_t domNodes;
Index_t padded_domElems;
Vector_h<Index_t> nodelist_h;
Vector_h<Real_t> x_h;
Vector_h<Real_t> y_h;
Vector_h<Real_t> z_h;
if (structured)
{
domain->m_tp = tp ;
domain->m_numRanks = numRanks ;
domain->m_colLoc = colLoc ;
domain->m_rowLoc = rowLoc ;
domain->m_planeLoc = planeLoc ;
Index_t edgeElems = nx ;
Index_t edgeNodes = edgeElems+1 ;
domain->sizeX = edgeElems ;
domain->sizeY = edgeElems ;
domain->sizeZ = edgeElems ;
domain->numElem = domain->sizeX*domain->sizeY*domain->sizeZ ;
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = (domain->sizeX+1)*(domain->sizeY+1)*(domain->sizeZ+1) ;
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
//domain->SetupCommBuffers(edgeNodes);
InitializeFields(domain);
domain->BuildMesh(nx, edgeNodes, edgeElems, domNodes, padded_domElems, x_h, y_h, z_h, nodelist_h);
domain->numSymmX = domain->numSymmY = domain->numSymmZ = 0;
if (domain->m_colLoc == 0)
domain->numSymmX = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_rowLoc == 0)
domain->numSymmY = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_planeLoc == 0)
domain->numSymmZ = (edgeElems+1)*(edgeElems+1) ;
AllocateSymmX(domain,edgeNodes*edgeNodes);
AllocateSymmY(domain,edgeNodes*edgeNodes);
AllocateSymmZ(domain,edgeNodes*edgeNodes);
/* set up symmetry nodesets */
Vector_h<Index_t> symmX_h(domain->symmX.size());
Vector_h<Index_t> symmY_h(domain->symmY.size());
Vector_h<Index_t> symmZ_h(domain->symmZ.size());
Int_t nidx = 0 ;
for (Index_t i=0; i<edgeNodes; ++i) {
Index_t planeInc = i*edgeNodes*edgeNodes ;
Index_t rowInc = i*edgeNodes ;
for (Index_t j=0; j<edgeNodes; ++j) {
if (domain->m_planeLoc == 0) {
symmZ_h[nidx] = rowInc + j ;
}
if (domain->m_rowLoc == 0) {
symmY_h[nidx] = planeInc + j ;
}
if (domain->m_colLoc == 0) {
symmX_h[nidx] = planeInc + j*edgeNodes ;
}
++nidx ;
}
}
if (domain->m_planeLoc == 0)
domain->symmZ = symmZ_h;
if (domain->m_rowLoc == 0)
domain->symmY = symmY_h;
if (domain->m_colLoc == 0)
domain->symmX = symmX_h;
SetupConnectivityBC(domain, edgeElems);
}
else
{
FILE *fp;
int ee, en;
if ((fp = fopen(argv[2], "r")) == 0) {
printf("could not open file %s\n", argv[2]) ;
exit( LFileError ) ;
}
int fsuccess;
fsuccess = fscanf(fp, "%d %d", &ee, &en) ;
domain->numElem = Index_t(ee);
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = Index_t(en);
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
InitializeFields(domain);
/* initialize nodal coordinates */
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
double px, py, pz ;
fsuccess = fscanf(fp, "%lf %lf %lf", &px, &py, &pz) ;
x_h[i] = Real_t(px) ;
y_h[i] = Real_t(py) ;
z_h[i] = Real_t(pz) ;
}
domain->x = x_h;
domain->y = y_h;
domain->z = z_h;
/* embed hexahedral elements in nodal point lattice */
nodelist_h.resize(padded_domElems*8);
for (Index_t zidx=0; zidx<domElems; ++zidx) {
for (Index_t ni=0; ni<Index_t(8); ++ni) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
nodelist_h[ni*padded_domElems+zidx] = Index_t(n);
}
}
domain->nodelist = nodelist_h;
/* set up face-based element neighbors */
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
int xi_m, xi_p, eta_m, eta_p, zeta_m, zeta_p ;
fsuccess = fscanf(fp, "%d %d %d %d %d %d",
&xi_m, &xi_p, &eta_m, &eta_p, &zeta_m, &zeta_p) ;
lxim_h[i] = Index_t(xi_m) ;
lxip_h[i] = Index_t(xi_p) ;
letam_h[i] = Index_t(eta_m) ;
letap_h[i] = Index_t(eta_p) ;
lzetam_h[i] = Index_t(zeta_m) ;
lzetap_h[i] = Index_t(zeta_p) ;
}
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
/* set up X symmetry nodeset */
fsuccess = fscanf(fp, "%d", &domain->numSymmX) ;
Vector_h<Index_t> symmX_h(domain->numSymmX);
for (Index_t i=0; i<domain->numSymmX; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmX_h[i] = Index_t(n) ;
}
domain->symmX = symmX_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmY) ;
Vector_h<Index_t> symmY_h(domain->numSymmY);
for (Index_t i=0; i<domain->numSymmY; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmY_h[i] = Index_t(n) ;
}
domain->symmY = symmY_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmZ) ;
Vector_h<Index_t> symmZ_h(domain->numSymmZ);
for (Index_t i=0; i<domain->numSymmZ; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmZ_h[i] = Index_t(n) ;
}
domain->symmZ = symmZ_h;
/* set up free surface nodeset */
Index_t numFreeSurf;
fsuccess = fscanf(fp, "%d", &numFreeSurf) ;
Vector_h<Index_t> freeSurf_h(numFreeSurf);
for (Index_t i=0; i<numFreeSurf; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
freeSurf_h[i] = Index_t(n) ;
}
printf("%d\n",fsuccess); // print final fscanf status (keeps fsuccess from being flagged as unused)
fclose(fp);
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
Vector_h<Index_t> surfaceNode_h(domNodes);
for (Index_t i=0; i<domain->numElem; ++i) {
elemBC_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numSymmX; ++i) {
surfaceNode_h[symmX_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmY; ++i) {
surfaceNode_h[symmY_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmZ; ++i) {
surfaceNode_h[symmZ_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
if (elemBC_h[zidx] == (XI_M_SYMM | ETA_M_SYMM | ZETA_M_SYMM)) {
domain->octantCorner = zidx ;
break ;
}
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<numFreeSurf; ++i) {
surfaceNode_h[freeSurf_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
domain->elemBC = elemBC_h;
/* deposit energy */
domain->e[domain->octantCorner] = Real_t(3.948746e+7) ;
}
/* set up node-centered indexing of elements */
Vector_h<Index_t> nodeElemCount_h(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
nodeElemCount_h[i] = 0 ;
}
for (Index_t i=0; i<domElems; ++i) {
for (Index_t j=0; j < 8; ++j) {
++(nodeElemCount_h[nodelist_h[j*padded_domElems+i]]);
}
}
Vector_h<Index_t> nodeElemStart_h(domNodes);
nodeElemStart_h[0] = 0;
for (Index_t i=1; i < domNodes; ++i) {
nodeElemStart_h[i] =
nodeElemStart_h[i-1] + nodeElemCount_h[i-1] ;
}
Vector_h<Index_t> nodeElemCornerList_h(nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] );
for (Index_t i=0; i < domNodes; ++i) {
nodeElemCount_h[i] = 0;
}
for (Index_t j=0; j < 8; ++j) {
for (Index_t i=0; i < domElems; ++i) {
Index_t m = nodelist_h[padded_domElems*j+i];
Index_t k = padded_domElems*j + i ;
Index_t offset = nodeElemStart_h[m] +
nodeElemCount_h[m] ;
nodeElemCornerList_h[offset] = k;
++(nodeElemCount_h[m]) ;
}
}
Index_t clSize = nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] ;
for (Index_t i=0; i < clSize; ++i) {
Index_t clv = nodeElemCornerList_h[i] ;
if ((clv < 0) || (clv > padded_domElems*8)) {
fprintf(stderr,
"AllocateNodeElemIndexes(): nodeElemCornerList entry out of range!\n");
exit(1);
}
}
domain->nodeElemStart = nodeElemStart_h;
domain->nodeElemCount = nodeElemCount_h;
domain->nodeElemCornerList = nodeElemCornerList_h;
/* Create a material IndexSet (entire domain same material for now) */
Vector_h<Index_t> matElemlist_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
matElemlist_h[i] = i ;
}
domain->matElemlist = matElemlist_h;
cudaMallocHost(&domain->dtcourant_h,sizeof(Real_t),0);
cudaMallocHost(&domain->dthydro_h,sizeof(Real_t),0);
cudaMallocHost(&domain->bad_vol_h,sizeof(Index_t),0);
cudaMallocHost(&domain->bad_q_h,sizeof(Index_t),0);
*(domain->bad_vol_h)=-1;
*(domain->bad_q_h)=-1;
*(domain->dthydro_h)=1e20;
*(domain->dtcourant_h)=1e20;
/* initialize material parameters */
domain->time_h = Real_t(0.) ;
//elenago
cudaMallocHost((void **)&(domain->time_h_async), 1*sizeof(Real_t));
domain->time_h_async[0] = domain->time_h;
domain->dtfixed = Real_t(-1.0e-6) ;
domain->deltatimemultlb = Real_t(1.1) ;
domain->deltatimemultub = Real_t(1.2) ;
domain->stoptime = Real_t(1.0e-2) ;
domain->dtmax = Real_t(1.0e-2) ;
domain->cycle = 0 ;
domain->e_cut = Real_t(1.0e-7) ;
domain->p_cut = Real_t(1.0e-7) ;
domain->q_cut = Real_t(1.0e-7) ;
domain->u_cut = Real_t(1.0e-7) ;
domain->v_cut = Real_t(1.0e-10) ;
domain->hgcoef = Real_t(3.0) ;
domain->ss4o3 = Real_t(4.0)/Real_t(3.0) ;
domain->qstop = Real_t(1.0e+12) ;
domain->monoq_max_slope = Real_t(1.0) ;
domain->monoq_limiter_mult = Real_t(2.0) ;
domain->qlc_monoq = Real_t(0.5) ;
domain->qqc_monoq = Real_t(2.0)/Real_t(3.0) ;
domain->qqc = Real_t(2.0) ;
domain->pmin = Real_t(0.) ;
domain->emin = Real_t(-1.0e+15) ;
domain->dvovmax = Real_t(0.1) ;
domain->eosvmax = Real_t(1.0e+9) ;
domain->eosvmin = Real_t(1.0e-9) ;
domain->refdens = Real_t(1.0) ;
/* initialize field data */
Vector_h<Real_t> nodalMass_h(domNodes);
Vector_h<Real_t> volo_h(domElems);
Vector_h<Real_t> elemMass_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
Real_t x_local[8], y_local[8], z_local[8] ;
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist_h[lnode*padded_domElems+i];
x_local[lnode] = x_h[gnode];
y_local[lnode] = y_h[gnode];
z_local[lnode] = z_h[gnode];
}
// volume calculations
Real_t volume = CalcElemVolume(x_local, y_local, z_local );
volo_h[i] = volume ;
elemMass_h[i] = volume ;
for (Index_t j=0; j<8; ++j) {
Index_t gnode = nodelist_h[j*padded_domElems+i];
nodalMass_h[gnode] += volume / Real_t(8.0) ;
}
}
domain->nodalMass = nodalMass_h;
domain->volo = volo_h;
domain->elemMass= elemMass_h;
/* deposit energy */
domain->octantCorner = 0;
// deposit initial energy
// An energy of 3.948746e+7 is correct for a problem with
// 45 zones along a side - we need to scale it
const Real_t ebase = 3.948746e+7;
Real_t scale = (nx*domain->m_tp)/45.0;
Real_t einit = ebase*scale*scale*scale;
//Real_t einit = ebase;
if (domain->m_rowLoc + domain->m_colLoc + domain->m_planeLoc == 0) {
// Dump into the first zone (which we know is in the corner)
// of the domain that sits at the origin
domain->e[0] = einit;
}
//set initial deltatime based on analytic CFL calculation
//elenago async time
cudaMallocHost((void **)&(domain->deltatime_h_async), 1*sizeof(Real_t));
domain->deltatime_h_async[0] = (.5*cbrt(domain->volo[0]))/sqrt(2*einit);
cudaMalloc((void **)&(domain->deltatime_d_async), 1*sizeof(Real_t));
cudaMemcpy(domain->deltatime_d_async, domain->deltatime_h_async, 1*sizeof(Real_t), cudaMemcpyHostToDevice);
domain->cost = cost;
domain->regNumList.resize(domain->numElem) ; // material indexset
domain->regElemlist.resize(domain->numElem) ; // material indexset
domain->regCSR.resize(nr);
domain->regReps.resize(nr);
domain->regSorted.resize(nr);
// Setup region index sets. For now, these are constant sized
// throughout the run, but could be changed every cycle to
// simulate effects of ALE on the lagrange solver
domain->CreateRegionIndexSets(nr, balance);
return domain ;
}
Domain *NewDomain(char* argv[], Int_t numRanks, Index_t colLoc,
Index_t rowLoc, Index_t planeLoc,
Index_t nx, int tp, bool structured, Int_t nr, Int_t balance, Int_t cost)
{
Domain *domain = new Domain ;
domain->max_streams = 32;
domain->streams.resize(domain->max_streams);
for (Int_t i=0;i<domain->max_streams;i++)
domain->streams[i] = NULL;
//cudaStreamCreate(&(domain->streams[i]));
//elenagoevent
assert(totIters > 0);
assert(totIters < 512);
for (Int_t i=0;i<(totIters+1);i++)
{
cudaEventCreateWithFlags(&domain->time_constraint_computed[i],cudaEventDisableTiming);
cudaEventCreateWithFlags(&domain->time_constraint_reduced[i],cudaEventDisableTiming);
}
Index_t domElems;
Index_t domNodes;
Index_t padded_domElems;
Vector_h<Index_t> nodelist_h;
Vector_h<Real_t> x_h;
Vector_h<Real_t> y_h;
Vector_h<Real_t> z_h;
if (structured)
{
domain->m_tp = tp ;
domain->m_numRanks = numRanks ;
domain->m_colLoc = colLoc ;
domain->m_rowLoc = rowLoc ;
domain->m_planeLoc = planeLoc ;
Index_t edgeElems = nx ;
Index_t edgeNodes = edgeElems+1 ;
domain->sizeX = edgeElems ;
domain->sizeY = edgeElems ;
domain->sizeZ = edgeElems ;
domain->numElem = domain->sizeX*domain->sizeY*domain->sizeZ ;
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = (domain->sizeX+1)*(domain->sizeY+1)*(domain->sizeZ+1) ;
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
domain->SetupCommBuffers(edgeNodes);
InitializeFields(domain);
domain->BuildMesh(nx, edgeNodes, edgeElems, domNodes, padded_domElems, x_h, y_h, z_h, nodelist_h);
domain->numSymmX = domain->numSymmY = domain->numSymmZ = 0;
if (domain->m_colLoc == 0)
domain->numSymmX = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_rowLoc == 0)
domain->numSymmY = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_planeLoc == 0)
domain->numSymmZ = (edgeElems+1)*(edgeElems+1) ;
AllocateSymmX(domain,edgeNodes*edgeNodes);
AllocateSymmY(domain,edgeNodes*edgeNodes);
AllocateSymmZ(domain,edgeNodes*edgeNodes);
/* set up symmetry nodesets */
Vector_h<Index_t> symmX_h(domain->symmX.size());
Vector_h<Index_t> symmY_h(domain->symmY.size());
Vector_h<Index_t> symmZ_h(domain->symmZ.size());
Int_t nidx = 0 ;
for (Index_t i=0; i<edgeNodes; ++i) {
Index_t planeInc = i*edgeNodes*edgeNodes ;
Index_t rowInc = i*edgeNodes ;
for (Index_t j=0; j<edgeNodes; ++j) {
if (domain->m_planeLoc == 0) {
symmZ_h[nidx] = rowInc + j ;
}
if (domain->m_rowLoc == 0) {
symmY_h[nidx] = planeInc + j ;
}
if (domain->m_colLoc == 0) {
symmX_h[nidx] = planeInc + j*edgeNodes ;
}
++nidx ;
}
}
if (domain->m_planeLoc == 0)
domain->symmZ = symmZ_h;
if (domain->m_rowLoc == 0)
domain->symmY = symmY_h;
if (domain->m_colLoc == 0)
domain->symmX = symmX_h;
SetupConnectivityBC(domain, edgeElems);
}
else
{
FILE *fp;
int ee, en;
if ((fp = fopen(argv[2], "r")) == 0) {
printf("could not open file %s\n", argv[2]) ;
exit( LFileError ) ;
}
int fsuccess;
fsuccess = fscanf(fp, "%d %d", &ee, &en) ;
domain->numElem = Index_t(ee);
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = Index_t(en);
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
InitializeFields(domain);
/* initialize nodal coordinates */
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
double px, py, pz ;
fsuccess = fscanf(fp, "%lf %lf %lf", &px, &py, &pz) ;
x_h[i] = Real_t(px) ;
y_h[i] = Real_t(py) ;
z_h[i] = Real_t(pz) ;
}
domain->x = x_h;
domain->y = y_h;
domain->z = z_h;
/* embed hexahedral elements in nodal point lattice */
nodelist_h.resize(padded_domElems*8);
for (Index_t zidx=0; zidx<domElems; ++zidx) {
for (Index_t ni=0; ni<Index_t(8); ++ni) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
nodelist_h[ni*padded_domElems+zidx] = Index_t(n);
}
}
domain->nodelist = nodelist_h;
/* set up face-based element neighbors */
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
int xi_m, xi_p, eta_m, eta_p, zeta_m, zeta_p ;
fsuccess = fscanf(fp, "%d %d %d %d %d %d",
&xi_m, &xi_p, &eta_m, &eta_p, &zeta_m, &zeta_p) ;
lxim_h[i] = Index_t(xi_m) ;
lxip_h[i] = Index_t(xi_p) ;
letam_h[i] = Index_t(eta_m) ;
letap_h[i] = Index_t(eta_p) ;
lzetam_h[i] = Index_t(zeta_m) ;
lzetap_h[i] = Index_t(zeta_p) ;
}
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
/* set up X symmetry nodeset */
fsuccess = fscanf(fp, "%d", &domain->numSymmX) ;
Vector_h<Index_t> symmX_h(domain->numSymmX);
for (Index_t i=0; i<domain->numSymmX; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmX_h[i] = Index_t(n) ;
}
domain->symmX = symmX_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmY) ;
Vector_h<Index_t> symmY_h(domain->numSymmY);
for (Index_t i=0; i<domain->numSymmY; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmY_h[i] = Index_t(n) ;
}
domain->symmY = symmY_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmZ) ;
Vector_h<Index_t> symmZ_h(domain->numSymmZ);
for (Index_t i=0; i<domain->numSymmZ; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmZ_h[i] = Index_t(n) ;
}
domain->symmZ = symmZ_h;
/* set up free surface nodeset */
Index_t numFreeSurf;
fsuccess = fscanf(fp, "%d", &numFreeSurf) ;
Vector_h<Index_t> freeSurf_h(numFreeSurf);
for (Index_t i=0; i<numFreeSurf; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
freeSurf_h[i] = Index_t(n) ;
}
printf("%d\n",fsuccess); // print final fscanf status (keeps fsuccess from being flagged as unused)
fclose(fp);
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
Vector_h<Index_t> surfaceNode_h(domNodes);
for (Index_t i=0; i<domain->numElem; ++i) {
elemBC_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numSymmX; ++i) {
surfaceNode_h[symmX_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmY; ++i) {
surfaceNode_h[symmY_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmZ; ++i) {
surfaceNode_h[symmZ_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
if (elemBC_h[zidx] == (XI_M_SYMM | ETA_M_SYMM | ZETA_M_SYMM)) {
domain->octantCorner = zidx ;
break ;
}
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<numFreeSurf; ++i) {
surfaceNode_h[freeSurf_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
domain->elemBC = elemBC_h;
/* deposit energy */
domain->e[domain->octantCorner] = Real_t(3.948746e+7) ;
}
/* set up node-centered indexing of elements */
Vector_h<Index_t> nodeElemCount_h(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
nodeElemCount_h[i] = 0 ;
}
for (Index_t i=0; i<domElems; ++i) {
for (Index_t j=0; j < 8; ++j) {
++(nodeElemCount_h[nodelist_h[j*padded_domElems+i]]);
}
}
Vector_h<Index_t> nodeElemStart_h(domNodes);
nodeElemStart_h[0] = 0;
for (Index_t i=1; i < domNodes; ++i) {
nodeElemStart_h[i] =
nodeElemStart_h[i-1] + nodeElemCount_h[i-1] ;
}
Vector_h<Index_t> nodeElemCornerList_h(nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] );
for (Index_t i=0; i < domNodes; ++i) {
nodeElemCount_h[i] = 0;
}
for (Index_t j=0; j < 8; ++j) {
for (Index_t i=0; i < domElems; ++i) {
Index_t m = nodelist_h[padded_domElems*j+i];
Index_t k = padded_domElems*j + i ;
Index_t offset = nodeElemStart_h[m] +
nodeElemCount_h[m] ;
nodeElemCornerList_h[offset] = k;
++(nodeElemCount_h[m]) ;
}
}
Index_t clSize = nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] ;
for (Index_t i=0; i < clSize; ++i) {
Index_t clv = nodeElemCornerList_h[i] ;
if ((clv < 0) || (clv > padded_domElems*8)) {
fprintf(stderr,
"AllocateNodeElemIndexes(): nodeElemCornerList entry out of range!\n");
exit(1);
}
}
domain->nodeElemStart = nodeElemStart_h;
domain->nodeElemCount = nodeElemCount_h;
domain->nodeElemCornerList = nodeElemCornerList_h;
/* Create a material IndexSet (entire domain same material for now) */
Vector_h<Index_t> matElemlist_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
matElemlist_h[i] = i ;
}
domain->matElemlist = matElemlist_h;
cudaMallocHost(&domain->dtcourant_h,sizeof(Real_t),0);
cudaMallocHost(&domain->dthydro_h,sizeof(Real_t),0);
cudaMallocHost(&domain->bad_vol_h,sizeof(Index_t),0);
cudaMallocHost(&domain->bad_q_h,sizeof(Index_t),0);
*(domain->bad_vol_h)=-1;
*(domain->bad_q_h)=-1;
*(domain->dthydro_h)=1e20;
*(domain->dtcourant_h)=1e20;
/* initialize material parameters */
domain->time_h = Real_t(0.) ;
cudaMallocHost((void **)&(domain->time_h_async), 1*sizeof(Real_t));
domain->time_h_async[0] = domain->time_h;
domain->dtfixed = Real_t(-1.0e-6) ;
domain->deltatimemultlb = Real_t(1.1) ;
domain->deltatimemultub = Real_t(1.2) ;
domain->stoptime = Real_t(1.0e-2) ;
domain->dtmax = Real_t(1.0e-2) ;
domain->cycle = 0 ;
domain->e_cut = Real_t(1.0e-7) ;
domain->p_cut = Real_t(1.0e-7) ;
domain->q_cut = Real_t(1.0e-7) ;
domain->u_cut = Real_t(1.0e-7) ;
domain->v_cut = Real_t(1.0e-10) ;
domain->hgcoef = Real_t(3.0) ;
domain->ss4o3 = Real_t(4.0)/Real_t(3.0) ;
domain->qstop = Real_t(1.0e+12) ;
domain->monoq_max_slope = Real_t(1.0) ;
domain->monoq_limiter_mult = Real_t(2.0) ;
domain->qlc_monoq = Real_t(0.5) ;
domain->qqc_monoq = Real_t(2.0)/Real_t(3.0) ;
domain->qqc = Real_t(2.0) ;
domain->pmin = Real_t(0.) ;
domain->emin = Real_t(-1.0e+15) ;
domain->dvovmax = Real_t(0.1) ;
domain->eosvmax = Real_t(1.0e+9) ;
domain->eosvmin = Real_t(1.0e-9) ;
domain->refdens = Real_t(1.0) ;
/* initialize field data */
Vector_h<Real_t> nodalMass_h(domNodes);
Vector_h<Real_t> volo_h(domElems);
Vector_h<Real_t> elemMass_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
Real_t x_local[8], y_local[8], z_local[8] ;
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist_h[lnode*padded_domElems+i];
x_local[lnode] = x_h[gnode];
y_local[lnode] = y_h[gnode];
z_local[lnode] = z_h[gnode];
}
// volume calculations
Real_t volume = CalcElemVolume(x_local, y_local, z_local );
volo_h[i] = volume ;
elemMass_h[i] = volume ;
for (Index_t j=0; j<8; ++j) {
Index_t gnode = nodelist_h[j*padded_domElems+i];
nodalMass_h[gnode] += volume / Real_t(8.0) ;
}
}
domain->nodalMass = nodalMass_h;
domain->volo = volo_h;
domain->elemMass= elemMass_h;
/* deposit energy */
domain->octantCorner = 0;
// deposit initial energy
// An energy of 3.948746e+7 is correct for a problem with
// 45 zones along a side - we need to scale it
const Real_t ebase = 3.948746e+7;
Real_t scale = (nx*domain->m_tp)/45.0;
Real_t einit = ebase*scale*scale*scale;
//Real_t einit = ebase;
if (domain->m_rowLoc + domain->m_colLoc + domain->m_planeLoc == 0) {
// Dump into the first zone (which we know is in the corner)
// of the domain that sits at the origin
domain->e[0] = einit;
}
//set initial deltatime based on analytic CFL calculation
//elenago async time
cudaMallocHost((void **)&(domain->deltatime_h_async), 1*sizeof(Real_t));
domain->deltatime_h_async[0] = (.5*cbrt(domain->volo[0]))/sqrt(2*einit);
cudaMalloc((void **)&(domain->deltatime_d_async), 1*sizeof(Real_t));
cudaMemcpy(domain->deltatime_d_async, domain->deltatime_h_async, 1*sizeof(Real_t), cudaMemcpyHostToDevice);
domain->cost = cost;
domain->regNumList.resize(domain->numElem) ; // material indexset
domain->regElemlist.resize(domain->numElem) ; // material indexset
domain->regCSR.resize(nr);
domain->regReps.resize(nr);
domain->regSorted.resize(nr);
// Setup region index sets. For now, these are constant sized
// throughout the run, but could be changed every cycle to
// simulate effects of ALE on the lagrange solver
domain->CreateRegionIndexSets(nr, balance);
return domain ;
}
/******************* to support region *********************/
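// Sort the regions by their rep count (descending, simple bubble sort) so the
// most expensive regions come first; regSorted maps an original region index
// to its position in the sorted order.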
void Domain::sortRegions(Vector_h<Int_t>& regReps_h, Vector_h<Index_t>& regSorted_h)
{
Index_t temp;
Vector_h<Index_t> regIndex;
regIndex.resize(numReg);
for(int i = 0; i < numReg; i++)
regIndex[i] = i;
for(int i = 0; i < numReg-1; i++)
for(int j = 0; j < numReg-i-1; j++)
if(regReps_h[j] < regReps_h[j+1])
{
temp = regReps_h[j];
regReps_h[j] = regReps_h[j+1];
regReps_h[j+1] = temp;
temp = regElemSize[j];
regElemSize[j] = regElemSize[j+1];
regElemSize[j+1] = temp;
temp = regIndex[j];
regIndex[j] = regIndex[j+1];
regIndex[j+1] = temp;
}
for(int i = 0; i < numReg; i++)
regSorted_h[regIndex[i]] = i;
}
// simple function for int pow x^y, y >= 0
static Int_t POW(Int_t x, Int_t y)
{
Int_t res = 1;
for (Int_t i = 0; i < y; i++)
res *= x;
return res;
}
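// Randomly partition the elements into numReg regions. Region i is drawn with
// weight (i+1)^balance, elements are handed out in variable-sized runs, and
// the result is stored as a CSR-style index set ordered by region cost.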
void Domain::CreateRegionIndexSets(Int_t nr, Int_t b)
{
#if USE_MPI
Index_t myRank;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
srand(myRank);
#else
srand(0);
Index_t myRank = 0;
#endif
numReg = nr;
balance = b;
regElemSize = new Int_t[numReg];
Index_t nextIndex = 0;
Vector_h<Int_t> regCSR_h(regCSR.size()); // records the beginning and end of each region
Vector_h<Int_t> regReps_h(regReps.size()); // records the rep number per region
Vector_h<Index_t> regNumList_h(regNumList.size()); // Region number per domain element
Vector_h<Index_t> regElemlist_h(regElemlist.size()); // region indexset
Vector_h<Index_t> regSorted_h(regSorted.size()); // keeps index of sorted regions
//if we only have one region just fill it
// Fill out the regNumList with material numbers, which are always
// the region index plus one
if(numReg == 1) {
while (nextIndex < numElem) {
regNumList_h[nextIndex] = 1;
nextIndex++;
}
regElemSize[0] = 0;
}
//If we have more than one region distribute the elements.
else {
Int_t regionNum;
Int_t regionVar;
Int_t lastReg = -1;
Int_t binSize;
Int_t elements;
Index_t runto = 0;
Int_t costDenominator = 0;
Int_t* regBinEnd = new Int_t[numReg];
//Determine the relative weights of all the regions.
for (Index_t i=0 ; i<numReg ; ++i) {
regElemSize[i] = 0;
costDenominator += POW((i+1), balance); //Total cost of all regions
regBinEnd[i] = costDenominator; //Chance of hitting a given region is (regBinEnd[i] - regBinEnd[i-1])/costDenominator
}
//Until all elements are assigned
while (nextIndex < numElem) {
//pick the region
regionVar = rand() % costDenominator;
Index_t i = 0;
while(regionVar >= regBinEnd[i])
i++;
//rotate the regions based on MPI rank. Rotation is Rank % NumRegions
regionNum = ((i + myRank) % numReg) + 1;
// make sure we don't pick the same region twice in a row
while(regionNum == lastReg) {
regionVar = rand() % costDenominator;
i = 0;
while(regionVar >= regBinEnd[i])
i++;
regionNum = ((i + myRank) % numReg) + 1;
}
//Pick the bin size of the region and determine the number of elements.
binSize = rand() % 1000;
if(binSize < 773) {
elements = rand() % 15 + 1;
}
else if(binSize < 937) {
elements = rand() % 16 + 16;
}
else if(binSize < 970) {
elements = rand() % 32 + 32;
}
else if(binSize < 974) {
elements = rand() % 64 + 64;
}
else if(binSize < 978) {
elements = rand() % 128 + 128;
}
else if(binSize < 981) {
elements = rand() % 256 + 256;
}
else
elements = rand() % 1537 + 512;
runto = elements + nextIndex;
//Store the elements. If we hit the end before we run out of elements then just stop.
while (nextIndex < runto && nextIndex < numElem) {
regNumList_h[nextIndex] = regionNum;
nextIndex++;
}
lastReg = regionNum;
}
}
// Convert regNumList to region index sets
// First, count size of each region
for (Index_t i=0 ; i<numElem ; ++i) {
int r = regNumList_h[i]-1; // region index == regnum-1
regElemSize[r]++;
}
Index_t rep;
// Second, assign the number of rep evaluations for each region
for (Index_t r=0; r<numReg ; ++r) {
if(r < numReg/2)
rep = 1;
else if(r < (numReg - (numReg+15)/20))
rep = 1 + cost;
else
rep = 10 * (1+ cost);
regReps_h[r] = rep;
}
sortRegions(regReps_h, regSorted_h);
regCSR_h[0] = 0;
// Third, compute the CSR offset of each region from the sorted sizes
for (Index_t i=1 ; i<numReg ; ++i) {
regCSR_h[i] = regCSR_h[i-1] + regElemSize[i-1];
}
// Fourth, fill the region index sets
for (Index_t i=0 ; i<numElem ; ++i) {
Index_t r = regSorted_h[regNumList_h[i]-1]; // region index == regnum-1
regElemlist_h[regCSR_h[r]] = i;
regCSR_h[r]++;
}
// Copy to device
regCSR = regCSR_h; // records the beginning and end of each region
regReps = regReps_h; // records the rep number per region
regNumList = regNumList_h; // Region number per domain element
regElemlist = regElemlist_h; // region indexset
regSorted = regSorted_h; // keeps index of sorted regions
} // end of create function
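// Compute the next timestep: take the smaller of the Courant and hydrodynamic
// constraints, reduce across ranks (MPI_Allreduce when enabled), limit how fast
// dt may grow or shrink, then copy the new dt to the device asynchronously on
// stream 1 and record an event marking that the reduced dt is available.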
static inline
void TimeIncrement(Domain* domain)
{
// To make sure dtcourant and dthydro have been updated on host
cudaEventSynchronize(domain->time_constraint_computed[currIter]);
Real_t targetdt = domain->stoptime - domain->time_h;
if ((domain->dtfixed <= Real_t(0.0)) && (domain->cycle != Int_t(0))) {
Real_t ratio ;
/* This will require a reduction in parallel */
Real_t gnewdt = Real_t(1.0e+20) ;
Real_t newdt;
if ( *(domain->dtcourant_h) < gnewdt) {
gnewdt = *(domain->dtcourant_h) / Real_t(2.0) ;
}
if ( *(domain->dthydro_h) < gnewdt) {
gnewdt = *(domain->dthydro_h) * Real_t(2.0) / Real_t(3.0) ;
}
#if USE_MPI
MPI_Allreduce(&gnewdt, &newdt, 1,
((sizeof(Real_t) == 4) ? MPI_FLOAT : MPI_DOUBLE),
MPI_MIN, MPI_COMM_WORLD) ;
#else
newdt = gnewdt;
#endif
Real_t olddt = domain->deltatime_h_async[0];
ratio = newdt / olddt ;
if (ratio >= Real_t(1.0)) {
if (ratio < domain->deltatimemultlb) {
newdt = olddt ;
}
else if (ratio > domain->deltatimemultub) {
newdt = olddt*domain->deltatimemultub ;
}
}
if (newdt > domain->dtmax) {
newdt = domain->dtmax ;
}
domain->deltatime_h_async[0] = newdt ;
}
/* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
if ((targetdt > domain->deltatime_h_async[0]) &&
(targetdt < (Real_t(4.0) * domain->deltatime_h_async[0] / Real_t(3.0))) ) {
targetdt = Real_t(2.0) * domain->deltatime_h_async[0] / Real_t(3.0) ;
}
if (targetdt < domain->deltatime_h_async[0]) {
domain->deltatime_h_async[0] = targetdt ;
}
domain->time_h += domain->deltatime_h_async[0] ;
++domain->cycle ;
cudaMemcpyAsync(domain->deltatime_d_async, domain->deltatime_h_async, 1*sizeof(Real_t), cudaMemcpyHostToDevice, domain->streams[1]);
cudaEventRecord(domain->time_constraint_reduced[currIter], domain->streams[1]);
}
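/* Shape-function derivatives (the B matrix) and element volume from the
   Jacobian cofactors; only nodes 0-3 are computed explicitly, nodes 4-7 follow
   by antisymmetry, and the volume is 8x the Jacobian determinant. */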
__device__
static
__forceinline__
void CalcElemShapeFunctionDerivatives( const Real_t* const x,
const Real_t* const y,
const Real_t* const z,
Real_t b[][8],
Real_t* const volume )
{
const Real_t x0 = x[0] ; const Real_t x1 = x[1] ;
const Real_t x2 = x[2] ; const Real_t x3 = x[3] ;
const Real_t x4 = x[4] ; const Real_t x5 = x[5] ;
const Real_t x6 = x[6] ; const Real_t x7 = x[7] ;
const Real_t y0 = y[0] ; const Real_t y1 = y[1] ;
const Real_t y2 = y[2] ; const Real_t y3 = y[3] ;
const Real_t y4 = y[4] ; const Real_t y5 = y[5] ;
const Real_t y6 = y[6] ; const Real_t y7 = y[7] ;
const Real_t z0 = z[0] ; const Real_t z1 = z[1] ;
const Real_t z2 = z[2] ; const Real_t z3 = z[3] ;
const Real_t z4 = z[4] ; const Real_t z5 = z[5] ;
const Real_t z6 = z[6] ; const Real_t z7 = z[7] ;
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
fjxxi = Real_t(.125) * ( (x6-x0) + (x5-x3) - (x7-x1) - (x4-x2) );
fjxet = Real_t(.125) * ( (x6-x0) - (x5-x3) + (x7-x1) - (x4-x2) );
fjxze = Real_t(.125) * ( (x6-x0) + (x5-x3) + (x7-x1) + (x4-x2) );
fjyxi = Real_t(.125) * ( (y6-y0) + (y5-y3) - (y7-y1) - (y4-y2) );
fjyet = Real_t(.125) * ( (y6-y0) - (y5-y3) + (y7-y1) - (y4-y2) );
fjyze = Real_t(.125) * ( (y6-y0) + (y5-y3) + (y7-y1) + (y4-y2) );
fjzxi = Real_t(.125) * ( (z6-z0) + (z5-z3) - (z7-z1) - (z4-z2) );
fjzet = Real_t(.125) * ( (z6-z0) - (z5-z3) + (z7-z1) - (z4-z2) );
fjzze = Real_t(.125) * ( (z6-z0) + (z5-z3) + (z7-z1) + (z4-z2) );
/* compute cofactors */
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
/* calculate partials :
this need only be done for l = 0,1,2,3 since , by symmetry ,
(6,7,4,5) = - (0,1,2,3) .
*/
b[0][0] = - cjxxi - cjxet - cjxze;
b[0][1] = cjxxi - cjxet - cjxze;
b[0][2] = cjxxi + cjxet - cjxze;
b[0][3] = - cjxxi + cjxet - cjxze;
b[0][4] = -b[0][2];
b[0][5] = -b[0][3];
b[0][6] = -b[0][0];
b[0][7] = -b[0][1];
/*
b[0][4] = - cjxxi - cjxet + cjxze;
b[0][5] = + cjxxi - cjxet + cjxze;
b[0][6] = + cjxxi + cjxet + cjxze;
b[0][7] = - cjxxi + cjxet + cjxze;
*/
b[1][0] = - cjyxi - cjyet - cjyze;
b[1][1] = cjyxi - cjyet - cjyze;
b[1][2] = cjyxi + cjyet - cjyze;
b[1][3] = - cjyxi + cjyet - cjyze;
b[1][4] = -b[1][2];
b[1][5] = -b[1][3];
b[1][6] = -b[1][0];
b[1][7] = -b[1][1];
b[2][0] = - cjzxi - cjzet - cjzze;
b[2][1] = cjzxi - cjzet - cjzze;
b[2][2] = cjzxi + cjzet - cjzze;
b[2][3] = - cjzxi + cjzet - cjzze;
b[2][4] = -b[2][2];
b[2][5] = -b[2][3];
b[2][6] = -b[2][0];
b[2][7] = -b[2][1];
/* calculate jacobian determinant (volume) */
*volume = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
}
static
__device__
__forceinline__
void SumElemFaceNormal(Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
Real_t *normalX1, Real_t *normalY1, Real_t *normalZ1,
Real_t *normalX2, Real_t *normalY2, Real_t *normalZ2,
Real_t *normalX3, Real_t *normalY3, Real_t *normalZ3,
const Real_t x0, const Real_t y0, const Real_t z0,
const Real_t x1, const Real_t y1, const Real_t z1,
const Real_t x2, const Real_t y2, const Real_t z2,
const Real_t x3, const Real_t y3, const Real_t z3)
{
Real_t bisectX0 = Real_t(0.5) * (x3 + x2 - x1 - x0);
Real_t bisectY0 = Real_t(0.5) * (y3 + y2 - y1 - y0);
Real_t bisectZ0 = Real_t(0.5) * (z3 + z2 - z1 - z0);
Real_t bisectX1 = Real_t(0.5) * (x2 + x1 - x3 - x0);
Real_t bisectY1 = Real_t(0.5) * (y2 + y1 - y3 - y0);
Real_t bisectZ1 = Real_t(0.5) * (z2 + z1 - z3 - z0);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
*normalX0 += areaX;
*normalX1 += areaX;
*normalX2 += areaX;
*normalX3 += areaX;
*normalY0 += areaY;
*normalY1 += areaY;
*normalY2 += areaY;
*normalY3 += areaY;
*normalZ0 += areaZ;
*normalZ1 += areaZ;
*normalZ2 += areaZ;
*normalZ3 += areaZ;
}
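/* Warp-per-element variant of SumElemFaceNormal: each of the 8 lanes owns one
   node, the face-bisector contributions are combined across lanes with
   SumOverNodesShfl, and only the four lanes belonging to the face accumulate
   the resulting area into their normals. */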
static
__device__
__forceinline__
void SumElemFaceNormal_warp_per_4cell(
Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
const Real_t x, const Real_t y, const Real_t z,
int node,
int n0, int n1, int n2, int n3)
{
Real_t coef0 = Real_t(0.5);
Real_t coef1 = Real_t(0.5);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
if (node == n0 || node == n1)
coef0 = -coef0;
if (node == n0 || node == n3)
coef1 = -coef1;
}
else
{
coef0 = Real_t(0.);
coef1 = Real_t(0.);
}
Real_t bisectX0 = coef0*x;
Real_t bisectY0 = coef0*y;
Real_t bisectZ0 = coef0*z;
Real_t bisectX1 = coef1*x;
Real_t bisectY1 = coef1*y;
Real_t bisectZ1 = coef1*z;
SumOverNodesShfl(bisectX0);
SumOverNodesShfl(bisectY0);
SumOverNodesShfl(bisectZ0);
SumOverNodesShfl(bisectX1);
SumOverNodesShfl(bisectY1);
SumOverNodesShfl(bisectZ1);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
*normalX0 += areaX;
*normalY0 += areaY;
*normalZ0 += areaZ;
}
}
__device__
static inline
void CalcElemNodeNormals(Real_t pfx[8],
Real_t pfy[8],
Real_t pfz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
for (Index_t i = 0 ; i < 8 ; ++i) {
pfx[i] = Real_t(0.0);
pfy[i] = Real_t(0.0);
pfz[i] = Real_t(0.0);
}
/* evaluate face one: nodes 0, 1, 2, 3 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[1], &pfy[1], &pfz[1],
&pfx[2], &pfy[2], &pfz[2],
&pfx[3], &pfy[3], &pfz[3],
x[0], y[0], z[0], x[1], y[1], z[1],
x[2], y[2], z[2], x[3], y[3], z[3]);
/* evaluate face two: nodes 0, 4, 5, 1 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[4], &pfy[4], &pfz[4],
&pfx[5], &pfy[5], &pfz[5],
&pfx[1], &pfy[1], &pfz[1],
x[0], y[0], z[0], x[4], y[4], z[4],
x[5], y[5], z[5], x[1], y[1], z[1]);
/* evaluate face three: nodes 1, 5, 6, 2 */
SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1],
&pfx[5], &pfy[5], &pfz[5],
&pfx[6], &pfy[6], &pfz[6],
&pfx[2], &pfy[2], &pfz[2],
x[1], y[1], z[1], x[5], y[5], z[5],
x[6], y[6], z[6], x[2], y[2], z[2]);
/* evaluate face four: nodes 2, 6, 7, 3 */
SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2],
&pfx[6], &pfy[6], &pfz[6],
&pfx[7], &pfy[7], &pfz[7],
&pfx[3], &pfy[3], &pfz[3],
x[2], y[2], z[2], x[6], y[6], z[6],
x[7], y[7], z[7], x[3], y[3], z[3]);
/* evaluate face five: nodes 3, 7, 4, 0 */
SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3],
&pfx[7], &pfy[7], &pfz[7],
&pfx[4], &pfy[4], &pfz[4],
&pfx[0], &pfy[0], &pfz[0],
x[3], y[3], z[3], x[7], y[7], z[7],
x[4], y[4], z[4], x[0], y[0], z[0]);
/* evaluate face six: nodes 4, 7, 6, 5 */
SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4],
&pfx[7], &pfy[7], &pfz[7],
&pfx[6], &pfy[6], &pfz[6],
&pfx[5], &pfy[5], &pfz[5],
x[4], y[4], z[4], x[7], y[7], z[7],
x[6], y[6], z[6], x[5], y[5], z[5]);
}
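// One thread per node: gathers the per-element corner forces (fx/fy/fz_elem)
// listed in nodeElemCornerList and writes the summed nodal forces.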
__global__
void AddNodeForcesFromElems_kernel( Index_t numNode,
Index_t padded_numNode,
const Int_t* nodeElemCount,
const Int_t* nodeElemStart,
const Index_t* nodeElemCornerList,
const Real_t* fx_elem,
const Real_t* fy_elem,
const Real_t* fz_elem,
Real_t* fx_node,
Real_t* fy_node,
Real_t* fz_node,
const Int_t num_threads)
{
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < num_threads)
{
Index_t g_i = tid;
Int_t count=nodeElemCount[g_i];
Int_t start=nodeElemStart[g_i];
Real_t fx,fy,fz;
fx=fy=fz=Real_t(0.0);
for (int j=0;j<count;j++)
{
Index_t pos=nodeElemCornerList[start+j]; // Uncoalesced access here
fx += fx_elem[pos];
fy += fy_elem[pos];
fz += fz_elem[pos];
}
fx_node[g_i]=fx;
fy_node[g_i]=fy;
fz_node[g_i]=fz;
}
}
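// Partial derivatives of the element volume with respect to one node's
// coordinates, built from six neighboring nodes and scaled by 1/12.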
static
__device__
__forceinline__
void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2,
const Real_t x3, const Real_t x4, const Real_t x5,
const Real_t y0, const Real_t y1, const Real_t y2,
const Real_t y3, const Real_t y4, const Real_t y5,
const Real_t z0, const Real_t z1, const Real_t z2,
const Real_t z3, const Real_t z4, const Real_t z5,
Real_t* dvdx, Real_t* dvdy, Real_t* dvdz)
{
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
*dvdx =
(y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
(y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
(y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
*dvdy =
- (x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
(x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
(x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
*dvdz =
- (y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
(y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
(y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);
*dvdx *= twelfth;
*dvdy *= twelfth;
*dvdz *= twelfth;
}
static
__device__
__forceinline__
void CalcElemVolumeDerivative(Real_t dvdx[8],
Real_t dvdy[8],
Real_t dvdz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
VoluDer(x[1], x[2], x[3], x[4], x[5], x[7],
y[1], y[2], y[3], y[4], y[5], y[7],
z[1], z[2], z[3], z[4], z[5], z[7],
&dvdx[0], &dvdy[0], &dvdz[0]);
VoluDer(x[0], x[1], x[2], x[7], x[4], x[6],
y[0], y[1], y[2], y[7], y[4], y[6],
z[0], z[1], z[2], z[7], z[4], z[6],
&dvdx[3], &dvdy[3], &dvdz[3]);
VoluDer(x[3], x[0], x[1], x[6], x[7], x[5],
y[3], y[0], y[1], y[6], y[7], y[5],
z[3], z[0], z[1], z[6], z[7], z[5],
&dvdx[2], &dvdy[2], &dvdz[2]);
VoluDer(x[2], x[3], x[0], x[5], x[6], x[4],
y[2], y[3], y[0], y[5], y[6], y[4],
z[2], z[3], z[0], z[5], z[6], z[4],
&dvdx[1], &dvdy[1], &dvdz[1]);
VoluDer(x[7], x[6], x[5], x[0], x[3], x[1],
y[7], y[6], y[5], y[0], y[3], y[1],
z[7], z[6], z[5], z[0], z[3], z[1],
&dvdx[4], &dvdy[4], &dvdz[4]);
VoluDer(x[4], x[7], x[6], x[1], x[0], x[2],
y[4], y[7], y[6], y[1], y[0], y[2],
z[4], z[7], z[6], z[1], z[0], z[2],
&dvdx[5], &dvdy[5], &dvdz[5]);
VoluDer(x[5], x[4], x[7], x[2], x[1], x[3],
y[5], y[4], y[7], y[2], y[1], y[3],
z[5], z[4], z[7], z[2], z[1], z[3],
&dvdx[6], &dvdy[6], &dvdz[6]);
VoluDer(x[6], x[5], x[4], x[3], x[2], x[0],
y[6], y[5], y[4], y[3], y[2], y[0],
z[6], z[5], z[4], z[3], z[2], z[0],
&dvdx[7], &dvdy[7], &dvdz[7]);
}
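// Projects the nodal velocities onto the four hourglass mode vectors (hourgam)
// and accumulates the resulting resisting forces, scaled by coefficient,
// into hgfx/hgfy/hgfz.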
static
__device__
__forceinline__
void CalcElemFBHourglassForce(Real_t *xd, Real_t *yd, Real_t *zd, Real_t *hourgam0,
Real_t *hourgam1, Real_t *hourgam2, Real_t *hourgam3,
Real_t *hourgam4, Real_t *hourgam5, Real_t *hourgam6,
Real_t *hourgam7, Real_t coefficient,
Real_t *hgfx, Real_t *hgfy, Real_t *hgfz )
{
Index_t i00=0;
Index_t i01=1;
Index_t i02=2;
Index_t i03=3;
Real_t h00 =
hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] +
hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] +
hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] +
hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7];
Real_t h01 =
hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] +
hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] +
hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] +
hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7];
Real_t h02 =
hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1]+
hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3]+
hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5]+
hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7];
Real_t h03 =
hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] +
hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] +
hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] +
hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7];
hgfx[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfx[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfx[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfx[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfx[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfx[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfx[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfx[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] +
hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] +
hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] +
hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7];
h01 =
hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] +
hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] +
hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] +
hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7];
h02 =
hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1]+
hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3]+
hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5]+
hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7];
h03 =
hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] +
hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] +
hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] +
hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7];
hgfy[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfy[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfy[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfy[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfy[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfy[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfy[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfy[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] +
hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] +
hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] +
hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7];
h01 =
hourgam0[i01] * zd[0] + hourgam1[i01] * zd[1] +
hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] +
hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] +
hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7];
h02 =
hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1]+
hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3]+
hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5]+
hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7];
h03 =
hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] +
hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] +
hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] +
hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7];
hgfz[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfz[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfz[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfz[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfz[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfz[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfz[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfz[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
}
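// Builds the 8x4 hourglass shape vectors: the base +/-1 gamma patterns
// corrected with the element volume derivatives (the Flanagan-Belytschko
// construction consumed by CalcElemFBHourglassForce).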
__device__
__forceinline__
void CalcHourglassModes(const Real_t xn[8], const Real_t yn[8], const Real_t zn[8],
const Real_t dvdxn[8], const Real_t dvdyn[8], const Real_t dvdzn[8],
Real_t hourgam[8][4], Real_t volinv)
{
Real_t hourmodx, hourmody, hourmodz;
hourmodx = xn[0] + xn[1] - xn[2] - xn[3] - xn[4] - xn[5] + xn[6] + xn[7];
hourmody = yn[0] + yn[1] - yn[2] - yn[3] - yn[4] - yn[5] + yn[6] + yn[7];
hourmodz = zn[0] + zn[1] - zn[2] - zn[3] - zn[4] - zn[5] + zn[6] + zn[7]; // 21
hourgam[0][0] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][0] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][0] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][0] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][0] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][0] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][0] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][0] = 1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz); // 60
hourmodx = xn[0] - xn[1] - xn[2] + xn[3] - xn[4] + xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] - yn[2] + yn[3] - yn[4] + yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] - zn[2] + zn[3] - zn[4] + zn[5] + zn[6] - zn[7];
hourgam[0][1] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][1] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][1] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][1] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][1] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][1] = 1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][1] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][1] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = xn[0] - xn[1] + xn[2] - xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] + yn[2] - yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] + zn[2] - zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][2] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][2] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][2] = 1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][2] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][2] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][2] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][2] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][2] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = -xn[0] + xn[1] - xn[2] + xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = -yn[0] + yn[1] - yn[2] + yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = -zn[0] + zn[1] - zn[2] + zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][3] = -1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][3] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][3] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][3] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][3] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][3] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][3] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][3] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
}
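// One thread per element: loads the eight nodes, computes volume derivatives,
// hourglass modes, and stress-based forces via the node normals, optionally
// adds FB hourglass forces, and stores per-corner forces (double precision)
// or atomically accumulates nodal forces (single precision). Negative volumes
// are flagged through bad_vol.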
template< bool hourg_gt_zero >
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,4)
#else
__launch_bounds__(64,8)
#endif
void CalcVolumeForceForElems_kernel(
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
const Real_t* __restrict__ x, const Real_t* __restrict__ y, const Real_t* __restrict__ z,
const Real_t* __restrict__ xd, const Real_t* __restrict__ yd, const Real_t* __restrict__ zd,
//TextureObj<Real_t> x, TextureObj<Real_t> y, TextureObj<Real_t> z,
//TextureObj<Real_t> xd, TextureObj<Real_t> yd, TextureObj<Real_t> zd,
//TextureObj<Real_t>* x, TextureObj<Real_t>* y, TextureObj<Real_t>* z,
//TextureObj<Real_t>* xd, TextureObj<Real_t>* yd, TextureObj<Real_t>* zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)
{
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
Real_t xn[8],yn[8],zn[8];;
Real_t xdn[8],ydn[8],zdn[8];;
Real_t dvdxn[8],dvdyn[8],dvdzn[8];;
Real_t hgfx[8],hgfy[8],hgfz[8];;
Real_t hourgam[8][4];
Real_t coefficient;
int elem=blockDim.x*blockIdx.x+threadIdx.x;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t n[8];
#pragma unroll
for (int i=0;i<8;i++) {
n[i] = nodelist[elem+i*padded_numElem];
}
Real_t volinv = Real_t(1.0) / det;
//#pragma unroll
//for (int i=0;i<8;i++) {
// xn[i] =x[n[i]];
// yn[i] =y[n[i]];
// zn[i] =z[n[i]];
//}
#pragma unroll
for (int i=0;i<8;i++)
xn[i] =x[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
yn[i] =y[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zn[i] =z[n[i]];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
CalcElemVolumeDerivative(dvdxn, dvdyn, dvdzn, xn, yn, zn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
CalcHourglassModes(xn,yn,zn,dvdxn,dvdyn,dvdzn,hourgam,volinv);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t B[3][8];
CalcElemShapeFunctionDerivatives(xn, yn, zn, B, &det);
CalcElemNodeNormals( B[0] , B[1], B[2], xn, yn, zn);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
#pragma unroll
for (int i=0;i<8;i++)
{
hgfx[i] = -( sigxx*B[0][i] );
hgfy[i] = -( sigxx*B[1][i] );
hgfz[i] = -( sigxx*B[2][i] );
}
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
// #pragma unroll
// for (int i=0;i<8;i++) {
// xdn[i] =xd[n[i]];
// ydn[i] =yd[n[i]];
// zdn[i] =zd[n[i]];
// }
#pragma unroll
for (int i=0;i<8;i++)
xdn[i] =xd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
ydn[i] =yd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zdn[i] =zd[n[i]];
CalcElemFBHourglassForce
( &xdn[0],&ydn[0],&zdn[0],
hourgam[0],hourgam[1],hourgam[2],hourgam[3],
hourgam[4],hourgam[5],hourgam[6],hourgam[7],
coefficient,
&hgfx[0],&hgfy[0],&hgfz[0]
);
}
#ifdef DOUBLE_PRECISION
#pragma unroll
for (int node=0;node<8;node++)
{
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx[node];
fy_elem[store_loc]=hgfy[node];
fz_elem[store_loc]=hgfz[node];
}
#else
#pragma unroll
for (int i=0;i<8;i++)
{
Index_t ni= n[i];
atomicAdd(&fx_node[ni],hgfx[i]);
atomicAdd(&fy_node[ni],hgfy[i]);
atomicAdd(&fz_node[ni],hgfz[i]);
}
#endif
} // If elem < numElem
}
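// Alternative layout: eight threads per element (elem = tid>>3, node = tid&7),
// with warp shuffles replacing the per-thread local arrays of the kernel above.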
template< bool hourg_gt_zero, int cta_size>
__global__
void CalcVolumeForceForElems_kernel_warp_per_4cell(
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
//const Real_t __restrict__ *x, const Real_t __restrict__ *y, const Real_t __restrict__ *z,
//const Real_t __restrict__ *xd, const Real_t __restrict__ *yd, const Real_t __restrict__ *zd,
const Real_t *x, const Real_t *y, const Real_t *z,
const Real_t *xd, const Real_t *yd, const Real_t *zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)
{
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
Real_t xn,yn,zn;
Real_t xdn,ydn,zdn;
Real_t dvdxn,dvdyn,dvdzn;
Real_t hgfx,hgfy,hgfz;
Real_t hourgam[4];
Real_t coefficient;
int tid=blockDim.x*blockIdx.x+threadIdx.x;
int elem = tid >> 3; // elem = tid/8
int node = tid & 7; // node = tid%8
// elem within cta
// int cta_elem = threadIdx.x/8;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t node_id;
node_id = nodelist[elem+node*padded_numElem];
Real_t volinv = Real_t(1.0) / det;
xn =x[node_id];
yn =y[node_id];
zn =z[node_id];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
unsigned int ind0,ind1,ind2,ind3,ind4,ind5;
// Use octal numbers to represent the indices for each node
//ind0 = 012307456;
//ind1 = 023016745;
//ind2 = 030125674;
//ind3 = 045670123;
//ind4 = 056743012;
//ind5 = 074561230;
//int mask = 7u << (3*node);
switch(node) {
case 0:
{ind0=1; ind1=2; ind2=3; ind3=4; ind4=5; ind5=7;
break;}
case 1:
{ind0=2; ind1=3; ind2=0; ind3=5; ind4=6; ind5=4;
break;}
case 2:
{ind0=3; ind1=0; ind2=1; ind3=6; ind4=7; ind5=5;
break;}
case 3:
{ind0=0; ind1=1; ind2=2; ind3=7; ind4=4; ind5=6;
break;}
case 4:
{ind0=7; ind1=6; ind2=5; ind3=0; ind4=3; ind5=1;
break;}
case 5:
{ind0=4; ind1=7; ind2=6; ind3=1; ind4=0; ind5=2;
break;}
case 6:
{ind0=5; ind1=4; ind2=7; ind3=2; ind4=1; ind5=3;
break;}
case 7:
{ind0=6; ind1=5; ind2=4; ind3=3; ind4=2; ind5=0;
break;}
}
VOLUDER(utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
dvdxn);
VOLUDER(utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
dvdyn);
VOLUDER(utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
dvdzn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
Real_t hourmodx, hourmody, hourmodz;
const Real_t posf = Real_t( 1.);
const Real_t negf = Real_t(-1.);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==2 || node==3 || node==4 || node==5) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[0] = negf;
}
else hourgam[0] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[0] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==2 || node==4 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[1] = negf;
}
else hourgam[1] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[1] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==3 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[2] = negf;
}
else hourgam[2] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[2] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==0 || node==2 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[3] = negf;
}
else hourgam[3] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[3] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t b[3];
/*************************************************/
//CalcElemShapeFunctionDerivatives_warp_per_4cell(xn, yn, zn, B, &det);
/*************************************************/
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
fjxxi = fjxet = fjxze = Real_t(0.125)*xn;
fjyxi = fjyet = fjyze = Real_t(0.125)*yn;
fjzxi = fjzet = fjzze = Real_t(0.125)*zn;
if (node==0 || node==3 || node==7 || node==4)
{
fjxxi = -fjxxi;
fjyxi = -fjyxi;
fjzxi = -fjzxi;
}
if (node==0 || node==5 || node==1 || node==4)
{
fjxet = -fjxet;
fjyet = -fjyet;
fjzet = -fjzet;
}
if (node==0 || node==3 || node==1 || node==2)
{
fjxze = -fjxze;
fjyze = -fjyze;
fjzze = -fjzze;
}
SumOverNodesShfl(fjxxi);
SumOverNodesShfl(fjxet);
SumOverNodesShfl(fjxze);
SumOverNodesShfl(fjyxi);
SumOverNodesShfl(fjyet);
SumOverNodesShfl(fjyze);
SumOverNodesShfl(fjzxi);
SumOverNodesShfl(fjzet);
SumOverNodesShfl(fjzze);
/* compute cofactors */
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
Real_t coef_xi, coef_et, coef_ze;
if (node==0 || node==3 || node==4 || node==7)
coef_xi = Real_t(-1.);
else
coef_xi = Real_t(1.);
if (node==0 || node==1 || node==4 || node==5)
coef_et = Real_t(-1.);
else
coef_et = Real_t(1.);
if (node==0 || node==1 || node==2 || node==3)
coef_ze = Real_t(-1.);
else
coef_ze = Real_t(1.);
/* calculate partials:
these need only be done for l = 0,1,2,3 since, by symmetry,
(6,7,4,5) = -(0,1,2,3).
*/
b[0] = coef_xi * cjxxi + coef_et * cjxet + coef_ze * cjxze;
b[1] = coef_xi * cjyxi + coef_et * cjyet + coef_ze * cjyze;
b[2] = coef_xi * cjzxi + coef_et * cjzet + coef_ze * cjzze;
/* calculate jacobian determinant (volume) */
det = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
/*************************************************/
//CalcElemNodeNormals_warp_per_4cell( B[0] , B[1], B[2], xn, yn, zn);
/*************************************************/
b[0] = Real_t(0.0);
b[1] = Real_t(0.0);
b[2] = Real_t(0.0);
// Six faces; a face contributes to this node's normal only if the node lies on it
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,1,2,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,4,5,1);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 1,5,6,2);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 2,6,7,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 3,7,4,0);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 4,7,6,5);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
hgfx = -( sigxx*b[0] );
hgfy = -( sigxx*b[1] );
hgfz = -( sigxx*b[2] );
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
xdn = xd[node_id];
ydn = yd[node_id];
zdn = zd[node_id];
Real_t hgfx_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*xdn;
SumOverNodesShfl(h);
hgfx_temp+=hourgam[i]*h;
}
hgfx_temp *= coefficient;
hgfx += hgfx_temp;
Real_t hgfy_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*ydn;
SumOverNodesShfl(h);
hgfy_temp+=hourgam[i]*h;
}
hgfy_temp *= coefficient;
hgfy += hgfy_temp;
Real_t hgfz_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*zdn;
SumOverNodesShfl(h);
hgfz_temp+=hourgam[i]*h;
}
hgfz_temp *= coefficient;
hgfz += hgfz_temp;
}
#ifdef DOUBLE_PRECISION
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx;
fy_elem[store_loc]=hgfy;
fz_elem[store_loc]=hgfz;
#else
atomicAdd(&fx_node[node_id],hgfx);
atomicAdd(&fy_node[node_id],hgfy);
atomicAdd(&fz_node[node_id],hgfz);
#endif
} // If elem < numElem
}
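// Host wrapper: launches the volume-force kernel with hourglass control
// compiled in or out, then (double precision only) gathers the per-corner
// element forces into nodal forces.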
static inline
void CalcVolumeForceForElems(const Real_t hgcoef,Domain *domain)
{
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
/*
#ifdef DOUBLE_PRECISION
Vector_d<Real_t>* fx_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fy_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fz_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
#else
thrust::fill(domain->fx.begin(),domain->fx.end(),0.);
thrust::fill(domain->fy.begin(),domain->fy.end(),0.);
thrust::fill(domain->fz.begin(),domain->fz.end(),0.);
#endif
*/
int num_threads = numElem ;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
bool hourg_gt_zero = hgcoef > Real_t(0.0);
if (hourg_gt_zero)
{
CalcVolumeForceForElems_kernel<true> <<<dimGrid,block_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
domain->fx_elem->raw(),
domain->fy_elem->raw(),
domain->fz_elem->raw(),
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
else
{
CalcVolumeForceForElems_kernel<false> <<<dimGrid,block_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
domain->fx_elem->raw(),
domain->fy_elem->raw(),
domain->fz_elem->raw(),
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
#ifdef DOUBLE_PRECISION
num_threads = domain->numNode;
// Launch boundary nodes first
dimGrid= PAD_DIV(num_threads,block_size);
AddNodeForcesFromElems_kernel<<<dimGrid,block_size>>>
( domain->numNode,
domain->padded_numNode,
domain->nodeElemCount.raw(),
domain->nodeElemStart.raw(),
domain->nodeElemCornerList.raw(),
domain->fx_elem->raw(),
domain->fy_elem->raw(),
domain->fz_elem->raw(),
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
num_threads
);
// cudaDeviceSynchronize();
// cudaCheckError();
// Allocator<Vector_d<Real_t> >::free(fx_elem,padded_numElem*8);
// Allocator<Vector_d<Real_t> >::free(fy_elem,padded_numElem*8);
// Allocator<Vector_d<Real_t> >::free(fz_elem,padded_numElem*8);
#endif // ifdef DOUBLE_PRECISION
return ;
}
/*
static inline
void CalcVolumeForceForElems_warp_per_4cell(const Real_t hgcoef,Domain *domain)
{
// Map one warp per 4 cells, i.e. one thread per vertex
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
#ifdef DOUBLE_PRECISION
Vector_d<Real_t>* fx_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fy_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fz_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
#else
thrust::fill(domain->fx.begin(),domain->fx.end(),0.);
thrust::fill(domain->fy.begin(),domain->fy.end(),0.);
thrust::fill(domain->fz.begin(),domain->fz.end(),0.);
#endif
const int warps_per_cta = 2;
const int cta_size = warps_per_cta * 32;
int num_threads = numElem*8;
int dimGrid = PAD_DIV(num_threads,cta_size);
bool hourg_gt_zero = hgcoef > Real_t(0.0);
if (hourg_gt_zero)
{
CalcVolumeForceForElems_kernel_warp_per_4cell<true, cta_size> <<<dimGrid,cta_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(),
domain->y.raw(),
domain->z.raw(),
domain->xd.raw(),
domain->yd.raw(),
domain->zd.raw(),
//domain->tex_x, domain->tex_y, domain->tex_z, domain->tex_xd, domain->tex_yd, domain->tex_zd,
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
else
{
CalcVolumeForceForElems_kernel_warp_per_4cell<false, cta_size> <<<dimGrid,cta_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(),
domain->y.raw(),
domain->z.raw(),
domain->xd.raw(),
domain->yd.raw(),
domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
#ifdef DOUBLE_PRECISION
num_threads = domain->numNode;
// Launch boundary nodes first
dimGrid= PAD_DIV(num_threads,cta_size);
AddNodeForcesFromElems_kernel<<<dimGrid,cta_size>>>
( domain->numNode,
domain->padded_numNode,
domain->nodeElemCount.raw(),
domain->nodeElemStart.raw(),
domain->nodeElemCornerList.raw(),
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw(),
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
num_threads
);
//cudaDeviceSynchronize();
//cudaCheckError();
Allocator<Vector_d<Real_t> >::free(fx_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fy_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fz_elem,padded_numElem*8);
#endif // ifdef DOUBLE_PRECISION
return ;
}
*/
static inline
void CalcVolumeForceForElems(Domain* domain)
{
const Real_t hgcoef = domain->hgcoef ;
CalcVolumeForceForElems(hgcoef,domain);
//CalcVolumeForceForElems_warp_per_4cell(hgcoef,domain);
}
static inline void checkErrors(Domain* domain,int its,int myRank)
{
if (*(domain->bad_vol_h) != -1)
{
printf("Rank %i: Volume Error in cell %d at iteration %d\n",myRank,*(domain->bad_vol_h),its);
exit(VolumeError);
}
if (*(domain->bad_q_h) != -1)
{
printf("Rank %i: Q Error in cell %d at iteration %d\n",myRank,*(domain->bad_q_h),its);
exit(QStopError);
}
}
static inline void CalcForceForNodes(Domain *domain)
{
#if USE_MPI
CommRecv(*domain, MSG_COMM_SBN, 3,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, 0) ;
#endif
CalcVolumeForceForElems(domain);
// moved here from the main loop to allow async execution with GPU work
// TimeIncrement(domain);
#if USE_MPI
// initialize pointers
/*
domain->d_fx = domain->fx.raw();
domain->d_fy = domain->fy.raw();
domain->d_fz = domain->fz.raw();
*/
Domain_member fieldData[3] ;
fieldData[0] = &Domain::get_fx ;
fieldData[1] = &Domain::get_fy ;
fieldData[2] = &Domain::get_fz ;
CommSendGpu(*domain, MSG_COMM_SBN, 3, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, domain->streams[2], 0) ;
CommSBNGpu(*domain, 3, fieldData, &domain->streams[2], 0) ;
#endif
}
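// a = F / m for every node.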
__global__
void CalcAccelerationForNodes_kernel(int numNode,
Real_t *xdd, Real_t *ydd, Real_t *zdd,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *nodalMass)
{
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < numNode)
{
Real_t one_over_nMass = Real_t(1.)/nodalMass[tid];
xdd[tid]=fx[tid]*one_over_nMass;
ydd[tid]=fy[tid]*one_over_nMass;
zdd[tid]=fz[tid]*one_over_nMass;
}
}
static inline
void CalcAccelerationForNodes(Domain *domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
CalcAccelerationForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numNode,
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw(),
domain->fx.raw(),domain->fy.raw(),domain->fz.raw(),
domain->nodalMass.raw());
//cudaDeviceSynchronize();
//cudaCheckError();
}
__global__
void ApplyAccelerationBoundaryConditionsForNodes_kernel(
int numNodeBC, Real_t *xyzdd,
Index_t *symm)
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNodeBC)
{
xyzdd[symm[i]] = Real_t(0.0) ;
}
}
static inline
void ApplyAccelerationBoundaryConditionsForNodes(Domain *domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numSymmX,dimBlock);
if (domain->numSymmX > 0)
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numSymmX,
domain->xdd.raw(),
domain->symmX.raw());
dimGrid = PAD_DIV(domain->numSymmY,dimBlock);
if (domain->numSymmY > 0)
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numSymmY,
domain->ydd.raw(),
domain->symmY.raw());
dimGrid = PAD_DIV(domain->numSymmZ,dimBlock);
if (domain->numSymmZ > 0)
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numSymmZ,
domain->zdd.raw(),
domain->symmZ.raw());
}
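// Integrates nodal velocities (v += a*dt, zeroed below u_cut) and positions
// (x += v*dt) in a single pass.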
__global__
void CalcPositionAndVelocityForNodes_kernel(int numNode,
const Real_t * deltatime,
const Real_t u_cut,
Real_t* __restrict__ x, Real_t* __restrict__ y, Real_t* __restrict__ z,
Real_t* __restrict__ xd, Real_t* __restrict__ yd, Real_t* __restrict__ zd,
const Real_t* __restrict__ xdd, const Real_t* __restrict__ ydd, const Real_t* __restrict__ zdd)
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNode)
{
Real_t xdtmp, ydtmp, zdtmp, dt;
dt = deltatime[0];
xdtmp = xd[i] + xdd[i] * dt ;
ydtmp = yd[i] + ydd[i] * dt ;
zdtmp = zd[i] + zdd[i] * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = Real_t(0.0);
if( FABS(ydtmp) < u_cut ) ydtmp = Real_t(0.0);
if( FABS(zdtmp) < u_cut ) zdtmp = Real_t(0.0);
x[i] += xdtmp * dt;
y[i] += ydtmp * dt;
z[i] += zdtmp * dt;
xd[i] = xdtmp;
yd[i] = ydtmp;
zd[i] = zdtmp;
}
}
static inline
void CalcPositionAndVelocityForNodes(const Real_t u_cut, Domain* domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
// cudaEventSynchronize(domain->time_constraint_reduced[currIter]);
cudaStreamWaitEvent(NULL, domain->time_constraint_reduced[currIter], 0);
CalcPositionAndVelocityForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numNode, domain->deltatime_d_async,u_cut,
domain->x.raw(),domain->y.raw(),domain->z.raw(),
domain->xd.raw(),domain->yd.raw(),domain->zd.raw(),
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw());
//cudaDeviceSynchronize();
//cudaCheckError();
}
static inline
void LagrangeNodal(Domain *domain)
{
#ifdef SEDOV_SYNC_POS_VEL_EARLY
Domain_member fieldData[6] ;
#endif
Real_t u_cut = domain->u_cut ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
CalcForceForNodes(domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
//printf("Rank %d CommRecv di LagrangeNodal\n",globalRank);
CommRecv(*domain, MSG_SYNC_POS_VEL, 6,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, 0) ;
//printf("Rank %d Fuori CommRecv di LagrangeNodal\n", globalRank);
#endif
#endif
CalcAccelerationForNodes(domain);
ApplyAccelerationBoundaryConditionsForNodes(domain);
CalcPositionAndVelocityForNodes(u_cut, domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
// initialize pointers
domain->d_x = domain->x.raw();
domain->d_y = domain->y.raw();
domain->d_z = domain->z.raw();
domain->d_xd = domain->xd.raw();
domain->d_yd = domain->yd.raw();
domain->d_zd = domain->zd.raw();
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
CommSendGpu(*domain, MSG_SYNC_POS_VEL, 6, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, domain->streams[2], 0) ;
CommSyncPosVelGpu(*domain, &domain->streams[2], 0) ;
#endif
#endif
return;
}
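// Returns a squared-area measure of the quad face (|f|^2 |g|^2 - (f.g)^2);
// the square root is applied later in CalcElemCharacteristicLength.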
__device__
static inline
Real_t AreaFace( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3)
{
Real_t fx = (x2 - x0) - (x3 - x1);
Real_t fy = (y2 - y0) - (y3 - y1);
Real_t fz = (z2 - z0) - (z3 - z1);
Real_t gx = (x2 - x0) + (x3 - x1);
Real_t gy = (y2 - y0) + (y3 - y1);
Real_t gz = (z2 - z0) + (z3 - z1);
Real_t temp = (fx * gx + fy * gy + fz * gz);
Real_t area =
(fx * fx + fy * fy + fz * fz) *
(gx * gx + gy * gy + gz * gz) -
temp * temp;
return area ;
}
__device__
static inline
Real_t CalcElemCharacteristicLength( const Real_t x[8],
const Real_t y[8],
const Real_t z[8],
const Real_t volume)
{
Real_t a, charLength = Real_t(0.0);
a = AreaFace(x[0],x[1],x[2],x[3],
y[0],y[1],y[2],y[3],
z[0],z[1],z[2],z[3]) ; // 38
charLength = FMAX(a,charLength) ;
a = AreaFace(x[4],x[5],x[6],x[7],
y[4],y[5],y[6],y[7],
z[4],z[5],z[6],z[7]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[0],x[1],x[5],x[4],
y[0],y[1],y[5],y[4],
z[0],z[1],z[5],z[4]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[1],x[2],x[6],x[5],
y[1],y[2],y[6],y[5],
z[1],z[2],z[6],z[5]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[2],x[3],x[7],x[6],
y[2],y[3],y[7],y[6],
z[2],z[3],z[7],z[6]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[3],x[0],x[4],x[7],
y[3],y[0],y[4],y[7],
z[3],z[0],z[4],z[7]) ;
charLength = FMAX(a,charLength) ;
charLength = Real_t(4.0) * volume / SQRT(charLength);
return charLength;
}
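// Strain-rate tensor from nodal velocities and shape-function derivatives B:
// diagonal terms in d[0..2], symmetrized off-diagonal terms in d[3..5].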
__device__
static
__forceinline__
void CalcElemVelocityGradient( const Real_t* const xvel,
const Real_t* const yvel,
const Real_t* const zvel,
const Real_t b[][8],
const Real_t detJ,
Real_t* const d )
{
const Real_t inv_detJ = Real_t(1.0) / detJ ;
Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
const Real_t* const pfx = b[0];
const Real_t* const pfy = b[1];
const Real_t* const pfz = b[2];
Real_t tmp1 = (xvel[0]-xvel[6]);
Real_t tmp2 = (xvel[1]-xvel[7]);
Real_t tmp3 = (xvel[2]-xvel[4]);
Real_t tmp4 = (xvel[3]-xvel[5]);
d[0] = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dxddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dxddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (yvel[0]-yvel[6]);
tmp2 = (yvel[1]-yvel[7]);
tmp3 = (yvel[2]-yvel[4]);
tmp4 = (yvel[3]-yvel[5]);
d[1] = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dyddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dyddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (zvel[0]-zvel[6]);
tmp2 = (zvel[1]-zvel[7]);
tmp3 = (zvel[2]-zvel[4]);
tmp4 = (zvel[3]-zvel[5]);
d[2] = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
dzddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dzddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
d[5] = Real_t( .5) * ( dxddy + dyddx );
d[4] = Real_t( .5) * ( dxddz + dzddx );
d[3] = Real_t( .5) * ( dzddy + dyddz );
}
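// Coordinate (delx_*) and velocity (delv_*) gradients in the xi/eta/zeta
// directions, used by the monotonic artificial-viscosity limiter.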
static __device__ __forceinline__
void CalcMonoGradient(Real_t *x, Real_t *y, Real_t *z,
Real_t *xv, Real_t *yv, Real_t *zv,
Real_t vol,
Real_t *delx_zeta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delv_xi,
Real_t *delx_eta,
Real_t *delv_eta)
{
#define SUM4(a,b,c,d) (a + b + c + d)
const Real_t ptiny = Real_t(1.e-36) ;
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*(SUM4(x[0],x[1],x[5],x[4]) - SUM4(x[3],x[2],x[6],x[7])) ;
Real_t dyj = Real_t(-0.25)*(SUM4(y[0],y[1],y[5],y[4]) - SUM4(y[3],y[2],y[6],y[7])) ;
Real_t dzj = Real_t(-0.25)*(SUM4(z[0],z[1],z[5],z[4]) - SUM4(z[3],z[2],z[6],z[7])) ;
Real_t dxi = Real_t( 0.25)*(SUM4(x[1],x[2],x[6],x[5]) - SUM4(x[0],x[3],x[7],x[4])) ;
Real_t dyi = Real_t( 0.25)*(SUM4(y[1],y[2],y[6],y[5]) - SUM4(y[0],y[3],y[7],y[4])) ;
Real_t dzi = Real_t( 0.25)*(SUM4(z[1],z[2],z[6],z[5]) - SUM4(z[0],z[3],z[7],z[4])) ;
Real_t dxk = Real_t( 0.25)*(SUM4(x[4],x[5],x[6],x[7]) - SUM4(x[0],x[1],x[2],x[3])) ;
Real_t dyk = Real_t( 0.25)*(SUM4(y[4],y[5],y[6],y[7]) - SUM4(y[0],y[1],y[2],y[3])) ;
Real_t dzk = Real_t( 0.25)*(SUM4(z[4],z[5],z[6],z[7]) - SUM4(z[0],z[1],z[2],z[3])) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
*delx_zeta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[4],xv[5],xv[6],xv[7]) - SUM4(xv[0],xv[1],xv[2],xv[3])) ;
dyv = Real_t(0.25)*(SUM4(yv[4],yv[5],yv[6],yv[7]) - SUM4(yv[0],yv[1],yv[2],yv[3])) ;
dzv = Real_t(0.25)*(SUM4(zv[4],zv[5],zv[6],zv[7]) - SUM4(zv[0],zv[1],zv[2],zv[3])) ;
*delv_zeta = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
*delx_xi = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[1],xv[2],xv[6],xv[5]) - SUM4(xv[0],xv[3],xv[7],xv[4])) ;
dyv = Real_t(0.25)*(SUM4(yv[1],yv[2],yv[6],yv[5]) - SUM4(yv[0],yv[3],yv[7],yv[4])) ;
dzv = Real_t(0.25)*(SUM4(zv[1],zv[2],zv[6],zv[5]) - SUM4(zv[0],zv[3],zv[7],zv[4])) ;
*delv_xi = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
*delx_eta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*(SUM4(xv[0],xv[1],xv[5],xv[4]) - SUM4(xv[3],xv[2],xv[6],xv[7])) ;
dyv = Real_t(-0.25)*(SUM4(yv[0],yv[1],yv[5],yv[4]) - SUM4(yv[3],yv[2],yv[6],yv[7])) ;
dzv = Real_t(-0.25)*(SUM4(zv[0],zv[1],zv[5],zv[4]) - SUM4(zv[3],zv[2],zv[6],zv[7])) ;
*delv_eta = ax*dxv + ay*dyv + az*dzv ;
#undef SUM4
}
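// One thread per element: new relative volume and delv, characteristic length,
// deviatoric strain rates at the half-step geometry, and the monotonic Q
// gradients; negative relative volumes are flagged through bad_vol.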
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,8) // 64-bit
#else
__launch_bounds__(64,16) // 32-bit
#endif
void CalcKinematicsAndMonotonicQGradient_kernel(
Index_t numElem, Index_t padded_numElem, const Real_t * dt,
const Index_t* __restrict__ nodelist, const Real_t* __restrict__ volo, const Real_t* __restrict__ v,
const Real_t* __restrict__ x,
const Real_t* __restrict__ y,
const Real_t* __restrict__ z,
const Real_t* __restrict__ xd,
const Real_t* __restrict__ yd,
const Real_t* __restrict__ zd,
Real_t* __restrict__ vnew,
Real_t* __restrict__ delv,
Real_t* __restrict__ arealg,
Real_t* __restrict__ dxx,
Real_t* __restrict__ dyy,
Real_t* __restrict__ dzz,
Real_t* __restrict__ vdov,
Real_t* __restrict__ delx_zeta,
Real_t* __restrict__ delv_zeta,
Real_t* __restrict__ delx_xi,
Real_t* __restrict__ delv_xi,
Real_t* __restrict__ delx_eta,
Real_t* __restrict__ delv_eta,
Index_t* __restrict__ bad_vol,
const Index_t num_threads
)
{
Real_t B[3][8] ; /** shape function derivatives */
Index_t nodes[8] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t D[6];
int k=blockDim.x*blockIdx.x+threadIdx.x;
if ( k < num_threads) {
Real_t volume ;
Real_t relativeVolume ;
// get nodal coordinates from global arrays and copy into local arrays.
//#pragma unroll
//for( Index_t lnode=0 ; lnode<8 ; ++lnode )
//{
// Index_t gnode = nodelist[k+lnode*padded_numElem];
// nodes[lnode] = gnode;
// x_local[lnode] = x[gnode];
// y_local[lnode] = y[gnode];
// z_local[lnode] = z[gnode];
//}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*padded_numElem];
nodes[lnode] = gnode;
}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
x_local[lnode] = x[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
y_local[lnode] = y[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
z_local[lnode] = z[nodes[lnode]];
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
vnew[k] = relativeVolume ;
delv[k] = relativeVolume - v[k] ;
// set characteristic length
arealg[k] = CalcElemCharacteristicLength(x_local,y_local,z_local,volume);
// get nodal velocities from global array and copy into local arrays.
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodes[lnode];
xd_local[lnode] = xd[gnode];
yd_local[lnode] = yd[gnode];
zd_local[lnode] = zd[gnode];
}
Real_t dt2 = Real_t(0.5) * dt[0];
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
Real_t detJ;
CalcElemShapeFunctionDerivatives(x_local,y_local,z_local,B,&detJ );
CalcElemVelocityGradient(xd_local,yd_local,zd_local,B,detJ,D);
// ------------------------
// CALC LAGRANGE ELEM 2
// ------------------------
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdovNew = D[0] + D[1] + D[2];
Real_t vdovthird = vdovNew/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
vdov[k] = vdovNew ;
dxx[k] = D[0] - vdovthird ;
dyy[k] = D[1] - vdovthird ;
dzz[k] = D[2] - vdovthird ;
// ------------------------
// CALC MONOTONIC Q GRADIENT
// ------------------------
Real_t vol = volo[k]*vnew[k];
// Undo x_local update
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j ) {
x_local[j] += dt2 * xd_local[j];
y_local[j] += dt2 * yd_local[j];
z_local[j] += dt2 * zd_local[j];
}
CalcMonoGradient(x_local,y_local,z_local,xd_local,yd_local,zd_local,
vol,
&delx_zeta[k],&delv_zeta[k],&delx_xi[k],
&delv_xi[k], &delx_eta[k], &delv_eta[k]);
//Check for bad volume
if (relativeVolume < 0)
*bad_vol = k;
}
}
static inline
void CalcKinematicsAndMonotonicQGradient(Domain *domain)
{
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
int num_threads = numElem;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
//elenago
#if 0
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("QGradient ---> Rank %d, dimGrid: %d numElem: %d, padded_numElem: %d domain->deltatime_h: %f bad_vol_h=%d\n",
rank, dimGrid, numElem, padded_numElem, domain->deltatime_h, *domain->bad_vol_h);
#endif
CalcKinematicsAndMonotonicQGradient_kernel<<<dimGrid,block_size>>>
( numElem,padded_numElem, domain->deltatime_d_async,
domain->nodelist.raw(),
domain->volo.raw(),
domain->v.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
domain->vnew->raw(),
domain->delv.raw(),
domain->arealg.raw(),
domain->dxx->raw(),
domain->dyy->raw(),
domain->dzz->raw(),
domain->vdov.raw(),
domain->delx_zeta->raw(),
domain->delv_zeta->raw(),
domain->delx_xi->raw(),
domain->delv_xi->raw(),
domain->delx_eta->raw(),
domain->delv_eta->raw(),
domain->bad_vol_h,
num_threads
);
//cudaDeviceSynchronize();
//cudaCheckError();
}
#if 0
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,8) // 64-bit
#else
__launch_bounds__(64,16) // 32-bit
#endif
void testVolume(
Index_t numElem, Index_t padded_numElem, const Real_t dt,
const Index_t* __restrict__ nodelist, const Real_t* __restrict__ volo, const Real_t* __restrict__ v,
const Real_t* __restrict__ x,
const Real_t* __restrict__ y,
const Real_t* __restrict__ z,
const Real_t* __restrict__ xd,
const Real_t* __restrict__ yd,
const Real_t* __restrict__ zd,
Real_t* __restrict__ vnew,
Real_t* __restrict__ delv,
Real_t* __restrict__ arealg,
Real_t* __restrict__ dxx,
Real_t* __restrict__ dyy,
Real_t* __restrict__ dzz,
Real_t* __restrict__ vdov,
Real_t* __restrict__ delx_zeta,
Real_t* __restrict__ delv_zeta,
Real_t* __restrict__ delx_xi,
Real_t* __restrict__ delv_xi,
Real_t* __restrict__ delx_eta,
Real_t* __restrict__ delv_eta,
Index_t* __restrict__ bad_vol,
const Index_t num_threads
)
{
Real_t B[3][8] ; /** shape function derivatives */
Index_t nodes[8] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t D[6];
int k=blockDim.x*blockIdx.x+threadIdx.x;
if ( k < num_threads) {
Real_t volume ;
Real_t relativeVolume ;
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*padded_numElem];
nodes[lnode] = gnode;
}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
x_local[lnode] = x[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
y_local[lnode] = y[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
z_local[lnode] = z[nodes[lnode]];
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
//Check for bad volume
if (relativeVolume < 0)
*bad_vol = k;
}
}
static inline
void testVolume(Domain *domain)
{
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
int num_threads = numElem;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
testVolume<<<dimGrid,block_size>>>
( numElem,padded_numElem, domain->deltatime_h,
domain->nodelist.raw(),
domain->volo.raw(),
domain->v.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
domain->vnew->raw(),
domain->delv.raw(),
domain->arealg.raw(),
domain->dxx->raw(),
domain->dyy->raw(),
domain->dzz->raw(),
domain->vdov.raw(),
domain->delx_zeta->raw(),
domain->delv_zeta->raw(),
domain->delx_xi->raw(),
domain->delv_xi->raw(),
domain->delx_eta->raw(),
domain->delv_eta->raw(),
domain->bad_vol_h,
num_threads
);
//cudaDeviceSynchronize();
//cudaCheckError();
}
#endif
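// Computes the limiter coefficients phi in each direction from neighboring
// delv values (honoring the boundary-condition masks) and the linear/quadratic
// artificial-viscosity terms ql/qq; elements with q above qstop are flagged.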
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcMonotonicQRegionForElems_kernel(
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
Index_t* regElemlist,
// const Index_t* __restrict__ regElemlist,
Index_t *elemBC,
Index_t *lxim,
Index_t *lxip,
Index_t *letam,
Index_t *letap,
Index_t *lzetam,
Index_t *lzetap,
Real_t *delv_xi,
Real_t *delv_eta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delx_eta,
Real_t *delx_zeta,
Real_t *vdov,Real_t *elemMass,Real_t *volo,Real_t *vnew,
Real_t *qq, Real_t *ql,
Real_t *q,
Real_t qstop,
Index_t* bad_q
)
{
int ielem=blockDim.x*blockIdx.x + threadIdx.x;
if (ielem<elength) {
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Index_t i = regElemlist[ielem];
Int_t bcMask = elemBC[i] ;
Real_t delvm, delvp ;
/* phixi */
Real_t norm = Real_t(1.) / ( delv_xi[i] + ptiny ) ;
switch (bcMask & XI_M) {
case XI_M_COMM: /* needs comm data */
case 0: delvm = delv_xi[lxim[i]] ; break ;
case XI_M_SYMM: delvm = delv_xi[i] ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & XI_P) {
case XI_P_COMM: /* needs comm data */
case 0: delvp = delv_xi[lxip[i]] ; break ;
case XI_P_SYMM: delvp = delv_xi[i] ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = Real_t(1.) / ( delv_eta[i] + ptiny ) ;
switch (bcMask & ETA_M) {
case ETA_M_COMM: /* needs comm data */
case 0: delvm = delv_eta[letam[i]] ; break ;
case ETA_M_SYMM: delvm = delv_eta[i] ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ETA_P) {
case ETA_P_COMM: /* needs comm data */
case 0: delvp = delv_eta[letap[i]] ; break ;
case ETA_P_SYMM: delvp = delv_eta[i] ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = Real_t(1.) / ( delv_zeta[i] + ptiny ) ;
switch (bcMask & ZETA_M) {
case ZETA_M_COMM: /* needs comm data */
case 0: delvm = delv_zeta[lzetam[i]] ; break ;
case ZETA_M_SYMM: delvm = delv_zeta[i] ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ZETA_P) {
case ZETA_P_COMM: /* needs comm data */
case 0: delvp = delv_zeta[lzetap[i]] ; break ;
case ZETA_P_SYMM: delvp = delv_zeta[i] ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( vdov[i] > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
Real_t delvxxi = delv_xi[i] * delx_xi[i] ;
Real_t delvxeta = delv_eta[i] * delx_eta[i] ;
Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
qq[i] = qquad ;
ql[i] = qlin ;
// Don't allow excessive artificial viscosity
if (q[i] > qstop)
*(bad_q) = i;
}
}
static inline
void CalcMonotonicQRegionForElems(Domain *domain)
{
const Real_t ptiny = Real_t(1.e-36) ;
Real_t monoq_max_slope = domain->monoq_max_slope ;
Real_t monoq_limiter_mult = domain->monoq_limiter_mult ;
Real_t qlc_monoq = domain->qlc_monoq;
Real_t qqc_monoq = domain->qqc_monoq;
Index_t elength = domain->numElem;
Index_t dimBlock= 128;
Index_t dimGrid = PAD_DIV(elength,dimBlock);
//elenago
#if 0
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("Regions ---> Rank %d, dimGrid: %d monoq_max_slope: %f, monoq_limiter_mult: %f qlc_monoq: %f qqc_monoq: %f elength: %d domain->qstop: %f bad_q_h=%d\n",
rank, dimGrid, monoq_max_slope, monoq_limiter_mult, qlc_monoq, qqc_monoq, elength, domain->qstop, *domain->bad_q_h);
#endif
CalcMonotonicQRegionForElems_kernel<<<dimGrid,dimBlock>>>
( qlc_monoq,qqc_monoq,monoq_limiter_mult,monoq_max_slope,ptiny,elength,
domain->regElemlist.raw(),domain->elemBC.raw(),
domain->lxim.raw(),domain->lxip.raw(),
domain->letam.raw(),domain->letap.raw(),
domain->lzetam.raw(),domain->lzetap.raw(),
domain->delv_xi->raw(),domain->delv_eta->raw(),domain->delv_zeta->raw(),
domain->delx_xi->raw(),domain->delx_eta->raw(),domain->delx_zeta->raw(),
domain->vdov.raw(),domain->elemMass.raw(),domain->volo.raw(),domain->vnew->raw(),
domain->qq.raw(),domain->ql.raw(),
domain->q.raw(),
domain->qstop,
domain->bad_q_h
);
//cudaDeviceSynchronize();
//cudaCheckError();
}
static
__device__ __forceinline__
void CalcPressureForElems_device(
Real_t& p_new, Real_t& bvc,
Real_t& pbvc, Real_t& e_old,
Real_t& compression, Real_t& vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax)
{
Real_t c1s = Real_t(2.0)/Real_t(3.0);
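// Gamma-law style EOS: p = (2/3)*(1 + compression)*e; bvc and pbvc are kept for the later sound-speed
// and energy updates, and p is clamped by p_cut, pmin and the eosvmax volume check.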
Real_t p_temp = p_new;
bvc = c1s * (compression + Real_t(1.));
pbvc = c1s;
p_temp = bvc * e_old ;
if ( FABS(p_temp) < p_cut )
p_temp = Real_t(0.0) ;
if ( vnewc >= eosvmax ) /* impossible condition here? */
p_temp = Real_t(0.0) ;
if (p_temp < pmin)
p_temp = pmin ;
p_new = p_temp;
}
static
__device__ __forceinline__
void CalcSoundSpeedForElems_device(Real_t& vnewc, Real_t rho0, Real_t &enewc,
Real_t &pnewc, Real_t &pbvc,
Real_t &bvc, Real_t ss4o3, Index_t nz,
Real_t *ss, Index_t iz)
{
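// ss^2 = (pbvc*e + v^2*bvc*p) / rho0, floored at a tiny positive value before taking the square root.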
Real_t ssTmp = (pbvc * enewc + vnewc * vnewc *
bvc * pnewc) / rho0;
if (ssTmp <= Real_t(.1111111e-36)) {
ssTmp = Real_t(.3333333e-18);
}
else {
ssTmp = SQRT(ssTmp) ;
}
ss[iz] = ssTmp;
}
static
__device__
__forceinline__
void ApplyMaterialPropertiesForElems_device(
Real_t& eosvmin, Real_t& eosvmax,
Real_t* vnew, Real_t *v,
Real_t& vnewc, Index_t* bad_vol, Index_t zn)
{
vnewc = vnew[zn] ;
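// Clamp the new and reference relative volumes to [eosvmin, eosvmax]; a non-positive reference volume flags the element through bad_vol.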
if (eosvmin != Real_t(0.)) {
if (vnewc < eosvmin)
vnewc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vnewc > eosvmax)
vnewc = eosvmax ;
}
// Now check for valid volume
Real_t vc = v[zn];
if (eosvmin != Real_t(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
if (vc <= 0.) {
*bad_vol = zn;
}
}
static
__device__
__forceinline__
void UpdateVolumesForElems_device(Index_t numElem, Real_t& v_cut,
Real_t *vnew,
Real_t *v,
int i)
{
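// Snap volumes that are within v_cut of 1.0 back to exactly 1.0 before storing to v.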
Real_t tmpV ;
tmpV = vnew[i] ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
v[i] = tmpV ;
}
static
__device__
__forceinline__
void CalcEnergyForElems_device(Real_t& p_new, Real_t& e_new, Real_t& q_new,
Real_t& bvc, Real_t& pbvc,
Real_t& p_old, Real_t& e_old, Real_t& q_old,
Real_t& compression, Real_t& compHalfStep,
Real_t& vnewc, Real_t& work, Real_t& delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t& qq, Real_t& ql,
Real_t& rho0,
Real_t& eosvmax,
Index_t length)
{
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Real_t pHalfStep;
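// Predictor-corrector energy update: advance e with the old p and q, evaluate a half-step pressure (pHalfStep),
// correct e, evaluate the full-step pressure, then apply a final correction using q_tilde before updating q_new.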
e_new = e_old - Real_t(0.5) * delvc * (p_old + q_old)
+ Real_t(0.5) * work;
if (e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax);
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep) ;
if ( delvc > Real_t(0.) ) {
q_new = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vhalf * vhalf * bvc * pHalfStep ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc =Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
}
e_new = e_new + Real_t(0.5) * delvc
* ( Real_t(3.0)*(p_old + q_old)
- Real_t(4.0)*(pHalfStep + q_new)) ;
e_new += Real_t(0.5) * work;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
Real_t q_tilde ;
if (delvc > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql + qq) ;
}
e_new = e_new - ( Real_t(7.0)*(p_old + q_old)
- Real_t(8.0)*(pHalfStep + q_new)
+ (p_new + q_tilde)) * delvc*sixth ;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
if ( delvc <= Real_t(0.) ) {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
if (FABS(q_new) < q_cut) q_new = Real_t(0.) ;
}
return ;
}
__device__ inline
Index_t giveMyRegion(const Index_t* regCSR,const Index_t i, const Index_t numReg)
{
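// regCSR holds cumulative (prefix-sum) element counts per region, so the first bound exceeding i identifies the owning region.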
for(Index_t reg = 0; reg < numReg-1; reg++)
if(i < regCSR[reg])
return reg;
return (numReg-1);
}
__global__
void ApplyMaterialPropertiesAndUpdateVolume_kernel(
Index_t length,
Real_t rho0,
Real_t e_cut,
Real_t emin,
Real_t* __restrict__ ql,
Real_t* __restrict__ qq,
Real_t* __restrict__ vnew,
Real_t* __restrict__ v,
Real_t pmin,
Real_t p_cut,
Real_t q_cut,
Real_t eosvmin,
Real_t eosvmax,
Index_t* __restrict__ regElemlist,
// const Index_t* __restrict__ regElemlist,
Real_t* __restrict__ e,
Real_t* __restrict__ delv,
Real_t* __restrict__ p,
Real_t* __restrict__ q,
Real_t ss4o3,
Real_t* __restrict__ ss,
Real_t v_cut,
Index_t* __restrict__ bad_vol,
const Int_t cost,
const Index_t* regCSR,
const Index_t* regReps,
const Index_t numReg
)
{
Real_t e_old, delvc, p_old, q_old, e_temp, delvc_temp, p_temp, q_temp;
Real_t compression, compHalfStep;
Real_t qq_old, ql_old, qq_temp, ql_temp, work;
Real_t p_new, e_new, q_new;
Real_t bvc, pbvc, vnewc;
Index_t i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zidx = regElemlist[i] ;
ApplyMaterialPropertiesForElems_device
(eosvmin,eosvmax,vnew,v,vnewc,bad_vol,zidx);
/********************** Start EvalEOSForElems **************************/
// Here we need to find out what region this element belongs to and what is the rep value!
Index_t region = giveMyRegion(regCSR,i,numReg);
Index_t rep = regReps[region];
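// rep emulates the extra EOS cost of expensive material regions: the update below is repeated rep times on identical inputs.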
e_temp = e[zidx];
p_temp = p[zidx];
q_temp = q[zidx];
qq_temp = qq[zidx] ;
ql_temp = ql[zidx] ;
delvc_temp = delv[zidx];
for(int r=0; r < rep; r++)
{
e_old = e_temp;
p_old = p_temp;
q_old = q_temp;
qq_old = qq_temp;
ql_old = ql_temp;
delvc = delvc_temp;
work = Real_t(0.);
Real_t vchalf ;
compression = Real_t(1.) / vnewc - Real_t(1.);
vchalf = vnewc - delvc * Real_t(.5);
compHalfStep = Real_t(1.) / vchalf - Real_t(1.);
if ( eosvmin != Real_t(0.) ) {
if (vnewc <= eosvmin) { /* impossible due to calling func? */
compHalfStep = compression ;
}
}
if ( eosvmax != Real_t(0.) ) {
if (vnewc >= eosvmax) { /* impossible due to calling func? */
p_old = Real_t(0.) ;
compression = Real_t(0.) ;
compHalfStep = Real_t(0.) ;
}
}
// qq_old = qq[zidx] ;
// ql_old = ql[zidx] ;
// work = Real_t(0.) ;
CalcEnergyForElems_device(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq_old, ql_old, rho0, eosvmax, length);
}//end for rep
p[zidx] = p_new ;
e[zidx] = e_new ;
q[zidx] = q_new ;
CalcSoundSpeedForElems_device
(vnewc,rho0,e_new,p_new,pbvc,bvc,ss4o3,length,ss,zidx);
/********************** End EvalEOSForElems **************************/
UpdateVolumesForElems_device(length,v_cut,vnew,v,zidx);
}
}
static inline
void ApplyMaterialPropertiesAndUpdateVolume(Domain *domain)
{
Index_t length = domain->numElem ;
if (length != 0) {
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(length,dimBlock);
ApplyMaterialPropertiesAndUpdateVolume_kernel<<<dimGrid,dimBlock>>>
(length,
domain->refdens,
domain->e_cut,
domain->emin,
domain->ql.raw(),
domain->qq.raw(),
domain->vnew->raw(),
domain->v.raw(),
domain->pmin,
domain->p_cut,
domain->q_cut,
domain->eosvmin,
domain->eosvmax,
domain->regElemlist.raw(),
domain->e.raw(),
domain->delv.raw(),
domain->p.raw(),
domain->q.raw(),
domain->ss4o3,
domain->ss.raw(),
domain->v_cut,
domain->bad_vol_h,
domain->cost,
domain->regCSR.raw(),
domain->regReps.raw(),
domain->numReg
);
//cudaDeviceSynchronize();
//cudaCheckError();
}
}
static inline
void LagrangeElements(Domain *domain)
{
int allElem = domain->numElem + /* local elem */
2*domain->sizeX*domain->sizeY + /* plane ghosts */
2*domain->sizeX*domain->sizeZ + /* row ghosts */
2*domain->sizeY*domain->sizeZ ; /* col ghosts */
domain->vnew = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dxx = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dyy = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dzz = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_xi = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_eta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_zeta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delv_xi = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_eta = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_zeta = Allocator< Vector_d<Real_t> >::allocate(allElem);
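// The delv_* gradients are sized for local elements plus the plane/row/column ghost layers (allElem)
// so they can be exchanged with neighbor ranks during the MONOQ communication.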
/*
#if USE_MPI
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true) ;
#endif
*/
/*********************************************/
/* Calc Kinematics and Monotonic Q Gradient */
/*********************************************/
CalcKinematicsAndMonotonicQGradient(domain);
#if USE_MPI
//printf("Rank %d prima di CommRecv LagrangeElements\n", globalRank);
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, 0) ;
Domain_member fieldData[3] ;
// initialize pointers
domain->d_delv_xi = domain->delv_xi->raw();
domain->d_delv_eta = domain->delv_eta->raw();
domain->d_delv_zeta = domain->delv_zeta->raw();
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
//printf("Rank %d prima di CommSendGpu LagrangeElements\n", globalRank);
CommSendGpu(*domain, MSG_MONOQ, 3, fieldData,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, domain->streams[2], 0) ;
//printf("Rank %d prima di CommMonoQGpu LagrangeElements\n", globalRank);
CommMonoQGpu(*domain, domain->streams[2], 0) ;
//printf("Rank %d fuori da CommMonoQGpu LagrangeElements\n", globalRank);
#endif
Allocator<Vector_d<Real_t> >::free(domain->dxx,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dyy,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dzz,domain->numElem);
/**********************************
* Calc Monotonic Q Region
**********************************/
CalcMonotonicQRegionForElems(domain);
Allocator<Vector_d<Real_t> >::free(domain->delx_xi,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_eta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_zeta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_xi,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_eta,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_zeta,allElem);
// printf("\n --Start of ApplyMaterials! \n");
ApplyMaterialPropertiesAndUpdateVolume(domain) ;
// printf("\n --End of ApplyMaterials! \n");
Allocator<Vector_d<Real_t> >::free(domain->vnew,domain->numElem);
}
template<int block_size>
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcTimeConstraintsForElems_kernel(
Index_t length,
Real_t qqc2,
Real_t dvovmax,
Index_t *matElemlist,
Real_t *ss,
Real_t *vdov,
Real_t *arealg,
Real_t *dev_mindtcourant,
Real_t *dev_mindthydro)
{
int tid = threadIdx.x;
int i=blockDim.x*blockIdx.x + tid;
__shared__ volatile Real_t s_mindthydro[block_size];
__shared__ volatile Real_t s_mindtcourant[block_size];
Real_t mindthydro = Real_t(1.0e+20) ;
Real_t mindtcourant = Real_t(1.0e+20) ;
Real_t dthydro = mindthydro;
Real_t dtcourant = mindtcourant;
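// Grid-stride loop: each thread tracks running minima of the hydro and courant timestep limits over the elements it visits.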
while (i<length) {
Index_t indx = matElemlist[i] ;
Real_t vdov_tmp = vdov[indx];
// Computing dt_hydro
if (vdov_tmp != Real_t(0.)) {
Real_t dtdvov = dvovmax / (FABS(vdov_tmp)+Real_t(1.e-20)) ;
if ( dthydro > dtdvov ) {
dthydro = dtdvov ;
}
}
if (dthydro < mindthydro)
mindthydro = dthydro;
// Computing dt_courant
Real_t ss_tmp = ss[indx];
Real_t area_tmp = arealg[indx];
Real_t dtf = ss_tmp * ss_tmp ;
dtf += ((vdov_tmp < 0.) ? qqc2*area_tmp*area_tmp*vdov_tmp*vdov_tmp : 0.);
dtf = area_tmp / SQRT(dtf) ;
/* determine minimum timestep with its corresponding elem */
if (vdov_tmp != Real_t(0.) && dtf < dtcourant) {
dtcourant = dtf ;
}
if (dtcourant< mindtcourant)
mindtcourant= dtcourant;
i += gridDim.x*blockDim.x;
}
s_mindthydro[tid] = mindthydro;
s_mindtcourant[tid] = mindtcourant;
__syncthreads();
// Do shared memory reduction
if (block_size >= 1024) {
if (tid < 512) {
s_mindthydro[tid] = min( s_mindthydro[tid] , s_mindthydro[tid + 512]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 512]) ; }
__syncthreads(); }
if (block_size >= 512) {
if (tid < 256) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 256]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 256]) ; }
__syncthreads(); }
if (block_size >= 256) {
if (tid < 128) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 128]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 128]) ; }
__syncthreads(); }
if (block_size >= 128) {
if (tid < 64) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 64]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 64]) ; }
__syncthreads(); }
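// From here on the active threads fit in a single warp, so the code omits __syncthreads() and
// relies on the volatile shared arrays (classic warp-synchronous reduction idiom).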
if (tid < 32) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 32]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 32]) ;
}
if (tid < 16) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 16]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 16]) ;
}
if (tid < 8) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 8]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 8]) ;
}
if (tid < 4) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 4]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 4]) ;
}
if (tid < 2) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 2]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 2]) ;
}
if (tid < 1) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 1]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 1]) ;
}
// Store in global memory
if (tid==0) {
dev_mindtcourant[blockIdx.x] = s_mindtcourant[0];
dev_mindthydro[blockIdx.x] = s_mindthydro[0];
}
}
template <int block_size>
__global__
void CalcMinDtOneBlock(Real_t* dev_mindthydro, Real_t* dev_mindtcourant, Real_t* dtcourant, Real_t* dthydro, Index_t shared_array_size)
{
volatile __shared__ Real_t s_data[block_size];
int tid = threadIdx.x;
if (blockIdx.x==0)
{
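// Block 0 reduces the per-block courant minima into *dtcourant; block 1 below does the same for the hydro minima.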
if (tid < shared_array_size)
s_data[tid] = dev_mindtcourant[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dtcourant)= s_data[0];
}
}
else if (blockIdx.x==1)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindthydro[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dthydro) = s_data[0];
}
}
}
static inline
void CalcTimeConstraintsForElems(Domain* domain)
{
Real_t qqc = domain->qqc;
Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
Real_t dvovmax = domain->dvovmax ;
const Index_t length = domain->numElem;
const int max_dimGrid = 1024;
const int dimBlock = 128;
int dimGrid=std::min(max_dimGrid,PAD_DIV(length,dimBlock));
cudaFuncSetCacheConfig(CalcTimeConstraintsForElems_kernel<dimBlock>, cudaFuncCachePreferShared);
CalcTimeConstraintsForElems_kernel<dimBlock> <<<dimGrid,dimBlock>>>
(length,qqc2,dvovmax,
domain->matElemlist.raw(),domain->ss.raw(),domain->vdov.raw(),domain->arealg.raw(),
domain->dev_mindtcourant->raw(), domain->dev_mindthydro->raw());
// TODO: if dimGrid < 1024, should launch less threads
CalcMinDtOneBlock<max_dimGrid> <<<2,max_dimGrid, max_dimGrid*sizeof(Real_t), domain->streams[1]>>>(domain->dev_mindthydro->raw(),
domain->dev_mindtcourant->raw(),
domain->dtcourant_h,domain->dthydro_h, dimGrid);
cudaEventRecord(domain->time_constraint_computed[currIter+1], domain->streams[1]);
}
//elenago lagrange
static inline
void LagrangeLeapFrogPt1(Domain* domain)
{
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
//LagrangeNodal(domain);
// ------------------------------ LAGRANGE NODAL -------------------------------
//#ifdef SEDOV_SYNC_POS_VEL_EARLY
Domain_member fieldData[6] ;
//#endif
#if 0
#if USE_MPI
CommRecv(*domain, MSG_COMM_SBN, 3,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, 0) ;
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*domain, MSG_SYNC_POS_VEL, 6,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, 1) ;
#endif
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, 2) ;
MPI_Barrier(MPI_COMM_WORLD);
#endif
#endif
Real_t u_cut = domain->u_cut ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
//CalcForceForNodes(domain);
CalcVolumeForceForElems(domain);
// moved here from the main loop to allow async execution with GPU work
// TimeIncrement(domain);
#if USE_MPI
fieldData[0] = &Domain::get_fx ;
fieldData[1] = &Domain::get_fy ;
fieldData[2] = &Domain::get_fz ;
CommSendGpu(*domain, MSG_COMM_SBN, 3, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, domain->streams[2], 0) ;
CommSBNGpu(*domain, 3, fieldData, &domain->streams[2], 0) ;
#endif
CalcAccelerationForNodes(domain);
ApplyAccelerationBoundaryConditionsForNodes(domain);
}
static inline
void LagrangeLeapFrogPt2(Domain* domain)
{
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
//LagrangeNodal(domain);
// ------------------------------ LAGRANGE NODAL -------------------------------
Domain_member fieldData[6] ;
Real_t u_cut = domain->u_cut ;
// deltatime_h here
CalcPositionAndVelocityForNodes(u_cut, domain);
#if 0
cudaDeviceSynchronize();
testVolume(domain);
cudaDeviceSynchronize();
checkErrors(domain,1,myRank);
#endif
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
CommSendGpu(*domain, MSG_SYNC_POS_VEL, 6, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, domain->streams[2], 1) ;
CommSyncPosVelGpu(*domain, &domain->streams[2], 1) ;
#endif
#endif
// ------------------------------------------------------------------------
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
//LagrangeElements(domain);
// ------------------------------ LAGRANGE ELEMENTS -------------------------------
// deltatime_h here
CalcKinematicsAndMonotonicQGradient(domain);
//cudaDeviceSynchronize();
// checkErrors(domain,5,myRank);
#if USE_MPI
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
CommSendGpu(*domain, MSG_MONOQ, 3, fieldData,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, domain->streams[2], 2) ;
CommMonoQGpu(*domain, domain->streams[2], 2) ;
#endif
// deltatime_h here
CalcMonotonicQRegionForElems(domain);
ApplyMaterialPropertiesAndUpdateVolume(domain) ;
// deltatime_h here
CalcTimeConstraintsForElems(domain);
}
static inline
void LagrangeLeapFrog(Domain* domain)
{
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
//LagrangeNodal(domain);
// ------------------------------ LAGRANGE NODAL -------------------------------
//#ifdef SEDOV_SYNC_POS_VEL_EARLY
Domain_member fieldData[6] ;
//#endif
#if 0
#if USE_MPI
CommRecv(*domain, MSG_COMM_SBN, 3,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, 0) ;
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*domain, MSG_SYNC_POS_VEL, 6,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, 1) ;
#endif
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, 2) ;
MPI_Barrier(MPI_COMM_WORLD);
#endif
#endif
Real_t u_cut = domain->u_cut ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
//CalcForceForNodes(domain);
CalcVolumeForceForElems(domain);
// moved here from the main loop to allow async execution with GPU work
// TimeIncrement(domain);
#if USE_MPI
fieldData[0] = &Domain::get_fx ;
fieldData[1] = &Domain::get_fy ;
fieldData[2] = &Domain::get_fz ;
CommSendGpu(*domain, MSG_COMM_SBN, 3, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, domain->streams[2], 0) ;
CommSBNGpu(*domain, 3, fieldData, &domain->streams[2], 0) ;
#endif
CalcAccelerationForNodes(domain);
ApplyAccelerationBoundaryConditionsForNodes(domain);
// deltatime_h here
CalcPositionAndVelocityForNodes(u_cut, domain);
#if 0
cudaDeviceSynchronize();
testVolume(domain);
cudaDeviceSynchronize();
checkErrors(domain,1,myRank);
#endif
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
CommSendGpu(*domain, MSG_SYNC_POS_VEL, 6, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, domain->streams[2], 1) ;
CommSyncPosVelGpu(*domain, &domain->streams[2], 1) ;
#endif
#endif
// ------------------------------------------------------------------------
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
//LagrangeElements(domain);
// ------------------------------ LAGRANGE ELEMENTS -------------------------------
// deltatime_h here
CalcKinematicsAndMonotonicQGradient(domain);
//cudaDeviceSynchronize();
// checkErrors(domain,5,myRank);
#if USE_MPI
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
CommSendGpu(*domain, MSG_MONOQ, 3, fieldData,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, domain->streams[2], 2) ;
CommMonoQGpu(*domain, domain->streams[2], 2) ;
#endif
CalcMonotonicQRegionForElems(domain);
ApplyMaterialPropertiesAndUpdateVolume(domain) ;
// --------------------------------------------------------------------------------
//event next iteration
// deltatime_h here
CalcTimeConstraintsForElems(domain);
// cudaDeviceSynchronize();
}
void printUsage(char* argv[])
{
printf("Usage: \n");
printf("Unstructured grid: %s -u <file.lmesh> \n", argv[0]) ;
printf("Structured grid: %s -s numEdgeElems \n", argv[0]) ;
printf("\nExamples:\n") ;
printf("%s -s 45\n", argv[0]) ;
printf("%s -u sedov15oct.lmesh\n", argv[0]) ;
}
#ifdef SAMI
#ifdef __cplusplus
extern "C" {
#endif
#include "silo.h"
#ifdef __cplusplus
}
#endif
#define MAX_LEN_SAMI_HEADER 10
#define SAMI_HDR_NUMBRICK 0
#define SAMI_HDR_NUMNODES 3
#define SAMI_HDR_NUMMATERIAL 4
#define SAMI_HDR_INDEX_START 6
#define SAMI_HDR_MESHDIM 7
#define MAX_ADJACENCY 14 /* must be 14 or greater */
void DumpSAMI(Domain *domain, char *name)
{
DBfile *fp ;
int headerLen = MAX_LEN_SAMI_HEADER ;
int headerInfo[MAX_LEN_SAMI_HEADER];
char varName[] = "brick_nd0";
char coordName[] = "x";
int version = 121 ;
int numElem = int(domain->numElem) ;
int numNode = int(domain->numNode) ;
int count ;
int *materialID ;
int *nodeConnect ;
double *nodeCoord ;
if ((fp = DBCreate(name, DB_CLOBBER, DB_LOCAL,
NULL, DB_PDB)) == NULL)
{
printf("Couldn't create file %s\n", name) ;
exit(1);
}
for (int i=0; i<MAX_LEN_SAMI_HEADER; ++i) {
headerInfo[i] = 0 ;
}
headerInfo[SAMI_HDR_NUMBRICK] = numElem ;
headerInfo[SAMI_HDR_NUMNODES] = numNode ;
headerInfo[SAMI_HDR_NUMMATERIAL] = 1 ;
headerInfo[SAMI_HDR_INDEX_START] = 1 ;
headerInfo[SAMI_HDR_MESHDIM] = 3 ;
DBWrite(fp, "mesh_data", headerInfo, &headerLen, 1, DB_INT) ;
count = 1 ;
DBWrite(fp, "version", &version, &count, 1, DB_INT) ;
nodeConnect = new int[numElem] ;
Vector_h<Index_t> nodelist_h = domain->nodelist;
for (Index_t i=0; i<8; ++i)
{
for (Index_t j=0; j<numElem; ++j) {
nodeConnect[j] = int(nodelist_h[i*domain->padded_numElem + j]) + 1 ;
}
varName[8] = '0' + i;
DBWrite(fp, varName, nodeConnect, &numElem, 1, DB_INT) ;
}
delete [] nodeConnect ;
nodeCoord = new double[numNode] ;
Vector_h<Real_t> x_h = domain->x;
Vector_h<Real_t> y_h = domain->y;
Vector_h<Real_t> z_h = domain->z;
for (Index_t i=0; i<3; ++i)
{
for (Index_t j=0; j<numNode; ++j) {
Real_t coordVal ;
switch(i) {
case 0: coordVal = double(x_h[j]) ; break ;
case 1: coordVal = double(y_h[j]) ; break ;
case 2: coordVal = double(z_h[j]) ; break ;
}
nodeCoord[j] = coordVal ;
}
coordName[0] = 'x' + i ;
DBWrite(fp, coordName, nodeCoord, &numNode, 1, DB_DOUBLE) ;
}
delete [] nodeCoord ;
materialID = new int[numElem] ;
for (Index_t i=0; i<numElem; ++i)
materialID[i] = 1 ;
DBWrite(fp, "brick_material", materialID, &numElem, 1, DB_INT) ;
delete [] materialID ;
DBClose(fp);
}
#endif
#ifdef SAMI
void DumpDomain(Domain *domain)
{
char meshName[64] ;
printf("Dumping SAMI file\n");
sprintf(meshName, "sedov_%d.sami", int(domain->cycle)) ;
DumpSAMI(domain, meshName) ;
}
#endif
void write_solution(Domain* locDom)
{
Vector_h<Real_t> x_h = locDom->x;
Vector_h<Real_t> y_h = locDom->y;
Vector_h<Real_t> z_h = locDom->z;
// printf("Writing solution to file xyz.asc\n");
std::stringstream filename;
filename << "xyz.asc";
FILE *fout = fopen(filename.str().c_str(),"wb");
for (Index_t i=0; i<locDom->numNode; i++) {
fprintf(fout,"%10d\n",i);
fprintf(fout,"%.10f\n",x_h[i]);
fprintf(fout,"%.10f\n",y_h[i]);
fprintf(fout,"%.10f\n",z_h[i]);
}
fclose(fout);
}
///////////////////////////////////////////////////////////////////////////
void InitMeshDecomp(Int_t numRanks, Int_t myRank,
Int_t *col, Int_t *row, Int_t *plane, Int_t *side)
{
Int_t testProcs;
Int_t dx, dy, dz;
Int_t myDom;
// Assume cube processor layout for now
testProcs = Int_t(cbrt(Real_t(numRanks))+0.5) ;
if (testProcs*testProcs*testProcs != numRanks) {
printf("Num processors must be a cube of an integer (1, 8, 27, ...)\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (sizeof(Real_t) != 4 && sizeof(Real_t) != 8) {
printf("MPI operations only support float and double right now...\n");
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (MAX_FIELDS_PER_MPI_COMM > CACHE_COHERENCE_PAD_REAL) {
printf("corner element comm buffers too small. Fix code.\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
dx = testProcs ;
dy = testProcs ;
dz = testProcs ;
// temporary test
if (dx*dy*dz != numRanks) {
printf("error -- must have as many domains as procs\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
Int_t remainder = dx*dy*dz % numRanks ;
if (myRank < remainder) {
myDom = myRank*( 1+ (dx*dy*dz / numRanks)) ;
}
else {
myDom = remainder*( 1+ (dx*dy*dz / numRanks)) +
(myRank - remainder)*(dx*dy*dz/numRanks) ;
}
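// Map the linear domain id onto (col, row, plane) coordinates of the processor cube.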
*col = myDom % dx ;
*row = (myDom / dx) % dy ;
*plane = myDom / (dx*dy) ;
*side = testProcs;
return;
}
void VerifyAndWriteFinalOutput(Real_t elapsed_time,
Domain& locDom,
Int_t its,
Int_t nx,
Int_t numRanks)
{
size_t free_mem, total_mem, used_mem;
cudaMemGetInfo(&free_mem, &total_mem);
used_mem= total_mem - free_mem;
#if LULESH_SHOW_PROGRESS == 0
printf(" Used Memory = %8.4f Mb\n", used_mem / (1024.*1024.) );
#endif
// GrindTime1 only takes a single domain into account, and is thus a good way to measure
// processor speed independent of MPI parallelism.
// GrindTime2 takes into account speedups from MPI parallelism
Real_t grindTime1 = ((elapsed_time*1e6)/its)/(nx*nx*nx);
Real_t grindTime2 = ((elapsed_time*1e6)/its)/(nx*nx*nx*numRanks);
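// Grind times are microseconds per zone per cycle: grindTime1 for a single domain, grindTime2 aggregated over all ranks.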
// Copy Energy back to Host
Real_t e_zero;
Real_t* d_ezero_ptr = locDom.e.raw() + locDom.octantCorner; /* octant corner supposed to be 0 */
cudaMemcpy(&e_zero, d_ezero_ptr, sizeof(Real_t), cudaMemcpyDeviceToHost);
printf("Run completed: \n");
printf(" Problem size = %i \n", nx);
printf(" MPI tasks = %i \n", numRanks);
printf(" Iteration count = %i \n", its);
printf(" Final Origin Energy = %12.6e \n", e_zero);
Real_t MaxAbsDiff = Real_t(0.0);
Real_t TotalAbsDiff = Real_t(0.0);
Real_t MaxRelDiff = Real_t(0.0);
Real_t *e_all = new Real_t[nx * nx];
cudaMemcpy(e_all, locDom.e.raw(), nx * nx * sizeof(Real_t), cudaMemcpyDeviceToHost);
for (Index_t j=0; j<nx; ++j) {
for (Index_t k=j+1; k<nx; ++k) {
Real_t AbsDiff = FABS(e_all[j*nx+k]-e_all[k*nx+j]);
TotalAbsDiff += AbsDiff;
if (MaxAbsDiff <AbsDiff) MaxAbsDiff = AbsDiff;
Real_t RelDiff = AbsDiff / e_all[k*nx+j];
if (MaxRelDiff <RelDiff) MaxRelDiff = RelDiff;
}
}
delete [] e_all;
// Quick symmetry check
printf(" Testing Plane 0 of Energy Array on rank 0:\n");
printf(" MaxAbsDiff = %12.6e\n", MaxAbsDiff );
printf(" TotalAbsDiff = %12.6e\n", TotalAbsDiff );
printf(" MaxRelDiff = %12.6e\n\n", MaxRelDiff );
// Timing information
printf("\nElapsed time = %10.2f (s)\n", elapsed_time);
printf("Grind time (us/z/c) = %10.8g (per dom) (%10.8g overall)\n", grindTime1, grindTime2);
printf("FOM = %10.8g (z/s)\n\n", 1000.0/grindTime2); // zones per second
bool write_solution_flag=true;
if (write_solution_flag) {
write_solution(&locDom);
}
return ;
}
int main(int argc, char *argv[])
{
if (argc < 3) {
printUsage(argv);
exit( LFileError );
}
if ( strcmp(argv[1],"-u") != 0 && strcmp(argv[1],"-s") != 0 )
{
printUsage(argv);
exit( LFileError ) ;
}
int num_iters = -1;
if (argc == 5) {
num_iters = atoi(argv[4]);
}
//elenago global
totIters = num_iters;
currIter = 0;
bool structured = ( strcmp(argv[1],"-s") == 0 );
Int_t numRanks ;
Int_t myRank ;
#if USE_MPI
Domain_member fieldData ;
MPI_Init(&argc, &argv) ;
MPI_Comm_size(MPI_COMM_WORLD, &numRanks) ;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
globalRank = myRank;
#else
numRanks = 1;
myRank = 0;
#endif
cuda_init(myRank);
printf("MyRank: %d, gpuID: %d\n", myRank, gpuID);
//ASYNC
if(comm_use_comm())
{
printf("Calling comm_init\n");
comm_init(MPI_COMM_WORLD, gpuID);
}
/* assume cube subdomain geometry for now */
Index_t nx = atoi(argv[2]);
Domain *locDom ;
// Set up the mesh and decompose. Assumes regular cubes for now
Int_t col, row, plane, side;
InitMeshDecomp(numRanks, myRank, &col, &row, &plane, &side);
// TODO: change default nr to 11
Int_t nr = 11;
Int_t balance = 1;
Int_t cost = 1;
// TODO: modify this constructor to account for new fields
// TODO: setup communication buffers
locDom = NewDomain(argv, numRanks, col, row, plane, nx, side, structured, nr, balance, cost);
#if USE_MPI
if(comm_use_comm())
{
if(myRank == 0)
printf("comm_use_comm\n");
if(comm_use_async())
{
if(myRank == 0)
printf("comm_use_async\n");
}
}
// copy to the host for mpi transfer
locDom->h_nodalMass = locDom->nodalMass;
fieldData = &Domain::get_nodalMass;
printf("Primo CommRecv\n");
// Initial domain boundary communication
CommRecv(*locDom, MSG_COMM_SBN, 1,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
printf("Primo CommSend\n");
CommSend(*locDom, MSG_COMM_SBN, 1, &fieldData,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
printf("Primo CommSBN\n");
CommSBN(*locDom, 1, &fieldData, 0) ;
// copy back to the device
locDom->nodalMass = locDom->h_nodalMass;
// End initialization
MPI_Barrier(MPI_COMM_WORLD);
#endif
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
/* timestep to solution */
int its=0;
if (myRank == 0) {
if (structured)
printf("Running until t=%f, Problem size=%dx%dx%d\n",locDom->stoptime,nx,nx,nx);
else
printf("Running until t=%f, Problem size=%d \n",locDom->stoptime,locDom->numElem);
}
cudaDeviceSynchronize();
elenagoAllocateElems(locDom);
MPI_Barrier(MPI_COMM_WORLD);
PUSH_RANGE("WARMUP", 1);
for(int iterWarm=0; iterWarm < 10; iterWarm++)
{
#if USE_MPI
CommRecv(*locDom, MSG_COMM_SBN, 3,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*locDom, MSG_SYNC_POS_VEL, 6,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
false, false, 1) ;
#endif
CommRecv(*locDom, MSG_MONOQ, 3,
locDom->sizeX, locDom->sizeY, locDom->sizeZ,
true, true, 2) ;
//MPI_Barrier(MPI_COMM_WORLD);
#endif
LagrangeLeapFrogPt1(locDom) ;
// this has been moved after computation of volume forces to hide launch latencies
TimeIncrement(locDom) ;
LagrangeLeapFrogPt2(locDom) ;
// make sure GPU finished its work
// cudaDeviceSynchronize();
comm_progress();
//checkErrors(locDom,its,myRank);
currIter++;
}
cudaDeviceSynchronize();
POP_RANGE;
MPI_Barrier(MPI_COMM_WORLD);
currIter=0;
elenagoDellocateElems(locDom);
locDom = ResetDomain(locDom, argv, numRanks, col, row, plane, nx, side, structured, nr, balance, cost);
#if USE_MPI
// copy to the host for mpi transfer
locDom->h_nodalMass = locDom->nodalMass;
fieldData = &Domain::get_nodalMass;
// Initial domain boundary communication
CommRecv(*locDom, MSG_COMM_SBN, 1,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
CommSend(*locDom, MSG_COMM_SBN, 1, &fieldData,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
CommSBN(*locDom, 1, &fieldData, 0) ;
// copy back to the device
locDom->nodalMass = locDom->h_nodalMass;
// End initialization
MPI_Barrier(MPI_COMM_WORLD);
#endif
cudaProfilerStart();
//cudaProfilerStop();
cudaDeviceSynchronize();
elenagoAllocateElems(locDom);
MPI_Barrier(MPI_COMM_WORLD);
#if USE_MPI
double start = MPI_Wtime();
#else
timeval start;
gettimeofday(&start, NULL) ;
#endif
while(locDom->time_h < locDom->stoptime)
{
/*
PUSH_RANGE("TIME", 7);
// this has been moved after computation of volume forces to hide launch latencies
TimeIncrementAsync(locDom) ;
POP_RANGE;
*/
#if USE_MPI
CommRecv(*locDom, MSG_COMM_SBN, 3,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false, 0) ;
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*locDom, MSG_SYNC_POS_VEL, 6,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
false, false, 1) ;
#endif
CommRecv(*locDom, MSG_MONOQ, 3,
locDom->sizeX, locDom->sizeY, locDom->sizeZ,
true, true, 2) ;
//MPI_Barrier(MPI_COMM_WORLD);
#endif
PUSH_RANGE("PT1", 6);
LagrangeLeapFrogPt1(locDom) ;
POP_RANGE;
PUSH_RANGE("TIME", 7);
TimeIncrement(locDom) ;
POP_RANGE;
PUSH_RANGE("PT2", 1);
LagrangeLeapFrogPt2(locDom) ;
POP_RANGE;
// make sure GPU finished its work
// cudaDeviceSynchronize();
PUSH_RANGE("PROGRESS", 2);
if((its%4) == 1)
comm_progress();
POP_RANGE;
//checkErrors(locDom,its,myRank);
#if 0
#if LULESH_SHOW_PROGRESS
cudaDeviceSynchronize();
if (myRank == 0)
printf("cycle = %d, time = %e, dt=%e\n", its+1, double(locDom->time_h), double(locDom->deltatime_h_async[0]) ) ;
#endif
#endif
its++;
if (its == num_iters) break;
currIter++;
}
cudaDeviceSynchronize();
// Use reduced max elapsed time
double elapsed_time;
#if USE_MPI
elapsed_time = MPI_Wtime() - start;
#else
timeval end;
gettimeofday(&end, NULL) ;
elapsed_time = (double)(end.tv_sec - start.tv_sec) + ((double)(end.tv_usec - start.tv_usec))/1000000 ;
#endif
double elapsed_timeG;
#if USE_MPI
MPI_Reduce(&elapsed_time, &elapsed_timeG, 1, MPI_DOUBLE,
MPI_MAX, 0, MPI_COMM_WORLD);
#else
elapsed_timeG = elapsed_time;
#endif
// cudaProfilerStop();
elenagoDellocateElems(locDom);
if (myRank == 0)
VerifyAndWriteFinalOutput(elapsed_timeG, *locDom, its, nx, numRanks);
#ifdef SAMI
DumpDomain(locDom) ;
#endif
cudaDeviceReset();
#if USE_MPI
//ASYNC
if(comm_use_comm())
comm_finalize();
MPI_Finalize() ;
#endif
return 0 ;
}
|
e65c72415a5b0321dff822166dbeb4493aa2e8a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nbody.h"
#include "utility.cuh"
#include <device_launch_parameters.h>
#include <random>
#include "errorHandling.cuh"
__global__ void velocity_halfstep(float3* positions, float3* velocities, float const* masses, size_t N)
{
//index computation etc.
extern __shared__ float smem[]; // size == N * sizeof(float3) + N * sizeof(float)
uint32_t tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid>=N)
return;
float3* spos = (float3*)(smem);
float* sm = (float*)&spos[N];
uint32_t tiles = ceildiv(N, blockDim.x);
uint32_t last_tile_size = N % blockDim.x;
float3 accel{0.f,0.f,0.f};
float3 ref_pos = positions[tid];
// loops to calculate acceleration on particle, using a tile the width of blockDim
// last, potentially ill-fitting tile gets special treatment
for (uint32_t tile = 0; tile != tiles - 1; ++tile) {
size_t to_fetch = blockDim.x * tile + threadIdx.x;
spos[threadIdx.x] = positions[to_fetch];
sm[threadIdx.x] = masses[to_fetch];
__syncthreads();
for (uint32_t tile_idx = 0; tile_idx != blockDim.x; ++tile_idx) {
auto diff = spos[tile_idx] - ref_pos;
accel += G * sm[tile_idx] * diff / (norm_pow3(diff) + eps);
}
}
//now tile == tiles-1
size_t to_fetch = blockDim.x * (tiles - 1) + threadIdx.x;
// we only have work to do if our part of the tile is overlapping the data
if (to_fetch < N) {
__syncthreads();
spos[threadIdx.x] = positions[to_fetch];
sm[threadIdx.x] = masses[to_fetch];
__syncthreads();
}
for (uint32_t tile_idx = 0; tile_idx != last_tile_size; ++tile_idx) {
auto diff = spos[tile_idx] - ref_pos;
accel += G * sm[tile_idx] * diff / (norm_pow3(diff) + eps);
}
//finally, update main storage
velocities[tid] = accel;
}
__global__ void position_step(float3* positions, float3* velocities, float const* masses, size_t N)
{
extern __shared__ float smem[]; // size == N * 2 * sizeof(float3)
uint32_t tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid>=N)
return;
float3* spos = (float3*)(smem);
float3* sv = (float3*)&spos[N];
uint32_t tiles = ceildiv(N, blockDim.x);
uint32_t last_tile_size = N % blockDim.x;
float3 dv{0,0,0};
for (uint32_t tile = 0; tile != tiles - 1; ++tile) {
size_t to_fetch = blockDim.x * tile + threadIdx.x;
spos[threadIdx.x] = positions[to_fetch];
sv[threadIdx.x] = velocities[to_fetch];
__syncthreads();
for (uint32_t tile_idx = 0; tile_idx != blockDim.x; ++tile_idx) {
dv += sv[tile_idx];
}
}
size_t to_fetch = blockDim.x * (tiles - 1) + threadIdx.x;
// we only have work to do if our part of the tile is overlapping the data
if (to_fetch < N) {
__syncthreads();
spos[threadIdx.x] = positions[to_fetch];
sv[threadIdx.x] = velocities[to_fetch];
__syncthreads();
}
for (uint32_t tile_idx = 0; tile_idx != last_tile_size; ++tile_idx) {
dv += sv[tile_idx];
}
positions[tid] += dv * dt;
}
timed<thrust::host_vector<float3>> run_leapfrog_soa(size_t N, size_t threads_per_block, size_t iters)
{
thrust::device_vector<float3> positions;
thrust::device_vector<float3> velocities;
thrust::device_vector<float> masses;
std::default_random_engine generator(0xdeadbeef);
std::uniform_real_distribution<float> position_distribution(-1,1);
auto x = std::bind(position_distribution, generator);
std::uniform_real_distribution<float> mass_distribution(0.1f,5.f);
auto m = std::bind(mass_distribution, generator);
for(auto _=0;_!=N;++_) {
positions.push_back({x(),x(),x()});
masses.push_back(m());
velocities.push_back({0,0,0});
}
size_t n_blocks = ceildiv(N,threads_per_block);
auto start = std::chrono::high_resolution_clock::now();
for(auto _=0; _!=iters; ++_) {
hipLaunchKernelGGL(( velocity_halfstep), dim3(n_blocks),dim3(threads_per_block),N * sizeof(float3) + N * sizeof(float), 0,
thrust::raw_pointer_cast(positions.data()),thrust::raw_pointer_cast(velocities.data()),thrust::raw_pointer_cast(masses.data()),N);
hipDeviceSynchronize();
quitOnCudaError();
hipLaunchKernelGGL(( position_step), dim3(n_blocks),dim3(threads_per_block),N *2*sizeof(float3), 0,
thrust::raw_pointer_cast(positions.data()),thrust::raw_pointer_cast(velocities.data()),thrust::raw_pointer_cast(masses.data()),N);
hipDeviceSynchronize();
quitOnCudaError();
hipLaunchKernelGGL(( velocity_halfstep), dim3(n_blocks),dim3(threads_per_block),N * sizeof(float3) + N * sizeof(float), 0,
thrust::raw_pointer_cast(positions.data()),thrust::raw_pointer_cast(velocities.data()),thrust::raw_pointer_cast(masses.data()),N);
hipDeviceSynchronize();
quitOnCudaError();
}
auto end = std::chrono::high_resolution_clock::now();
return {std::chrono::duration_cast<seconds>(end-start), thrust::host_vector<float3>(positions)};
} | e65c72415a5b0321dff822166dbeb4493aa2e8a1.cu | #include "nbody.h"
#include "utility.cuh"
#include <device_launch_parameters.h>
#include <random>
#include "errorHandling.cuh"
__global__ void velocity_halfstep(float3* positions, float3* velocities, float const* masses, size_t N)
{
//index computation etc.
extern __shared__ float smem[]; // size == N * sizeof(float3) + N * sizeof(float)
uint32_t tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid>=N)
return;
float3* spos = (float3*)(smem);
float* sm = (float*)&spos[N];
uint32_t tiles = ceildiv(N, blockDim.x);
uint32_t last_tile_size = N % blockDim.x;
float3 accel{0.f,0.f,0.f};
float3 ref_pos = positions[tid];
// loops to calculate acceleration on particle, using a tile the width of blockDim
// last, potentially ill-fitting tile gets special treatment
for (uint32_t tile = 0; tile != tiles - 1; ++tile) {
size_t to_fetch = blockDim.x * tile + threadIdx.x;
spos[threadIdx.x] = positions[to_fetch];
sm[threadIdx.x] = masses[to_fetch];
__syncthreads();
for (uint32_t tile_idx = 0; tile_idx != blockDim.x; ++tile_idx) {
auto diff = spos[tile_idx] - ref_pos;
accel += G * sm[tile_idx] * diff / (norm_pow3(diff) + eps);
}
}
//now tile == tiles-1
size_t to_fetch = blockDim.x * (tiles - 1) + threadIdx.x;
// we only have work to do if our part of the tile is overlapping the data
if (to_fetch < N) {
__syncthreads();
spos[threadIdx.x] = positions[to_fetch];
sm[threadIdx.x] = masses[to_fetch];
__syncthreads();
}
for (uint32_t tile_idx = 0; tile_idx != last_tile_size; ++tile_idx) {
auto diff = spos[tile_idx] - ref_pos;
accel += G * sm[tile_idx] * diff / (norm_pow3(diff) + eps);
}
//finally, update main storage
velocities[tid] = accel;
}
__global__ void position_step(float3* positions, float3* velocities, float const* masses, size_t N)
{
extern __shared__ float smem[]; // size == N * 2 * sizeof(float3)
uint32_t tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid>=N)
return;
float3* spos = (float3*)(smem);
float3* sv = (float3*)&spos[N];
uint32_t tiles = ceildiv(N, blockDim.x);
uint32_t last_tile_size = N % blockDim.x;
float3 dv{0,0,0};
for (uint32_t tile = 0; tile != tiles - 1; ++tile) {
size_t to_fetch = blockDim.x * tile + threadIdx.x;
spos[threadIdx.x] = positions[to_fetch];
sv[threadIdx.x] = velocities[to_fetch];
__syncthreads();
for (uint32_t tile_idx = 0; tile_idx != blockDim.x; ++tile_idx) {
dv += sv[tile_idx];
}
}
size_t to_fetch = blockDim.x * (tiles - 1) + threadIdx.x;
// we only have work to do if our part of the tile is overlapping the data
if (to_fetch < N) {
__syncthreads();
spos[threadIdx.x] = positions[to_fetch];
sv[threadIdx.x] = velocities[to_fetch];
__syncthreads();
}
for (uint32_t tile_idx = 0; tile_idx != last_tile_size; ++tile_idx) {
dv += sv[tile_idx];
}
positions[tid] += dv * dt;
}
timed<thrust::host_vector<float3>> run_leapfrog_soa(size_t N, size_t threads_per_block, size_t iters)
{
thrust::device_vector<float3> positions;
thrust::device_vector<float3> velocities;
thrust::device_vector<float> masses;
std::default_random_engine generator(0xdeadbeef);
std::uniform_real_distribution<float> position_distribution(-1,1);
auto x = std::bind(position_distribution, generator);
std::uniform_real_distribution<float> mass_distribution(0.1f,5.f);
auto m = std::bind(mass_distribution, generator);
for(auto _=0;_!=N;++_) {
positions.push_back({x(),x(),x()});
masses.push_back(m());
velocities.push_back({0,0,0});
}
size_t n_blocks = ceildiv(N,threads_per_block);
auto start = std::chrono::high_resolution_clock::now();
for(auto _=0; _!=iters; ++_) {
velocity_halfstep<<<n_blocks,threads_per_block,N * sizeof(float3) + N * sizeof(float)>>>
(thrust::raw_pointer_cast(positions.data()),thrust::raw_pointer_cast(velocities.data()),thrust::raw_pointer_cast(masses.data()),N);
cudaDeviceSynchronize();
quitOnCudaError();
position_step<<<n_blocks,threads_per_block,N *2*sizeof(float3)>>>
(thrust::raw_pointer_cast(positions.data()),thrust::raw_pointer_cast(velocities.data()),thrust::raw_pointer_cast(masses.data()),N);
cudaDeviceSynchronize();
quitOnCudaError();
velocity_halfstep<<<n_blocks,threads_per_block,N * sizeof(float3) + N * sizeof(float)>>>
(thrust::raw_pointer_cast(positions.data()),thrust::raw_pointer_cast(velocities.data()),thrust::raw_pointer_cast(masses.data()),N);
cudaDeviceSynchronize();
quitOnCudaError();
}
auto end = std::chrono::high_resolution_clock::now();
return {std::chrono::duration_cast<seconds>(end-start), thrust::host_vector<float3>(positions)};
} |
add_two_vectors.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void gpu_add_two_vectors(void)
{
}
int main()
{
printf("Adding Vectors: \n");
return 0;
} | add_two_vectors.cu | #include <stdio.h>
#include <stdlib.h>
__global__ void gpu_add_two_vectors(void)
{
}
int main()
{
printf("Adding Vectors: \n");
return 0;
} |
c640cd154e8e459ea72aede6e061849936cb9e58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_utils.h"
#include "parameters.h"
#include "seed_filter_interface.h"
#include "store_gpu.h"
// Control Variables
std::mutex mu;
std::condition_variable cv;
std::vector<int> available_gpus;
int NUM_DEVICES;
char** d_ref_seq;
uint32_t ref_len;
uint32_t** d_index_table;
uint32_t** d_pos_table;
__global__
void compress_string (uint32_t len, char* src_seq, char* dst_seq){
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
int grid_dim = gridDim.x;
int block_id = blockIdx.x;
int stride = block_dim * grid_dim;
uint32_t start = block_dim * block_id + thread_id;
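// Grid-stride loop: re-encode each character from ASCII into the compact nucleotide alphabet
// (upper-case bases, soft-masked lower case, N, and the '&' separator; anything else becomes X_NT).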
for (uint32_t i = start; i < len; i += stride) {
char ch = src_seq[i];
char dst = X_NT;
if (ch == 'A')
dst = A_NT;
else if (ch == 'C')
dst = C_NT;
else if (ch == 'G')
dst = G_NT;
else if (ch == 'T')
dst = T_NT;
else if ((ch == 'a') || (ch == 'c') || (ch == 'g') || (ch == 't'))
dst = L_NT;
else if ((ch == 'n') || (ch == 'N'))
dst = N_NT;
else if (ch == '&')
dst = E_NT;
dst_seq[i] = dst;
}
}
int InitializeInterface (int num_gpu){
int nDevices;
hipError_t err = hipGetDeviceCount(&nDevices);
if (err != hipSuccess) {
fprintf(stderr, "Error: No GPU device found!\n");
exit(1);
}
if(num_gpu == -1){
NUM_DEVICES = nDevices;
}
else{
if(num_gpu <= nDevices){
NUM_DEVICES = num_gpu;
}
else{
fprintf(stderr, "Requested GPUs greater than available GPUs\n");
exit(10);
}
}
fprintf(stderr, "Using %d GPU(s)\n", NUM_DEVICES);
d_ref_seq = (char**) malloc(NUM_DEVICES*sizeof(char*));
d_index_table = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*));
d_pos_table = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*));
return NUM_DEVICES;
}
void SendRefWriteRequest (char* seq, size_t start_addr, uint32_t len){
ref_len = len;
for(int g = 0; g < NUM_DEVICES; g++){
check_cuda_setDevice(g, "SendRefWriteRequest");
char* d_ref_seq_tmp;
check_cuda_malloc((void**)&d_ref_seq_tmp, len*sizeof(char), "tmp_ref_seq");
check_cuda_memcpy((void*)d_ref_seq_tmp, (void*)(seq + start_addr), len*sizeof(char), hipMemcpyHostToDevice, "ref_seq");
check_cuda_malloc((void**)&d_ref_seq[g], len*sizeof(char), "ref_seq");
hipLaunchKernelGGL(( compress_string) , dim3(MAX_BLOCKS), dim3(MAX_THREADS), 0, 0, len, d_ref_seq_tmp, d_ref_seq[g]);
check_cuda_free((void*)d_ref_seq_tmp, "d_ref_seq_tmp");
}
}
void ClearRef(){
for(int g = 0; g < NUM_DEVICES; g++){
check_cuda_setDevice(g, "ClearRef");
check_cuda_free((void*)d_ref_seq[g], "d_ref_seq");
check_cuda_free((void*)d_index_table[g], "d_index_table");
check_cuda_free((void*)d_pos_table[g], "d_pos_table");
}
}
InitializeInterface_ptr g_InitializeInterface = InitializeInterface;
SendRefWriteRequest_ptr g_SendRefWriteRequest = SendRefWriteRequest;
ClearRef_ptr g_ClearRef = ClearRef;
| c640cd154e8e459ea72aede6e061849936cb9e58.cu | #include "cuda_utils.h"
#include "parameters.h"
#include "seed_filter_interface.h"
#include "store_gpu.h"
// Control Variables
std::mutex mu;
std::condition_variable cv;
std::vector<int> available_gpus;
int NUM_DEVICES;
char** d_ref_seq;
uint32_t ref_len;
uint32_t** d_index_table;
uint32_t** d_pos_table;
__global__
void compress_string (uint32_t len, char* src_seq, char* dst_seq){
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
int grid_dim = gridDim.x;
int block_id = blockIdx.x;
int stride = block_dim * grid_dim;
uint32_t start = block_dim * block_id + thread_id;
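// Grid-stride loop: re-encode each character from ASCII into the compact nucleotide alphabet
// (upper-case bases, soft-masked lower case, N, and the '&' separator; anything else becomes X_NT).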
for (uint32_t i = start; i < len; i += stride) {
char ch = src_seq[i];
char dst = X_NT;
if (ch == 'A')
dst = A_NT;
else if (ch == 'C')
dst = C_NT;
else if (ch == 'G')
dst = G_NT;
else if (ch == 'T')
dst = T_NT;
else if ((ch == 'a') || (ch == 'c') || (ch == 'g') || (ch == 't'))
dst = L_NT;
else if ((ch == 'n') || (ch == 'N'))
dst = N_NT;
else if (ch == '&')
dst = E_NT;
dst_seq[i] = dst;
}
}
int InitializeInterface (int num_gpu){
int nDevices;
cudaError_t err = cudaGetDeviceCount(&nDevices);
if (err != cudaSuccess) {
fprintf(stderr, "Error: No GPU device found!\n");
exit(1);
}
if(num_gpu == -1){
NUM_DEVICES = nDevices;
}
else{
if(num_gpu <= nDevices){
NUM_DEVICES = num_gpu;
}
else{
fprintf(stderr, "Requested GPUs greater than available GPUs\n");
exit(10);
}
}
fprintf(stderr, "Using %d GPU(s)\n", NUM_DEVICES);
d_ref_seq = (char**) malloc(NUM_DEVICES*sizeof(char*));
d_index_table = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*));
d_pos_table = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*));
return NUM_DEVICES;
}
void SendRefWriteRequest (char* seq, size_t start_addr, uint32_t len){
ref_len = len;
for(int g = 0; g < NUM_DEVICES; g++){
check_cuda_setDevice(g, "SendRefWriteRequest");
char* d_ref_seq_tmp;
check_cuda_malloc((void**)&d_ref_seq_tmp, len*sizeof(char), "tmp_ref_seq");
check_cuda_memcpy((void*)d_ref_seq_tmp, (void*)(seq + start_addr), len*sizeof(char), cudaMemcpyHostToDevice, "ref_seq");
check_cuda_malloc((void**)&d_ref_seq[g], len*sizeof(char), "ref_seq");
compress_string <<<MAX_BLOCKS, MAX_THREADS>>> (len, d_ref_seq_tmp, d_ref_seq[g]);
check_cuda_free((void*)d_ref_seq_tmp, "d_ref_seq_tmp");
}
}
void ClearRef(){
for(int g = 0; g < NUM_DEVICES; g++){
check_cuda_setDevice(g, "ClearRef");
check_cuda_free((void*)d_ref_seq[g], "d_ref_seq");
check_cuda_free((void*)d_index_table[g], "d_index_table");
check_cuda_free((void*)d_pos_table[g], "d_pos_table");
}
}
InitializeInterface_ptr g_InitializeInterface = InitializeInterface;
SendRefWriteRequest_ptr g_SendRefWriteRequest = SendRefWriteRequest;
ClearRef_ptr g_ClearRef = ClearRef;
|
65ac551be15620ea15fb0d3e0f4057fe5ffef63c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2015 Kai Zhang ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#define EMULATE_NVM_BW
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <chrono>
#include <byteswap.h>
#include <map>
#include "gpu_hash.h"
#include "libgpm.cuh"
#include "bandwidth_analysis.cuh"
#include "zipf.h"
#define HASH_BLOCK_ELEM_NUM (BUC_NUM/INSERT_BLOCK)
#define BLOCK_ELEM_NUM (SELEM_NUM/INSERT_BLOCK)
#define LOAD_FACTOR 1 / 8
#define PRELOAD_CNT (uint32_t)(((1 << 30)/8) * LOAD_FACTOR)
#define TOTAL_CNT (((uint32_t)1 << 31) - 1)
#define ZIPF_THETA 0.99
double persist_time = 0, operation_time = 0, ddio_time = 0;
#define TIME_NOW std::chrono::high_resolution_clock::now()
#define time_diff(a, b) std::chrono::duration_cast<std::chrono::microseconds>(a - b).count()
int main(int argc, char *argv[])
{
ddio_on();
int SELEM_NUM, THREAD_NUM;
if (argc != 3) {
printf("usage: ./run #elem_num #thread_num, now running with 16384\n");
SELEM_NUM = 16384 * 128;
THREAD_NUM = 16384 * 2;
} else {
SELEM_NUM = atoi(argv[1]);
THREAD_NUM = atoi(argv[2]);
}
printf("elem_num is %d, thread_num is %d\n", SELEM_NUM, THREAD_NUM);
struct zipf_gen_state zipf_state;
mehcached_zipf_init(&zipf_state, (uint64_t)PRELOAD_CNT - 2, (double)ZIPF_THETA, (uint64_t)21);
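// Zipf(theta = 0.99) state over the preloaded key range, giving the skewed key popularity for the workload.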
uint8_t *device_hash_table;
uint8_t *device_in;
uint8_t *host_in;
ielem_t *blk_input_h[INSERT_BLOCK];
int blk_elem_num_h[INSERT_BLOCK];
ielem_t **blk_input_d;
int *blk_elem_num_d;
int i;
std::map<selem_t, loc_t> cpu_map;
uint8_t *device_search_in;
uint8_t *device_search_out;
uint8_t *host_search_in;
uint8_t *host_search_out;
uint8_t *host_search_verify;
//CUDA_SAFE_CALL(hipMalloc((void **)&(device_hash_table), HT_SIZE));
size_t file_size = HT_SIZE;
device_hash_table = (uint8_t*)gpm_map_file("./imkv.out", file_size, 1);
CUDA_SAFE_CALL(hipMemset((void *)device_hash_table, 0, HT_SIZE));
// Allocate memory for preloading keys into KVS
CUDA_SAFE_CALL(hipMalloc((void **)&(device_in), PRELOAD_CNT * sizeof(ielem_t)));
CUDA_SAFE_CALL(hipMemset((void *)device_in, 0, PRELOAD_CNT * sizeof(ielem_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_in), PRELOAD_CNT * sizeof(ielem_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc((void **)&(blk_input_d), INSERT_BLOCK * sizeof(ielem_t *)));
CUDA_SAFE_CALL(hipMalloc((void **)&(blk_elem_num_d), INSERT_BLOCK * sizeof(int)));
for (i = 0; i < INSERT_BLOCK; i ++) {
blk_input_h[i] = &(((ielem_t *)device_in)[i*((int)PRELOAD_CNT/INSERT_BLOCK)]);
blk_elem_num_h[i] = 0;
}
// for search
CUDA_SAFE_CALL(hipMalloc((void **)&(device_search_in), PRELOAD_CNT * sizeof(selem_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_in), PRELOAD_CNT * sizeof(selem_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc((void **)&(device_search_out), 2 * PRELOAD_CNT * sizeof(loc_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_out), 2 * PRELOAD_CNT * sizeof(loc_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_verify), PRELOAD_CNT * sizeof(loc_t), hipHostMallocDefault));
// Generate keys
printf("Generate %d keys\n", PRELOAD_CNT);
int num_keys = PRELOAD_CNT / INSERT_BLOCK;
for(int i = 0; i < PRELOAD_CNT; ++i) {
int blk = (i + 1) % INSERT_BLOCK;
int index = num_keys * blk + blk_elem_num_h[blk];
// sig
((ielem_t *)host_in)[index].sig =
((selem_t *)host_search_in)[index].sig =
(i + 1);
// hash
((ielem_t *)host_in)[index].hash =
((selem_t *)host_search_in)[index].hash =
(i + 1);
// loc
((ielem_t *)host_in)[index].loc = (loc_t)rand();
cpu_map[selem_t(i+1, i+1)] = ((ielem_t *)host_in)[index].loc;
blk_elem_num_h[blk]++;
}
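// Keys are striped round-robin across the INSERT_BLOCK partitions; cpu_map records each key's expected location for verification after the preload.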
CUDA_SAFE_CALL(hipMemcpy(blk_input_d, blk_input_h, INSERT_BLOCK * sizeof(void *), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(blk_elem_num_d, blk_elem_num_h, INSERT_BLOCK * sizeof(int), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(device_in, host_in, PRELOAD_CNT * sizeof(ielem_t), hipMemcpyHostToDevice));
// Insert preload keys
printf("Preload %d keys\n", PRELOAD_CNT);
double ins_time = 0, search_time = 0, del_time = 0;
gpu_hash_insert((bucket_t *)device_hash_table,
(ielem_t **)blk_input_d,
(int *)blk_elem_num_d, INSERT_BLOCK, PRELOAD_CNT, 0,
operation_time, ddio_time, persist_time);
// verify with search
CUDA_SAFE_CALL(hipMemcpy(device_search_in, host_search_in, PRELOAD_CNT * sizeof(selem_t), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemset((void *)device_search_out, 1, 2 * PRELOAD_CNT * sizeof(loc_t)));
printf("Verify %d keys\n", PRELOAD_CNT);
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, PRELOAD_CNT, THREAD_NUM, 128, 0);
hipDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(hipMemcpy(host_search_out, device_search_out,
2 * PRELOAD_CNT * sizeof(loc_t), hipMemcpyDeviceToHost));
for (i = 0; i < PRELOAD_CNT; i ++) {
loc_t loc = cpu_map[selem_t(((ielem_t *)host_in)[i].sig, ((ielem_t *)host_in)[i].hash)];
if(((loc_t *)host_search_out)[i<<1] != loc
&& ((loc_t *)host_search_out)[(i<<1)+1] != loc) {
printf("not found insertion %d : out %lx and %lx, should be : %lx\n", i,
((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
loc);
}
}
// Free memory for preload
CUDA_SAFE_CALL(hipFree(device_in));
CUDA_SAFE_CALL(hipFree(blk_input_d));
CUDA_SAFE_CALL(hipFree(blk_elem_num_d));
CUDA_SAFE_CALL(hipFree(device_search_in));
CUDA_SAFE_CALL(hipFree(device_search_out));
CUDA_SAFE_CALL(hipHostFree(host_in));
CUDA_SAFE_CALL(hipHostFree(host_search_in));
CUDA_SAFE_CALL(hipHostFree(host_search_out));
CUDA_SAFE_CALL(hipHostFree(host_search_verify));
// Allocate for actual insert/searches
CUDA_SAFE_CALL(hipMalloc((void **)&(device_in), SELEM_NUM * sizeof(ielem_t)));
CUDA_SAFE_CALL(hipMemset((void *)device_in, 0, SELEM_NUM * sizeof(ielem_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_in), SELEM_NUM * sizeof(ielem_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc((void **)&(blk_input_d), INSERT_BLOCK * sizeof(ielem_t *)));
CUDA_SAFE_CALL(hipMalloc((void **)&(blk_elem_num_d), INSERT_BLOCK * sizeof(int)));
for (i = 0; i < INSERT_BLOCK; i ++) {
blk_input_h[i] = &(((ielem_t *)device_in)[i*(SELEM_NUM/INSERT_BLOCK)]);
blk_elem_num_h[i] = SELEM_NUM/INSERT_BLOCK;
}
CUDA_SAFE_CALL(hipMemcpy(blk_input_d, blk_input_h, INSERT_BLOCK * sizeof(void *), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(blk_elem_num_d, blk_elem_num_h, INSERT_BLOCK * sizeof(int), hipMemcpyHostToDevice));
// for search
CUDA_SAFE_CALL(hipMalloc((void **)&(device_search_in), SELEM_NUM * sizeof(selem_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_in), SELEM_NUM * sizeof(selem_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc((void **)&(device_search_out), 2 * SELEM_NUM * sizeof(loc_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_out), 2 * SELEM_NUM * sizeof(loc_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_verify), SELEM_NUM * sizeof(loc_t), hipHostMallocDefault));
//host_search_verify = (uint8_t *)malloc(SELEM_NUM * sizeof(loc_t));
// start
CUDA_SAFE_CALL(hipDeviceSynchronize());
int lower_bond;
ins_time = 0, search_time = 0, del_time = 0;
persist_time = 0, operation_time = 0, ddio_time = 0;
int num_ops = 100;
int num_get = 95;
int num_set = num_ops - num_get;
active_gpu_cache = false;
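	// Mixed workload: num_ops batches, ~5 insert batches and ~95 Zipf-distributed
	// search batches, chosen in random order; each batch issues SELEM_NUM operations.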
for (int has = 0; has < num_ops; has++) {
int selection = rand() % (num_get + num_set);
if(selection < num_set) {
--num_set;
/* +++++++++++++++++++++++++++++++++++ INSERT +++++++++++++++++++++++++++++++++ */
for (i = 0; i < SELEM_NUM; i += 1) {
lower_bond = (i / BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM;
// sig
((selem_t *)host_search_in)[i].sig
= ((ielem_t *)host_in)[i].sig = rand();
// hash
((selem_t *)host_search_in)[i].hash
= ((ielem_t *)host_in)[i].hash
= lower_bond + rand() % HASH_BLOCK_ELEM_NUM;
// loc
((loc_t *)host_search_verify)[i]
= ((ielem_t *)host_in)[i].loc = (loc_t)rand();
//cpu_map[selem_t(i+1, i+1)] = ((ielem_t *)host_in)[i].loc;
//printf("%d\n", ((int *)host_search_verify)[i]);
}
//for debugging
for (i = 0; i < SELEM_NUM; i += 1) {
//printf("%d %d %d\n", ((int *)host_in)[i*3], (i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM,
//(i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM + BLOCK_ELEM_NUM);
assert(((ielem_t *)host_in)[i].hash < (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM + HASH_BLOCK_ELEM_NUM);
assert(((ielem_t *)host_in)[i].hash >= (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM);
}
//START_BW_MONITOR2("bw_gpm_kvs.csv");
auto start_time = TIME_NOW;
CUDA_SAFE_CALL(hipMemcpy(device_in, host_in, SELEM_NUM * sizeof(ielem_t), hipMemcpyHostToDevice));
hipDeviceSynchronize();
gpu_hash_insert((bucket_t *)device_hash_table,
(ielem_t **)blk_input_d,
(int *)blk_elem_num_d, INSERT_BLOCK, SELEM_NUM, 0,
operation_time, ddio_time, persist_time);
CUDA_SAFE_CALL(hipDeviceSynchronize());
ins_time += time_diff(TIME_NOW, start_time)/ 1000.0f;
//STOP_BW_MONITOR
//OUTPUT_STATS
printf("Batch %d. INSERT: insert %f ms, search %f ms\n", has, ins_time, search_time);
/* +++++++++++++++++++++++++++++++++++ SEARCH +++++++++++++++++++++++++++++++++ */
// verify with search
CUDA_SAFE_CALL(hipMemcpy(device_search_in, host_search_in,
SELEM_NUM * sizeof(selem_t), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
hipDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(hipMemcpy(host_search_out, device_search_out,
2 * SELEM_NUM * sizeof(loc_t), hipMemcpyDeviceToHost));
for (i = 0; i < SELEM_NUM; i ++) {
if(((loc_t *)host_search_out)[i<<1] != ((loc_t *)host_search_verify)[i]
&& ((loc_t *)host_search_out)[(i<<1)+1] != ((loc_t *)host_search_verify)[i]) {
printf("not found insertion %d : out %lx and %lx, should be : %lx\n", i,
((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
((loc_t *)host_search_verify)[i]);
assert(false);
}
}
}
else {
--num_get;
/* +++++++++++++++++++++++++++++++++++ SEARCH +++++++++++++++++++++++++++++++++ */
for (i = 0; i < SELEM_NUM; i += 1) {
lower_bond = (i / BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM;
uint32_t get_key = (uint32_t)mehcached_zipf_next(&zipf_state) + 1;
assert(get_key < PRELOAD_CNT);
// sig
((selem_t *)host_search_in)[i].sig = get_key;
// hash
((selem_t *)host_search_in)[i].hash = get_key;
}
auto search_start = TIME_NOW;
CUDA_SAFE_CALL(hipMemcpy(device_search_in, host_search_in,
SELEM_NUM * sizeof(selem_t), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
hipDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(hipMemcpy(host_search_out, device_search_out,
2 * SELEM_NUM * sizeof(loc_t), hipMemcpyDeviceToHost));
search_time += (double)time_diff(TIME_NOW, search_start) / 1000.0;
for (i = 0; i < SELEM_NUM; i ++) {
loc_t loc = cpu_map[selem_t(((selem_t *)host_search_in)[i].sig, ((selem_t *)host_search_in)[i].hash)];
if(((loc_t *)host_search_out)[i<<1] != loc
&& ((loc_t *)host_search_out)[(i<<1)+1] != loc) {
printf("not found insertion %d, key %d : out %lx and %lx, should be : %lx\n", i,
((selem_t *)host_search_in)[i].sig, ((loc_t *)host_search_out)[i<<1],
((loc_t *)host_search_out)[(i<<1)+1], loc);
assert(false);
}
}
printf("Batch %d. SEARCH: insert %f ms, search %f ms\n", has, ins_time, search_time);
}
}
printf("\nOperation execution time: %f ms\n", operation_time/1000000.0);
printf("DDIO time: %f ms\nPersistTime\t%f\n", ddio_time/1000000.0, persist_time/1000000.0);
printf("\n\n");
printf("Insert: %f ms, search: %f ms\n", ins_time, search_time);
printf("Runtime\t%f\tms\n", ins_time + search_time);
return 0;
}
| 65ac551be15620ea15fb0d3e0f4057fe5ffef63c.cu | /*
* Copyright (c) 2015 Kai Zhang ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#define EMULATE_NVM_BW
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <cuda_runtime.h>
#include <chrono>
#include <byteswap.h>
#include <map>
#include "gpu_hash.h"
#include "libgpm.cuh"
#include "bandwidth_analysis.cuh"
#include "zipf.h"
#define HASH_BLOCK_ELEM_NUM (BUC_NUM/INSERT_BLOCK)
#define BLOCK_ELEM_NUM (SELEM_NUM/INSERT_BLOCK)
#define LOAD_FACTOR 1 / 8
#define PRELOAD_CNT (uint32_t)(((1 << 30)/8) * LOAD_FACTOR)
#define TOTAL_CNT (((uint32_t)1 << 31) - 1)
#define ZIPF_THETA 0.99
double persist_time = 0, operation_time = 0, ddio_time = 0;
#define TIME_NOW std::chrono::high_resolution_clock::now()
#define time_diff(a, b) std::chrono::duration_cast<std::chrono::microseconds>(a - b).count()
int main(int argc, char *argv[])
{
ddio_on();
int SELEM_NUM, THREAD_NUM;
if (argc != 3) {
printf("usage: ./run #elem_num #thread_num, now running with 16384\n");
SELEM_NUM = 16384 * 128;
THREAD_NUM = 16384 * 2;
} else {
SELEM_NUM = atoi(argv[1]);
THREAD_NUM = atoi(argv[2]);
}
printf("elem_num is %d, thread_num is %d\n", SELEM_NUM, THREAD_NUM);
struct zipf_gen_state zipf_state;
mehcached_zipf_init(&zipf_state, (uint64_t)PRELOAD_CNT - 2, (double)ZIPF_THETA, (uint64_t)21);
uint8_t *device_hash_table;
uint8_t *device_in;
uint8_t *host_in;
ielem_t *blk_input_h[INSERT_BLOCK];
int blk_elem_num_h[INSERT_BLOCK];
ielem_t **blk_input_d;
int *blk_elem_num_d;
int i;
std::map<selem_t, loc_t> cpu_map;
uint8_t *device_search_in;
uint8_t *device_search_out;
uint8_t *host_search_in;
uint8_t *host_search_out;
uint8_t *host_search_verify;
//CUDA_SAFE_CALL(cudaMalloc((void **)&(device_hash_table), HT_SIZE));
size_t file_size = HT_SIZE;
device_hash_table = (uint8_t*)gpm_map_file("./imkv.out", file_size, 1);
CUDA_SAFE_CALL(cudaMemset((void *)device_hash_table, 0, HT_SIZE));
// Allocate memory for preloading keys into KVS
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_in), PRELOAD_CNT * sizeof(ielem_t)));
CUDA_SAFE_CALL(cudaMemset((void *)device_in, 0, PRELOAD_CNT * sizeof(ielem_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_in), PRELOAD_CNT * sizeof(ielem_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc((void **)&(blk_input_d), INSERT_BLOCK * sizeof(ielem_t *)));
CUDA_SAFE_CALL(cudaMalloc((void **)&(blk_elem_num_d), INSERT_BLOCK * sizeof(int)));
for (i = 0; i < INSERT_BLOCK; i ++) {
blk_input_h[i] = &(((ielem_t *)device_in)[i*((int)PRELOAD_CNT/INSERT_BLOCK)]);
blk_elem_num_h[i] = 0;
}
// for search
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_search_in), PRELOAD_CNT * sizeof(selem_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_in), PRELOAD_CNT * sizeof(selem_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_search_out), 2 * PRELOAD_CNT * sizeof(loc_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_out), 2 * PRELOAD_CNT * sizeof(loc_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_verify), PRELOAD_CNT * sizeof(loc_t), cudaHostAllocDefault));
// Generate keys
printf("Generate %d keys\n", PRELOAD_CNT);
int num_keys = PRELOAD_CNT / INSERT_BLOCK;
for(int i = 0; i < PRELOAD_CNT; ++i) {
int blk = (i + 1) % INSERT_BLOCK;
int index = num_keys * blk + blk_elem_num_h[blk];
// sig
((ielem_t *)host_in)[index].sig =
((selem_t *)host_search_in)[index].sig =
(i + 1);
// hash
((ielem_t *)host_in)[index].hash =
((selem_t *)host_search_in)[index].hash =
(i + 1);
// loc
((ielem_t *)host_in)[index].loc = (loc_t)rand();
cpu_map[selem_t(i+1, i+1)] = ((ielem_t *)host_in)[index].loc;
blk_elem_num_h[blk]++;
}
CUDA_SAFE_CALL(cudaMemcpy(blk_input_d, blk_input_h, INSERT_BLOCK * sizeof(void *), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(blk_elem_num_d, blk_elem_num_h, INSERT_BLOCK * sizeof(int), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(device_in, host_in, PRELOAD_CNT * sizeof(ielem_t), cudaMemcpyHostToDevice));
// Insert preload keys
printf("Preload %d keys\n", PRELOAD_CNT);
double ins_time = 0, search_time = 0, del_time = 0;
gpu_hash_insert((bucket_t *)device_hash_table,
(ielem_t **)blk_input_d,
(int *)blk_elem_num_d, INSERT_BLOCK, PRELOAD_CNT, 0,
operation_time, ddio_time, persist_time);
// verify with search
CUDA_SAFE_CALL(cudaMemcpy(device_search_in, host_search_in, PRELOAD_CNT * sizeof(selem_t), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemset((void *)device_search_out, 1, 2 * PRELOAD_CNT * sizeof(loc_t)));
printf("Verify %d keys\n", PRELOAD_CNT);
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, PRELOAD_CNT, THREAD_NUM, 128, 0);
cudaDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(cudaMemcpy(host_search_out, device_search_out,
2 * PRELOAD_CNT * sizeof(loc_t), cudaMemcpyDeviceToHost));
for (i = 0; i < PRELOAD_CNT; i ++) {
loc_t loc = cpu_map[selem_t(((ielem_t *)host_in)[i].sig, ((ielem_t *)host_in)[i].hash)];
if(((loc_t *)host_search_out)[i<<1] != loc
&& ((loc_t *)host_search_out)[(i<<1)+1] != loc) {
printf("not found insertion %d : out %lx and %lx, should be : %lx\n", i,
((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
loc);
}
}
// Free memory for preload
CUDA_SAFE_CALL(cudaFree(device_in));
CUDA_SAFE_CALL(cudaFree(blk_input_d));
CUDA_SAFE_CALL(cudaFree(blk_elem_num_d));
CUDA_SAFE_CALL(cudaFree(device_search_in));
CUDA_SAFE_CALL(cudaFree(device_search_out));
CUDA_SAFE_CALL(cudaFreeHost(host_in));
CUDA_SAFE_CALL(cudaFreeHost(host_search_in));
CUDA_SAFE_CALL(cudaFreeHost(host_search_out));
CUDA_SAFE_CALL(cudaFreeHost(host_search_verify));
// Allocate for actual insert/searches
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_in), SELEM_NUM * sizeof(ielem_t)));
CUDA_SAFE_CALL(cudaMemset((void *)device_in, 0, SELEM_NUM * sizeof(ielem_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_in), SELEM_NUM * sizeof(ielem_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc((void **)&(blk_input_d), INSERT_BLOCK * sizeof(ielem_t *)));
CUDA_SAFE_CALL(cudaMalloc((void **)&(blk_elem_num_d), INSERT_BLOCK * sizeof(int)));
for (i = 0; i < INSERT_BLOCK; i ++) {
blk_input_h[i] = &(((ielem_t *)device_in)[i*(SELEM_NUM/INSERT_BLOCK)]);
blk_elem_num_h[i] = SELEM_NUM/INSERT_BLOCK;
}
CUDA_SAFE_CALL(cudaMemcpy(blk_input_d, blk_input_h, INSERT_BLOCK * sizeof(void *), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(blk_elem_num_d, blk_elem_num_h, INSERT_BLOCK * sizeof(int), cudaMemcpyHostToDevice));
// for search
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_search_in), SELEM_NUM * sizeof(selem_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_in), SELEM_NUM * sizeof(selem_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_search_out), 2 * SELEM_NUM * sizeof(loc_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_out), 2 * SELEM_NUM * sizeof(loc_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_verify), SELEM_NUM * sizeof(loc_t), cudaHostAllocDefault));
//host_search_verify = (uint8_t *)malloc(SELEM_NUM * sizeof(loc_t));
// start
CUDA_SAFE_CALL(cudaDeviceSynchronize());
int lower_bond;
ins_time = 0, search_time = 0, del_time = 0;
persist_time = 0, operation_time = 0, ddio_time = 0;
int num_ops = 100;
int num_get = 95;
int num_set = num_ops - num_get;
active_gpu_cache = false;
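	// Mixed workload: num_ops batches, ~5 insert batches and ~95 Zipf-distributed
	// search batches, chosen in random order; each batch issues SELEM_NUM operations.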
for (int has = 0; has < num_ops; has++) {
int selection = rand() % (num_get + num_set);
if(selection < num_set) {
--num_set;
/* +++++++++++++++++++++++++++++++++++ INSERT +++++++++++++++++++++++++++++++++ */
for (i = 0; i < SELEM_NUM; i += 1) {
lower_bond = (i / BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM;
// sig
((selem_t *)host_search_in)[i].sig
= ((ielem_t *)host_in)[i].sig = rand();
// hash
((selem_t *)host_search_in)[i].hash
= ((ielem_t *)host_in)[i].hash
= lower_bond + rand() % HASH_BLOCK_ELEM_NUM;
// loc
((loc_t *)host_search_verify)[i]
= ((ielem_t *)host_in)[i].loc = (loc_t)rand();
//cpu_map[selem_t(i+1, i+1)] = ((ielem_t *)host_in)[i].loc;
//printf("%d\n", ((int *)host_search_verify)[i]);
}
//for debugging
for (i = 0; i < SELEM_NUM; i += 1) {
//printf("%d %d %d\n", ((int *)host_in)[i*3], (i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM,
//(i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM + BLOCK_ELEM_NUM);
assert(((ielem_t *)host_in)[i].hash < (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM + HASH_BLOCK_ELEM_NUM);
assert(((ielem_t *)host_in)[i].hash >= (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM);
}
//START_BW_MONITOR2("bw_gpm_kvs.csv");
auto start_time = TIME_NOW;
CUDA_SAFE_CALL(cudaMemcpy(device_in, host_in, SELEM_NUM * sizeof(ielem_t), cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
gpu_hash_insert((bucket_t *)device_hash_table,
(ielem_t **)blk_input_d,
(int *)blk_elem_num_d, INSERT_BLOCK, SELEM_NUM, 0,
operation_time, ddio_time, persist_time);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
ins_time += time_diff(TIME_NOW, start_time)/ 1000.0f;
//STOP_BW_MONITOR
//OUTPUT_STATS
printf("Batch %d. INSERT: insert %f ms, search %f ms\n", has, ins_time, search_time);
/* +++++++++++++++++++++++++++++++++++ SEARCH +++++++++++++++++++++++++++++++++ */
// verify with search
CUDA_SAFE_CALL(cudaMemcpy(device_search_in, host_search_in,
SELEM_NUM * sizeof(selem_t), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
cudaDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(cudaMemcpy(host_search_out, device_search_out,
2 * SELEM_NUM * sizeof(loc_t), cudaMemcpyDeviceToHost));
for (i = 0; i < SELEM_NUM; i ++) {
if(((loc_t *)host_search_out)[i<<1] != ((loc_t *)host_search_verify)[i]
&& ((loc_t *)host_search_out)[(i<<1)+1] != ((loc_t *)host_search_verify)[i]) {
printf("not found insertion %d : out %lx and %lx, should be : %lx\n", i,
((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
((loc_t *)host_search_verify)[i]);
assert(false);
}
}
}
else {
--num_get;
/* +++++++++++++++++++++++++++++++++++ SEARCH +++++++++++++++++++++++++++++++++ */
for (i = 0; i < SELEM_NUM; i += 1) {
lower_bond = (i / BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM;
uint32_t get_key = (uint32_t)mehcached_zipf_next(&zipf_state) + 1;
assert(get_key < PRELOAD_CNT);
// sig
((selem_t *)host_search_in)[i].sig = get_key;
// hash
((selem_t *)host_search_in)[i].hash = get_key;
}
auto search_start = TIME_NOW;
CUDA_SAFE_CALL(cudaMemcpy(device_search_in, host_search_in,
SELEM_NUM * sizeof(selem_t), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
cudaDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(cudaMemcpy(host_search_out, device_search_out,
2 * SELEM_NUM * sizeof(loc_t), cudaMemcpyDeviceToHost));
search_time += (double)time_diff(TIME_NOW, search_start) / 1000.0;
for (i = 0; i < SELEM_NUM; i ++) {
loc_t loc = cpu_map[selem_t(((selem_t *)host_search_in)[i].sig, ((selem_t *)host_search_in)[i].hash)];
if(((loc_t *)host_search_out)[i<<1] != loc
&& ((loc_t *)host_search_out)[(i<<1)+1] != loc) {
printf("not found insertion %d, key %d : out %lx and %lx, should be : %lx\n", i,
((selem_t *)host_search_in)[i].sig, ((loc_t *)host_search_out)[i<<1],
((loc_t *)host_search_out)[(i<<1)+1], loc);
assert(false);
}
}
printf("Batch %d. SEARCH: insert %f ms, search %f ms\n", has, ins_time, search_time);
}
}
printf("\nOperation execution time: %f ms\n", operation_time/1000000.0);
printf("DDIO time: %f ms\nPersistTime\t%f\n", ddio_time/1000000.0, persist_time/1000000.0);
printf("\n\n");
printf("Insert: %f ms, search: %f ms\n", ins_time, search_time);
printf("Runtime\t%f\tms\n", ins_time + search_time);
return 0;
}
|
19f98abe8c735d943cbc04c315ca236c64540c18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../lib/coo.h"
#include "../lib/csc.h"
#include "../lib/csr.h"
#include "../lib/wlt.h"
#include "../lib/thread.h"
#include "../lib/spGEMM.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#define SHARED
#define ERROR_CHECK \
{\
hipError_t err = hipGetLastError(); \
if ( hipSuccess != err ) \
{\
printf("[%s:%d]CUDA ERROR : %s\n", __FILE__, __LINE__, hipGetErrorString(err) ); \
sleep(5); \
exit(-1); \
}\
}
//#define MERGE
#define M16 1//32
#define M8 1//16
#define M4 1//8
#define M2 1//4
#define M1 1//2
/*
#define M16 32
#define M8 16
#define M4 8
#define M2 4
#define M1 2
*/
#define MT 64
#define GRID 1024
#define ALPHA 50
#define BETA 10
__device__ uint get_smid(void){
uint ret;
asm("mov.u32 %0, %smid;" : "=r"(ret));
return ret;
}
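// calcUpp: one block per row index. rUpp[i] accumulates an upper bound on the
// number of intermediate products for output row i (the sum of the lengths of
// the rows referenced by row i's column indices), wlt[i] records a
// rowLen*colLen workload estimate, and part_out/part_row count the indices
// that contribute any work.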
__global__ void calcUpp(
int* csrCidx,int *csrPtr,
int* cscRidx,int *cscPtr,
int *rUpp,
int *wlt,
int* N, int * part_out, int* part_row){
int tid = threadIdx.x;
int bid = blockIdx.x; //bid = rowIdx
int bsize = blockDim.x;
if(bid<N[0]){
/* CALCULATE UPPER BOUND */
int base = csrPtr[bid];
int rLen = csrPtr[bid+1]-base;
int cLen = cscPtr[bid+1]-cscPtr[bid];
for(int i = tid ; i < rLen; i+=bsize){
			int target = csrCidx[base+i];
			int len = csrPtr[target+1] - csrPtr[target];
			atomicAdd(&rUpp[bid], len);
}
/* BUILD WORKLOADTABLE */
wlt[bid] = rLen*cLen;
if(rLen*cLen && tid==0) atomicAdd(&part_out[0],1);
if(rUpp[bid] && tid==0) atomicAdd(&part_row[0],1);
}
}
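// findDom: classify output rows. Indices whose upper bound exceeds roughly 5x
// the average go into dominator_c (handled by merge_limitting later), the rest
// into else_c; while the if(1) guard is in place every index is also appended
// to domR and the dominator row/column counts are accumulated.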
__global__ void findDom(int* csrPtr, int* cscPtr,
int *rUpp,
int* wlt,
int *domCC, int *domRC,
int *domR, int *domN,
int *dominator_c,
int *number_of_dominators_in_c,
int *else_c,
int *number_of_else_in_c,
char *c_bool,
int *N,
int* part_out,
int* part_row){
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int index = bsize*bid + tid;
if(index < N[0]){
if(1){//(double)wlt[index]>(rUpp[N[0]]/part_out[0] * ALPHA)){
int cntr = atomicAdd(&domN[0],1);
int cc = cscPtr[index+1] - cscPtr[index];
int rc = csrPtr[index+1] - csrPtr[index];
domR[cntr] = index;
wlt[index]=0;
atomicAdd(&domCC[0],cc);
atomicAdd(&domRC[0],rc);
}
if(rUpp[index+1] - rUpp[index] > rUpp[*N]/part_row[0]*5){
int temp = atomicAdd(&number_of_dominators_in_c[0], 1);
dominator_c[temp] = index;
// c_bool[index] = 1;
}
else{
int temp = atomicAdd(&number_of_else_in_c[0],1);
else_c[temp] = index;
}
}
}
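// find_thread_count_approx: histogram the row indices into power-of-two
// buckets ((0,2], (2,4], ..., (64,128], >128) by the length of the
// corresponding CSR row.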
__global__ void find_thread_count_approx(
int* a_ptr, int* b_ptr,int* thread_bin, int *N)
{
int global_tid = threadIdx.x + blockIdx.x*blockDim.x;
if(global_tid<N[0]){
int len = b_ptr[global_tid+1] - b_ptr[global_tid];
if(0<len && len <=2) atomicAdd(&thread_bin[0],1);
else if(2<len && len <=4) atomicAdd(&thread_bin[1],1);
else if(4<len && len <=8) atomicAdd(&thread_bin[2],1);
else if(8<len && len <=16) atomicAdd(&thread_bin[3],1);
else if(16<len && len <=32) atomicAdd(&thread_bin[4],1);
else if(32<len && len <=64) atomicAdd(&thread_bin[5],1);
else if(64<len && len <= 128) atomicAdd(&thread_bin[6],1);
else if(128<len) atomicAdd(&thread_bin[7],1);
}
}
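// fill_thread_bin_approx: second pass that scatters each row index into
// idx_bin at the slot reserved for its bucket; thread_bin holds the bucket
// base offsets and thread_counter the running fill position per bucket.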
__global__ void fill_thread_bin_approx(
int* a_ptr, int* b_ptr,
int* thread_bin, int* thread_counter,
int* index_bin, int* N)
{
int global_tid = threadIdx.x + blockIdx.x*blockDim.x;
if(global_tid < N[0]){
int len = b_ptr[global_tid+1] - b_ptr[global_tid];
if(0<len && len <=2){
int a = atomicAdd(&thread_counter[0], 1);
index_bin[thread_bin[0] + a] = global_tid;
}
else if(2<len && len <=4){
int a = atomicAdd(&thread_counter[1], 1);
index_bin[thread_bin[1] + a] = global_tid;
}
else if(4<len && len <=8){
int a = atomicAdd(&thread_counter[2], 1);
index_bin[thread_bin[2] + a] = global_tid;
}
else if(8<len && len <=16){
int a = atomicAdd(&thread_counter[3], 1);
index_bin[thread_bin[3] + a]=global_tid;
}
else if(16<len && len <=32){
int a = atomicAdd(&thread_counter[4], 1);
index_bin[thread_bin[4] + a] = global_tid;
}
else if(32<len && len <=64){
int a = atomicAdd(&thread_counter[5], 1);
index_bin[thread_bin[5] + a] = global_tid;
}
else if(64<len && len <=128){
int a = atomicAdd(&thread_counter[6], 1);
index_bin[thread_bin[6] + a] =global_tid;
}
else if(128< len ){
int a = atomicAdd(&thread_counter[7], 1);
index_bin[thread_bin[7] + a] =global_tid;
}
}
}
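// calcInter*: expansion phase. Each (sub-)block takes one index k, forms the
// outer product of column k (CSC) with row k (CSR), and appends the partial
// products into the slots that RUPP reserves per output row, with RBOX giving
// the next free offset within a row. The Merge1/2/4/8/16 variants pack several
// short rows into a single thread block; Base/NoMerge use one block per index.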
__global__ void calcInterBase(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int idx = idx_bin[bid];
if(WLS[idx]==0){asm("exit;");}
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase;
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(tid==0)
resBase = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= tid ;ri < rLen ; ri+=bsize)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase+ri] = cIdx;
c_val[rowBase+resBase+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
}
__global__ void calcInterNoMerge(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newTid=tid;
int newBid=0;
int idx = idx_bin[bid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[16];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
		WLS[idx] = 0; // mark this index as handled so a later launch skips it
}
}
__global__ void calcInterMerge1(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newTid=tid;
int newBid=0;
bid *=M1;
newBid = tid / (bsize/M1);
newTid = tid - (bsize/M1)*newBid;
int idx = idx_bin[bid+newBid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[32];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize/M1)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
		WLS[idx] = 0; // mark this index as handled so a later launch skips it
}
}
__global__ void calcInterMerge2(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newTid=tid;
int newBid=0;
bid *=M2;
newBid = tid / (bsize/M2);
newTid = tid - (bsize/M2)*newBid;
int idx = idx_bin[bid+newBid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[32];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize/M2)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
		WLS[idx] = 0; // mark this index as handled so a later launch skips it
}
}
__global__ void calcInterMerge4(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newTid = tid;
int newBid = 0;
bid*= M4;
newBid = tid / (bsize/M4);
newTid = tid - (bsize/M4)*newBid;
int idx = idx_bin[bid + newBid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[32];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize/M4)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
		WLS[idx] = 0; // mark this index as handled so a later launch skips it
}
}
__global__ void calcInterMerge8(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newBid = 0;
int newTid = tid;
bid*= M8;
newBid = tid / (bsize/M8);
newTid = tid - (bsize/M8)*newBid;
int idx = idx_bin[bid + newBid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[32];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize/M8)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
		WLS[idx] = 0; // mark this index as handled so a later launch skips it
}
}
__global__ void calcInterMerge16(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newBid = 0;
int newTid = tid;
bid*= M16;
newBid = tid / (bsize/M16);
newTid = tid - (bsize/M16)*newBid;
int idx = idx_bin[bid + newBid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[32];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize/M16)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
		WLS[idx] = 0; // mark this index as handled so a later launch skips it
}
}
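// calcInterDom: expansion for explicit (row, column) pairs listed in P, used
// for the dominator work that the host splits out from the binned kernels.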
__global__ void calcInterDom(
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
// cooData *interC, // INTERMEDIATE C
int *RUPP,
PP *P,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int rid = P[bid].r;
int cid = P[bid].c;
int csrBase = csrPtr[rid];
int cscBase = cscPtr[cid];
int rLen = csrPtr[rid+1] - csrBase;
int cLen = cscPtr[cid+1] - cscBase;
__shared__ int resBase;
resBase = 0;
for( int ci=0 ; ci < cLen; ci ++)
{
__syncthreads();
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(tid==0) resBase = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri = tid; ri < rLen ; ri+=bsize )
{
int cIdx = csrCidx[ csrBase + ri ];
//interC[rowBase + resBase + ri].cidx = cIdx;
//interC[rowBase + resBase + ri].ridx = rIdx;
//interC[rowBase + resBase + ri].val = cscVal[ cscBase + ci ] * csrVal[ csrBase + ri ];
c_idx[rowBase+resBase+ri] = cIdx;
c_val[rowBase+resBase+ri] = cscVal[ cscBase + ci ] * csrVal[ csrBase + ri ];
}
}
}
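// merge_limitting / merge: contraction phase. Each block owns one dense
// accumulator row in DROW; the partial products of an output row are summed
// into it, newly touched column indices are compacted into c_idx, the
// compacted values are written back to c_val, and c_ptr[row] receives the
// row's final nonzero count. merge_limitting walks dominator_c, merge walks else_c.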
__global__ void merge_limitting(
//const cooData *interC, // INTERMEDIATE C
int *RUPP,
int *RBOX,
float* DROW, // DENSE ROW
//int* MIDX, float* MVAL, int* MPTR, // MERGED C
int* c_jdx, float* c_val, int* c_ptr,
int *c_idx,
int* dominator_c,
int* number_of_dominators_in_c,
int *N)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
__shared__ int ip;
__shared__ double shm[768*4];
shm[0] = 0;
for(int RR = 0 ; RR<(*N/GRID)+1; RR++){
if(bid+(RR)*GRID>=*number_of_dominators_in_c) return;
int ii = dominator_c[bid+RR*GRID];
int rBase = RUPP[ii];
int rLen = RUPP[ii+1]-RUPP[ii];
int dBase = bid*(*N);
__syncthreads();
ip = 0;
__syncthreads();
for(int i=tid; i<rLen;i+=bsize){
int index = c_jdx[rBase+i];
float boolflag = atomicExch(&DROW[dBase+index], DROW[dBase+index] + c_val[rBase+i]);
if(boolflag<0.0001 && boolflag>-0.0001){
int ip_local = atomicAdd(&ip,1);
c_idx[rBase+ip_local] = index;//c_idx[rBase+i];
//atomicExch(&c_idx[rBase+ip_local],index);
}
}
__syncthreads();
for(int i=tid;i<ip;i+=bsize){
int v = c_idx[rBase + i];
c_val[rBase + i] = DROW[dBase+v];
atomicExch(&DROW[dBase+v], 0);
}
__syncthreads();
if(tid==0) c_ptr[ii] = ip;
}
}
__global__ void merge(
//const cooData *interC, // INTERMEDIATE C
int *RUPP,
int *RBOX,
float* DROW, // DENSE ROW
//int* MIDX, float* MVAL, int* MPTR, // MERGED C
int* c_jdx, float* c_val, int* c_ptr,
int *c_idx,
int* else_c,
int* number_of_else_in_c,
int *N)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
// __shared__ double shm[3*1024] ;
// shm[0] = 0;
__shared__ int ip;
for(int RR = 0 ; RR<(*N/GRID)+1; RR++){
if(bid+(RR)*GRID>=*number_of_else_in_c) return;
int ii = else_c[bid+RR*GRID];
int rBase = RUPP[ii];
int rLen = RUPP[ii+1]-RUPP[ii];
int dBase = bid*(*N);
__syncthreads();
ip = 0;
__syncthreads();
for(int i=tid; i<rLen;i+=bsize){
int index = c_jdx[rBase+i];
float boolflag = atomicExch(&DROW[dBase+index], DROW[dBase+index] + c_val[rBase+i]);
if(boolflag<0.0001 && boolflag>-0.0001){
int ip_local = atomicAdd(&ip,1);
c_idx[rBase+ip_local] = index;//c_idx[rBase+i];
//atomicExch(&c_idx[rBase+ip_local],index);
}
}
__syncthreads();
for(int i=tid;i<ip;i+=bsize){
int v = c_idx[rBase + i];
c_val[rBase + i] = DROW[dBase+v];
atomicExch(&DROW[dBase+v], 0);
}
__syncthreads();
if(tid==0) c_ptr[ii] = ip;
}
}
int *thread_counter_d, *thread_counter_h;
int *thread_bin_d, *thread_bin_h;
int *idx_bin_d, *idx_bin_h;
int *participants_d;
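// Host driver: build the bucket histogram and the per-bucket index bins on the
// device, prefix-sum the bucket counts on the host, and time the binning step.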
void cudaPath_find_thread_count_approx(int n)
{
int block_size = 32;
int block_num = n/32+1;
float time;
hipMalloc((void**) &(thread_bin_d), sizeof(int) *10);
hipMalloc((void**) &(thread_counter_d), sizeof(int) *10);
hipMemset(thread_bin_d, 0, sizeof(int)*10);
hipMemset(thread_counter_d, 0, sizeof(int)*10);
hipEvent_t ev_count_thread_s, ev_count_thread_e;
hipEventCreate(&ev_count_thread_s);
hipEventCreate(&ev_count_thread_e);
hipEventRecord(ev_count_thread_s, 0);
hipLaunchKernelGGL(( find_thread_count_approx), dim3(block_num),dim3(block_size), 0, 0,
CSC_PTR_DEV,
CSR_PTR_DEV,
thread_bin_d,N_DEV);
thread_bin_h = (int*)malloc(sizeof(int)*10);
thread_counter_h = (int*)malloc(sizeof(int)*10);
hipMemcpy((void*)&thread_bin_h[1], (const void*)thread_bin_d, sizeof(int)*8,hipMemcpyDeviceToHost); ERROR_CHECK;
int total = 0;
thread_bin_h[0] = 0;
thread_bin_h[8] = 0;
for(int i=0;i<8;i++){
thread_bin_h[i+1] += thread_bin_h[i];
}
total = thread_bin_h[8];
hipMemcpy((void*)thread_bin_d, (const void*)thread_bin_h, sizeof(int)*8,hipMemcpyHostToDevice); ERROR_CHECK;
hipMalloc((void**) &(idx_bin_d), sizeof(int)*thread_bin_h[8]); ERROR_CHECK;
idx_bin_h = (int*)malloc(sizeof(int)*thread_bin_h[8]);
hipLaunchKernelGGL(( fill_thread_bin_approx), dim3(block_num), dim3(block_size), 0, 0,
CSC_PTR_DEV, CSR_PTR_DEV,
thread_bin_d, thread_counter_d,
idx_bin_d,N_DEV);
ERROR_CHECK;
hipDeviceSynchronize();
hipEventRecord(ev_count_thread_e, 0);
hipEventSynchronize(ev_count_thread_e);
hipEventElapsedTime(&t_bin,ev_count_thread_s, ev_count_thread_e);
hipMemcpy((void*)idx_bin_h, (const void*)idx_bin_d, sizeof(int)*thread_bin_h[8],hipMemcpyDeviceToHost); ERROR_CHECK;
hipMemcpy((void*)thread_counter_h, (const void*)thread_counter_d, sizeof(int)*10,hipMemcpyDeviceToHost); ERROR_CHECK;
int counter = 0;
/* for(int i=0;i<8;i++){
printf("%d ",thread_counter_h[i]);
int a = thread_bin_h[i+1] - thread_bin_h[i];
for(int j =0;j<a;j++){
printf("%d %d\n",counter++,idx_bin_h[j]);
}
}
printf("\n");*/
}
int* participants_row_d;
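// cudaPass_: upload A (CSC) and B (CSR) to the device, launch calcUpp to fill
// the per-row upper bounds and workload table, and copy the bounds back into
// RUPP_HOST starting at index 1.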
void cudaPass_(int n, int e){
int BLOCK_SIZE=32;
int BLOCK_NUM = n;
dim3 block(BLOCK_SIZE);
dim3 grid(BLOCK_NUM);
/* CUDAMALLOC & CUDAMEMCPY B(CSR) */
hipMalloc((void**) &(CSR_CIDX_DEV), sizeof(int) *e);
hipMalloc((void**) &(CSR_VAL_DEV), sizeof(float)*e);
hipMalloc((void**) &(CSR_PTR_DEV), sizeof(int) *(2*n+1));
hipMemcpy((void*) (CSR_CIDX_DEV), (const void*)(CSR_HOST.cidx), sizeof(int) *e, hipMemcpyHostToDevice);
hipMemcpy((void*) (CSR_VAL_DEV), (const void*)(CSR_HOST.val), sizeof(float) *e, hipMemcpyHostToDevice);
hipMemcpy((void*) (CSR_PTR_DEV), (const void*)(CSR_HOST.header), sizeof(int) *(n+1), hipMemcpyHostToDevice);
/* CUDAMALLOC & CUDAMEMCPY A(CSC) */
hipMalloc((void**) &(CSC_RIDX_DEV), sizeof(int) *e);
hipMalloc((void**) &(CSC_VAL_DEV), sizeof(float)*e);
hipMalloc((void**) &(CSC_PTR_DEV), sizeof(int) *(2*n+1));
hipMemcpy((void*) (CSC_RIDX_DEV), (const void*)(CSC_HOST.ridx), sizeof(int) *e, hipMemcpyHostToDevice);
hipMemcpy((void*) (CSC_VAL_DEV), (const void*)(CSC_HOST.val), sizeof(float) *e, hipMemcpyHostToDevice);
hipMemcpy((void*) (CSC_PTR_DEV), (const void*)(CSC_HOST.header), sizeof(int) *(n+1), hipMemcpyHostToDevice);
hipMalloc((void**) &(RUPP_DEV), sizeof(int)*(n+1));
hipMalloc((void**) &(WLS_DEV), sizeof(int)*(n+1));
hipMemset(RUPP_DEV, 0, sizeof(int)*(n+1));
hipMalloc((void**) &(N_DEV),sizeof(int));
hipMemcpy((void*)N_DEV, (const void*)&n ,sizeof(int),hipMemcpyHostToDevice);
hipMalloc((void**) &(participants_d), sizeof(int));
hipMalloc((void**) &(participants_row_d), sizeof(int));//row
hipMemset(RUPP_DEV, 0, sizeof(int));
hipEvent_t ev_pre_s, ev_pre_e;
hipEventCreate(&ev_pre_s);
hipEventCreate(&ev_pre_e);
hipEventRecord(ev_pre_s, 0);
hipLaunchKernelGGL(( calcUpp), dim3(grid), dim3(block), 0, 0,
CSR_CIDX_DEV, CSR_PTR_DEV,
CSC_RIDX_DEV, CSC_PTR_DEV,
RUPP_DEV,
WLS_DEV,
N_DEV,
participants_d, participants_row_d);
//ERROR_CHECK;
hipDeviceSynchronize();
hipEventRecord(ev_pre_e, 0);
hipEventSynchronize(ev_pre_e);
hipEventElapsedTime(&t_pre,ev_pre_s, ev_pre_e);
RUPP_HOST = (int*)malloc(sizeof(int)*(n+1));
hipHostMalloc((void**)&(WLS_HOST),sizeof(int)*(n+1));
hipMemcpy((void*)&RUPP_HOST[1], (const void*)RUPP_DEV, sizeof(int)*n, hipMemcpyDeviceToHost);
}
int* number_of_dominators_in_c;
int* number_of_dominators_in_c_h;
int* dominator_c;
int* dominator_c_h;
int* else_c;
int *number_of_else_in_c;
char* c_bool;
void cudaPass_F(int n){
int* domRC_dev;
int* domCC_dev;
int* domN_dev;
int* domR_dev;
hipMemcpy((void*)RUPP_DEV,(const void*)RUPP_HOST, sizeof(int)*(n+1), hipMemcpyHostToDevice); ERROR_CHECK;
hipMalloc((void**)&(domR_dev), sizeof(int)*100000);ERROR_CHECK;
hipMalloc((void**)&(domRC_dev), sizeof(int));ERROR_CHECK;
hipMalloc((void**)&(domCC_dev), sizeof(int));ERROR_CHECK;
hipMalloc((void**)&(dominator_c),sizeof(int)*n);
hipMalloc((void**)&(else_c),sizeof(int)*n);
hipMalloc((void**)&(domN_dev), sizeof(int)) ;ERROR_CHECK;
hipMalloc((void**)&(number_of_dominators_in_c), sizeof(int)); ERROR_CHECK;
hipMalloc((void**)&(number_of_else_in_c), sizeof(int)); ERROR_CHECK;
hipMalloc((void**)&(c_bool), sizeof(char)*n); ERROR_CHECK;
hipMemset(domRC_dev, 0, sizeof(int));
hipMemset(domCC_dev, 0, sizeof(int));
hipMemset(domN_dev, 0, sizeof(int));
hipMemset(number_of_dominators_in_c, 0, sizeof(int));
hipMemset(number_of_else_in_c, 0, sizeof(int));
hipMemset(c_bool, 0, sizeof(char)*n);
//printf("%d\n",n);
int grid = n/256+1;
int block = 256;
hipLaunchKernelGGL(( findDom), dim3(grid), dim3(block), 0, 0,
CSR_PTR_DEV, CSC_PTR_DEV,
RUPP_DEV,
WLS_DEV,
domCC_dev, domRC_dev,
domR_dev, domN_dev,
dominator_c,
number_of_dominators_in_c,
else_c,
number_of_else_in_c,
c_bool,
N_DEV,
participants_d,participants_row_d);
ERROR_CHECK;
hipDeviceSynchronize();
hipEvent_t ev_fd_s, ev_fd_e;
hipEventCreate(&ev_fd_s);
hipEventCreate(&ev_fd_e);
hipEventRecord(ev_fd_s, 0);
hipMemcpy((void*)domR, (const void*)domR_dev, sizeof(int)*100000,hipMemcpyDeviceToHost); ERROR_CHECK;
hipMemcpy((void*)domRC, (const void*)domRC_dev, sizeof(int),hipMemcpyDeviceToHost); ERROR_CHECK;
hipMemcpy((void*)domCC, (const void*)domCC_dev, sizeof(int),hipMemcpyDeviceToHost); ERROR_CHECK;
hipMemcpy((void*)domN, (const void*)domN_dev, sizeof(int),hipMemcpyDeviceToHost); ERROR_CHECK;
dominator_c_h = (int*)malloc(sizeof(int)*n);
number_of_dominators_in_c_h = (int*)malloc(sizeof(int));
hipMemcpy((void*)dominator_c_h, (const void*)dominator_c, sizeof(int)*n, hipMemcpyDeviceToHost); ERROR_CHECK;
hipMemcpy((void*)number_of_dominators_in_c_h, number_of_dominators_in_c, sizeof(int),hipMemcpyDeviceToHost);
if(number_of_dominators_in_c_h[0]!=0) printf("!\n");
hipEventRecord(ev_fd_e, 0);
hipEventSynchronize(ev_fd_e);
hipEventElapsedTime(&t_fd,ev_fd_s, ev_fd_e);
}
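// cudaPassB: allocate the intermediate C arrays sized by the upper bounds,
// then launch one calcInterMerge*/NoMerge variant per occupancy bucket and
// time the whole expansion.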
void cudaPassB(int n){
int BLOCK_SIZE = 256;
dim3 block(BLOCK_SIZE);
dim3 grid(thread_counter_h[0]);
hipMalloc((void**) &(c_jdx_d), sizeof(int)*RUPP_HOST[n]);
hipMalloc((void**) &(c_idx_d), sizeof(int)*RUPP_HOST[n]);
hipMalloc((void**) &(c_val_d), sizeof(float)*RUPP_HOST[n]);
hipMalloc((void**) &(c_ptr_d), sizeof(int)*(n+1));
hipMalloc((void**) &(RBOX_DEV), sizeof(int)*n);
hipMemset(RBOX_DEV, 0, sizeof(int)*n);
hipEvent_t ev_spgemm_s, ev_spgemm_e;
hipEventCreate(&ev_spgemm_s);
hipEventCreate(&ev_spgemm_e);
hipEventRecord(ev_spgemm_s, 0);
/*
calcInterBase<<<grid,block>>>(
&idx_bin_d[0],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
hipDeviceSynchronize();
hipDeviceSynchronize();
hipEventRecord(ev_spgemm_e, 0);
hipEventSynchronize(ev_spgemm_e);
hipEventElapsedTime(&t_spgemm_l,ev_spgemm_s, ev_spgemm_e);
return;
*/
int off;
off = thread_counter_h[0];
//printf("%d\n",thread_counter_h[0]);
if(off){
dim3 block_num0(off/M16+1); // mergefactor : 8
//dim3 block_num0(off); // mergefactor : 8
hipLaunchKernelGGL(( calcInterMerge16), dim3(block_num0),dim3(block), 0, 0, //block>>>(
//calcInterNoMerge<<<block_num0,block>>>(
&idx_bin_d[0],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
hipDeviceSynchronize();
ERROR_CHECK;
}
off = thread_counter_h[1];
//printf("%d\n",off);
if(off){
//printf("2\n");
dim3 block_num1(off/M8+1); // merge factor : 4
//dim3 block_num1(off); // merge factor : 4
hipLaunchKernelGGL(( calcInterMerge8), dim3(block_num1),dim3(block), 0, 0,
//calcInterNoMerge<<<block_num1,block>>>(
&idx_bin_d[thread_bin_h[1]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
hipDeviceSynchronize();
ERROR_CHECK;
}
off = thread_counter_h[2];
//off = thread_bin_h[3] - thread_bin_h[2];
//printf("%d\n",off);
if(off){
//printf("3\n");
dim3 block_num2(off/M4+1);
hipLaunchKernelGGL(( calcInterMerge4), dim3(block_num2),dim3(block), 0, 0,
&idx_bin_d[thread_bin_h[2]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
hipDeviceSynchronize();
ERROR_CHECK;
}
	//off = thread_bin_h[4] - thread_bin_h[3];
off = thread_counter_h[3];
// printf("%d\n",off);
if(off){
//printf("4\n");
dim3 block_num3(off/M2+1);
hipLaunchKernelGGL(( calcInterMerge2), dim3(block_num3),dim3(block), 0, 0,
&idx_bin_d[thread_bin_h[3]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
hipDeviceSynchronize();
ERROR_CHECK;
}
//off = thread_bin_h[5] - thread_bin_h[4];
off = thread_counter_h[4];
//printf("%d\n",off);
if(off){
//printf("5\n");
dim3 block_num4(off/M1+1);
hipLaunchKernelGGL(( calcInterMerge1), dim3(block_num4),dim3(block), 0, 0,
&idx_bin_d[thread_bin_h[4]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
hipDeviceSynchronize();
ERROR_CHECK;
}
//off = thread_bin_h[6] - thread_bin_h[5];
off = thread_counter_h[5];
//printf("%d\n",off);
if(off){
//printf("6\n");
dim3 block_size64(64);
dim3 block_num5(off);
hipLaunchKernelGGL(( calcInterNoMerge), dim3(block_num5),dim3(block_size64), 0, 0,
&idx_bin_d[thread_bin_h[5]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
hipDeviceSynchronize();
ERROR_CHECK;
}
//off = thread_bin_h[7] - thread_bin_h[6];
off = thread_counter_h[6];
//printf("%d\n",off);
if(off){
dim3 block_size128(128);
dim3 block_num6(off);
hipLaunchKernelGGL(( calcInterNoMerge), dim3(block_num6),dim3(block_size128), 0, 0,
&idx_bin_d[thread_bin_h[6]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
hipDeviceSynchronize();
ERROR_CHECK;
}
//off = thread_bin_h[8] - thread_bin_h[7];
off = thread_counter_h[7];
//printf("%d\n",off);
if(off){
dim3 block_size256(128);
dim3 block_num7(off);
hipLaunchKernelGGL(( calcInterNoMerge), dim3(block_num7),dim3(block_size256), 0, 0,
&idx_bin_d[thread_bin_h[7]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
hipDeviceSynchronize();
ERROR_CHECK;
}
for(int i=0;i<8;i++){
printf("%d ",thread_counter_h[i]);
}
printf("\n");
/* float total_eff = 0;
for(int i=0;i<8;i++){
int stripe;
int base;
int box;
if(i==0){ stripe = 32; box=64;}
else if(i==1) {stripe=16; box = 64;}
else if(i==2) {stripe=8; box = 64;}
else if(i==3) {stripe=4; box = 64;}
else if(i==4) {stripe=2; box = 64;}
else {stripe = 1; box = 128;}
if(i==0) base = 0;
else thread_bin_h[i-1];
float eff = 0;
for(int j=0;j<thread_counter_h[i];j+=stripe){
float temp = 0;
int max = 1;
for(int k=0;k<stripe;k++){
if(thread_counter_h[i] <= j+k) break;
int index = idx_bin_h[base+j+k];
int len = CSR_HOST.header[index+1] - CSR_HOST.header[index];
if(max<len) max = len;
temp +=len*(CSC_HOST.header[index+1] - CSC_HOST.header[index]);
}
temp/=max;
temp/=box;
eff += temp;
}
printf("%f %d %d\n",eff, thread_counter_h[i], thread_bin_h[8]);
total_eff += eff/((float)thread_bin_h[8]);
}
printf("%f \n",total_eff);
*/
hipDeviceSynchronize();
hipEventRecord(ev_spgemm_e, 0);
hipEventSynchronize(ev_spgemm_e);
hipEventElapsedTime(&t_spgemm_l,ev_spgemm_s, ev_spgemm_e);
}
void cudaPassBB(int n,int t){
int BLOCK_SIZE = 384;
dim3 block(BLOCK_SIZE);
dim3 grid(t);
hipEvent_t ev_spgemm_s, ev_spgemm_e;
hipEventCreate(&ev_spgemm_s);
hipEventCreate(&ev_spgemm_e);
hipEventRecord(ev_spgemm_s, 0);
hipMalloc((void**)&(P_DEV), sizeof(PP)*t); ERROR_CHECK;
hipMemcpy((void*)P_DEV, (const void*)P_HOST, sizeof(PP)*(t), hipMemcpyHostToDevice); ERROR_CHECK;
hipMemcpy((void*)CSR_CIDX_DEV, (const void*) DCSR_HOST.cidx, sizeof(int)*DCSR_HOST.e, hipMemcpyHostToDevice); ERROR_CHECK;
hipMemcpy((void*)CSR_VAL_DEV, (const void*) DCSR_HOST.val, sizeof(float)*DCSR_HOST.e, hipMemcpyHostToDevice); ERROR_CHECK;
hipMemcpy((void*)CSR_PTR_DEV, (const void*) DCSR_HOST.header, sizeof(int)*(DCSR_HOST.n+1), hipMemcpyHostToDevice); ERROR_CHECK;
hipMemcpy((void*)CSC_RIDX_DEV, (const void*) DCSC_HOST.ridx, sizeof(int)*DCSC_HOST.e, hipMemcpyHostToDevice); ERROR_CHECK;
hipMemcpy((void*)CSC_VAL_DEV, (const void*) DCSC_HOST.val, sizeof(float)*DCSC_HOST.e, hipMemcpyHostToDevice); ERROR_CHECK;
hipMemcpy((void*)CSC_PTR_DEV, (const void*) DCSC_HOST.header, sizeof(int)*(DCSC_HOST.n+1), hipMemcpyHostToDevice); ERROR_CHECK;
hipLaunchKernelGGL(( calcInterDom), dim3(grid), dim3(block), 0, 0,
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d,
//COO_DEV, // INTERMEDIATE C(COO)
RUPP_DEV, // UPPER BOUND
P_DEV, // WLS_DEV
RBOX_DEV); // RBOX
ERROR_CHECK;
hipEventRecord(ev_spgemm_e, 0);
hipEventSynchronize(ev_spgemm_e);
hipEventElapsedTime(&t_spgemm_d,ev_spgemm_s, ev_spgemm_e);
}
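// cudaPassC: run the merge kernels over else_c and dominator_c, then copy the
// compacted rows back, packing them contiguously on the host while
// prefix-summing c_ptr_h and accumulating nnzC.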
void cudaPassC(int n){
int BLOCK_SIZE = MT;
int GRID_SIZE = GRID;
float* DROW_DEV;
hipMalloc((void**) &(DROW_DEV), sizeof(float)*n*GRID_SIZE); ERROR_CHECK;
hipMemset(DROW_DEV, 0, sizeof(float)*n*GRID_SIZE); ERROR_CHECK;
// hipHostMalloc((void**)&(c_val_h),sizeof(float)*(RUPP_HOST[n]));
// hipHostMalloc((void**)&(c_idx_h),sizeof(int)*(RUPP_HOST[n]));
c_val_h=(float*)malloc(sizeof(float)*RUPP_HOST[n]);
c_idx_h=(int*)malloc(sizeof(int)*RUPP_HOST[n]);
nnzC = 0;
dim3 block(BLOCK_SIZE);
dim3 grid(GRID);
hipEvent_t ev_merge_s, ev_merge_e;
hipEventCreate(&ev_merge_s);
hipEventCreate(&ev_merge_e);
hipEventRecord(ev_merge_s, 0);
hipLaunchKernelGGL(( merge), dim3(grid), dim3(block), 0, 0,
RUPP_DEV, // UPPER BOUND
RBOX_DEV, // RBOX
DROW_DEV, // DENSE
c_jdx_d,c_val_d,c_ptr_d,
c_idx_d,
else_c,
number_of_else_in_c,
N_DEV);
hipDeviceSynchronize();
hipLaunchKernelGGL(( merge_limitting), dim3(grid), dim3(block), 0, 0,
RUPP_DEV, // UPPER BOUND
RBOX_DEV, // RBOX
DROW_DEV, // DENSE
c_jdx_d,c_val_d,c_ptr_d,
c_idx_d,
dominator_c,
number_of_dominators_in_c,
N_DEV);
hipDeviceSynchronize();
hipEventRecord(ev_merge_e, 0);
hipEventSynchronize(ev_merge_e);
hipEventElapsedTime(&t_merge,ev_merge_s, ev_merge_e);
printf("%d\n",number_of_dominators_in_c_h[0]);
c_ptr_h = (int*)malloc(sizeof(int)*(n+1));
c_ptr_h[0] = 0;
hipMemcpy((void*)&c_ptr_h[1],(const void*) &c_ptr_d[0], sizeof(int)*n , hipMemcpyDeviceToHost);
// hipMemcpyAsync((void*)&c_val_h[0],(const void*) &c_val_d[0], sizeof(float)*RUPP_HOST[n] , hipMemcpyDeviceToHost);
// hipMemcpyAsync((void*)&c_idx_h[0],(const void*) &c_idx_d[0], sizeof(int)*RUPP_HOST[n], hipMemcpyDeviceToHost);
int sHost = 0;
for(int i=0;i<n;i++){
int sDev = RUPP_HOST[i];
int l = c_ptr_h[i+1];
//hipDeviceSynchronize();
nnzC += l;
//hipMemcpy((void*)&MVAL_HOST[sHost],(const void*) &MVAL_DEV[sDev], sizeof(float)*l , hipMemcpyDeviceToHost);
//hipDeviceSynchronize();
//hipMemcpyAsync((void*)&MIDX_HOST[sHost],(const void*) &MIDX_DEV[sDev], sizeof(int)*l, hipMemcpyDeviceToHost);
hipMemcpyAsync((void*)&c_val_h[sHost],(const void*) &c_val_d[sDev], sizeof(float)*l , hipMemcpyDeviceToHost);
hipMemcpyAsync((void*)&c_idx_h[sHost],(const void*) &c_idx_d[sDev], sizeof(int)*l, hipMemcpyDeviceToHost);
sHost +=l;
//MPTR_HOST[i+1] += MPTR_HOST[i];
c_ptr_h[i+1] += c_ptr_h[i];
}
/*
for(int j=0;j < RUPP_HOST[n] ;j++){
if(c_val_h[j]>0.000001 ){//|| MVAL_HOST[j]<-0.00001){
nnzC++;
}
}
*/
}
| 19f98abe8c735d943cbc04c315ca236c64540c18.cu | #include "../lib/coo.h"
#include "../lib/csc.h"
#include "../lib/csr.h"
#include "../lib/wlt.h"
#include "../lib/thread.h"
#include "../lib/spGEMM.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#define SHARED
#define ERROR_CHECK \
{\
cudaError err = cudaGetLastError(); \
if ( cudaSuccess != err ) \
{\
printf("[%s:%d]CUDA ERROR : %s\n", __FILE__, __LINE__, cudaGetErrorString(err) ); \
sleep(5); \
exit(-1); \
}\
}
//#define MERGE
#define M16 1//32
#define M8 1//16
#define M4 1//8
#define M2 1//4
#define M1 1//2
/*
#define M16 32
#define M8 16
#define M4 8
#define M2 4
#define M1 2
*/
#define MT 64
#define GRID 1024
#define ALPHA 50
#define BETA 10
__device__ uint get_smid(void){
uint ret;
asm("mov.u32 %0, %smid;" : "=r"(ret));
return ret;
}
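// calcUpp: one block per row index. rUpp[i] accumulates an upper bound on the
// number of intermediate products for output row i (the sum of the lengths of
// the rows referenced by row i's column indices), wlt[i] records a
// rowLen*colLen workload estimate, and part_out/part_row count the indices
// that contribute any work.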
__global__ void calcUpp(
int* csrCidx,int *csrPtr,
int* cscRidx,int *cscPtr,
int *rUpp,
int *wlt,
int* N, int * part_out, int* part_row){
int tid = threadIdx.x;
int bid = blockIdx.x; //bid = rowIdx
int bsize = blockDim.x;
if(bid<N[0]){
/* CALCULATE UPPER BOUND */
int base = csrPtr[bid];
int rLen = csrPtr[bid+1]-base;
int cLen = cscPtr[bid+1]-cscPtr[bid];
for(int i = tid ; i < rLen; i+=bsize){
			int target = csrCidx[base+i];
			int len = csrPtr[target+1] - csrPtr[target];
			atomicAdd(&rUpp[bid], len);
}
/* BUILD WORKLOADTABLE */
wlt[bid] = rLen*cLen;
if(rLen*cLen && tid==0) atomicAdd(&part_out[0],1);
if(rUpp[bid] && tid==0) atomicAdd(&part_row[0],1);
}
}
__global__ void findDom(int* csrPtr, int* cscPtr,
int *rUpp,
int* wlt,
int *domCC, int *domRC,
int *domR, int *domN,
int *dominator_c,
int *number_of_dominators_in_c,
int *else_c,
int *number_of_else_in_c,
char *c_bool,
int *N,
int* part_out,
int* part_row){
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int index = bsize*bid + tid;
if(index < N[0]){
if(1){//(double)wlt[index]>(rUpp[N[0]]/part_out[0] * ALPHA)){
int cntr = atomicAdd(&domN[0],1);
int cc = cscPtr[index+1] - cscPtr[index];
int rc = csrPtr[index+1] - csrPtr[index];
domR[cntr] = index;
wlt[index]=0;
atomicAdd(&domCC[0],cc);
atomicAdd(&domRC[0],rc);
}
if(rUpp[index+1] - rUpp[index] > rUpp[*N]/part_row[0]*5){
int temp = atomicAdd(&number_of_dominators_in_c[0], 1);
dominator_c[temp] = index;
// c_bool[index] = 1;
}
else{
int temp = atomicAdd(&number_of_else_in_c[0],1);
else_c[temp] = index;
}
}
}
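// find_thread_count_approx: histogram the row indices into power-of-two
// buckets ((0,2], (2,4], ..., (64,128], >128) by the length of the
// corresponding CSR row.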
__global__ void find_thread_count_approx(
int* a_ptr, int* b_ptr,int* thread_bin, int *N)
{
int global_tid = threadIdx.x + blockIdx.x*blockDim.x;
if(global_tid<N[0]){
int len = b_ptr[global_tid+1] - b_ptr[global_tid];
if(0<len && len <=2) atomicAdd(&thread_bin[0],1);
else if(2<len && len <=4) atomicAdd(&thread_bin[1],1);
else if(4<len && len <=8) atomicAdd(&thread_bin[2],1);
else if(8<len && len <=16) atomicAdd(&thread_bin[3],1);
else if(16<len && len <=32) atomicAdd(&thread_bin[4],1);
else if(32<len && len <=64) atomicAdd(&thread_bin[5],1);
else if(64<len && len <= 128) atomicAdd(&thread_bin[6],1);
else if(128<len) atomicAdd(&thread_bin[7],1);
}
}
__global__ void fill_thread_bin_approx(
int* a_ptr, int* b_ptr,
int* thread_bin, int* thread_counter,
int* index_bin, int* N)
{
int global_tid = threadIdx.x + blockIdx.x*blockDim.x;
if(global_tid < N[0]){
int len = b_ptr[global_tid+1] - b_ptr[global_tid];
if(0<len && len <=2){
int a = atomicAdd(&thread_counter[0], 1);
index_bin[thread_bin[0] + a] = global_tid;
}
else if(2<len && len <=4){
int a = atomicAdd(&thread_counter[1], 1);
index_bin[thread_bin[1] + a] = global_tid;
}
else if(4<len && len <=8){
int a = atomicAdd(&thread_counter[2], 1);
index_bin[thread_bin[2] + a] = global_tid;
}
else if(8<len && len <=16){
int a = atomicAdd(&thread_counter[3], 1);
index_bin[thread_bin[3] + a]=global_tid;
}
else if(16<len && len <=32){
int a = atomicAdd(&thread_counter[4], 1);
index_bin[thread_bin[4] + a] = global_tid;
}
else if(32<len && len <=64){
int a = atomicAdd(&thread_counter[5], 1);
index_bin[thread_bin[5] + a] = global_tid;
}
else if(64<len && len <=128){
int a = atomicAdd(&thread_counter[6], 1);
index_bin[thread_bin[6] + a] =global_tid;
}
else if(128< len ){
int a = atomicAdd(&thread_counter[7], 1);
index_bin[thread_bin[7] + a] =global_tid;
}
}
}
__global__ void calcInterBase(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int idx = idx_bin[bid];
if(WLS[idx]==0){asm("exit;");}
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase;
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(tid==0)
resBase = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= tid ;ri < rLen ; ri+=bsize)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase+ri] = cIdx;
c_val[rowBase+resBase+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
}
__global__ void calcInterNoMerge(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newTid=tid;
int newBid=0;
int idx = idx_bin[bid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[16];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
WLS[idx] = 0; // mark this index as consumed so later passes skip it
}
}
__global__ void calcInterMerge1(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newTid=tid;
int newBid=0;
bid *=M1;
newBid = tid / (bsize/M1);
newTid = tid - (bsize/M1)*newBid;
int idx = idx_bin[bid+newBid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[32];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize/M1)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
WLS[idx] = 0; // mark this index as consumed so later passes skip it
}
}
__global__ void calcInterMerge2(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newTid=tid;
int newBid=0;
bid *=M2;
newBid = tid / (bsize/M2);
newTid = tid - (bsize/M2)*newBid;
int idx = idx_bin[bid+newBid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[32];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize/M2)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
WLS[idx] = 0; // mark this index as consumed so later passes skip it
}
}
__global__ void calcInterMerge4(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newTid = tid;
int newBid = 0;
bid*= M4;
newBid = tid / (bsize/M4);
newTid = tid - (bsize/M4)*newBid;
int idx = idx_bin[bid + newBid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[32];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize/M4)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
WLS[idx] = 0; // mark this index as consumed so later passes skip it
}
}
__global__ void calcInterMerge8(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newBid = 0;
int newTid = tid;
bid*= M8;
newBid = tid / (bsize/M8);
newTid = tid - (bsize/M8)*newBid;
int idx = idx_bin[bid + newBid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[32];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize/M8)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
WLS[idx] = 0; // mark this index as consumed so later passes skip it
}
}
__global__ void calcInterMerge16(
int* idx_bin,
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
int *RUPP,
int *WLS,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int newBid = 0;
int newTid = tid;
bid*= M16;
newBid = tid / (bsize/M16);
newTid = tid - (bsize/M16)*newBid;
int idx = idx_bin[bid + newBid];
if(WLS[idx]==0){asm("exit;");}
else{
int csrBase = csrPtr[idx];
int cscBase = cscPtr[idx];
int rLen = csrPtr[idx+1] - csrBase;
int cLen = cscPtr[idx+1] - cscBase;
__shared__ int resBase[32];
__syncthreads();
for( int ci = 0 ; ci < cLen; ci ++)
{
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(newTid==0)
resBase[newBid] = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri= newTid ;ri < rLen ; ri+=bsize/M16)
{
int cIdx = csrCidx[ csrBase + ri ];
c_idx[rowBase+resBase[newBid]+ri] = cIdx;
c_val[rowBase+resBase[newBid]+ri] = cscVal[ cscBase + ci ]*csrVal[ csrBase + ri ];
}
}
WLS[idx] = 0; // mark this index as consumed so later passes skip it
}
}
__global__ void calcInterDom(
int* csrCidx, int* csrPtr, float* csrVal, // MATRIX B(CSR)
int* cscRidx, int* cscPtr, float* cscVal, // MATRIX A(CSC)
int* c_idx, int* c_ptr, float* c_val,
// cooData *interC, // INTERMEDIATE C
int *RUPP,
PP *P,
int *RBOX)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
int rid = P[bid].r;
int cid = P[bid].c;
int csrBase = csrPtr[rid];
int cscBase = cscPtr[cid];
int rLen = csrPtr[rid+1] - csrBase;
int cLen = cscPtr[cid+1] - cscBase;
__shared__ int resBase;
resBase = 0;
for( int ci=0 ; ci < cLen; ci ++)
{
__syncthreads();
int rIdx = cscRidx[ cscBase + ci ];
int rowBase = RUPP[rIdx];
if(tid==0) resBase = atomicAdd(&RBOX[rIdx], rLen);
__syncthreads();
for(int ri = tid; ri < rLen ; ri+=bsize )
{
int cIdx = csrCidx[ csrBase + ri ];
//interC[rowBase + resBase + ri].cidx = cIdx;
//interC[rowBase + resBase + ri].ridx = rIdx;
//interC[rowBase + resBase + ri].val = cscVal[ cscBase + ci ] * csrVal[ csrBase + ri ];
c_idx[rowBase+resBase+ri] = cIdx;
c_val[rowBase+resBase+ri] = cscVal[ cscBase + ci ] * csrVal[ csrBase + ri ];
}
}
}
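/*
 * Row-merge phase. Both merge kernels below use the same dense-accumulator
 * scheme: each block processes one output row at a time using its private
 * slice of DROW (a dense float row of length N at offset bid*N).
 *   1. Scatter the row's intermediate (column, value) pairs into DROW,
 *      folding each value into the running sum via atomicExch (this assumes
 *      no two threads of the block hit the same column in the same pass).
 *   2. The value returned by atomicExch acts as a "first touch" test: if its
 *      magnitude is below 1e-4 the column index is appended to c_idx, so the
 *      scheme also assumes partial sums stay outside that band once touched.
 *   3. Compact the touched columns into c_val, reset DROW to zero, and store
 *      the row's nonzero count in c_ptr.
 * merge_limitting (fed by dominator_c) additionally declares a large unused
 * __shared__ buffer, apparently only to cap the number of resident blocks
 * per SM while the heavy rows are merged; merge (fed by else_c) does not.
 */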
__global__ void merge_limitting(
//const cooData *interC, // INTERMEDIATE C
int *RUPP,
int *RBOX,
float* DROW, // DENSE ROW
//int* MIDX, float* MVAL, int* MPTR, // MERGED C
int* c_jdx, float* c_val, int* c_ptr,
int *c_idx,
int* dominator_c,
int* number_of_dominators_in_c,
int *N)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
__shared__ int ip;
__shared__ double shm[768*4];
shm[0] = 0;
for(int RR = 0 ; RR<(*N/GRID)+1; RR++){
if(bid+(RR)*GRID>=*number_of_dominators_in_c) return;
int ii = dominator_c[bid+RR*GRID];
int rBase = RUPP[ii];
int rLen = RUPP[ii+1]-RUPP[ii];
int dBase = bid*(*N);
__syncthreads();
ip = 0;
__syncthreads();
for(int i=tid; i<rLen;i+=bsize){
int index = c_jdx[rBase+i];
float boolflag = atomicExch(&DROW[dBase+index], DROW[dBase+index] + c_val[rBase+i]);
if(boolflag<0.0001 && boolflag>-0.0001){
int ip_local = atomicAdd(&ip,1);
c_idx[rBase+ip_local] = index;//c_idx[rBase+i];
//atomicExch(&c_idx[rBase+ip_local],index);
}
}
__syncthreads();
for(int i=tid;i<ip;i+=bsize){
int v = c_idx[rBase + i];
c_val[rBase + i] = DROW[dBase+v];
atomicExch(&DROW[dBase+v], 0);
}
__syncthreads();
if(tid==0) c_ptr[ii] = ip;
}
}
__global__ void merge(
//const cooData *interC, // INTERMEDIATE C
int *RUPP,
int *RBOX,
float* DROW, // DENSE ROW
//int* MIDX, float* MVAL, int* MPTR, // MERGED C
int* c_jdx, float* c_val, int* c_ptr,
int *c_idx,
int* else_c,
int* number_of_else_in_c,
int *N)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int bsize = blockDim.x;
// __shared__ double shm[3*1024] ;
// shm[0] = 0;
__shared__ int ip;
for(int RR = 0 ; RR<(*N/GRID)+1; RR++){
if(bid+(RR)*GRID>=*number_of_else_in_c) return;
int ii = else_c[bid+RR*GRID];
int rBase = RUPP[ii];
int rLen = RUPP[ii+1]-RUPP[ii];
int dBase = bid*(*N);
__syncthreads();
ip = 0;
__syncthreads();
for(int i=tid; i<rLen;i+=bsize){
int index = c_jdx[rBase+i];
float boolflag = atomicExch(&DROW[dBase+index], DROW[dBase+index] + c_val[rBase+i]);
if(boolflag<0.0001 && boolflag>-0.0001){
int ip_local = atomicAdd(&ip,1);
c_idx[rBase+ip_local] = index;//c_idx[rBase+i];
//atomicExch(&c_idx[rBase+ip_local],index);
}
}
__syncthreads();
for(int i=tid;i<ip;i+=bsize){
int v = c_idx[rBase + i];
c_val[rBase + i] = DROW[dBase+v];
atomicExch(&DROW[dBase+v], 0);
}
__syncthreads();
if(tid==0) c_ptr[ii] = ip;
}
}
int *thread_counter_d, *thread_counter_h;
int *thread_bin_d, *thread_bin_h;
int *idx_bin_d, *idx_bin_h;
int *participants_d;
void cudaPath_find_thread_count_approx(int n)
{
int block_size = 32;
int block_num = n/32+1;
float time;
cudaMalloc((void**) &(thread_bin_d), sizeof(int) *10);
cudaMalloc((void**) &(thread_counter_d), sizeof(int) *10);
cudaMemset(thread_bin_d, 0, sizeof(int)*10);
cudaMemset(thread_counter_d, 0, sizeof(int)*10);
cudaEvent_t ev_count_thread_s, ev_count_thread_e;
cudaEventCreate(&ev_count_thread_s);
cudaEventCreate(&ev_count_thread_e);
cudaEventRecord(ev_count_thread_s, 0);
find_thread_count_approx<<<block_num,block_size>>>
(CSC_PTR_DEV,
CSR_PTR_DEV,
thread_bin_d,N_DEV);
thread_bin_h = (int*)malloc(sizeof(int)*10);
thread_counter_h = (int*)malloc(sizeof(int)*10);
cudaMemcpy((void*)&thread_bin_h[1], (const void*)thread_bin_d, sizeof(int)*8,cudaMemcpyDeviceToHost); ERROR_CHECK;
int total = 0;
thread_bin_h[0] = 0;
thread_bin_h[8] = 0;
for(int i=0;i<8;i++){
thread_bin_h[i+1] += thread_bin_h[i];
}
total = thread_bin_h[8];
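/* The loop above turns the per-bin counts (copied into slots 1..8) into
   starting offsets, e.g. counts {4,2,0,3,...} become offsets {0,4,6,6,9,...}.
   Note that thread_bin_h[8] is zeroed beforehand, so the running total in
   slot 8 -- and hence the size of idx_bin -- excludes the last bin's count. */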
cudaMemcpy((void*)thread_bin_d, (const void*)thread_bin_h, sizeof(int)*8,cudaMemcpyHostToDevice); ERROR_CHECK;
cudaMalloc((void**) &(idx_bin_d), sizeof(int)*thread_bin_h[8]); ERROR_CHECK;
idx_bin_h = (int*)malloc(sizeof(int)*thread_bin_h[8]);
fill_thread_bin_approx<<<block_num, block_size>>>
(CSC_PTR_DEV, CSR_PTR_DEV,
thread_bin_d, thread_counter_d,
idx_bin_d,N_DEV);
ERROR_CHECK;
cudaDeviceSynchronize();
cudaEventRecord(ev_count_thread_e, 0);
cudaEventSynchronize(ev_count_thread_e);
cudaEventElapsedTime(&t_bin,ev_count_thread_s, ev_count_thread_e);
cudaMemcpy((void*)idx_bin_h, (const void*)idx_bin_d, sizeof(int)*thread_bin_h[8],cudaMemcpyDeviceToHost); ERROR_CHECK;
cudaMemcpy((void*)thread_counter_h, (const void*)thread_counter_d, sizeof(int)*10,cudaMemcpyDeviceToHost); ERROR_CHECK;
int counter = 0;
/* for(int i=0;i<8;i++){
printf("%d ",thread_counter_h[i]);
int a = thread_bin_h[i+1] - thread_bin_h[i];
for(int j =0;j<a;j++){
printf("%d %d\n",counter++,idx_bin_h[j]);
}
}
printf("\n");*/
}
int* participants_row_d;
void cudaPass_(int n, int e){
int BLOCK_SIZE=32;
int BLOCK_NUM = n;
dim3 block(BLOCK_SIZE);
dim3 grid(BLOCK_NUM);
/* CUDAMALLOC & CUDAMEMCPY B(CSR) */
cudaMalloc((void**) &(CSR_CIDX_DEV), sizeof(int) *e);
cudaMalloc((void**) &(CSR_VAL_DEV), sizeof(float)*e);
cudaMalloc((void**) &(CSR_PTR_DEV), sizeof(int) *(2*n+1));
cudaMemcpy((void*) (CSR_CIDX_DEV), (const void*)(CSR_HOST.cidx), sizeof(int) *e, cudaMemcpyHostToDevice);
cudaMemcpy((void*) (CSR_VAL_DEV), (const void*)(CSR_HOST.val), sizeof(float) *e, cudaMemcpyHostToDevice);
cudaMemcpy((void*) (CSR_PTR_DEV), (const void*)(CSR_HOST.header), sizeof(int) *(n+1), cudaMemcpyHostToDevice);
/* CUDAMALLOC & CUDAMEMCPY A(CSC) */
cudaMalloc((void**) &(CSC_RIDX_DEV), sizeof(int) *e);
cudaMalloc((void**) &(CSC_VAL_DEV), sizeof(float)*e);
cudaMalloc((void**) &(CSC_PTR_DEV), sizeof(int) *(2*n+1));
cudaMemcpy((void*) (CSC_RIDX_DEV), (const void*)(CSC_HOST.ridx), sizeof(int) *e, cudaMemcpyHostToDevice);
cudaMemcpy((void*) (CSC_VAL_DEV), (const void*)(CSC_HOST.val), sizeof(float) *e, cudaMemcpyHostToDevice);
cudaMemcpy((void*) (CSC_PTR_DEV), (const void*)(CSC_HOST.header), sizeof(int) *(n+1), cudaMemcpyHostToDevice);
cudaMalloc((void**) &(RUPP_DEV), sizeof(int)*(n+1));
cudaMalloc((void**) &(WLS_DEV), sizeof(int)*(n+1));
cudaMemset(RUPP_DEV, 0, sizeof(int)*(n+1));
cudaMalloc((void**) &(N_DEV),sizeof(int));
cudaMemcpy((void*)N_DEV, (const void*)&n ,sizeof(int),cudaMemcpyHostToDevice);
cudaMalloc((void**) &(participants_d), sizeof(int));
cudaMalloc((void**) &(participants_row_d), sizeof(int));//row
cudaMemset(participants_d, 0, sizeof(int));
cudaMemset(participants_row_d, 0, sizeof(int)); // counters accumulated by calcUpp
cudaEvent_t ev_pre_s, ev_pre_e;
cudaEventCreate(&ev_pre_s);
cudaEventCreate(&ev_pre_e);
cudaEventRecord(ev_pre_s, 0);
calcUpp<<<grid, block>>>(
CSR_CIDX_DEV, CSR_PTR_DEV,
CSC_RIDX_DEV, CSC_PTR_DEV,
RUPP_DEV,
WLS_DEV,
N_DEV,
participants_d, participants_row_d);
//ERROR_CHECK;
cudaDeviceSynchronize();
cudaEventRecord(ev_pre_e, 0);
cudaEventSynchronize(ev_pre_e);
cudaEventElapsedTime(&t_pre,ev_pre_s, ev_pre_e);
RUPP_HOST = (int*)malloc(sizeof(int)*(n+1));
cudaMallocHost((void**)&(WLS_HOST),sizeof(int)*(n+1));
cudaMemcpy((void*)&RUPP_HOST[1], (const void*)RUPP_DEV, sizeof(int)*n, cudaMemcpyDeviceToHost);
}
int* number_of_dominators_in_c;
int* number_of_dominators_in_c_h;
int* dominator_c;
int* dominator_c_h;
int* else_c;
int *number_of_else_in_c;
char* c_bool;
void cudaPass_F(int n){
int* domRC_dev;
int* domCC_dev;
int* domN_dev;
int* domR_dev;
cudaMemcpy((void*)RUPP_DEV,(const void*)RUPP_HOST, sizeof(int)*(n+1), cudaMemcpyHostToDevice); ERROR_CHECK;
cudaMalloc((void**)&(domR_dev), sizeof(int)*100000);ERROR_CHECK;
cudaMalloc((void**)&(domRC_dev), sizeof(int));ERROR_CHECK;
cudaMalloc((void**)&(domCC_dev), sizeof(int));ERROR_CHECK;
cudaMalloc((void**)&(dominator_c),sizeof(int)*n);
cudaMalloc((void**)&(else_c),sizeof(int)*n);
cudaMalloc((void**)&(domN_dev), sizeof(int)) ;ERROR_CHECK;
cudaMalloc((void**)&(number_of_dominators_in_c), sizeof(int)); ERROR_CHECK;
cudaMalloc((void**)&(number_of_else_in_c), sizeof(int)); ERROR_CHECK;
cudaMalloc((void**)&(c_bool), sizeof(char)*n); ERROR_CHECK;
cudaMemset(domRC_dev, 0, sizeof(int));
cudaMemset(domCC_dev, 0, sizeof(int));
cudaMemset(domN_dev, 0, sizeof(int));
cudaMemset(number_of_dominators_in_c, 0, sizeof(int));
cudaMemset(number_of_else_in_c, 0, sizeof(int));
cudaMemset(c_bool, 0, sizeof(char)*n);
//printf("%d\n",n);
int grid = n/256+1;
int block = 256;
findDom<<<grid, block>>>(
CSR_PTR_DEV, CSC_PTR_DEV,
RUPP_DEV,
WLS_DEV,
domCC_dev, domRC_dev,
domR_dev, domN_dev,
dominator_c,
number_of_dominators_in_c,
else_c,
number_of_else_in_c,
c_bool,
N_DEV,
participants_d,participants_row_d);
ERROR_CHECK;
cudaDeviceSynchronize();
cudaEvent_t ev_fd_s, ev_fd_e;
cudaEventCreate(&ev_fd_s);
cudaEventCreate(&ev_fd_e);
cudaEventRecord(ev_fd_s, 0);
cudaMemcpy((void*)domR, (const void*)domR_dev, sizeof(int)*100000,cudaMemcpyDeviceToHost); ERROR_CHECK;
cudaMemcpy((void*)domRC, (const void*)domRC_dev, sizeof(int),cudaMemcpyDeviceToHost); ERROR_CHECK;
cudaMemcpy((void*)domCC, (const void*)domCC_dev, sizeof(int),cudaMemcpyDeviceToHost); ERROR_CHECK;
cudaMemcpy((void*)domN, (const void*)domN_dev, sizeof(int),cudaMemcpyDeviceToHost); ERROR_CHECK;
dominator_c_h = (int*)malloc(sizeof(int)*n);
number_of_dominators_in_c_h = (int*)malloc(sizeof(int));
cudaMemcpy((void*)dominator_c_h, (const void*)dominator_c, sizeof(int)*n, cudaMemcpyDeviceToHost); ERROR_CHECK;
cudaMemcpy((void*)number_of_dominators_in_c_h, number_of_dominators_in_c, sizeof(int),cudaMemcpyDeviceToHost);
if(number_of_dominators_in_c_h[0]!=0) printf("!\n");
cudaEventRecord(ev_fd_e, 0);
cudaEventSynchronize(ev_fd_e);
cudaEventElapsedTime(&t_fd,ev_fd_s, ev_fd_e);
}
void cudaPassB(int n){
int BLOCK_SIZE = 256;
dim3 block(BLOCK_SIZE);
dim3 grid(thread_counter_h[0]);
cudaMalloc((void**) &(c_jdx_d), sizeof(int)*RUPP_HOST[n]);
cudaMalloc((void**) &(c_idx_d), sizeof(int)*RUPP_HOST[n]);
cudaMalloc((void**) &(c_val_d), sizeof(float)*RUPP_HOST[n]);
cudaMalloc((void**) &(c_ptr_d), sizeof(int)*(n+1));
cudaMalloc((void**) &(RBOX_DEV), sizeof(int)*n);
cudaMemset(RBOX_DEV, 0, sizeof(int)*n);
cudaEvent_t ev_spgemm_s, ev_spgemm_e;
cudaEventCreate(&ev_spgemm_s);
cudaEventCreate(&ev_spgemm_e);
cudaEventRecord(ev_spgemm_s, 0);
/*
calcInterBase<<<grid,block>>>(
&idx_bin_d[0],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
cudaDeviceSynchronize();
cudaDeviceSynchronize();
cudaEventRecord(ev_spgemm_e, 0);
cudaEventSynchronize(ev_spgemm_e);
cudaEventElapsedTime(&t_spgemm_l,ev_spgemm_s, ev_spgemm_e);
return;
*/
int off;
off = thread_counter_h[0];
//printf("%d\n",thread_counter_h[0]);
if(off){
dim3 block_num0(off/M16+1); // merge factor: M16
//dim3 block_num0(off); // mergefactor : 8
calcInterMerge16<<<block_num0,block>>>(//block>>>(
//calcInterNoMerge<<<block_num0,block>>>(
&idx_bin_d[0],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
cudaDeviceSynchronize();
ERROR_CHECK;
}
off = thread_counter_h[1];
//printf("%d\n",off);
if(off){
//printf("2\n");
dim3 block_num1(off/M8+1); // merge factor: M8
//dim3 block_num1(off); // merge factor : 4
calcInterMerge8<<<block_num1,block>>>(
//calcInterNoMerge<<<block_num1,block>>>(
&idx_bin_d[thread_bin_h[1]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
cudaDeviceSynchronize();
ERROR_CHECK;
}
off = thread_counter_h[2];
//off = thread_bin_h[3] - thread_bin_h[2];
//printf("%d\n",off);
if(off){
//printf("3\n");
dim3 block_num2(off/M4+1);
calcInterMerge4<<<block_num2,block>>>(
&idx_bin_d[thread_bin_h[2]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
cudaDeviceSynchronize();
ERROR_CHECK;
}
//off = thread_bin_h[4] - thread_bin_h[3]; i
off = thread_counter_h[3];
// printf("%d\n",off);
if(off){
//printf("4\n");
dim3 block_num3(off/M2+1);
calcInterMerge2<<<block_num3,block>>>(
&idx_bin_d[thread_bin_h[3]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
cudaDeviceSynchronize();
ERROR_CHECK;
}
//off = thread_bin_h[5] - thread_bin_h[4];
off = thread_counter_h[4];
//printf("%d\n",off);
if(off){
//printf("5\n");
dim3 block_num4(off/M1+1);
calcInterMerge1<<<block_num4,block>>>(
&idx_bin_d[thread_bin_h[4]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
cudaDeviceSynchronize();
ERROR_CHECK;
}
//off = thread_bin_h[6] - thread_bin_h[5];
off = thread_counter_h[5];
//printf("%d\n",off);
if(off){
//printf("6\n");
dim3 block_size64(64);
dim3 block_num5(off);
calcInterNoMerge<<<block_num5,block_size64>>>(
&idx_bin_d[thread_bin_h[5]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
cudaDeviceSynchronize();
ERROR_CHECK;
}
//off = thread_bin_h[7] - thread_bin_h[6];
off = thread_counter_h[6];
//printf("%d\n",off);
if(off){
dim3 block_size128(128);
dim3 block_num6(off);
calcInterNoMerge<<<block_num6,block_size128>>>(
&idx_bin_d[thread_bin_h[6]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
cudaDeviceSynchronize();
ERROR_CHECK;
}
//off = thread_bin_h[8] - thread_bin_h[7];
off = thread_counter_h[7];
//printf("%d\n",off);
if(off){
dim3 block_size256(128);
dim3 block_num7(off);
calcInterNoMerge<<<block_num7,block_size256>>>(
&idx_bin_d[thread_bin_h[7]],
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d, // MATRIX C(CSR)
RUPP_DEV, // UPPER BOUND
WLS_DEV, // WLS_DEV
RBOX_DEV);
cudaDeviceSynchronize();
ERROR_CHECK;
}
for(int i=0;i<8;i++){
printf("%d ",thread_counter_h[i]);
}
printf("\n");
/* float total_eff = 0;
for(int i=0;i<8;i++){
int stripe;
int base;
int box;
if(i==0){ stripe = 32; box=64;}
else if(i==1) {stripe=16; box = 64;}
else if(i==2) {stripe=8; box = 64;}
else if(i==3) {stripe=4; box = 64;}
else if(i==4) {stripe=2; box = 64;}
else {stripe = 1; box = 128;}
if(i==0) base = 0;
else thread_bin_h[i-1];
float eff = 0;
for(int j=0;j<thread_counter_h[i];j+=stripe){
float temp = 0;
int max = 1;
for(int k=0;k<stripe;k++){
if(thread_counter_h[i] <= j+k) break;
int index = idx_bin_h[base+j+k];
int len = CSR_HOST.header[index+1] - CSR_HOST.header[index];
if(max<len) max = len;
temp +=len*(CSC_HOST.header[index+1] - CSC_HOST.header[index]);
}
temp/=max;
temp/=box;
eff += temp;
}
printf("%f %d %d\n",eff, thread_counter_h[i], thread_bin_h[8]);
total_eff += eff/((float)thread_bin_h[8]);
}
printf("%f \n",total_eff);
*/
cudaDeviceSynchronize();
cudaEventRecord(ev_spgemm_e, 0);
cudaEventSynchronize(ev_spgemm_e);
cudaEventElapsedTime(&t_spgemm_l,ev_spgemm_s, ev_spgemm_e);
}
void cudaPassBB(int n,int t){
int BLOCK_SIZE = 384;
dim3 block(BLOCK_SIZE);
dim3 grid(t);
cudaEvent_t ev_spgemm_s, ev_spgemm_e;
cudaEventCreate(&ev_spgemm_s);
cudaEventCreate(&ev_spgemm_e);
cudaEventRecord(ev_spgemm_s, 0);
cudaMalloc((void**)&(P_DEV), sizeof(PP)*t); ERROR_CHECK;
cudaMemcpy((void*)P_DEV, (const void*)P_HOST, sizeof(PP)*(t), cudaMemcpyHostToDevice); ERROR_CHECK;
cudaMemcpy((void*)CSR_CIDX_DEV, (const void*) DCSR_HOST.cidx, sizeof(int)*DCSR_HOST.e, cudaMemcpyHostToDevice); ERROR_CHECK;
cudaMemcpy((void*)CSR_VAL_DEV, (const void*) DCSR_HOST.val, sizeof(float)*DCSR_HOST.e, cudaMemcpyHostToDevice); ERROR_CHECK;
cudaMemcpy((void*)CSR_PTR_DEV, (const void*) DCSR_HOST.header, sizeof(int)*(DCSR_HOST.n+1), cudaMemcpyHostToDevice); ERROR_CHECK;
cudaMemcpy((void*)CSC_RIDX_DEV, (const void*) DCSC_HOST.ridx, sizeof(int)*DCSC_HOST.e, cudaMemcpyHostToDevice); ERROR_CHECK;
cudaMemcpy((void*)CSC_VAL_DEV, (const void*) DCSC_HOST.val, sizeof(float)*DCSC_HOST.e, cudaMemcpyHostToDevice); ERROR_CHECK;
cudaMemcpy((void*)CSC_PTR_DEV, (const void*) DCSC_HOST.header, sizeof(int)*(DCSC_HOST.n+1), cudaMemcpyHostToDevice); ERROR_CHECK;
calcInterDom<<<grid, block>>>(
CSR_CIDX_DEV, CSR_PTR_DEV, CSR_VAL_DEV, // MATRIX B(CSR)
CSC_RIDX_DEV, CSC_PTR_DEV, CSC_VAL_DEV, // MATRIX A(CSC)
c_jdx_d, c_ptr_d, c_val_d,
//COO_DEV, // INTERMEDIATE C(COO)
RUPP_DEV, // UPPER BOUND
P_DEV, // WLS_DEV
RBOX_DEV); // RBOX
ERROR_CHECK;
cudaEventRecord(ev_spgemm_e, 0);
cudaEventSynchronize(ev_spgemm_e);
cudaEventElapsedTime(&t_spgemm_d,ev_spgemm_s, ev_spgemm_e);
}
void cudaPassC(int n){
int BLOCK_SIZE = MT;
int GRID_SIZE = GRID;
float* DROW_DEV;
cudaMalloc((void**) &(DROW_DEV), sizeof(float)*n*GRID_SIZE); ERROR_CHECK;
cudaMemset(DROW_DEV, 0, sizeof(float)*n*GRID_SIZE); ERROR_CHECK;
// cudaMallocHost((void**)&(c_val_h),sizeof(float)*(RUPP_HOST[n]));
// cudaMallocHost((void**)&(c_idx_h),sizeof(int)*(RUPP_HOST[n]));
c_val_h=(float*)malloc(sizeof(float)*RUPP_HOST[n]);
c_idx_h=(int*)malloc(sizeof(int)*RUPP_HOST[n]);
nnzC = 0;
dim3 block(BLOCK_SIZE);
dim3 grid(GRID);
cudaEvent_t ev_merge_s, ev_merge_e;
cudaEventCreate(&ev_merge_s);
cudaEventCreate(&ev_merge_e);
cudaEventRecord(ev_merge_s, 0);
merge<<<grid, block>>>(
RUPP_DEV, // UPPER BOUND
RBOX_DEV, // RBOX
DROW_DEV, // DENSE
c_jdx_d,c_val_d,c_ptr_d,
c_idx_d,
else_c,
number_of_else_in_c,
N_DEV);
cudaDeviceSynchronize();
merge_limitting<<<grid, block>>>(
RUPP_DEV, // UPPER BOUND
RBOX_DEV, // RBOX
DROW_DEV, // DENSE
c_jdx_d,c_val_d,c_ptr_d,
c_idx_d,
dominator_c,
number_of_dominators_in_c,
N_DEV);
cudaDeviceSynchronize();
cudaEventRecord(ev_merge_e, 0);
cudaEventSynchronize(ev_merge_e);
cudaEventElapsedTime(&t_merge,ev_merge_s, ev_merge_e);
printf("%d\n",number_of_dominators_in_c_h[0]);
c_ptr_h = (int*)malloc(sizeof(int)*(n+1));
c_ptr_h[0] = 0;
cudaMemcpy((void*)&c_ptr_h[1],(const void*) &c_ptr_d[0], sizeof(int)*n , cudaMemcpyDeviceToHost);
// cudaMemcpyAsync((void*)&c_val_h[0],(const void*) &c_val_d[0], sizeof(float)*RUPP_HOST[n] , cudaMemcpyDeviceToHost);
// cudaMemcpyAsync((void*)&c_idx_h[0],(const void*) &c_idx_d[0], sizeof(int)*RUPP_HOST[n], cudaMemcpyDeviceToHost);
int sHost = 0;
for(int i=0;i<n;i++){
int sDev = RUPP_HOST[i];
int l = c_ptr_h[i+1];
//cudaDeviceSynchronize();
nnzC += l;
//cudaMemcpy((void*)&MVAL_HOST[sHost],(const void*) &MVAL_DEV[sDev], sizeof(float)*l , cudaMemcpyDeviceToHost);
//cudaThreadSynchronize();
//cudaMemcpyAsync((void*)&MIDX_HOST[sHost],(const void*) &MIDX_DEV[sDev], sizeof(int)*l, cudaMemcpyDeviceToHost);
cudaMemcpyAsync((void*)&c_val_h[sHost],(const void*) &c_val_d[sDev], sizeof(float)*l , cudaMemcpyDeviceToHost);
cudaMemcpyAsync((void*)&c_idx_h[sHost],(const void*) &c_idx_d[sDev], sizeof(int)*l, cudaMemcpyDeviceToHost);
sHost +=l;
//MPTR_HOST[i+1] += MPTR_HOST[i];
c_ptr_h[i+1] += c_ptr_h[i];
}
/*
for(int j=0;j < RUPP_HOST[n] ;j++){
if(c_val_h[j]>0.000001 ){//|| MVAL_HOST[j]<-0.00001){
nnzC++;
}
}
*/
}
|
c5d34e9b51c5f59d85e2837cf97b3b48f937be56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
const int Nx = 12;
const int Ny = 6;
dim3 threadsPerBlock(4, 3, 1);
dim3 numBlocks(Nx/threadsPerBlock.x, Ny/threadsPerBlock.y, 1);
// kernel definition
__global__ void matrixAdd(float A[Ny][Nx], float B[Ny][Nx], float C[Ny][Nx]) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
C[j][i] = A[j][i] + B[j][i];
}
// __global__ denotes a CUDA kernel function
__device__ float doubleValue(float x) {
return 2 * x;
}
// __device__ SPMD execution on GPU
Nx = 11;
Ny = 6;
dim3 numBlocks((Nx+threadsPerBlock.x-1)/threadsPerBlock.x,
(Ny+threadsPerBlock.y-1)/threadsPerBlock.y, 1);
__global__ void matrixAddDoubleB(float A[Ny][Nx], float B[Ny][Nx], float C[Ny][Nx]) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i<Nx && j<Ny) {
C[j][i] = A[j][i] + B[j][i];
}
}
const int N = 100;
float* A = new float[N];
for (int i=0; i<N; ++i) {
A[i] = (float)i;
}
int bytes = sizeof(float) * N;
float* deviceA;
hipMalloc(&deviceA, bytes);
hipMemcpy(deviceA, A, bytes, hipMemcpyHostToDevice);
// deviceA[i] is an invalid op, deviceA is not a pointer into the host's address space
// 3 distinct types of address space
// device global memory(all threads)
// per-block shared memory(all threads in block)
// per-thread private memory(thread)
#define THREADS_PER_BLK 128
__global__ void convolve(int N, float *input, float *output) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0.0f;
for (int i=0; i<3; ++i) {
result += input[index + i];
}
output[index] = result / 3.f;
}
int N = 1024*1024;
hipMalloc(&devInput, sizeof(float) * (N+2));
hipMalloc(&devOutput, sizeof(float) * N);
//convolve<<<N/THREADS_PER_BLK, THREADS_PER_BLK>>>(N, devInput, devOutput)
__global__ void convolve_shared(int N, float *input, float *output) {
__shared__ float support[THREADS_PER_BLK+2];
int index = blockIdx.x * blockDim.x + threadIdx.x;
support[threadIdx.x] = input[index];
if (threadIdx.x < 2) {
support[THREADS_PER_BLK + threadIdx.x] = input[index + THREADS_PER_BLK];
}
__syncthreads();
float result = 0.0f;
for (int i=0; i<3; ++i) {
result += support[threadIdx.x + i];
}
output[index] = result / 3.f;
}
// __syncthreads: wait for all the threads in the same block to arrive at this point
//float atomicAdd(float* addr, float amount);
//atomic op on both global memory and per-block shared memory
//major CUDA assumption: thread block execution can be carried out in any order
//GPU implementation maps thread blocks to cores using a dynamic scheduling policy
//warps are an important GPU implementation detail, but not a CUDA abstraction
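// a minimal illustrative sketch (names are made up) combining the two kinds of
// atomics: accumulate a per-block partial sum in shared memory, then have one
// thread fold it into a global total
__global__ void blockSum(const float* input, float* total, int n) {
    __shared__ float blockTotal;
    if (threadIdx.x == 0) blockTotal = 0.f;
    __syncthreads();
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomicAdd(&blockTotal, input[i]);        // shared-memory atomic
    __syncthreads();
    if (threadIdx.x == 0) atomicAdd(total, blockTotal); // global-memory atomic
}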
| c5d34e9b51c5f59d85e2837cf97b3b48f937be56.cu | const int Nx = 12;
const int Ny = 6;
dim3 threadsPerBlock(4, 3, 1);
dim3 numBlocks(Nx/threadsPerBlock.x, Ny/threadsPerBlock.y, 1);
// kernel definition
__global__ void matrixAdd(float A[Ny][Nx], float B[Ny][Nx], float C[Ny][Nx]) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
C[j][i] = A[j][i] + B[j][i];
}
// __global__ denotes a CUDA kernel function
__device__ float doubleValue(float x) {
return 2 * x;
}
// __device__ SPMD execution on GPU
Nx = 11;
Ny = 6;
dim3 numBlocks((Nx+threadsPerBlock.x-1)/threadsPerBlock.x,
(Ny+threadsPerBlock.y-1)/threadsPerBlock.y, 1);
__global__ void matrixAddDoubleB(float A[Ny][Nx], float B[Ny][Nx], float C[Ny][Nx]) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i<Nx && j<Ny) {
C[j][i] = A[j][i] + B[j][i];
}
}
const int N = 100;
float* A = new float[N];
for (int i=0; i<N; ++i) {
A[i] = (float)i;
}
int bytes = sizeof(float) * N;
float* deviceA;
cudaMalloc(&deviceA, bytes);
cudaMemcpy(deviceA, A, bytes, cudaMemcpyHostToDevice);
// deviceA[i] is an invalid op, deviceA is not a pointer into the host's address space
// 3 distinct types of address space
// device global memory(all threads)
// per-block shared memory(all threads in block)
// per-thread private memory(thread)
#define THREADS_PER_BLK 128
__global__ void convolve(int N, float *input, float *output) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0.0f;
for (int i=0; i<3; ++i) {
result += input[index + i];
}
output[index] = result / 3.f;
}
int N = 1024*1024;
cudaMalloc(&devInput, sizeof(float) * (N+2));
cudaMalloc(&devOutput, sizeof(float) * N);
//convolve<<<N/THREADS_PER_BLK, THREADS_PER_BLK>>>(N, devInput, devOutput)
__global__ void convolve_shared(int N, float *input, float *output) {
__shared__ float support[THREADS_PER_BLK+2];
int index = blockIdx.x * blockDim.x + threadIdx.x;
support[threadIdx.x] = input[index];
if (threadIdx.x < 2) {
support[THREADS_PER_BLK + threadIdx.x] = input[index + THREADS_PER_BLK];
}
__syncthreads();
float result = 0.0f;
for (int i=0; i<3; ++i) {
result += support[threadIdx.x + i];
}
output[index] = result / 3.f;
}
// __syncthreads: wait for all the threads in the same block to arrive at this point
//float atomicAdd(float* addr, float amount);
//atomic op on both global memory and per-block shared memory
//major CUDA assumption: thread block execution can be carried out in any order
//GPU implementation maps thread blocks to cores using a dynamic scheduling policy
//warps are an important GPU implementation detail, but not a CUDA abstraction
|
2ff9a93c4984a4415a8532e39ed3e7da1a2b938c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* Kernel of dense matrix-matrix multiplication kernel.
* The algorithm is based on CUDA sgemm code from Vasily Volkov
* at UC Berkeley.
*/
#include "../benchmark_common.h"
#define CHECK_ERROR(errorMessage) \
{ \
hipError_t err = hipGetLastError(); \
if (hipSuccess != err) { \
fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \
errorMessage, __FILE__, __LINE__, hipGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
}
// CML x RML = CML, baseline version, 510FLOP/s on Fermi
/* Pseudo code
for i < M ; i += 64 // thread block.x
for j < N; j += 16 // thread block.y
for tx = 0; tx < 16; tx++ // thread index x; tile of M loop
for ty = 0; ty < 4 ; ty++ // thread index y; tile of M loop
for m < 16; m += 1;
c[m] = 0.0f
for k < K; k += 4 // seq
b[ty][tx] = B[k+ty][j+tx]
for l < 4; l +=1 // seq
for m < 16; m +=1 // seq
c[m] += A[i+ty*16+tx][k+l] * b[l][m]
*/
// Parameters of tile sizes
#define TILE_N 16
#define TILE_TB_HEIGHT 8
#define TILE_M (TILE_N * TILE_TB_HEIGHT)
__global__ void mysgemmNT(const float* A,
int lda,
const float* B,
int ldb,
float* C,
int ldc,
int k,
float alpha,
float beta) {
// Partial results
float c[TILE_N];
for (int i = 0; i < TILE_N; i++)
c[i] = 0.0f;
int mid = threadIdx.y * blockDim.x + threadIdx.x; // flattened id
int m = blockIdx.x * TILE_M + mid;
int n = blockIdx.y * TILE_N + threadIdx.x;
__shared__ float b_s[TILE_TB_HEIGHT][TILE_N];
for (int i = 0; i < k; i += TILE_TB_HEIGHT) {
float a;
b_s[threadIdx.y][threadIdx.x] = B[n + (i + threadIdx.y) * ldb];
__syncthreads();
for (int j = 0; j < TILE_TB_HEIGHT; j++) {
a = A[m + (i + j) * lda];
for (int kk = 0; kk < TILE_N; kk++)
c[kk] += a * b_s[j][kk];
}
__syncthreads();
}
int t = ldc * blockIdx.y * TILE_N + m;
for (int i = 0; i < TILE_N; i++) {
C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i];
}
}
void regtileSgemm(char transa,
char transb,
int m,
int n,
int k,
float alpha,
const float* A,
int lda,
const float* B,
int ldb,
float beta,
float* C,
int ldc,
hipStream_t stream_app,
pthread_mutex_t* mutexapp,
bool flag) {
if ((transa != 'N') && (transa != 'n')) {
std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl;
return;
}
if ((transb != 'T') && (transb != 't')) {
std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl;
return;
}
// In this code we assume the matrix sizes are multiple of tile size
if ((m % TILE_M) || (n % TILE_N)) {
std::cerr << "unsupported size of matrix. m should be multiple of "
<< TILE_M << "; n should be multiple of " << TILE_N << std::endl;
}
dim3 grid(m / TILE_M, n / TILE_N), threads(TILE_N, TILE_TB_HEIGHT);
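// Launch geometry: each block of TILE_N x TILE_TB_HEIGHT (= 16x8) threads
// computes a TILE_M x TILE_N (= 128x16) tile of C, one 16-element row of the
// tile per thread.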
hipLaunchKernelGGL(( mysgemmNT), dim3(grid), dim3(threads), 0, stream_app, A, lda, B, ldb, C, ldc, k, alpha,
beta);
CHECK_ERROR("mySgemm");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
}
| 2ff9a93c4984a4415a8532e39ed3e7da1a2b938c.cu | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* Kernel of dense matrix-matrix multiplication kernel.
* The algorithm is based on CUDA sgemm code from Vasily Volkov
* at UC Berkeley.
*/
#include "../benchmark_common.h"
#define CHECK_ERROR(errorMessage) \
{ \
cudaError_t err = cudaGetLastError(); \
if (cudaSuccess != err) { \
fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \
errorMessage, __FILE__, __LINE__, cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
}
// CML x RML = CML, baseline version, 510FLOP/s on Fermi
/* Pseudo code
for i < M ; i += 64 // thread block.x
for j < N; j += 16 // thread block.y
for tx = 0; tx < 16; tx++ // thread index x; tile of M loop
for ty = 0; ty < 4 ; ty++ // thread index y; tile of M loop
for m < 16; m += 1;
c[m] = 0.0f
for k < K; k += 4 // seq
b[ty][tx] = B[k+ty][j+tx]
for l < 4; l +=1 // seq
for m < 16; m +=1 // seq
c[m] += A[i+ty*16+tx][k+l] * b[l][m]
*/
// Parameters of tile sizes
#define TILE_N 16
#define TILE_TB_HEIGHT 8
#define TILE_M (TILE_N * TILE_TB_HEIGHT)
__global__ void mysgemmNT(const float* A,
int lda,
const float* B,
int ldb,
float* C,
int ldc,
int k,
float alpha,
float beta) {
// Partial results
float c[TILE_N];
for (int i = 0; i < TILE_N; i++)
c[i] = 0.0f;
int mid = threadIdx.y * blockDim.x + threadIdx.x; // flattened id
int m = blockIdx.x * TILE_M + mid;
int n = blockIdx.y * TILE_N + threadIdx.x;
__shared__ float b_s[TILE_TB_HEIGHT][TILE_N];
for (int i = 0; i < k; i += TILE_TB_HEIGHT) {
float a;
b_s[threadIdx.y][threadIdx.x] = B[n + (i + threadIdx.y) * ldb];
__syncthreads();
for (int j = 0; j < TILE_TB_HEIGHT; j++) {
a = A[m + (i + j) * lda];
for (int kk = 0; kk < TILE_N; kk++)
c[kk] += a * b_s[j][kk];
}
__syncthreads();
}
int t = ldc * blockIdx.y * TILE_N + m;
for (int i = 0; i < TILE_N; i++) {
C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i];
}
}
void regtileSgemm(char transa,
char transb,
int m,
int n,
int k,
float alpha,
const float* A,
int lda,
const float* B,
int ldb,
float beta,
float* C,
int ldc,
cudaStream_t stream_app,
pthread_mutex_t* mutexapp,
bool flag) {
if ((transa != 'N') && (transa != 'n')) {
std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl;
return;
}
if ((transb != 'T') && (transb != 't')) {
std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl;
return;
}
// In this code we assume the matrix sizes are multiple of tile size
if ((m % TILE_M) || (n % TILE_N)) {
std::cerr << "unsupported size of matrix. m should be multiple of "
<< TILE_M << "; n should be multiple of " << TILE_N << std::endl;
}
dim3 grid(m / TILE_M, n / TILE_N), threads(TILE_N, TILE_TB_HEIGHT);
mysgemmNT<<<grid, threads, 0, stream_app>>>(A, lda, B, ldb, C, ldc, k, alpha,
beta);
CHECK_ERROR("mySgemm");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
}
|
857c35ada8777fdccfc0b63446c31e5482ea0c12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file RungeKutta.test.cu
//---------------------------------------------------------------------------//
#include "RungeKutta.test.hh"
#include "base/KernelParamCalculator.cuda.hh"
#include <thrust/device_vector.h>
#include "field/MagField.hh"
#include "field/MagFieldEquation.hh"
#include "field/RungeKuttaStepper.hh"
#include "field/FieldInterface.hh"
#include "base/Range.hh"
#include "base/Types.hh"
#include "base/Constants.hh"
#include "base/Units.hh"
#include "physics/base/Units.hh"
using thrust::raw_pointer_cast;
namespace celeritas_test
{
using namespace celeritas;
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
__global__ void rk4_test_kernel(FieldTestParams param,
real_type* pos_x,
real_type* pos_z,
real_type* mom_y,
real_type* mom_z,
real_type* error)
{
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= param.nstates)
return;
// Construct the Runge-Kutta stepper
MagField field({0, 0, param.field_value});
MagFieldEquation equation(field, units::ElementaryCharge{-1});
RungeKuttaStepper<MagFieldEquation> rk4(equation);
// Initial state and the expected state after revolutions
OdeState y;
y.pos = {param.radius, 0.0, tid.get() * 1.0e-6};
y.mom = {0.0, param.momentum_y, param.momentum_z};
// Test parameters and the sub-step size
real_type hstep = 2.0 * constants::pi * param.radius / param.nsteps;
real_type total_error = 0;
for (auto nr : range(param.revolutions))
{
// Travel hstep for nsteps times in the field
for (CELER_MAYBE_UNUSED int i : celeritas::range(param.nsteps))
{
StepperResult result = rk4(hstep, y);
y = result.end_state;
total_error += truncation_error(hstep, 0.001, y, result.err_state);
}
}
// Output for verification
pos_x[tid.get()] = y.pos[0];
pos_z[tid.get()] = y.pos[2];
mom_y[tid.get()] = y.mom[1];
mom_z[tid.get()] = y.mom[2];
error[tid.get()] = total_error;
}
//---------------------------------------------------------------------------//
// TESTING INTERFACE
//---------------------------------------------------------------------------//
//! Run on device and return results
RK4TestOutput rk4_test(FieldTestParams test_param)
{
// Output data for kernel
thrust::device_vector<real_type> pos_x(test_param.nstates, 0.0);
thrust::device_vector<real_type> pos_z(test_param.nstates, 0.0);
thrust::device_vector<real_type> mom_y(test_param.nstates, 0.0);
thrust::device_vector<real_type> mom_z(test_param.nstates, 0.0);
thrust::device_vector<real_type> error(test_param.nstates, 0.0);
// Run kernel
celeritas::KernelParamCalculator calc_launch_params(rk4_test_kernel,
"rk4_test");
auto params = calc_launch_params(test_param.nstates);
hipLaunchKernelGGL(( rk4_test_kernel), dim3(params.grid_size), dim3(params.block_size), 0, 0,
test_param,
raw_pointer_cast(pos_x.data()),
raw_pointer_cast(pos_z.data()),
raw_pointer_cast(mom_y.data()),
raw_pointer_cast(mom_z.data()),
raw_pointer_cast(error.data()));
CELER_CUDA_CALL(hipDeviceSynchronize());
// Copy result back to CPU
RK4TestOutput result;
result.pos_x.resize(pos_x.size());
thrust::copy(pos_x.begin(), pos_x.end(), result.pos_x.begin());
result.pos_z.resize(pos_z.size());
thrust::copy(pos_z.begin(), pos_z.end(), result.pos_z.begin());
result.mom_y.resize(mom_y.size());
thrust::copy(mom_y.begin(), mom_y.end(), result.mom_y.begin());
result.mom_z.resize(mom_z.size());
thrust::copy(mom_z.begin(), mom_z.end(), result.mom_z.begin());
result.error.resize(error.size());
thrust::copy(error.begin(), error.end(), result.error.begin());
return result;
}
//---------------------------------------------------------------------------//
} // namespace celeritas_test
| 857c35ada8777fdccfc0b63446c31e5482ea0c12.cu | //---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file RungeKutta.test.cu
//---------------------------------------------------------------------------//
#include "RungeKutta.test.hh"
#include "base/KernelParamCalculator.cuda.hh"
#include <thrust/device_vector.h>
#include "field/MagField.hh"
#include "field/MagFieldEquation.hh"
#include "field/RungeKuttaStepper.hh"
#include "field/FieldInterface.hh"
#include "base/Range.hh"
#include "base/Types.hh"
#include "base/Constants.hh"
#include "base/Units.hh"
#include "physics/base/Units.hh"
using thrust::raw_pointer_cast;
namespace celeritas_test
{
using namespace celeritas;
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
__global__ void rk4_test_kernel(FieldTestParams param,
real_type* pos_x,
real_type* pos_z,
real_type* mom_y,
real_type* mom_z,
real_type* error)
{
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= param.nstates)
return;
// Construct the Runge-Kutta stepper
MagField field({0, 0, param.field_value});
MagFieldEquation equation(field, units::ElementaryCharge{-1});
RungeKuttaStepper<MagFieldEquation> rk4(equation);
// Initial state and the expected state after revolutions
OdeState y;
y.pos = {param.radius, 0.0, tid.get() * 1.0e-6};
y.mom = {0.0, param.momentum_y, param.momentum_z};
// Test parameters and the sub-step size
real_type hstep = 2.0 * constants::pi * param.radius / param.nsteps;
real_type total_error = 0;
for (auto nr : range(param.revolutions))
{
// Travel hstep for nsteps times in the field
for (CELER_MAYBE_UNUSED int i : celeritas::range(param.nsteps))
{
StepperResult result = rk4(hstep, y);
y = result.end_state;
total_error += truncation_error(hstep, 0.001, y, result.err_state);
}
}
// Output for verification
pos_x[tid.get()] = y.pos[0];
pos_z[tid.get()] = y.pos[2];
mom_y[tid.get()] = y.mom[1];
mom_z[tid.get()] = y.mom[2];
error[tid.get()] = total_error;
}
//---------------------------------------------------------------------------//
// TESTING INTERFACE
//---------------------------------------------------------------------------//
//! Run on device and return results
RK4TestOutput rk4_test(FieldTestParams test_param)
{
// Output data for kernel
thrust::device_vector<real_type> pos_x(test_param.nstates, 0.0);
thrust::device_vector<real_type> pos_z(test_param.nstates, 0.0);
thrust::device_vector<real_type> mom_y(test_param.nstates, 0.0);
thrust::device_vector<real_type> mom_z(test_param.nstates, 0.0);
thrust::device_vector<real_type> error(test_param.nstates, 0.0);
// Run kernel
celeritas::KernelParamCalculator calc_launch_params(rk4_test_kernel,
"rk4_test");
auto params = calc_launch_params(test_param.nstates);
rk4_test_kernel<<<params.grid_size, params.block_size>>>(
test_param,
raw_pointer_cast(pos_x.data()),
raw_pointer_cast(pos_z.data()),
raw_pointer_cast(mom_y.data()),
raw_pointer_cast(mom_z.data()),
raw_pointer_cast(error.data()));
CELER_CUDA_CALL(cudaDeviceSynchronize());
// Copy result back to CPU
RK4TestOutput result;
result.pos_x.resize(pos_x.size());
thrust::copy(pos_x.begin(), pos_x.end(), result.pos_x.begin());
result.pos_z.resize(pos_z.size());
thrust::copy(pos_z.begin(), pos_z.end(), result.pos_z.begin());
result.mom_y.resize(mom_y.size());
thrust::copy(mom_y.begin(), mom_y.end(), result.mom_y.begin());
result.mom_z.resize(mom_z.size());
thrust::copy(mom_z.begin(), mom_z.end(), result.mom_z.begin());
result.error.resize(error.size());
thrust::copy(error.begin(), error.end(), result.error.begin());
return result;
}
//---------------------------------------------------------------------------//
} // namespace celeritas_test
|
c9cd46bfc585eaf224c9d48a6aa6d7f0f02b7e4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include<iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define N 50000
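// gpuAdd uses a grid-stride loop: each thread starts at its global index and
// advances by the total number of launched threads, so the launch below covers
// all N elements regardless of the grid size chosen.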
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N)
{
d_c[tid] = d_a[tid] + d_b[tid];
tid += blockDim.x * gridDim.x;
}
}
int main(void) {
int *h_a, *h_b, *h_c;
int *d_a0, *d_b0, *d_c0;
int *d_a1, *d_b1, *d_c1;
hipStream_t stream0, stream1;
hipStreamCreate(&stream0);
hipStreamCreate(&stream1);
hipEvent_t e_start, e_stop;
hipEventCreate(&e_start);
hipEventCreate(&e_stop);
hipEventRecord(e_start, 0);
// Allocate page-locked memory
hipHostMalloc((void**)&h_a, N*2*sizeof(int),hipHostMallocDefault);
hipHostMalloc((void**)&h_b, N*2*sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&h_c, N*2*sizeof(int), hipHostMallocDefault);
// Allocate device memory
hipMalloc((void**)&d_a0, N * sizeof(int));
hipMalloc((void**)&d_b0, N * sizeof(int));
hipMalloc((void**)&d_c0, N * sizeof(int));
hipMalloc((void**)&d_a1, N * sizeof(int));
hipMalloc((void**)&d_b1, N * sizeof(int));
hipMalloc((void**)&d_c1, N * sizeof(int));
for (int i = 0; i < N*2; i++) {
h_a[i] = 2 * i*i;
h_b[i] = i;
}
hipMemcpyAsync(d_a0, h_a, N*sizeof(int), hipMemcpyHostToDevice, stream0);
hipMemcpyAsync(d_a1, h_a+N, N*sizeof(int), hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(d_b0, h_b, N*sizeof(int), hipMemcpyHostToDevice, stream0);
hipMemcpyAsync(d_b1, h_b+N, N*sizeof(int), hipMemcpyHostToDevice, stream1);
gpuAdd << <512, 512, 0, stream0 >> > (d_a0, d_b0, d_c0);
gpuAdd << <512, 512, 0, stream1 >> > (d_a1, d_b1, d_c1);
hipMemcpyAsync(h_c , d_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0);
hipMemcpyAsync(h_c + N, d_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream1);
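// Because copies and kernels were issued breadth-first across two streams,
// the transfers for one half of the data can overlap the kernel working on
// the other half on devices with a separate copy engine.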
hipDeviceSynchronize();
hipStreamSynchronize(stream0);
hipStreamSynchronize(stream1);
hipEventRecord(e_stop, 0);
hipEventSynchronize(e_stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, e_start, e_stop);
printf("Time to add %d numbers: %3.1f ms\n",2* N, elapsedTime);
int Correct = 1;
printf("Vector addition on GPU \n");
//Printing result on console
for (int i = 0; i < 2*N; i++) {
if ((h_a[i] + h_b[i] != h_c[i]))
Correct = 0;
}
if (Correct == 1)
printf("GPU has computed Sum Correctly\n");
else
printf("There is an Error in GPU Computation\n");
//Free up memory
hipFree(d_a0);
hipFree(d_b0);
hipFree(d_c0);
hipFree(d_a1);
hipFree(d_b1);
hipFree(d_c1);
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c);
return 0;
}
| c9cd46bfc585eaf224c9d48a6aa6d7f0f02b7e4b.cu | #include "stdio.h"
#include<iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 50000
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N)
{
d_c[tid] = d_a[tid] + d_b[tid];
tid += blockDim.x * gridDim.x;
}
}
int main(void) {
int *h_a, *h_b, *h_c;
int *d_a0, *d_b0, *d_c0;
int *d_a1, *d_b1, *d_c1;
cudaStream_t stream0, stream1;
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
cudaEvent_t e_start, e_stop;
cudaEventCreate(&e_start);
cudaEventCreate(&e_stop);
cudaEventRecord(e_start, 0);
// Allocate page-locked memory
cudaHostAlloc((void**)&h_a, N*2*sizeof(int),cudaHostAllocDefault);
cudaHostAlloc((void**)&h_b, N*2*sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&h_c, N*2*sizeof(int), cudaHostAllocDefault);
// Allocate device memory
cudaMalloc((void**)&d_a0, N * sizeof(int));
cudaMalloc((void**)&d_b0, N * sizeof(int));
cudaMalloc((void**)&d_c0, N * sizeof(int));
cudaMalloc((void**)&d_a1, N * sizeof(int));
cudaMalloc((void**)&d_b1, N * sizeof(int));
cudaMalloc((void**)&d_c1, N * sizeof(int));
for (int i = 0; i < N*2; i++) {
h_a[i] = 2 * i*i;
h_b[i] = i;
}
cudaMemcpyAsync(d_a0, h_a, N*sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(d_a1, h_a+N, N*sizeof(int), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(d_b0, h_b, N*sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(d_b1, h_b+N, N*sizeof(int), cudaMemcpyHostToDevice, stream1);
gpuAdd << <512, 512, 0, stream0 >> > (d_a0, d_b0, d_c0);
gpuAdd << <512, 512, 0, stream1 >> > (d_a1, d_b1, d_c1);
cudaMemcpyAsync(h_c , d_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0);
cudaMemcpyAsync(h_c + N, d_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1);
cudaDeviceSynchronize();
cudaStreamSynchronize(stream0);
cudaStreamSynchronize(stream1);
cudaEventRecord(e_stop, 0);
cudaEventSynchronize(e_stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, e_start, e_stop);
printf("Time to add %d numbers: %3.1f ms\n",2* N, elapsedTime);
int Correct = 1;
printf("Vector addition on GPU \n");
//Printing result on console
for (int i = 0; i < 2*N; i++) {
if ((h_a[i] + h_b[i] != h_c[i]))
Correct = 0;
}
if (Correct == 1)
printf("GPU has computed Sum Correctly\n");
else
printf("There is an Error in GPU Computation\n");
//Free up memory
cudaFree(d_a0);
cudaFree(d_b0);
cudaFree(d_c0);
cudaFree(d_a1);
cudaFree(d_b1);
cudaFree(d_c1);
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
return 0;
}
|
f9f4c2ddc3dc700c459e7bcad45996958eeee0a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define GOOGLE_CUDA 1
#define EIGEN_USE_GPU
#include <tensorflow/core/framework/tensor.h>
#include "tf_cuda_utils.h"
using namespace tensorflow;
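// The x-offsets passed to surf2Dwrite/surf2Dread below are byte offsets while
// each texel is a float4, hence the "* 16"; a small compile-time guard for that
// assumption:
static_assert(sizeof(float4) == 16, "surface x-offsets below assume 16-byte float4 texels");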
__global__ void upload_background(hipSurfaceObject_t dest_surface, TTypes<float, 4>::ConstTensor const src_tensor, int const frames_per_row, dim3 const total_threads)
{
auto const batch_size = src_tensor.dimension(0);
auto const frame_height = src_tensor.dimension(1);
auto const frame_width = src_tensor.dimension(2);
auto const channels = src_tensor.dimension(3);
CUDA_AXIS_KERNEL_LOOP(dest_x, total_threads, x) {
CUDA_AXIS_KERNEL_LOOP(dest_y, total_threads, y) {
auto const iib = dest_y / frame_height * frames_per_row + dest_x / frame_width;
if (iib < batch_size) {
auto const x_in_frame = dest_x % frame_width;
auto const y_in_frame = frame_height - 1 - dest_y % frame_height; // the vertical flip ensures that our images are top-row-first, as in tensorflow
if (channels == 1) {
auto const &value = src_tensor(iib, y_in_frame, x_in_frame, 0);
surf2Dwrite(float4{value, value, value, 1.f}, dest_surface, dest_x * 16, dest_y); // *16 is required because surface-writes use byte addressing (!)
} else if (channels == 3) {
surf2Dwrite(float4{
src_tensor(iib, y_in_frame, x_in_frame, 0),
src_tensor(iib, y_in_frame, x_in_frame, 1),
src_tensor(iib, y_in_frame, x_in_frame, 2),
1.f,
}, dest_surface, dest_x * 16, dest_y);
}
}
}
}
}
void launch_background_upload(
hipArray_t &dest_array, Tensor const &src_tensor,
int const dest_height, int const dest_width,
Eigen::GpuDevice const &device
) {
hipResourceDesc dest_resource_descriptor;
dest_resource_descriptor.resType = hipResourceTypeArray;
dest_resource_descriptor.res.array.array = dest_array;
hipSurfaceObject_t dest_surface;
if (auto const err = hipCreateSurfaceObject(&dest_surface, &dest_resource_descriptor))
LOG(FATAL) << "hipCreateSurfaceObject failed: " << hipGetErrorName(err);
auto const config = GetCuda2DLaunchConfig(dest_width, dest_height, device);
auto const src = src_tensor.tensor<float, 4>();
hipLaunchKernelGGL(( upload_background), dim3(config.block_count), dim3(config.thread_per_block), 0, device.stream(),
dest_surface,
src,
dest_width / src_tensor.dim_size(2),
config.virtual_thread_count
);
if (auto const err = hipDestroySurfaceObject(dest_surface))
LOG(FATAL) << "hipDestroySurfaceObject failed: " << hipGetErrorName(err);
}
__global__ void download_pixels(TTypes<float, 4>::Tensor pixels, hipSurfaceObject_t const src_surface, int const frames_per_row, dim3 const total_threads)
{
auto const batch_size = pixels.dimension(0);
auto const frame_height = pixels.dimension(1);
auto const frame_width = pixels.dimension(2);
auto const channels = pixels.dimension(3);
CUDA_AXIS_KERNEL_LOOP(src_x, total_threads, x) {
CUDA_AXIS_KERNEL_LOOP(src_y, total_threads, y) {
auto const iib = src_y / frame_height * frames_per_row + src_x / frame_width;
if (iib < batch_size) {
auto const pixel = surf2Dread<float4>(src_surface, src_x * 16, src_y); // *16 is required because surface-loads use byte addressing (!)
auto const x_in_frame = src_x % frame_width;
auto const y_in_frame = frame_height - 1 - src_y % frame_height; // the vertical flip ensures that our images are top-row-first, as in tensorflow
if (channels == 1) {
pixels(iib, y_in_frame, x_in_frame, 0) = pixel.x;
} else if (channels == 3) {
pixels(iib, y_in_frame, x_in_frame, 0) = pixel.x;
pixels(iib, y_in_frame, x_in_frame, 1) = pixel.y;
pixels(iib, y_in_frame, x_in_frame, 2) = pixel.z;
}
}
}
}
}
void launch_pixels_download(
Tensor &dest_tensor, hipArray_t const &src_array,
int const src_height, int const src_width,
Eigen::GpuDevice const &device
) {
hipResourceDesc src_resource_descriptor;
src_resource_descriptor.resType = hipResourceTypeArray;
src_resource_descriptor.res.array.array = src_array;
hipSurfaceObject_t src_surface;
if (auto const err = hipCreateSurfaceObject(&src_surface, &src_resource_descriptor))
LOG(FATAL) << "hipCreateSurfaceObject failed: " << hipGetErrorName(err);
auto const config = GetCuda2DLaunchConfig(src_width, src_height, device);
auto dest = dest_tensor.tensor<float, 4>();
hipLaunchKernelGGL(( download_pixels), dim3(config.block_count), dim3(config.thread_per_block), 0, device.stream(),
dest,
src_surface,
src_width / dest_tensor.dim_size(2),
config.virtual_thread_count
);
if (auto const err = hipDestroySurfaceObject(src_surface))
LOG(FATAL) << "hipDestroySurfaceObject failed: " << hipGetErrorName(err);
}
| f9f4c2ddc3dc700c459e7bcad45996958eeee0a1.cu |
#define GOOGLE_CUDA 1
#define EIGEN_USE_GPU
#include <tensorflow/core/framework/tensor.h>
#include "tf_cuda_utils.h"
using namespace tensorflow;
__global__ void upload_background(cudaSurfaceObject_t dest_surface, TTypes<float, 4>::ConstTensor const src_tensor, int const frames_per_row, dim3 const total_threads)
{
auto const batch_size = src_tensor.dimension(0);
auto const frame_height = src_tensor.dimension(1);
auto const frame_width = src_tensor.dimension(2);
auto const channels = src_tensor.dimension(3);
CUDA_AXIS_KERNEL_LOOP(dest_x, total_threads, x) {
CUDA_AXIS_KERNEL_LOOP(dest_y, total_threads, y) {
auto const iib = dest_y / frame_height * frames_per_row + dest_x / frame_width;
if (iib < batch_size) {
auto const x_in_frame = dest_x % frame_width;
auto const y_in_frame = frame_height - 1 - dest_y % frame_height; // the vertical flip ensures that our images are top-row-first, as in tensorflow
if (channels == 1) {
auto const &value = src_tensor(iib, y_in_frame, x_in_frame, 0);
surf2Dwrite(float4{value, value, value, 1.f}, dest_surface, dest_x * 16, dest_y); // *16 is required because surface-writes use byte addressing (!)
} else if (channels == 3) {
surf2Dwrite(float4{
src_tensor(iib, y_in_frame, x_in_frame, 0),
src_tensor(iib, y_in_frame, x_in_frame, 1),
src_tensor(iib, y_in_frame, x_in_frame, 2),
1.f,
}, dest_surface, dest_x * 16, dest_y);
}
}
}
}
}
void launch_background_upload(
cudaArray_t &dest_array, Tensor const &src_tensor,
int const dest_height, int const dest_width,
Eigen::GpuDevice const &device
) {
cudaResourceDesc dest_resource_descriptor;
dest_resource_descriptor.resType = cudaResourceTypeArray;
dest_resource_descriptor.res.array.array = dest_array;
cudaSurfaceObject_t dest_surface;
if (auto const err = cudaCreateSurfaceObject(&dest_surface, &dest_resource_descriptor))
LOG(FATAL) << "cudaCreateSurfaceObject failed: " << cudaGetErrorName(err);
auto const config = GetCuda2DLaunchConfig(dest_width, dest_height, device);
auto const src = src_tensor.tensor<float, 4>();
upload_background<<<config.block_count, config.thread_per_block, 0, device.stream()>>>(
dest_surface,
src,
dest_width / src_tensor.dim_size(2),
config.virtual_thread_count
);
if (auto const err = cudaDestroySurfaceObject(dest_surface))
LOG(FATAL) << "cudaDestroySurfaceObject failed: " << cudaGetErrorName(err);
}
__global__ void download_pixels(TTypes<float, 4>::Tensor pixels, cudaSurfaceObject_t const src_surface, int const frames_per_row, dim3 const total_threads)
{
auto const batch_size = pixels.dimension(0);
auto const frame_height = pixels.dimension(1);
auto const frame_width = pixels.dimension(2);
auto const channels = pixels.dimension(3);
CUDA_AXIS_KERNEL_LOOP(src_x, total_threads, x) {
CUDA_AXIS_KERNEL_LOOP(src_y, total_threads, y) {
auto const iib = src_y / frame_height * frames_per_row + src_x / frame_width;
if (iib < batch_size) {
auto const pixel = surf2Dread<float4>(src_surface, src_x * 16, src_y); // *16 is required because surface-loads use byte addressing (!)
auto const x_in_frame = src_x % frame_width;
auto const y_in_frame = frame_height - 1 - src_y % frame_height; // the vertical flip ensures that our images are top-row-first, as in tensorflow
if (channels == 1) {
pixels(iib, y_in_frame, x_in_frame, 0) = pixel.x;
} else if (channels == 3) {
pixels(iib, y_in_frame, x_in_frame, 0) = pixel.x;
pixels(iib, y_in_frame, x_in_frame, 1) = pixel.y;
pixels(iib, y_in_frame, x_in_frame, 2) = pixel.z;
}
}
}
}
}
void launch_pixels_download(
Tensor &dest_tensor, cudaArray_t const &src_array,
int const src_height, int const src_width,
Eigen::GpuDevice const &device
) {
cudaResourceDesc src_resource_descriptor;
src_resource_descriptor.resType = cudaResourceTypeArray;
src_resource_descriptor.res.array.array = src_array;
cudaSurfaceObject_t src_surface;
if (auto const err = cudaCreateSurfaceObject(&src_surface, &src_resource_descriptor))
LOG(FATAL) << "cudaCreateSurfaceObject failed: " << cudaGetErrorName(err);
auto const config = GetCuda2DLaunchConfig(src_width, src_height, device);
auto dest = dest_tensor.tensor<float, 4>();
download_pixels<<<config.block_count, config.thread_per_block, 0, device.stream()>>>(
dest,
src_surface,
src_width / dest_tensor.dim_size(2),
config.virtual_thread_count
);
if (auto const err = cudaDestroySurfaceObject(src_surface))
LOG(FATAL) << "cudaDestroySurfaceObject failed: " << cudaGetErrorName(err);
}
|
135d6d23c1273ae59f411dc7cc08157ae6212d08.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/hip/HIPContext.h>
#include <cmath>
#include <limits>
#include <ATen/native/hip/Loops.cuh>
namespace at {
namespace native {
Tensor& linspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
  // Using TensorIterator, the output no longer needs to be contiguous
// We still need to check if there is internal overlap
// YES: error out, TOO_HARD: fallback to copy behavior, NO: use result directly
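  // Hedged illustration of the three outcomes (not an exhaustive spec): an `out` tensor
  // created via expand(), e.g. torch.empty(1).expand(5), maps several indices onto one
  // element and is reported as MemOverlap::YES; a non-contiguous output whose strides the
  // checker cannot prove safe comes back TOO_HARD, so we write into a fresh contiguous
  // tensor r and copy_ it back at the end; a plain contiguous output is NO and is written
  // directly.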
auto overlap = has_internal_overlap(result);
TORCH_CHECK(overlap != MemOverlap::YES,
"unsupported operation: more than one element of the written-to tensor "
"refers to a single memory location. Please clone() the tensor before "
"performing the operation.");
Tensor r = (overlap == MemOverlap::TOO_HARD) ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(start);
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
float step = static_cast<float>(scalar_end - scalar_start) / (steps - 1);
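      // Worked example (illustrative): for an integral dtype, linspace(0, 10, steps = 5)
      // gives step = (10 - 0) / (5 - 1) = 2.5f, and truncating back to scalar_t yields
      // the values 0, 2, 5, 7, 10.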
auto iter = TensorIterator::nullary_op(r);
gpu_kernel_with_index(iter, [scalar_start, step]GPU_LAMBDA(int ind) -> scalar_t {
scalar_t val = scalar_start + step * ind;
return val;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
auto iter = TensorIterator::nullary_op(r);
gpu_kernel_with_index(iter, [scalar_start, step]GPU_LAMBDA(int ind) -> scalar_t {
scalar_t val = scalar_start + step * ind;
return val;
});
});
}
if(overlap == MemOverlap::TOO_HARD) {
result.copy_(r);
}
AT_CUDA_CHECK(hipGetLastError());
return result;
}
Tensor& logspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps, double base) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
  // Using TensorIterator, the output no longer needs to be contiguous
// We still need to check if there is internal overlap
// YES: error out, TOO_HARD: fallback to copy behavior, NO: use result directly
auto overlap = has_internal_overlap(result);
TORCH_CHECK(overlap != MemOverlap::YES,
"unsupported operation: more than one element of the written-to tensor "
"refers to a single memory location. Please clone() the tensor before "
"performing the operation.");
Tensor r = (overlap == MemOverlap::TOO_HARD) ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(::pow(base, start.to<double>()));
} else {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, r.scalar_type(), "logspace_cuda", [&]() {
scalar_t scalar_base = static_cast<scalar_t>(base);
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
auto iter = TensorIterator::nullary_op(r);
gpu_kernel_with_index(iter, [scalar_start, step, scalar_base]GPU_LAMBDA(int ind) -> scalar_t {
scalar_t inc = step * ind;
scalar_t val = ::pow(scalar_base, scalar_start + inc);
return val;
});
});
}
if(overlap == MemOverlap::TOO_HARD) {
result.copy_(r);
}
AT_CUDA_CHECK(hipGetLastError());
return result;
}
Tensor& range_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
if (result.numel() != size) {
result.resize_({size});
}
    // Using TensorIterator, the output no longer needs to be contiguous
// We still need to check if there is internal overlap
// YES: error out, TOO_HARD: fallback to copy behavior, NO: use result directly
auto overlap = has_internal_overlap(result);
TORCH_CHECK(overlap != MemOverlap::YES,
"unsupported operation: more than one element of the written-to tensor "
"refers to a single memory location. Please clone() the tensor before "
"performing the operation.");
Tensor r = (overlap == MemOverlap::TOO_HARD) ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
auto iter = TensorIterator::nullary_op(r);
gpu_kernel_with_index(iter, [xstart, xstep]GPU_LAMBDA(int ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(overlap == MemOverlap::TOO_HARD) {
result.copy_(r);
}
});
AT_CUDA_CHECK(hipGetLastError());
return result;
}
Tensor& arange_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "arange_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
// we use double precision for (start - end) / step
// to compute size_d for consistency across devices.
// The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
// but double on cpu for the same,
// and the effective output size starts differing on CPU vs GPU because of precision issues, which
    // we don't want.
// the corner-case we do want to take into account is int64_t, which has higher precision than double
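    // Worked example (illustrative): arange(0, 10, 3) computes
    // size_d = ceil((10 - 0) / 3) = 4, so the result holds the 4 elements [0, 3, 6, 9];
    // the end point itself is excluded.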
double size_d;
if (std::is_same<scalar_t, int64_t>::value) {
size_d = ::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
/ step.to<accscalar_t>());
} else {
size_d = ::ceil(static_cast<double>(end.to<double>() - start.to<double>())
/ step.to<double>());
}
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
"invalid size, possible overflow?");
int64_t size = static_cast<int64_t>(size_d);
int64_t numel = result.numel();
if (numel != size) {
if(numel > 0){
TORCH_WARN("The number of elements in the out tensor of shape ", result.sizes(),
" is ", numel, " which does not match the computed number of elements ", size,
". Note that this may occur as a result of rounding error. "
"The out tensor will be resized to a tensor of shape (", size, ",).");
}
result.resize_({size});
}
    // Using TensorIterator, the output no longer needs to be contiguous
// We still need to check if there is internal overlap
// YES: error out, TOO_HARD: fallback to copy behavior, NO: use result directly
auto overlap = has_internal_overlap(result);
TORCH_CHECK(overlap != MemOverlap::YES,
"unsupported operation: more than one element of the written-to tensor "
"refers to a single memory location. Please clone() the tensor before "
"performing the operation.");
Tensor r = (overlap == MemOverlap::TOO_HARD) ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
auto iter = TensorIterator::nullary_op(r);
gpu_kernel_with_index(iter, [xstart, xstep]GPU_LAMBDA(int ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(overlap == MemOverlap::TOO_HARD) {
result.copy_(r);
}
});
AT_CUDA_CHECK(hipGetLastError());
return result;
}
}} // namespace at::native
| 135d6d23c1273ae59f411dc7cc08157ae6212d08.cu | #include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/Exceptions.h>
#include <ATen/cuda/CUDAContext.h>
#include <cmath>
#include <limits>
#include <ATen/native/cuda/Loops.cuh>
namespace at {
namespace native {
Tensor& linspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
  // Using TensorIterator, the output no longer needs to be contiguous
// We still need to check if there is internal overlap
// YES: error out, TOO_HARD: fallback to copy behavior, NO: use result directly
auto overlap = has_internal_overlap(result);
TORCH_CHECK(overlap != MemOverlap::YES,
"unsupported operation: more than one element of the written-to tensor "
"refers to a single memory location. Please clone() the tensor before "
"performing the operation.");
Tensor r = (overlap == MemOverlap::TOO_HARD) ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(start);
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
float step = static_cast<float>(scalar_end - scalar_start) / (steps - 1);
auto iter = TensorIterator::nullary_op(r);
gpu_kernel_with_index(iter, [scalar_start, step]GPU_LAMBDA(int ind) -> scalar_t {
scalar_t val = scalar_start + step * ind;
return val;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
auto iter = TensorIterator::nullary_op(r);
gpu_kernel_with_index(iter, [scalar_start, step]GPU_LAMBDA(int ind) -> scalar_t {
scalar_t val = scalar_start + step * ind;
return val;
});
});
}
if(overlap == MemOverlap::TOO_HARD) {
result.copy_(r);
}
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
Tensor& logspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps, double base) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
  // Using TensorIterator, the output no longer needs to be contiguous
// We still need to check if there is internal overlap
// YES: error out, TOO_HARD: fallback to copy behavior, NO: use result directly
auto overlap = has_internal_overlap(result);
TORCH_CHECK(overlap != MemOverlap::YES,
"unsupported operation: more than one element of the written-to tensor "
"refers to a single memory location. Please clone() the tensor before "
"performing the operation.");
Tensor r = (overlap == MemOverlap::TOO_HARD) ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(std::pow(base, start.to<double>()));
} else {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, r.scalar_type(), "logspace_cuda", [&]() {
scalar_t scalar_base = static_cast<scalar_t>(base);
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
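      // Worked example (illustrative): logspace(0, 3, steps = 4, base = 10) gives
      // step = (3 - 0) / (4 - 1) = 1 over the exponents, so index i maps to
      // pow(10, 0 + i) = 1, 10, 100, 1000.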
auto iter = TensorIterator::nullary_op(r);
gpu_kernel_with_index(iter, [scalar_start, step, scalar_base]GPU_LAMBDA(int ind) -> scalar_t {
scalar_t inc = step * ind;
scalar_t val = std::pow(scalar_base, scalar_start + inc);
return val;
});
});
}
if(overlap == MemOverlap::TOO_HARD) {
result.copy_(r);
}
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
Tensor& range_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
if (result.numel() != size) {
result.resize_({size});
}
    // Using TensorIterator, the output no longer needs to be contiguous
// We still need to check if there is internal overlap
// YES: error out, TOO_HARD: fallback to copy behavior, NO: use result directly
auto overlap = has_internal_overlap(result);
TORCH_CHECK(overlap != MemOverlap::YES,
"unsupported operation: more than one element of the written-to tensor "
"refers to a single memory location. Please clone() the tensor before "
"performing the operation.");
Tensor r = (overlap == MemOverlap::TOO_HARD) ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
auto iter = TensorIterator::nullary_op(r);
gpu_kernel_with_index(iter, [xstart, xstep]GPU_LAMBDA(int ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(overlap == MemOverlap::TOO_HARD) {
result.copy_(r);
}
});
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
Tensor& arange_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "arange_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
// we use double precision for (start - end) / step
// to compute size_d for consistency across devices.
// The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
// but double on cpu for the same,
// and the effective output size starts differing on CPU vs GPU because of precision issues, which
    // we don't want.
// the corner-case we do want to take into account is int64_t, which has higher precision than double
double size_d;
if (std::is_same<scalar_t, int64_t>::value) {
size_d = std::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
/ step.to<accscalar_t>());
} else {
size_d = std::ceil(static_cast<double>(end.to<double>() - start.to<double>())
/ step.to<double>());
}
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
"invalid size, possible overflow?");
int64_t size = static_cast<int64_t>(size_d);
int64_t numel = result.numel();
if (numel != size) {
if(numel > 0){
TORCH_WARN("The number of elements in the out tensor of shape ", result.sizes(),
" is ", numel, " which does not match the computed number of elements ", size,
". Note that this may occur as a result of rounding error. "
"The out tensor will be resized to a tensor of shape (", size, ",).");
}
result.resize_({size});
}
    // Using TensorIterator, the output no longer needs to be contiguous
// We still need to check if there is internal overlap
// YES: error out, TOO_HARD: fallback to copy behavior, NO: use result directly
auto overlap = has_internal_overlap(result);
TORCH_CHECK(overlap != MemOverlap::YES,
"unsupported operation: more than one element of the written-to tensor "
"refers to a single memory location. Please clone() the tensor before "
"performing the operation.");
Tensor r = (overlap == MemOverlap::TOO_HARD) ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
auto iter = TensorIterator::nullary_op(r);
gpu_kernel_with_index(iter, [xstart, xstep]GPU_LAMBDA(int ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(overlap == MemOverlap::TOO_HARD) {
result.copy_(r);
}
});
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
}} // namespace at::native
|
292b0d5beeee90eef4e74adfbccc2ed48635666f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Authored by: Chen, Shifu
*
* Email: [email protected] / [email protected]
*
* The code is distributed under BSD license, you are allowed to use, modify or sell this code, but a statement is required if you used this code any where.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include "hip/hip_vector_types.h"
#include "bbsort.cuh"
#include "bbsort_kernel.cu"
float cmpKernel(float4 v){
return v.w;
}
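// assignSliceToBuckets greedily packs consecutive slices into buckets of at most BLOCK_SIZE
// elements: bucketOfSlice maps each slice to its bucket, bucketOffset/bucketSizes describe
// each bucket, and sliceOffsetInBucket is the slice's starting position inside its bucket.
// It returns true if any single slice by itself exceeds BLOCK_SIZE; those oversized buckets
// are later re-sorted recursively by bbSort.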
bool assignSliceToBuckets(unsigned int* sliceCount,int sliceSize,unsigned int* bucketOffset,unsigned int* bucketOfSlice,unsigned int* bucketSizes,unsigned int* sliceOffsetInBucket,int& bucketsCount,float step)
{
int i=0;
bool overflow=false;
int tmpSum=0;
bucketOffset[0]=0;
for(i=0;i<sliceSize; i++){
if(sliceCount[i] >BLOCK_SIZE)
{
overflow=true;
}
tmpSum += sliceCount[i];
bucketOfSlice[i]=bucketsCount;
bucketSizes[bucketsCount] = tmpSum;
sliceOffsetInBucket[i]=tmpSum -sliceCount[i];
if(tmpSum > BLOCK_SIZE )
{
if(i != 0)
{
bucketOfSlice[i]=bucketsCount+1;
bucketSizes[bucketsCount] -= sliceCount[i];
sliceOffsetInBucket[i]=0;
bucketOffset[bucketsCount+1]=bucketOffset[bucketsCount] + tmpSum - sliceCount[i];
bucketsCount++;
tmpSum=sliceCount[i];
bucketSizes[bucketsCount] = tmpSum;
}
else
{
bucketOffset[bucketsCount+1]=bucketOffset[bucketsCount] + tmpSum ;
sliceOffsetInBucket[i]=0;
tmpSum=0;
bucketsCount++;
}
}
}
bucketsCount++;
return overflow;
}
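// reduceMinMax performs an iterative pairwise reduction on the device: each pass launches
// reduceMaxD/reduceMinD with a halved stride until a single element remains, then copies
// that element back to the host and extracts its sort key (the .w component) via cmpKernel.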
void reduceMinMax(float4* dData,int size,float& result,bool isMax)
{
int step;
step=(size%2==0)?
(size/2):(size/2 +1);
int blockSize=BLOCK_SIZE;
int blockCount;
int length=size;
float4 float4result;
while(step > 0)
{
if(step%BLOCK_SIZE==0)
blockCount=step/BLOCK_SIZE;
else
blockCount=step/BLOCK_SIZE+1;
if(isMax)
hipLaunchKernelGGL(( reduceMaxD), dim3(blockCount),dim3(blockSize), 0, 0, dData,step,length);
else
hipLaunchKernelGGL(( reduceMinD), dim3(blockCount),dim3(blockSize), 0, 0, dData,step,length);
length=step;
step=(step%2==0 || step==1)?(step/2):(step/2 +1);
}
CUDA_SAFE_CALL(hipMemcpy(&float4result, dData, sizeof(float4), hipMemcpyDeviceToHost));
result=cmpKernel(float4result);
}
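// evaluateDisorder estimates how presorted the input already is: calDifferenceD (defined in
// bbsort_kernel.cu) fills dDiffData with per-neighbour differences, those differences are
// summed with the same pairwise reduction, and if the total falls below
// (maxValue - minValue) * size / 10 the list is treated as NEARLY_SORTED, otherwise as
// DISORDERLY.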
void evaluateDisorder(float4* dData,int size,float maxValue, float minValue, int& listOrder)
{
int blockCount;
if((size-1) % BLOCK_SIZE ==0)blockCount=size/BLOCK_SIZE;
else blockCount=size/BLOCK_SIZE+1;
float* dDiffData;
CUDA_SAFE_CALL(hipMalloc((void**)&dDiffData, sizeof(float) * size));
hipLaunchKernelGGL(( calDifferenceD), dim3(blockCount),dim3(BLOCK_SIZE),(BLOCK_SIZE)*sizeof(float4), 0, dData,dDiffData,size);
float sum=0;
int step;
step=(size%2==0)?
(size/2):(size/2 +1);
int blockSize=BLOCK_SIZE;
int length=size;
while(step > 0)
{
if(step%BLOCK_SIZE==0)
blockCount=step/BLOCK_SIZE;
else
blockCount=step/BLOCK_SIZE+1;
hipLaunchKernelGGL(( reduceSumD), dim3(blockCount),dim3(blockSize), 0, 0, dDiffData,step,length);
length=step;
step=(step%2==0 || step==1)?(step/2):(step/2 +1);
}
CUDA_SAFE_CALL(hipMemcpy(&sum, dDiffData, sizeof(float), hipMemcpyDeviceToHost));
if( sum < (maxValue - minValue) * size / 10)
listOrder=NEARLY_SORTED;
else
listOrder=DISORDERLY;
CUDA_SAFE_CALL(hipFree(dDiffData));
}
void bbSort(float4* dData,int size,int listOrder)
{
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
//CUT_SAFE_CALL(cutStartTimer(timer));
float minValue,maxValue;
float4* dTmpData;
CUDA_SAFE_CALL(hipMalloc((void**)&dTmpData, sizeof(float4) * size));
CUDA_SAFE_CALL(hipMemcpy(dTmpData, dData, sizeof(float4) * size, hipMemcpyDeviceToDevice));
reduceMinMax(dTmpData,size,maxValue,true);
CUDA_SAFE_CALL(hipMemcpy(dTmpData, dData, sizeof(float4) * size, hipMemcpyDeviceToDevice));
reduceMinMax(dTmpData,size,minValue,false);
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
//CUT_SAFE_CALL(cutStopTimer(timer));
//float t = cutGetAverageTimerValue(timer);
//printf( "Max min used: %fms\n",t);
if(minValue == maxValue)
{
CUDA_SAFE_CALL(hipFree(dTmpData));
//printf(" flat, size is:%d\n",size);
return ;
}
if(listOrder == AUTO_EVALUATE )
{
evaluateDisorder(dData,size,maxValue,minValue,listOrder);
}
int blockCount;
if(size%BLOCK_SIZE==0)blockCount=size/BLOCK_SIZE;
else blockCount=size/BLOCK_SIZE+1;
float sliceStep =(float)(50.0*((double)(maxValue-minValue)/(double)size));
int sliceSize = (int)((maxValue-minValue)/sliceStep) + 10;
unsigned int* dSliceCounts;
unsigned int* dOffsetInSlice;
CUDA_SAFE_CALL(hipMalloc((void**)&dOffsetInSlice, sizeof(unsigned int) * size));
CUDA_SAFE_CALL(hipMalloc((void**)&dSliceCounts, sizeof(unsigned int) * sliceSize));
CUDA_SAFE_CALL(hipMemset(dSliceCounts,0, sizeof(int) * sliceSize));
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
//CUT_SAFE_CALL(cutResetTimer(timer));
if(listOrder == NEARLY_SORTED)
{
hipLaunchKernelGGL(( assignElementToSlicesNearlySortedD), dim3(blockCount), dim3(BLOCK_SIZE), 0, 0, dData,size,dSliceCounts,dOffsetInSlice,minValue,sliceStep,sliceSize,blockCount);
//printf("NEARLY SORTED\n");
}
else
hipLaunchKernelGGL(( assignElementToSlicesD), dim3(blockCount), dim3(BLOCK_SIZE), 0, 0, dData,size,dSliceCounts,dOffsetInSlice,minValue,sliceStep,sliceSize);
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
//CUT_SAFE_CALL(cutStopTimer(timer));
//t = cutGetAverageTimerValue(timer);
//printf( "slice dvision used: %fms\n",t);
unsigned int* hSliceCounts=new unsigned int[sliceSize];
CUDA_SAFE_CALL(hipMemcpy(hSliceCounts, dSliceCounts, sizeof(unsigned int) * sliceSize, hipMemcpyDeviceToHost));
int looseBucketSize=size/100;
unsigned int* hBucketOffsets=new unsigned int[looseBucketSize];
unsigned int* hBucketSizes=new unsigned int[looseBucketSize];
unsigned int* hBucketOfSlices=new unsigned int[sliceSize];
unsigned int* hSliceOffsetInBucket=new unsigned int[sliceSize];
int bucketsCount=0;
memset(hBucketSizes,0,sizeof(int) * looseBucketSize);
memset(hSliceOffsetInBucket,0,sizeof(unsigned int) * sliceSize);
bool overflow;
overflow = assignSliceToBuckets(hSliceCounts,sliceSize,hBucketOffsets,hBucketOfSlices,hBucketSizes,hSliceOffsetInBucket,bucketsCount,sliceStep);
unsigned int* dBucketOffsets;
unsigned int* dBucketSizes;
unsigned int* dBucketOfSlices;
unsigned int* dSliceOffsetInBucket;
CUDA_SAFE_CALL(hipMalloc((void**)&dBucketOfSlices, sizeof(unsigned int) * sliceSize));
CUDA_SAFE_CALL(hipMalloc((void**)&dSliceOffsetInBucket, sizeof(unsigned int) * sliceSize));
CUDA_SAFE_CALL(hipMalloc((void**)&dBucketOffsets, sizeof(unsigned int) * bucketsCount));
CUDA_SAFE_CALL(hipMalloc((void**)&dBucketSizes, sizeof(unsigned int) * bucketsCount));
CUDA_SAFE_CALL(hipMemcpy(dBucketOfSlices, hBucketOfSlices, sizeof(unsigned int) * sliceSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(dSliceOffsetInBucket, hSliceOffsetInBucket, sizeof(unsigned int) * sliceSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(dBucketOffsets, hBucketOffsets, sizeof(unsigned int) * bucketsCount, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(dBucketSizes, hBucketSizes, sizeof(unsigned int) * bucketsCount, hipMemcpyHostToDevice));
hipBindTexture(0,tBucketOffsets,dBucketOffsets);
hipBindTexture(0,tBucketSizes,dBucketSizes);
hipBindTexture(0,tBucketOfSlices,dBucketOfSlices);
hipBindTexture(0,tSliceOffsetInBucket,dSliceOffsetInBucket);
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
//CUT_SAFE_CALL(cutResetTimer(timer));
hipLaunchKernelGGL(( assignElementToBucketD), dim3(blockCount), dim3(BLOCK_SIZE), 0, 0, dData,dTmpData,size,dOffsetInSlice,minValue,sliceStep);
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
//CUT_SAFE_CALL(cutStopTimer(timer));
//t = cutGetAverageTimerValue(timer);
//printf( "bucket dvision used: %fms\n",t);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
hipLaunchKernelGGL(( bitonicSortD), dim3(bucketsCount), dim3(BLOCK_SIZE), sizeof(float4) * BLOCK_SIZE, 0, dTmpData);
CUDA_SAFE_CALL(hipMemcpy(dData, dTmpData, sizeof(float4) * size, hipMemcpyDeviceToDevice));
if(overflow){
for(int i=0;i<bucketsCount;i++)
{
if(hBucketSizes[i] > BLOCK_SIZE)
{
bbSort(dData + hBucketOffsets[i],hBucketSizes[i],listOrder);
}
}
}
	//Arrays allocated with new[] must be released with delete[]
	delete [] hBucketOffsets;
	delete [] hBucketOfSlices;
	delete [] hSliceCounts;
	delete [] hBucketSizes;
	delete [] hSliceOffsetInBucket;
CUDA_SAFE_CALL(hipFree(dOffsetInSlice));
CUDA_SAFE_CALL(hipFree(dSliceCounts));
CUDA_SAFE_CALL(hipFree(dTmpData));
hipUnbindTexture( tBucketSizes );
CUDA_SAFE_CALL(hipFree(dBucketSizes));
hipUnbindTexture( tBucketOffsets );
CUDA_SAFE_CALL(hipFree(dBucketOffsets));
hipUnbindTexture( tBucketOfSlices );
CUDA_SAFE_CALL(hipFree(dBucketOfSlices));
hipUnbindTexture( tSliceOffsetInBucket );
CUDA_SAFE_CALL(hipFree(dSliceOffsetInBucket));
}
| 292b0d5beeee90eef4e74adfbccc2ed48635666f.cu | /*
* Authored by: Chen, Shifu
*
* Email: [email protected] / [email protected]
*
* The code is distributed under BSD license, you are allowed to use, modify or sell this code, but a statement is required if you used this code any where.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include "vector_types.h"
#include "bbsort.cuh"
#include "bbsort_kernel.cu"
float cmpKernel(float4 v){
return v.w;
}
bool assignSliceToBuckets(unsigned int* sliceCount,int sliceSize,unsigned int* bucketOffset,unsigned int* bucketOfSlice,unsigned int* bucketSizes,unsigned int* sliceOffsetInBucket,int& bucketsCount,float step)
{
int i=0;
bool overflow=false;
int tmpSum=0;
bucketOffset[0]=0;
for(i=0;i<sliceSize; i++){
if(sliceCount[i] >BLOCK_SIZE)
{
overflow=true;
}
tmpSum += sliceCount[i];
bucketOfSlice[i]=bucketsCount;
bucketSizes[bucketsCount] = tmpSum;
sliceOffsetInBucket[i]=tmpSum -sliceCount[i];
if(tmpSum > BLOCK_SIZE )
{
if(i != 0)
{
bucketOfSlice[i]=bucketsCount+1;
bucketSizes[bucketsCount] -= sliceCount[i];
sliceOffsetInBucket[i]=0;
bucketOffset[bucketsCount+1]=bucketOffset[bucketsCount] + tmpSum - sliceCount[i];
bucketsCount++;
tmpSum=sliceCount[i];
bucketSizes[bucketsCount] = tmpSum;
}
else
{
bucketOffset[bucketsCount+1]=bucketOffset[bucketsCount] + tmpSum ;
sliceOffsetInBucket[i]=0;
tmpSum=0;
bucketsCount++;
}
}
}
bucketsCount++;
return overflow;
}
void reduceMinMax(float4* dData,int size,float& result,bool isMax)
{
int step;
step=(size%2==0)?
(size/2):(size/2 +1);
int blockSize=BLOCK_SIZE;
int blockCount;
int length=size;
float4 float4result;
while(step > 0)
{
if(step%BLOCK_SIZE==0)
blockCount=step/BLOCK_SIZE;
else
blockCount=step/BLOCK_SIZE+1;
if(isMax)
reduceMaxD<<<blockCount,blockSize>>>(dData,step,length);
else
reduceMinD<<<blockCount,blockSize>>>(dData,step,length);
length=step;
step=(step%2==0 || step==1)?(step/2):(step/2 +1);
}
CUDA_SAFE_CALL(cudaMemcpy(&float4result, dData, sizeof(float4), cudaMemcpyDeviceToHost));
result=cmpKernel(float4result);
}
void evaluateDisorder(float4* dData,int size,float maxValue, float minValue, int& listOrder)
{
int blockCount;
if((size-1) % BLOCK_SIZE ==0)blockCount=size/BLOCK_SIZE;
else blockCount=size/BLOCK_SIZE+1;
float* dDiffData;
CUDA_SAFE_CALL(cudaMalloc((void**)&dDiffData, sizeof(float) * size));
calDifferenceD<<<blockCount,BLOCK_SIZE,(BLOCK_SIZE)*sizeof(float4)>>>(dData,dDiffData,size);
float sum=0;
int step;
step=(size%2==0)?
(size/2):(size/2 +1);
int blockSize=BLOCK_SIZE;
int length=size;
while(step > 0)
{
if(step%BLOCK_SIZE==0)
blockCount=step/BLOCK_SIZE;
else
blockCount=step/BLOCK_SIZE+1;
reduceSumD<<<blockCount,blockSize>>>(dDiffData,step,length);
length=step;
step=(step%2==0 || step==1)?(step/2):(step/2 +1);
}
CUDA_SAFE_CALL(cudaMemcpy(&sum, dDiffData, sizeof(float), cudaMemcpyDeviceToHost));
if( sum < (maxValue - minValue) * size / 10)
listOrder=NEARLY_SORTED;
else
listOrder=DISORDERLY;
CUDA_SAFE_CALL(cudaFree(dDiffData));
}
void bbSort(float4* dData,int size,int listOrder)
{
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
//CUT_SAFE_CALL(cutStartTimer(timer));
float minValue,maxValue;
float4* dTmpData;
CUDA_SAFE_CALL(cudaMalloc((void**)&dTmpData, sizeof(float4) * size));
CUDA_SAFE_CALL(cudaMemcpy(dTmpData, dData, sizeof(float4) * size, cudaMemcpyDeviceToDevice));
reduceMinMax(dTmpData,size,maxValue,true);
CUDA_SAFE_CALL(cudaMemcpy(dTmpData, dData, sizeof(float4) * size, cudaMemcpyDeviceToDevice));
reduceMinMax(dTmpData,size,minValue,false);
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
//CUT_SAFE_CALL(cutStopTimer(timer));
//float t = cutGetAverageTimerValue(timer);
//printf( "Max min used: %fms\n",t);
if(minValue == maxValue)
{
CUDA_SAFE_CALL(cudaFree(dTmpData));
//printf(" flat, size is:%d\n",size);
return ;
}
if(listOrder == AUTO_EVALUATE )
{
evaluateDisorder(dData,size,maxValue,minValue,listOrder);
}
int blockCount;
if(size%BLOCK_SIZE==0)blockCount=size/BLOCK_SIZE;
else blockCount=size/BLOCK_SIZE+1;
float sliceStep =(float)(50.0*((double)(maxValue-minValue)/(double)size));
int sliceSize = (int)((maxValue-minValue)/sliceStep) + 10;
unsigned int* dSliceCounts;
unsigned int* dOffsetInSlice;
CUDA_SAFE_CALL(cudaMalloc((void**)&dOffsetInSlice, sizeof(unsigned int) * size));
CUDA_SAFE_CALL(cudaMalloc((void**)&dSliceCounts, sizeof(unsigned int) * sliceSize));
CUDA_SAFE_CALL(cudaMemset(dSliceCounts,0, sizeof(int) * sliceSize));
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
//CUT_SAFE_CALL(cutResetTimer(timer));
if(listOrder == NEARLY_SORTED)
{
assignElementToSlicesNearlySortedD<<<blockCount, BLOCK_SIZE>>>(dData,size,dSliceCounts,dOffsetInSlice,minValue,sliceStep,sliceSize,blockCount);
//printf("NEARLY SORTED\n");
}
else
assignElementToSlicesD<<<blockCount, BLOCK_SIZE>>>(dData,size,dSliceCounts,dOffsetInSlice,minValue,sliceStep,sliceSize);
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
//CUT_SAFE_CALL(cutStopTimer(timer));
//t = cutGetAverageTimerValue(timer);
//printf( "slice dvision used: %fms\n",t);
unsigned int* hSliceCounts=new unsigned int[sliceSize];
CUDA_SAFE_CALL(cudaMemcpy(hSliceCounts, dSliceCounts, sizeof(unsigned int) * sliceSize, cudaMemcpyDeviceToHost));
int looseBucketSize=size/100;
unsigned int* hBucketOffsets=new unsigned int[looseBucketSize];
unsigned int* hBucketSizes=new unsigned int[looseBucketSize];
unsigned int* hBucketOfSlices=new unsigned int[sliceSize];
unsigned int* hSliceOffsetInBucket=new unsigned int[sliceSize];
int bucketsCount=0;
memset(hBucketSizes,0,sizeof(int) * looseBucketSize);
memset(hSliceOffsetInBucket,0,sizeof(unsigned int) * sliceSize);
bool overflow;
overflow = assignSliceToBuckets(hSliceCounts,sliceSize,hBucketOffsets,hBucketOfSlices,hBucketSizes,hSliceOffsetInBucket,bucketsCount,sliceStep);
unsigned int* dBucketOffsets;
unsigned int* dBucketSizes;
unsigned int* dBucketOfSlices;
unsigned int* dSliceOffsetInBucket;
CUDA_SAFE_CALL(cudaMalloc((void**)&dBucketOfSlices, sizeof(unsigned int) * sliceSize));
CUDA_SAFE_CALL(cudaMalloc((void**)&dSliceOffsetInBucket, sizeof(unsigned int) * sliceSize));
CUDA_SAFE_CALL(cudaMalloc((void**)&dBucketOffsets, sizeof(unsigned int) * bucketsCount));
CUDA_SAFE_CALL(cudaMalloc((void**)&dBucketSizes, sizeof(unsigned int) * bucketsCount));
CUDA_SAFE_CALL(cudaMemcpy(dBucketOfSlices, hBucketOfSlices, sizeof(unsigned int) * sliceSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(dSliceOffsetInBucket, hSliceOffsetInBucket, sizeof(unsigned int) * sliceSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(dBucketOffsets, hBucketOffsets, sizeof(unsigned int) * bucketsCount, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(dBucketSizes, hBucketSizes, sizeof(unsigned int) * bucketsCount, cudaMemcpyHostToDevice));
cudaBindTexture(0,tBucketOffsets,dBucketOffsets);
cudaBindTexture(0,tBucketSizes,dBucketSizes);
cudaBindTexture(0,tBucketOfSlices,dBucketOfSlices);
cudaBindTexture(0,tSliceOffsetInBucket,dSliceOffsetInBucket);
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
//CUT_SAFE_CALL(cutResetTimer(timer));
assignElementToBucketD<<<blockCount, BLOCK_SIZE>>>(dData,dTmpData,size,dOffsetInSlice,minValue,sliceStep);
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
//CUT_SAFE_CALL(cutStopTimer(timer));
//t = cutGetAverageTimerValue(timer);
//printf( "bucket dvision used: %fms\n",t);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
bitonicSortD<<<bucketsCount, BLOCK_SIZE, sizeof(float4) * BLOCK_SIZE>>>(dTmpData);
CUDA_SAFE_CALL(cudaMemcpy(dData, dTmpData, sizeof(float4) * size, cudaMemcpyDeviceToDevice));
if(overflow){
for(int i=0;i<bucketsCount;i++)
{
if(hBucketSizes[i] > BLOCK_SIZE)
{
bbSort(dData + hBucketOffsets[i],hBucketSizes[i],listOrder);
}
}
}
	//Arrays allocated with new[] must be released with delete[]
	delete [] hBucketOffsets;
	delete [] hBucketOfSlices;
	delete [] hSliceCounts;
	delete [] hBucketSizes;
	delete [] hSliceOffsetInBucket;
CUDA_SAFE_CALL(cudaFree(dOffsetInSlice));
CUDA_SAFE_CALL(cudaFree(dSliceCounts));
CUDA_SAFE_CALL(cudaFree(dTmpData));
cudaUnbindTexture( tBucketSizes );
CUDA_SAFE_CALL(cudaFree(dBucketSizes));
cudaUnbindTexture( tBucketOffsets );
CUDA_SAFE_CALL(cudaFree(dBucketOffsets));
cudaUnbindTexture( tBucketOfSlices );
CUDA_SAFE_CALL(cudaFree(dBucketOfSlices));
cudaUnbindTexture( tSliceOffsetInBucket );
CUDA_SAFE_CALL(cudaFree(dSliceOffsetInBucket));
}
|
ba5d1573cd6a174c00fc2094ed2becb534849d49.hip | // !!! This is a file automatically generated by hipify!!!
///This program computes the parallelized version of the FFT_DIF_DIT_TD algorithm (DOUBLE-precision variables)
///(21/01/2017)
///This version is used to plot the absolute and relative errors in MATLAB. Case: N=(2^5)x(3^4)x(5^4), Li=800,000, Lo=800,000
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////FUNCTION DECLARATIONS//////////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,hipDoubleComplex *x,hipDoubleComplex *W,hipDoubleComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,hipDoubleComplex *z,hipDoubleComplex *W,hipDoubleComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////GLOBAL VARIABLE DECLARATIONS/////////////////////////
//////////////////////////////////////////////////////////////////////////
hipDoubleComplex *x_host;
hipDoubleComplex *W_host;
//hipDoubleComplex *y_host;
//hipDoubleComplex *z_host;
hipDoubleComplex *X_host;
hipDoubleComplex *x_device;
hipDoubleComplex *W_device;
hipDoubleComplex *y_device;
hipDoubleComplex *z_device;
hipDoubleComplex *X_device;
hipfftDoubleComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Stores the factors of N
int svF; //Stores the number of factors of N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////INPUT DATA//////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Number of elements in the input vector
/// Li >>> Number of nonzero input elements
/// Lo >>> Number of required output elements
/// loop >>> Number of iterations
/// muestras >>> Number of samples
//////////////////////////////////////////////////////////////////////////
///////////////////////////OUTPUT DATA////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Output vector
//////////////////////////////////////////////////////////////////////////
/////////////////// THE INPUT DATA ARE SET HERE //////////////////////////
//////////////////////////////////////////////////////////////////////////
///Enter the required number of iterations
const int loop = 300;
///Enter the value of N_max
const int N_max = 1620000;
///Enter the value of Li_max
const int Li_max = 800000;
///Enter the value of Lo_max
const int Lo_max = 800000;
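///Note: N_max = 1620000 = (2^5)*(3^4)*(5^4), i.e. the composite-N case named in the file
///header, and Li_max = Lo_max = 800,000 match the Li and Lo of that case.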
//////////////////////////////////////////////////////////////////////////
//////////////////////////MAIN FUNCTION///////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Main function
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////DEVICE SELECTION////////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
hipSetDevice(0);
hipGetDevice(&device);
if(device == 1)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
}
if(device == 0)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom,m;
double *parte_real;
double *parte_imag;
//float suma;
//float promedio[N_max];
FILE *da,*db;
 //da = fopen("Tiempos_N20_LiN_LoVARIA_CUDA.bin","a+b"); //Creates or overwrites the file
 da = fopen("Resultados_NCompuesta_Li800000_Lo800000_real_CUDA_DO.bin","a+b"); //Creates or overwrites the file
 db = fopen("Resultados_NCompuesta_Li800000_Lo800000_imag_CUDA_DO.bin","a+b"); //Creates or overwrites the file
 //Pause
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = i_N;
//N = N_max;
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(k_res=Lo_max;k_res <= Lo_max;k_res++)
{
Lo=k_res;
printf("\n Li = %d Lo = %d",Li,Lo);
//////////////////////////////////////////////////////////
parte_real = (double*) malloc(Lo*sizeof(double));
parte_imag = (double*) malloc(Lo*sizeof(double));
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = 0.0;
parte_imag[m] = 0.0;
}
     ///Open the binary input files
db_open = fopen("Entrada_real_NCompuesta_C.bin","rb");
dc_open = fopen("Entrada_imag_NCompuesta_C.bin","rb");
//suma=0.0;
for(j=0;j<loop;j++)
{
      //Commands needed to measure execution time
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
      //The values of the input vector x[n] are generated on the host
      vector_entrada_xn(Li);
      ///The array W[N] is generated
      arreglo_W(N);
//---------------------------------------------------------------------------------------------
      //Start measuring the application execution time
      hipEventRecord(start_app,0);
      //The factors Dip and Dop are generated on the host
      asign_rap(N,Li,Lo);
      //Host-side computation of the factor P
      P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
      //Host helper function that runs the input stage
      etapa_entrada();
      //Host helper function that runs the intermediate stage
      etapa_intermedia();
      //Host helper function that runs the output stage
      etapa_salida();
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//printf("\n Li = %d Lo = %d",Li,Lo);
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
      ///RUNNING SUMS
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = parte_real[m] + cuCreal(X_host[m]);
parte_imag[m] = parte_imag[m] + cuCimag(X_host[m]);
//printf("\n X[%d] = %.4f + (%.4f)",m,creal(X[m]),cimag(X[m]));
//fprintf(dc,"%f %f\n",creal(X[m]),cimag(X[m]));
}
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
//---------------------------------------------------------------------------------------------
      //Commands needed to measure the application (app) time
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
      //Sum of all the measured times
      //suma = suma + elapsedTime_app;
      //Destroy the events used to time the application
      hipEventDestroy(start_app);
      hipEventDestroy(stop_app);
      //Free host and device memory
free(x_host);
free(W_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
}
///////////////////////////////////
     ///AVERAGE OF THE ERRORS
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = parte_real[m]/loop;
parte_imag[m] = parte_imag[m] /loop;
}
//////////////////////////////////
     ///The results are written to the binary output files
fwrite(parte_real,sizeof(double),Lo,da);
fwrite(parte_imag,sizeof(double),Lo,db);
//promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
free(parte_real);
free(parte_imag);
}
}
}
//fwrite(promedio,sizeof(float),N_max,da);
fclose(da);
fclose(db);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////SECONDARY FUNCTIONS//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//This function generates the input vector x[n]
void vector_entrada_xn(int Li)
{
//Declaracin de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuDoubleComplex((double)buffer_real[k],(double)buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCreal(x_host[k]),cuCimag(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//This function generates the twiddle-factor array W
void arreglo_W(int N)
{
//Declaracin de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuDoubleComplex((double)cos((2*CUDART_PI*n)/N),(double)(-1)*sin((2*CUDART_PI*n)/N));
/*
if(n == 255)
{
printf("\nW[%d] = %f + %f",n-1,cuCrealf(W_host[n-1]),cuCimagf(W_host[n-1]));
}
*/
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCreal(W_host[n]),cuCimag(W_host[n]));
}
*/
}
//This function generates the factors Dip and Dop
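//Dip and Dop are chosen among the divisors of N (built from its prime factorization) so
//that the pair (Dip,Dop) is as close as possible, in Euclidean distance, to the ideal
//ratios N/Li and N/Lo, subject to Dip <= N/Li.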
void asign_rap(int N,int Li,int Lo)
{
//Declaracin de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
 //Initializations
 G = 0;
 svF = 0;
 //Ideal factors Dip and Dop
 NLi=(float)N/(float)Li;
 NLo=(float)N/(float)Lo;
 Diprapt=NLi;
 Doprapt=NLo;
 //Find the factors of "N"
 //vF stores the factors of "N"
 //svF stores the number of factors of "N"
factor(N);
 /*
 Store in the vector Nh the distinct factors found in the vector vF.
 The vector k stores how many times each element of Nh is repeated.
 */
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
 /*
 Build every possible combination of factors whose product divides N; the combinations
 are returned in the vector Prod, and t stores how many of them there are.
 */
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//This function finds the prime factors of "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
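//Worked example (illustrative): factor(1620000) stores the prime factors in ascending
//order, vF = {2,2,2,2,2,3,3,3,3,5,5,5,5}, and sets svF = 13.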
//This function enumerates all the possible factor combinations (divisors) of "N"
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
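//Worked example (illustrative): product({2,3},{2,1},1) enumerates every divisor of
//2^2 * 3^1 = 12, leaving Prod = {1,2,4,3,6,12} and a = 6.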
//Host helper function that computes the input stage on the device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////INPUT STAGE///////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int k1,n1,n2;
//Asignacin de memoria en el device para el arreglo "x_device"
hipMalloc((void**)&x_device,Li*sizeof(hipDoubleComplex));
//Se reserva memoria en el device para el arreglo "W_device"
hipMalloc((void**)&W_device,N*sizeof(hipDoubleComplex));
//Asignacin de memoria en el device para el arreglo "y"
hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(hipDoubleComplex));
//Se pasa el arreglo x_host a x_device
hipMemcpy(x_device,x_host,Li*sizeof(hipDoubleComplex),hipMemcpyHostToDevice);
//Envo de los arreglos W hacia la memoria global del device
hipMemcpy(W_device,W_host,N*sizeof(hipDoubleComplex),hipMemcpyHostToDevice);
//Asignacin de memoria en el host para "y"
//y_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la funcin kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
hipMemcpy(y_host,y_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCreal(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimag(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Kernel function that executes the input stage on the device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,hipDoubleComplex *x,hipDoubleComplex *W,hipDoubleComplex *y)
{
int n1,n2;
hipDoubleComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generacin de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmul(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuDoubleComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Host helper function that computes the intermediate stage on the device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////INTERMEDIATE STAGE////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignacin de memoria en el device para "z"
hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(hipDoubleComplex));
//Asignacin de memoria en el host para "z"
//z_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*P*Dip*Dop);
//Asignacin de memoria en el device para "in" y "out"
hipMalloc((void**)&in,sizeof(hipfftDoubleComplex)*P*Dip*Dop);
hipMalloc((void**)&out,sizeof(hipfftDoubleComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
hipMemcpy(in,y_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se crea un plan
hipfftHandle plan;
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_Z2Z,Dip*Dop);
//Ejecucin del plan
hipfftExecZ2Z(plan,in,out,HIPFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
hipMemcpy(z_device,out,sizeof(hipfftDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se destruye el plan
hipfftDestroy(plan);
//Se liberan los arreglos "in" y "out"
hipFree(in);
hipFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
hipMemcpy(z_host,z_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCreal(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimag(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Host helper function that computes the output stage on the device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////OUTPUT STAGE//////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int m;
//Asignacin de memoria en el device para "X"
hipMalloc((void**)&X_device,Lo*sizeof(hipDoubleComplex));
//Asignacin de memoria en el host para "X"
X_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*Lo);
//Dimensionamiento del grid para la funcin kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
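//Note: the launch configuration above assigns one thread per output index k, with
//gridDim.x*blockDim.x >= Lo; the kernel guards with (k < Lo), so rounding the grid up is safe.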
//Lanzamiento del kernel "outputStage_kernel"
hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
hipMemcpy(X_host,X_device,sizeof(hipDoubleComplex)*Lo,hipMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %f + (%f)",m,cuCreal(X_host[m]),cuCimag(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,hipDoubleComplex *z,hipDoubleComplex *W,hipDoubleComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
hipDoubleComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
//printf("\nk = %d,k_aux = %d,k2 = %d,k1 = %d",k,k_aux,k2,k1);
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCadd(X[k],cuCmul(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
/*
if(k == 256)
{
printf("\nW = %d, k = %d,k_aux = %d,k2 = %d,k1 = %d, b= %d,z= %d",(((k2+(P*(b)))*Dip)%N)-1,k,k_aux,k2,k1,b,(k1*Dop*P)+((Dop-1)*P)+ (k2%P));
}
*/
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCadd(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
t4 = cuCsub(t3,t2);
/*
if(k == 256)
{
printf("\nW= %d",(((k2+(P*(b)))*Dip)%N)-1);
}
*/
}
if(n1 == (Dop-1))
{
t5 = cuCadd(z[(k1*Dop*P)+(k2%P)],t4);
X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
t5 = cuCadd(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
| ba5d1573cd6a174c00fc2094ed2becb534849d49.cu | ///Este programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD (VARIABLES TIPO DOUBLE)
///(21/01/2017)
///Ésta versión sirve para graficar en matlab los errores absolutos y relativos Caso: N=(2^5)x(3^4)x(5^4), Li=800,000, Lo=800,000
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuDoubleComplex *x,cuDoubleComplex *W,cuDoubleComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuDoubleComplex *z,cuDoubleComplex *W,cuDoubleComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuDoubleComplex *x_host;
cuDoubleComplex *W_host;
//cuDoubleComplex *y_host;
//cuDoubleComplex *z_host;
cuDoubleComplex *X_host;
cuDoubleComplex *x_device;
cuDoubleComplex *W_device;
cuDoubleComplex *y_device;
cuDoubleComplex *z_device;
cuDoubleComplex *X_device;
cufftDoubleComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de N_max
const int N_max = 1620000;
///Ingrese el valor de Li_max
const int Li_max = 800000;
///Ingrese el valor de Lo_max
const int Lo_max = 800000;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIÓN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
cudaSetDevice(0);
cudaGetDevice(&device);
if(device == 1)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
}
if(device == 0)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom,m;
double *parte_real;
double *parte_imag;
//float suma;
//float promedio[N_max];
FILE *da,*db;
//da = fopen("Tiempos_N20_LiN_LoVARIA_CUDA.bin","a+b"); //Crea o sobre escribe archivo
da = fopen("Resultados_NCompuesta_Li800000_Lo800000_real_CUDA_DO.bin","a+b"); //Crea o sobre escribe archivo
db = fopen("Resultados_NCompuesta_Li800000_Lo800000_imag_CUDA_DO.bin","a+b"); //Crea o sobre escribe archivo
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = i_N;
//N = N_max;
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(k_res=Lo_max;k_res <= Lo_max;k_res++)
{
Lo=k_res;
printf("\n Li = %d Lo = %d",Li,Lo);
//////////////////////////////////////////////////////////
parte_real = (double*) malloc(Lo*sizeof(double));
parte_imag = (double*) malloc(Lo*sizeof(double));
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = 0.0;
parte_imag[m] = 0.0;
}
///Se abre el archivo binario
db_open = fopen("Entrada_real_NCompuesta_C.bin","rb");
dc_open = fopen("Entrada_imag_NCompuesta_C.bin","rb");
//suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
cudaEvent_t start_app, stop_app;
cudaEventCreate(&start_app);
cudaEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li);
///Se genera el arreglo W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
cudaEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//printf("\n Li = %d Lo = %d",Li,Lo);
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
///SUMATORIAS
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = parte_real[m] + cuCreal(X_host[m]);
parte_imag[m] = parte_imag[m] + cuCimag(X_host[m]);
//printf("\n X[%d] = %.4f + (%.4f)",m,creal(X[m]),cimag(X[m]));
//fprintf(dc,"%f %f\n",creal(X[m]),cimag(X[m]));
}
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
cudaEventRecord(stop_app,0);
cudaEventSynchronize(stop_app);
cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
//suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
cudaEventDestroy(start_app);
cudaEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
cudaFree(x_device);
cudaFree(W_device);
cudaFree(y_device);
cudaFree(z_device);
cudaFree(X_device);
}
///////////////////////////////////
///PROMEDIO DE ERRORES
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = parte_real[m]/loop;
parte_imag[m] = parte_imag[m] /loop;
}
//////////////////////////////////
///Se imprimen los resultados en los archivos binarios
fwrite(parte_real,sizeof(double),Lo,da);
fwrite(parte_imag,sizeof(double),Lo,db);
//promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
free(parte_real);
free(parte_imag);
}
}
}
//fwrite(promedio,sizeof(float),N_max,da);
fclose(da);
fclose(db);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
void vector_entrada_xn(int Li)
{
//Declaración de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuDoubleComplex((double)buffer_real[k],(double)buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCreal(x_host[k]),cuCimag(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//Ésta función genera el arreglo W
void arreglo_W(int N)
{
//Declaración de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*N);
//Se genera el arreglo W
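//Note: W[n-1] = cos(2*pi*n/N) - j*sin(2*pi*n/N) = exp(-j*2*pi*n/N) for n = 1..N; the 1-based
//shift is why the kernels later index the table as W[((...)%N)-1].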
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuDoubleComplex((double)cos((2*CUDART_PI*n)/N),(double)(-1)*sin((2*CUDART_PI*n)/N));
/*
if(n == 255)
{
printf("\nW[%d] = %f + %f",n-1,cuCrealf(W_host[n-1]),cuCimagf(W_host[n-1]));
}
*/
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCreal(W_host[n]),cuCimag(W_host[n]));
}
*/
}
//Ésta función genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaración de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
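//Note: the search below walks every candidate Dip built from the prime factors of N (Dipt),
//restricted to Dip <= N/Li, rebuilds the complementary factor combinations for Dop, and keeps the
//(Dip,Dop) pair whose Euclidean distance to the ideal ratios (N/Li, N/Lo) is smallest.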
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//Ésta función encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,n1,n2;
//Asignación de memoria en el device para el arreglo "x_device"
cudaMalloc((void**)&x_device,Li*sizeof(cuDoubleComplex));
//Se reserva memoria en el device para el arreglo "W_device"
cudaMalloc((void**)&W_device,N*sizeof(cuDoubleComplex));
//Asignación de memoria en el device para el arreglo "y"
cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuDoubleComplex));
//Se pasa el arreglo x_host a x_device
cudaMemcpy(x_device,x_host,Li*sizeof(cuDoubleComplex),cudaMemcpyHostToDevice);
//Envío de los arreglos W hacia la memoria global del device
cudaMemcpy(W_device,W_host,N*sizeof(cuDoubleComplex),cudaMemcpyHostToDevice);
//Asignación de memoria en el host para "y"
//y_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la función kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
cudaMemcpy(y_host,y_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCreal(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimag(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//función kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuDoubleComplex *x,cuDoubleComplex *W,cuDoubleComplex *y)
{
int n1,n2;
cuDoubleComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generación de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmul(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuDoubleComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuDoubleComplex));
//Asignación de memoria en el host para "z"
//z_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
cudaMalloc((void**)&in,sizeof(cufftDoubleComplex)*P*Dip*Dop);
cudaMalloc((void**)&out,sizeof(cufftDoubleComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
cudaMemcpy(in,y_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se crea un plan
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_Z2Z,Dip*Dop);
//Ejecución del plan
cufftExecZ2Z(plan,in,out,CUFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
cudaMemcpy(z_device,out,sizeof(cufftDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se destruye el plan
cufftDestroy(plan);
//Se liberan los arreglos "in" y "out"
cudaFree(in);
cudaFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
cudaMemcpy(z_host,z_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCreal(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimag(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Función auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int m;
//Asignación de memoria en el device para "X"
cudaMalloc((void**)&X_device,Lo*sizeof(cuDoubleComplex));
//Asignación de memoria en el host para "X"
X_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*Lo);
//Dimensionamiento del grid para la función kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
cudaMemcpy(X_host,X_device,sizeof(cuDoubleComplex)*Lo,cudaMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %f + (%f)",m,cuCreal(X_host[m]),cuCimag(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuDoubleComplex *z,cuDoubleComplex *W,cuDoubleComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
cuDoubleComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
//printf("\nk = %d,k_aux = %d,k2 = %d,k1 = %d",k,k_aux,k2,k1);
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCadd(X[k],cuCmul(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
/*
if(k == 256)
{
printf("\nW = %d, k = %d,k_aux = %d,k2 = %d,k1 = %d, b= %d,z= %d",(((k2+(P*(b)))*Dip)%N)-1,k,k_aux,k2,k1,b,(k1*Dop*P)+((Dop-1)*P)+ (k2%P));
}
*/
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCadd(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
t4 = cuCsub(t3,t2);
/*
if(k == 256)
{
printf("\nW= %d",(((k2+(P*(b)))*Dip)%N)-1);
}
*/
}
if(n1 == (Dop-1))
{
t5 = cuCadd(z[(k1*Dop*P)+(k2%P)],t4);
X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
t5 = cuCadd(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
|
0931c91dc25852978f1910f1d82b53e29babb70b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "innerProd.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *aa = NULL;
hipMalloc(&aa, XSIZE*YSIZE);
float *bb = NULL;
hipMalloc(&bb, XSIZE*YSIZE);
float *cc = NULL;
hipMalloc(&cc, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
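// Note: iXSIZE/iYSIZE have been rounded up to multiples of BLOCKX/BLOCKY above, so the grid
// computed below tiles the whole (padded) XSIZE x YSIZE domain with full blocks.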
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((innerProd), dim3(gridBlock), dim3(threadBlock), 0, 0, aa, bb, cc);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((innerProd), dim3(gridBlock), dim3(threadBlock), 0, 0, aa, bb, cc);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((innerProd), dim3(gridBlock), dim3(threadBlock), 0, 0, aa, bb, cc);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0931c91dc25852978f1910f1d82b53e29babb70b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "innerProd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *aa = NULL;
cudaMalloc(&aa, XSIZE*YSIZE);
float *bb = NULL;
cudaMalloc(&bb, XSIZE*YSIZE);
float *cc = NULL;
cudaMalloc(&cc, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
innerProd<<<gridBlock,threadBlock>>>(aa,bb,cc);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
innerProd<<<gridBlock,threadBlock>>>(aa,bb,cc);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
innerProd<<<gridBlock,threadBlock>>>(aa,bb,cc);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
492abf0b33f7d4d4cbfe94424aee5abe4e1266b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#define GPU
//#define CUDNN
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, binary);
check_error(hipPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += abs(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary);
check_error(hipPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += abs(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
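// Note (assumption): this is per-filter weight binarization in the XNOR-Net style -- every weight
// of filter f is replaced by +/- mean(|w_f|), keeping one scale per filter; the XNOR-Net
// attribution is inferred from the l.xnor / l.binary flags used below, not stated by the author.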
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary);
check_error(hipPeekAtLastError());
}
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
#ifdef CUDNN
float one = 1;
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
#else
int i;
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
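// Note: the forward pass is im2col + GEMM -- A (m x k) holds the filters, B (k x n) the unrolled
// input patches produced by im2col_ongpu, and C (m x n) accumulates one output feature map per
// filter for each image in the batch.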
for(i = 0; i < l.batch; ++i){
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
float * a = l.weights_gpu;
float * b = state.workspace;
float * c = l.output_gpu;
gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
}
#endif
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
}
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
}
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
//constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
} else {
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
}
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
#else
int m = l.n;
int n = l.size*l.size*l.c;
int k = l.out_w*l.out_h;
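// Note: in the loop below, the weight gradient is delta times the transposed im2col patches, and
// the input gradient is W^T times delta, scattered back onto the input shape with col2im_ongpu.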
int i;
for(i = 0; i < l.batch; ++i){
float * a = l.delta_gpu;
float * b = state.workspace;
float * c = l.weight_updates_gpu;
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
float * a = l.weights_gpu;
float * b = l.delta_gpu;
float * c = state.workspace;
gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
if(l.binary || l.xnor) {
swap_binary(&l);
}
if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
#endif
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
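// Note: Adam-style update -- m and v are first decayed by B1/B2, the (weight-decayed) gradient is
// folded into m and its elementwise square into v, then adam_gpu applies the step with rate
// learning_rate/batch before the gradient buffer is cleared.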
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
}
| 492abf0b33f7d4d4cbfe94424aee5abe4e1266b1.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#define GPU
//#define CUDNN
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary);
check_error(cudaPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += abs(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary);
check_error(cudaPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += abs(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary);
check_error(cudaPeekAtLastError());
}
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
#ifdef CUDNN
float one = 1;
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
#else
int i;
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
for(i = 0; i < l.batch; ++i){
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
float * a = l.weights_gpu;
float * b = state.workspace;
float * c = l.output_gpu;
gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
}
#endif
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
}
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
}
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
//constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
} else {
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
}
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
#else
int m = l.n;
int n = l.size*l.size*l.c;
int k = l.out_w*l.out_h;
int i;
for(i = 0; i < l.batch; ++i){
float * a = l.delta_gpu;
float * b = state.workspace;
float * c = l.weight_updates_gpu;
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
float * a = l.weights_gpu;
float * b = l.delta_gpu;
float * c = state.workspace;
gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
if(l.binary || l.xnor) {
swap_binary(&l);
}
if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
#endif
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
}
|
c020405571f851239b1fc70dab9fba85c4e41235.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcNormalWHA.cu
*
* Created on: 02-07-2015
* Author: Kamil Szewc ([email protected])
*/
#include <math.h>
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/interactions.cuh"
__device__ static real2 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
real q = hypot(dpos.x, dpos.y) * par->I_H;
if (q < 2.0) {
real gkx = grad_of_kern(dpos.x, q, par->I_H);
real gky = grad_of_kern(dpos.y, q, par->I_H);
real put = 0.0;
if (p[i].c != p[j].c)
{
put = p[j].d/(p[i].d + p[j].d);
}
put *= (p[i].o + p[j].o);
return MAKE_REAL2(put*gkx, put*gky);
}
else {
return MAKE_REAL2(0.0, 0.0);
}
}
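// Note (assumption): the pairwise term above is non-zero only across phases (p[i].c != p[j].c), so
// the kernel below effectively accumulates a smoothed colour-function gradient; scaling by d/m
// gives the interface normal (n.x, n.y) and n.z stores its magnitude, as in CSF-type
// surface-tension models. This interpretation is inferred from the code, not stated by the author.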
__global__ void calcNormalWHA(Particle *p,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par)
{
uint index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < par->N) {
register real2 result = MAKE_REAL2(0.0,0.0);
#include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh"
p[index].n.x = result.x * p[index].d / p[index].m;
p[index].n.y = result.y * p[index].d / p[index].m;
p[index].n.z = sqrt(pow2(result.x) + pow2(result.y));
/*uint originalIndex = gridParticleIndex[index];
if (p[originalIndex].c == 0)
{
p[originalIndex].n.x = -result.x * p[originalIndex].d / p[originalIndex].m;
p[originalIndex].n.y = -result.y * p[originalIndex].d / p[originalIndex].m;
p[originalIndex].n.z = sqrt(pow2(p[originalIndex].n.x) + pow2(p[originalIndex].n.y));
}
else
{
p[originalIndex].n.x = result.x * p[originalIndex].d / p[originalIndex].m;
p[originalIndex].n.y = result.y * p[originalIndex].d / p[originalIndex].m;
p[originalIndex].n.z = sqrt(pow2(p[originalIndex].n.x) + pow2(p[originalIndex].n.y));
}*/
}
}
| c020405571f851239b1fc70dab9fba85c4e41235.cu | /*
* calcNormalWHA.cu
*
* Created on: 02-07-2015
* Author: Kamil Szewc ([email protected])
*/
#include <math.h>
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/interactions.cuh"
__device__ static real2 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
real q = hypot(dpos.x, dpos.y) * par->I_H;
if (q < 2.0) {
real gkx = grad_of_kern(dpos.x, q, par->I_H);
real gky = grad_of_kern(dpos.y, q, par->I_H);
real put = 0.0;
if (p[i].c != p[j].c)
{
put = p[j].d/(p[i].d + p[j].d);
}
put *= (p[i].o + p[j].o);
return MAKE_REAL2(put*gkx, put*gky);
}
else {
return MAKE_REAL2(0.0, 0.0);
}
}
__global__ void calcNormalWHA(Particle *p,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par)
{
uint index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < par->N) {
register real2 result = MAKE_REAL2(0.0,0.0);
#include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh"
p[index].n.x = result.x * p[index].d / p[index].m;
p[index].n.y = result.y * p[index].d / p[index].m;
p[index].n.z = sqrt(pow2(result.x) + pow2(result.y));
/*uint originalIndex = gridParticleIndex[index];
if (p[originalIndex].c == 0)
{
p[originalIndex].n.x = -result.x * p[originalIndex].d / p[originalIndex].m;
p[originalIndex].n.y = -result.y * p[originalIndex].d / p[originalIndex].m;
p[originalIndex].n.z = sqrt(pow2(p[originalIndex].n.x) + pow2(p[originalIndex].n.y));
}
else
{
p[originalIndex].n.x = result.x * p[originalIndex].d / p[originalIndex].m;
p[originalIndex].n.y = result.y * p[originalIndex].d / p[originalIndex].m;
p[originalIndex].n.z = sqrt(pow2(p[originalIndex].n.x) + pow2(p[originalIndex].n.y));
}*/
}
}
|
cb49922306c42fea0adb71c3b3d4e04ffb1bb05b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// $ nvcc -arch=sm_30 hw4-1b.cu -o hw4-1b -Xcompiler -fopenmp
// flag -Xcompiler passes next flag directly to compiler
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
//#include <hip/hip_cooperative_groups.h>
//using namespace cooperative_groups;
#define BLOCK_SIZE 1024
void vec_mat_mult(double* c, const double* a, const double* b, long N, long M){
for(long j = 0; j < M; j++) {
#pragma omp parallel for schedule(static) reduction(+:c[j])
for (long i = 0; i < N; i++) {
c[j] += a[j*M + i] * b[i];
}
}
}
//THIS IS THE ONE USED. NOT THE ONE ABOVE
void vec_mat_mult2dim(double* c, const double* a, const double* b, long N, long M){
#pragma omp parallel for reduction(+:c[0:M])
for(long j = 0; j < M; j++) {
for (long i = 0; i < N; i++) {
c[j] += a[j*N + i] * b[i];
}
}
}
__global__ void vec_mat_mult_kernel(double* c, const double* a, const double* b, long N, long M){
//THIS HAS A RACE CONDITION when updating c[idx/N]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x == 1) printf("hello from thread %d of block %d\n", threadIdx.x, blockIdx.x);
if (idx < M*N) {
//printf("c[%ld] += a[%d] * b[%ld] == %f += %f * %f\n", idx/N, idx, idx%N, c[idx/N], a[idx], b[idx%N]);
c[idx/N] += a[idx] * b[idx%N];
}
}
__device__ void warpReduce(volatile double* smem, int tid){
//Loop unrolling
smem[tid] += smem[tid + 32];
smem[tid] += smem[tid + 16];
smem[tid] += smem[tid + 8];
smem[tid] += smem[tid + 4];
smem[tid] += smem[tid + 2];
smem[tid] += smem[tid + 1];
}
__global__ void mat_product_kernel(double* c, const double* a, const double* b, long N, long M){
__shared__ double smem[BLOCK_SIZE];//WILL THIS CAUSE INITIALIZATION ERROR? UNDEFINED.
int tid = threadIdx.x;
smem[tid] = 0;
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
int width_of_row = (((int)((N + BLOCK_SIZE - 1)/BLOCK_SIZE))*BLOCK_SIZE);
int m = (idx / width_of_row);//Change to LONG if needed??????
int n = (idx % width_of_row);
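// Note: the flat index idx is mapped onto an M x width_of_row layout in which each matrix row is
// padded up to a multiple of BLOCK_SIZE, so no block ever straddles two rows; threads with n >= N
// simply contribute 0 to the block reduction below.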
if (n < N){
smem[tid] = a[m * N + n] * b[n];
//printf("Block %d: smem[%d] = %f\n", blockIdx.x, tid, smem[tid]);
} else smem[tid] = 0;
//if(threadIdx.x == 524) printf("hello from thread %d of block %d\n", threadIdx.x, blockIdx.x);
__syncthreads();
for(unsigned int s = blockDim.x/2; s>0; s>>=1){
if(tid < s) {
smem[tid] += smem[tid+s];
}
__syncthreads();
}
//if(tid <32) warpReduce(smem, tid);
if(tid == 0){
c[blockIdx.x] = smem[tid];
//printf("Block %d: Updated c[%d] to: %f\n", blockIdx.x, blockIdx.x, smem[tid]);
//if(blockIdx.x == 0)
// printf("For above calculations: width_of_row = %d\tN = %ld\tM = %ld\n", width_of_row, N, M);
}
}
__global__ void reduction_kernel1(double* sum, const double* a, long N, long M){
__shared__ double smem[BLOCK_SIZE];
int tid = threadIdx.x;
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
int width_of_row = (((int)((N + BLOCK_SIZE - 1)/BLOCK_SIZE))*BLOCK_SIZE);
int m = (idx / width_of_row);//Change to LONG if needed??????
int n = (idx % width_of_row);
if (n < N){
smem[tid] = a[m * N + n];
//printf("Reduction kernel: Block %d: smem[%d] = %f\n", blockIdx.x, tid, smem[tid]);
} else smem[tid] = 0;
__syncthreads();
for(int s = blockDim.x/2; s > 0; s >>=1) {
if(tid < s)
smem[tid] += smem[tid + s];
__syncthreads();
}
if (tid == 0){
sum[blockIdx.x] = smem[tid];
//printf("Block %d: Updated sum[%d] to: %f\n", blockIdx.x, blockIdx.x, smem[tid]);
//if(blockIdx.x == 0)
// printf("For above calculations: width_of_row = %d\tN = %ld\tM = %ld\n", width_of_row, N, M);
}
}
void Check_CUDA_Error(const char *message){
hipError_t error = hipGetLastError();
if(error!=hipSuccess) {
fprintf(stderr,"ERROR: %s: %s\n", message, hipGetErrorString(error) );
exit(-1);
}
}
int main() {
long N = (1UL<<25);//323*BLOCK_SIZE;//(1UL<<17); // 2^25
long M = 17;
printf("\nFor Matrix Vector multiplication with N = %ld\t M = %ld\n", N, M);
double* x = (double*) malloc(M * N * sizeof(double));
double* y = (double*) malloc(N * sizeof(double));
double* z = (double*) malloc(M * sizeof(double));
double* z_ref = (double*) malloc(M * sizeof(double));
#pragma omp parallel for schedule(static)
for (long i = 0; i < N; i++) {
for (long j = 0; j < M; j++) {
x[i+j*N] = i+2;
}
y[i] = 1.0/(i+1);
}
for (long j = 0; j < M; j++) {
z[j] = 0;
z_ref[j] = 0;
}
double tt = omp_get_wtime();
vec_mat_mult2dim(z_ref, x, y, N, M);
printf("CPU Bandwidth = %f GB/s\n", (M*N + M + N) *sizeof(double) / (omp_get_wtime()-tt)/1e9);
//printf("Mat Mult[0] from CPU: %f\n", z_ref[0]);
//printf("Mat Mult[%ld] from CPU: %f\n", M-1, z_ref[M-1]);
double *x_d, *y_d, *mat_d;
hipMalloc(&x_d, M*N*sizeof(double));
Check_CUDA_Error("malloc x failed");
hipMalloc(&y_d, N*sizeof(double));
long N_work = 1;
for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
N_work *= M;
//printf("Size of buffer memory: %ld doubles\n", N_work);
hipMalloc(&mat_d, M * N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks
tt = omp_get_wtime();
hipMemcpy(x_d, x, M*N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(y_d, y, N*sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
double* mat_product_d = mat_d;
long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
//Dot Product + A bit of reduction
//Will be Following page 18 of https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
hipLaunchKernelGGL(( mat_product_kernel), dim3((N*M+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, mat_product_d, x_d, y_d, N, M);
hipDeviceSynchronize();
//It's just reduction now onwards.
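// Each mat_product_kernel block wrote one partial sum per (row, block) pair into mat_product_d;
// the loop below repeatedly reduces those Nb partials per row, with reduction_kernel1 writing its
// output just past the current chunk, until a single value per row of the M results remains.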
while (Nb > 1) {
long N = Nb;
Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
// printf("Size of buffer memory: %ld doubles\n", Nb * M);
hipLaunchKernelGGL(( reduction_kernel1), dim3((N+BLOCK_SIZE-1)/BLOCK_SIZE * M),dim3(BLOCK_SIZE), 0, 0, mat_product_d + N*M, mat_product_d, N, M);
mat_product_d += N * M;
}
hipMemcpyAsync(z, mat_product_d, M*sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf("GPU Bandwidth = %f GB/s\n", (M*N + M + N) *sizeof(double) / (omp_get_wtime()-tt)/1e9);
//printf("Mat Mult[0] from GPU: %f\n", z[0]);
//printf("Mat Mult[%ld] from GPU: %f\n", M-1, z[M-1]);
double err = 0;
for (long i = 0; i < M; i++) err += fabs(z[i]-z_ref[i]);
printf("Error = %f\n", fabs(err));
hipFree(x_d);
hipFree(y_d);
hipFree(mat_d);
//hipFree(z_d);
free(x);
free(y);
free(z);
free(z_ref);
return 0;
}
| cb49922306c42fea0adb71c3b3d4e04ffb1bb05b.cu | // $ nvcc -arch=sm_30 hw4-1b.cu -o hw4-1b -Xcompiler -fopenmp
// flag -Xcompiler passes next flag directly to compiler
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
//#include <cooperative_groups.h>
//using namespace cooperative_groups;
#define BLOCK_SIZE 1024
void vec_mat_mult(double* c, const double* a, const double* b, long N, long M){
for(long j = 0; j < M; j++) {
#pragma omp parallel for schedule(static) reduction(+:c[j])
for (long i = 0; i < N; i++) {
c[j] += a[j*M + i] * b[i];
}
}
}
//THIS IS THE ONE USED. NOT THE ONE ABOVE
void vec_mat_mult2dim(double* c, const double* a, const double* b, long N, long M){
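//Array-section reductions like reduction(+:c[0:M]) require OpenMP 4.5+:
//each thread gets a private copy of c[0..M-1] that is summed on exit.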
#pragma omp parallel for reduction(+:c[0:M])
for(long j = 0; j < M; j++) {
for (long i = 0; i < N; i++) {
c[j] += a[j*N + i] * b[i];
}
}
}
__global__ void vec_mat_mult_kernel(double* c, const double* a, const double* b, long N, long M){
//THIS HAS A RACE CONDITION when updating c[idx/N]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x == 1) printf("hello from thread %d of block %d\n", threadIdx.x, blockIdx.x);
if (idx < M*N) {
//printf("c[%ld] += a[%d] * b[%ld] == %f += %f * %f\n", idx/N, idx, idx%N, c[idx/N], a[idx], b[idx%N]);
c[idx/N] += a[idx] * b[idx%N];
}
}
__device__ void warpReduce(volatile double* smem, int tid){
//Loop unrolling
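//'volatile' forces every access to go through shared memory rather than
//registers; this relies on implicit warp-synchronous execution, which is only
//safe on pre-Volta GPUs (the call below is commented out).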
smem[tid] += smem[tid + 32];
smem[tid] += smem[tid + 16];
smem[tid] += smem[tid + 8];
smem[tid] += smem[tid + 4];
smem[tid] += smem[tid + 2];
smem[tid] += smem[tid + 1];
}
__global__ void mat_product_kernel(double* c, const double* a, const double* b, long N, long M){
__shared__ double smem[BLOCK_SIZE];//WILL THIS CAUSE INITIALIZATION ERROR? UNDEFINED.
int tid = threadIdx.x;
smem[tid] = 0;
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
int width_of_row = (((int)((N + BLOCK_SIZE - 1)/BLOCK_SIZE))*BLOCK_SIZE);
int m = (idx / width_of_row);//Change to LONG if needed??????
int n = (idx % width_of_row);
if (n < N){
smem[tid] = a[m * N + n] * b[n];
//printf("Block %d: smem[%d] = %f\n", blockIdx.x, tid, smem[tid]);
} else smem[tid] = 0;
//if(threadIdx.x == 524) printf("hello from thread %d of block %d\n", threadIdx.x, blockIdx.x);
__syncthreads();
for(unsigned int s = blockDim.x/2; s>0; s>>=1){
if(tid < s) {
smem[tid] += smem[tid+s];
}
__syncthreads();
}
//if(tid <32) warpReduce(smem, tid);
if(tid == 0){
c[blockIdx.x] = smem[tid];
//printf("Block %d: Updated c[%d] to: %f\n", blockIdx.x, blockIdx.x, smem[tid]);
//if(blockIdx.x == 0)
// printf("For above calculations: width_of_row = %d\tN = %ld\tM = %ld\n", width_of_row, N, M);
}
}
__global__ void reduction_kernel1(double* sum, const double* a, long N, long M){
__shared__ double smem[BLOCK_SIZE];
int tid = threadIdx.x;
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
int width_of_row = (((int)((N + BLOCK_SIZE - 1)/BLOCK_SIZE))*BLOCK_SIZE);
int m = (idx / width_of_row);//Change to LONG if needed??????
int n = (idx % width_of_row);
if (n < N){
smem[tid] = a[m * N + n];
//printf("Reduction kernel: Block %d: smem[%d] = %f\n", blockIdx.x, tid, smem[tid]);
} else smem[tid] = 0;
__syncthreads();
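//Tree reduction in shared memory: each pass halves the active stride s, so
//after log2(blockDim.x) passes smem[0] holds this block's partial sum.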
for(int s = blockDim.x/2; s > 0; s >>=1) {
if(tid < s)
smem[tid] += smem[tid + s];
__syncthreads();
}
if (tid == 0){
sum[blockIdx.x] = smem[tid];
//printf("Block %d: Updated sum[%d] to: %f\n", blockIdx.x, blockIdx.x, smem[tid]);
//if(blockIdx.x == 0)
// printf("For above calculations: width_of_row = %d\tN = %ld\tM = %ld\n", width_of_row, N, M);
}
}
void Check_CUDA_Error(const char *message){
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess) {
fprintf(stderr,"ERROR: %s: %s\n", message, cudaGetErrorString(error) );
exit(-1);
}
}
int main() {
long N = (1UL<<25);//323*BLOCK_SIZE;//(1UL<<17); // 2^25
long M = 17;
printf("\nFor Matrix Vector multiplication with N = %ld\t M = %ld\n", N, M);
double* x = (double*) malloc(M * N * sizeof(double));
double* y = (double*) malloc(N * sizeof(double));
double* z = (double*) malloc(M * sizeof(double));
double* z_ref = (double*) malloc(M * sizeof(double));
#pragma omp parallel for schedule(static)
for (long i = 0; i < N; i++) {
for (long j = 0; j < M; j++) {
x[i+j*N] = i+2;
}
y[i] = 1.0/(i+1);
}
for (long j = 0; j < M; j++) {
z[j] = 0;
z_ref[j] = 0;
}
double tt = omp_get_wtime();
vec_mat_mult2dim(z_ref, x, y, N, M);
printf("CPU Bandwidth = %f GB/s\n", (M*N + M + N) *sizeof(double) / (omp_get_wtime()-tt)/1e9);
//printf("Mat Mult[0] from CPU: %f\n", z_ref[0]);
//printf("Mat Mult[%ld] from CPU: %f\n", M-1, z_ref[M-1]);
double *x_d, *y_d, *mat_d;
cudaMalloc(&x_d, M*N*sizeof(double));
Check_CUDA_Error("malloc x failed");
cudaMalloc(&y_d, N*sizeof(double));
long N_work = 1;
for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
N_work *= M;
//printf("Size of buffer memory: %ld doubles\n", N_work);
cudaMalloc(&mat_d, M * N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks
tt = omp_get_wtime();
cudaMemcpy(x_d, x, M*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(y_d, y, N*sizeof(double), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
double* mat_product_d = mat_d;
long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
//Dot Product + A bit of reduction
//Will be Following page 18 of https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
mat_product_kernel<<<(N*M+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>>(mat_product_d, x_d, y_d, N, M);
cudaDeviceSynchronize();
//From here on, it's just reduction.
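//Each pass shrinks the N partial sums per row by a factor of BLOCK_SIZE and
//writes the next level just past the current one (offset N*M), so
//mat_product_d always points at the newest partial sums. When Nb reaches 1,
//only M values remain -- one dot product per matrix row.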
while (Nb > 1) {
long N = Nb;
Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
// printf("Size of buffer memory: %ld doubles\n", Nb * M);
reduction_kernel1<<<(N+BLOCK_SIZE-1)/BLOCK_SIZE * M,BLOCK_SIZE>>>(mat_product_d + N*M, mat_product_d, N, M);
mat_product_d += N * M;
}
cudaMemcpyAsync(z, mat_product_d, M*sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf("GPU Bandwidth = %f GB/s\n", (M*N + M + N) *sizeof(double) / (omp_get_wtime()-tt)/1e9);
//printf("Mat Mult[0] from GPU: %f\n", z[0]);
//printf("Mat Mult[%ld] from GPU: %f\n", M-1, z[M-1]);
double err = 0;
for (long i = 0; i < M; i++) err += fabs(z[i]-z_ref[i]);
printf("Error = %f\n", fabs(err));
cudaFree(x_d);
cudaFree(y_d);
cudaFree(mat_d);
//cudaFree(z_d);
free(x);
free(y);
free(z);
free(z_ref);
return 0;
}
|
885f36543c25c384fe5a0d367a74cd48daf975e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include "SyncedMemory.h"
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
__global__ void SomeTransform(char *input_gpu, int fsize) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < fsize && input_gpu[idx] != '\n') {
input_gpu[idx] = '!';
}
}
__global__ void MyTransform(char *input_gpu, int fsize) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < fsize && input_gpu[idx] != '\n') {
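// ASCII 65-90 is 'A'-'Z' and 97-122 is 'a'-'z'; adding or subtracting 32
// moves a letter between the two ranges, i.e. toggles its case.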
if(int(input_gpu[idx]) > 64 && input_gpu[idx] < 91)
{input_gpu[idx] = char(int(input_gpu[idx]) + 32);}
else if(int(input_gpu[idx]) > 96 && input_gpu[idx] < 123)
{input_gpu[idx] = char(int(input_gpu[idx]) - 32);}
}
}
int main(int argc, char **argv)
{
// init, and check
if (argc != 2) {
printf("Usage %s <input text file>\n", argv[0]);
abort();
}
FILE *fp = fopen(argv[1], "r");
if (! fp) {
printf("Cannot open %s", argv[1]);
abort();
}
// get file size
fseek(fp, 0, SEEK_END);
size_t fsize = ftell(fp);
fseek(fp, 0, SEEK_SET);
// read files
MemoryBuffer<char> text(fsize+1);
auto text_smem = text.CreateSync(fsize);
CHECK;
fread(text_smem.get_cpu_wo(), 1, fsize, fp);
text_smem.get_cpu_wo()[fsize] = '\0';
fclose(fp);
// TODO: do your transform here
char *input_gpu = text_smem.get_gpu_rw();
// An example: transform the first 64 characters to '!'
// Don't transform over the tail
// And don't transform the line breaks
//SomeTransform<<<100, 64>>>(input_gpu, fsize);
//MyTransform converts lowercase characters to uppercase and uppercase characters to lowercase
hipLaunchKernelGGL(( MyTransform), dim3(100), dim3(64), 0, 0, input_gpu, fsize);
puts(text_smem.get_cpu_ro());
return 0;
}
| 885f36543c25c384fe5a0d367a74cd48daf975e0.cu | #include <cstdio>
#include <cstdlib>
#include "SyncedMemory.h"
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
__global__ void SomeTransform(char *input_gpu, int fsize) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < fsize && input_gpu[idx] != '\n') {
input_gpu[idx] = '!';
}
}
__global__ void MyTransform(char *input_gpu, int fsize) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < fsize && input_gpu[idx] != '\n') {
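// ASCII 65-90 is 'A'-'Z' and 97-122 is 'a'-'z'; adding or subtracting 32
// moves a letter between the two ranges, i.e. toggles its case.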
if(int(input_gpu[idx]) > 64 && input_gpu[idx] < 91)
{input_gpu[idx] = char(int(input_gpu[idx]) + 32);}
else if(int(input_gpu[idx]) > 96 && input_gpu[idx] < 123)
{input_gpu[idx] = char(int(input_gpu[idx]) - 32);}
}
}
int main(int argc, char **argv)
{
// init, and check
if (argc != 2) {
printf("Usage %s <input text file>\n", argv[0]);
abort();
}
FILE *fp = fopen(argv[1], "r");
if (! fp) {
printf("Cannot open %s", argv[1]);
abort();
}
// get file size
fseek(fp, 0, SEEK_END);
size_t fsize = ftell(fp);
fseek(fp, 0, SEEK_SET);
// read files
MemoryBuffer<char> text(fsize+1);
auto text_smem = text.CreateSync(fsize);
CHECK;
fread(text_smem.get_cpu_wo(), 1, fsize, fp);
text_smem.get_cpu_wo()[fsize] = '\0';
fclose(fp);
// TODO: do your transform here
char *input_gpu = text_smem.get_gpu_rw();
// An example: transform the first 64 characters to '!'
// Don't transform over the tail
// And don't transform the line breaks
//SomeTransform<<<100, 64>>>(input_gpu, fsize);
//MyTransform converts lowercase characters to uppercase and uppercase characters to lowercase
MyTransform<<<100, 64>>>(input_gpu, fsize);
puts(text_smem.get_cpu_ro());
return 0;
}
|
b2a5ed2cbebba3e08f9d90a9b886a9eb3c62091b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision$
// $Date$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include "cuda_util.h"
#include "cudpp_globals.h"
#include "cudpp.h"
#include "cudpp_util.h"
#include "cudpp_plan.h"
#include "kernel/compress_kernel.cuh"
/**
* @file
* compress_app.cu
*
* @brief CUDPP application-level compress routines
*/
/** \addtogroup cudpp_app
* @{
*/
/** @name Compress Functions
* @{
*/
/** @brief Perform Huffman encoding
*
*
* Performs Huffman encoding on the input data stream. The input data
* stream is the output data stream from the previous stage (MTF) in our
* compress stream.
*
* The input is given by the output of the Move-to-Front transform (MTF).
* There are a few things that need to be stored along with the compressed
* data. We also store the word offset of the compressed data stream because
* our data is compressed into independent blocks (word granularity) so that
* they can be encoded and decoded in parallel. The number of independent blocks
* is HUFF_THREADS_PER_BLOCK*HUFF_WORK_PER_THREAD.
*
*
* @param[out] d_hist Histogram array of the input data stream used for decoding.
* @param[out] d_encodeOffset An array of the word offsets of the independent compressed data blocks.
* @param[out] d_compressedSize Pointer to the total size in words of all compressed data blocks combined.
* @param[out] d_compressed A pointer to the compressed data blocks.
* @param[in] numElements Total number of input elements to compress.
* @param[in] plan Pointer to the plan object used for this compress.
*
*/
void huffmanEncoding(unsigned int *d_hist,
unsigned int *d_encodeOffset,
unsigned int *d_compressedSize,
unsigned int *d_compressed,
size_t numElements,
const CUDPPCompressPlan *plan)
{
unsigned char* d_input = plan->m_d_mtfOut;
//d_hist = plan->m_d_histogram;
//d_encodeOffset = plan->m_d_encodeOffset;
//d_compressedSize = plan->m_d_totalEncodedSize;
//d_compressed = plan->m_d_encodedData;
// Set work dimensions
size_t nCodesPacked = 0;
size_t histBlocks = (numElements%(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST)==0) ?
numElements/(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST) : numElements%(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST)+1;
size_t tThreads = ((numElements%HUFF_WORK_PER_THREAD) == 0) ? numElements/HUFF_WORK_PER_THREAD : numElements/HUFF_WORK_PER_THREAD+1;
size_t nBlocks = ( (tThreads%HUFF_THREADS_PER_BLOCK) == 0) ? tThreads/HUFF_THREADS_PER_BLOCK : tThreads/HUFF_THREADS_PER_BLOCK+1;
dim3 grid_hist(histBlocks, 1, 1);
dim3 threads_hist(HUFF_THREADS_PER_BLOCK_HIST, 1, 1);
dim3 grid_tree(1, 1, 1);
dim3 threads_tree(128, 1, 1);
dim3 grid_huff(nBlocks, 1, 1);
dim3 threads_huff(HUFF_THREADS_PER_BLOCK, 1, 1);
//---------------------------------------
// 1) Build histogram from MTF output
//---------------------------------------
hipLaunchKernelGGL(( huffman_build_histogram_kernel), dim3(grid_hist), dim3(threads_hist), 0, 0,
(unsigned int*)d_input, plan->m_d_histograms, numElements);
CUDA_SAFE_CALL(hipDeviceSynchronize());
//----------------------------------------------------
// 2) Compute final Histogram + Build Huffman codes
//----------------------------------------------------
hipLaunchKernelGGL(( huffman_build_tree_kernel), dim3(grid_tree), dim3(threads_tree), 0, 0,
d_input, plan->m_d_huffCodesPacked, plan->m_d_huffCodeLocations, plan->m_d_huffCodeLengths, plan->m_d_histograms,
d_hist, plan->m_d_nCodesPacked, d_compressedSize, histBlocks, numElements);
CUDA_SAFE_CALL(hipDeviceSynchronize());
//----------------------------------------------
// 3) Main Huffman encoding step (encode data)
//----------------------------------------------
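// The packed code-table size is read back to the host first because it sizes
// the dynamic shared-memory allocation passed to the encoding kernel below.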
CUDA_SAFE_CALL(hipMemcpy((void*)&nCodesPacked, plan->m_d_nCodesPacked, sizeof(size_t), hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( huffman_kernel_en), dim3(grid_huff), dim3(threads_huff), nCodesPacked*sizeof(unsigned char), 0,
(uchar4*)d_input, plan->m_d_huffCodesPacked, plan->m_d_huffCodeLocations, plan->m_d_huffCodeLengths,
plan->m_d_encoded, nCodesPacked, tThreads);
CUDA_SAFE_CALL(hipDeviceSynchronize());
//--------------------------------------------------
// 4) Pack together encoded data to determine how
// much encoded data needs to be transferred
//--------------------------------------------------
hipLaunchKernelGGL(( huffman_datapack_kernel), dim3(grid_huff), dim3(threads_huff), 0, 0,
plan->m_d_encoded, d_compressed, d_compressedSize, d_encodeOffset);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
/** @brief Perform the Move-to-Front Transform (MTF)
*
* Performs a Move-to-Front (MTF) transform on the input data stream.
* The MTF transform is the second stage in our compress pipeline. The
* MTF manipulates the input data stream to improve the performance of
* entropy encoding.
*
* @param[in] d_mtfIn An array of the input data stream to perform the MTF transform on.
* @param[out] d_mtfOut An array to store the output of the MTF transform.
* @param[in] numElements Total number of input elements of the MTF transform.
* @param[in] plan Pointer to the plan object used for this MTF transform.
*
*/
template <class T>
void moveToFrontTransform(unsigned char *d_mtfIn,
unsigned char *d_mtfOut,
size_t numElements,
const T *plan)
{
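// Round numElements up to the next power of two by smearing the highest set
// bit into every lower bit and then adding one.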
unsigned int npad = numElements-1;
npad |= npad >> 1;
npad |= npad >> 2;
npad |= npad >> 4;
npad |= npad >> 8;
npad |= npad >> 16;
npad++;
unsigned int nThreads = MTF_THREADS_BLOCK;
unsigned int nLists = npad/MTF_PER_THREAD;
unsigned int tThreads = npad/MTF_PER_THREAD;
unsigned int offset = 2;
bool fullBlocks = (tThreads%nThreads == 0);
unsigned int nBlocks = (fullBlocks) ? (tThreads/nThreads) : (tThreads/nThreads + 1);
//-------------------------------------------
// Initial MTF lists + Initial Reduction
//-------------------------------------------
// Set work-item dimensions
dim3 grid(nBlocks, 1, 1);
dim3 threads(nThreads, 1, 1);
// Kernel call
hipLaunchKernelGGL(( mtf_reduction_kernel), dim3(grid), dim3(threads), 0, 0,
d_mtfIn, plan->m_d_lists, plan->m_d_list_sizes, nLists, offset, numElements);
CUDA_SAFE_CALL(hipDeviceSynchronize());
if(nBlocks > 1)
{
//----------------------
// MTF Global Reduce
//----------------------
unsigned int init_offset = offset * nThreads;
offset = init_offset;
tThreads = nBlocks/2;
fullBlocks = (tThreads%nThreads == 0);
nBlocks = (fullBlocks) ? (tThreads/nThreads) : (tThreads/nThreads + 1);
// Set work dimensions
dim3 grid_GLred(nBlocks, 1, 1);
dim3 threads_GLred(nThreads, 1, 1);
while(offset <= nLists)
{
hipLaunchKernelGGL(( mtf_GLreduction_kernel), dim3(grid_GLred), dim3(threads_GLred), 0, 0,
plan->m_d_lists, plan->m_d_list_sizes, offset, tThreads, nLists);
CUDA_SAFE_CALL(hipDeviceSynchronize());
offset *= 2*nThreads;
}
//-----------------------------
// MTF Global Down-sweep
//-----------------------------
offset = nLists/4;
unsigned int lastLevel = 0;
// Work-dimensions
dim3 grid_GLsweep(nBlocks, 1, 1);
dim3 threads_GLsweep(nThreads, 1, 1);
while(offset >= init_offset/2)
{
lastLevel = offset/nThreads;
lastLevel = (lastLevel>=(init_offset/2)) ? lastLevel : init_offset/2;
hipLaunchKernelGGL(( mtf_GLdownsweep_kernel), dim3(grid_GLsweep), dim3(threads_GLsweep), 0, 0,
plan->m_d_lists, plan->m_d_list_sizes, offset, lastLevel, nLists, tThreads);
CUDA_SAFE_CALL(hipDeviceSynchronize());
offset = lastLevel/2;
}
}
//------------------------
// Local Scan
//------------------------
tThreads = npad/MTF_PER_THREAD;
offset = 2;
fullBlocks = (tThreads%nThreads == 0);
nBlocks = (fullBlocks) ? (tThreads/nThreads) : (tThreads/nThreads + 1);
dim3 grid_loc(nBlocks, 1, 1);
dim3 threads_loc(nThreads, 1, 1);
hipLaunchKernelGGL(( mtf_localscan_lists_kernel), dim3(grid_loc), dim3(threads_loc), 0, 0,
d_mtfIn, d_mtfOut, plan->m_d_lists, plan->m_d_list_sizes, nLists, offset, numElements);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
/** @brief Perform the Burrows-Wheeler Transform (BWT)
*
* Performs the Burrows-Wheeler Transform (BWT) on a given
* character string. The BWT is an algorithm which is commonly used
* in compression applications, mainly bzip2. The BWT orders the
* characters in such a way that the output tends to have many long
* runs of repeated characters. This bodes well for later stages in
* compression pipelines which perform better with repeated characters.
*
*
* @param[in] d_uncompressed A char array of the input data stream to perform the BWT on.
* @param[out] d_bwtIndex The index at which the original string in the BWT sorts to.
* @param[out] d_bwtOut An array to store the output of the BWT.
* @param[in] numElements Total number of input elements of the BWT.
* @param[in] plan Pointer to the plan object used for this BWT.
*
*
*/
template <class T>
void burrowsWheelerTransform(unsigned char *d_uncompressed,
int *d_bwtIndex,
unsigned char *d_bwtOut,
size_t numElements,
const T *plan)
{
size_t tThreads = (numElements%4 == 0) ? numElements/4 : numElements/4 + 1;
size_t nThreads = BWT_CTA_BLOCK;
bool fullBlocks = (tThreads%nThreads == 0);
uint nBlocks = (fullBlocks) ? (tThreads/nThreads) : (tThreads/nThreads+1);
dim3 grid_construct(nBlocks, 1, 1);
dim3 threads_construct(nThreads, 1, 1);
int numThreads = 64;
int secondBlocks;
size_t count;
size_t mult;
size_t numBlocks;
int initSubPartitions;
int subPartitions;
int step;
// Massage input to create sorting key-value pairs
hipLaunchKernelGGL(( bwt_keys_construct_kernel), dim3(grid_construct), dim3(threads_construct) , 0, 0,
(uchar4*)d_uncompressed, plan->m_d_bwtInRef,
plan->m_d_keys, plan->m_d_values, plan->m_d_bwtInRef2, tThreads);
CUDA_SAFE_CALL(hipDeviceSynchronize());
// First stage -- block sort
nBlocks = numElements/BWT_BLOCKSORT_SIZE;
dim3 grid_blocksort(nBlocks, 1, 1);
dim3 threads_blocksort(BWT_CTA_BLOCK, 1, 1);
hipLaunchKernelGGL(( blockWiseStringSort<unsigned int, 8>), dim3(grid_blocksort), dim3(threads_blocksort), 0, 0,
plan->m_d_keys, plan->m_d_values, (const unsigned int*)plan->m_d_bwtInRef, plan->m_d_bwtInRef2, BWT_BLOCKSORT_SIZE, numElements);
CUDA_SAFE_CALL(hipDeviceSynchronize());
// Start merging blocks
// Second stage -- merge sorted blocks using simple merge
count = 0;
mult = 1;
numBlocks = nBlocks;
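// Six passes of pairwise merging double the sorted run length each time;
// keys/values ping-pong between the primary and *_dev buffers depending on
// the parity of 'count'.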
while(count < 6)
{
if(count%2 == 0)
{
hipLaunchKernelGGL(( simpleStringMerge<unsigned int, 2>), dim3(numBlocks), dim3(BWT_CTASIZE_simple), sizeof(unsigned int)*(2*BWT_INTERSECT_B_BLOCK_SIZE_simple+2), 0,
plan->m_d_keys, plan->m_d_keys_dev, plan->m_d_values, plan->m_d_values_dev,
plan->m_d_bwtInRef, BWT_BLOCKSORT_SIZE*mult, numBlocks*BWT_BLOCKSORT_SIZE, plan->m_d_bwtInRef2, numElements);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
else
{
hipLaunchKernelGGL(( simpleStringMerge<unsigned int, 2>), dim3(numBlocks), dim3(BWT_CTASIZE_simple), sizeof(unsigned int)*(2*BWT_INTERSECT_B_BLOCK_SIZE_simple+2), 0,
plan->m_d_keys_dev, plan->m_d_keys, plan->m_d_values_dev, plan->m_d_values,
plan->m_d_bwtInRef, BWT_BLOCKSORT_SIZE*mult, numBlocks*BWT_BLOCKSORT_SIZE, plan->m_d_bwtInRef2, numElements);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
mult*=2;
count++;
numBlocks /= 2;
}
// Third stage -- merge remaining blocks using multi-merge
initSubPartitions = 2;
subPartitions = initSubPartitions;
secondBlocks = (2*numBlocks*initSubPartitions+numThreads-1)/numThreads;
step = 1;
while (numBlocks > 1)
{
if(count%2 == 1)
{
hipLaunchKernelGGL(( findMultiPartitions<unsigned int>), dim3(secondBlocks), dim3(numThreads), 0, 0,
plan->m_d_keys_dev, subPartitions, numBlocks, BWT_BLOCKSORT_SIZE*mult,
plan->m_d_partitionBeginA, plan->m_d_partitionSizeA, plan->m_d_partitionBeginB, plan->m_d_partitionSizeB, BWT_SIZE);
CUDA_SAFE_CALL(hipDeviceSynchronize());
hipLaunchKernelGGL(( stringMergeMulti<unsigned int, 2>), dim3(numBlocks*subPartitions), dim3(BWT_CTASIZE_multi), (2*BWT_INTERSECT_B_BLOCK_SIZE_multi+5)*sizeof(unsigned int), 0,
plan->m_d_keys_dev, plan->m_d_keys, plan->m_d_values_dev, plan->m_d_values, plan->m_d_bwtInRef2, subPartitions, numBlocks,
plan->m_d_partitionBeginA, plan->m_d_partitionSizeA, plan->m_d_partitionBeginB, plan->m_d_partitionSizeB, BWT_BLOCKSORT_SIZE*mult, numElements);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
else
{
hipLaunchKernelGGL(( findMultiPartitions<unsigned int>), dim3(secondBlocks), dim3(numThreads), 0, 0,
plan->m_d_keys, subPartitions, numBlocks, BWT_BLOCKSORT_SIZE*mult,
plan->m_d_partitionBeginA, plan->m_d_partitionSizeA, plan->m_d_partitionBeginB, plan->m_d_partitionSizeB, BWT_SIZE);
CUDA_SAFE_CALL(hipDeviceSynchronize());
hipLaunchKernelGGL(( stringMergeMulti<unsigned int, 2>), dim3(numBlocks*subPartitions), dim3(BWT_CTASIZE_multi), (2*BWT_INTERSECT_B_BLOCK_SIZE_multi+5)*sizeof(unsigned int), 0,
plan->m_d_keys, plan->m_d_keys_dev, plan->m_d_values, plan->m_d_values_dev, plan->m_d_bwtInRef2, subPartitions, numBlocks,
plan->m_d_partitionBeginA, plan->m_d_partitionSizeA, plan->m_d_partitionBeginB, plan->m_d_partitionSizeB, BWT_BLOCKSORT_SIZE*mult, numElements);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
numBlocks/=2;
subPartitions*=2;
count++;
mult*=2;
step++;
}
// Final stage -- compute BWT and BWT Index using sorted values
if(count%2 == 0)
{
hipLaunchKernelGGL(( bwt_compute_final_kernel), dim3(grid_construct), dim3(threads_construct) , 0, 0,
d_uncompressed, plan->m_d_values, d_bwtIndex, d_bwtOut, numElements, tThreads);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
else
{
hipLaunchKernelGGL(( bwt_compute_final_kernel), dim3(grid_construct), dim3(threads_construct) , 0, 0,
d_uncompressed, plan->m_d_values_dev, d_bwtIndex, d_bwtOut, numElements, tThreads);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
}
/** @brief Wrapper for calling the Burrows-Wheeler Transform (BWT).
*
* This is a wrapper function for calling the BWT. This wrapper is used
* internally via the compress application to call burrowsWheelerTransform().
*
*
* @param[in] d_in A char array of the input data stream to perform the BWT on.
* @param[out] d_bwtIndex The index at which the original string in the BWT sorts to.
* @param[in] numElements Total number of input elements to the compress stream.
* @param[in] plan Pointer to the plan object used for this compress.
*
*
*/
void burrowsWheelerTransformWrapper(unsigned char *d_in,
int *d_bwtIndex,
size_t numElements,
const CUDPPCompressPlan *plan)
{
burrowsWheelerTransform<CUDPPCompressPlan>(d_in, d_bwtIndex, plan->m_d_bwtOut, numElements, plan);
}
/** @brief Wrapper for calling the Burrows-Wheeler Transform (BWT).
*
* This is a wrapper function for calling the BWT. This wrapper is used
* internally via the BWT primitive to call burrowsWheelerTransform().
*
*
* @param[in] d_in A char array of the input data stream to perform the BWT on.
* @param[out] d_bwtIndex The index at which the original string in the BWT sorts to.
* @param[out] d_bwtOut An array to store the output of the BWT.
* @param[in] numElements Total number of input elements to the BWT.
* @param[in] plan Pointer to the plan object used for this BWT.
*
*
*/
void burrowsWheelerTransformWrapper(unsigned char *d_in,
int *d_bwtIndex,
unsigned char *d_bwtOut,
size_t numElements,
const CUDPPBwtPlan *plan)
{
burrowsWheelerTransform<CUDPPBwtPlan>(d_in, d_bwtIndex, d_bwtOut, numElements, plan);
}
/** @brief Wrapper for calling the Move-to-Front (MTF) transform.
*
* This is a wrapper function for calling the MTF. This wrapper is used
* internally via the compress application to call moveToFrontTransform().
*
*
* @param[in] numElements Total number of input elements to the MTF transform.
* @param[in] plan Pointer to the plan object used for this compress.
*
*
*/
void moveToFrontTransformWrapper(size_t numElements,
const CUDPPCompressPlan *plan)
{
moveToFrontTransform<CUDPPCompressPlan>(plan->m_d_bwtOut, plan->m_d_mtfOut, numElements, plan);
}
/** @brief Wrapper for calling the Move-to-Front (MTF) transform.
*
* This is a wrapper function for calling the MTF. This wrapper is used
* internally via the MTF primitive to call moveToFrontTransform().
*
*
* @param[in] d_in An input char array to perform the MTF on.
* @param[out] d_mtfOut An output char array to store the MTF transformed
* stream.
* @param[in] numElements Total number of input elements to the MTF transform.
* @param[in] plan Pointer to the plan object used for this MTF.
*
*
*/
void moveToFrontTransformWrapper(unsigned char *d_in,
unsigned char *d_mtfOut,
size_t numElements,
const CUDPPMtfPlan *plan)
{
moveToFrontTransform<CUDPPMtfPlan>(d_in, d_mtfOut, numElements, plan);
}
#ifdef __cplusplus
extern "C"
{
#endif
/** @brief Allocate intermediate arrays used by BWT.
*
*
* @param [in,out] plan Pointer to CUDPPBwtPlan object containing options and number
* of elements, which is used to compute storage requirements, and
* within which intermediate storage is allocated.
*/
void allocBwtStorage(CUDPPBwtPlan *plan)
{
size_t numElts = plan->m_numElements;
// BWT
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_keys), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_values), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_bwtInRef), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_bwtInRef2), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_keys_dev), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_values_dev), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**)&(plan->m_d_partitionBeginA), 1024*sizeof(int)) );
CUDA_SAFE_CALL(hipMalloc((void**)&(plan->m_d_partitionSizeA), 1024*sizeof(int)) );
CUDA_SAFE_CALL(hipMalloc((void**)&(plan->m_d_partitionBeginB), 1024*sizeof(int)) );
CUDA_SAFE_CALL(hipMalloc((void**)&(plan->m_d_partitionSizeB), 1024*sizeof(int)) );
}
/** @brief Allocate intermediate arrays used by MTF.
*
*
* @param [in,out] plan Pointer to CUDPPMtfPlan object containing
* options and number of elements, which is used
* to compute storage requirements, and within
* which intermediate storage is allocated.
*/
void allocMtfStorage(CUDPPMtfPlan *plan)
{
// Pad the element count up to the next power of two
size_t tmp = plan->m_numElements-1;
tmp |= tmp >> 1;
tmp |= tmp >> 2;
tmp |= tmp >> 4;
tmp |= tmp >> 8;
tmp |= tmp >> 16;
tmp++;
plan->npad = tmp;
// MTF
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_lists), (tmp/MTF_PER_THREAD)*256*sizeof(unsigned char)));
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_list_sizes), (tmp/MTF_PER_THREAD)*sizeof(unsigned short)));
CUDA_SAFE_CALL(hipMemset(plan->m_d_lists, 0, (tmp/MTF_PER_THREAD)*256*sizeof(unsigned char)));
CUDA_SAFE_CALL(hipMemset(plan->m_d_list_sizes, 0, (tmp/MTF_PER_THREAD)*sizeof(unsigned short)));
}
/** @brief Allocate intermediate arrays used by compression.
*
*
* @param [in,out] plan Pointer to CUDPPCompressPlan object
* containing options and number of elements,
* which is used to compute storage
* requirements, and within which intermediate
* storage is allocated.
*/
void allocCompressStorage(CUDPPCompressPlan *plan)
{
size_t numElts = plan->m_numElements;
plan->npad = numElts;
// BWT
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_keys), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_values), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_bwtOut), numElts*sizeof(unsigned char) ));
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_bwtInRef), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_bwtInRef2), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_keys_dev), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_d_values_dev), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(hipMalloc((void**)&(plan->m_d_partitionBeginA), 1024*sizeof(int)) );
CUDA_SAFE_CALL(hipMalloc((void**)&(plan->m_d_partitionSizeA), 1024*sizeof(int)) );
CUDA_SAFE_CALL(hipMalloc((void**)&(plan->m_d_partitionBeginB), 1024*sizeof(int)) );
CUDA_SAFE_CALL(hipMalloc((void**)&(plan->m_d_partitionSizeB), 1024*sizeof(int)) );
// MTF
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_lists), (numElts/MTF_PER_THREAD)*256*sizeof(unsigned char)));
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_list_sizes), (numElts/MTF_PER_THREAD)*sizeof(unsigned short)));
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_mtfOut), numElts*sizeof(unsigned char) ));
// Huffman
size_t numBitsAlloc = HUFF_NUM_CHARS*(HUFF_NUM_CHARS+1)/2;
size_t numCharsAlloc = (numBitsAlloc%8 == 0) ? numBitsAlloc/8 : numBitsAlloc/8 + 1;
size_t histBlocks = (numElts%(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST)==0) ?
numElts/(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST) : numElts%(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST)+1;
size_t tThreads = ((numElts%HUFF_WORK_PER_THREAD) == 0) ? numElts/HUFF_WORK_PER_THREAD : numElts/HUFF_WORK_PER_THREAD+1;
size_t nBlocks = ( (tThreads%HUFF_THREADS_PER_BLOCK) == 0) ? tThreads/HUFF_THREADS_PER_BLOCK : tThreads/HUFF_THREADS_PER_BLOCK+1;
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_huffCodesPacked), numCharsAlloc*sizeof(unsigned char) ));
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_huffCodeLocations), HUFF_NUM_CHARS*sizeof(size_t) ));
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_huffCodeLengths), HUFF_NUM_CHARS*sizeof(unsigned char) ));
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_histograms), histBlocks*256*sizeof(size_t) ));
//CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_histogram), 256*sizeof(size_t) ));
//CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_totalEncodedSize), sizeof(size_t)));
//CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_encodedData), sizeof(size_t)*(HUFF_CODE_BYTES+1)*nBlocks));
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_nCodesPacked), sizeof(size_t)));
CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_encoded), sizeof(encoded)*nBlocks));
//CUDA_SAFE_CALL(hipMalloc( (void**) &(plan->m_d_encodeOffset), sizeof(size_t)*nBlocks));
CUDA_CHECK_ERROR("allocCompressStorage");
}
/** @brief Deallocate intermediate block arrays in a CUDPPCompressPlan object.
*
*
* @param[in,out] plan Pointer to CUDPPCompressPlan object initialized by allocCompressStorage().
*/
void freeCompressStorage(CUDPPCompressPlan *plan)
{
// BWT
CUDA_SAFE_CALL( hipFree(plan->m_d_keys));
CUDA_SAFE_CALL( hipFree(plan->m_d_values));
CUDA_SAFE_CALL( hipFree(plan->m_d_bwtOut));
CUDA_SAFE_CALL( hipFree(plan->m_d_bwtInRef));
CUDA_SAFE_CALL( hipFree(plan->m_d_bwtInRef2));
CUDA_SAFE_CALL( hipFree(plan->m_d_keys_dev));
CUDA_SAFE_CALL( hipFree(plan->m_d_values_dev));
CUDA_SAFE_CALL( hipFree(plan->m_d_partitionBeginA));
CUDA_SAFE_CALL( hipFree(plan->m_d_partitionSizeA));
CUDA_SAFE_CALL( hipFree(plan->m_d_partitionBeginB));
CUDA_SAFE_CALL( hipFree(plan->m_d_partitionSizeB));
// MTF
CUDA_SAFE_CALL( hipFree(plan->m_d_lists));
CUDA_SAFE_CALL( hipFree(plan->m_d_list_sizes));
CUDA_SAFE_CALL( hipFree(plan->m_d_mtfOut));
// Huffman
CUDA_SAFE_CALL(hipFree(plan->m_d_histograms));
//CUDA_SAFE_CALL(hipFree(plan->m_d_histogram));
CUDA_SAFE_CALL(hipFree(plan->m_d_huffCodeLengths));
CUDA_SAFE_CALL(hipFree(plan->m_d_huffCodesPacked));
CUDA_SAFE_CALL(hipFree(plan->m_d_huffCodeLocations));
//CUDA_SAFE_CALL(hipFree(plan->m_d_totalEncodedSize));
//CUDA_SAFE_CALL(hipFree(plan->m_d_encodedData));
CUDA_SAFE_CALL(hipFree(plan->m_d_nCodesPacked));
CUDA_SAFE_CALL(hipFree(plan->m_d_encoded));
//CUDA_SAFE_CALL(hipFree(plan->m_d_encodeOffset));
CUDA_CHECK_ERROR("freeCompressStorage");
}
/** @brief Deallocate intermediate block arrays in a CUDPPBwtPlan object.
*
*
* @param[in,out] plan Pointer to CUDPPBwtPlan object initialized by allocBwtStorage().
*/
void freeBwtStorage(CUDPPBwtPlan *plan)
{
// BWT
CUDA_SAFE_CALL( hipFree(plan->m_d_keys));
CUDA_SAFE_CALL( hipFree(plan->m_d_values));
CUDA_SAFE_CALL( hipFree(plan->m_d_bwtInRef));
CUDA_SAFE_CALL( hipFree(plan->m_d_bwtInRef2));
CUDA_SAFE_CALL( hipFree(plan->m_d_keys_dev));
CUDA_SAFE_CALL( hipFree(plan->m_d_values_dev));
CUDA_SAFE_CALL( hipFree(plan->m_d_partitionBeginA));
CUDA_SAFE_CALL( hipFree(plan->m_d_partitionSizeA));
CUDA_SAFE_CALL( hipFree(plan->m_d_partitionBeginB));
CUDA_SAFE_CALL( hipFree(plan->m_d_partitionSizeB));
}
/** @brief Deallocate intermediate block arrays in a CUDPPMtfPlan object.
*
*
* @param[in,out] plan Pointer to CUDPPMtfPlan object initialized by allocMtfStorage().
*/
void freeMtfStorage(CUDPPMtfPlan *plan)
{
// MTF
CUDA_SAFE_CALL( hipFree(plan->m_d_lists));
CUDA_SAFE_CALL( hipFree(plan->m_d_list_sizes));
}
/** @brief Dispatch function to perform parallel compression on an
* array with the specified configuration.
*
*
* @param[in] d_uncompressed Uncompressed data
* @param[out] d_bwtIndex BWT Index
* @param[out] d_histSize Histogram size
* @param[out] d_hist Histogram
* @param[out] d_encodeOffset Encoded offset table
* @param[out] d_compressedSize Size of compressed data
* @param[out] d_compressed Compressed data
* @param[in] numElements Number of elements to compress
* @param[in] plan Pointer to CUDPPCompressPlan object containing
* compress options and intermediate storage
*/
void cudppCompressDispatch(void *d_uncompressed,
void *d_bwtIndex,
void *d_histSize, // ignore
void *d_hist,
void *d_encodeOffset,
void *d_compressedSize,
void *d_compressed,
size_t numElements,
const CUDPPCompressPlan *plan)
{
// Call to perform the Burrows-Wheeler transform
burrowsWheelerTransformWrapper((unsigned char*)d_uncompressed, (int*)d_bwtIndex,
numElements, plan);
// Call to perform the move-to-front transform
moveToFrontTransformWrapper(numElements, plan);
// Call to perform the Huffman encoding
huffmanEncoding((unsigned int*)d_hist, (unsigned int*)d_encodeOffset,
(unsigned int*)d_compressedSize, (unsigned int*)d_compressed, numElements, plan);
}
/** @brief Dispatch function to perform the Burrows-Wheeler transform
*
*
* @param[in] d_bwtIn Input data
* @param[out] d_bwtOut Transformed data
* @param[out] d_bwtIndex BWT Index
* @param[in] numElements Number of elements to compress
* @param[in] plan Pointer to CUDPPBwtPlan object containing
* compress options and intermediate storage
*/
void cudppBwtDispatch(void *d_bwtIn,
void *d_bwtOut,
void *d_bwtIndex,
size_t numElements,
const CUDPPBwtPlan *plan)
{
// Call to perform the Burrows-Wheeler transform
burrowsWheelerTransformWrapper((unsigned char*)d_bwtIn, (int*)d_bwtIndex,
(unsigned char*) d_bwtOut, numElements,
plan);
}
/** @brief Dispatch function to perform the Move-to-Front transform
*
*
* @param[in] d_mtfIn Input data
* @param[out] d_mtfOut Transformed data
* @param[in] numElements Number of elements to compress
* @param[in] plan Pointer to CUDPPMtfPlan object containing
* compress options and intermediate storage
*/
void cudppMtfDispatch(void *d_mtfIn,
void *d_mtfOut,
size_t numElements,
const CUDPPMtfPlan *plan)
{
// Call to perform the Burrows-Wheeler transform
moveToFrontTransformWrapper((unsigned char*) d_mtfIn,
(unsigned char*) d_mtfOut, numElements, plan);
}
#ifdef __cplusplus
}
#endif
/** @} */ // end compress functions
/** @} */ // end cudpp_app
| b2a5ed2cbebba3e08f9d90a9b886a9eb3c62091b.cu | // -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision$
// $Date$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include "cuda_util.h"
#include "cudpp_globals.h"
#include "cudpp.h"
#include "cudpp_util.h"
#include "cudpp_plan.h"
#include "kernel/compress_kernel.cuh"
/**
* @file
* compress_app.cu
*
* @brief CUDPP application-level compress routines
*/
/** \addtogroup cudpp_app
* @{
*/
/** @name Compress Functions
* @{
*/
/** @brief Perform Huffman encoding
*
*
* Performs Huffman encoding on the input data stream. The input data
* stream is the output data stream from the previous stage (MTF) in our
* compress stream.
*
* The input is given by the output of the Move-to-Front transform (MTF).
* There are a few things that need to be stored along with the compressed
* data. We also store the word offset of the compressed data stream because
* our data is compressed into independent blocks (word granularity) so that
* they can be encoded and decoded in parallel. The number of independent blocks
* is HUFF_THREADS_PER_BLOCK*HUFF_WORK_PER_THREAD.
*
*
* @param[out] d_hist Histogram array of the input data stream used for decoding.
* @param[out] d_encodeOffset An array of the word offsets of the independent compressed data blocks.
* @param[out] d_compressedSize Pointer to the total size in words of all compressed data blocks combined.
* @param[out] d_compressed A pointer to the compressed data blocks.
* @param[in] numElements Total number of input elements to compress.
* @param[in] plan Pointer to the plan object used for this compress.
*
*/
void huffmanEncoding(unsigned int *d_hist,
unsigned int *d_encodeOffset,
unsigned int *d_compressedSize,
unsigned int *d_compressed,
size_t numElements,
const CUDPPCompressPlan *plan)
{
unsigned char* d_input = plan->m_d_mtfOut;
//d_hist = plan->m_d_histogram;
//d_encodeOffset = plan->m_d_encodeOffset;
//d_compressedSize = plan->m_d_totalEncodedSize;
//d_compressed = plan->m_d_encodedData;
// Set work dimensions
size_t nCodesPacked = 0;
size_t histBlocks = (numElements%(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST)==0) ?
numElements/(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST) : numElements%(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST)+1;
size_t tThreads = ((numElements%HUFF_WORK_PER_THREAD) == 0) ? numElements/HUFF_WORK_PER_THREAD : numElements/HUFF_WORK_PER_THREAD+1;
size_t nBlocks = ( (tThreads%HUFF_THREADS_PER_BLOCK) == 0) ? tThreads/HUFF_THREADS_PER_BLOCK : tThreads/HUFF_THREADS_PER_BLOCK+1;
dim3 grid_hist(histBlocks, 1, 1);
dim3 threads_hist(HUFF_THREADS_PER_BLOCK_HIST, 1, 1);
dim3 grid_tree(1, 1, 1);
dim3 threads_tree(128, 1, 1);
dim3 grid_huff(nBlocks, 1, 1);
dim3 threads_huff(HUFF_THREADS_PER_BLOCK, 1, 1);
//---------------------------------------
// 1) Build histogram from MTF output
//---------------------------------------
huffman_build_histogram_kernel<<< grid_hist, threads_hist>>>
((unsigned int*)d_input, plan->m_d_histograms, numElements);
CUDA_SAFE_CALL(cudaThreadSynchronize());
//----------------------------------------------------
// 2) Compute final Histogram + Build Huffman codes
//----------------------------------------------------
huffman_build_tree_kernel<<< grid_tree, threads_tree>>>
(d_input, plan->m_d_huffCodesPacked, plan->m_d_huffCodeLocations, plan->m_d_huffCodeLengths, plan->m_d_histograms,
d_hist, plan->m_d_nCodesPacked, d_compressedSize, histBlocks, numElements);
CUDA_SAFE_CALL(cudaThreadSynchronize());
//----------------------------------------------
// 3) Main Huffman encoding step (encode data)
//----------------------------------------------
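// The packed code-table size is read back to the host first because it sizes
// the dynamic shared-memory allocation passed to the encoding kernel below.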
CUDA_SAFE_CALL(cudaMemcpy((void*)&nCodesPacked, plan->m_d_nCodesPacked, sizeof(size_t), cudaMemcpyDeviceToHost));
huffman_kernel_en<<< grid_huff, threads_huff, nCodesPacked*sizeof(unsigned char)>>>
((uchar4*)d_input, plan->m_d_huffCodesPacked, plan->m_d_huffCodeLocations, plan->m_d_huffCodeLengths,
plan->m_d_encoded, nCodesPacked, tThreads);
CUDA_SAFE_CALL(cudaThreadSynchronize());
//--------------------------------------------------
// 4) Pack together encoded data to determine how
// much encoded data needs to be transferred
//--------------------------------------------------
huffman_datapack_kernel<<<grid_huff, threads_huff>>>
(plan->m_d_encoded, d_compressed, d_compressedSize, d_encodeOffset);
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
/** @brief Perform the Move-to-Front Transform (MTF)
*
* Performs a Move-to-Front (MTF) transform on the input data stream.
* The MTF transform is the second stage in our compress pipeline. The
* MTF manipulates the input data stream to improve the performance of
* entropy encoding.
*
* @param[in] d_mtfIn An array of the input data stream to perform the MTF transform on.
* @param[out] d_mtfOut An array to store the output of the MTF transform.
* @param[in] numElements Total number of input elements of the MTF transform.
* @param[in] plan Pointer to the plan object used for this MTF transform.
*
*/
template <class T>
void moveToFrontTransform(unsigned char *d_mtfIn,
unsigned char *d_mtfOut,
size_t numElements,
const T *plan)
{
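// Round numElements up to the next power of two by smearing the highest set
// bit into every lower bit and then adding one.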
unsigned int npad = numElements-1;
npad |= npad >> 1;
npad |= npad >> 2;
npad |= npad >> 4;
npad |= npad >> 8;
npad |= npad >> 16;
npad++;
unsigned int nThreads = MTF_THREADS_BLOCK;
unsigned int nLists = npad/MTF_PER_THREAD;
unsigned int tThreads = npad/MTF_PER_THREAD;
unsigned int offset = 2;
bool fullBlocks = (tThreads%nThreads == 0);
unsigned int nBlocks = (fullBlocks) ? (tThreads/nThreads) : (tThreads/nThreads + 1);
//-------------------------------------------
// Initial MTF lists + Initial Reduction
//-------------------------------------------
// Set work-item dimensions
dim3 grid(nBlocks, 1, 1);
dim3 threads(nThreads, 1, 1);
// Kernel call
mtf_reduction_kernel<<< grid, threads>>>
(d_mtfIn, plan->m_d_lists, plan->m_d_list_sizes, nLists, offset, numElements);
CUDA_SAFE_CALL(cudaThreadSynchronize());
if(nBlocks > 1)
{
//----------------------
// MTF Global Reduce
//----------------------
unsigned int init_offset = offset * nThreads;
offset = init_offset;
tThreads = nBlocks/2;
fullBlocks = (tThreads%nThreads == 0);
nBlocks = (fullBlocks) ? (tThreads/nThreads) : (tThreads/nThreads + 1);
// Set work dimensions
dim3 grid_GLred(nBlocks, 1, 1);
dim3 threads_GLred(nThreads, 1, 1);
while(offset <= nLists)
{
mtf_GLreduction_kernel<<< grid_GLred, threads_GLred>>>
(plan->m_d_lists, plan->m_d_list_sizes, offset, tThreads, nLists);
CUDA_SAFE_CALL(cudaThreadSynchronize());
offset *= 2*nThreads;
}
//-----------------------------
// MTF Global Down-sweep
//-----------------------------
offset = nLists/4;
unsigned int lastLevel = 0;
// Work-dimensions
dim3 grid_GLsweep(nBlocks, 1, 1);
dim3 threads_GLsweep(nThreads, 1, 1);
while(offset >= init_offset/2)
{
lastLevel = offset/nThreads;
lastLevel = (lastLevel>=(init_offset/2)) ? lastLevel : init_offset/2;
mtf_GLdownsweep_kernel<<< grid_GLsweep, threads_GLsweep>>>
(plan->m_d_lists, plan->m_d_list_sizes, offset, lastLevel, nLists, tThreads);
CUDA_SAFE_CALL(cudaThreadSynchronize());
offset = lastLevel/2;
}
}
//------------------------
// Local Scan
//------------------------
tThreads = npad/MTF_PER_THREAD;
offset = 2;
fullBlocks = (tThreads%nThreads == 0);
nBlocks = (fullBlocks) ? (tThreads/nThreads) : (tThreads/nThreads + 1);
dim3 grid_loc(nBlocks, 1, 1);
dim3 threads_loc(nThreads, 1, 1);
mtf_localscan_lists_kernel<<< grid_loc, threads_loc>>>
(d_mtfIn, d_mtfOut, plan->m_d_lists, plan->m_d_list_sizes, nLists, offset, numElements);
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
/** @brief Perform the Burrows-Wheeler Transform (BWT)
*
* Performs the Burrows-Wheeler Transform (BWT) on a given
* character string. The BWT is an algorithm which is commonly used
* in compression applications, mainly bzip2. The BWT orders the
* characters in such a way that the output tends to have many long
* runs of repeated characters. This bodes well for later stages in
* compression pipelines which perform better with repeated characters.
*
*
* @param[in] d_uncompressed A char array of the input data stream to perform the BWT on.
* @param[out] d_bwtIndex The index at which the original string in the BWT sorts to.
* @param[out] d_bwtOut An array to store the output of the BWT.
* @param[in] numElements Total number of input elements of the BWT.
* @param[in] plan Pointer to the plan object used for this BWT.
*
*
*/
template <class T>
void burrowsWheelerTransform(unsigned char *d_uncompressed,
int *d_bwtIndex,
unsigned char *d_bwtOut,
size_t numElements,
const T *plan)
{
size_t tThreads = (numElements%4 == 0) ? numElements/4 : numElements/4 + 1;
size_t nThreads = BWT_CTA_BLOCK;
bool fullBlocks = (tThreads%nThreads == 0);
uint nBlocks = (fullBlocks) ? (tThreads/nThreads) : (tThreads/nThreads+1);
dim3 grid_construct(nBlocks, 1, 1);
dim3 threads_construct(nThreads, 1, 1);
int numThreads = 64;
int secondBlocks;
size_t count;
size_t mult;
size_t numBlocks;
int initSubPartitions;
int subPartitions;
int step;
// Massage input to create sorting key-value pairs
bwt_keys_construct_kernel<<< grid_construct, threads_construct >>>
((uchar4*)d_uncompressed, plan->m_d_bwtInRef,
plan->m_d_keys, plan->m_d_values, plan->m_d_bwtInRef2, tThreads);
CUDA_SAFE_CALL(cudaThreadSynchronize());
// First stage -- block sort
nBlocks = numElements/BWT_BLOCKSORT_SIZE;
dim3 grid_blocksort(nBlocks, 1, 1);
dim3 threads_blocksort(BWT_CTA_BLOCK, 1, 1);
blockWiseStringSort<unsigned int, 8><<<grid_blocksort, threads_blocksort>>>
(plan->m_d_keys, plan->m_d_values, (const unsigned int*)plan->m_d_bwtInRef, plan->m_d_bwtInRef2, BWT_BLOCKSORT_SIZE, numElements);
CUDA_SAFE_CALL(cudaThreadSynchronize());
// Start merging blocks
// Second stage -- merge sorted blocks using simple merge
count = 0;
mult = 1;
numBlocks = nBlocks;
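// Six passes of pairwise merging double the sorted run length each time;
// keys/values ping-pong between the primary and *_dev buffers depending on
// the parity of 'count'.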
while(count < 6)
{
if(count%2 == 0)
{
simpleStringMerge<unsigned int, 2><<<numBlocks, BWT_CTASIZE_simple, sizeof(unsigned int)*(2*BWT_INTERSECT_B_BLOCK_SIZE_simple+2)>>>
(plan->m_d_keys, plan->m_d_keys_dev, plan->m_d_values, plan->m_d_values_dev,
plan->m_d_bwtInRef, BWT_BLOCKSORT_SIZE*mult, numBlocks*BWT_BLOCKSORT_SIZE, plan->m_d_bwtInRef2, numElements);
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
else
{
simpleStringMerge<unsigned int, 2><<<numBlocks, BWT_CTASIZE_simple, sizeof(unsigned int)*(2*BWT_INTERSECT_B_BLOCK_SIZE_simple+2)>>>
(plan->m_d_keys_dev, plan->m_d_keys, plan->m_d_values_dev, plan->m_d_values,
plan->m_d_bwtInRef, BWT_BLOCKSORT_SIZE*mult, numBlocks*BWT_BLOCKSORT_SIZE, plan->m_d_bwtInRef2, numElements);
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
mult*=2;
count++;
numBlocks /= 2;
}
// Third stage -- merge remaining blocks using multi-merge
initSubPartitions = 2;
subPartitions = initSubPartitions;
secondBlocks = (2*numBlocks*initSubPartitions+numThreads-1)/numThreads;
step = 1;
while (numBlocks > 1)
{
if(count%2 == 1)
{
findMultiPartitions<unsigned int><<<secondBlocks, numThreads>>>
(plan->m_d_keys_dev, subPartitions, numBlocks, BWT_BLOCKSORT_SIZE*mult,
plan->m_d_partitionBeginA, plan->m_d_partitionSizeA, plan->m_d_partitionBeginB, plan->m_d_partitionSizeB, BWT_SIZE);
CUDA_SAFE_CALL(cudaThreadSynchronize());
stringMergeMulti<unsigned int, 2><<<numBlocks*subPartitions, BWT_CTASIZE_multi, (2*BWT_INTERSECT_B_BLOCK_SIZE_multi+5)*sizeof(unsigned int)>>>
(plan->m_d_keys_dev, plan->m_d_keys, plan->m_d_values_dev, plan->m_d_values, plan->m_d_bwtInRef2, subPartitions, numBlocks,
plan->m_d_partitionBeginA, plan->m_d_partitionSizeA, plan->m_d_partitionBeginB, plan->m_d_partitionSizeB, BWT_BLOCKSORT_SIZE*mult, numElements);
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
else
{
findMultiPartitions<unsigned int><<<secondBlocks, numThreads>>>
(plan->m_d_keys, subPartitions, numBlocks, BWT_BLOCKSORT_SIZE*mult,
plan->m_d_partitionBeginA, plan->m_d_partitionSizeA, plan->m_d_partitionBeginB, plan->m_d_partitionSizeB, BWT_SIZE);
CUDA_SAFE_CALL(cudaThreadSynchronize());
stringMergeMulti<unsigned int, 2><<<numBlocks*subPartitions, BWT_CTASIZE_multi, (2*BWT_INTERSECT_B_BLOCK_SIZE_multi+5)*sizeof(unsigned int)>>>
(plan->m_d_keys, plan->m_d_keys_dev, plan->m_d_values, plan->m_d_values_dev, plan->m_d_bwtInRef2, subPartitions, numBlocks,
plan->m_d_partitionBeginA, plan->m_d_partitionSizeA, plan->m_d_partitionBeginB, plan->m_d_partitionSizeB, BWT_BLOCKSORT_SIZE*mult, numElements);
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
numBlocks/=2;
subPartitions*=2;
count++;
mult*=2;
step++;
}
// Final stage -- compute BWT and BWT Index using sorted values
if(count%2 == 0)
{
bwt_compute_final_kernel<<< grid_construct, threads_construct >>>
(d_uncompressed, plan->m_d_values, d_bwtIndex, d_bwtOut, numElements, tThreads);
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
else
{
bwt_compute_final_kernel<<< grid_construct, threads_construct >>>
(d_uncompressed, plan->m_d_values_dev, d_bwtIndex, d_bwtOut, numElements, tThreads);
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
}
/** @brief Wrapper for calling the Burrows-Wheeler Transform (BWT).
*
* This is a wrapper function for calling the BWT. This wrapper is used
* internally via the compress application to call burrowsWheelerTransform().
*
*
* @param[in] d_in A char array of the input data stream to perform the BWT on.
* @param[out] d_bwtIndex The index at which the original string in the BWT sorts to.
* @param[in] numElements Total number of input elements to the compress stream.
* @param[in] plan Pointer to the plan object used for this compress.
*
*
*/
void burrowsWheelerTransformWrapper(unsigned char *d_in,
int *d_bwtIndex,
size_t numElements,
const CUDPPCompressPlan *plan)
{
burrowsWheelerTransform<CUDPPCompressPlan>(d_in, d_bwtIndex, plan->m_d_bwtOut, numElements, plan);
}
/** @brief Wrapper for calling the Burrows-Wheeler Transform (BWT).
*
* This is a wrapper function for calling the BWT. This wrapper is used
* internally via the BWT primitive to call burrowsWheelerTransform().
*
*
* @param[in] d_in A char array of the input data stream to perform the BWT on.
* @param[out] d_bwtIndex The index at which the original string in the BWT sorts to.
* @param[out] d_bwtOut An array to store the output of the BWT.
* @param[in] numElements Total number of input elements to the BWT.
* @param[in] plan Pointer to the plan object used for this BWT.
*
*
*/
void burrowsWheelerTransformWrapper(unsigned char *d_in,
int *d_bwtIndex,
unsigned char *d_bwtOut,
size_t numElements,
const CUDPPBwtPlan *plan)
{
burrowsWheelerTransform<CUDPPBwtPlan>(d_in, d_bwtIndex, d_bwtOut, numElements, plan);
}
/** @brief Wrapper for calling the Move-to-Front (MTF) transform.
*
* This is a wrapper function for calling the MTF. This wrapper is used
* internally via the compress application to call moveToFrontTransform().
*
*
* @param[in] numElements Total number of input elements to the MTF transform.
* @param[in] plan Pointer to the plan object used for this compress.
*
*
*/
void moveToFrontTransformWrapper(size_t numElements,
const CUDPPCompressPlan *plan)
{
moveToFrontTransform<CUDPPCompressPlan>(plan->m_d_bwtOut, plan->m_d_mtfOut, numElements, plan);
}
/** @brief Wrapper for calling the Move-to-Front (MTF) transform.
*
* This is a wrapper function for calling the MTF. This wrapper is used
* internally via the MTF primitive to call moveToFrontTransform().
*
*
* @param[in] d_in An input char array to perform the MTF on.
* @param[out] d_mtfOut An output char array to store the MTF transformed
* stream.
* @param[in] numElements Total number of input elements to the MTF transform.
* @param[in] plan Pointer to the plan object used for this MTF.
*
*
*/
void moveToFrontTransformWrapper(unsigned char *d_in,
unsigned char *d_mtfOut,
size_t numElements,
const CUDPPMtfPlan *plan)
{
moveToFrontTransform<CUDPPMtfPlan>(d_in, d_mtfOut, numElements, plan);
}
#ifdef __cplusplus
extern "C"
{
#endif
/** @brief Allocate intermediate arrays used by BWT.
*
*
* @param [in,out] plan Pointer to CUDPPBwtPlan object containing options and number
* of elements, which is used to compute storage requirements, and
* within which intermediate storage is allocated.
*/
void allocBwtStorage(CUDPPBwtPlan *plan)
{
size_t numElts = plan->m_numElements;
// BWT
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_keys), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_values), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_bwtInRef), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_bwtInRef2), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_keys_dev), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_values_dev), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**)&(plan->m_d_partitionBeginA), 1024*sizeof(int)) );
CUDA_SAFE_CALL(cudaMalloc((void**)&(plan->m_d_partitionSizeA), 1024*sizeof(int)) );
CUDA_SAFE_CALL(cudaMalloc((void**)&(plan->m_d_partitionBeginB), 1024*sizeof(int)) );
CUDA_SAFE_CALL(cudaMalloc((void**)&(plan->m_d_partitionSizeB), 1024*sizeof(int)) );
}
/** @brief Allocate intermediate arrays used by MTF.
*
*
* @param [in,out] plan Pointer to CUDPPMtfPlan object containing
* options and number of elements, which is used
* to compute storage requirements, and within
* which intermediate storage is allocated.
*/
void allocMtfStorage(CUDPPMtfPlan *plan)
{
// Pad the element count up to the next power of two
size_t tmp = plan->m_numElements-1;
tmp |= tmp >> 1;
tmp |= tmp >> 2;
tmp |= tmp >> 4;
tmp |= tmp >> 8;
tmp |= tmp >> 16;
tmp++;
plan->npad = tmp;
// MTF
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_lists), (tmp/MTF_PER_THREAD)*256*sizeof(unsigned char)));
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_list_sizes), (tmp/MTF_PER_THREAD)*sizeof(unsigned short)));
CUDA_SAFE_CALL(cudaMemset(plan->m_d_lists, 0, (tmp/MTF_PER_THREAD)*256*sizeof(unsigned char)));
CUDA_SAFE_CALL(cudaMemset(plan->m_d_list_sizes, 0, (tmp/MTF_PER_THREAD)*sizeof(unsigned short)));
}
/** @brief Allocate intermediate arrays used by compression.
*
*
* @param [in,out] plan Pointer to CUDPPCompressPlan object
* containing options and number of elements,
* which is used to compute storage
* requirements, and within which intermediate
* storage is allocated.
*/
void allocCompressStorage(CUDPPCompressPlan *plan)
{
size_t numElts = plan->m_numElements;
plan->npad = numElts;
// BWT
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_keys), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_values), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_bwtOut), numElts*sizeof(unsigned char) ));
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_bwtInRef), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_bwtInRef2), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_keys_dev), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_d_values_dev), numElts*sizeof(unsigned int) ));
CUDA_SAFE_CALL(cudaMalloc((void**)&(plan->m_d_partitionBeginA), 1024*sizeof(int)) );
CUDA_SAFE_CALL(cudaMalloc((void**)&(plan->m_d_partitionSizeA), 1024*sizeof(int)) );
CUDA_SAFE_CALL(cudaMalloc((void**)&(plan->m_d_partitionBeginB), 1024*sizeof(int)) );
CUDA_SAFE_CALL(cudaMalloc((void**)&(plan->m_d_partitionSizeB), 1024*sizeof(int)) );
// MTF
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_lists), (numElts/MTF_PER_THREAD)*256*sizeof(unsigned char)));
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_list_sizes), (numElts/MTF_PER_THREAD)*sizeof(unsigned short)));
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_mtfOut), numElts*sizeof(unsigned char) ));
// Huffman
size_t numBitsAlloc = HUFF_NUM_CHARS*(HUFF_NUM_CHARS+1)/2;
size_t numCharsAlloc = (numBitsAlloc%8 == 0) ? numBitsAlloc/8 : numBitsAlloc/8 + 1;
size_t histBlocks = (numElts%(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST)==0) ?
numElts/(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST) : numElts/(HUFF_WORK_PER_THREAD_HIST*HUFF_THREADS_PER_BLOCK_HIST)+1;
size_t tThreads = ((numElts%HUFF_WORK_PER_THREAD) == 0) ? numElts/HUFF_WORK_PER_THREAD : numElts/HUFF_WORK_PER_THREAD+1;
size_t nBlocks = ( (tThreads%HUFF_THREADS_PER_BLOCK) == 0) ? tThreads/HUFF_THREADS_PER_BLOCK : tThreads/HUFF_THREADS_PER_BLOCK+1;
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_huffCodesPacked), numCharsAlloc*sizeof(unsigned char) ));
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_huffCodeLocations), HUFF_NUM_CHARS*sizeof(size_t) ));
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_huffCodeLengths), HUFF_NUM_CHARS*sizeof(unsigned char) ));
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_histograms), histBlocks*256*sizeof(size_t) ));
//CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_histogram), 256*sizeof(size_t) ));
//CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_totalEncodedSize), sizeof(size_t)));
//CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_encodedData), sizeof(size_t)*(HUFF_CODE_BYTES+1)*nBlocks));
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_nCodesPacked), sizeof(size_t)));
CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_encoded), sizeof(encoded)*nBlocks));
//CUDA_SAFE_CALL(cudaMalloc( (void**) &(plan->m_d_encodeOffset), sizeof(size_t)*nBlocks));
CUDA_CHECK_ERROR("allocCompressStorage");
}
/** @brief Deallocate intermediate block arrays in a CUDPPCompressPlan object.
*
*
* @param[in,out] plan Pointer to CUDPPCompressPlan object initialized by allocCompressStorage().
*/
void freeCompressStorage(CUDPPCompressPlan *plan)
{
// BWT
CUDA_SAFE_CALL( cudaFree(plan->m_d_keys));
CUDA_SAFE_CALL( cudaFree(plan->m_d_values));
CUDA_SAFE_CALL( cudaFree(plan->m_d_bwtOut));
CUDA_SAFE_CALL( cudaFree(plan->m_d_bwtInRef));
CUDA_SAFE_CALL( cudaFree(plan->m_d_bwtInRef2));
CUDA_SAFE_CALL( cudaFree(plan->m_d_keys_dev));
CUDA_SAFE_CALL( cudaFree(plan->m_d_values_dev));
CUDA_SAFE_CALL( cudaFree(plan->m_d_partitionBeginA));
CUDA_SAFE_CALL( cudaFree(plan->m_d_partitionSizeA));
CUDA_SAFE_CALL( cudaFree(plan->m_d_partitionBeginB));
CUDA_SAFE_CALL( cudaFree(plan->m_d_partitionSizeB));
// MTF
CUDA_SAFE_CALL( cudaFree(plan->m_d_lists));
CUDA_SAFE_CALL( cudaFree(plan->m_d_list_sizes));
CUDA_SAFE_CALL( cudaFree(plan->m_d_mtfOut));
// Huffman
CUDA_SAFE_CALL(cudaFree(plan->m_d_histograms));
//CUDA_SAFE_CALL(cudaFree(plan->m_d_histogram));
CUDA_SAFE_CALL(cudaFree(plan->m_d_huffCodeLengths));
CUDA_SAFE_CALL(cudaFree(plan->m_d_huffCodesPacked));
CUDA_SAFE_CALL(cudaFree(plan->m_d_huffCodeLocations));
//CUDA_SAFE_CALL(cudaFree(plan->m_d_totalEncodedSize));
//CUDA_SAFE_CALL(cudaFree(plan->m_d_encodedData));
CUDA_SAFE_CALL(cudaFree(plan->m_d_nCodesPacked));
CUDA_SAFE_CALL(cudaFree(plan->m_d_encoded));
//CUDA_SAFE_CALL(cudaFree(plan->m_d_encodeOffset));
CUDA_CHECK_ERROR("freeCompressStorage");
}
/** @brief Deallocate intermediate block arrays in a CUDPPBwtPlan object.
*
*
* @param[in,out] plan Pointer to CUDPPBwtPlan object initialized by allocBwtStorage().
*/
void freeBwtStorage(CUDPPBwtPlan *plan)
{
// BWT
CUDA_SAFE_CALL( cudaFree(plan->m_d_keys));
CUDA_SAFE_CALL( cudaFree(plan->m_d_values));
CUDA_SAFE_CALL( cudaFree(plan->m_d_bwtInRef));
CUDA_SAFE_CALL( cudaFree(plan->m_d_bwtInRef2));
CUDA_SAFE_CALL( cudaFree(plan->m_d_keys_dev));
CUDA_SAFE_CALL( cudaFree(plan->m_d_values_dev));
CUDA_SAFE_CALL( cudaFree(plan->m_d_partitionBeginA));
CUDA_SAFE_CALL( cudaFree(plan->m_d_partitionSizeA));
CUDA_SAFE_CALL( cudaFree(plan->m_d_partitionBeginB));
CUDA_SAFE_CALL( cudaFree(plan->m_d_partitionSizeB));
}
/** @brief Deallocate intermediate block arrays in a CUDPPMtfPlan object.
*
*
* @param[in,out] plan Pointer to CUDPPMtfPlan object initialized by allocMtfStorage().
*/
void freeMtfStorage(CUDPPMtfPlan *plan)
{
// MTF
CUDA_SAFE_CALL( cudaFree(plan->m_d_lists));
CUDA_SAFE_CALL( cudaFree(plan->m_d_list_sizes));
}
/** @brief Dispatch function to perform parallel compression on an
* array with the specified configuration.
*
*
* @param[in] d_uncompressed Uncompressed data
* @param[out] d_bwtIndex BWT Index
* @param[out] d_histSize Histogram size
* @param[out] d_hist Histogram
* @param[out] d_encodeOffset Encoded offset table
* @param[out] d_compressedSize Size of compressed data
* @param[out] d_compressed Compressed data
* @param[in] numElements Number of elements to compress
* @param[in] plan Pointer to CUDPPCompressPlan object containing
* compress options and intermediate storage
*/
void cudppCompressDispatch(void *d_uncompressed,
void *d_bwtIndex,
void *d_histSize, // ignore
void *d_hist,
void *d_encodeOffset,
void *d_compressedSize,
void *d_compressed,
size_t numElements,
const CUDPPCompressPlan *plan)
{
// Call to perform the Burrows-Wheeler transform
burrowsWheelerTransformWrapper((unsigned char*)d_uncompressed, (int*)d_bwtIndex,
numElements, plan);
// Call to perform the move-to-front transform
moveToFrontTransformWrapper(numElements, plan);
// Call to perform the Huffman encoding
huffmanEncoding((unsigned int*)d_hist, (unsigned int*)d_encodeOffset,
(unsigned int*)d_compressedSize, (unsigned int*)d_compressed, numElements, plan);
}
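// Note on the pipeline above (descriptive; the buffer wiring is inferred from
// allocCompressStorage() and may differ in detail from the original design):
// the BWT stage writes plan->m_d_bwtOut (plus d_bwtIndex), the MTF stage is
// assumed to read plan->m_d_bwtOut and write plan->m_d_mtfOut through the
// (numElements, plan) overload, and the Huffman stage consumes plan->m_d_mtfOut
// to produce d_compressed, d_hist, d_encodeOffset and d_compressedSize.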
/** @brief Dispatch function to perform the Burrows-Wheeler transform
*
*
* @param[in] d_bwtIn Input data
* @param[out] d_bwtOut Transformed data
* @param[out] d_bwtIndex BWT Index
* @param[in] numElements Number of elements to compress
* @param[in] plan Pointer to CUDPPBwtPlan object containing
* compress options and intermediate storage
*/
void cudppBwtDispatch(void *d_bwtIn,
void *d_bwtOut,
void *d_bwtIndex,
size_t numElements,
const CUDPPBwtPlan *plan)
{
// Call to perform the Burrows-Wheeler transform
burrowsWheelerTransformWrapper((unsigned char*)d_bwtIn, (int*)d_bwtIndex,
(unsigned char*) d_bwtOut, numElements,
plan);
}
/** @brief Dispatch function to perform the Move-to-Front transform
*
*
* @param[in] d_mtfIn Input data
* @param[out] d_mtfOut Transformed data
* @param[in] numElements Number of elements to compress
* @param[in] plan Pointer to CUDPPMtfPlan object containing
* compress options and intermediate storage
*/
void cudppMtfDispatch(void *d_mtfIn,
void *d_mtfOut,
size_t numElements,
const CUDPPMtfPlan *plan)
{
// Call to perform the move-to-front transform
moveToFrontTransformWrapper((unsigned char*) d_mtfIn,
(unsigned char*) d_mtfOut, numElements, plan);
}
#ifdef __cplusplus
}
#endif
/** @} */ // end compress functions
/** @} */ // end cudpp_app
|
7487c04bae996dc51787fd66513b0a508eb7be4d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/groupby/sort_helper.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/scatter.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <thrust/binary_search.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/unique.h>
#include <algorithm>
#include <numeric>
#include <tuple>
namespace {
/**
* @brief Compares two `table` rows for equality as if the table were
* ordered according to a specified permutation map.
*
*/
template <bool nullable = true>
struct permuted_row_equality_comparator {
cudf::row_equality_comparator<nullable> _comparator;
cudf::size_type const* _map;
/**
* @brief Construct a permuted_row_equality_comparator.
*
* @param t The `table` whose rows will be compared
* @param map The permutation map that specifies the effective ordering of
*`t`. Must be the same size as `t.num_rows()`
*/
permuted_row_equality_comparator(cudf::table_device_view const& t, cudf::size_type const* map)
: _comparator(t, t, true), _map{map}
{
}
/**
* @brief Returns true if the two rows at the specified indices in the permuted
* order are equivalent.
*
* For example, comparing rows `i` and `j` is
* equivalent to comparing rows `map[i]` and `map[j]` in the original table.
*
* @param lhs The index of the first row
* @param rhs The index of the second row
* @returns true if the two specified rows in the permuted order are equivalent
*/
CUDA_DEVICE_CALLABLE
bool operator()(cudf::size_type lhs, cudf::size_type rhs)
{
return _comparator(_map[lhs], _map[rhs]);
}
};
} // namespace
namespace cudf {
namespace groupby {
namespace detail {
namespace sort {
size_type sort_groupby_helper::num_keys(hipStream_t stream)
{
if (_num_keys > -1) return _num_keys;
if (_include_null_keys == null_policy::EXCLUDE and has_nulls(_keys)) {
// The number of rows without null values, `n`, equals the number of valid bits
// in the row bitmask. When `_include_null_keys == null_policy::EXCLUDE`, only
// rows `[0, n)` in the sorted keys are considered for grouping.
_num_keys = keys_bitmask_column(stream).size() - keys_bitmask_column(stream).null_count();
} else {
_num_keys = _keys.num_rows();
}
return _num_keys;
}
column_view sort_groupby_helper::key_sort_order(hipStream_t stream)
{
auto sliced_key_sorted_order = [stream, this]() {
return cudf::detail::slice(this->_key_sorted_order->view(), 0, this->num_keys(stream));
};
if (_key_sorted_order) { return sliced_key_sorted_order(); }
// TODO (dm): optimization. When keys are pre sorted but ignore nulls is true,
// we still want all rows with nulls in the end. Sort is costly, so
// do a copy_if(counting, sorted_order, {bitmask.is_valid(i)})
if (_keys_pre_sorted == sorted::YES) {
_key_sorted_order = make_numeric_column(
data_type(type_to_id<size_type>()), _keys.num_rows(), mask_state::UNALLOCATED, stream);
auto d_key_sorted_order = _key_sorted_order->mutable_view().data<size_type>();
thrust::sequence(rmm::exec_policy(stream)->on(stream),
d_key_sorted_order,
d_key_sorted_order + _key_sorted_order->size(),
0);
return sliced_key_sorted_order();
}
if (_include_null_keys == null_policy::INCLUDE || !cudf::has_nulls(_keys)) { // SQL style
_key_sorted_order =
cudf::detail::sorted_order(_keys,
{},
std::vector<null_order>(_keys.num_columns(), null_order::AFTER),
rmm::mr::get_default_resource(),
stream);
} else { // Pandas style
// Temporarily prepend the keys table with a column that indicates the
// presence of a null value within a row. This allows moving all rows that
// contain a null value to the end of the sorted order.
auto augmented_keys = table_view({table_view({keys_bitmask_column()}), _keys});
_key_sorted_order = cudf::detail::sorted_order(
augmented_keys,
{},
std::vector<null_order>(_keys.num_columns() + 1, null_order::AFTER),
rmm::mr::get_default_resource(),
stream);
// All rows with one or more null values are at the end of the resulting sorted order.
}
return sliced_key_sorted_order();
}
sort_groupby_helper::index_vector const& sort_groupby_helper::group_offsets(hipStream_t stream)
{
if (_group_offsets) return *_group_offsets;
_group_offsets = std::make_unique<index_vector>(num_keys(stream) + 1);
auto device_input_table = table_device_view::create(_keys, stream);
auto sorted_order = key_sort_order().data<size_type>();
decltype(_group_offsets->begin()) result_end;
auto exec = rmm::exec_policy(stream);
if (has_nulls(_keys)) {
result_end = thrust::unique_copy(
exec->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(num_keys(stream)),
_group_offsets->begin(),
permuted_row_equality_comparator<true>(*device_input_table, sorted_order));
} else {
result_end = thrust::unique_copy(
exec->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(num_keys(stream)),
_group_offsets->begin(),
permuted_row_equality_comparator<false>(*device_input_table, sorted_order));
}
size_type num_groups = thrust::distance(_group_offsets->begin(), result_end);
(*_group_offsets)[num_groups] = num_keys(stream);
_group_offsets->resize(num_groups + 1);
return *_group_offsets;
}
sort_groupby_helper::index_vector const& sort_groupby_helper::group_labels(hipStream_t stream)
{
if (_group_labels) return *_group_labels;
// Get group labels for future use in segmented sorting
_group_labels = std::make_unique<index_vector>(num_keys(stream));
auto& group_labels = *_group_labels;
if (num_keys(stream) == 0) return group_labels;
auto exec = rmm::exec_policy(stream);
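// Labels are built by scattering a 1 at every group start except the first
// (group_offsets()[1..num_groups()-1]) and then taking an inclusive prefix sum.
// For example, with group_offsets = [0, 3, 5, 8] over 8 keys:
// after scatter: [0, 0, 0, 1, 0, 1, 0, 0]
// after scan: [0, 0, 0, 1, 1, 2, 2, 2]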
thrust::scatter(exec->on(stream),
thrust::make_constant_iterator(1, decltype(num_groups())(1)),
thrust::make_constant_iterator(1, num_groups()),
group_offsets().begin() + 1,
group_labels.begin());
thrust::inclusive_scan(
exec->on(stream), group_labels.begin(), group_labels.end(), group_labels.begin());
return group_labels;
}
column_view sort_groupby_helper::unsorted_keys_labels(hipStream_t stream)
{
if (_unsorted_keys_labels) return _unsorted_keys_labels->view();
column_ptr temp_labels = make_numeric_column(
data_type(type_to_id<size_type>()), _keys.num_rows(), mask_state::ALL_NULL, stream);
auto group_labels_view = cudf::column_view(
data_type(type_to_id<size_type>()), group_labels().size(), group_labels().data().get());
auto scatter_map = key_sort_order();
std::unique_ptr<table> t_unsorted_keys_labels =
cudf::detail::scatter(table_view({group_labels_view}),
scatter_map,
table_view({temp_labels->view()}),
false,
rmm::mr::get_default_resource(),
stream);
_unsorted_keys_labels = std::move(t_unsorted_keys_labels->release()[0]);
return _unsorted_keys_labels->view();
}
column_view sort_groupby_helper::keys_bitmask_column(hipStream_t stream)
{
if (_keys_bitmask_column) return _keys_bitmask_column->view();
auto row_bitmask = bitmask_and(_keys, rmm::mr::get_default_resource(), stream);
_keys_bitmask_column = make_numeric_column(data_type(type_id::INT8),
_keys.num_rows(),
std::move(row_bitmask),
cudf::UNKNOWN_NULL_COUNT,
stream);
auto keys_bitmask_view = _keys_bitmask_column->mutable_view();
using T = id_to_type<type_id::INT8>;
thrust::fill(rmm::exec_policy(stream)->on(stream),
keys_bitmask_view.begin<T>(),
keys_bitmask_view.end<T>(),
0);
return _keys_bitmask_column->view();
}
sort_groupby_helper::column_ptr sort_groupby_helper::sorted_values(
column_view const& values, rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
column_ptr values_sort_order =
cudf::detail::sorted_order(table_view({unsorted_keys_labels(), values}),
{},
std::vector<null_order>(2, null_order::AFTER),
mr,
stream);
// Zero-copy slice this sort order so that its new size is num_keys()
column_view gather_map = cudf::detail::slice(values_sort_order->view(), 0, num_keys(stream));
auto sorted_values_table =
cudf::detail::gather(table_view({values}), gather_map, false, false, false, mr, stream);
return std::move(sorted_values_table->release()[0]);
}
sort_groupby_helper::column_ptr sort_groupby_helper::grouped_values(
column_view const& values, rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
auto gather_map = key_sort_order();
auto grouped_values_table =
cudf::detail::gather(table_view({values}), gather_map, false, false, false, mr, stream);
return std::move(grouped_values_table->release()[0]);
}
std::unique_ptr<table> sort_groupby_helper::unique_keys(rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
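// For each group g, gather the row key_sort_order()[group_offsets()[g]], i.e.
// the first row of group g in sorted order, yielding exactly one representative
// (unique) key row per group.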
auto idx_data = key_sort_order().data<size_type>();
auto gather_map_it = thrust::make_transform_iterator(
group_offsets().begin(), [idx_data] __device__(size_type i) { return idx_data[i]; });
return cudf::detail::gather(
_keys, gather_map_it, gather_map_it + num_groups(), false, mr, stream);
}
std::unique_ptr<table> sort_groupby_helper::sorted_keys(rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
return cudf::detail::gather(_keys, key_sort_order(), false, false, false, mr, stream);
}
} // namespace sort
} // namespace detail
} // namespace groupby
} // namespace cudf
| 7487c04bae996dc51787fd66513b0a508eb7be4d.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/groupby/sort_helper.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/scatter.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <thrust/binary_search.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/unique.h>
#include <algorithm>
#include <numeric>
#include <tuple>
namespace {
/**
* @brief Compares two `table` rows for equality as if the table were
* ordered according to a specified permutation map.
*
*/
template <bool nullable = true>
struct permuted_row_equality_comparator {
cudf::row_equality_comparator<nullable> _comparator;
cudf::size_type const* _map;
/**
* @brief Construct a permuted_row_equality_comparator.
*
* @param t The `table` whose rows will be compared
* @param map The permutation map that specifies the effective ordering of
*`t`. Must be the same size as `t.num_rows()`
*/
permuted_row_equality_comparator(cudf::table_device_view const& t, cudf::size_type const* map)
: _comparator(t, t, true), _map{map}
{
}
/**
* @brief Returns true if the two rows at the specified indices in the permuted
* order are equivalent.
*
* For example, comparing rows `i` and `j` is
* equivalent to comparing rows `map[i]` and `map[j]` in the original table.
*
* @param lhs The index of the first row
* @param rhs The index of the second row
* @returns true if the two specified rows in the permuted order are equivalent
*/
CUDA_DEVICE_CALLABLE
bool operator()(cudf::size_type lhs, cudf::size_type rhs)
{
return _comparator(_map[lhs], _map[rhs]);
}
};
} // namespace
namespace cudf {
namespace groupby {
namespace detail {
namespace sort {
size_type sort_groupby_helper::num_keys(cudaStream_t stream)
{
if (_num_keys > -1) return _num_keys;
if (_include_null_keys == null_policy::EXCLUDE and has_nulls(_keys)) {
// The number of rows without null values, `n`, equals the number of valid bits
// in the row bitmask. When `_include_null_keys == null_policy::EXCLUDE`, only
// rows `[0, n)` in the sorted keys are considered for grouping.
_num_keys = keys_bitmask_column(stream).size() - keys_bitmask_column(stream).null_count();
} else {
_num_keys = _keys.num_rows();
}
return _num_keys;
}
column_view sort_groupby_helper::key_sort_order(cudaStream_t stream)
{
auto sliced_key_sorted_order = [stream, this]() {
return cudf::detail::slice(this->_key_sorted_order->view(), 0, this->num_keys(stream));
};
if (_key_sorted_order) { return sliced_key_sorted_order(); }
// TODO (dm): optimization. When keys are pre sorted but ignore nulls is true,
// we still want all rows with nulls in the end. Sort is costly, so
// do a copy_if(counting, sorted_order, {bitmask.is_valid(i)})
if (_keys_pre_sorted == sorted::YES) {
_key_sorted_order = make_numeric_column(
data_type(type_to_id<size_type>()), _keys.num_rows(), mask_state::UNALLOCATED, stream);
auto d_key_sorted_order = _key_sorted_order->mutable_view().data<size_type>();
thrust::sequence(rmm::exec_policy(stream)->on(stream),
d_key_sorted_order,
d_key_sorted_order + _key_sorted_order->size(),
0);
return sliced_key_sorted_order();
}
if (_include_null_keys == null_policy::INCLUDE || !cudf::has_nulls(_keys)) { // SQL style
_key_sorted_order =
cudf::detail::sorted_order(_keys,
{},
std::vector<null_order>(_keys.num_columns(), null_order::AFTER),
rmm::mr::get_default_resource(),
stream);
} else { // Pandas style
// Temporarily prepend the keys table with a column that indicates the
// presence of a null value within a row. This allows moving all rows that
// contain a null value to the end of the sorted order.
auto augmented_keys = table_view({table_view({keys_bitmask_column()}), _keys});
_key_sorted_order = cudf::detail::sorted_order(
augmented_keys,
{},
std::vector<null_order>(_keys.num_columns() + 1, null_order::AFTER),
rmm::mr::get_default_resource(),
stream);
// All rows with one or more null values are at the end of the resulting sorted order.
}
return sliced_key_sorted_order();
}
sort_groupby_helper::index_vector const& sort_groupby_helper::group_offsets(cudaStream_t stream)
{
if (_group_offsets) return *_group_offsets;
_group_offsets = std::make_unique<index_vector>(num_keys(stream) + 1);
auto device_input_table = table_device_view::create(_keys, stream);
auto sorted_order = key_sort_order().data<size_type>();
decltype(_group_offsets->begin()) result_end;
auto exec = rmm::exec_policy(stream);
if (has_nulls(_keys)) {
result_end = thrust::unique_copy(
exec->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(num_keys(stream)),
_group_offsets->begin(),
permuted_row_equality_comparator<true>(*device_input_table, sorted_order));
} else {
result_end = thrust::unique_copy(
exec->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(num_keys(stream)),
_group_offsets->begin(),
permuted_row_equality_comparator<false>(*device_input_table, sorted_order));
}
size_type num_groups = thrust::distance(_group_offsets->begin(), result_end);
(*_group_offsets)[num_groups] = num_keys(stream);
_group_offsets->resize(num_groups + 1);
return *_group_offsets;
}
sort_groupby_helper::index_vector const& sort_groupby_helper::group_labels(cudaStream_t stream)
{
if (_group_labels) return *_group_labels;
// Get group labels for future use in segmented sorting
_group_labels = std::make_unique<index_vector>(num_keys(stream));
auto& group_labels = *_group_labels;
if (num_keys(stream) == 0) return group_labels;
auto exec = rmm::exec_policy(stream);
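// Labels are built by scattering a 1 at every group start except the first
// (group_offsets()[1..num_groups()-1]) and then taking an inclusive prefix sum.
// For example, with group_offsets = [0, 3, 5, 8] over 8 keys:
// after scatter: [0, 0, 0, 1, 0, 1, 0, 0]
// after scan: [0, 0, 0, 1, 1, 2, 2, 2]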
thrust::scatter(exec->on(stream),
thrust::make_constant_iterator(1, decltype(num_groups())(1)),
thrust::make_constant_iterator(1, num_groups()),
group_offsets().begin() + 1,
group_labels.begin());
thrust::inclusive_scan(
exec->on(stream), group_labels.begin(), group_labels.end(), group_labels.begin());
return group_labels;
}
column_view sort_groupby_helper::unsorted_keys_labels(cudaStream_t stream)
{
if (_unsorted_keys_labels) return _unsorted_keys_labels->view();
column_ptr temp_labels = make_numeric_column(
data_type(type_to_id<size_type>()), _keys.num_rows(), mask_state::ALL_NULL, stream);
auto group_labels_view = cudf::column_view(
data_type(type_to_id<size_type>()), group_labels().size(), group_labels().data().get());
auto scatter_map = key_sort_order();
std::unique_ptr<table> t_unsorted_keys_labels =
cudf::detail::scatter(table_view({group_labels_view}),
scatter_map,
table_view({temp_labels->view()}),
false,
rmm::mr::get_default_resource(),
stream);
_unsorted_keys_labels = std::move(t_unsorted_keys_labels->release()[0]);
return _unsorted_keys_labels->view();
}
column_view sort_groupby_helper::keys_bitmask_column(cudaStream_t stream)
{
if (_keys_bitmask_column) return _keys_bitmask_column->view();
auto row_bitmask = bitmask_and(_keys, rmm::mr::get_default_resource(), stream);
_keys_bitmask_column = make_numeric_column(data_type(type_id::INT8),
_keys.num_rows(),
std::move(row_bitmask),
cudf::UNKNOWN_NULL_COUNT,
stream);
auto keys_bitmask_view = _keys_bitmask_column->mutable_view();
using T = id_to_type<type_id::INT8>;
thrust::fill(rmm::exec_policy(stream)->on(stream),
keys_bitmask_view.begin<T>(),
keys_bitmask_view.end<T>(),
0);
return _keys_bitmask_column->view();
}
sort_groupby_helper::column_ptr sort_groupby_helper::sorted_values(
column_view const& values, rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
column_ptr values_sort_order =
cudf::detail::sorted_order(table_view({unsorted_keys_labels(), values}),
{},
std::vector<null_order>(2, null_order::AFTER),
mr,
stream);
// Zero-copy slice this sort order so that its new size is num_keys()
column_view gather_map = cudf::detail::slice(values_sort_order->view(), 0, num_keys(stream));
auto sorted_values_table =
cudf::detail::gather(table_view({values}), gather_map, false, false, false, mr, stream);
return std::move(sorted_values_table->release()[0]);
}
sort_groupby_helper::column_ptr sort_groupby_helper::grouped_values(
column_view const& values, rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
auto gather_map = key_sort_order();
auto grouped_values_table =
cudf::detail::gather(table_view({values}), gather_map, false, false, false, mr, stream);
return std::move(grouped_values_table->release()[0]);
}
std::unique_ptr<table> sort_groupby_helper::unique_keys(rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
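// For each group g, gather the row key_sort_order()[group_offsets()[g]], i.e.
// the first row of group g in sorted order, yielding exactly one representative
// (unique) key row per group.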
auto idx_data = key_sort_order().data<size_type>();
auto gather_map_it = thrust::make_transform_iterator(
group_offsets().begin(), [idx_data] __device__(size_type i) { return idx_data[i]; });
return cudf::detail::gather(
_keys, gather_map_it, gather_map_it + num_groups(), false, mr, stream);
}
std::unique_ptr<table> sort_groupby_helper::sorted_keys(rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
return cudf::detail::gather(_keys, key_sort_order(), false, false, false, mr, stream);
}
} // namespace sort
} // namespace detail
} // namespace groupby
} // namespace cudf
|
df808b47bbc114f928de28fa0b05581296af7966.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file density_potential_cuda_kernel.cu
* @author Yibo Lin
* @date Jun 2018
* @brief Compute density potential according to NTUPlace3 (https://doi.org/10.1109/TCAD.2008.923063).
* This is for movable and filler cells.
*/
#include <stdio.h>
#include <float.h>
#include <cstdint>
#include "hip/hip_runtime.h"
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
#if 0
template <typename T>
__global__ void computePaddingDensityMap(
const int num_bins_x, const int num_bins_y,
const int padding,
T* density_map_tensor)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nodes*num_impacted_bins_x*num_impacted_bins_y; i += blockDim.x * gridDim.x)
{
int ix = i/num_bins_y;
int iy = i-ix*num_bins_y;
if (ix < padding)
{
density_map_tensor[i] = density_map_tensor[padding*num_bins_y+iy];
}
}
}
#endif
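// Per-dimension potential evaluated by computeDensityPotentialFunc in the
// kernels below (restated here for reference; a, b, c are per-node coefficients
// precomputed on the host, NTUPlace3 style). With d = |x + size/2 - bin_center|
// and w = size/2 + bin_size:
// p(d) = c * (1 - a*d*d) if d < w
// p(d) = c * b * (d - (w + bin_size))^2 if w <= d < w + bin_size
// p(d) = 0 otherwise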
template <typename T>
__global__ void computeDensityMap(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* ax_tensor, const T* bx_tensor, const T* cx_tensor,
const T* ay_tensor, const T* by_tensor, const T* cy_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
const int num_impacted_bins_x, const int num_impacted_bins_y,
T* density_map_tensor)
{
int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
int64_t bound = int64_t(num_nodes)*num_impacted_bins_x*num_impacted_bins_y;
// rank-one update density map
if (i < bound)
{
// density potential function
auto computeDensityPotentialFunc = [](T x, T node_size, T bin_center, T bin_size, T a, T b, T c){
// from origin to center
x += node_size/2;
//printf("x = %g, bin_center = %g\n", x, bin_center);
T dist = fabs(x-bin_center);
//printf("dist = %g\n", dist);
T partition1 = node_size/2+bin_size;
//printf("partition1 = %g\n", partition1);
T partition2 = partition1+bin_size;
//printf("partition2 = %g\n", partition2);
//printf("a = %g, b = %g, c = %g\n", a, b, c);
if (dist < partition1)
{
return c*(1-a*dist*dist);
}
else if (dist < partition2)
{
return c*(b*(dist-partition2)*(dist-partition2));
}
else
{
return T(0.0);
}
};
int node_id = i/(num_impacted_bins_x*num_impacted_bins_y);
int residual_index = i-node_id*num_impacted_bins_x*num_impacted_bins_y;
// x direction
int bin_index_xl = int((x_tensor[node_id]-xl-2*bin_size_x)/bin_size_x);
bin_index_xl = max(bin_index_xl, 0);
int k = bin_index_xl+int(residual_index / num_impacted_bins_y);
if (k+1 > num_bins_x)
{
return;
}
// y direction
int bin_index_yl = int((y_tensor[node_id]-yl-2*bin_size_y)/bin_size_y);
bin_index_yl = max(bin_index_yl, 0);
int h = bin_index_yl+(residual_index % num_impacted_bins_y);
if (h+1 > num_bins_y)
{
return;
}
T px = computeDensityPotentialFunc(x_tensor[node_id], node_size_x_tensor[node_id], bin_center_x_tensor[k], bin_size_x, ax_tensor[node_id], bx_tensor[node_id], cx_tensor[node_id]);
T py = computeDensityPotentialFunc(y_tensor[node_id], node_size_y_tensor[node_id], bin_center_y_tensor[h], bin_size_y, ay_tensor[node_id], by_tensor[node_id], cy_tensor[node_id]);
//printf("px[%d, %d] = %g, py[%d, %d] = %g\n", k, h, px, k, h, py);
// px*py is this node's contribution to the bin; the map accumulates areas
// (it is compared against target_area in the gradient kernel), not densities
atomicAdd(&density_map_tensor[k*num_bins_y+h], px*py);
}
}
template <typename T>
__global__ void computeDensityGradient(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* ax_tensor, const T* bx_tensor, const T* cx_tensor,
const T* ay_tensor, const T* by_tensor, const T* cy_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
const int num_impacted_bins_x, const int num_impacted_bins_y,
const T* grad_tensor, const T target_area,
const T* density_map_tensor,
T* grad_x_tensor, T* grad_y_tensor
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// rank-one update density map
if (i < num_nodes)
{
// density potential function
auto computeDensityPotentialFunc = [](T x, T node_size, T bin_center, T bin_size, T a, T b, T c){
// from origin to center
x += node_size/2;
//printf("x = %g, bin_center = %g\n", x, bin_center);
T dist = fabs(x-bin_center);
//printf("dist = %g\n", dist);
T partition1 = node_size/2+bin_size;
//printf("partition1 = %g\n", partition1);
T partition2 = partition1+bin_size;
//printf("partition2 = %g\n", partition2);
//printf("a = %g, b = %g, c = %g\n", a, b, c);
if (dist < partition1)
{
return c*(1-a*dist*dist);
}
else if (dist < partition2)
{
return c*(b*(dist-partition2)*(dist-partition2));
}
else
{
return T(0.0);
}
};
// density potential gradient function
auto computeDensityPotentialGradFunc = [](T x, T node_size, T bin_center, T bin_size, T a, T b, T c){
// from origin to center
x += node_size/2;
T dist = fabs(x-bin_center);
T partition1 = node_size/2+bin_size;
T partition2 = partition1+bin_size;
if (dist < partition1)
{
return -2*c*a*(x-bin_center);
}
else if (dist < partition2)
{
T sign = (x < bin_center)? -1.0 : 1.0;
return 2*c*b*(dist-partition2)*sign;
}
else
{
return T(0.0);
}
};
int bin_index_xl = int((x_tensor[i]-xl-2*bin_size_x)/bin_size_x);
int bin_index_xh = int(ceil((x_tensor[i]-xl+node_size_x_tensor[i]+2*bin_size_x)/bin_size_x))+1; // exclusive
bin_index_xl = max(bin_index_xl, 0);
// be careful about the bin_index_xl and bin_index_xh here
// the assumption is that num_bins_x >= num_impacted_bins_x
// each row of the px matrix should be filled with num_impacted_bins_x columns
bin_index_xl = min(bin_index_xl, num_bins_x-num_impacted_bins_x);
bin_index_xh = min(bin_index_xh, num_bins_x);
//int bin_index_xh = bin_index_xl+num_impacted_bins_x;
int bin_index_yl = int((y_tensor[i]-yl-2*bin_size_y)/bin_size_y);
int bin_index_yh = int(ceil((y_tensor[i]-yl+node_size_y_tensor[i]+2*bin_size_y)/bin_size_y))+1; // exclusive
bin_index_yl = max(bin_index_yl, 0);
// be careful about the bin_index_yl and bin_index_yh here
// the assumption is that num_bins_y >= num_impacted_bins_y
// each row of the py matrix should be filled with num_impacted_bins_y columns
bin_index_yl = min(bin_index_yl, num_bins_y-num_impacted_bins_y);
bin_index_yh = min(bin_index_yh, num_bins_y);
//int bin_index_yh = bin_index_yl+num_impacted_bins_y;
grad_x_tensor[i] = 0;
grad_y_tensor[i] = 0;
// update density potential map
for (int k = bin_index_xl; k < bin_index_xh; ++k)
{
T px = computeDensityPotentialFunc(x_tensor[i], node_size_x_tensor[i], bin_center_x_tensor[k], bin_size_x, ax_tensor[i], bx_tensor[i], cx_tensor[i]);
T gradx = computeDensityPotentialGradFunc(x_tensor[i], node_size_x_tensor[i], bin_center_x_tensor[k], bin_size_x, ax_tensor[i], bx_tensor[i], cx_tensor[i]);
for (int h = bin_index_yl; h < bin_index_yh; ++h)
{
T py = computeDensityPotentialFunc(y_tensor[i], node_size_y_tensor[i], bin_center_y_tensor[h], bin_size_y, ay_tensor[i], by_tensor[i], cy_tensor[i]);
T grady = computeDensityPotentialGradFunc(y_tensor[i], node_size_y_tensor[i], bin_center_y_tensor[h], bin_size_y, ay_tensor[i], by_tensor[i], cy_tensor[i]);
T delta = density_map_tensor[k*num_bins_y+h]-target_area;
//delta = max(delta, (T)0);
grad_x_tensor[i] += 2*delta*py*gradx;
grad_y_tensor[i] += 2*delta*px*grady;
}
}
grad_x_tensor[i] *= *grad_tensor;
grad_y_tensor[i] *= *grad_tensor;
}
}
template <typename T>
int computeDensityPotentialMapCudaLauncher(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* ax_tensor, const T* bx_tensor, const T* cx_tensor,
const T* ay_tensor, const T* by_tensor, const T* cy_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_impacted_bins_x, const int num_impacted_bins_y,
const int mat_size_x, const int mat_size_y,
const int num_nodes,
const int num_bins_x, const int num_bins_y, const int padding,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
const T target_area,
T* density_map_tensor,
const T* grad_tensor,
T* grad_x_tensor, T* grad_y_tensor
)
{
int64_t block_count;
int64_t thread_count = 512;
// compute gradient
if (grad_tensor)
{
block_count = (num_nodes - 1 + thread_count) / thread_count;
hipLaunchKernelGGL(( computeDensityGradient), dim3(block_count), dim3(thread_count), 0, 0,
x_tensor, y_tensor,
node_size_x_tensor, node_size_y_tensor,
ax_tensor, bx_tensor, cx_tensor,
ay_tensor, by_tensor, cy_tensor,
bin_center_x_tensor, bin_center_y_tensor,
num_nodes,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
num_impacted_bins_x, num_impacted_bins_y,
grad_tensor, target_area,
density_map_tensor,
grad_x_tensor, grad_y_tensor
);
// print gradient
//printArray(grad_x_tensor, 10, "grad_x_tensor");
//printArray(grad_y_tensor, 10, "grad_y_tensor");
}
else
{
block_count = (int64_t(num_nodes)*num_impacted_bins_x*num_impacted_bins_y - 1 + thread_count) / thread_count;
hipLaunchKernelGGL(( computeDensityMap), dim3(block_count), dim3(thread_count), 0, 0,
x_tensor, y_tensor,
node_size_x_tensor, node_size_y_tensor,
ax_tensor, bx_tensor, cx_tensor,
ay_tensor, by_tensor, cy_tensor,
bin_center_x_tensor, bin_center_y_tensor,
num_nodes,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
num_impacted_bins_x, num_impacted_bins_y,
density_map_tensor);
// print density map
//print2DArray(density_map_tensor, num_bins_x, num_bins_y, "potential density_map_tensor");
//printScalar(bin_size_x, "bin_size_x");
//printScalar(bin_size_y, "bin_size_y");
//printScalar(target_area, "target_area");
}
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int computeDensityPotentialMapCudaLauncher<T>(\
const T* x_tensor, const T* y_tensor, \
const T* node_size_x_tensor, const T* node_size_y_tensor, \
const T* ax_tensor, const T* bx_tensor, const T* cx_tensor, \
const T* ay_tensor, const T* by_tensor, const T* cy_tensor, \
const T* bin_center_x_tensor, const T* bin_center_y_tensor, \
const int num_impacted_bins_x, const int num_impacted_bins_y, \
const int mat_size_x, const int mat_size_y, \
const int num_nodes, \
const int num_bins_x, const int num_bins_y, const int padding, \
const T xl, const T yl, const T xh, const T yh, \
const T bin_size_x, const T bin_size_y, \
const T target_area, \
T* density_map_tensor, \
const T* grad_tensor, \
T* grad_x_tensor, T* grad_y_tensor \
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
| df808b47bbc114f928de28fa0b05581296af7966.cu | /**
* @file density_potential_cuda_kernel.cu
* @author Yibo Lin
* @date Jun 2018
* @brief Compute density potential according to NTUPlace3 (https://doi.org/10.1109/TCAD.2008.923063).
* This is for movable and filler cells.
*/
#include <stdio.h>
#include <float.h>
#include <cstdint>
#include "cuda_runtime.h"
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
#if 0
template <typename T>
__global__ void computePaddingDensityMap(
const int num_bins_x, const int num_bins_y,
const int padding,
T* density_map_tensor)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nodes*num_impacted_bins_x*num_impacted_bins_y; i += blockDim.x * gridDim.x)
{
int ix = i/num_bins_y;
int iy = i-ix*num_bins_y;
if (ix < padding)
{
density_map_tensor[i] = density_map_tensor[padding*num_bins_y+iy];
}
}
}
#endif
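// Per-dimension potential evaluated by computeDensityPotentialFunc in the
// kernels below (restated here for reference; a, b, c are per-node coefficients
// precomputed on the host, NTUPlace3 style). With d = |x + size/2 - bin_center|
// and w = size/2 + bin_size:
// p(d) = c * (1 - a*d*d) if d < w
// p(d) = c * b * (d - (w + bin_size))^2 if w <= d < w + bin_size
// p(d) = 0 otherwise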
template <typename T>
__global__ void computeDensityMap(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* ax_tensor, const T* bx_tensor, const T* cx_tensor,
const T* ay_tensor, const T* by_tensor, const T* cy_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
const int num_impacted_bins_x, const int num_impacted_bins_y,
T* density_map_tensor)
{
int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
int64_t bound = int64_t(num_nodes)*num_impacted_bins_x*num_impacted_bins_y;
// rank-one update density map
if (i < bound)
{
// density potential function
auto computeDensityPotentialFunc = [](T x, T node_size, T bin_center, T bin_size, T a, T b, T c){
// from origin to center
x += node_size/2;
//printf("x = %g, bin_center = %g\n", x, bin_center);
T dist = fabs(x-bin_center);
//printf("dist = %g\n", dist);
T partition1 = node_size/2+bin_size;
//printf("partition1 = %g\n", partition1);
T partition2 = partition1+bin_size;
//printf("partition2 = %g\n", partition2);
//printf("a = %g, b = %g, c = %g\n", a, b, c);
if (dist < partition1)
{
return c*(1-a*dist*dist);
}
else if (dist < partition2)
{
return c*(b*(dist-partition2)*(dist-partition2));
}
else
{
return T(0.0);
}
};
int node_id = i/(num_impacted_bins_x*num_impacted_bins_y);
int residual_index = i-node_id*num_impacted_bins_x*num_impacted_bins_y;
// x direction
int bin_index_xl = int((x_tensor[node_id]-xl-2*bin_size_x)/bin_size_x);
bin_index_xl = max(bin_index_xl, 0);
int k = bin_index_xl+int(residual_index / num_impacted_bins_y);
if (k+1 > num_bins_x)
{
return;
}
// y direction
int bin_index_yl = int((y_tensor[node_id]-yl-2*bin_size_y)/bin_size_y);
bin_index_yl = max(bin_index_yl, 0);
int h = bin_index_yl+(residual_index % num_impacted_bins_y);
if (h+1 > num_bins_y)
{
return;
}
T px = computeDensityPotentialFunc(x_tensor[node_id], node_size_x_tensor[node_id], bin_center_x_tensor[k], bin_size_x, ax_tensor[node_id], bx_tensor[node_id], cx_tensor[node_id]);
T py = computeDensityPotentialFunc(y_tensor[node_id], node_size_y_tensor[node_id], bin_center_y_tensor[h], bin_size_y, ay_tensor[node_id], by_tensor[node_id], cy_tensor[node_id]);
//printf("px[%d, %d] = %g, py[%d, %d] = %g\n", k, h, px, k, h, py);
// px*py is this node's contribution to the bin; the map accumulates areas
// (it is compared against target_area in the gradient kernel), not densities
atomicAdd(&density_map_tensor[k*num_bins_y+h], px*py);
}
}
template <typename T>
__global__ void computeDensityGradient(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* ax_tensor, const T* bx_tensor, const T* cx_tensor,
const T* ay_tensor, const T* by_tensor, const T* cy_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
const int num_impacted_bins_x, const int num_impacted_bins_y,
const T* grad_tensor, const T target_area,
const T* density_map_tensor,
T* grad_x_tensor, T* grad_y_tensor
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// rank-one update density map
if (i < num_nodes)
{
// density potential function
auto computeDensityPotentialFunc = [](T x, T node_size, T bin_center, T bin_size, T a, T b, T c){
// from origin to center
x += node_size/2;
//printf("x = %g, bin_center = %g\n", x, bin_center);
T dist = fabs(x-bin_center);
//printf("dist = %g\n", dist);
T partition1 = node_size/2+bin_size;
//printf("partition1 = %g\n", partition1);
T partition2 = partition1+bin_size;
//printf("partition2 = %g\n", partition2);
//printf("a = %g, b = %g, c = %g\n", a, b, c);
if (dist < partition1)
{
return c*(1-a*dist*dist);
}
else if (dist < partition2)
{
return c*(b*(dist-partition2)*(dist-partition2));
}
else
{
return T(0.0);
}
};
// density potential gradient function
auto computeDensityPotentialGradFunc = [](T x, T node_size, T bin_center, T bin_size, T a, T b, T c){
// from origin to center
x += node_size/2;
T dist = fabs(x-bin_center);
T partition1 = node_size/2+bin_size;
T partition2 = partition1+bin_size;
if (dist < partition1)
{
return -2*c*a*(x-bin_center);
}
else if (dist < partition2)
{
T sign = (x < bin_center)? -1.0 : 1.0;
return 2*c*b*(dist-partition2)*sign;
}
else
{
return T(0.0);
}
};
int bin_index_xl = int((x_tensor[i]-xl-2*bin_size_x)/bin_size_x);
int bin_index_xh = int(ceil((x_tensor[i]-xl+node_size_x_tensor[i]+2*bin_size_x)/bin_size_x))+1; // exclusive
bin_index_xl = max(bin_index_xl, 0);
// be careful about the bin_index_xl and bin_index_xh here
// the assumption is that num_bins_x >= num_impacted_bins_x
// each row of the px matrix should be filled with num_impacted_bins_x columns
bin_index_xl = min(bin_index_xl, num_bins_x-num_impacted_bins_x);
bin_index_xh = min(bin_index_xh, num_bins_x);
//int bin_index_xh = bin_index_xl+num_impacted_bins_x;
int bin_index_yl = int((y_tensor[i]-yl-2*bin_size_y)/bin_size_y);
int bin_index_yh = int(ceil((y_tensor[i]-yl+node_size_y_tensor[i]+2*bin_size_y)/bin_size_y))+1; // exclusive
bin_index_yl = max(bin_index_yl, 0);
// be careful about the bin_index_yl and bin_index_yh here
// the assumption is that num_bins_y >= num_impacted_bins_y
// each row of the py matrix should be filled with num_impacted_bins_y columns
bin_index_yl = min(bin_index_yl, num_bins_y-num_impacted_bins_y);
bin_index_yh = min(bin_index_yh, num_bins_y);
//int bin_index_yh = bin_index_yl+num_impacted_bins_y;
grad_x_tensor[i] = 0;
grad_y_tensor[i] = 0;
// update density potential map
for (int k = bin_index_xl; k < bin_index_xh; ++k)
{
T px = computeDensityPotentialFunc(x_tensor[i], node_size_x_tensor[i], bin_center_x_tensor[k], bin_size_x, ax_tensor[i], bx_tensor[i], cx_tensor[i]);
T gradx = computeDensityPotentialGradFunc(x_tensor[i], node_size_x_tensor[i], bin_center_x_tensor[k], bin_size_x, ax_tensor[i], bx_tensor[i], cx_tensor[i]);
for (int h = bin_index_yl; h < bin_index_yh; ++h)
{
T py = computeDensityPotentialFunc(y_tensor[i], node_size_y_tensor[i], bin_center_y_tensor[h], bin_size_y, ay_tensor[i], by_tensor[i], cy_tensor[i]);
T grady = computeDensityPotentialGradFunc(y_tensor[i], node_size_y_tensor[i], bin_center_y_tensor[h], bin_size_y, ay_tensor[i], by_tensor[i], cy_tensor[i]);
T delta = density_map_tensor[k*num_bins_y+h]-target_area;
//delta = max(delta, (T)0);
grad_x_tensor[i] += 2*delta*py*gradx;
grad_y_tensor[i] += 2*delta*px*grady;
}
}
grad_x_tensor[i] *= *grad_tensor;
grad_y_tensor[i] *= *grad_tensor;
}
}
template <typename T>
int computeDensityPotentialMapCudaLauncher(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* ax_tensor, const T* bx_tensor, const T* cx_tensor,
const T* ay_tensor, const T* by_tensor, const T* cy_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_impacted_bins_x, const int num_impacted_bins_y,
const int mat_size_x, const int mat_size_y,
const int num_nodes,
const int num_bins_x, const int num_bins_y, const int padding,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
const T target_area,
T* density_map_tensor,
const T* grad_tensor,
T* grad_x_tensor, T* grad_y_tensor
)
{
int64_t block_count;
int64_t thread_count = 512;
// compute gradient
if (grad_tensor)
{
block_count = (num_nodes - 1 + thread_count) / thread_count;
computeDensityGradient<<<block_count, thread_count>>>(
x_tensor, y_tensor,
node_size_x_tensor, node_size_y_tensor,
ax_tensor, bx_tensor, cx_tensor,
ay_tensor, by_tensor, cy_tensor,
bin_center_x_tensor, bin_center_y_tensor,
num_nodes,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
num_impacted_bins_x, num_impacted_bins_y,
grad_tensor, target_area,
density_map_tensor,
grad_x_tensor, grad_y_tensor
);
// print gradient
//printArray(grad_x_tensor, 10, "grad_x_tensor");
//printArray(grad_y_tensor, 10, "grad_y_tensor");
}
else
{
block_count = (int64_t(num_nodes)*num_impacted_bins_x*num_impacted_bins_y - 1 + thread_count) / thread_count;
computeDensityMap<<<block_count, thread_count>>>(
x_tensor, y_tensor,
node_size_x_tensor, node_size_y_tensor,
ax_tensor, bx_tensor, cx_tensor,
ay_tensor, by_tensor, cy_tensor,
bin_center_x_tensor, bin_center_y_tensor,
num_nodes,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
num_impacted_bins_x, num_impacted_bins_y,
density_map_tensor);
// print density map
//print2DArray(density_map_tensor, num_bins_x, num_bins_y, "potential density_map_tensor");
//printScalar(bin_size_x, "bin_size_x");
//printScalar(bin_size_y, "bin_size_y");
//printScalar(target_area, "target_area");
}
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int computeDensityPotentialMapCudaLauncher<T>(\
const T* x_tensor, const T* y_tensor, \
const T* node_size_x_tensor, const T* node_size_y_tensor, \
const T* ax_tensor, const T* bx_tensor, const T* cx_tensor, \
const T* ay_tensor, const T* by_tensor, const T* cy_tensor, \
const T* bin_center_x_tensor, const T* bin_center_y_tensor, \
const int num_impacted_bins_x, const int num_impacted_bins_y, \
const int mat_size_x, const int mat_size_y, \
const int num_nodes, \
const int num_bins_x, const int num_bins_y, const int padding, \
const T xl, const T yl, const T xh, const T yh, \
const T bin_size_x, const T bin_size_y, \
const T target_area, \
T* density_map_tensor, \
const T* grad_tensor, \
T* grad_x_tensor, T* grad_y_tensor \
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
25ff38772c8c8f9b1d701fb17065c24ebfed1c20.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "oneMinusTanh.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(float));
float *in = NULL;
hipMalloc(&in, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
oneMinusTanh), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
oneMinusTanh), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,size);
}
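// The 10 launches above are a warm-up. The timed region below enqueues the
// kernel 1000 times; there is no device synchronization before `end`, so the
// microsecond count printed afterwards measures host-side launch time for this
// (block, matrix) configuration rather than guaranteed kernel completion.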
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
oneMinusTanh), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 25ff38772c8c8f9b1d701fb17065c24ebfed1c20.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "oneMinusTanh.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(float));
float *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
oneMinusTanh<<<gridBlock,threadBlock>>>(out,in,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
oneMinusTanh<<<gridBlock,threadBlock>>>(out,in,size);
}
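// The 10 launches above are a warm-up. The timed region below enqueues the
// kernel 1000 times; there is no device synchronization before `end`, so the
// microsecond count printed afterwards measures host-side launch time for this
// (block, matrix) configuration rather than guaranteed kernel completion.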
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
oneMinusTanh<<<gridBlock,threadBlock>>>(out,in,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
16c3781e19f4ee21bc7095c603026da2db016958.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel3_minus_2_a [3][2];
static int dims_update_halo_kernel3_minus_2_a_h [3][2] = {0};
//user function
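// Fills the x-direction halo cell at offset 0 with the negated flux taken two
// cells inside the domain (offset +2), but only for the fields flagged in
// `fields` (vol_flux_x and/or mass_flux_x).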
__device__
inline void update_halo_kernel3_minus_2_a_gpu(ACC<double> &vol_flux_x,
ACC<double> &mass_flux_x,
const int* fields) {
if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x(0,0,0) = -(vol_flux_x(2,0,0));
if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x(0,0,0) = -(mass_flux_x(2,0,0));
}
__global__ void ops_update_halo_kernel3_minus_2_a(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_minus_2_a[0][0] + idx_z * 1*1 * dims_update_halo_kernel3_minus_2_a[0][0] * dims_update_halo_kernel3_minus_2_a[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_minus_2_a[1][0] + idx_z * 1*1 * dims_update_halo_kernel3_minus_2_a[1][0] * dims_update_halo_kernel3_minus_2_a[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel3_minus_2_a[0][0], dims_update_halo_kernel3_minus_2_a[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel3_minus_2_a[1][0], dims_update_halo_kernel3_minus_2_a[1][1], arg1);
update_halo_kernel3_minus_2_a_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel3_minus_2_a_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,64)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(64,"update_halo_kernel3_minus_2_a");
OPS_kernels[64].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel3_minus_2_a_h[0][0] || ydim0 != dims_update_halo_kernel3_minus_2_a_h[0][1] || xdim1 != dims_update_halo_kernel3_minus_2_a_h[1][0] || ydim1 != dims_update_halo_kernel3_minus_2_a_h[1][1]) {
dims_update_halo_kernel3_minus_2_a_h[0][0] = xdim0;
dims_update_halo_kernel3_minus_2_a_h[0][1] = ydim0;
dims_update_halo_kernel3_minus_2_a_h[1][0] = xdim1;
dims_update_halo_kernel3_minus_2_a_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel3_minus_2_a, dims_update_halo_kernel3_minus_2_a_h, sizeof(dims_update_halo_kernel3_minus_2_a)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[64].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel3_minus_2_a), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[64].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[64].mpi_time += t2-t1;
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 64;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 64;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel3_minus_2_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(64,"update_halo_kernel3_minus_2_a");
}
ops_enqueue_kernel(desc);
}
#endif
| 16c3781e19f4ee21bc7095c603026da2db016958.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel3_minus_2_a [3][2];
static int dims_update_halo_kernel3_minus_2_a_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel3_minus_2_a_gpu(ACC<double> &vol_flux_x,
ACC<double> &mass_flux_x,
const int* fields) {
if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x(0,0,0) = -(vol_flux_x(2,0,0));
if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x(0,0,0) = -(mass_flux_x(2,0,0));
}
__global__ void ops_update_halo_kernel3_minus_2_a(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_minus_2_a[0][0] + idx_z * 1*1 * dims_update_halo_kernel3_minus_2_a[0][0] * dims_update_halo_kernel3_minus_2_a[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_minus_2_a[1][0] + idx_z * 1*1 * dims_update_halo_kernel3_minus_2_a[1][0] * dims_update_halo_kernel3_minus_2_a[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel3_minus_2_a[0][0], dims_update_halo_kernel3_minus_2_a[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel3_minus_2_a[1][0], dims_update_halo_kernel3_minus_2_a[1][1], arg1);
update_halo_kernel3_minus_2_a_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel3_minus_2_a_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,64)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(64,"update_halo_kernel3_minus_2_a");
OPS_kernels[64].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel3_minus_2_a_h[0][0] || ydim0 != dims_update_halo_kernel3_minus_2_a_h[0][1] || xdim1 != dims_update_halo_kernel3_minus_2_a_h[1][0] || ydim1 != dims_update_halo_kernel3_minus_2_a_h[1][1]) {
dims_update_halo_kernel3_minus_2_a_h[0][0] = xdim0;
dims_update_halo_kernel3_minus_2_a_h[0][1] = ydim0;
dims_update_halo_kernel3_minus_2_a_h[1][0] = xdim1;
dims_update_halo_kernel3_minus_2_a_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel3_minus_2_a, dims_update_halo_kernel3_minus_2_a_h, sizeof(dims_update_halo_kernel3_minus_2_a)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[64].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel3_minus_2_a<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[64].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[64].mpi_time += t2-t1;
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 64;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 64;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel3_minus_2_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(64,"update_halo_kernel3_minus_2_a");
}
ops_enqueue_kernel(desc);
}
#endif
|
c21735a95052273b5b07c19e85b13946f92b8174.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
using namespace std;
#include <cassert>
#include <iomanip>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <rocblas.h>
#include "complex.h"
#include "matutils.h"
#include "mat.h"
#include "fftwinterface.h"
#include "cumath.h"
inline int number_of_blocks(const int n_threads, const int n)
{ return n/n_threads*n_threads == n ? n/n_threads : n/n_threads+1; }
static __global__ void _psi_times_kinitic_energy_(Complex *psiOut, const Complex *psiIn,
const double xl, const int nx, const double dx,
const double yl, const int ny, const double dy)
{
extern __shared__ double s_data[];
double *Tx = (double *) s_data;
double *Ty = (double *) &Tx[nx];
cumath::setup_kinetic_energy_for_fft(Tx, nx, xl+dx, 1.0);
cumath::setup_kinetic_energy_for_fft(Ty, ny, yl+dy, 1.0);
__syncthreads();
const int index = threadIdx.x + blockDim.x*blockIdx.x;
if(index < nx*ny) {
int i = -1; int j = -1;
cumath::index_2_ij(index, nx, ny, i, j);
psiOut[index] = psiIn[index]*(Tx[i] + Ty[j]);
}
}
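// Wavenumbers follow the unshifted FFT ordering: k_i = (2*pi/L)*i for i < n/2 and
// k_i = (2*pi/L)*(i-n) for i >= n/2, where L is the xl argument (callers pass the
// domain length plus one grid spacing), so kin[i] = k_i^2/(2*mass) matches the
// layout of a forward transform; see the MATLAB check at the end of this file.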
void setup_kinetic_energy_for_fft(double *kin, const int n, const double xl, const double mass)
{
insist(n/2*2 == n);
const double two_pi_xl = 2*Pi/xl;
for(int i = 0; i < n; i++) {
if(i < n/2) {
kin[i] = cumath::sq(two_pi_xl*i)/(mass+mass);
} else if(i >= n/2) {
kin[i] = cumath::sq(two_pi_xl*(-n+i))/(mass+mass);
}
}
}
void calculate_kinetic_energy(double &ke,
const Complex *phi,
const double xl, const int nx, const double dx,
const double yl, const int ny, const double dy)
{
double *kx = new double [nx];
insist(kx);
setup_kinetic_energy_for_fft(kx, nx, xl+dx, 1.0);
double *ky = new double [ny];
insist(ky);
setup_kinetic_energy_for_fft(ky, ny, yl+dy, 1.0);
Complex s(0.0, 0.0);
for(int k = 0; k < nx*ny; k++) {
int i = -1; int j = -1;
cumath::index_2_ij(k, nx, ny, i, j);
s += abs2(phi[k])*(kx[i] + ky[j]);
}
ke = s.real()*dx*dy/nx/ny;
if(kx) { delete [] kx; kx = 0; }
if(ky) { delete [] ky; ky = 0; }
}
void cuda_test()
{
cout << "Harmonic oscillator test" << endl;
cout << " sizeof(Complex) = " << sizeof(Complex) << endl;
const double pi = Pi;
const double pi_1_4 = pow(pi, -0.25);
const int m = 160;
const int nx = 1024;
const int ny = 2048;
Complex *phi = new Complex [nx*ny];
insist(phi);
const double xl = 54.248;
const double dx = xl/(nx-1);
const double yl = 34.896;
const double dy = yl/(ny-1);
cout << " dx: " << dx << " dy: " << dy << endl;
cout << " " << -0.5*xl << " " << -0.5*xl+(nx-1)*dx << endl;
int k = 0;
double y = -0.5*yl;
for(int j = 0; j < ny; j++) {
const Complex phiy = Complex(pi_1_4*sqrt(2.0)*y*exp(-0.5*y*y), 0.0);
double x = -0.5*xl;
for(int i = 0; i < nx; i++) {
const Complex phix = Complex(pi_1_4*exp(-0.5*x*x), 0.0);
phi[k] = phix*phiy;
x += dx;
k++;
}
y += dy;
}
Complex dot = Complex(0.0, 0.0);
for(int i = 0; i < nx*ny; i++) { dot += phi[i]*conj(phi[i]); }
cout << dot*dx*dy << endl;
// CPU version test
FFTWInterface fftw((double *) phi, nx, ny, FFTW_ESTIMATE, 1);
fftw.forward_transform();
double ke = 0.0;
calculate_kinetic_energy(ke, phi, xl, nx, dx, yl, ny, dy);
fftw.backward_transform();
for(int i = 0; i < nx*ny; i++) { phi[i] /= nx*ny; }
cout << " CPU kinetic energy: " << ke << endl;
Complex *phi_dev = 0;
checkCudaErrors(hipMalloc((void **) &phi_dev, nx*ny*m*sizeof(Complex)));
for(int j = 0; j < m; j++)
checkCudaErrors(hipMemcpy(phi_dev+j*nx*ny, phi, nx*ny*sizeof(Complex), hipMemcpyHostToDevice));
Complex *work_dev = 0;
checkCudaErrors(hipMalloc(&work_dev, nx*ny*sizeof(Complex)));
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
/* CUFFT performs FFTs in row-major or C order.
For example, if the user requests a 3D transform plan for sizes X, Y, and Z,
CUFFT transforms along Z, Y, and then X.
The user can configure column-major FFTs by simply changing the order of size parameters
to the plan creation API functions.
*/
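/* For the buffer filled above (x fastest, y slowest, i.e. nx contiguous), the plan
below is therefore created with dim[] = { ny, nx }: the last entry is the innermost,
contiguous dimension. Reversing the two entries would make the library read the
buffer with the wrong strides. */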
hipfftHandle cufft_plan;
int dim[] = { ny, nx };
insist(hipfftPlanMany(&cufft_plan, 2, dim, NULL, 1, 0, NULL, 1, 0, HIPFFT_Z2Z, m) == HIPFFT_SUCCESS);
hipblasHandle_t cublas_handle;
insist(hipblasCreate(&cublas_handle) == HIPBLAS_STATUS_SUCCESS);
for(int i = 0; i < 1000; i++) {
cout << "\n Loop " << i << endl;
sdkResetTimer(&timer); sdkStartTimer(&timer);
dot.zero();
for(int k = 0; k < m; k++) {
hipDoubleComplex *phi_dev_ = (hipfftDoubleComplex *) phi_dev + k*nx*ny;
Complex dot_(0.0, 0.0);
assert(hipblasZdotc(cublas_handle, nx*ny, phi_dev_, 1, phi_dev_, 1, (hipfftDoubleComplex *) &dot_)
== HIPBLAS_STATUS_SUCCESS);
dot += dot_;
}
cout << " Norm before FFT: " << dot.real()*(dx*dy)/m << endl;
checkCudaErrors(hipfftExecZ2Z(cufft_plan, (hipfftDoubleComplex *) phi_dev, (hipfftDoubleComplex *) phi_dev, HIPFFT_FORWARD));
const int n_threads = 512;
const int n_blocks = number_of_blocks(n_threads, nx*ny);
hipDoubleComplex *phi_tmp_dev = (hipDoubleComplex *) work_dev;
double kinetic_energy = 0.0;
double ke_cpu = 0.0;
for(int k = 0; k < m; k++) {
const hipDoubleComplex *phi_dev_ = (hipDoubleComplex *) phi_dev + k*nx*ny;
hipLaunchKernelGGL(( _psi_times_kinitic_energy_), dim3(n_blocks), dim3(n_threads), (nx+ny)*sizeof(double), 0,
(Complex *) phi_tmp_dev, (const Complex *) phi_dev_, xl, nx, dx, yl, ny, dy);
checkCudaErrors(hipDeviceSynchronize());
Complex dot(0.0, 0.0);
insist(hipblasZdotc(cublas_handle, nx*ny, phi_dev_, 1, phi_tmp_dev, 1, (hipDoubleComplex *) &dot)
== HIPBLAS_STATUS_SUCCESS);
kinetic_energy += dot.real();
memset(phi, 0, nx*ny*sizeof(Complex));
double ke = 0.0;
checkCudaErrors(hipMemcpy(phi, phi_dev, nx*ny*sizeof(Complex), hipMemcpyDeviceToHost));
calculate_kinetic_energy(ke, phi, xl, nx, dx, yl, ny, dy);
ke_cpu += ke;
}
cout << " Kinetic energy GPU: " << kinetic_energy*dx*dy/(nx*ny*m) << endl;
cout << " Kinetic energy CPU: " << ke_cpu/m << endl;
dot.zero();
for(int k = 0; k < m; k++) {
hipDoubleComplex *phi_dev_ = (hipfftDoubleComplex *) phi_dev + k*nx*ny;
Complex dot_(0.0, 0.0);
assert(hipblasZdotc(cublas_handle, nx*ny, phi_dev_, 1, phi_dev_, 1, (hipfftDoubleComplex *) &dot_)
== HIPBLAS_STATUS_SUCCESS);
dot += dot_;
}
cout << " Norm after forward FFT: " << dot.real()*(dx*dy)/(m*nx*ny) << endl;
checkCudaErrors(hipfftExecZ2Z(cufft_plan, (hipfftDoubleComplex *) phi_dev, (hipfftDoubleComplex *) phi_dev, HIPFFT_BACKWARD));
const double s = 1.0/(nx*ny);
insist(hipblasZdscal(cublas_handle, nx*ny*m, &s, (hipDoubleComplex *) phi_dev, 1) == HIPBLAS_STATUS_SUCCESS);
dot.zero();
for(int k = 0; k < m; k++) {
hipDoubleComplex *phiDev_ = (hipfftDoubleComplex *) phi_dev + k*nx*ny;
Complex dot_(0.0, 0.0);
assert(hipblasZdotc(cublas_handle, nx*ny, phiDev_, 1, phiDev_, 1, (hipfftDoubleComplex *) &dot_)
== HIPBLAS_STATUS_SUCCESS);
dot += dot_;
}
cout << " Norm after backward FFT: " << dot.real()*(dx*dy)/m << endl;
sdkStopTimer(&timer);
double reduceTime = sdkGetAverageTimerValue(&timer);
cout << " GPU time: " << reduceTime*1e-3 << endl;
}
if(timer) sdkDeleteTimer(&timer);
insist(hipblasDestroy(cublas_handle) == HIPBLAS_STATUS_SUCCESS);
if(phi_dev) { checkCudaErrors(hipFree(phi_dev)); phi_dev = 0; }
if(work_dev) { checkCudaErrors(hipFree(work_dev)); work_dev = 0; }
if(phi) { delete [] phi; phi = 0; }
}
/***
clc
clear all
format long
xL = 54.248
n = 1024;
x = linspace(-xL/2, xL/2, n);
dx = x(2) - x(1)
V = 1/2*x.*x;
f = 1/pi^(1/4)*exp(-1/2*x.^2);
sum(conj(f).*f)*dx
sum(conj(f).*V.*f)*dx
f = fft(f);
sum(conj(f).*f)/n*dx
L = xL + dx
N = n;
k = (2*pi/L)*[0:N/2 (-N/2+1):(-1)];
sum(conj(f).*k.^2.*f)/n*dx/2
=== Output ===
xL =
54.247999999999998
dx =
0.053028347996090
ans =
1.000000000000001
ans =
0.250000000000000
ans =
1.000000000000002
L =
54.301028347996088
ans =
0.250000000000000
***/
| c21735a95052273b5b07c19e85b13946f92b8174.cu |
#include <iostream>
using namespace std;
#include <cassert>
#include <iomanip>
#include <cuda_runtime.h>
#include <cufft.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <cublas_v2.h>
#include "complex.h"
#include "matutils.h"
#include "mat.h"
#include "fftwinterface.h"
#include "cumath.h"
inline int number_of_blocks(const int n_threads, const int n)
{ return n/n_threads*n_threads == n ? n/n_threads : n/n_threads+1; }
static __global__ void _psi_times_kinitic_energy_(Complex *psiOut, const Complex *psiIn,
const double xl, const int nx, const double dx,
const double yl, const int ny, const double dy)
{
extern __shared__ double s_data[];
double *Tx = (double *) s_data;
double *Ty = (double *) &Tx[nx];
cumath::setup_kinetic_energy_for_fft(Tx, nx, xl+dx, 1.0);
cumath::setup_kinetic_energy_for_fft(Ty, ny, yl+dy, 1.0);
__syncthreads();
const int index = threadIdx.x + blockDim.x*blockIdx.x;
if(index < nx*ny) {
int i = -1; int j = -1;
cumath::index_2_ij(index, nx, ny, i, j);
psiOut[index] = psiIn[index]*(Tx[i] + Ty[j]);
}
}
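// Wavenumbers follow the unshifted FFT ordering: k_i = (2*pi/L)*i for i < n/2 and
// k_i = (2*pi/L)*(i-n) for i >= n/2, where L is the xl argument (callers pass the
// domain length plus one grid spacing), so kin[i] = k_i^2/(2*mass) matches the
// layout of a forward transform; see the MATLAB check at the end of this file.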
void setup_kinetic_energy_for_fft(double *kin, const int n, const double xl, const double mass)
{
insist(n/2*2 == n);
const double two_pi_xl = 2*Pi/xl;
for(int i = 0; i < n; i++) {
if(i < n/2) {
kin[i] = cumath::sq(two_pi_xl*i)/(mass+mass);
} else if(i >= n/2) {
kin[i] = cumath::sq(two_pi_xl*(-n+i))/(mass+mass);
}
}
}
void calculate_kinetic_energy(double &ke,
const Complex *phi,
const double xl, const int nx, const double dx,
const double yl, const int ny, const double dy)
{
double *kx = new double [nx];
insist(kx);
setup_kinetic_energy_for_fft(kx, nx, xl+dx, 1.0);
double *ky = new double [ny];
insist(ky);
setup_kinetic_energy_for_fft(ky, ny, yl+dy, 1.0);
Complex s(0.0, 0.0);
for(int k = 0; k < nx*ny; k++) {
int i = -1; int j = -1;
cumath::index_2_ij(k, nx, ny, i, j);
s += abs2(phi[k])*(kx[i] + ky[j]);
}
ke = s.real()*dx*dy/nx/ny;
if(kx) { delete [] kx; kx = 0; }
if(ky) { delete [] ky; ky = 0; }
}
void cuda_test()
{
cout << "Harmonic oscillator test" << endl;
cout << " sizeof(Complex) = " << sizeof(Complex) << endl;
const double pi = Pi;
const double pi_1_4 = pow(pi, -0.25);
const int m = 160;
const int nx = 1024;
const int ny = 2048;
Complex *phi = new Complex [nx*ny];
insist(phi);
const double xl = 54.248;
const double dx = xl/(nx-1);
const double yl = 34.896;
const double dy = yl/(ny-1);
cout << " dx: " << dx << " dy: " << dy << endl;
cout << " " << -0.5*xl << " " << -0.5*xl+(nx-1)*dx << endl;
int k = 0;
double y = -0.5*yl;
for(int j = 0; j < ny; j++) {
const Complex phiy = Complex(pi_1_4*sqrt(2.0)*y*exp(-0.5*y*y), 0.0);
double x = -0.5*xl;
for(int i = 0; i < nx; i++) {
const Complex phix = Complex(pi_1_4*exp(-0.5*x*x), 0.0);
phi[k] = phix*phiy;
x += dx;
k++;
}
y += dy;
}
Complex dot = Complex(0.0, 0.0);
for(int i = 0; i < nx*ny; i++) { dot += phi[i]*conj(phi[i]); }
cout << dot*dx*dy << endl;
// CPU version test
FFTWInterface fftw((double *) phi, nx, ny, FFTW_ESTIMATE, 1);
fftw.forward_transform();
double ke = 0.0;
calculate_kinetic_energy(ke, phi, xl, nx, dx, yl, ny, dy);
fftw.backward_transform();
for(int i = 0; i < nx*ny; i++) { phi[i] /= nx*ny; }
cout << " CPU kinetic energy: " << ke << endl;
Complex *phi_dev = 0;
checkCudaErrors(cudaMalloc((void **) &phi_dev, nx*ny*m*sizeof(Complex)));
for(int j = 0; j < m; j++)
checkCudaErrors(cudaMemcpy(phi_dev+j*nx*ny, phi, nx*ny*sizeof(Complex), cudaMemcpyHostToDevice));
Complex *work_dev = 0;
checkCudaErrors(cudaMalloc(&work_dev, nx*ny*sizeof(Complex)));
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
/* CUFFT performs FFTs in row-major or C order.
For example, if the user requests a 3D transform plan for sizes X, Y, and Z,
CUFFT transforms along Z, Y, and then X.
The user can configure column-major FFTs by simply changing the order of size parameters
to the plan creation API functions.
*/
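/* For the buffer filled above (x fastest, y slowest, i.e. nx contiguous), the plan
below is therefore created with dim[] = { ny, nx }: the last entry is the innermost,
contiguous dimension. Reversing the two entries would make the library read the
buffer with the wrong strides. */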
cufftHandle cufft_plan;
int dim[] = { ny, nx };
insist(cufftPlanMany(&cufft_plan, 2, dim, NULL, 1, 0, NULL, 1, 0, CUFFT_Z2Z, m) == CUFFT_SUCCESS);
cublasHandle_t cublas_handle;
insist(cublasCreate(&cublas_handle) == CUBLAS_STATUS_SUCCESS);
for(int i = 0; i < 1000; i++) {
cout << "\n Loop " << i << endl;
sdkResetTimer(&timer); sdkStartTimer(&timer);
dot.zero();
for(int k = 0; k < m; k++) {
cuDoubleComplex *phi_dev_ = (cufftDoubleComplex *) phi_dev + k*nx*ny;
Complex dot_(0.0, 0.0);
assert(cublasZdotc(cublas_handle, nx*ny, phi_dev_, 1, phi_dev_, 1, (cufftDoubleComplex *) &dot_)
== CUBLAS_STATUS_SUCCESS);
dot += dot_;
}
cout << " Norm before FFT: " << dot.real()*(dx*dy)/m << endl;
checkCudaErrors(cufftExecZ2Z(cufft_plan, (cufftDoubleComplex *) phi_dev, (cufftDoubleComplex *) phi_dev, CUFFT_FORWARD));
const int n_threads = 512;
const int n_blocks = number_of_blocks(n_threads, nx*ny);
cuDoubleComplex *phi_tmp_dev = (cuDoubleComplex *) work_dev;
double kinetic_energy = 0.0;
double ke_cpu = 0.0;
for(int k = 0; k < m; k++) {
const cuDoubleComplex *phi_dev_ = (cuDoubleComplex *) phi_dev + k*nx*ny;
_psi_times_kinitic_energy_<<<n_blocks, n_threads, (nx+ny)*sizeof(double)>>>
((Complex *) phi_tmp_dev, (const Complex *) phi_dev_, xl, nx, dx, yl, ny, dy);
checkCudaErrors(cudaDeviceSynchronize());
Complex dot(0.0, 0.0);
insist(cublasZdotc(cublas_handle, nx*ny, phi_dev_, 1, phi_tmp_dev, 1, (cuDoubleComplex *) &dot)
== CUBLAS_STATUS_SUCCESS);
kinetic_energy += dot.real();
memset(phi, 0, nx*ny*sizeof(Complex));
double ke = 0.0;
checkCudaErrors(cudaMemcpy(phi, phi_dev, nx*ny*sizeof(Complex), cudaMemcpyDeviceToHost));
calculate_kinetic_energy(ke, phi, xl, nx, dx, yl, ny, dy);
ke_cpu += ke;
}
cout << " Kinetic energy GPU: " << kinetic_energy*dx*dy/(nx*ny*m) << endl;
cout << " Kinetic energy CPU: " << ke_cpu/m << endl;
dot.zero();
for(int k = 0; k < m; k++) {
cuDoubleComplex *phi_dev_ = (cufftDoubleComplex *) phi_dev + k*nx*ny;
Complex dot_(0.0, 0.0);
assert(cublasZdotc(cublas_handle, nx*ny, phi_dev_, 1, phi_dev_, 1, (cufftDoubleComplex *) &dot_)
== CUBLAS_STATUS_SUCCESS);
dot += dot_;
}
cout << " Norm after forward FFT: " << dot.real()*(dx*dy)/(m*nx*ny) << endl;
checkCudaErrors(cufftExecZ2Z(cufft_plan, (cufftDoubleComplex *) phi_dev, (cufftDoubleComplex *) phi_dev, CUFFT_INVERSE));
const double s = 1.0/(nx*ny);
insist(cublasZdscal(cublas_handle, nx*ny*m, &s, (cuDoubleComplex *) phi_dev, 1) == CUBLAS_STATUS_SUCCESS);
dot.zero();
for(int k = 0; k < m; k++) {
cuDoubleComplex *phiDev_ = (cufftDoubleComplex *) phi_dev + k*nx*ny;
Complex dot_(0.0, 0.0);
assert(cublasZdotc(cublas_handle, nx*ny, phiDev_, 1, phiDev_, 1, (cufftDoubleComplex *) &dot_)
== CUBLAS_STATUS_SUCCESS);
dot += dot_;
}
cout << " Norm after backward FFT: " << dot.real()*(dx*dy)/m << endl;
sdkStopTimer(&timer);
double reduceTime = sdkGetAverageTimerValue(&timer);
cout << " GPU time: " << reduceTime*1e-3 << endl;
}
if(timer) sdkDeleteTimer(&timer);
insist(cublasDestroy(cublas_handle) == CUBLAS_STATUS_SUCCESS);
if(phi_dev) { checkCudaErrors(cudaFree(phi_dev)); phi_dev = 0; }
if(work_dev) { checkCudaErrors(cudaFree(work_dev)); work_dev = 0; }
if(phi) { delete [] phi; phi = 0; }
}
/***
clc
clear all
format long
xL = 54.248
n = 1024;
x = linspace(-xL/2, xL/2, n);
dx = x(2) - x(1)
V = 1/2*x.*x;
f = 1/pi^(1/4)*exp(-1/2*x.^2);
sum(conj(f).*f)*dx
sum(conj(f).*V.*f)*dx
f = fft(f);
sum(conj(f).*f)/n*dx
L = xL + dx
N = n;
k = (2*pi/L)*[0:N/2 (-N/2+1):(-1)];
sum(conj(f).*k.^2.*f)/n*dx/2
=== Output ===
xL =
54.247999999999998
dx =
0.053028347996090
ans =
1.000000000000001
ans =
0.250000000000000
ans =
1.000000000000002
L =
54.301028347996088
ans =
0.250000000000000
***/
|
75d6d91187e364aac137e19cd25644f6d93d7a9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "cublas_wrappers.h"
#ifdef __HIP_PLATFORM_HCC__
int cublas_gemm_ex(rocblas_handle handle,
rocblas_operation transa,
rocblas_operation transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
rocblas_gemm_algo algo)
#else
int cublas_gemm_ex(hipblasHandle_t handle,
hipblasOperation_t transa,
hipblasOperation_t transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
hipblasGemmAlgo_t algo)
#endif
{
#ifdef __HIP_PLATFORM_HCC__
rocblas_status status = rocblas_gemm_ex(handle,
transa,
transb,
m,
n,
k,
(const void*)alpha,
(const void*)A,
rocblas_datatype_f32_r,
(transa == rocblas_operation_none) ? m : k,
(const void*)B,
rocblas_datatype_f32_r,
(transb == rocblas_operation_none) ? k : n,
(const void*)beta,
C,
rocblas_datatype_f32_r,
m,
C,
rocblas_datatype_f32_r,
m,
rocblas_datatype_f32_r,
algo,
0,
0);
#else
hipblasStatus_t status = hipblasGemmEx(handle,
transa,
transb,
m,
n,
k,
(const void*)alpha,
(const void*)A,
HIP_R_32F,
(transa == HIPBLAS_OP_N) ? m : k,
(const void*)B,
HIP_R_32F,
(transb == HIPBLAS_OP_N) ? k : n,
(const void*)beta,
C,
HIP_R_32F,
m,
HIP_R_32F,
algo);
#endif
#ifdef __HIP_PLATFORM_HCC__
if (status != rocblas_status_success) {
#else
if (status != HIPBLAS_STATUS_SUCCESS) {
#endif
fprintf(stderr,
"!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n",
m,
n,
k,
(int)status);
return EXIT_FAILURE;
}
return 0;
}
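/* Usage sketch (illustrative only; handle, d_A, d_B and d_C are hypothetical and not
defined in this file): for column-major A (m x k), B (k x n) and C (m x n) already
resident on the device,
float alpha = 1.f, beta = 0.f;
cublas_gemm_ex(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k,
&alpha, &beta, d_A, d_B, d_C, HIPBLAS_GEMM_DEFAULT);
(rocblas_operation_none / rocblas_gemm_algo_standard on the HCC path) computes
C = alpha*A*B + beta*C; the leading dimensions are derived from m, k and the
transpose flags, so no explicit lda/ldb/ldc arguments are taken. */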
#ifdef __HIP_PLATFORM_HCC__
int cublas_gemm_ex(rocblas_handle handle,
rocblas_operation transa,
rocblas_operation transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
rocblas_gemm_algo algo)
#else
int cublas_gemm_ex(hipblasHandle_t handle,
hipblasOperation_t transa,
hipblasOperation_t transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
hipblasGemmAlgo_t algo)
#endif
{
#ifdef __HIP_PLATFORM_HCC__
rocblas_status status = rocblas_gemm_ex(handle,
transa,
transb,
m,
n,
k,
(const void*)alpha,
(const void*)A,
rocblas_datatype_f16_r,
(transa == rocblas_operation_none) ? m : k,
(const void*)B,
rocblas_datatype_f16_r,
(transb == rocblas_operation_none) ? k : n,
(const void*)beta,
(void*)C,
rocblas_datatype_f16_r,
m,
(void*)C,
rocblas_datatype_f16_r,
m,
rocblas_datatype_f32_r,
algo,
0,
0);
#else
hipblasStatus_t status = hipblasGemmEx(handle,
transa,
transb,
m,
n,
k,
(const void*)alpha,
(const void*)A,
HIP_R_16F,
(transa == HIPBLAS_OP_N) ? m : k,
(const void*)B,
HIP_R_16F,
(transb == HIPBLAS_OP_N) ? k : n,
(const void*)beta,
(void*)C,
HIP_R_16F,
m,
HIP_R_32F,
algo);
#endif
#ifdef __HIP_PLATFORM_HCC__
if (status != rocblas_status_success) {
#else
if (status != HIPBLAS_STATUS_SUCCESS) {
#endif
fprintf(stderr,
"!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n",
m,
n,
k,
(int)status);
return EXIT_FAILURE;
}
return 0;
}
#ifdef __HIP_PLATFORM_HCC__
int cublas_strided_batched_gemm(rocblas_handle handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
rocblas_operation op_A,
rocblas_operation op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
rocblas_gemm_algo algo)
#else
int cublas_strided_batched_gemm(hipblasHandle_t handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
hipblasOperation_t op_A,
hipblasOperation_t op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
hipblasGemmAlgo_t algo)
#endif
{
#ifdef __HIP_PLATFORM_HCC__
rocblas_status status =
rocblas_gemm_strided_batched_ex(handle,
op_A,
op_B,
m,
n,
k,
alpha,
A,
rocblas_datatype_f32_r,
(op_A == rocblas_operation_none) ? m : k,
stride_A,
B,
rocblas_datatype_f32_r,
(op_B == rocblas_operation_none) ? k : n,
stride_B,
beta,
C,
rocblas_datatype_f32_r,
m,
stride_C,
C,
rocblas_datatype_f32_r,
m,
stride_C,
batch,
rocblas_datatype_f32_r,
algo,
0,
0);
#else
hipblasStatus_t status = hipblasGemmStridedBatchedEx(handle,
op_A,
op_B,
m,
n,
k,
alpha,
A,
HIP_R_32F,
(op_A == HIPBLAS_OP_N) ? m : k,
stride_A,
B,
HIP_R_32F,
(op_B == HIPBLAS_OP_N) ? k : n,
stride_B,
beta,
C,
HIP_R_32F,
m,
stride_C,
batch,
HIP_R_32F,
algo);
#endif
#ifdef __HIP_PLATFORM_HCC__
if (status != rocblas_status_success) {
#else
if (status != HIPBLAS_STATUS_SUCCESS) {
#endif
fprintf(stderr,
"!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n",
batch,
m,
n,
k,
(int)status);
return EXIT_FAILURE;
}
return 0;
}
#ifdef __HIP_PLATFORM_HCC__
int cublas_strided_batched_gemm(rocblas_handle handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
rocblas_operation op_A,
rocblas_operation op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
rocblas_gemm_algo algo)
#else
int cublas_strided_batched_gemm(hipblasHandle_t handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
hipblasOperation_t op_A,
hipblasOperation_t op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
hipblasGemmAlgo_t algo)
#endif
{
#ifdef __HIP_PLATFORM_HCC__
rocblas_status status =
rocblas_gemm_strided_batched_ex(handle,
op_A,
op_B,
m,
n,
k,
alpha,
A,
rocblas_datatype_f16_r,
(op_A == rocblas_operation_none) ? m : k,
stride_A,
B,
rocblas_datatype_f16_r,
(op_B == rocblas_operation_none) ? k : n,
stride_B,
beta,
C,
rocblas_datatype_f16_r,
m,
stride_C,
C,
rocblas_datatype_f16_r,
m,
stride_C,
batch,
rocblas_datatype_f32_r,
algo,
0,
0);
#else
hipblasStatus_t status = hipblasGemmStridedBatchedEx(handle,
op_A,
op_B,
m,
n,
k,
alpha,
A,
HIP_R_16F,
(op_A == HIPBLAS_OP_N) ? m : k,
stride_A,
B,
HIP_R_16F,
(op_B == HIPBLAS_OP_N) ? k : n,
stride_B,
beta,
C,
HIP_R_16F,
m,
stride_C,
batch,
HIP_R_32F,
algo);
#endif
#ifdef __HIP_PLATFORM_HCC__
if (status != rocblas_status_success) {
#else
if (status != HIPBLAS_STATUS_SUCCESS) {
#endif
fprintf(stderr,
"!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n",
m,
n,
k,
(int)status);
return EXIT_FAILURE;
}
return 0;
}
| 75d6d91187e364aac137e19cd25644f6d93d7a9c.cu | #include "cublas_wrappers.h"
#ifdef __HIP_PLATFORM_HCC__
int cublas_gemm_ex(rocblas_handle handle,
rocblas_operation transa,
rocblas_operation transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
rocblas_gemm_algo algo)
#else
int cublas_gemm_ex(cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
cublasGemmAlgo_t algo)
#endif
{
#ifdef __HIP_PLATFORM_HCC__
rocblas_status status = rocblas_gemm_ex(handle,
transa,
transb,
m,
n,
k,
(const void*)alpha,
(const void*)A,
rocblas_datatype_f32_r,
(transa == rocblas_operation_none) ? m : k,
(const void*)B,
rocblas_datatype_f32_r,
(transb == rocblas_operation_none) ? k : n,
(const void*)beta,
C,
rocblas_datatype_f32_r,
m,
C,
rocblas_datatype_f32_r,
m,
rocblas_datatype_f32_r,
algo,
0,
0);
#else
cublasStatus_t status = cublasGemmEx(handle,
transa,
transb,
m,
n,
k,
(const void*)alpha,
(const void*)A,
CUDA_R_32F,
(transa == CUBLAS_OP_N) ? m : k,
(const void*)B,
CUDA_R_32F,
(transb == CUBLAS_OP_N) ? k : n,
(const void*)beta,
C,
CUDA_R_32F,
m,
CUDA_R_32F,
algo);
#endif
#ifdef __HIP_PLATFORM_HCC__
if (status != rocblas_status_success) {
#else
if (status != CUBLAS_STATUS_SUCCESS) {
#endif
fprintf(stderr,
"!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n",
m,
n,
k,
(int)status);
return EXIT_FAILURE;
}
return 0;
}
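/* Usage sketch (illustrative only; handle, d_A, d_B and d_C are hypothetical and not
defined in this file): for column-major A (m x k), B (k x n) and C (m x n) already
resident on the device,
float alpha = 1.f, beta = 0.f;
cublas_gemm_ex(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
&alpha, &beta, d_A, d_B, d_C, CUBLAS_GEMM_DEFAULT);
computes C = alpha*A*B + beta*C; the leading dimensions are derived from m, k and
the transpose flags, so no explicit lda/ldb/ldc arguments are taken. */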
#ifdef __HIP_PLATFORM_HCC__
int cublas_gemm_ex(rocblas_handle handle,
rocblas_operation transa,
rocblas_operation transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
rocblas_gemm_algo algo)
#else
int cublas_gemm_ex(cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
cublasGemmAlgo_t algo)
#endif
{
#ifdef __HIP_PLATFORM_HCC__
rocblas_status status = rocblas_gemm_ex(handle,
transa,
transb,
m,
n,
k,
(const void*)alpha,
(const void*)A,
rocblas_datatype_f16_r,
(transa == rocblas_operation_none) ? m : k,
(const void*)B,
rocblas_datatype_f16_r,
(transb == rocblas_operation_none) ? k : n,
(const void*)beta,
(void*)C,
rocblas_datatype_f16_r,
m,
(void*)C,
rocblas_datatype_f16_r,
m,
rocblas_datatype_f32_r,
algo,
0,
0);
#else
cublasStatus_t status = cublasGemmEx(handle,
transa,
transb,
m,
n,
k,
(const void*)alpha,
(const void*)A,
CUDA_R_16F,
(transa == CUBLAS_OP_N) ? m : k,
(const void*)B,
CUDA_R_16F,
(transb == CUBLAS_OP_N) ? k : n,
(const void*)beta,
(void*)C,
CUDA_R_16F,
m,
CUDA_R_32F,
algo);
#endif
#ifdef __HIP_PLATFORM_HCC__
if (status != rocblas_status_success) {
#else
if (status != CUBLAS_STATUS_SUCCESS) {
#endif
fprintf(stderr,
"!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n",
m,
n,
k,
(int)status);
return EXIT_FAILURE;
}
return 0;
}
#ifdef __HIP_PLATFORM_HCC__
int cublas_strided_batched_gemm(rocblas_handle handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
rocblas_operation op_A,
rocblas_operation op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
rocblas_gemm_algo algo)
#else
int cublas_strided_batched_gemm(cublasHandle_t handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
cublasOperation_t op_A,
cublasOperation_t op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
cublasGemmAlgo_t algo)
#endif
{
#ifdef __HIP_PLATFORM_HCC__
rocblas_status status =
rocblas_gemm_strided_batched_ex(handle,
op_A,
op_B,
m,
n,
k,
alpha,
A,
rocblas_datatype_f32_r,
(op_A == rocblas_operation_none) ? m : k,
stride_A,
B,
rocblas_datatype_f32_r,
(op_B == rocblas_operation_none) ? k : n,
stride_B,
beta,
C,
rocblas_datatype_f32_r,
m,
stride_C,
C,
rocblas_datatype_f32_r,
m,
stride_C,
batch,
rocblas_datatype_f32_r,
algo,
0,
0);
#else
cublasStatus_t status = cublasGemmStridedBatchedEx(handle,
op_A,
op_B,
m,
n,
k,
alpha,
A,
CUDA_R_32F,
(op_A == CUBLAS_OP_N) ? m : k,
stride_A,
B,
CUDA_R_32F,
(op_B == CUBLAS_OP_N) ? k : n,
stride_B,
beta,
C,
CUDA_R_32F,
m,
stride_C,
batch,
CUDA_R_32F,
algo);
#endif
#ifdef __HIP_PLATFORM_HCC__
if (status != rocblas_status_success) {
#else
if (status != CUBLAS_STATUS_SUCCESS) {
#endif
fprintf(stderr,
"!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n",
batch,
m,
n,
k,
(int)status);
return EXIT_FAILURE;
}
return 0;
}
#ifdef __HIP_PLATFORM_HCC__
int cublas_strided_batched_gemm(rocblas_handle handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
rocblas_operation op_A,
rocblas_operation op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
rocblas_gemm_algo algo)
#else
int cublas_strided_batched_gemm(cublasHandle_t handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
cublasOperation_t op_A,
cublasOperation_t op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
cublasGemmAlgo_t algo)
#endif
{
#ifdef __HIP_PLATFORM_HCC__
rocblas_status status =
rocblas_gemm_strided_batched_ex(handle,
op_A,
op_B,
m,
n,
k,
alpha,
A,
rocblas_datatype_f16_r,
(op_A == rocblas_operation_none) ? m : k,
stride_A,
B,
rocblas_datatype_f16_r,
(op_B == rocblas_operation_none) ? k : n,
stride_B,
beta,
C,
rocblas_datatype_f16_r,
m,
stride_C,
C,
rocblas_datatype_f16_r,
m,
stride_C,
batch,
rocblas_datatype_f32_r,
algo,
0,
0);
#else
cublasStatus_t status = cublasGemmStridedBatchedEx(handle,
op_A,
op_B,
m,
n,
k,
alpha,
A,
CUDA_R_16F,
(op_A == CUBLAS_OP_N) ? m : k,
stride_A,
B,
CUDA_R_16F,
(op_B == CUBLAS_OP_N) ? k : n,
stride_B,
beta,
C,
CUDA_R_16F,
m,
stride_C,
batch,
CUDA_R_32F,
algo);
#endif
#ifdef __HIP_PLATFORM_HCC__
if (status != rocblas_status_success) {
#else
if (status != CUBLAS_STATUS_SUCCESS) {
#endif
fprintf(stderr,
"!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n",
m,
n,
k,
(int)status);
return EXIT_FAILURE;
}
return 0;
}
|
a56b67f937b6bb979442a035f00bed3b3dea1ab2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcInteractionWHA.cu
*
* Created on: 13-04-2013
* Author: Kamil Szewc ([email protected])
*/
#include <math.h>
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/interactions.cuh"
__device__ static real2 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
real r = sqrt(pow2(dpos.x) + pow2(dpos.y));
real q = r * par->I_H;
if (q < 2.0) {
real gkx = grad_of_kern(dpos.x, q, par->I_H);
real gky = grad_of_kern(dpos.y, q, par->I_H);
real pres = (p[i].p*p[i].o) + (p[j].p*p[j].o);
if ((par->T_INTERFACE_CORRECTION == 1) && (p[i].c != p[j].c)) pres += par->INTERFACE_CORRECTION * (p[i].o + p[j].o);
if ((par->T_INTERFACE_CORRECTION == 2) && (p[i].c != p[j].c))
{
pres += 0.02 * sqrt(2.0) * sqrt(pow2(p[i].str.x) + pow2(p[i].str.y) + pow2(p[i].str.z) + pow2(p[i].str.w)) * (p[i].o + p[j].o);
}
real visc = 2.0*(p[i].mi*p[j].mi)*(p[i].o + p[j].o) * (dpos.x*gkx + dpos.y*gky) / ( (r*r+0.01*par->H*par->H) * (p[i].mi + p[j].mi));
return MAKE_REAL2((visc * dvel.x - pres * gkx), (visc * dvel.y - pres * gky));
}
else {
return MAKE_REAL2(0.0, 0.0);
}
}
__global__ void calcInteractionWHA(
Particle *p,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par)
{
uint index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < par->N) {
register real2 result = MAKE_REAL2(0.0,0.0);
#include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh"
p[index].rh_vel.x = result.x / p[index].m;
p[index].rh_vel.y = result.y / p[index].m;
}
}
| a56b67f937b6bb979442a035f00bed3b3dea1ab2.cu | /*
* calcInteractionWHA.cu
*
* Created on: 13-04-2013
* Author: Kamil Szewc ([email protected])
*/
#include <math.h>
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/interactions.cuh"
__device__ static real2 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
real r = sqrt(pow2(dpos.x) + pow2(dpos.y));
real q = r * par->I_H;
if (q < 2.0) {
real gkx = grad_of_kern(dpos.x, q, par->I_H);
real gky = grad_of_kern(dpos.y, q, par->I_H);
real pres = (p[i].p*p[i].o) + (p[j].p*p[j].o);
if ((par->T_INTERFACE_CORRECTION == 1) && (p[i].c != p[j].c)) pres += par->INTERFACE_CORRECTION * (p[i].o + p[j].o);
if ((par->T_INTERFACE_CORRECTION == 2) && (p[i].c != p[j].c))
{
pres += 0.02 * sqrt(2.0) * sqrt(pow2(p[i].str.x) + pow2(p[i].str.y) + pow2(p[i].str.z) + pow2(p[i].str.w)) * (p[i].o + p[j].o);
}
real visc = 2.0*(p[i].mi*p[j].mi)*(p[i].o + p[j].o) * (dpos.x*gkx + dpos.y*gky) / ( (r*r+0.01*par->H*par->H) * (p[i].mi + p[j].mi));
return MAKE_REAL2((visc * dvel.x - pres * gkx), (visc * dvel.y - pres * gky));
}
else {
return MAKE_REAL2(0.0, 0.0);
}
}
__global__ void calcInteractionWHA(
Particle *p,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par)
{
uint index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < par->N) {
register real2 result = MAKE_REAL2(0.0,0.0);
#include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh"
p[index].rh_vel.x = result.x / p[index].m;
p[index].rh_vel.y = result.y / p[index].m;
}
}
|
4230bd9962e779cff41cca12ccb5e8480bc0130e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/common.hpp"
#include "caffe/util/interp.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
// Bi-linear interpolation
// IN : [channels height1 width1] cropped from a bigger [Height1 Width1] image
// OUT: [channels height2 width2] cropped from a bigger [Height2 Width2] image
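// Each output pixel is a convex combination of its four nearest input pixels:
// out(h2,w2) = h0lambda*w0lambda*in(h1,w1) + h0lambda*w1lambda*in(h1,w1+1)
// + h1lambda*w0lambda*in(h1+1,w1) + h1lambda*w1lambda*in(h1+1,w1+1)
// where h1lambda/w1lambda are the fractional offsets of the back-projected sample,
// h0lambda = 1-h1lambda, w0lambda = 1-w1lambda, and h1p/w1p clamp the +1 neighbour
// at the last row/column.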
template <typename Dtype, bool packed>
__global__ void caffe_gpu_interp2_kernel(const int n, const float rheight, const float rwidth,
const int channels,
const Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
if (packed) {
const Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
return;
}
//
const float h1r = rheight * h2;
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Dtype h1lambda = h1r - h1;
const Dtype h0lambda = Dtype(1.) - h1lambda;
//
const float w1r = rwidth * w2;
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Dtype w1lambda = w1r - w1;
const Dtype w0lambda = Dtype(1.) - w1lambda;
//
if (packed) {
const Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos2[0] =
h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[channels * w1p]) +
h1lambda * (w0lambda * pos1[channels * h1p * Width1] + w1lambda * pos1[channels * (h1p * Width1 + w1p)]);
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] =
h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p]) +
h1lambda * (w0lambda * pos1[h1p * Width1] + w1lambda * pos1[h1p * Width1 + w1p]);
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
}
}
template <typename Dtype, bool packed>
void caffe_gpu_interp2(const int channels,
const Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
CHECK(x1 >= 0 && y1 >= 0 && height1 > 0 && width1 > 0 && x2 >= 0 && y2 >= 0 && height2 > 0 && width2 > 0);
CHECK(Width1 >= width1 + x1 && Height1 >= height1 + y1 && Width2 >= width2 + x2 && Height2 >= height2 + y2);
const float rheight = (height2 > 1) ? static_cast<float>(height1 - 1) / (height2 - 1) : 0.f;
const float rwidth = (width2 > 1) ? static_cast<float>(width1 - 1) / (width2 - 1) : 0.f;
const int num_kernels = height2 * width2;
hipLaunchKernelGGL(( caffe_gpu_interp2_kernel<Dtype,packed>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, rheight, rwidth, channels,
data1, x1, y1, height1, width1, Height1, Width1,
data2, x2, y2, height2, width2, Height2, Width2);
CUDA_POST_KERNEL_CHECK;
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
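// Each gradient sample of data2 is scattered back to the same four input locations
// visited in the forward pass, weighted by the same bilinear factors; the adds are
// atomic because neighbouring output pixels map onto overlapping input pixels.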
template <typename Dtype, bool packed>
__global__ void caffe_gpu_interp2_kernel_backward(const int n, const float rheight, const float rwidth,
const int channels,
Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
const Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
if (packed) {
Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
const Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos1[0] += pos2[0];
pos1++;
pos2++;
}
}
else {
Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
const Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos1[0] += pos2[0];
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
return;
}
//
const float h1r = rheight * h2;
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Dtype h1lambda = h1r - h1;
const Dtype h0lambda = Dtype(1.) - h1lambda;
//
const float w1r = rwidth * w2;
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Dtype w1lambda = w1r - w1;
const Dtype w0lambda = Dtype(1.) - w1lambda;
//
if (packed) {
Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
const Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
caffe_gpu_atomic_add(h0lambda * w0lambda * pos2[0], &pos1[0]);
caffe_gpu_atomic_add(h0lambda * w1lambda * pos2[0], &pos1[channels * w1p]);
caffe_gpu_atomic_add(h1lambda * w0lambda * pos2[0], &pos1[channels * h1p * Width1]);
caffe_gpu_atomic_add(h1lambda * w1lambda * pos2[0], &pos1[channels * (h1p * Width1 + w1p)]);
pos1++;
pos2++;
}
}
else {
Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
const Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
caffe_gpu_atomic_add(h0lambda * w0lambda * pos2[0], &pos1[0]);
caffe_gpu_atomic_add(h0lambda * w1lambda * pos2[0], &pos1[w1p]);
caffe_gpu_atomic_add(h1lambda * w0lambda * pos2[0], &pos1[h1p * Width1]);
caffe_gpu_atomic_add(h1lambda * w1lambda * pos2[0], &pos1[h1p * Width1 + w1p]);
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
}
}
template <typename Dtype, bool packed>
void caffe_gpu_interp2_backward(const int channels,
Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
const Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
CHECK(x1 >= 0 && y1 >= 0 && height1 > 0 && width1 > 0 && x2 >= 0 && y2 >= 0 && height2 > 0 && width2 > 0);
CHECK(Width1 >= width1 + x1 && Height1 >= height1 + y1 && Width2 >= width2 + x2 && Height2 >= height2 + y2);
const float rheight = (height2 > 1) ? static_cast<float>(height1 - 1) / (height2 - 1) : 0.f;
const float rwidth = (width2 > 1) ? static_cast<float>(width1 - 1) / (width2 - 1) : 0.f;
const int num_kernels = height2 * width2;
hipLaunchKernelGGL(( caffe_gpu_interp2_kernel_backward<Dtype,packed>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, rheight, rwidth, channels,
data1, x1, y1, height1, width1, Height1, Width1,
data2, x2, y2, height2, width2, Height2, Width2);
CUDA_POST_KERNEL_CHECK;
}
// Create Gaussian pyramid of an image. Assume output space is pre-allocated.
// IN : [channels height width]
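// Each level halves both dimensions; a parent pixel is the 2x2 box average of its
// four children (0.25 * their sum), computed level by level from the previous one.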
template <typename Dtype, bool packed>
__global__ void caffe_gpu_pyramid2_kernel(const int n, const int channels,
const Dtype *data1, const int height1, const int width1,
Dtype *data2, const int height2, const int width2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
const int w1 = 2 * w2;
const int h1 = 2 * h2;
if (packed) {
const Dtype* pos1 = &data1[channels * (h1 * width1 + w1)];
Dtype* pos2 = &data2[channels * (h2 * width2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] = static_cast<Dtype>(.25) *
(pos1[0] + pos1[channels] +
pos1[channels * width1] + pos1[channels * (width1 + 1)]);
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[h1 * width1 + w1];
Dtype* pos2 = &data2[h2 * width2 + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = static_cast<Dtype>(.25) *
(pos1[0] + pos1[1] +
pos1[width1] + pos1[width1 + 1]);
pos1 += width1 * height1;
pos2 += width2 * height2;
}
}
}
}
template <typename Dtype, bool packed>
void caffe_gpu_pyramid2(const int channels,
const Dtype *data, const int height, const int width,
Dtype *data_pyr, const int levels) {
CHECK(height > 0 && width > 0 && levels >= 0);
int height1 = height, width1 = width;
int height2 = height, width2 = width;
const Dtype *data1 = data;
Dtype *data2 = data_pyr;
for (int l = 0; l < levels; ++l) {
height2 /= 2;
width2 /= 2;
if (height2 == 0 || width2 == 0) {
break;
}
const int num_kernels = height2 * width2;
hipLaunchKernelGGL(( caffe_gpu_pyramid2_kernel<Dtype,packed>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, channels, data1, height1, width1, data2, height2, width2);
CUDA_POST_KERNEL_CHECK;
data1 = data2;
height1 = height2;
width1 = width2;
data2 += channels * height2 * width2;
}
}
// Explicit instances
template void caffe_gpu_interp2<float,false>(const int, const float *, const int, const int, const int, const int, const int, const int, float *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2<float,true>(const int, const float *, const int, const int, const int, const int, const int, const int, float *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2<double,false>(const int, const double *, const int, const int, const int, const int, const int, const int, double *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2<double,true>(const int, const double *, const int, const int, const int, const int, const int, const int, double *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2_backward<float,false>(const int, float *, const int, const int, const int, const int, const int, const int, const float *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2_backward<double,false>(const int, double *, const int, const int, const int, const int, const int, const int, const double *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_pyramid2<float,false>(const int, const float *, const int, const int, float *, const int);
template void caffe_gpu_pyramid2<float,true>(const int, const float *, const int, const int, float *, const int);
template void caffe_gpu_pyramid2<double,false>(const int, const double *, const int, const int, double *, const int);
template void caffe_gpu_pyramid2<double,true>(const int, const double *, const int, const int, double *, const int);
} // namespace caffe
| 4230bd9962e779cff41cca12ccb5e8480bc0130e.cu | #include "caffe/common.hpp"
#include "caffe/util/interp.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
// Bi-linear interpolation
// IN : [channels height1 width1] cropped from a bigger [Height1 Width1] image
// OUT: [channels height2 width2] cropped from a bigger [Height2 Width2] image
template <typename Dtype, bool packed>
__global__ void caffe_gpu_interp2_kernel(const int n, const float rheight, const float rwidth,
const int channels,
const Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
if (packed) {
const Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
return;
}
//
const float h1r = rheight * h2;
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Dtype h1lambda = h1r - h1;
const Dtype h0lambda = Dtype(1.) - h1lambda;
//
const float w1r = rwidth * w2;
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Dtype w1lambda = w1r - w1;
const Dtype w0lambda = Dtype(1.) - w1lambda;
//
if (packed) {
const Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos2[0] =
h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[channels * w1p]) +
h1lambda * (w0lambda * pos1[channels * h1p * Width1] + w1lambda * pos1[channels * (h1p * Width1 + w1p)]);
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] =
h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p]) +
h1lambda * (w0lambda * pos1[h1p * Width1] + w1lambda * pos1[h1p * Width1 + w1p]);
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
}
}
template <typename Dtype, bool packed>
void caffe_gpu_interp2(const int channels,
const Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
CHECK(x1 >= 0 && y1 >= 0 && height1 > 0 && width1 > 0 && x2 >= 0 && y2 >= 0 && height2 > 0 && width2 > 0);
CHECK(Width1 >= width1 + x1 && Height1 >= height1 + y1 && Width2 >= width2 + x2 && Height2 >= height2 + y2);
const float rheight = (height2 > 1) ? static_cast<float>(height1 - 1) / (height2 - 1) : 0.f;
const float rwidth = (width2 > 1) ? static_cast<float>(width1 - 1) / (width2 - 1) : 0.f;
const int num_kernels = height2 * width2;
caffe_gpu_interp2_kernel<Dtype,packed><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, rheight, rwidth, channels,
data1, x1, y1, height1, width1, Height1, Width1,
data2, x2, y2, height2, width2, Height2, Width2);
CUDA_POST_KERNEL_CHECK;
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
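// Each gradient sample of data2 is scattered back to the same four input locations
// visited in the forward pass, weighted by the same bilinear factors; the adds are
// atomic because neighbouring output pixels map onto overlapping input pixels.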
template <typename Dtype, bool packed>
__global__ void caffe_gpu_interp2_kernel_backward(const int n, const float rheight, const float rwidth,
const int channels,
Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
const Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
if (packed) {
Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
const Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos1[0] += pos2[0];
pos1++;
pos2++;
}
}
else {
Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
const Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos1[0] += pos2[0];
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
return;
}
//
const float h1r = rheight * h2;
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Dtype h1lambda = h1r - h1;
const Dtype h0lambda = Dtype(1.) - h1lambda;
//
const float w1r = rwidth * w2;
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Dtype w1lambda = w1r - w1;
const Dtype w0lambda = Dtype(1.) - w1lambda;
//
if (packed) {
Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
const Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
caffe_gpu_atomic_add(h0lambda * w0lambda * pos2[0], &pos1[0]);
caffe_gpu_atomic_add(h0lambda * w1lambda * pos2[0], &pos1[channels * w1p]);
caffe_gpu_atomic_add(h1lambda * w0lambda * pos2[0], &pos1[channels * h1p * Width1]);
caffe_gpu_atomic_add(h1lambda * w1lambda * pos2[0], &pos1[channels * (h1p * Width1 + w1p)]);
pos1++;
pos2++;
}
}
else {
Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
const Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
caffe_gpu_atomic_add(h0lambda * w0lambda * pos2[0], &pos1[0]);
caffe_gpu_atomic_add(h0lambda * w1lambda * pos2[0], &pos1[w1p]);
caffe_gpu_atomic_add(h1lambda * w0lambda * pos2[0], &pos1[h1p * Width1]);
caffe_gpu_atomic_add(h1lambda * w1lambda * pos2[0], &pos1[h1p * Width1 + w1p]);
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
}
}
template <typename Dtype, bool packed>
void caffe_gpu_interp2_backward(const int channels,
Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
const Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
CHECK(x1 >= 0 && y1 >= 0 && height1 > 0 && width1 > 0 && x2 >= 0 && y2 >= 0 && height2 > 0 && width2 > 0);
CHECK(Width1 >= width1 + x1 && Height1 >= height1 + y1 && Width2 >= width2 + x2 && Height2 >= height2 + y2);
const float rheight = (height2 > 1) ? static_cast<float>(height1 - 1) / (height2 - 1) : 0.f;
const float rwidth = (width2 > 1) ? static_cast<float>(width1 - 1) / (width2 - 1) : 0.f;
const int num_kernels = height2 * width2;
caffe_gpu_interp2_kernel_backward<Dtype,packed><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, rheight, rwidth, channels,
data1, x1, y1, height1, width1, Height1, Width1,
data2, x2, y2, height2, width2, Height2, Width2);
CUDA_POST_KERNEL_CHECK;
}
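// The backward kernel scatters each output-grid gradient into up to four input-grid
// positions; neighbouring output pixels can target the same input pixel, which is why
// the updates go through caffe_gpu_atomic_add() instead of plain +=.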
// Create Gaussian pyramid of an image. Assume output space is pre-allocated.
// IN : [channels height width]
template <typename Dtype, bool packed>
__global__ void caffe_gpu_pyramid2_kernel(const int n, const int channels,
const Dtype *data1, const int height1, const int width1,
Dtype *data2, const int height2, const int width2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
const int w1 = 2 * w2;
const int h1 = 2 * h2;
if (packed) {
const Dtype* pos1 = &data1[channels * (h1 * width1 + w1)];
Dtype* pos2 = &data2[channels * (h2 * width2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] = static_cast<Dtype>(.25) *
(pos1[0] + pos1[channels] +
pos1[channels * width1] + pos1[channels * (width1 + 1)]);
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[h1 * width1 + w1];
Dtype* pos2 = &data2[h2 * width2 + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = static_cast<Dtype>(.25) *
(pos1[0] + pos1[1] +
pos1[width1] + pos1[width1 + 1]);
pos1 += width1 * height1;
pos2 += width2 * height2;
}
}
}
}
template <typename Dtype, bool packed>
void caffe_gpu_pyramid2(const int channels,
const Dtype *data, const int height, const int width,
Dtype *data_pyr, const int levels) {
CHECK(height > 0 && width > 0 && levels >= 0);
int height1 = height, width1 = width;
int height2 = height, width2 = width;
const Dtype *data1 = data;
Dtype *data2 = data_pyr;
for (int l = 0; l < levels; ++l) {
height2 /= 2;
width2 /= 2;
if (height2 == 0 || width2 == 0) {
break;
}
const int num_kernels = height2 * width2;
caffe_gpu_pyramid2_kernel<Dtype,packed><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, channels, data1, height1, width1, data2, height2, width2);
CUDA_POST_KERNEL_CHECK;
data1 = data2;
height1 = height2;
width1 = width2;
data2 += channels * height2 * width2;
}
}
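// Usage sketch (illustrative only; the device buffer names d_image and d_pyr are hypothetical):
// caffe_gpu_pyramid2<float,false>(channels, d_image, H, W, d_pyr, levels);
// Successive levels are written back to back into d_pyr, each level advancing the output
// pointer by channels * height2 * width2 of the level just produced, so the caller must
// pre-allocate about channels * H * W / 3 floats in total for all levels.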
// Explicit instances
template void caffe_gpu_interp2<float,false>(const int, const float *, const int, const int, const int, const int, const int, const int, float *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2<float,true>(const int, const float *, const int, const int, const int, const int, const int, const int, float *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2<double,false>(const int, const double *, const int, const int, const int, const int, const int, const int, double *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2<double,true>(const int, const double *, const int, const int, const int, const int, const int, const int, double *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2_backward<float,false>(const int, float *, const int, const int, const int, const int, const int, const int, const float *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2_backward<double,false>(const int, double *, const int, const int, const int, const int, const int, const int, const double *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_pyramid2<float,false>(const int, const float *, const int, const int, float *, const int);
template void caffe_gpu_pyramid2<float,true>(const int, const float *, const int, const int, float *, const int);
template void caffe_gpu_pyramid2<double,false>(const int, const double *, const int, const int, double *, const int);
template void caffe_gpu_pyramid2<double,true>(const int, const double *, const int, const int, double *, const int);
} // namespace caffe
|
b47b8df4d5d652c6890966e3317c0169eb70bccd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cusp/helper_cuda.h>
#include <cusp/copy.cuh>
namespace cusp {
template <typename T> __global__ void kernel_copy(const T *in, T *out, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
out[i] = in[i];
}
}
template <typename T>
hipError_t copy<T>::launch(const T *in, T *out, int N, int grid_size, int block_size,
hipStream_t stream) {
if (stream) {
hipLaunchKernelGGL(( kernel_copy), dim3(grid_size), dim3(block_size), 0, stream, in, out, N);
} else {
hipLaunchKernelGGL(( kernel_copy), dim3(grid_size), dim3(block_size), 0, 0, in, out, N);
}
return hipPeekAtLastError();
}
template <typename T>
hipError_t copy<T>::launch(const std::vector<const void *>& inputs,
const std::vector<void *>& outputs, size_t nitems) {
return launch((const T*)inputs[0], (T*)outputs[0], nitems, _grid_size, _block_size, _stream);
}
template <typename T> hipError_t copy<T>::occupancy(int *minBlock, int *minGrid) {
return hipOccupancyMaxPotentialBlockSize(minGrid, minBlock, kernel_copy<T>, 0, 0);
}
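// Usage sketch (illustrative; how a copy<T> block is constructed and how d_in/d_out are
// allocated is not shown in this file and is assumed here):
//   int min_grid = 0, block = 0;
//   blk.occupancy(&block, &min_grid);                     // blk is an existing cusp::copy<uint32_t>
//   blk.launch(d_in, d_out, N, (N + block - 1) / block, block, stream);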
#define IMPLEMENT_KERNEL(T) template class copy<T>;
IMPLEMENT_KERNEL(uint8_t)
IMPLEMENT_KERNEL(uint16_t)
IMPLEMENT_KERNEL(uint32_t)
IMPLEMENT_KERNEL(uint64_t)
} // namespace cusp
| b47b8df4d5d652c6890966e3317c0169eb70bccd.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <cusp/helper_cuda.h>
#include <cusp/copy.cuh>
namespace cusp {
template <typename T> __global__ void kernel_copy(const T *in, T *out, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
out[i] = in[i];
}
}
template <typename T>
cudaError_t copy<T>::launch(const T *in, T *out, int N, int grid_size, int block_size,
cudaStream_t stream) {
if (stream) {
kernel_copy<<<grid_size, block_size, 0, stream>>>(in, out, N);
} else {
kernel_copy<<<grid_size, block_size>>>(in, out, N);
}
return cudaPeekAtLastError();
}
template <typename T>
cudaError_t copy<T>::launch(const std::vector<const void *>& inputs,
const std::vector<void *>& outputs, size_t nitems) {
return launch((const T*)inputs[0], (T*)outputs[0], nitems, _grid_size, _block_size, _stream);
}
template <typename T> cudaError_t copy<T>::occupancy(int *minBlock, int *minGrid) {
return cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock, kernel_copy<T>, 0, 0);
}
#define IMPLEMENT_KERNEL(T) template class copy<T>;
IMPLEMENT_KERNEL(uint8_t)
IMPLEMENT_KERNEL(uint16_t)
IMPLEMENT_KERNEL(uint32_t)
IMPLEMENT_KERNEL(uint64_t)
} // namespace cusp
|
8e46da3cf60a2d27d1ffcd26d67c657ee26e5603.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial compute r software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#include "2Dconvolution.h"
#include <hip/hip_runtime.h>
__constant__ float Md[KERNEL_SIZE * KERNEL_SIZE];
float Mh[KERNEL_SIZE * KERNEL_SIZE];
bool
CopyToConstMem(float * M, size_t msize)
{
hipError_t err = hipMemcpyToSymbol(Md, M, msize);
return (err == hipSuccess);
}
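// Host-side usage sketch (illustrative only; allocating and filling M, N and P is not
// shown in this file):
//   CopyToConstMem(Mh, KERNEL_SIZE * KERNEL_SIZE * sizeof(float));
//   dim3 block(BLOCK_SIZE, BLOCK_SIZE);
//   dim3 grid((P.width + BLOCK_SIZE - 1) / BLOCK_SIZE, (P.height + BLOCK_SIZE - 1) / BLOCK_SIZE);
//   hipLaunchKernelGGL(ConvolutionKernel, grid, block, 0, 0, M, N, P);
// The kernel below accumulates into P.elements with +=, so P must be zeroed before the launch.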
// Matrix convolution kernel specification
__global__ void ConvolutionKernel(Matrix M, Matrix N, Matrix P)
{
const unsigned int dim_size = (BLOCK_SIZE + (KERNEL_SIZE-1));
__shared__ float ds_N[dim_size][dim_size];
int halosize = (KERNEL_SIZE-1) / 2;
int ii = threadIdx.y * BLOCK_SIZE + threadIdx.x;
int I = ii%(dim_size);
int J = ii/(dim_size);
int I_o = blockIdx.x * BLOCK_SIZE + (blockIdx.y * BLOCK_SIZE) * N.width;
int I_real= I + blockIdx.x * BLOCK_SIZE - halosize;
int J_real= J + blockIdx.y * BLOCK_SIZE - halosize;
int I_n = I_o + (J - halosize) * (N.width) + I - halosize;
ds_N[J][I] = (I_real < 0 || I_real >= N.width || J_real < 0 || J_real >= N.height) ? 0 : N.elements[I_n];
int ii2 = BLOCK_SIZE * BLOCK_SIZE + ii;
I = ii2%(dim_size);
J = ii2/(dim_size);
I_real= I + blockIdx.x * BLOCK_SIZE - halosize;
J_real= J + blockIdx.y * BLOCK_SIZE - halosize;
I_n = I_o + (J - halosize) * (N.width) + I - halosize;
if (ii2 < dim_size*dim_size)
{
if (I_real < 0 || I_real >= N.width || J_real < 0 || J_real >= N.height)
{
ds_N[J][I] = 0;
}
else
{
ds_N[J][I] = N.elements[I_n];
}
}
    __syncthreads();
if ((blockIdx.x * BLOCK_SIZE + threadIdx.x < P.width) && (blockIdx.y * BLOCK_SIZE + threadIdx.y < P.height))
{
for (unsigned int j = 0; j < KERNEL_SIZE; j++)
{
for (unsigned int i = 0; i < KERNEL_SIZE; i++)
{
P.elements[blockIdx.x * BLOCK_SIZE + threadIdx.x + (blockIdx.y * BLOCK_SIZE + threadIdx.y) * P.width] += Md[i + j * KERNEL_SIZE] * ds_N[j + threadIdx.y][i + threadIdx.x];
}
}
}
} | 8e46da3cf60a2d27d1ffcd26d67c657ee26e5603.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial compute r software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#include "2Dconvolution.h"
#include <cuda.h>
__constant__ float Md[KERNEL_SIZE * KERNEL_SIZE];
float Mh[KERNEL_SIZE * KERNEL_SIZE];
bool
CopyToConstMem(float * M, size_t msize)
{
cudaError_t err = cudaMemcpyToSymbol(Md, M, msize);
return (err == cudaSuccess);
}
// Matrix convolution kernel specification
__global__ void ConvolutionKernel(Matrix M, Matrix N, Matrix P)
{
const unsigned int dim_size = (BLOCK_SIZE + (KERNEL_SIZE-1));
__shared__ float ds_N[dim_size][dim_size];
int halosize = (KERNEL_SIZE-1) / 2;
int ii = threadIdx.y * BLOCK_SIZE + threadIdx.x;
int I = ii%(dim_size);
int J = ii/(dim_size);
int I_o = blockIdx.x * BLOCK_SIZE + (blockIdx.y * BLOCK_SIZE) * N.width;
int I_real= I + blockIdx.x * BLOCK_SIZE - halosize;
int J_real= J + blockIdx.y * BLOCK_SIZE - halosize;
int I_n = I_o + (J - halosize) * (N.width) + I - halosize;
ds_N[J][I] = (I_real < 0 || I_real >= N.width || J_real < 0 || J_real >= N.height) ? 0 : N.elements[I_n];
int ii2 = BLOCK_SIZE * BLOCK_SIZE + ii;
I = ii2%(dim_size);
J = ii2/(dim_size);
I_real= I + blockIdx.x * BLOCK_SIZE - halosize;
J_real= J + blockIdx.y * BLOCK_SIZE - halosize;
I_n = I_o + (J - halosize) * (N.width) + I - halosize;
if (ii2 < dim_size*dim_size)
{
if (I_real < 0 || I_real >= N.width || J_real < 0 || J_real >= N.height)
{
ds_N[J][I] = 0;
}
else
{
ds_N[J][I] = N.elements[I_n];
}
}
    __syncthreads();
if ((blockIdx.x * BLOCK_SIZE + threadIdx.x < P.width) && (blockIdx.y * BLOCK_SIZE + threadIdx.y < P.height))
{
for (unsigned int j = 0; j < KERNEL_SIZE; j++)
{
for (unsigned int i = 0; i < KERNEL_SIZE; i++)
{
P.elements[blockIdx.x * BLOCK_SIZE + threadIdx.x + (blockIdx.y * BLOCK_SIZE + threadIdx.y) * P.width] += Md[i + j * KERNEL_SIZE] * ds_N[j + threadIdx.y][i + threadIdx.x];
}
}
}
} |
02c9ae18429f4717df9040c5b210ba9e05f27717.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void matAddKernel(float *A, float *B, float *C, int n){
int i = threadIdx.x + blockDim.x * blockIdx.x, j;
if(i < n){
for(j = 0; j < n; j++){
C[i*n+j] = A[i*n+j] + B[i*n+j];
}
}
}
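// Each thread owns one full row i of the n x n matrices and walks its columns, so the
// launch in matAdd() only needs ceil(n / 1024.0) blocks of 1024 threads to cover all rows.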
void matAdd(float* A, float* B, float* C, int n){
int size = n*n*sizeof(float);
float *d_A, *d_B, *d_C;
hipMalloc((void **) &d_A, size);
hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);
hipMalloc((void **) &d_C, size);
hipLaunchKernelGGL(( matAddKernel), dim3(ceil(n/1024.0)), dim3(1024), 0, 0, d_A,d_B,d_C,n);
hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);
hipFree(d_A); hipFree(d_B); hipFree(d_C);
}
int main(){
int n,i,j;
float *h_A,*h_B,*h_C;
printf("ingrese el tamao de la matriz:\n");
scanf("%d", &n);
h_A = (float*) malloc(n*n*sizeof(float));
h_B = (float*) malloc(n*n*sizeof(float));
h_C = (float*) malloc(n*n*sizeof(float));
for(i = 0; i < n; i++){
for(j = 0; j < n; j++){
h_A[i*n+j] = 1;
h_B[i*n+j] = 1;
}
}
matAdd(h_A,h_B,h_C,n);
for(i = 0; i < n; i++){
for(j = 0; j < n; j++){
printf("%f ", h_C[i*n+j]);
}
printf("\n");
}
printf("\n");
return 0;
} | 02c9ae18429f4717df9040c5b210ba9e05f27717.cu | #include <stdio.h>
__global__
void matAddKernel(float *A, float *B, float *C, int n){
int i = threadIdx.x + blockDim.x * blockIdx.x, j;
if(i < n){
for(j = 0; j < n; j++){
C[i*n+j] = A[i*n+j] + B[i*n+j];
}
}
}
void matAdd(float* A, float* B, float* C, int n){
int size = n*n*sizeof(float);
float *d_A, *d_B, *d_C;
cudaMalloc((void **) &d_A, size);
cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_C, size);
matAddKernel<<<ceil(n/1024.0), 1024>>>(d_A,d_B,d_C,n);
cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
int main(){
int n,i,j;
float *h_A,*h_B,*h_C;
printf("ingrese el tamaño de la matriz:\n");
scanf("%d", &n);
h_A = (float*) malloc(n*n*sizeof(float));
h_B = (float*) malloc(n*n*sizeof(float));
h_C = (float*) malloc(n*n*sizeof(float));
for(i = 0; i < n; i++){
for(j = 0; j < n; j++){
h_A[i*n+j] = 1;
h_B[i*n+j] = 1;
}
}
matAdd(h_A,h_B,h_C,n);
for(i = 0; i < n; i++){
for(j = 0; j < n; j++){
printf("%f ", h_C[i*n+j]);
}
printf("\n");
}
printf("\n");
return 0;
} |
326a5e34aa8dd3fcb77f418d74387fe746ef1702.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/sort/thrust_sort.cuh"
namespace cunumeric {
void thrust_local_sort(const uint64_t* values_in,
uint64_t* values_out,
const int64_t* indices_in,
int64_t* indices_out,
const size_t volume,
const size_t sort_dim_size,
const bool stable,
hipStream_t stream)
{
detail::thrust_local_sort(
values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream);
}
} // namespace cunumeric
| 326a5e34aa8dd3fcb77f418d74387fe746ef1702.cu | /* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/sort/thrust_sort.cuh"
namespace cunumeric {
void thrust_local_sort(const uint64_t* values_in,
uint64_t* values_out,
const int64_t* indices_in,
int64_t* indices_out,
const size_t volume,
const size_t sort_dim_size,
const bool stable,
cudaStream_t stream)
{
detail::thrust_local_sort(
values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream);
}
} // namespace cunumeric
|
038b8c9c1d9fd0a7c233cf7c0be03fc91f1daa58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mul256.cu"
#define GID (blockDim.x * blockIdx.x + threadIdx.x)
#ifndef BLOCKS
#define BLOCKS 3584*16
#endif
#ifndef N_BATCH
#define N_BATCH 256
#endif
typedef t256 t1024_batch[BLOCKS*N_BATCH*4];
__device__ __constant__ uint32 I[] =
{
0x4a0ea0b0, 0xc4ee1b27, 0xad2fe478, 0x2f431806,
0x3dfbd7a7, 0x2b4d0099, 0x4fc1df0b, 0x2b832480,
};
__global__ void batch_ge_add_step0(const t256 *const_b_table_xpy, const t256 *const_b_table_ymx, const t256 *const_b_table_t, const t256 *ymx, const t256 *xpy , const t256 *t, t1024_batch out_xy, t256 *temp_c)
{
__shared__ t256 s_b_table[N_BATCH*2], ymx_l, xpy_l;
t256 A, B, out, c1, c2;
const int globalid1 = (GID < (BLOCKS*N_BATCH)) ? GID : ((BLOCKS * N_BATCH)*2 + GID);
const int globalid2 = (GID < (BLOCKS*N_BATCH)) ? GID : (GID - (BLOCKS * N_BATCH));
const int c_id1 = (BLOCKS * N_BATCH) + GID;
const int lid = threadIdx.x;
const int lid1 = (GID < (BLOCKS*N_BATCH)) ? (lid+N_BATCH) : lid;
const int lid2 = (GID < (BLOCKS*N_BATCH)) ? lid : (lid+N_BATCH);
const int gid2 = (GID < (BLOCKS*N_BATCH)) ? blockIdx.x : (blockIdx.x-BLOCKS);
copy(s_b_table[lid].u32, const_b_table_xpy[lid].u32);
copy(s_b_table[N_BATCH+lid].u32, const_b_table_ymx[lid].u32);
copy(ymx_l.u32, ymx[gid2].u32);
copy(xpy_l.u32, xpy[gid2].u32);
copy(c1.u32, temp_c[globalid2].u32); // C = 2 + C
__syncthreads();
mul_reduce(A.u32, s_b_table[lid1].u32, ymx_l.u32); // A1 = (Y1-X1)*(Y2-X2) / A2 = (Y1+X1)*(Y2-X2)
mul_reduce(B.u32, s_b_table[lid2].u32, xpy_l.u32); // B1 = (Y1+X1)*(Y2+X2) / B2 = (Y1-X1)*(Y2+X2)
sub_4(c2.u32, c1.u32); // F = 4 - C
s_b_table[lid].u256 = (GID < (BLOCKS*N_BATCH)) ? c1.u256 : c2.u256;
s_b_table[N_BATCH+lid].u256 = (GID < (BLOCKS*N_BATCH)) ? c2.u256 : c1.u256;
__syncthreads();
add_reduce(out.u32, B.u32, A.u32); // H = B + A
mul_reduce(out.u32, s_b_table[lid].u32, out.u32); // Y3 = H * G / Y4 = H * F
copy(out_xy[c_id1].u32, out.u32);
sub_reduce(out.u32, B.u32, A.u32); // E = B - A
mul_reduce(out.u32, s_b_table[N_BATCH+lid].u32, out.u32); // X3 = E * F * I / X4 = E * G * I / I = SQRT(-1)
mul_reduce(out.u32, I, out.u32);
copy(out_xy[globalid1].u32, out.u32);
}
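// Pipeline note (a reading of the code, not an authoritative spec): batch_ge_add_step1
// below is meant to run first, caching per point C = T1*k*T2, the Z output
// 4 - C^2 = (2 - C)*(2 + C), and the value 2 + C in temp_c. batch_ge_add_step0 above then
// splits its grid in half on GID: the two halves swap the (2 + C)/(2 - C) factors and the
// xpy/ymx operands, so together they fill the four quarters of out_xy with the X/Y
// coordinates of two related results per input point.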
__global__ void batch_ge_add_step1(const t256 *const_b_table_t, const t256 *t, t1024_batch out_xy, t256 *in_z, t256 *temp_c)
{
__shared__ t256 s_b_table_t[N_BATCH], s_t;
t256 c, z_out;
copy(s_b_table_t[threadIdx.x].u32, const_b_table_t[threadIdx.x].u32);
copy(s_t.u32, t[blockIdx.x].u32);
__syncthreads();
mul_reduce(c.u32, s_b_table_t[threadIdx.x].u32, s_t.u32); // C = T1*k*T2
mul_reduce(z_out.u32, c.u32, c.u32); // z_out = C ^ 2
sub_4(z_out.u32, z_out.u32); // z_out = 4 - C ^ 2
copy(in_z[GID].u32, z_out.u32);
add_2(c.u32, c.u32); // C = 2 + C
copy(temp_c[GID].u32, c.u32);
} | 038b8c9c1d9fd0a7c233cf7c0be03fc91f1daa58.cu | #include "mul256.cu"
#define GID (blockDim.x * blockIdx.x + threadIdx.x)
#ifndef BLOCKS
#define BLOCKS 3584*16
#endif
#ifndef N_BATCH
#define N_BATCH 256
#endif
typedef t256 t1024_batch[BLOCKS*N_BATCH*4];
__device__ __constant__ uint32 I[] =
{
0x4a0ea0b0, 0xc4ee1b27, 0xad2fe478, 0x2f431806,
0x3dfbd7a7, 0x2b4d0099, 0x4fc1df0b, 0x2b832480,
};
__global__ void batch_ge_add_step0(const t256 *const_b_table_xpy, const t256 *const_b_table_ymx, const t256 *const_b_table_t, const t256 *ymx, const t256 *xpy , const t256 *t, t1024_batch out_xy, t256 *temp_c)
{
__shared__ t256 s_b_table[N_BATCH*2], ymx_l, xpy_l;
t256 A, B, out, c1, c2;
const int globalid1 = (GID < (BLOCKS*N_BATCH)) ? GID : ((BLOCKS * N_BATCH)*2 + GID);
const int globalid2 = (GID < (BLOCKS*N_BATCH)) ? GID : (GID - (BLOCKS * N_BATCH));
const int c_id1 = (BLOCKS * N_BATCH) + GID;
const int lid = threadIdx.x;
const int lid1 = (GID < (BLOCKS*N_BATCH)) ? (lid+N_BATCH) : lid;
const int lid2 = (GID < (BLOCKS*N_BATCH)) ? lid : (lid+N_BATCH);
const int gid2 = (GID < (BLOCKS*N_BATCH)) ? blockIdx.x : (blockIdx.x-BLOCKS);
copy(s_b_table[lid].u32, const_b_table_xpy[lid].u32);
copy(s_b_table[N_BATCH+lid].u32, const_b_table_ymx[lid].u32);
copy(ymx_l.u32, ymx[gid2].u32);
copy(xpy_l.u32, xpy[gid2].u32);
copy(c1.u32, temp_c[globalid2].u32); // C = 2 + C
__syncthreads();
mul_reduce(A.u32, s_b_table[lid1].u32, ymx_l.u32); // A1 = (Y1-X1)*(Y2-X2) / A2 = (Y1+X1)*(Y2-X2)
mul_reduce(B.u32, s_b_table[lid2].u32, xpy_l.u32); // B1 = (Y1+X1)*(Y2+X2) / B2 = (Y1-X1)*(Y2+X2)
sub_4(c2.u32, c1.u32); // F = 4 - C
s_b_table[lid].u256 = (GID < (BLOCKS*N_BATCH)) ? c1.u256 : c2.u256;
s_b_table[N_BATCH+lid].u256 = (GID < (BLOCKS*N_BATCH)) ? c2.u256 : c1.u256;
__syncthreads();
add_reduce(out.u32, B.u32, A.u32); // H = B + A
mul_reduce(out.u32, s_b_table[lid].u32, out.u32); // Y3 = H * G / Y4 = H * F
copy(out_xy[c_id1].u32, out.u32);
sub_reduce(out.u32, B.u32, A.u32); // E = B - A
mul_reduce(out.u32, s_b_table[N_BATCH+lid].u32, out.u32); // X3 = E * F * I / X4 = E * G * I / I = SQRT(-1)
mul_reduce(out.u32, I, out.u32);
copy(out_xy[globalid1].u32, out.u32);
}
__global__ void batch_ge_add_step1(const t256 *const_b_table_t, const t256 *t, t1024_batch out_xy, t256 *in_z, t256 *temp_c)
{
__shared__ t256 s_b_table_t[N_BATCH], s_t;
t256 c, z_out;
copy(s_b_table_t[threadIdx.x].u32, const_b_table_t[threadIdx.x].u32);
copy(s_t.u32, t[blockIdx.x].u32);
__syncthreads();
mul_reduce(c.u32, s_b_table_t[threadIdx.x].u32, s_t.u32); // C = T1*k*T2
mul_reduce(z_out.u32, c.u32, c.u32); // z_out = C ^ 2
sub_4(z_out.u32, z_out.u32); // z_out = 4 - C ^ 2
copy(in_z[GID].u32, z_out.u32);
add_2(c.u32, c.u32); // C = 2 + C
copy(temp_c[GID].u32, c.u32);
} |
1ffe66aa07cbdaddd5d75ad77510878a16bda781.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2019 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <algorithm>
#include <string>
#include <set>
#include "xgboost/logging.h"
#include "xgboost/span.h"
#include "constraints.cuh"
#include "param.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
size_t FeatureInteractionConstraintDevice::Features() const {
return d_sets_ptr_.size() - 1;
}
void FeatureInteractionConstraintDevice::Configure(
tree::TrainParam const& param, int32_t const n_features) {
has_constraint_ = true;
if (param.interaction_constraints.length() == 0) {
has_constraint_ = false;
return;
}
// --- Parse interaction constraints
// Interaction constraints parsed from string parameter. After
// parsing, this looks like {{0, 1, 2}, {2, 3 ,4}}.
std::vector<std::vector<bst_feature_t>> h_feature_constraints;
try {
ParseInteractionConstraint(param.interaction_constraints, &h_feature_constraints);
} catch (dmlc::Error const& e) {
LOG(FATAL) << "Failed to parse feature interaction constraint:\n"
<< param.interaction_constraints << "\n"
<< "With error:\n" << e.what();
}
n_sets_ = h_feature_constraints.size();
size_t const n_feat_storage = LBitField64::ComputeStorageSize(n_features);
if (n_feat_storage == 0 && n_features != 0) {
LOG(FATAL) << "Wrong storage size, n_features: " << n_features;
}
// --- Initialize allowed features attached to nodes.
int32_t n_nodes { param.MaxNodes() };
node_constraints_.resize(n_nodes);
node_constraints_storage_.resize(n_nodes);
for (auto& n : node_constraints_storage_) {
n.resize(LBitField64::ComputeStorageSize(n_features));
}
for (size_t i = 0; i < node_constraints_storage_.size(); ++i) {
auto span = dh::ToSpan(node_constraints_storage_[i]);
node_constraints_[i] = LBitField64(span);
}
s_node_constraints_ = common::Span<LBitField64>(node_constraints_.data(),
node_constraints_.size());
// Represent constraints as CSR format, flatten is the value vector,
// ptr is row_ptr vector in CSR.
std::vector<uint32_t> h_feature_constraints_flatten;
for (auto const& constraints : h_feature_constraints) {
for (uint32_t c : constraints) {
h_feature_constraints_flatten.emplace_back(c);
}
}
std::vector<size_t> h_feature_constraints_ptr;
size_t n_features_in_constraints = 0;
h_feature_constraints_ptr.emplace_back(n_features_in_constraints);
for (auto const& v : h_feature_constraints) {
n_features_in_constraints += v.size();
h_feature_constraints_ptr.emplace_back(n_features_in_constraints);
}
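  // Worked example: for interaction_constraints [[0, 1, 2], [2, 3, 4]] the flattened CSR
  // values are {0, 1, 2, 2, 3, 4} and the row pointer is {0, 3, 6}, i.e. set s owns the
  // features in flatten[ptr[s], ptr[s+1]).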
// Copy the CSR to device.
d_fconstraints_.resize(h_feature_constraints_flatten.size());
thrust::copy(h_feature_constraints_flatten.cbegin(), h_feature_constraints_flatten.cend(),
d_fconstraints_.begin());
s_fconstraints_ = dh::ToSpan(d_fconstraints_);
d_fconstraints_ptr_.resize(h_feature_constraints_ptr.size());
thrust::copy(h_feature_constraints_ptr.cbegin(), h_feature_constraints_ptr.cend(),
d_fconstraints_ptr_.begin());
s_fconstraints_ptr_ = dh::ToSpan(d_fconstraints_ptr_);
// --- Compute interaction sets attached to each feature.
// Use a set to eliminate duplicated entries.
std::vector<std::set<int32_t> > h_features_set(n_features);
int32_t cid = 0;
for (auto const& constraints : h_feature_constraints) {
for (auto const& feat : constraints) {
h_features_set.at(feat).insert(cid);
}
cid++;
}
// Compute device sets.
std::vector<int32_t> h_sets;
int32_t ptr = 0;
std::vector<int32_t> h_sets_ptr {ptr};
for (auto const& feature : h_features_set) {
for (auto constraint_id : feature) {
h_sets.emplace_back(constraint_id);
}
// empty set is well defined here.
ptr += feature.size();
h_sets_ptr.emplace_back(ptr);
}
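  // Continuing the [[0, 1, 2], [2, 3, 4]] example with 5 features: feature 2 sits in both
  // sets, so h_sets = {0, 0, 0, 1, 1, 1} and h_sets_ptr = {0, 1, 2, 4, 5, 6}; a feature that
  // appears in no constraint simply gets an empty [ptr[f], ptr[f+1]) range.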
d_sets_ = h_sets;
d_sets_ptr_ = h_sets_ptr;
s_sets_ = dh::ToSpan(d_sets_);
s_sets_ptr_ = dh::ToSpan(d_sets_ptr_);
d_feature_buffer_storage_.resize(LBitField64::ComputeStorageSize(n_features));
feature_buffer_ = LBitField64{dh::ToSpan(d_feature_buffer_storage_)};
// --- Initialize result buffers.
output_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features));
output_buffer_bits_ = LBitField64(dh::ToSpan(output_buffer_bits_storage_));
input_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features));
input_buffer_bits_ = LBitField64(dh::ToSpan(input_buffer_bits_storage_));
result_buffer_.resize(n_features);
s_result_buffer_ = dh::ToSpan(result_buffer_);
}
FeatureInteractionConstraintDevice::FeatureInteractionConstraintDevice(
tree::TrainParam const& param, int32_t const n_features) :
has_constraint_{true}, n_sets_{0} {
this->Configure(param, n_features);
}
void FeatureInteractionConstraintDevice::Reset() {
for (auto& node : node_constraints_storage_) {
thrust::fill(node.begin(), node.end(), 0);
}
}
__global__ void ClearBuffersKernel(
LBitField64 result_buffer_output, LBitField64 result_buffer_input) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < result_buffer_output.Size()) {
result_buffer_output.Clear(tid);
}
if (tid < result_buffer_input.Size()) {
result_buffer_input.Clear(tid);
}
}
void FeatureInteractionConstraintDevice::ClearBuffers() {
CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size());
CHECK_LE(feature_buffer_.Size(), output_buffer_bits_.Size());
uint32_t constexpr kBlockThreads = 256;
auto const n_grids = static_cast<uint32_t>(
common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
ClearBuffersKernel,
output_buffer_bits_, input_buffer_bits_);
}
common::Span<bst_feature_t> FeatureInteractionConstraintDevice::QueryNode(int32_t node_id) {
if (!has_constraint_) { return {}; }
CHECK_LT(node_id, s_node_constraints_.size());
ClearBuffers();
thrust::counting_iterator<int32_t> begin(0);
thrust::counting_iterator<int32_t> end(result_buffer_.size());
auto p_result_buffer = result_buffer_.data();
LBitField64 node_constraints = s_node_constraints_[node_id];
thrust::device_ptr<bst_feature_t> const out_end = thrust::copy_if(
thrust::device,
begin, end,
p_result_buffer,
[=]__device__(int32_t pos) {
bool res = node_constraints.Check(pos);
return res;
});
size_t const n_available = std::distance(result_buffer_.data(), out_end);
return {s_result_buffer_.data(), s_result_buffer_.data() + n_available};
}
__global__ void SetInputBufferKernel(common::Span<bst_feature_t> feature_list_input,
LBitField64 result_buffer_input) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < feature_list_input.size()) {
result_buffer_input.Set(feature_list_input[tid]);
}
}
__global__ void QueryFeatureListKernel(LBitField64 node_constraints,
LBitField64 result_buffer_input,
LBitField64 result_buffer_output) {
result_buffer_output |= node_constraints;
result_buffer_output &= result_buffer_input;
}
common::Span<bst_feature_t> FeatureInteractionConstraintDevice::Query(
common::Span<bst_feature_t> feature_list, int32_t nid) {
if (!has_constraint_ || nid == 0) {
return feature_list;
}
ClearBuffers();
LBitField64 node_constraints = s_node_constraints_[nid];
CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size());
uint32_t constexpr kBlockThreads = 256;
auto n_grids = static_cast<uint32_t>(
common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
SetInputBufferKernel,
feature_list, input_buffer_bits_);
dh::LaunchKernel {n_grids, kBlockThreads} (
QueryFeatureListKernel,
node_constraints, input_buffer_bits_, output_buffer_bits_);
thrust::counting_iterator<int32_t> begin(0);
thrust::counting_iterator<int32_t> end(result_buffer_.size());
LBitField64 local_result_buffer = output_buffer_bits_;
thrust::device_ptr<bst_feature_t> const out_end = thrust::copy_if(
thrust::device,
begin, end,
result_buffer_.data(),
[=]__device__(int32_t pos) {
bool res = local_result_buffer.Check(pos);
return res;
});
size_t const n_available = std::distance(result_buffer_.data(), out_end);
common::Span<bst_feature_t> result =
{s_result_buffer_.data(), s_result_buffer_.data() + n_available};
return result;
}
// Find interaction sets for each feature, then store all features in
// those sets in a buffer.
__global__ void RestoreFeatureListFromSetsKernel(
LBitField64 feature_buffer,
bst_feature_t fid,
common::Span<bst_feature_t> feature_interactions,
common::Span<size_t> feature_interactions_ptr, // of size n interaction set + 1
common::Span<bst_feature_t> interactions_list,
common::Span<size_t> interactions_list_ptr) {
auto const tid_x = threadIdx.x + blockIdx.x * blockDim.x;
auto const tid_y = threadIdx.y + blockIdx.y * blockDim.y;
// painful mapping: fid -> sets related to it -> features related to sets.
auto const beg = interactions_list_ptr[fid];
auto const end = interactions_list_ptr[fid+1];
auto const n_sets = end - beg;
if (tid_x < n_sets) {
auto const set_id_pos = beg + tid_x;
auto const set_id = interactions_list[set_id_pos];
auto const set_beg = feature_interactions_ptr[set_id];
auto const set_end = feature_interactions_ptr[set_id + 1];
auto const feature_pos = set_beg + tid_y;
if (feature_pos < set_end) {
feature_buffer.Set(feature_interactions[feature_pos]);
}
}
}
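// Continuing the example above: splitting on feature 2 (a member of both sets) turns on
// the bits for features {0, 1, 2, 3, 4} in feature_buffer, which Split() below then ORs
// into the node and its children.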
__global__ void InteractionConstraintSplitKernel(LBitField64 feature,
int32_t feature_id,
LBitField64 node,
LBitField64 left,
LBitField64 right) {
auto tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid > node.Size()) {
return;
}
// enable constraints from feature
node |= feature;
// clear the buffer after use
if (tid < feature.Size()) {
feature.Clear(tid);
}
// enable constraints from parent
left |= node;
right |= node;
if (tid == feature_id) {
// enable the split feature, set all of them at last instead of
// setting it for parent to avoid race.
node.Set(feature_id);
left.Set(feature_id);
right.Set(feature_id);
}
}
void FeatureInteractionConstraintDevice::Split(
bst_node_t node_id, bst_feature_t feature_id, bst_node_t left_id, bst_node_t right_id) {
if (!has_constraint_) { return; }
CHECK_NE(node_id, left_id)
<< " Split node: " << node_id << " and its left child: "
<< left_id << " cannot be the same.";
CHECK_NE(node_id, right_id)
<< " Split node: " << node_id << " and its left child: "
<< right_id << " cannot be the same.";
CHECK_LT(right_id, s_node_constraints_.size());
CHECK_NE(s_node_constraints_.size(), 0);
LBitField64 node = s_node_constraints_[node_id];
LBitField64 left = s_node_constraints_[left_id];
LBitField64 right = s_node_constraints_[right_id];
dim3 const block3(16, 64, 1);
dim3 const grid3(common::DivRoundUp(n_sets_, 16),
common::DivRoundUp(s_fconstraints_.size(), 64));
dh::LaunchKernel {grid3, block3} (
RestoreFeatureListFromSetsKernel,
feature_buffer_, feature_id,
s_fconstraints_, s_fconstraints_ptr_,
s_sets_, s_sets_ptr_);
uint32_t constexpr kBlockThreads = 256;
auto n_grids = static_cast<uint32_t>(common::DivRoundUp(node.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
InteractionConstraintSplitKernel,
feature_buffer_,
feature_id,
node, left, right);
}
} // namespace xgboost
| 1ffe66aa07cbdaddd5d75ad77510878a16bda781.cu | /*!
* Copyright 2019 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <algorithm>
#include <string>
#include <set>
#include "xgboost/logging.h"
#include "xgboost/span.h"
#include "constraints.cuh"
#include "param.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
size_t FeatureInteractionConstraintDevice::Features() const {
return d_sets_ptr_.size() - 1;
}
void FeatureInteractionConstraintDevice::Configure(
tree::TrainParam const& param, int32_t const n_features) {
has_constraint_ = true;
if (param.interaction_constraints.length() == 0) {
has_constraint_ = false;
return;
}
// --- Parse interaction constraints
// Interaction constraints parsed from string parameter. After
// parsing, this looks like {{0, 1, 2}, {2, 3 ,4}}.
std::vector<std::vector<bst_feature_t>> h_feature_constraints;
try {
ParseInteractionConstraint(param.interaction_constraints, &h_feature_constraints);
} catch (dmlc::Error const& e) {
LOG(FATAL) << "Failed to parse feature interaction constraint:\n"
<< param.interaction_constraints << "\n"
<< "With error:\n" << e.what();
}
n_sets_ = h_feature_constraints.size();
size_t const n_feat_storage = LBitField64::ComputeStorageSize(n_features);
if (n_feat_storage == 0 && n_features != 0) {
LOG(FATAL) << "Wrong storage size, n_features: " << n_features;
}
// --- Initialize allowed features attached to nodes.
int32_t n_nodes { param.MaxNodes() };
node_constraints_.resize(n_nodes);
node_constraints_storage_.resize(n_nodes);
for (auto& n : node_constraints_storage_) {
n.resize(LBitField64::ComputeStorageSize(n_features));
}
for (size_t i = 0; i < node_constraints_storage_.size(); ++i) {
auto span = dh::ToSpan(node_constraints_storage_[i]);
node_constraints_[i] = LBitField64(span);
}
s_node_constraints_ = common::Span<LBitField64>(node_constraints_.data(),
node_constraints_.size());
// Represent constraints as CSR format, flatten is the value vector,
// ptr is row_ptr vector in CSR.
std::vector<uint32_t> h_feature_constraints_flatten;
for (auto const& constraints : h_feature_constraints) {
for (uint32_t c : constraints) {
h_feature_constraints_flatten.emplace_back(c);
}
}
std::vector<size_t> h_feature_constraints_ptr;
size_t n_features_in_constraints = 0;
h_feature_constraints_ptr.emplace_back(n_features_in_constraints);
for (auto const& v : h_feature_constraints) {
n_features_in_constraints += v.size();
h_feature_constraints_ptr.emplace_back(n_features_in_constraints);
}
// Copy the CSR to device.
d_fconstraints_.resize(h_feature_constraints_flatten.size());
thrust::copy(h_feature_constraints_flatten.cbegin(), h_feature_constraints_flatten.cend(),
d_fconstraints_.begin());
s_fconstraints_ = dh::ToSpan(d_fconstraints_);
d_fconstraints_ptr_.resize(h_feature_constraints_ptr.size());
thrust::copy(h_feature_constraints_ptr.cbegin(), h_feature_constraints_ptr.cend(),
d_fconstraints_ptr_.begin());
s_fconstraints_ptr_ = dh::ToSpan(d_fconstraints_ptr_);
// --- Compute interaction sets attached to each feature.
// Use a set to eliminate duplicated entries.
std::vector<std::set<int32_t> > h_features_set(n_features);
int32_t cid = 0;
for (auto const& constraints : h_feature_constraints) {
for (auto const& feat : constraints) {
h_features_set.at(feat).insert(cid);
}
cid++;
}
// Compute device sets.
std::vector<int32_t> h_sets;
int32_t ptr = 0;
std::vector<int32_t> h_sets_ptr {ptr};
for (auto const& feature : h_features_set) {
for (auto constraint_id : feature) {
h_sets.emplace_back(constraint_id);
}
// empty set is well defined here.
ptr += feature.size();
h_sets_ptr.emplace_back(ptr);
}
d_sets_ = h_sets;
d_sets_ptr_ = h_sets_ptr;
s_sets_ = dh::ToSpan(d_sets_);
s_sets_ptr_ = dh::ToSpan(d_sets_ptr_);
d_feature_buffer_storage_.resize(LBitField64::ComputeStorageSize(n_features));
feature_buffer_ = LBitField64{dh::ToSpan(d_feature_buffer_storage_)};
// --- Initialize result buffers.
output_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features));
output_buffer_bits_ = LBitField64(dh::ToSpan(output_buffer_bits_storage_));
input_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features));
input_buffer_bits_ = LBitField64(dh::ToSpan(input_buffer_bits_storage_));
result_buffer_.resize(n_features);
s_result_buffer_ = dh::ToSpan(result_buffer_);
}
FeatureInteractionConstraintDevice::FeatureInteractionConstraintDevice(
tree::TrainParam const& param, int32_t const n_features) :
has_constraint_{true}, n_sets_{0} {
this->Configure(param, n_features);
}
void FeatureInteractionConstraintDevice::Reset() {
for (auto& node : node_constraints_storage_) {
thrust::fill(node.begin(), node.end(), 0);
}
}
__global__ void ClearBuffersKernel(
LBitField64 result_buffer_output, LBitField64 result_buffer_input) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < result_buffer_output.Size()) {
result_buffer_output.Clear(tid);
}
if (tid < result_buffer_input.Size()) {
result_buffer_input.Clear(tid);
}
}
void FeatureInteractionConstraintDevice::ClearBuffers() {
CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size());
CHECK_LE(feature_buffer_.Size(), output_buffer_bits_.Size());
uint32_t constexpr kBlockThreads = 256;
auto const n_grids = static_cast<uint32_t>(
common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
ClearBuffersKernel,
output_buffer_bits_, input_buffer_bits_);
}
common::Span<bst_feature_t> FeatureInteractionConstraintDevice::QueryNode(int32_t node_id) {
if (!has_constraint_) { return {}; }
CHECK_LT(node_id, s_node_constraints_.size());
ClearBuffers();
thrust::counting_iterator<int32_t> begin(0);
thrust::counting_iterator<int32_t> end(result_buffer_.size());
auto p_result_buffer = result_buffer_.data();
LBitField64 node_constraints = s_node_constraints_[node_id];
thrust::device_ptr<bst_feature_t> const out_end = thrust::copy_if(
thrust::device,
begin, end,
p_result_buffer,
[=]__device__(int32_t pos) {
bool res = node_constraints.Check(pos);
return res;
});
size_t const n_available = std::distance(result_buffer_.data(), out_end);
return {s_result_buffer_.data(), s_result_buffer_.data() + n_available};
}
__global__ void SetInputBufferKernel(common::Span<bst_feature_t> feature_list_input,
LBitField64 result_buffer_input) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < feature_list_input.size()) {
result_buffer_input.Set(feature_list_input[tid]);
}
}
__global__ void QueryFeatureListKernel(LBitField64 node_constraints,
LBitField64 result_buffer_input,
LBitField64 result_buffer_output) {
result_buffer_output |= node_constraints;
result_buffer_output &= result_buffer_input;
}
common::Span<bst_feature_t> FeatureInteractionConstraintDevice::Query(
common::Span<bst_feature_t> feature_list, int32_t nid) {
if (!has_constraint_ || nid == 0) {
return feature_list;
}
ClearBuffers();
LBitField64 node_constraints = s_node_constraints_[nid];
CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size());
uint32_t constexpr kBlockThreads = 256;
auto n_grids = static_cast<uint32_t>(
common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
SetInputBufferKernel,
feature_list, input_buffer_bits_);
dh::LaunchKernel {n_grids, kBlockThreads} (
QueryFeatureListKernel,
node_constraints, input_buffer_bits_, output_buffer_bits_);
thrust::counting_iterator<int32_t> begin(0);
thrust::counting_iterator<int32_t> end(result_buffer_.size());
LBitField64 local_result_buffer = output_buffer_bits_;
thrust::device_ptr<bst_feature_t> const out_end = thrust::copy_if(
thrust::device,
begin, end,
result_buffer_.data(),
[=]__device__(int32_t pos) {
bool res = local_result_buffer.Check(pos);
return res;
});
size_t const n_available = std::distance(result_buffer_.data(), out_end);
common::Span<bst_feature_t> result =
{s_result_buffer_.data(), s_result_buffer_.data() + n_available};
return result;
}
// Find interaction sets for each feature, then store all features in
// those sets in a buffer.
__global__ void RestoreFeatureListFromSetsKernel(
LBitField64 feature_buffer,
bst_feature_t fid,
common::Span<bst_feature_t> feature_interactions,
common::Span<size_t> feature_interactions_ptr, // of size n interaction set + 1
common::Span<bst_feature_t> interactions_list,
common::Span<size_t> interactions_list_ptr) {
auto const tid_x = threadIdx.x + blockIdx.x * blockDim.x;
auto const tid_y = threadIdx.y + blockIdx.y * blockDim.y;
// painful mapping: fid -> sets related to it -> features related to sets.
auto const beg = interactions_list_ptr[fid];
auto const end = interactions_list_ptr[fid+1];
auto const n_sets = end - beg;
if (tid_x < n_sets) {
auto const set_id_pos = beg + tid_x;
auto const set_id = interactions_list[set_id_pos];
auto const set_beg = feature_interactions_ptr[set_id];
auto const set_end = feature_interactions_ptr[set_id + 1];
auto const feature_pos = set_beg + tid_y;
if (feature_pos < set_end) {
feature_buffer.Set(feature_interactions[feature_pos]);
}
}
}
__global__ void InteractionConstraintSplitKernel(LBitField64 feature,
int32_t feature_id,
LBitField64 node,
LBitField64 left,
LBitField64 right) {
auto tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid > node.Size()) {
return;
}
// enable constraints from feature
node |= feature;
// clear the buffer after use
if (tid < feature.Size()) {
feature.Clear(tid);
}
// enable constraints from parent
left |= node;
right |= node;
if (tid == feature_id) {
// enable the split feature, set all of them at last instead of
// setting it for parent to avoid race.
node.Set(feature_id);
left.Set(feature_id);
right.Set(feature_id);
}
}
void FeatureInteractionConstraintDevice::Split(
bst_node_t node_id, bst_feature_t feature_id, bst_node_t left_id, bst_node_t right_id) {
if (!has_constraint_) { return; }
CHECK_NE(node_id, left_id)
<< " Split node: " << node_id << " and its left child: "
<< left_id << " cannot be the same.";
CHECK_NE(node_id, right_id)
<< " Split node: " << node_id << " and its left child: "
<< right_id << " cannot be the same.";
CHECK_LT(right_id, s_node_constraints_.size());
CHECK_NE(s_node_constraints_.size(), 0);
LBitField64 node = s_node_constraints_[node_id];
LBitField64 left = s_node_constraints_[left_id];
LBitField64 right = s_node_constraints_[right_id];
dim3 const block3(16, 64, 1);
dim3 const grid3(common::DivRoundUp(n_sets_, 16),
common::DivRoundUp(s_fconstraints_.size(), 64));
dh::LaunchKernel {grid3, block3} (
RestoreFeatureListFromSetsKernel,
feature_buffer_, feature_id,
s_fconstraints_, s_fconstraints_ptr_,
s_sets_, s_sets_ptr_);
uint32_t constexpr kBlockThreads = 256;
auto n_grids = static_cast<uint32_t>(common::DivRoundUp(node.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
InteractionConstraintSplitKernel,
feature_buffer_,
feature_id,
node, left, right);
}
} // namespace xgboost
|
b2a75f49bffcd848f2467080de882393a659f6c4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil_inline.h>
// includes, kernels
#include <scan.cu> // defines prescanArray()
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
// regression test functionality
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
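// Note: "prescan" in this sample follows the work-efficient exclusive-scan formulation,
// so (assuming that convention) the all-ones input used below would produce {0, 1, 2, ...};
// computeGold() builds the sequential CPU reference that the GPU result is compared against.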
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
#ifndef __DEVICE_EMULATION__
unsigned int num_test_iterations = 100;
unsigned int num_elements = 1000000; // can support large, non-power-of-2 arrays!
#else
unsigned int num_test_iterations = 1;
unsigned int num_elements = 10000; // can support large, non-power-of-2 arrays!
#endif
cutGetCmdLineArgumenti( argc, (const char**) argv, "n", (int*)&num_elements);
cutGetCmdLineArgumenti( argc, (const char**) argv, "i", (int*)&num_test_iterations);
unsigned int mem_size = sizeof( float) * num_elements;
unsigned int timerGPU, timerCPU;
cutilCheckError(cutCreateTimer(&timerCPU));
cutilCheckError(cutCreateTimer(&timerGPU));
// allocate host memory to store the input data
float* h_data = (float*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = 1.0f;//(int)(10 * rand()/32768.f);
}
// compute reference solution
float* reference = (float*) malloc( mem_size);
cutStartTimer(timerCPU);
for (unsigned int i = 0; i < num_test_iterations; i++)
{
computeGold( reference, h_data, num_elements);
}
cutStopTimer(timerCPU);
// allocate device memory input and output arrays
float* d_idata = NULL;
float* d_odata = NULL;
cutilSafeCall( hipMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_odata, mem_size));
// copy host memory to device input array
cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
cutilSafeCall( hipMemcpy( d_odata, h_data, mem_size, hipMemcpyHostToDevice) );
printf("Running parallel prefix sum (prescan) of %d elements\n", num_elements);
printf("This version is work efficient (O(n) adds)\n");
printf("and has very few shared memory bank conflicts\n\n");
preallocBlockSums(num_elements);
// run once to remove startup overhead
prescanArray(d_odata, d_idata, num_elements);
// Run the prescan
cutStartTimer(timerGPU);
for (unsigned int i = 0; i < num_test_iterations; i++)
{
//printf("prescanArray\n");
prescanArray(d_odata, d_idata, num_elements);
}
cutStopTimer(timerGPU);
deallocBlockSums();
// copy result from device to host
cutilSafeCall(hipMemcpy( h_data, d_odata, sizeof(float) * num_elements,
hipMemcpyDeviceToHost));
// If this is a regression test write the results to a file
if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression"))
{
// write file for regression test
cutWriteFilef( "./data/result.dat", h_data, num_elements, 0.0);
}
else
{
// custom output handling when no regression test running
        // in this case check if the result is equivalent to the expected solution
unsigned int result_regtest = cutComparef( reference, h_data, num_elements);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
printf( "Average GPU execution time: %f ms\n", cutGetTimerValue(timerGPU) / num_test_iterations);
printf( "CPU execution time: %f ms\n", cutGetTimerValue(timerCPU) / num_test_iterations);
}
printf("\nCheck out the CUDA Data Parallel Primitives Library for more on scan.\n");
printf("http://www.gpgpu.org/developer/cudpp\n");
// cleanup memory
cutDeleteTimer(timerCPU);
cutDeleteTimer(timerGPU);
free( h_data);
free( reference);
hipFree( d_odata);
hipFree( d_idata);
hipDeviceReset();
}
| b2a75f49bffcd848f2467080de882393a659f6c4.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil_inline.h>
// includes, kernels
#include <scan.cu> // defines prescanArray()
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
// regression test functionality
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
#ifndef __DEVICE_EMULATION__
unsigned int num_test_iterations = 100;
unsigned int num_elements = 1000000; // can support large, non-power-of-2 arrays!
#else
unsigned int num_test_iterations = 1;
unsigned int num_elements = 10000; // can support large, non-power-of-2 arrays!
#endif
cutGetCmdLineArgumenti( argc, (const char**) argv, "n", (int*)&num_elements);
cutGetCmdLineArgumenti( argc, (const char**) argv, "i", (int*)&num_test_iterations);
unsigned int mem_size = sizeof( float) * num_elements;
unsigned int timerGPU, timerCPU;
cutilCheckError(cutCreateTimer(&timerCPU));
cutilCheckError(cutCreateTimer(&timerGPU));
// allocate host memory to store the input data
float* h_data = (float*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = 1.0f;//(int)(10 * rand()/32768.f);
}
// compute reference solution
float* reference = (float*) malloc( mem_size);
cutStartTimer(timerCPU);
for (unsigned int i = 0; i < num_test_iterations; i++)
{
computeGold( reference, h_data, num_elements);
}
cutStopTimer(timerCPU);
// allocate device memory input and output arrays
float* d_idata = NULL;
float* d_odata = NULL;
cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_odata, mem_size));
// copy host memory to device input array
cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
cutilSafeCall( cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice) );
printf("Running parallel prefix sum (prescan) of %d elements\n", num_elements);
printf("This version is work efficient (O(n) adds)\n");
printf("and has very few shared memory bank conflicts\n\n");
preallocBlockSums(num_elements);
// run once to remove startup overhead
prescanArray(d_odata, d_idata, num_elements);
// Run the prescan
cutStartTimer(timerGPU);
for (unsigned int i = 0; i < num_test_iterations; i++)
{
//printf("prescanArray\n");
prescanArray(d_odata, d_idata, num_elements);
}
cutStopTimer(timerGPU);
deallocBlockSums();
// copy result from device to host
cutilSafeCall(cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements,
cudaMemcpyDeviceToHost));
// If this is a regression test write the results to a file
if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression"))
{
// write file for regression test
cutWriteFilef( "./data/result.dat", h_data, num_elements, 0.0);
}
else
{
// custom output handling when no regression test running
        // in this case check if the result is equivalent to the expected solution
unsigned int result_regtest = cutComparef( reference, h_data, num_elements);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
printf( "Average GPU execution time: %f ms\n", cutGetTimerValue(timerGPU) / num_test_iterations);
printf( "CPU execution time: %f ms\n", cutGetTimerValue(timerCPU) / num_test_iterations);
}
printf("\nCheck out the CUDA Data Parallel Primitives Library for more on scan.\n");
printf("http://www.gpgpu.org/developer/cudpp\n");
// cleanup memory
cutDeleteTimer(timerCPU);
cutDeleteTimer(timerGPU);
free( h_data);
free( reference);
cudaFree( d_odata);
cudaFree( d_idata);
cudaThreadExit();
}
|
d9d166ada663614022f554cce2ec14e5c1deb45c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgetf2.cu normal z -> d, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#define PRECISION_d
#define dswap_bs 64
//#if (GPUSHMEM < 200)
#define dger_bs 512 // 512 is max threads for 1.x cards
//#else
//#define dger_bs 1024
//#endif
void magma_dgetf2_swap(
magma_int_t n, double *x, magma_int_t i, magma_int_t j, magma_int_t incx);
void magma_dscal_dger(
magma_int_t m, magma_int_t n, double *A, magma_int_t lda);
/**
DGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
A DOUBLE_PRECISION array, dimension (LDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_dgesv_aux
********************************************************************/
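// Hedged usage sketch (not part of the original file): assumes dA already holds an
// m-by-n double-precision matrix on the device with leading dimension ldda, and that
// ipiv is a host array of length min(m,n).
//   magma_int_t info;
//   magma_int_t *ipiv = (magma_int_t*) malloc( min(m,n) * sizeof(magma_int_t) );
//   magma_dgetf2_gpu( m, n, dA, ldda, ipiv, &info );
//   // info == 0 on success; info == k > 0 means U(k,k) is exactly zero (singular U).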
extern "C" magma_int_t
magma_dgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_int_t *ipiv,
magma_int_t *info )
{
#define dA(i, j) (dA + (i) + (j)*ldda)
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > dger_bs) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for( j=0; j < min_mn; j++ ) {
hipDeviceSetCacheConfig( hipFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_idamax(m-j, dA(j,j), 1);
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of dA since it is on GPU
//if ( dA(jp, j) != 0.0) {
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_dgetf2_swap(n, dA, j, jp, ldda);
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_dscal_dger(m-j, n-j, dA(j, j), ldda);
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
__global__
void kernel_dswap(int n, double *x, int i, int j, int incx)
{
int id = blockIdx.x * dswap_bs + threadIdx.x;
if (id < n) {
double tmp = x[i + incx*id];
x[i + incx*id] = x[j + incx*id];
x[j + incx*id] = tmp;
}
}
void magma_dgetf2_swap(magma_int_t n, double *x, magma_int_t i, magma_int_t j, magma_int_t incx)
{
/*
dswap two row vectors: ith and jth
*/
dim3 threads(dswap_bs, 1, 1);
int num_blocks = (n - 1)/dswap_bs + 1;
dim3 grid(num_blocks,1);
hipLaunchKernelGGL(( kernel_dswap), dim3(grid), dim3(threads), 0, magma_stream , n, x, i, j, incx);
}
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double shared_data[];
__global__
void kernel_dscal_dger(int m, int n, double *A, int lda)
{
double *shared_y = shared_data;
int tid = blockIdx.x * dger_bs + threadIdx.x;
double reg = MAGMA_D_ZERO;
if (threadIdx.x < n) {
shared_y[threadIdx.x] = A[lda * threadIdx.x];
}
__syncthreads();
if (tid < m && tid > 0) {
reg = A[tid];
reg *= MAGMA_D_DIV(MAGMA_D_ONE, shared_y[0]);
A[tid] = reg;
#pragma unroll
for(int i=1; i < n; i++) {
A[tid + i*lda] += (MAGMA_D_NEG_ONE) * shared_y[i] * reg;
}
}
}
void magma_dscal_dger(magma_int_t m, magma_int_t n, double *A, magma_int_t lda)
{
/*
        Specialized kernel which merges the two kernels dscal and dger:
        1) scale the first column vector A(1:M-1,0) by 1/A(0,0);
        2) perform a dger operation on the trailing matrix A(1:M-1,1:N-1) += alpha*x*y**T, where
           alpha := -1.0; x := A(1:M-1,0) and y := A(0,1:N-1);
*/
dim3 threads(dger_bs, 1, 1);
int num_blocks = (m - 1)/dger_bs + 1;
dim3 grid(num_blocks,1);
size_t shared_size = sizeof(double)*(n);
hipLaunchKernelGGL(( kernel_dscal_dger), dim3(grid), dim3(threads), shared_size, magma_stream, m, n, A, lda);
}
| d9d166ada663614022f554cce2ec14e5c1deb45c.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgetf2.cu normal z -> d, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#define PRECISION_d
#define dswap_bs 64
//#if (GPUSHMEM < 200)
#define dger_bs 512 // 512 is max threads for 1.x cards
//#else
//#define dger_bs 1024
//#endif
void magma_dgetf2_swap(
magma_int_t n, double *x, magma_int_t i, magma_int_t j, magma_int_t incx);
void magma_dscal_dger(
magma_int_t m, magma_int_t n, double *A, magma_int_t lda);
/**
DGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
A DOUBLE_PRECISION array, dimension (LDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_dgesv_aux
********************************************************************/
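// Hedged usage sketch (not part of the original file): assumes dA already holds an
// m-by-n double-precision matrix on the device with leading dimension ldda, and that
// ipiv is a host array of length min(m,n).
//   magma_int_t info;
//   magma_int_t *ipiv = (magma_int_t*) malloc( min(m,n) * sizeof(magma_int_t) );
//   magma_dgetf2_gpu( m, n, dA, ldda, ipiv, &info );
//   // info == 0 on success; info == k > 0 means U(k,k) is exactly zero (singular U).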
extern "C" magma_int_t
magma_dgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_int_t *ipiv,
magma_int_t *info )
{
#define dA(i, j) (dA + (i) + (j)*ldda)
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > dger_bs) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for( j=0; j < min_mn; j++ ) {
cudaDeviceSetCacheConfig( cudaFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_idamax(m-j, dA(j,j), 1);
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of dA since it is on GPU
//if ( dA(jp, j) != 0.0) {
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_dgetf2_swap(n, dA, j, jp, ldda);
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_dscal_dger(m-j, n-j, dA(j, j), ldda);
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
__global__
void kernel_dswap(int n, double *x, int i, int j, int incx)
{
int id = blockIdx.x * dswap_bs + threadIdx.x;
if (id < n) {
double tmp = x[i + incx*id];
x[i + incx*id] = x[j + incx*id];
x[j + incx*id] = tmp;
}
}
void magma_dgetf2_swap(magma_int_t n, double *x, magma_int_t i, magma_int_t j, magma_int_t incx)
{
/*
dswap two row vectors: ith and jth
*/
dim3 threads(dswap_bs, 1, 1);
int num_blocks = (n - 1)/dswap_bs + 1;
dim3 grid(num_blocks,1);
kernel_dswap<<< grid, threads, 0, magma_stream >>>(n, x, i, j, incx);
}
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double shared_data[];
__global__
void kernel_dscal_dger(int m, int n, double *A, int lda)
{
double *shared_y = shared_data;
int tid = blockIdx.x * dger_bs + threadIdx.x;
double reg = MAGMA_D_ZERO;
if (threadIdx.x < n) {
shared_y[threadIdx.x] = A[lda * threadIdx.x];
}
__syncthreads();
if (tid < m && tid > 0) {
reg = A[tid];
reg *= MAGMA_D_DIV(MAGMA_D_ONE, shared_y[0]);
A[tid] = reg;
#pragma unroll
for(int i=1; i < n; i++) {
A[tid + i*lda] += (MAGMA_D_NEG_ONE) * shared_y[i] * reg;
}
}
}
void magma_dscal_dger(magma_int_t m, magma_int_t n, double *A, magma_int_t lda)
{
/*
        Specialized kernel which merges the two kernels dscal and dger:
        1) scale the first column vector A(1:M-1,0) by 1/A(0,0);
        2) perform a dger operation on the trailing matrix A(1:M-1,1:N-1) += alpha*x*y**T, where
           alpha := -1.0; x := A(1:M-1,0) and y := A(0,1:N-1);
*/
dim3 threads(dger_bs, 1, 1);
int num_blocks = (m - 1)/dger_bs + 1;
dim3 grid(num_blocks,1);
size_t shared_size = sizeof(double)*(n);
kernel_dscal_dger<<< grid, threads, shared_size, magma_stream>>>(m, n, A, lda);
}
|
209c91f0cdf65e5929dce5bf6d5b8624da215e12.hip | // !!! This is a file automatically generated by hipify!!!
#include "getCUDA.h"
#include <iostream>
#include <stdio.h>
#include "checkDevices.h"
// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %lu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %lu\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %lu\n", devProp.totalConstMem);
printf("Texture alignment: %lu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
void checkDevices(int cardNum)
{
std::cout << "Checking devices ...\n";
int numDevices;
cudasafe( hipGetDeviceCount(&numDevices), "hipGetDeviceCount" );
std::cout << "Number of devices: " << numDevices << std::endl;
std::cout << "Setting devices ..." << std::endl;
//cudasafe( hipSetValidDevices(NULL, 0), "hipSetValidDevices");
cudasafe( hipSetDevice(cardNum), "hipSetDevice");
cudasafe(hipFree(0), "hipFree");
int device;
cudasafe( hipGetDevice(&device), "hipGetDevice");
std::cout << "Selected device: " << device << std::endl;
size_t free_byte ;
size_t total_byte ;
cudasafe( hipMemGetInfo( &free_byte, &total_byte ), "hipMemGetInfo" ) ;
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
std::cout << "GPU memory usage: used = " << used_db/1024.0/1024.0 << " MB, free = " << free_db/1024.0/1024.0 << " MB, total = " << total_db/1024.0/1024.0 << " MB" << std::endl;
hipDeviceProp_t devProp;
cudasafe( hipGetDeviceProperties(&devProp, device), "hipGetDeviceProperties" );
printDevProp(devProp);
}
void cudasafe (hipError_t error, const char* message)
{
if(error!=hipSuccess)
{
std::cout << "ERROR: " << message << ", " << error << std::endl;
exit(-1);
}
}
| 209c91f0cdf65e5929dce5bf6d5b8624da215e12.cu | #include "getCUDA.h"
#include <iostream>
#include <stdio.h>
#include "checkDevices.h"
// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %lu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %lu\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %lu\n", devProp.totalConstMem);
printf("Texture alignment: %lu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
void checkDevices(int cardNum)
{
std::cout << "Checking devices ...\n";
int numDevices;
cudasafe( cudaGetDeviceCount(&numDevices), "cudaGetDeviceCount" );
std::cout << "Number of devices: " << numDevices << std::endl;
std::cout << "Setting devices ..." << std::endl;
//cudasafe( cudaSetValidDevices(NULL, 0), "cudaSetValidDevices");
cudasafe( cudaSetDevice(cardNum), "cudaSetDevice");
cudasafe(cudaFree(0), "cudaFree");
int device;
cudasafe( cudaGetDevice(&device), "cudaGetDevice");
std::cout << "Selected device: " << device << std::endl;
size_t free_byte ;
size_t total_byte ;
cudasafe( cudaMemGetInfo( &free_byte, &total_byte ), "cudaMemGetInfo" ) ;
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
std::cout << "GPU memory usage: used = " << used_db/1024.0/1024.0 << " MB, free = " << free_db/1024.0/1024.0 << " MB, total = " << total_db/1024.0/1024.0 << " MB" << std::endl;
cudaDeviceProp devProp;
cudasafe( cudaGetDeviceProperties(&devProp, device), "cudaGetDeviceProperties" );
printDevProp(devProp);
}
void cudasafe (cudaError_t error, const char* message)
{
if(error!=cudaSuccess)
{
std::cout << "ERROR: " << message << ", " << error << std::endl;
exit(-1);
}
}
|
3b0d4b5db8472bf684d9eaf92d1263c6350582cc.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "./abstract_context.h"
namespace paddle {
namespace mpc {
hipStream_t AbstractContext::_s_stream = NULL;
void AbstractContext::set_random_seed(const block &seed, size_t idx) {
PADDLE_ENFORCE_LE(idx, _num_party,
"prng idx should be less and equal to %d.",
_num_party);
get_prng(idx).set_seed(&seed, sizeof(seed));
}
} // namespace mpc
} //namespace paddle
| 3b0d4b5db8472bf684d9eaf92d1263c6350582cc.cu | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "./abstract_context.h"
namespace paddle {
namespace mpc {
cudaStream_t AbstractContext::_s_stream = NULL;
void AbstractContext::set_random_seed(const block &seed, size_t idx) {
PADDLE_ENFORCE_LE(idx, _num_party,
"prng idx should be less and equal to %d.",
_num_party);
get_prng(idx).set_seed(&seed, sizeof(seed));
}
} // namespace mpc
} //namespace paddle
|
a1cce20af9265f545562748931266fcc9a95c5c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_stencil37_hack2_cp_cols(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int d_xpitch, int d_ypitch, int d_zpitch,int s_xpitch,int s_ypitch, int s_zpitch, int n_rows, int n_cols,int n_slices,int tile_x,int tile_y, int tile_z){
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0)&& threadIdx.x==0 && threadIdx.z==0){
printf("copy cols: begin\n");
printf("copy cols: n_cols=%d,n_rows=%d,n_slices=%d\n",n_cols,n_rows,n_slices);
printf("copy cols: gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("copy cols: blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("copy cols: tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = blockDim.y * blockIdx.y;
int base_global_col = tile_x * blockIdx.x;
//int dst_area = n_rows*n_cols;
//int shared_area = gridDim.x*n_rows*2;
int dst_area = d_ypitch*d_xpitch;
int shared_area = gridDim.x*s_ypitch*2;
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0&&threadIdx.x==0&&threadIdx.z==0)){
printf("copy cols: shared_area=%d\n",shared_area);
}
#endif
//int base_global_idx = base_global_slice*dst_area + base_global_row * n_cols + base_global_col;
int base_global_idx = base_global_slice*dst_area + base_global_row * d_xpitch + base_global_col;
int nextCol= base_global_col+1;
bool legalNextCol = (nextCol<n_cols)?1:0;
int ty = threadIdx.y;
bool legalCurRow = (base_global_row + ty)<n_rows;
for(int tz=0;tz<tile_z;++tz){
bool legalCurSlice = (base_global_slice + tz)<n_slices;
//int idx = (base_global_slice+tz)*shared_area + blockIdx.x*2*n_rows+blockIdx.y*blockDim.y+ty;
//int idx_dst =base_global_idx + tz*dst_area + ty*n_cols ;
int idx = (base_global_slice+tz)*shared_area + blockIdx.x*2*s_ypitch+blockIdx.y*blockDim.y+ty;
int idx_dst =base_global_idx + tz*dst_area + ty*d_xpitch ;
if(legalCurRow && legalCurSlice){
shared_cols[idx] = dst[idx_dst];
}
if(legalCurRow && legalCurSlice && legalNextCol){
//shared_cols[idx + n_rows] = dst[idx_dst + 1];
shared_cols[idx + s_ypitch] = dst[idx_dst + 1];
}
__syncthreads();
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.z ==0 && blockIdx.y==0 && blockIdx.x==0 && (threadIdx.x==0)){
// printf("shared_cols: addr:%d, val = %f\n", threadIdx.y,shared_cols[threadIdx.y]);
}
if(blockIdx.y==0 && blockIdx.x==0 &&blockIdx.z==0 ){
if((threadIdx.x==0 || threadIdx.x==1 || threadIdx.x==2 ) && threadIdx.y==0){
int d_addr0 = base_global_idx+0*dst_area+threadIdx.x;
int d_addr1 = base_global_idx+1*dst_area+threadIdx.x;
int addr = base_global_slice+blockIdx.x*blockDim.x + threadIdx.x;
int addr1 = shared_area*(base_global_slice+1)+blockIdx.x*blockDim.x+ threadIdx.x;
int addr2 = shared_area*(base_global_slice+2)+blockIdx.x*blockDim.x+ threadIdx.x;
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,d_addr0,dst[d_addr0]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,d_addr1,dst[d_addr1]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_cols: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,addr,shared_cols[addr]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_cols: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,addr1,shared_cols[addr1]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_cols: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,2,addr2,shared_cols[addr2]);
}
}
#endif
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0 && threadIdx.x==0 && threadIdx.z==0)){
printf("copy cols end!\n");
}
#endif
} | a1cce20af9265f545562748931266fcc9a95c5c6.cu | #include "includes.h"
__global__ void gpu_stencil37_hack2_cp_cols(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int d_xpitch, int d_ypitch, int d_zpitch,int s_xpitch,int s_ypitch, int s_zpitch, int n_rows, int n_cols,int n_slices,int tile_x,int tile_y, int tile_z){
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0)&& threadIdx.x==0 && threadIdx.z==0){
printf("copy cols: begin\n");
printf("copy cols: n_cols=%d,n_rows=%d,n_slices=%d\n",n_cols,n_rows,n_slices);
printf("copy cols: gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("copy cols: blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("copy cols: tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = blockDim.y * blockIdx.y;
int base_global_col = tile_x * blockIdx.x;
//int dst_area = n_rows*n_cols;
//int shared_area = gridDim.x*n_rows*2;
int dst_area = d_ypitch*d_xpitch;
int shared_area = gridDim.x*s_ypitch*2;
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0&&threadIdx.x==0&&threadIdx.z==0)){
printf("copy cols: shared_area=%d\n",shared_area);
}
#endif
//int base_global_idx = base_global_slice*dst_area + base_global_row * n_cols + base_global_col;
int base_global_idx = base_global_slice*dst_area + base_global_row * d_xpitch + base_global_col;
int nextCol= base_global_col+1;
bool legalNextCol = (nextCol<n_cols)?1:0;
int ty = threadIdx.y;
bool legalCurRow = (base_global_row + ty)<n_rows;
for(int tz=0;tz<tile_z;++tz){
bool legalCurSlice = (base_global_slice + tz)<n_slices;
//int idx = (base_global_slice+tz)*shared_area + blockIdx.x*2*n_rows+blockIdx.y*blockDim.y+ty;
//int idx_dst =base_global_idx + tz*dst_area + ty*n_cols ;
int idx = (base_global_slice+tz)*shared_area + blockIdx.x*2*s_ypitch+blockIdx.y*blockDim.y+ty;
int idx_dst =base_global_idx + tz*dst_area + ty*d_xpitch ;
if(legalCurRow && legalCurSlice){
shared_cols[idx] = dst[idx_dst];
}
if(legalCurRow && legalCurSlice && legalNextCol){
//shared_cols[idx + n_rows] = dst[idx_dst + 1];
shared_cols[idx + s_ypitch] = dst[idx_dst + 1];
}
__syncthreads();
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.z ==0 && blockIdx.y==0 && blockIdx.x==0 && (threadIdx.x==0)){
// printf("shared_cols: addr:%d, val = %f\n", threadIdx.y,shared_cols[threadIdx.y]);
}
if(blockIdx.y==0 && blockIdx.x==0 &&blockIdx.z==0 ){
if((threadIdx.x==0 || threadIdx.x==1 || threadIdx.x==2 ) && threadIdx.y==0){
int d_addr0 = base_global_idx+0*dst_area+threadIdx.x;
int d_addr1 = base_global_idx+1*dst_area+threadIdx.x;
int addr = base_global_slice+blockIdx.x*blockDim.x + threadIdx.x;
int addr1 = shared_area*(base_global_slice+1)+blockIdx.x*blockDim.x+ threadIdx.x;
int addr2 = shared_area*(base_global_slice+2)+blockIdx.x*blockDim.x+ threadIdx.x;
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,d_addr0,dst[d_addr0]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,d_addr1,dst[d_addr1]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_cols: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,addr,shared_cols[addr]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_cols: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,addr1,shared_cols[addr1]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_cols: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,2,addr2,shared_cols[addr2]);
}
}
#endif
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0 && threadIdx.x==0 && threadIdx.z==0)){
printf("copy cols end!\n");
}
#endif
} |
f4fbf0d6872a6a8606bb405b12ab7300b363f499.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.hip"
#include "support.h"
#include "timer.hip"
int main (int argc, char *argv[])
{
Timer timer;
hipError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1500;
matBcol = 1300;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm-tiled # All matrices are 1000 x 1000"
"\n Usage: ./sgemm-tiled <m> # All matrices are m x m"
"\n Usage: ./sgemm-tiled <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol,
matBrow, matBcol, matArow, matBcol);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
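    // One possible fill-in for this placeholder, shown only as a hedged sketch
    // (the original deliberately leaves it as an exercise):
    //   cuda_ret = hipMalloc((void**)&A_d, A_sz * sizeof(float));
    //   cuda_ret = hipMalloc((void**)&B_d, B_sz * sizeof(float));
    //   cuda_ret = hipMalloc((void**)&C_d, C_sz * sizeof(float));
    //   if (cuda_ret != hipSuccess) { printf("Unable to allocate device memory\n"); exit(-1); }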
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
kernelTimer(matArow, matBcol, matBrow, A_d, B_d, C_d);
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
return 0;
}
| f4fbf0d6872a6a8606bb405b12ab7300b363f499.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.cu"
#include "support.h"
#include "timer.cu"
int main (int argc, char *argv[])
{
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1500;
matBcol = 1300;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm-tiled # All matrices are 1000 x 1000"
"\n Usage: ./sgemm-tiled <m> # All matrices are m x m"
"\n Usage: ./sgemm-tiled <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol,
matBrow, matBcol, matArow, matBcol);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
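    // One possible fill-in for this placeholder, shown only as a hedged sketch
    // (the original deliberately leaves it as an exercise):
    //   cuda_ret = cudaMalloc((void**)&A_d, A_sz * sizeof(float));
    //   cuda_ret = cudaMalloc((void**)&B_d, B_sz * sizeof(float));
    //   cuda_ret = cudaMalloc((void**)&C_d, C_sz * sizeof(float));
    //   if (cuda_ret != cudaSuccess) { printf("Unable to allocate device memory\n"); exit(-1); }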
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
kernelTimer(matArow, matBcol, matBrow, A_d, B_d, C_d);
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
return 0;
}
|
7763867a2061a80afbaabcd7198175219ffd5c80.hip | // !!! This is a file automatically generated by hipify!!!
/**CUDA parallel execution has a hierarchical structure:
 * each kernel launch can be split into multiple blocks that execute in parallel, and each
 * block can in turn be split into multiple threads that execute in parallel.
 * 0. Launch N blocks, each block containing only 1 thread.
 * 1. Launch 1 block containing N threads.
 * 2. Launch N/2 blocks, each block containing N/2 threads.
 *
 * The number of blocks and threads is limited for each hardware GPU.
 * The device's maxThreadsPerBlock property caps the threads per block; on an RTX 2060 it is 1024, and 512 or 1024 are common choices.
 * The maximum number of blocks that can be launched is 2^31 - 1 = 2,147,483,647.
 * gpu_add << <((N + 511) / 512), 512>> > (device_a, device_b, device_c);
 * gpu_add << <((N + 1023) / 1024), 1024>> > (device_a, device_b, device_c);
 * Tip: the grid-size expression rounds the division up; a modulo check achieves the same effect.
 *
 * Note that the limits above apply to the x dimension; for the y and z dimensions the block count is limited to block_number = 65535.
 */
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <time.h>
// Defining the number of elements in array.
#define N 100000000
/**Compute the unique ID of thread.
 * Formula: unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
 * Explanation:
 * threadIdx.x gives this thread's ID within its block;
 * blockIdx.x gives the ID of the block this thread belongs to;
 * blockDim.x gives the total number of threads in a block;
 * blocks and threads together form something like a two-dimensional matrix, called the grid,
 * so blockIdx.x * blockDim.x acts as an offset, and adding threadIdx.x gives the thread's unique global ID in that matrix.
 *
 * tid is advanced along the x dimension; blockDim.x * gridDim.x is like multiplying the matrix's row and column counts, i.e. the total number of threads launched.
 * blockDim.x is the number of threads per block in the x dimension; gridDim.x is the number of blocks launched in the x dimension.
 * Adding this stride to tid on every iteration yields the index of the next task/element to process.
 */
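// Illustrative worked example (not from the original source; numbers assume the
// second launch configuration used below, (N + 1023) / 1024 = 97657 blocks of
// 1024 threads for N = 100000000):
//   thread 5 of block 2 starts at tid = 5 + 2 * 1024 = 2053 and handles element 2053;
//   the stride is blockDim.x * gridDim.x = 1024 * 97657 = 100000768, so the next
//   candidate index is 2053 + 100000768 = 100002821 >= N and the loop exits;
//   with this configuration every thread processes at most one element.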
// Define the kernel function.
__global__ void gpu_add(int *device_a, int *device_b, int *device_c)
{
// Getting the index of current kernel.
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N)
{
device_c[tid] = device_b[tid] + device_a[tid];
tid += blockDim.x * gridDim.x;
}
}
int main(int argc, char **argv)
{
// Declare host and decive arrays.
int *host_a, *host_b, *host_c;
int *device_a, *device_b, *device_c;
// Allocate memory on host and device.
host_a = (int*)malloc(N * sizeof(int));
host_b = (int*)malloc(N * sizeof(int));
host_c = (int*)malloc(N * sizeof(int));
hipMalloc((void**)&device_a, N * sizeof(int));
hipMalloc((void**)&device_b, N * sizeof(int));
hipMalloc((void**)&device_c, N * sizeof(int));
// Initialize host arrays.
for (unsigned int i = 0; i < N; ++i)
{
host_a[i] = 2 * i * i;
host_b[i] = i;
}
// Copy data from host memory to device memory.
hipMemcpy(device_a, host_a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(device_b, host_b, N * sizeof(int), hipMemcpyHostToDevice);
// Kernel call
clock_t start_device_gpu = clock();
// gpu_add << <((N + 511) / 512), 512>> > (device_a, device_b, device_c);
gpu_add << <((N + 1023) / 1024), 1024>> > (device_a, device_b, device_c);
// Copy data from device memory to host memory.
hipMemcpy(host_c, device_c, N * sizeof(int), hipMemcpyDeviceToHost);
    // This ensures that kernel execution finishes before going forward.
    // The third edition of "Advanced Programming in the UNIX Environment" describes this kind of sync operation.
hipDeviceSynchronize();
clock_t end_device_gpu = clock();
// Cost compute time on GPU.
double time_device_gpu = (double)(end_device_gpu - start_device_gpu) / CLOCKS_PER_SEC;
    // Check whether the computed result is correct.
int correct = 1;
printf("Vector addition on GPU.\n");
for (unsigned int i = 0; i < N; ++i)
{
if ((host_a[i] + host_b[i] != host_c[i]))
{
correct = 0;
}
}
if (correct == 1)
{
printf("GPU has computed vector sum correctly.\n");
}
else
{
printf("There is an error in GPU computation.\n");
}
    // Of course, timing this way does not fully reflect GPU performance,
    // because the GPU needs warm-up time; usually a warm-up test program is run first to start the GPU.
std::cout << N << " of elements in array." << std::endl;
std::cout << "Device GPU time: " << time_device_gpu << std::endl;
// Free up the host memory and device memory.
hipFree(device_a);
hipFree(device_b);
hipFree(device_c);
free(host_a);
free(host_b);
free(host_c);
return 0;
}
| 7763867a2061a80afbaabcd7198175219ffd5c80.cu | /**CUDA parallel execution has a hierarchical structure:
 * each kernel launch can be split into multiple blocks that execute in parallel, and each
 * block can in turn be split into multiple threads that execute in parallel.
 * 0. Launch N blocks, each block containing only 1 thread.
 * 1. Launch 1 block containing N threads.
 * 2. Launch N/2 blocks, each block containing N/2 threads.
 *
 * The number of blocks and threads is limited for each hardware GPU.
 * The device's maxThreadsPerBlock property caps the threads per block; on an RTX 2060 it is 1024, and 512 or 1024 are common choices.
 * The maximum number of blocks that can be launched is 2^31 - 1 = 2,147,483,647.
 * gpu_add << <((N + 511) / 512), 512>> > (device_a, device_b, device_c);
 * gpu_add << <((N + 1023) / 1024), 1024>> > (device_a, device_b, device_c);
 * Tip: the grid-size expression rounds the division up; a modulo check achieves the same effect.
 *
 * Note that the limits above apply to the x dimension; for the y and z dimensions the block count is limited to block_number = 65535.
 */
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <time.h>
// Defining the number of elements in array.
#define N 100000000
/**Compute the unique ID of thread.
 * Formula: unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
 * Explanation:
 * threadIdx.x gives this thread's ID within its block;
 * blockIdx.x gives the ID of the block this thread belongs to;
 * blockDim.x gives the total number of threads in a block;
 * blocks and threads together form something like a two-dimensional matrix, called the grid,
 * so blockIdx.x * blockDim.x acts as an offset, and adding threadIdx.x gives the thread's unique global ID in that matrix.
 *
 * tid is advanced along the x dimension; blockDim.x * gridDim.x is like multiplying the matrix's row and column counts, i.e. the total number of threads launched.
 * blockDim.x is the number of threads per block in the x dimension; gridDim.x is the number of blocks launched in the x dimension.
 * Adding this stride to tid on every iteration yields the index of the next task/element to process.
 */
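// Illustrative worked example (not from the original source; numbers assume the
// second launch configuration used below, (N + 1023) / 1024 = 97657 blocks of
// 1024 threads for N = 100000000):
//   thread 5 of block 2 starts at tid = 5 + 2 * 1024 = 2053 and handles element 2053;
//   the stride is blockDim.x * gridDim.x = 1024 * 97657 = 100000768, so the next
//   candidate index is 2053 + 100000768 = 100002821 >= N and the loop exits;
//   with this configuration every thread processes at most one element.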
// Define the kernel function.
__global__ void gpu_add(int *device_a, int *device_b, int *device_c)
{
// Getting the index of current kernel.
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N)
{
device_c[tid] = device_b[tid] + device_a[tid];
tid += blockDim.x * gridDim.x;
}
}
int main(int argc, char **argv)
{
// Declare host and decive arrays.
int *host_a, *host_b, *host_c;
int *device_a, *device_b, *device_c;
// Allocate memory on host and device.
host_a = (int*)malloc(N * sizeof(int));
host_b = (int*)malloc(N * sizeof(int));
host_c = (int*)malloc(N * sizeof(int));
cudaMalloc((void**)&device_a, N * sizeof(int));
cudaMalloc((void**)&device_b, N * sizeof(int));
cudaMalloc((void**)&device_c, N * sizeof(int));
// Initialize host arrays.
for (unsigned int i = 0; i < N; ++i)
{
host_a[i] = 2 * i * i;
host_b[i] = i;
}
// Copy data from host memory to device memory.
cudaMemcpy(device_a, host_a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(device_b, host_b, N * sizeof(int), cudaMemcpyHostToDevice);
// Kernel call
clock_t start_device_gpu = clock();
// gpu_add << <((N + 511) / 512), 512>> > (device_a, device_b, device_c);
gpu_add << <((N + 1023) / 1024), 1024>> > (device_a, device_b, device_c);
// Copy data from device memory to host memory.
cudaMemcpy(host_c, device_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    // This ensures that kernel execution finishes before going forward.
    // The third edition of "Advanced Programming in the UNIX Environment" describes this kind of sync operation.
cudaDeviceSynchronize();
clock_t end_device_gpu = clock();
// Cost compute time on GPU.
double time_device_gpu = (double)(end_device_gpu - start_device_gpu) / CLOCKS_PER_SEC;
    // Check whether the computed result is correct.
int correct = 1;
printf("Vector addition on GPU.\n");
for (unsigned int i = 0; i < N; ++i)
{
if ((host_a[i] + host_b[i] != host_c[i]))
{
correct = 0;
}
}
if (correct == 1)
{
printf("GPU has computed vector sum correctly.\n");
}
else
{
printf("There is an error in GPU computation.\n");
}
    // Of course, timing this way does not fully reflect GPU performance,
    // because the GPU needs warm-up time; usually a warm-up test program is run first to start the GPU.
std::cout << N << " of elements in array." << std::endl;
std::cout << "Device GPU time: " << time_device_gpu << std::endl;
// Free up the host memory and device memory.
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_c);
free(host_a);
free(host_b);
free(host_c);
return 0;
}
|
771dd87b0a74a3d8f4b6d01ac02566f0aabff9ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<math.h>
#include<stdint.h>
#include<stdlib.h>
#define N 16
#define M 16
/*
each thread of this function handles one of the 25 convolution procedures
necessary for a 5x5 box filter(aka kernel). 25 pixels from the original
image must each be matched to one of the 25 values from the box filter,
and it must be done through use of their individual threadId.
*/
__global__
void convolve(uint8_t input[N][M],
int *numer,
int* denom,
int* kernel,
int i,
int j)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
//location in box kernel to be convolved by this thread
int kpos = tx + ty * 5;
//pixel location in the input matrix, (x, y)
int x = i + tx - 2;
int y = j + ty - 2;
/*
We now know which location from the kernel matches which pixel
from the image, but before we continue we must account for
the bounds of the input matrix. Depending on the pixel being
sampled from the original image at (i, j), we may not be able
to make use of the entire kernel. Some threads may try to
access out of bounds when (i, j) lies close to the border. In
this case we only use the threads that lie within the bounds
of the image. Our image is of size NxM so:
0 <= x < N
0 <= y < M
*/
if (x>=0 && y>=0 && x<N && y<M)
{
/*
The convolution procedure is to average the pixel values
from the original image with some being weighted more than
others. 25 pixels in the original image are weighted by
a factor equal to its corresponding value in the kernel.
Then, all these weighted values are accumulated and divided
by the total weight of the kernel. It would be pointless
for each and every thread to perform the division (as it
would be exactly the same every time), so we only go as
far as accumulating the weighted values and kernel values
in global memory. atomicAdd prevents the accumulation from
writing over itself.
*/
int weightedVal = kernel[kpos] * int(input[x][y]);
int kVal = kernel[kpos];
atomicAdd(numer, weightedVal);
atomicAdd(denom, kVal);
}
}
void gauss(uint8_t input[N][M], uint8_t output[N][M])
{
/*
First I declare and allocate global space for our box filter.
I will be using a Gaussian filter, which is a bell curve
with greater values in the middle. Using this filter for
such a convolution is called a gaussian blur and has several
applications; I am familiar with it from scaling images and
feature extraction algorithms such as SIFT. Gaussian filters
of different sizes and distributions may be employed here,
and generating them would be a significant upgrade over my
hardcoding of the standard 5x5 gaussian filter.
*/
int* kernel;
hipMallocManaged(&kernel, sizeof(int) * 25);
int dummy[25] = { 1, 4, 7, 4, 1,
4,16,26,16, 4,
7,26,41,26, 7,
4,16,26,16, 4,
1, 4, 7, 4, 1 };
for (int i=0; i<25; i++)
kernel[i] = dummy[i];
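    // Hedged sketch of how a 5x5 Gaussian kernel could be generated instead of
    // hardcoded (not used by this program; sigma and the integer scaling are
    // illustrative assumptions only):
    //   float sigma = 1.0f, g[25];
    //   for (int y = -2; y <= 2; ++y)
    //       for (int x = -2; x <= 2; ++x)
    //           g[(y + 2) * 5 + (x + 2)] = expf(-(x * x + y * y) / (2.0f * sigma * sigma));
    //   // rescale so the centre weight becomes 41, roughly matching the table above:
    //   for (int i = 0; i < 25; ++i) kernel[i] = (int) roundf(41.0f * g[i] / g[12]);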
//accumulators which our convolve function requires
int *numer;
int *denom;
hipMallocManaged(&numer, sizeof(int));
hipMallocManaged(&denom, sizeof(int));
/*
Before I can call convolve I must define the dimensions of the
block. A block is a collection of threads to be run together in
parallel, and I have decided each block will handle the gaussian
of each pixel. That means we need 25 threads per block, which
can be arranged in a 5x5 formation to better align with the 5x5
kernel.
*/
dim3 blockSize(5,5);
/*
(i, j) represents the coordinates of the pixel we're performing
a gaussian blur on. the following nested loops iterate through
every pixel of the input image matrix.
*/
for (int j = 0; j<N; j++)
{
for (int i = 0; i<M; i++)
{
//explained in convolution procedure
*numer = 0;
*denom = 0;
hipLaunchKernelGGL(( convolve), dim3(1),dim3(blockSize), 0, 0, input, numer, denom, kernel, i, j);
hipDeviceSynchronize();
//could this be parallelized as well? is it worth it?
output[i][j] = uint8_t((*numer) / (*denom));
}
}
hipFree(kernel);
hipFree(numer);
hipFree(denom);
}
/*
print function for the values of a matrix of unsigned 8 bit ints,
otherwise known as the data values of a greyscale image.
*/
void print(uint8_t image[N][M])
{
for (int i=0; i<N; i++)
{
for (int j=0; j<M; j++)
{
std::cout<< int(image[i][j]) << ",\t";
}
std::cout<< "\n";
}
}
int main()
{
srand(NULL);
uint8_t *image, blur[N][M];
hipMallocManaged(&image, N*M*sizeof(uint8_t));
for (int i = 0; i<N; i++)
for (int j = 0; j<M; j++)
reinterpret_cast<uint8_t (*)[M]>(image)[i][j] = rand()% 256;
/*
hipMallocManaged has certain limitations when it comes to 2D arrays
so image has been allocated as a 1D array and then cast to a 2D.
blur doesn't need to be allocated to global mem (doesn't run on device
code), so it's declared locally as a 2D array and passed as such.
*/
print(reinterpret_cast<uint8_t (*)[M]>(image));
gauss(reinterpret_cast<uint8_t (*)[M]>(image), blur);
std::cout<<"\n";
print(blur);
hipFree(image);
    // blur is a stack-allocated host array, so it must not be passed to hipFree.
return 0;
}
| 771dd87b0a74a3d8f4b6d01ac02566f0aabff9ca.cu | #include<iostream>
#include<math.h>
#include<stdint.h>
#include<stdlib.h>
#define N 16
#define M 16
/*
each thread of this function handles one of the 25 convolution procedures
necessary for a 5x5 box filter(aka kernel). 25 pixels from the original
image must each be matched to one of the 25 values from the box filter,
and it must be done through use of their individual threadId.
*/
__global__
void convolve(uint8_t input[N][M],
int *numer,
int* denom,
int* kernel,
int i,
int j)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
//location in box kernel to be convolved by this thread
int kpos = tx + ty * 5;
//pixel location in the input matrix, (x, y)
int x = i + tx - 2;
int y = j + ty - 2;
/*
We now know which location from the kernel matches which pixel
from the image, but before we continue we must account for
the bounds of the input matrix. Depending on the pixel being
sampled from the original image at (i, j), we may not be able
to make use of the entire kernel. Some threads may try to
access out of bounds when (i, j) lies close to the border. In
this case we only use the threads that lie within the bounds
of the image. Our image is of size NxM so:
0 <= x < N
0 <= y < M
*/
if (x>=0 && y>=0 && x<N && y<M)
{
/*
The convolution procedure is to average the pixel values
from the original image with some being weighted more than
others. 25 pixels in the original image are weighted by
a factor equal to its corresponding value in the kernel.
Then, all these weighted values are accumulated and divided
by the total weight of the kernel. It would be pointless
for each and every thread to perform the division (as it
would be exactly the same every time), so we only go as
far as accumulating the weighted values and kernel values
in global memory. atomicAdd prevents the accumulation from
writing over itself.
*/
int weightedVal = kernel[kpos] * int(input[x][y]);
int kVal = kernel[kpos];
atomicAdd(numer, weightedVal);
atomicAdd(denom, kVal);
}
}
void gauss(uint8_t input[N][M], uint8_t output[N][M])
{
/*
First I declare and allocate global space for our box filter.
I will be using a Gaussian filter, which is a bell curve
with greater values in the middle. Using this filter for
such a convolution is called a gaussian blur and has several
applications; I am familiar with it from scaling images and
feature extraction algorithms such as SIFT. Gaussian filters
of different sizes and distributions may be employed here,
and generating them would be a significant upgrade over my
hardcoding of the standard 5x5 gaussian filter.
*/
int* kernel;
cudaMallocManaged(&kernel, sizeof(int) * 25);
int dummy[25] = { 1, 4, 7, 4, 1,
4,16,26,16, 4,
7,26,41,26, 7,
4,16,26,16, 4,
1, 4, 7, 4, 1 };
for (int i=0; i<25; i++)
kernel[i] = dummy[i];
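    // Hedged sketch of how a 5x5 Gaussian kernel could be generated instead of
    // hardcoded (not used by this program; sigma and the integer scaling are
    // illustrative assumptions only):
    //   float sigma = 1.0f, g[25];
    //   for (int y = -2; y <= 2; ++y)
    //       for (int x = -2; x <= 2; ++x)
    //           g[(y + 2) * 5 + (x + 2)] = expf(-(x * x + y * y) / (2.0f * sigma * sigma));
    //   // rescale so the centre weight becomes 41, roughly matching the table above:
    //   for (int i = 0; i < 25; ++i) kernel[i] = (int) roundf(41.0f * g[i] / g[12]);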
//accumulators which our convolve function requires
int *numer;
int *denom;
cudaMallocManaged(&numer, sizeof(int));
cudaMallocManaged(&denom, sizeof(int));
/*
Before I can call convolve I must define the dimensions of the
block. A block is a collection of threads to be run together in
parallel, and I have decided each block will handle the gaussian
of each pixel. That means we need 25 threads per block, which
can be arranged in a 5x5 formation to better align with the 5x5
kernel.
*/
dim3 blockSize(5,5);
/*
(i, j) represents the coordinates of the pixel we're performing
a gaussian blur on. the following nested loops iterate through
every pixel of the input image matrix.
*/
for (int j = 0; j<N; j++)
{
for (int i = 0; i<M; i++)
{
//explained in convolution procedure
*numer = 0;
*denom = 0;
convolve<<<1,blockSize>>>(input, numer, denom, kernel, i, j);
cudaDeviceSynchronize();
//could this be parallelized as well? is it worth it?
output[i][j] = uint8_t((*numer) / (*denom));
}
}
cudaFree(kernel);
cudaFree(numer);
cudaFree(denom);
}
/*
print function for the values of a matrix of unsigned 8 bit ints,
otherwise known as the data values of a greyscale image.
*/
void print(uint8_t image[N][M])
{
for (int i=0; i<N; i++)
{
for (int j=0; j<M; j++)
{
std::cout<< int(image[i][j]) << ",\t";
}
std::cout<< "\n";
}
}
int main()
{
srand(NULL);
uint8_t *image, blur[N][M];
cudaMallocManaged(&image, N*M*sizeof(uint8_t));
for (int i = 0; i<N; i++)
for (int j = 0; j<M; j++)
reinterpret_cast<uint8_t (*)[M]>(image)[i][j] = rand()% 256;
/*
cudaMallocManaged has certain limitations when it comes to 2D arrays
so image has been allocated as a 1D array and then cast to a 2D.
blur doesn't need to be allocated to global mem (doesn't run on device
code), so it's declared locally as a 2D array and passed as such.
*/
print(reinterpret_cast<uint8_t (*)[M]>(image));
gauss(reinterpret_cast<uint8_t (*)[M]>(image), blur);
std::cout<<"\n";
print(blur);
cudaFree(image);
    // blur is a stack-allocated host array, so it must not be passed to cudaFree.
return 0;
}
|
35f84e9affb8d7989e6fc2870795734fc36cff57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright © 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <CudaDevice.h>
#include <CudaCommon.h>
#include <MathEngineCommon.h>
#include <MathEngineDnnDropout.h>
#include <MemoryHandleInternal.h>
#include <Kernels/CudaDnnDropoutKernels.h>
namespace NeoML {
void CCudaMathEngine::Dropout( const CDropoutDesc& dropoutDesc, const CFloatHandle& inputData, const CFloatHandle& outputData )
{
ASSERT_EXPR( inputData.GetMathEngine() == this );
ASSERT_EXPR( outputData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CMathEngineDropoutDesc& desc = static_cast<const CMathEngineDropoutDesc&>( dropoutDesc );
const CBlobDesc& input = desc.Input;
if( desc.ForwardRate == 1.f ) {
VectorCopy( outputData, inputData, input.BlobSize() );
return;
}
const int objectSize = desc.IsSpatial ? input.Channels() : input.ObjectSize();
const int batchLength = desc.IsBatchwise ? input.ObjectCount() : input.BatchLength();
const int batchWidth = input.ObjectCount() / batchLength;
const int maskSize = batchWidth * objectSize;
ASSERT_EXPR( desc.Mask.Size() == maskSize );
if( !desc.IsSpatial ) {
MultiplyMatrixByDiagMatrix( inputData, batchLength, maskSize, desc.Mask.GetHandle(),
outputData, desc.Output.BlobSize() );
return;
}
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3D( blockCount, threadCount, input.ObjectCount(), input.ObjectSize() / objectSize,
objectSize );
hipLaunchKernelGGL(( ChannelLastBlobSpatialDropoutKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw( inputData ),
GetRaw( desc.Mask.GetHandle() ), GetRaw( outputData ), input.ObjectCount(), input.ObjectSize(),
batchWidth, objectSize );
}
CDropoutDesc* CCudaMathEngine::InitDropout( float rate, bool isSpatial, bool isBatchwise,
const CBlobDesc& input, const CBlobDesc& output, int seed )
{
return new CMathEngineDropoutDesc( mathEngine(), rate, isSpatial, isBatchwise, input, output, seed );
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
| 35f84e9affb8d7989e6fc2870795734fc36cff57.cu | /* Copyright © 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <CudaDevice.h>
#include <CudaCommon.h>
#include <MathEngineCommon.h>
#include <MathEngineDnnDropout.h>
#include <MemoryHandleInternal.h>
#include <Kernels/CudaDnnDropoutKernels.h>
namespace NeoML {
void CCudaMathEngine::Dropout( const CDropoutDesc& dropoutDesc, const CFloatHandle& inputData, const CFloatHandle& outputData )
{
ASSERT_EXPR( inputData.GetMathEngine() == this );
ASSERT_EXPR( outputData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CMathEngineDropoutDesc& desc = static_cast<const CMathEngineDropoutDesc&>( dropoutDesc );
const CBlobDesc& input = desc.Input;
if( desc.ForwardRate == 1.f ) {
VectorCopy( outputData, inputData, input.BlobSize() );
return;
}
const int objectSize = desc.IsSpatial ? input.Channels() : input.ObjectSize();
const int batchLength = desc.IsBatchwise ? input.ObjectCount() : input.BatchLength();
const int batchWidth = input.ObjectCount() / batchLength;
const int maskSize = batchWidth * objectSize;
ASSERT_EXPR( desc.Mask.Size() == maskSize );
if( !desc.IsSpatial ) {
MultiplyMatrixByDiagMatrix( inputData, batchLength, maskSize, desc.Mask.GetHandle(),
outputData, desc.Output.BlobSize() );
return;
}
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3D( blockCount, threadCount, input.ObjectCount(), input.ObjectSize() / objectSize,
objectSize );
ChannelLastBlobSpatialDropoutKernel<<<blockCount, threadCount>>>( GetRaw( inputData ),
GetRaw( desc.Mask.GetHandle() ), GetRaw( outputData ), input.ObjectCount(), input.ObjectSize(),
batchWidth, objectSize );
}
CDropoutDesc* CCudaMathEngine::InitDropout( float rate, bool isSpatial, bool isBatchwise,
const CBlobDesc& input, const CBlobDesc& output, int seed )
{
return new CMathEngineDropoutDesc( mathEngine(), rate, isSpatial, isBatchwise, input, output, seed );
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
|
aec78d9a65f6270cfe7543f58e5c8784a3dd2716.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "rocblas.h"
#include "matrix_mul.h"
// Host multiplication function
// Compute C = A * B
// hA is the height of A
// wA is the width of A
// wB is the width of B
extern "C"
void Mul(float* A, float* B, int hA, int wA, int wB, float* C)
{
int size;
hipblasHandle_t handle;
const float alpha = 1.0f;
const float beta = 0.0f;
// Load A and B to the device
float* Ad;
size = hA * wA * sizeof(float);
hipMalloc((void**)&Ad, size);
hipMemcpy(Ad, A, size, hipMemcpyHostToDevice);
float* Bd;
size = wA * wB * sizeof(float);
hipMalloc((void**)&Bd, size);
hipMemcpy(Bd, B, size, hipMemcpyHostToDevice);
// Allocate C on the device
float* Cd;
size = hA * wB * sizeof(float);
hipMalloc((void**)&Cd, size);
hipblasCreate(&handle);
// Compute the execution configuration
  // Note: hipBLAS expects device pointers, so the device buffers Ad, Bd, Cd are passed here.
  hipblasSgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                hA,      /* [m] */
                wB,      /* [n] */
                wA,      /* [k] */
                &alpha,  /* alpha */
                Ad, wA,  /* A[m][k], number of columns (lda) */
                Bd, wB,  /* B[k][n], number of columns (ldb) */
                &beta,   /* beta */
                Cd, wB   /* C[m][n], number of columns (ldc) */
                );
// Read C from the device
hipMemcpy(C, Cd, size, hipMemcpyDeviceToHost);
    // Free device memory and the hipBLAS handle
    hipblasDestroy(handle);
    hipFree(Ad);
    hipFree(Bd);
    hipFree(Cd);
}
| aec78d9a65f6270cfe7543f58e5c8784a3dd2716.cu | #include <stdio.h>
#include "cublas_v2.h"
#include "matrix_mul.h"
// Host multiplication function
// Compute C = A * B
// hA is the height of A
// wA is the width of A
// wB is the width of B
extern "C"
void Mul(float* A, float* B, int hA, int wA, int wB, float* C)
{
int size;
cublasHandle_t handle;
const float alpha = 1.0f;
const float beta = 0.0f;
// Load A and B to the device
float* Ad;
size = hA * wA * sizeof(float);
cudaMalloc((void**)&Ad, size);
cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
float* Bd;
size = wA * wB * sizeof(float);
cudaMalloc((void**)&Bd, size);
cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
// Allocate C on the device
float* Cd;
size = hA * wB * sizeof(float);
cudaMalloc((void**)&Cd, size);
cublasCreate(&handle);
    // Launch the GEMM on the device buffers. cuBLAS assumes column-major storage, so the
    // row-major product C = A * B is obtained by computing C^T = B^T * A^T, i.e. by swapping
    // the two operands. Note that the device pointers (Ad, Bd, Cd) must be passed here, not
    // the host pointers.
    cublasSgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N,
                 wB,      /* [m] rows of C^T = wB          */
                 hA,      /* [n] columns of C^T = hA       */
                 wA,      /* [k] shared dimension          */
                 &alpha,  /* alpha                         */
                 Bd, wB,  /* B^T with leading dimension wB */
                 Ad, wA,  /* A^T with leading dimension wA */
                 &beta,   /* beta                          */
                 Cd, wB   /* C^T with leading dimension wB */
    );
// Read C from the device
cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
    // Free device memory and the cuBLAS handle
    cublasDestroy(handle);
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
}
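// A minimal usage sketch for Mul (illustrative only: the sizes and values are arbitrary,
// the helper name is not part of the original API, and error checking is omitted). If Mul
// computes the row-major product C = A * B, the expected output is [[58 64], [139 154]].
static void exampleMulUsage()
{
    float A[2 * 3] = { 1, 2, 3, 4, 5, 6 };      // 2x3, row-major
    float B[3 * 2] = { 7, 8, 9, 10, 11, 12 };   // 3x2, row-major
    float C[2 * 2] = { 0 };                     // 2x2 result
    Mul(A, B, 2, 3, 2, C);
    printf("C = [[%g %g], [%g %g]]\n", C[0], C[1], C[2], C[3]);
}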
|
5f96bd087a0b5f50b7b69971beaa28164e18e9d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/anchor_mask_cuda.h"
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h"
namespace apollo {
namespace perception {
namespace lidar {
// modified prefix sum code from
// https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void scan_x(int* g_odata, int* g_idata, int n) {
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int offset = 1;
temp[2 * thid] =
g_idata[bid * bdim * 2 + 2 * thid]; // load input into shared memory
temp[2 * thid + 1] = g_idata[bid * bdim * 2 + 2 * thid + 1];
for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
temp[n - 1] = 0;
} // clear the last element
for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[bid * bdim * 2 + 2 * thid] =
temp[2 * thid + 1]; // write results to device memory
int second_ind = 2 * thid + 2;
if (second_ind == bdim * 2) {
g_odata[bid * bdim * 2 + 2 * thid + 1] =
temp[2 * thid + 1] + g_idata[bid * bdim * 2 + 2 * thid + 1];
} else {
g_odata[bid * bdim * 2 + 2 * thid + 1] = temp[2 * thid + 2];
}
}
// modified prefix sum code from
// https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void scan_y(int* g_odata, int* g_idata, int n) {
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int gdim = gridDim.x;
int offset = 1;
temp[2 * thid] =
g_idata[bid + 2 * thid * gdim]; // load input into shared memory
temp[2 * thid + 1] = g_idata[bid + 2 * thid * gdim + gdim];
for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
temp[n - 1] = 0;
} // clear the last element
for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[bid + 2 * thid * gdim] =
temp[2 * thid + 1]; // write results to device memory
int second_ind = 2 * thid + 2;
if (second_ind == bdim * 2) {
g_odata[bid + 2 * thid * gdim + gdim] =
temp[2 * thid + 1] + g_idata[bid + 2 * thid * gdim + gdim];
} else {
g_odata[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 2];
}
}
__global__ void make_anchor_mask_kernel(
const float* dev_box_anchors_min_x, const float* dev_box_anchors_min_y,
const float* dev_box_anchors_max_x, const float* dev_box_anchors_max_y,
int* dev_sparse_pillar_map, int* dev_anchor_mask, const float min_x_range,
const float min_y_range, const float pillar_x_size,
const float pillar_y_size, const int grid_x_size, const int grid_y_size,
const int num_inds_for_scan) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int anchor_coor[NUM_2D_BOX_CORNERS_MACRO] = {0};
const int grid_x_size_1 = grid_x_size - 1; // grid_x_size - 1
const int grid_y_size_1 = grid_y_size - 1; // grid_y_size - 1
anchor_coor[0] =
floor((dev_box_anchors_min_x[tid] - min_x_range) / pillar_x_size);
anchor_coor[1] =
floor((dev_box_anchors_min_y[tid] - min_y_range) / pillar_y_size);
anchor_coor[2] =
floor((dev_box_anchors_max_x[tid] - min_x_range) / pillar_x_size);
anchor_coor[3] =
floor((dev_box_anchors_max_y[tid] - min_y_range) / pillar_y_size);
anchor_coor[0] = max(anchor_coor[0], 0);
anchor_coor[1] = max(anchor_coor[1], 0);
anchor_coor[2] = min(anchor_coor[2], grid_x_size_1);
anchor_coor[3] = min(anchor_coor[3], grid_y_size_1);
int right_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan +
anchor_coor[2]];
int left_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan +
anchor_coor[0]];
int left_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan +
anchor_coor[0]];
int right_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan +
anchor_coor[2]];
int area = right_top - left_top - right_bottom + left_bottom;
if (area > 1) {
dev_anchor_mask[tid] = 1;
} else {
dev_anchor_mask[tid] = 0;
}
}
AnchorMaskCuda::AnchorMaskCuda(
const int num_threads, const int num_inds_for_scan, const int num_anchor,
const float min_x_range, const float min_y_range, const float pillar_x_size,
const float pillar_y_size, const int grid_x_size, const int grid_y_size)
: num_threads_(num_threads),
num_inds_for_scan_(num_inds_for_scan),
num_anchor_(num_anchor),
min_x_range_(min_x_range),
min_y_range_(min_y_range),
pillar_x_size_(pillar_x_size),
pillar_y_size_(pillar_y_size),
grid_x_size_(grid_x_size),
grid_y_size_(grid_y_size) {}
void AnchorMaskCuda::DoAnchorMaskCuda(
int* dev_sparse_pillar_map, int* dev_cumsum_along_x,
int* dev_cumsum_along_y, const float* dev_box_anchors_min_x,
const float* dev_box_anchors_min_y, const float* dev_box_anchors_max_x,
const float* dev_box_anchors_max_y, int* dev_anchor_mask) {
hipLaunchKernelGGL(( scan_x), dim3(num_inds_for_scan_), dim3(num_inds_for_scan_ / 2),
num_inds_for_scan_ * sizeof(int), 0,
dev_cumsum_along_x, dev_sparse_pillar_map, num_inds_for_scan_);
hipLaunchKernelGGL(( scan_y), dim3(num_inds_for_scan_), dim3(num_inds_for_scan_ / 2),
num_inds_for_scan_ * sizeof(int), 0,
dev_cumsum_along_y, dev_cumsum_along_x, num_inds_for_scan_);
GPU_CHECK(hipMemcpy(dev_sparse_pillar_map, dev_cumsum_along_y,
num_inds_for_scan_ * num_inds_for_scan_ * sizeof(int),
hipMemcpyDeviceToDevice));
int num_blocks = DIVUP(num_anchor_, num_threads_);
hipLaunchKernelGGL(( make_anchor_mask_kernel), dim3(num_blocks), dim3(num_threads_), 0, 0,
dev_box_anchors_min_x, dev_box_anchors_min_y, dev_box_anchors_max_x,
dev_box_anchors_max_y, dev_sparse_pillar_map, dev_anchor_mask,
min_x_range_, min_y_range_, pillar_x_size_, pillar_y_size_, grid_x_size_,
grid_y_size_, num_inds_for_scan_);
}
} // namespace lidar
} // namespace perception
} // namespace apollo
| 5f96bd087a0b5f50b7b69971beaa28164e18e9d4.cu | /******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/anchor_mask_cuda.h"
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h"
namespace apollo {
namespace perception {
namespace lidar {
// modified prefix sum code from
// https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void scan_x(int* g_odata, int* g_idata, int n) {
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int offset = 1;
temp[2 * thid] =
g_idata[bid * bdim * 2 + 2 * thid]; // load input into shared memory
temp[2 * thid + 1] = g_idata[bid * bdim * 2 + 2 * thid + 1];
for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
temp[n - 1] = 0;
} // clear the last element
for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[bid * bdim * 2 + 2 * thid] =
temp[2 * thid + 1]; // write results to device memory
int second_ind = 2 * thid + 2;
if (second_ind == bdim * 2) {
g_odata[bid * bdim * 2 + 2 * thid + 1] =
temp[2 * thid + 1] + g_idata[bid * bdim * 2 + 2 * thid + 1];
} else {
g_odata[bid * bdim * 2 + 2 * thid + 1] = temp[2 * thid + 2];
}
}
// modified prefix sum code from
// https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void scan_y(int* g_odata, int* g_idata, int n) {
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int gdim = gridDim.x;
int offset = 1;
temp[2 * thid] =
g_idata[bid + 2 * thid * gdim]; // load input into shared memory
temp[2 * thid + 1] = g_idata[bid + 2 * thid * gdim + gdim];
for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
temp[n - 1] = 0;
} // clear the last element
for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[bid + 2 * thid * gdim] =
temp[2 * thid + 1]; // write results to device memory
int second_ind = 2 * thid + 2;
if (second_ind == bdim * 2) {
g_odata[bid + 2 * thid * gdim + gdim] =
temp[2 * thid + 1] + g_idata[bid + 2 * thid * gdim + gdim];
} else {
g_odata[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 2];
}
}
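// Sequential reference sketch of the 2D inclusive prefix sum (summed-area table) that the
// two scan passes above build over the n x n sparse pillar map: out[r][c] holds the sum of
// in[0..r][0..c]. make_anchor_mask_kernel below then reads four corners of this table to get
// a rectangle sum in O(1). The helper name and flat row-major layout here are illustrative.
static void summed_area_table_host(const int* in, int* out, int n) {
  for (int r = 0; r < n; ++r) {
    for (int c = 0; c < n; ++c) {
      int v = in[r * n + c];
      if (c > 0) v += out[r * n + c - 1];               // left neighbor
      if (r > 0) v += out[(r - 1) * n + c];             // top neighbor
      if (r > 0 && c > 0) v -= out[(r - 1) * n + c - 1];  // remove double-counted corner
      out[r * n + c] = v;
    }
  }
}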
__global__ void make_anchor_mask_kernel(
const float* dev_box_anchors_min_x, const float* dev_box_anchors_min_y,
const float* dev_box_anchors_max_x, const float* dev_box_anchors_max_y,
int* dev_sparse_pillar_map, int* dev_anchor_mask, const float min_x_range,
const float min_y_range, const float pillar_x_size,
const float pillar_y_size, const int grid_x_size, const int grid_y_size,
const int num_inds_for_scan) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int anchor_coor[NUM_2D_BOX_CORNERS_MACRO] = {0};
const int grid_x_size_1 = grid_x_size - 1; // grid_x_size - 1
const int grid_y_size_1 = grid_y_size - 1; // grid_y_size - 1
anchor_coor[0] =
floor((dev_box_anchors_min_x[tid] - min_x_range) / pillar_x_size);
anchor_coor[1] =
floor((dev_box_anchors_min_y[tid] - min_y_range) / pillar_y_size);
anchor_coor[2] =
floor((dev_box_anchors_max_x[tid] - min_x_range) / pillar_x_size);
anchor_coor[3] =
floor((dev_box_anchors_max_y[tid] - min_y_range) / pillar_y_size);
anchor_coor[0] = max(anchor_coor[0], 0);
anchor_coor[1] = max(anchor_coor[1], 0);
anchor_coor[2] = min(anchor_coor[2], grid_x_size_1);
anchor_coor[3] = min(anchor_coor[3], grid_y_size_1);
int right_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan +
anchor_coor[2]];
int left_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan +
anchor_coor[0]];
int left_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan +
anchor_coor[0]];
int right_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan +
anchor_coor[2]];
int area = right_top - left_top - right_bottom + left_bottom;
if (area > 1) {
dev_anchor_mask[tid] = 1;
} else {
dev_anchor_mask[tid] = 0;
}
}
AnchorMaskCuda::AnchorMaskCuda(
const int num_threads, const int num_inds_for_scan, const int num_anchor,
const float min_x_range, const float min_y_range, const float pillar_x_size,
const float pillar_y_size, const int grid_x_size, const int grid_y_size)
: num_threads_(num_threads),
num_inds_for_scan_(num_inds_for_scan),
num_anchor_(num_anchor),
min_x_range_(min_x_range),
min_y_range_(min_y_range),
pillar_x_size_(pillar_x_size),
pillar_y_size_(pillar_y_size),
grid_x_size_(grid_x_size),
grid_y_size_(grid_y_size) {}
void AnchorMaskCuda::DoAnchorMaskCuda(
int* dev_sparse_pillar_map, int* dev_cumsum_along_x,
int* dev_cumsum_along_y, const float* dev_box_anchors_min_x,
const float* dev_box_anchors_min_y, const float* dev_box_anchors_max_x,
const float* dev_box_anchors_max_y, int* dev_anchor_mask) {
scan_x<<<num_inds_for_scan_, num_inds_for_scan_ / 2,
num_inds_for_scan_ * sizeof(int)>>>(
dev_cumsum_along_x, dev_sparse_pillar_map, num_inds_for_scan_);
scan_y<<<num_inds_for_scan_, num_inds_for_scan_ / 2,
num_inds_for_scan_ * sizeof(int)>>>(
dev_cumsum_along_y, dev_cumsum_along_x, num_inds_for_scan_);
GPU_CHECK(cudaMemcpy(dev_sparse_pillar_map, dev_cumsum_along_y,
num_inds_for_scan_ * num_inds_for_scan_ * sizeof(int),
cudaMemcpyDeviceToDevice));
int num_blocks = DIVUP(num_anchor_, num_threads_);
make_anchor_mask_kernel<<<num_blocks, num_threads_>>>(
dev_box_anchors_min_x, dev_box_anchors_min_y, dev_box_anchors_max_x,
dev_box_anchors_max_y, dev_sparse_pillar_map, dev_anchor_mask,
min_x_range_, min_y_range_, pillar_x_size_, pillar_y_size_, grid_x_size_,
grid_y_size_, num_inds_for_scan_);
}
} // namespace lidar
} // namespace perception
} // namespace apollo
|
cb0348a4f35515f8dc4f4c0bc386d28e7e0726fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <set>
#include <string>
#include <iostream>
#include <time.h>
#include <stdlib.h>
#include <random>
inline void GPUassert(hipError_t code, char * file, int line, bool Abort = true)
{
if (code != 0) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (Abort) return;
}
}
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }
__device__ int factorial(int n) {
if (n == 1) {
return 1;
}
return n * factorial(n - 1);
}
__global__ void permute_kernel(char* d_A, int size) {
int jednoD = blockIdx.x;
int dvoD = jednoD + blockIdx.y*gridDim.x;
int troD = dvoD + gridDim.x*gridDim.y*blockIdx.z;
int tid;
tid = troD * blockDim.x + threadIdx.x;
int fakt = factorial(size);
if (tid < fakt) {
		int* counter = new int[size];
		char* kopija = new char[size + 1];   // +1 so printf("%s", kopija) reads a null-terminated string
		kopija[size] = '\0';
		for (int i = 0; i < size; i++) {
			counter[i] = 0;
			kopija[i] = d_A[i];
		}
int j = 2;
int temp = tid;
for (int i = 1; i < size; i++) {
while (temp >= (fakt / j)) {
counter[i]++;
temp -= fakt / j;
}
fakt = fakt / j;
j++;
}
for (int i = 0; i < size; i++) {
int poz = i - counter[i];
if (poz < i) {
char temp = kopija[i];
kopija[i] = kopija[poz];
kopija[poz] = temp;
}
}
printf("GPU Thread: %i Permutacija: %s\n", tid, kopija);
delete[] counter;
delete[] kopija;
}
}
int factorialHost(int n) {
if (n == 1) {
return 1;
}
return n * factorialHost(n - 1);
}
void funkcija(FILE *fp, int n, double *sum, double *maxi, double *mini) {
clock_t begin = clock();
char h_a[] = "ABCDEF";
char* d_a;
int duzina = 6;
hipMalloc((void**)&d_a, sizeof(h_a));
GPUerrchk(hipMemcpy(d_a, h_a, sizeof(h_a), hipMemcpyHostToDevice));
int fakt = factorialHost(duzina);
int threadNoMC = fakt;
char* h_svePermutacije = new char[threadNoMC * duzina];
char* svePermutacije;
hipMalloc((void**)&svePermutacije, sizeof(char)* threadNoMC * duzina);
hipMemset(svePermutacije, '0', sizeof(char) * threadNoMC * duzina);
std::set<std::string> unikatno;
printf("\n\n B-P\n");
int number = 1;
while (threadNoMC / number > 320) number++;
while (1.0*threadNoMC / number - int(threadNoMC / number) > 0) number++;
int a = threadNoMC / number;
permute_kernel << <number, a >> > (d_a, duzina);
for (std::string s : unikatno) {
std::cout << s << std::endl;
}
GPUerrchk(hipPeekAtLastError());
GPUerrchk(hipDeviceSynchronize());
time_t end = clock();
printf("Vrijeme izvrsenja u sekundama je: %f\n", (double)(end - begin) / CLOCKS_PER_SEC);
if (n != 0) {
fprintf(fp, "%d,%f\n", n, (double)(end - begin) / CLOCKS_PER_SEC);
*sum += (double)(end - begin) / CLOCKS_PER_SEC;
if (*maxi < (double)(end - begin) / CLOCKS_PER_SEC) *maxi = (double)(end - begin) / CLOCKS_PER_SEC;
if (*mini > (double)(end - begin) / CLOCKS_PER_SEC) *mini = (double)(end - begin) / CLOCKS_PER_SEC;
	}
	// release the per-call allocations
	delete[] h_svePermutacije;
	hipFree(svePermutacije);
	hipFree(d_a);
}
int main()
{
srand(time(NULL));
FILE *fp;
fp = fopen("C:\\Users\\ismar\\Desktop\\BP.csv", "w");
double sum = 0.0;
double maxi = -999999.9;
double mini = 999999.9;
for (int i = 0; i <= 100; i++) {
if (fp == NULL) {
printf("Couldn't open file\n");
			return 1;
}
funkcija(fp, i, &sum, &maxi, &mini);
}
fprintf(fp, "%s,%f\n", "Minimum", mini);
fprintf(fp, "%s,%f\n", "Maximum", maxi);
fprintf(fp, "%s,%f\n", "Prosjek", 1.0*sum / 100);
printf("Prosjecno vrijeme izvrsavanja je: %f", 1.0*sum / 100);
fclose(fp);
return 0;
}
| cb0348a4f35515f8dc4f4c0bc386d28e7e0726fe.cu | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand.h>
#include <curand_kernel.h>
#include <set>
#include <string>
#include <iostream>
#include <time.h>
#include <stdlib.h>
#include <random>
inline void GPUassert(cudaError_t code, char * file, int line, bool Abort = true)
{
if (code != 0) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (Abort) return;
}
}
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }
__device__ int factorial(int n) {
if (n == 1) {
return 1;
}
return n * factorial(n - 1);
}
__global__ void permute_kernel(char* d_A, int size) {
int jednoD = blockIdx.x;
int dvoD = jednoD + blockIdx.y*gridDim.x;
int troD = dvoD + gridDim.x*gridDim.y*blockIdx.z;
int tid;
tid = troD * blockDim.x + threadIdx.x;
int fakt = factorial(size);
if (tid < fakt) {
		int* counter = new int[size];
		char* kopija = new char[size + 1];   // +1 so printf("%s", kopija) reads a null-terminated string
		kopija[size] = '\0';
		for (int i = 0; i < size; i++) {
			counter[i] = 0;
			kopija[i] = d_A[i];
		}
int j = 2;
int temp = tid;
for (int i = 1; i < size; i++) {
while (temp >= (fakt / j)) {
counter[i]++;
temp -= fakt / j;
}
fakt = fakt / j;
j++;
}
for (int i = 0; i < size; i++) {
int poz = i - counter[i];
if (poz < i) {
char temp = kopija[i];
kopija[i] = kopija[poz];
kopija[poz] = temp;
}
}
printf("GPU Thread: %i Permutacija: %s\n", tid, kopija);
delete[] counter;
delete[] kopija;
}
}
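// A host-side reference of the same idea in its standard form (Lehmer code / factorial
// number system): decode a permutation index into a reordering of s. This is only an
// illustrative sketch and not the exact counter scheme used by permute_kernel above.
static void permutationFromIndexHost(int index, char* s, int size) {
	// s must hold the identity permutation on entry, e.g. "ABCDEF" with size == 6
	for (int i = 0; i < size; ++i) {
		int fact = 1;
		for (int k = 2; k < size - i; ++k) fact *= k;        // (size - i - 1)!
		int pick = index / fact;                             // which remaining symbol comes next
		index %= fact;
		char chosen = s[i + pick];
		for (int k = i + pick; k > i; --k) s[k] = s[k - 1];  // shift the skipped symbols right
		s[i] = chosen;
	}
}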
int factorialHost(int n) {
if (n == 1) {
return 1;
}
return n * factorialHost(n - 1);
}
void funkcija(FILE *fp, int n, double *sum, double *maxi, double *mini) {
clock_t begin = clock();
char h_a[] = "ABCDEF";
char* d_a;
int duzina = 6;
cudaMalloc((void**)&d_a, sizeof(h_a));
GPUerrchk(cudaMemcpy(d_a, h_a, sizeof(h_a), cudaMemcpyHostToDevice));
int fakt = factorialHost(duzina);
int threadNoMC = fakt;
char* h_svePermutacije = new char[threadNoMC * duzina];
char* svePermutacije;
cudaMalloc((void**)&svePermutacije, sizeof(char)* threadNoMC * duzina);
cudaMemset(svePermutacije, '0', sizeof(char) * threadNoMC * duzina);
std::set<std::string> unikatno;
printf("\n\n B-P\n");
int number = 1;
while (threadNoMC / number > 320) number++;
while (1.0*threadNoMC / number - int(threadNoMC / number) > 0) number++;
int a = threadNoMC / number;
permute_kernel << <number, a >> > (d_a, duzina);
for (std::string s : unikatno) {
std::cout << s << std::endl;
}
GPUerrchk(cudaPeekAtLastError());
GPUerrchk(cudaDeviceSynchronize());
time_t end = clock();
printf("Vrijeme izvrsenja u sekundama je: %f\n", (double)(end - begin) / CLOCKS_PER_SEC);
if (n != 0) {
fprintf(fp, "%d,%f\n", n, (double)(end - begin) / CLOCKS_PER_SEC);
*sum += (double)(end - begin) / CLOCKS_PER_SEC;
if (*maxi < (double)(end - begin) / CLOCKS_PER_SEC) *maxi = (double)(end - begin) / CLOCKS_PER_SEC;
if (*mini > (double)(end - begin) / CLOCKS_PER_SEC) *mini = (double)(end - begin) / CLOCKS_PER_SEC;
	}
	// release the per-call allocations
	delete[] h_svePermutacije;
	cudaFree(svePermutacije);
	cudaFree(d_a);
}
int main()
{
srand(time(NULL));
FILE *fp;
fp = fopen("C:\\Users\\ismar\\Desktop\\BP.csv", "w");
double sum = 0.0;
double maxi = -999999.9;
double mini = 999999.9;
for (int i = 0; i <= 100; i++) {
if (fp == NULL) {
printf("Couldn't open file\n");
			return 1;
}
funkcija(fp, i, &sum, &maxi, &mini);
}
fprintf(fp, "%s,%f\n", "Minimum", mini);
fprintf(fp, "%s,%f\n", "Maximum", maxi);
fprintf(fp, "%s,%f\n", "Prosjek", 1.0*sum / 100);
printf("Prosjecno vrijeme izvrsavanja je: %f", 1.0*sum / 100);
fclose(fp);
return 0;
}
|
72352cf486c418858670608a47481ae757612a63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
* MNeumann (April 2010): Removed shrUtil dependency and added external declarations
* to enable usage for MNRT.
*
*/
//#include <shrUtils.h>
#include <stdlib.h>
#include <stdio.h>
#include "MersenneTwister.h"
#include "MNCudaUtil.h"
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
////////////////////////////////////////////////////////////////////////////////
// Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random.
// For coalesced global writes MT_RNG_COUNT should be a multiple of warp size.
// Initial states for each generator are the same, since the states are
// initialized from the global seed. In order to improve distribution properties
// on small NPerRng supply dedicated (local) seed to each twister.
// The local seeds, in their turn, can be extracted from global seed
// by means of any simple random number generator, like LCG.
////////////////////////////////////////////////////////////////////////////////
__global__ void RandomGPU(
float *d_Random,
int NPerRng
){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){
//Load bit-vector Mersenne Twister parameters
mt_struct_stripped config = ds_MT[iRng];
//Initialize current state
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(iOut = 0; iOut < NPerRng; iOut++){
//iState1 = (iState + 1) % MT_NN
//iStateM = (iState + MT_MM) % MT_NN
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Transform each of MT_RNG_COUNT lanes of NPerRng uniformly distributed
// random samples, produced by RandomGPU(), to normally distributed lanes
// using Cartesian form of Box-Muller transformation.
// NPerRng must be even.
////////////////////////////////////////////////////////////////////////////////
#define PI 3.14159265358979f
__device__ inline void BoxMuller(float& u1, float& u2){
float r = sqrtf(-2.0f * logf(u1));
float phi = 2 * PI * u2;
u1 = r * __cosf(phi);
u2 = r * __sinf(phi);
}
__global__ void BoxMullerGPU(float *d_Random, int NPerRng){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N)
for(int iOut = 0; iOut < NPerRng; iOut += 2)
BoxMuller(
d_Random[iRng + (iOut + 0) * MT_RNG_COUNT],
d_Random[iRng + (iOut + 1) * MT_RNG_COUNT]
);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" bool MersenneTwisterGPUInit(const char *fname)
///
/// \brief Loads Mersenne Twister configuration from given source file.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param fname Filename of the configuration file.
///
/// \return true if it succeeds, false if it fails.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
bool MersenneTwisterGPUInit(const char *fname)
{
FILE *fd = fopen(fname, "rb");
if(!fd)
{
MNFatal("Failed to open %s for Mersenne Twister configuration.", fname);
return false;
}
if( !fread(h_MT, sizeof(h_MT), 1, fd) )
{
MNFatal("Failed to load %s for Mersenne Twister configuration.", fname);
return false;
}
fclose(fd);
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" void MersenneTwisterGPUSeed(unsigned int seed)
///
/// \brief Seeds Mersenne Twister for current GPU context.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param seed The seed.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void MersenneTwisterGPUSeed(unsigned int seed)
{
int i;
//Need to be thread-safe
mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT * sizeof(mt_struct_stripped));
for(i = 0; i < MT_RNG_COUNT; i++){
MT[i] = h_MT[i];
MT[i].seed = seed;
}
mncudaSafeCallNoSync(hipMemcpyToSymbol(ds_MT, MT, sizeof(h_MT)));
free(MT);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" void MersenneTwisterGPU(float* d_outRand, int nPerRNG)
///
/// \brief Performs Mersenne Twister RNG to generate a predefined number of uniform random
/// numbers to use in other kernels.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param [out] d_outRand The generated uniform random numbers.
/// \param nPerRNG The random numbers per generator. Will generate
/// nPerRNG * MT_RNG_COUNT numbers.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void MersenneTwisterGPU(float* d_outRand, int nPerRNG)
{
// 32 * 128 = MT_RNG_COUNT = 4096. See SDK 3.0 sample.
hipLaunchKernelGGL(( RandomGPU), dim3(32), dim3(128), 0, 0, d_outRand, nPerRNG);
MNCUDA_CHECKERROR;
//BoxMullerGPU<<<32, 128>>>(d_outRand, nPerRNG);
} | 72352cf486c418858670608a47481ae757612a63.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
* MNeumann (April 2010): Removed shrUtil dependency and added external declarations
* to enable usage for MNRT.
*
*/
//#include <shrUtils.h>
#include <stdlib.h>
#include <stdio.h>
#include "MersenneTwister.h"
#include "MNCudaUtil.h"
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
////////////////////////////////////////////////////////////////////////////////
// Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random.
// For coalesced global writes MT_RNG_COUNT should be a multiple of warp size.
// Initial states for each generator are the same, since the states are
// initialized from the global seed. In order to improve distribution properties
// on small NPerRng supply dedicated (local) seed to each twister.
// The local seeds, in their turn, can be extracted from global seed
// by means of any simple random number generator, like LCG.
////////////////////////////////////////////////////////////////////////////////
__global__ void RandomGPU(
float *d_Random,
int NPerRng
){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){
//Load bit-vector Mersenne Twister parameters
mt_struct_stripped config = ds_MT[iRng];
//Initialize current state
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(iOut = 0; iOut < NPerRng; iOut++){
//iState1 = (iState + 1) % MT_NN
//iStateM = (iState + MT_MM) % MT_NN
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
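// Indexing sketch for the layout produced above: sample k of generator g is stored at
// d_Random[g + k * MT_RNG_COUNT], i.e. the generators are interleaved so that consecutive
// threads write consecutive addresses (coalesced). A host-side accessor for clarity
// (the helper name is illustrative only, not part of the original interface):
static inline float mtSampleAt(const float* h_Random, int generator, int sample)
{
    return h_Random[generator + sample * MT_RNG_COUNT];
}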
////////////////////////////////////////////////////////////////////////////////
// Transform each of MT_RNG_COUNT lanes of NPerRng uniformly distributed
// random samples, produced by RandomGPU(), to normally distributed lanes
// using Cartesian form of Box-Muller transformation.
// NPerRng must be even.
////////////////////////////////////////////////////////////////////////////////
#define PI 3.14159265358979f
__device__ inline void BoxMuller(float& u1, float& u2){
float r = sqrtf(-2.0f * logf(u1));
float phi = 2 * PI * u2;
u1 = r * __cosf(phi);
u2 = r * __sinf(phi);
}
__global__ void BoxMullerGPU(float *d_Random, int NPerRng){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N)
for(int iOut = 0; iOut < NPerRng; iOut += 2)
BoxMuller(
d_Random[iRng + (iOut + 0) * MT_RNG_COUNT],
d_Random[iRng + (iOut + 1) * MT_RNG_COUNT]
);
}
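// Host-side reference of the same Cartesian Box-Muller transform, handy for spot-checking
// a few BoxMullerGPU outputs (a sketch only; it assumes the standard C math functions are
// available to host code, which nvcc provides through the CUDA headers).
inline void BoxMullerHost(float& u1, float& u2){
    float r   = sqrtf(-2.0f * logf(u1));
    float phi = 2 * PI * u2;
    float n1  = r * cosf(phi);
    float n2  = r * sinf(phi);
    u1 = n1;
    u2 = n2;
}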
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" bool MersenneTwisterGPUInit(const char *fname)
///
/// \brief Loads Mersenne Twister configuration from given source file.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param fname Filename of the configuration file.
///
/// \return true if it succeeds, false if it fails.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
bool MersenneTwisterGPUInit(const char *fname)
{
FILE *fd = fopen(fname, "rb");
if(!fd)
{
MNFatal("Failed to open %s for Mersenne Twister configuration.", fname);
return false;
}
if( !fread(h_MT, sizeof(h_MT), 1, fd) )
{
MNFatal("Failed to load %s for Mersenne Twister configuration.", fname);
return false;
}
fclose(fd);
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" void MersenneTwisterGPUSeed(unsigned int seed)
///
/// \brief Seeds Mersenne Twister for current GPU context.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param seed The seed.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void MersenneTwisterGPUSeed(unsigned int seed)
{
int i;
//Need to be thread-safe
mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT * sizeof(mt_struct_stripped));
for(i = 0; i < MT_RNG_COUNT; i++){
MT[i] = h_MT[i];
MT[i].seed = seed;
}
mncudaSafeCallNoSync(cudaMemcpyToSymbol(ds_MT, MT, sizeof(h_MT)));
free(MT);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" void MersenneTwisterGPU(float* d_outRand, int nPerRNG)
///
/// \brief Performs Mersenne Twister RNG to generate a predefined number of uniform random
/// numbers to use in other kernels.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param [out] d_outRand The generated uniform random numbers.
/// \param nPerRNG The random numbers per generator. Will generate
/// nPerRNG * MT_RNG_COUNT numbers.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void MersenneTwisterGPU(float* d_outRand, int nPerRNG)
{
// 32 * 128 = MT_RNG_COUNT = 4096. See SDK 3.0 sample.
RandomGPU<<<32, 128>>>(d_outRand, nPerRNG);
MNCUDA_CHECKERROR;
//BoxMullerGPU<<<32, 128>>>(d_outRand, nPerRNG);
} |
832f4d988e845eac83f15b4c5d90009fd520bb32.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
} \
}
inline double seconds()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp, &tzp);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
/*
 * This example demonstrates a simple vector sum on the GPU and on the host.
 * sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
 * GPU, using many thread blocks of 128 threads each to cover the whole vector.
 * sumArraysOnHost sequentially iterates through vector elements on the host.
 */
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i],
gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
return;
}
void initialData(float *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
for (int idx = 0; idx < N; idx++)
C[idx] = A[idx] + B[idx];
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
//printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
CHECK(hipSetDevice(dev));
// set up data size of vectors
int nElem = 1024 * 1024 * 10;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// malloc device global memory
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((float**)&d_A, nBytes));
CHECK(hipMalloc((float**)&d_B, nBytes));
CHECK(hipMalloc((float**)&d_C, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_C, gpuRef, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
    // 128 threads per block, and enough blocks to cover all nElem elements exactly
    dim3 block (128);
    dim3 grid (nElem / 128);
double started = seconds();
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C);
hipDeviceSynchronize();
//printf("Execution configure <<<%d, %d>>>\n", grid.x, block.x);
double elapsed = seconds() - started;
printf("Elapsed %lf\n", elapsed);
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
// add vector at host side for result checks
sumArraysOnHost(h_A, h_B, hostRef, nElem);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
CHECK(hipDeviceReset());
return(0);
}
| 832f4d988e845eac83f15b4c5d90009fd520bb32.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
} \
}
inline double seconds()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp, &tzp);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
/*
 * This example demonstrates a simple vector sum on the GPU and on the host.
 * sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
 * GPU, using many thread blocks of 128 threads each to cover the whole vector.
 * sumArraysOnHost sequentially iterates through vector elements on the host.
 */
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i],
gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
return;
}
void initialData(float *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
for (int idx = 0; idx < N; idx++)
C[idx] = A[idx] + B[idx];
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
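// A grid-stride variant (a sketch, not used by main below): unlike sumArraysOnGPU above,
// which assumes gridDim.x * blockDim.x covers N exactly, this form stays correct for any
// launch configuration and any N.
__global__ void sumArraysOnGPUGridStride(float *A, float *B, float *C, int N)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
        C[i] = A[i] + B[i];
}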
int main(int argc, char **argv)
{
//printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
CHECK(cudaSetDevice(dev));
// set up data size of vectors
int nElem = 1024 * 1024 * 10;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// malloc device global memory
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((float**)&d_A, nBytes));
CHECK(cudaMalloc((float**)&d_B, nBytes));
CHECK(cudaMalloc((float**)&d_C, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
    // 128 threads per block, and enough blocks to cover all nElem elements exactly
    dim3 block (128);
    dim3 grid (nElem / 128);
double started = seconds();
sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C);
cudaDeviceSynchronize();
//printf("Execution configure <<<%d, %d>>>\n", grid.x, block.x);
double elapsed = seconds() - started;
printf("Elapsed %lf\n", elapsed);
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
// add vector at host side for result checks
sumArraysOnHost(h_A, h_B, hostRef, nElem);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
CHECK(cudaDeviceReset());
return(0);
}
|
7cde51e35adcfb09bf3d5200b346fb864f09516c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
PROGRAMMING ASSIGNMENT 1
JESSICA SMITH
CS791V
*/
#include <iostream>
#include "stdio.h"
#include "kernel.h"
#include <fstream>
int main() {
int n, T, B;
n = pow(2,21);
// n = 2097087;
int check = 0;
int result = 0;
float calcTime, memTransTime;
// std::ofstream out("ParVSSeq.csv");
// std::ofstream out("test.csv");
for(int p = 8; p < 11; p++){
// Create sizes
// T = pow(2,8);
T = pow(2,p);
// n = pow(2,p);
// T = 1024;
B = (n + (T *2 -1))/ T * 2;
if(B > 65535)
B = 65534;
if( (B * T) < n)
std::cout << "fail" << std::endl;
int *input, *output;
input = (int*) malloc(n*sizeof(int));
output = (int*) malloc(B*sizeof(int));
int *g_in, *g_out;
hipError_t err = hipMalloc( (void**) &g_in, n * sizeof(int));
err = hipMalloc( (void**) &g_out, B * sizeof(int));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
// Populate Array for addition
for (int i = 0; i < n; ++i) {
input[i] = 1;
}
// Create cuda Events
hipEvent_t start, end, m_start, m_end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventCreate(&m_start);
hipEventCreate(&m_end);
hipEventRecord( m_start, 0 );
err = hipMemcpy(g_out, output, B * sizeof(int), hipMemcpyHostToDevice);
err = hipMemcpy(g_in, input, n * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
hipEventRecord(start,0);
hipLaunchKernelGGL(( reduce), dim3(B),dim3(T),T*sizeof(int), 0, g_in, g_out, n);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime( &calcTime, start, end );
err = hipMemcpy(output, g_out, B * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
hipEventRecord( m_end, 0 );
hipEventSynchronize( m_end );
hipEventElapsedTime( &memTransTime, m_start, m_end );
result = 0;
for(int i = 0; i < B; i++){
result += output[i];
}
check = 0;
for(int i = 0; i < n; i++){
check += input[i];
}
if(check != result){
std::cerr << "Oh no! Something went wrong. You Suck. :(" << std::endl;
std::cerr << result << " " << check << std::endl;
// std::cerr << result - check << std::endl;
hipFree(g_out);
hipFree(g_in);
free(input);
free(output);
exit(1);
}
// std::cout << "Yay! Your program's results are correct." << std::endl;
std::cout << std::endl;
std::cout << "Your program took: " << memTransTime << " ms. With Memory Transfer on " << n << " inputs" << std::endl;
std::cout << "Your program took: " << calcTime << " ms. Without Memory Transfer on" << n << " inputs" << std::endl;
// Cleanup in the event of success.
hipEventDestroy( start );
hipEventDestroy( end );
hipEventDestroy( m_start );
hipEventDestroy( m_end );
// // write to file
// int threads = i;
// int blocks = j;
double memThrough = n / memTransTime;
double calcThrough = n / calcTime;
// out << memThrough << ',' << calcThrough << ',' << T << ',' << B << '\n' ;
// out << memThrough << ',' << calcThrough << ',' << n << '\n' ;
// std::cout << memThrough << ',' << calcThrough << ',' << T << ',' << B << '\n' ;
// std::cout << std::endl;
std::cout << "Throughput: " << memThrough << " ms. With Memory Transfer on " << n << " inputs" << std::endl;
std::cout << "Throughput: " << calcThrough << " ms. Without Memory Transfer on " << n << " inputs" << std::endl;
hipFree(g_in);
hipFree(g_out);
free(input);
free(output);
// out.close();
}
}
| 7cde51e35adcfb09bf3d5200b346fb864f09516c.cu | /*
PROGRAMMING ASSIGNMENT 1
JESSICA SMITH
CS791V
*/
#include <iostream>
#include "stdio.h"
#include "kernel.h"
#include <fstream>
int main() {
int n, T, B;
n = pow(2,21);
// n = 2097087;
int check = 0;
int result = 0;
float calcTime, memTransTime;
// std::ofstream out("ParVSSeq.csv");
// std::ofstream out("test.csv");
for(int p = 8; p < 11; p++){
// Create sizes
// T = pow(2,8);
T = pow(2,p);
// n = pow(2,p);
// T = 1024;
B = (n + (T *2 -1))/ T * 2;
if(B > 65535)
B = 65534;
if( (B * T) < n)
std::cout << "fail" << std::endl;
int *input, *output;
input = (int*) malloc(n*sizeof(int));
output = (int*) malloc(B*sizeof(int));
int *g_in, *g_out;
cudaError_t err = cudaMalloc( (void**) &g_in, n * sizeof(int));
err = cudaMalloc( (void**) &g_out, B * sizeof(int));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
// Populate Array for addition
for (int i = 0; i < n; ++i) {
input[i] = 1;
}
// Create cuda Events
cudaEvent_t start, end, m_start, m_end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventCreate(&m_start);
cudaEventCreate(&m_end);
cudaEventRecord( m_start, 0 );
err = cudaMemcpy(g_out, output, B * sizeof(int), cudaMemcpyHostToDevice);
err = cudaMemcpy(g_in, input, n * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
cudaEventRecord(start,0);
reduce<<<B,T,T*sizeof(int)>>>(g_in, g_out, n);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime( &calcTime, start, end );
err = cudaMemcpy(output, g_out, B * sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
cudaEventRecord( m_end, 0 );
cudaEventSynchronize( m_end );
cudaEventElapsedTime( &memTransTime, m_start, m_end );
result = 0;
for(int i = 0; i < B; i++){
result += output[i];
}
check = 0;
for(int i = 0; i < n; i++){
check += input[i];
}
if(check != result){
std::cerr << "Oh no! Something went wrong. You Suck. :(" << std::endl;
std::cerr << result << " " << check << std::endl;
// std::cerr << result - check << std::endl;
cudaFree(g_out);
cudaFree(g_in);
free(input);
free(output);
exit(1);
}
// std::cout << "Yay! Your program's results are correct." << std::endl;
std::cout << std::endl;
std::cout << "Your program took: " << memTransTime << " ms. With Memory Transfer on " << n << " inputs" << std::endl;
std::cout << "Your program took: " << calcTime << " ms. Without Memory Transfer on" << n << " inputs" << std::endl;
// Cleanup in the event of success.
cudaEventDestroy( start );
cudaEventDestroy( end );
cudaEventDestroy( m_start );
cudaEventDestroy( m_end );
// // write to file
// int threads = i;
// int blocks = j;
double memThrough = n / memTransTime;
double calcThrough = n / calcTime;
// out << memThrough << ',' << calcThrough << ',' << T << ',' << B << '\n' ;
// out << memThrough << ',' << calcThrough << ',' << n << '\n' ;
// std::cout << memThrough << ',' << calcThrough << ',' << T << ',' << B << '\n' ;
// std::cout << std::endl;
std::cout << "Throughput: " << memThrough << " ms. With Memory Transfer on " << n << " inputs" << std::endl;
std::cout << "Throughput: " << calcThrough << " ms. Without Memory Transfer on " << n << " inputs" << std::endl;
cudaFree(g_in);
cudaFree(g_out);
free(input);
free(output);
// out.close();
}
}
|
9088941c349af45c43f75f9341bcbef0f2ef564b.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <svd.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cusolverDn.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ShapeUtils.h>
namespace nd4j {
namespace ops {
namespace helpers {
// FIXME -> we should optimize these helpers for the case when input matrices have c order (perform transpositions appropriately)
template <typename T>
__global__ static void inverseColumnSignCuda(void* vu, const Nd4jLong* uShapeInfo, void* vv, const Nd4jLong* vShapeInfo) {
T* u = reinterpret_cast<T*>(vu);
T* v = reinterpret_cast<T*>(vv);
__shared__ int rank, uLastButOneColumn, vLastButOneColumn; // uRank = vRank
__shared__ Nd4jLong uLen, vLen;
__shared__ Nd4jLong *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(uShapeInfo);
uLen = shape::length(uShapeInfo);
vLen = shape::length(vShapeInfo);
uLastButOneColumn = uShapeInfo[rank] - 2;
vLastButOneColumn = vShapeInfo[rank - 1] - 2;
}
__syncthreads();
const auto ind = threadIdx.x + blockIdx.x * blockDim.x;
auto coords = sharedMem + threadIdx.x * rank;
// u
for (Nd4jLong i = ind; i < uLen; i += gridDim.x * blockDim.x) {
shape::index2coords(rank, uShapeInfo + 1, i, uLen, coords);
if(coords[rank - 1] == 0 || coords[rank - 1] == uLastButOneColumn) // do not change sign in first and last but one columns
continue;
const auto uOffset = shape::getOffset(0, uShapeInfo + 1, uShapeInfo + rank + 1, coords, rank);
u[uOffset] = -u[uOffset];
}
// v
for (Nd4jLong i = ind; i < vLen; i += gridDim.x * blockDim.x) {
shape::index2coords(rank, vShapeInfo + 1, i, vLen, coords);
if(coords[rank - 2] == 0 || coords[rank - 2] == vLastButOneColumn) // do not change sign in first and last but one columns
continue;
const auto vOffset = shape::getOffset(0, vShapeInfo + 1, vShapeInfo + rank + 1, coords, rank);
v[vOffset] = -v[vOffset];
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void inverseColumnSignCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
void* vu, const Nd4jLong* uShapeInfo,
void* vv, const Nd4jLong* vShapeInfo) {
hipLaunchKernelGGL(( inverseColumnSignCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vu, uShapeInfo, vv, vShapeInfo);
}
BUILD_SINGLE_TEMPLATE(template void inverseColumnSignCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t* stream, void* vu, const Nd4jLong* uShapeInfo, void* vv, const Nd4jLong* vShapeInfo), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
static void svdQR(nd4j::LaunchContext* context, const NDArray& A, NDArray& S, NDArray& U, NDArray& VT, const bool fullUV, const bool calcUV) {
    // since the cuda api hipsolverDnDgesvd/hipsolverDnSgesvd has the following constraint on the input matrix A: A_rows >= A_columns && A_order = 'f',
    // we make this function deal with 2 valid cases only:
    // 1) A_rows >= A_columns and A_order = 'f'
    // 2) A_rows <= A_columns and A_order = 'c' - in this case perform transposition to get f order
    // if neither 1) nor 2) is met then throw an exception
// A [m, n]
// S [n]
// U [m, m] or [m, n] if fullUV = false and m > n
// VT [n, n] or [m, n] if fullUV = false and m < n
if(A.rankOf() != 2)
throw std::runtime_error("svdQR: rank of A array is not equal 2 !");
auto m = A.sizeAt(0);
auto n = A.sizeAt(1);
const int minDim = m < n ? m : n;
const char orderA = A.ordering();
if(m < n)
throw std::runtime_error("svdQR: due to cuda api input constrains given shape of A array are not valid !");
if(ShapeUtils::shapeAsString({minDim}) != ShapeUtils::shapeAsString(&S))
throw std::runtime_error("svdQR: wrong shape of S array !");
if(calcUV) {
if(fullUV && ShapeUtils::shapeAsString({m,m}) != ShapeUtils::shapeAsString(&U))
throw std::runtime_error("svdQR: wrong shape of U array !");
else if(!fullUV && ShapeUtils::shapeAsString({m,minDim}) != ShapeUtils::shapeAsString(&U))
throw std::runtime_error("svdQR: wrong shape of U array !");
if(fullUV && ShapeUtils::shapeAsString({n,n}) != ShapeUtils::shapeAsString(&VT))
throw std::runtime_error("svdQR: wrong shape of VT array !");
else if(!fullUV && ShapeUtils::shapeAsString({minDim,n}) != ShapeUtils::shapeAsString(&VT))
throw std::runtime_error("svdQR: wrong shape of VT array !");
}
NDArray* pA = const_cast<NDArray*>(&A);
NDArray* pS = &S;
NDArray* pU = &U;
NDArray* pVT = &VT;
std::vector<NDArray*> toDelete;
if(pA->ews() != 1 || pA->ordering() == 'c') {
pA = A.dup('f');
toDelete.push_back(pA);
}
if(S.ews() != 1) {
pS = S.dup('f');
toDelete.push_back(pS);
}
if(calcUV) {
if(pU->ews() != 1 || pU->ordering() == 'c') {
pU = U.dup('f');
toDelete.push_back(pU);
}
if(pVT->ews() != 1 || pVT->ordering() == 'c') {
pVT = VT.dup('f');
toDelete.push_back(pVT);
}
}
// create cusolverDn handle
hipsolverDnHandle_t handle = nullptr;
cusolverStatus_t status = hipsolverDnCreate(&handle);
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdQR: cuda failed !", status);
// stream
status = hipsolverDnSetStream(handle, *context->getCudaStream());
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdQR: cuda failed !", status);
// query working space of SVD
int lwork = 0;
if(A.dataType() == DataType::DOUBLE)
status = hipsolverDnDgesvd_bufferSize(handle, m, n, &lwork);
else if(A.dataType() == DataType::FLOAT32)
status = hipsolverDnSgesvd_bufferSize(handle, m, n, &lwork);
else
throw std::invalid_argument("svdQR: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdQR: cuda failed !", status);
// allocate memory for dWork
void* dWork = nullptr;
hipError_t status2 = hipMalloc((void**)&dWork , A.sizeOfT() * lwork);
if(status2 != hipSuccess)
throw cuda_exception::build("svdQR: cuda failed !", status2);
signed char jobu, jobvt;
if(calcUV) {
if(fullUV)
jobu = jobvt = 'A';
else
jobu = jobvt = 'S';
}
else {
jobu = jobvt = 'N';
}
int *devInfo = nullptr;
void* rWork = nullptr;
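// note: devInfo and rWork stay null on this path (cusolverDn<t>gesvd normally expects devInfo to reference a device int that receives the convergence status); only non-null pointers are freed at the end of the function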
int lda(m), ldu, ldvt;
if(calcUV) {
ldu = pU->sizeAt(0);
ldvt = pVT->sizeAt(0);
}
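// lda/ldu/ldvt are the leading dimensions (row counts) of the column-major device buffers handed to gesvd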
PointersManager manager(context, "svdQR");
NDArray::prepareSpecialUse({pS, pU, pVT}, {pA});
// choose appropriate cuda svd api depending on data type
if(A.dataType() == DataType::DOUBLE) {
status = hipsolverDnDgesvd(handle, jobu, jobvt, m, n, reinterpret_cast<double*>(pA->getSpecialBuffer()), lda, reinterpret_cast<double*>(pS->getSpecialBuffer()), reinterpret_cast<double*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<double*>(pVT->getSpecialBuffer()), ldvt, reinterpret_cast<double*>(dWork), lwork, reinterpret_cast<double*>(rWork), devInfo);
}
else if(A.dataType() == DataType::FLOAT32) {
status = hipsolverDnSgesvd(handle, jobu, jobvt, m, n, reinterpret_cast<float*>(pA->getSpecialBuffer()), lda, reinterpret_cast<float*>(pS->getSpecialBuffer()), reinterpret_cast<float*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<float*>(pVT->getSpecialBuffer()), ldvt, reinterpret_cast<float*>(dWork), lwork, reinterpret_cast<float*>(rWork), devInfo);
}
else
throw std::invalid_argument("svdQR: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdQR: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pVT}, {pA});
S.assign(pS);
if(calcUV) {
U.assign(pU);
VT.assign(pVT);
}
for (int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
if (devInfo)
hipFree(devInfo);
if (dWork )
hipFree(dWork);
if (rWork)
hipFree(rWork);
if(handle)
hipsolverDnDestroy(handle);
// hipDeviceReset();
}
//////////////////////////////////////////////////////////////////////////
static void svdJcb(nd4j::LaunchContext* context, const NDArray& A, NDArray& S, NDArray& U, NDArray& V, const bool fullUV, const bool calcUV) {
// A [m, n]
// S [n]
// U [m, m] or [m, n] if fullUV = false and m > n
// V [n, n] or [n, m] if fullUV = false and m < n
if(A.rankOf() != 2)
throw std::runtime_error("svdJcb: rank of A array is not equal 2 !");
auto m = A.sizeAt(0);
auto n = A.sizeAt(1);
const int minDim = m < n ? m : n;
if(ShapeUtils::shapeAsString({minDim}) != ShapeUtils::shapeAsString(&S))
throw std::runtime_error("svdJcb: wrong shape of S array !");
if(calcUV) {
if(fullUV && ShapeUtils::shapeAsString({m,m}) != ShapeUtils::shapeAsString(&U))
throw std::runtime_error("svdJcb: wrong shape of U array !");
else if(!fullUV && ShapeUtils::shapeAsString({m,minDim}) != ShapeUtils::shapeAsString(&U))
throw std::runtime_error("svdJcb: wrong shape of U array !");
if(fullUV && ShapeUtils::shapeAsString({n,n}) != ShapeUtils::shapeAsString(&V))
throw std::runtime_error("svdJcb: wrong shape of V array !");
else if(!fullUV && ShapeUtils::shapeAsString({n,minDim}) != ShapeUtils::shapeAsString(&V))
throw std::runtime_error("svdJcb: wrong shape of V array !");
}
NDArray* pA = const_cast<NDArray*>(&A);
NDArray* pS = &S;
NDArray* pU = &U;
NDArray* pV = &V;
std::vector<NDArray*> toDelete;
if(pA->ews() != 1 || pA->ordering() == 'c') {
pA = A.dup('f');
toDelete.push_back(pA);
}
if(S.ews() != 1) {
pS = S.dup('f');
toDelete.push_back(pS);
}
if(calcUV) {
if(pU->ews() != 1 || pU->ordering() == 'c') {
pU = U.dup('f');
toDelete.push_back(pU);
}
if(pV->ews() != 1 || pV->ordering() == 'c') {
pV = V.dup('f');
toDelete.push_back(pV);
}
}
// create cusolverDn handle
hipsolverDnHandle_t handle = nullptr;
cusolverStatus_t status = hipsolverDnCreate(&handle);
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
// stream
status = hipsolverDnSetStream(handle, *context->getCudaStream());
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
// set parameters
hipsolverGesvdjInfo_t gesvdjParams = nullptr;
status = hipsolverDnCreateGesvdjInfo(&gesvdjParams);
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
status = hipsolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
status = hipsolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
int *devInfo = nullptr;
const hipsolverEigMode_t jobz = calcUV ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
const int econ = !fullUV;
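// econ = 1 requests economy-size factors (U is m x min(m,n), V is n x min(m,n)); econ = 0 yields full U [m,m] and V [n,n]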
int lda(m), ldu(m), ldv(m);
if(calcUV) {
ldu = pU->sizeAt(0);
ldv = pV->sizeAt(0);
}
// query working space of SVD
int lwork = 0;
if(A.dataType() == DataType::DOUBLE)
status = hipsolverDnDgesvdj_bufferSize(handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->getSpecialBuffer()), lda, reinterpret_cast<double*>(pS->getSpecialBuffer()), reinterpret_cast<double*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<double*>(pV->getSpecialBuffer()), ldv, &lwork, gesvdjParams);
else if(A.dataType() == DataType::FLOAT32)
status = hipsolverDnSgesvdj_bufferSize(handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->getSpecialBuffer()), lda, reinterpret_cast<float*>(pS->getSpecialBuffer()), reinterpret_cast<float*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<float*>(pV->getSpecialBuffer()), ldv, &lwork, gesvdjParams);
else
throw std::invalid_argument("svdJcb: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
// allocate memory for dWork
void* dWork = nullptr;
auto status2 = hipMalloc((void**)&dWork , A.sizeOfT() * lwork);
if(status2 != hipSuccess)
throw cuda_exception::build("svdJcb: cuda failed !", status2);
PointersManager manager(context, "svdJcb");
NDArray::prepareSpecialUse({pS, pU, pV}, {pA});
// choose appropriate cuda svd api depending on data type
if(A.dataType() == DataType::DOUBLE) {
status = hipsolverDnDgesvdj(handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->getSpecialBuffer()), lda, reinterpret_cast<double*>(pS->getSpecialBuffer()), reinterpret_cast<double*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<double*>(pV->getSpecialBuffer()), ldv, reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams);
}
else if(A.dataType() == DataType::FLOAT32) {
status = hipsolverDnSgesvdj(handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->getSpecialBuffer()), lda, reinterpret_cast<float*>(pS->getSpecialBuffer()), reinterpret_cast<float*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<float*>(pV->getSpecialBuffer()), ldv, reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams);
}
else
throw std::invalid_argument("svdJcb: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pV}, {pA});
S.assign(pS);
if(calcUV) {
U.assign(pU);
V.assign(pV);
}
for (int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
if (devInfo)
hipFree(devInfo);
if (dWork )
hipFree(dWork);
if(handle)
hipsolverDnDestroy(handle);
if(gesvdjParams)
hipsolverDnDestroyGesvdjInfo(gesvdjParams);
// hipDeviceReset();
}
//////////////////////////////////////////////////////////////////////////
static void svdBatched(nd4j::LaunchContext* context, const NDArray& A, NDArray& S, NDArray& U, NDArray& V, const bool fullUV, const bool calcUV) {
// A [..., m, n]
// S [..., n]
// U [..., m, m] or [..., m, n] if fullUV = false and m > n
// V [..., n, n] or [..., n, m] if fullUV = false and m < n
auto m = A.sizeAt(-2);
auto n = A.sizeAt(-1);
const int minDim = m < n ? m : n;
const Nd4jLong bS = A.lengthOf() / (m * n);
if(m > 32 || n > 32)
throw std::runtime_error("svdBatched: numbers of rows and columns should be <= 32 !");
if(minDim != S.sizeAt(-1))
throw std::runtime_error("svdBatched: wrong shape of S array !");
if(calcUV) {
if(U.sizeAt(-2) != m)
throw std::runtime_error("svdBatched: wrong shape of U array !");
if(U.sizeAt(-1) != (fullUV ? m : minDim))
throw std::runtime_error("svdBatched: wrong shape of U array !");
if(U.lengthOf() / (U.sizeAt(-2) * U.sizeAt(-1)) != bS)
throw std::runtime_error("svdBatched: wrong shape of U array !");
if(V.sizeAt(-2) != n)
throw std::runtime_error("svdBatched: wrong shape of V array !");
if(V.sizeAt(-1) != (fullUV ? n : minDim))
throw std::runtime_error("svdBatched: wrong shape of V array !");
if(V.lengthOf() / (V.sizeAt(-2) * V.sizeAt(-1)) != bS)
throw std::runtime_error("svdBatched: wrong shape of V array !");
}
NDArray* pA = const_cast<NDArray*>(&A);
NDArray* pS = &S;
NDArray* pU = &U;
NDArray* pV = &V;
std::vector<NDArray*> toDelete;
if(pA->ews() != 1 || pA->ordering() == 'c') {
pA = A.dup('f');
toDelete.push_back(pA);
}
if(S.ews() != 1) {
pS = S.dup('f');
toDelete.push_back(pS);
}
if(calcUV) {
if(pU->ews() != 1 || pU->ordering() == 'c') {
pU = U.dup('f');
toDelete.push_back(pU);
}
if(pV->ews() != 1 || pV->ordering() == 'c') {
pV = V.dup('f');
toDelete.push_back(pV);
}
}
// create cusolverDn handle
hipsolverDnHandle_t handle = nullptr;
cusolverStatus_t status = hipsolverDnCreate(&handle);
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
// stream
status = hipsolverDnSetStream(handle, *context->getCudaStream());
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
// set parameters
hipsolverGesvdjInfo_t gesvdjParams = nullptr;
status = hipsolverDnCreateGesvdjInfo(&gesvdjParams);
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
status = hipsolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
status = hipsolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
// devInfo
int *devInfo = nullptr;
auto status2 = hipMalloc((void**)&devInfo, sizeof(int) * bS);
if(status2 != hipSuccess)
throw cuda_exception::build("svdBatched: cuda failed !", status2);
status2 = hipDeviceSynchronize();
if(status2 != hipSuccess)
throw cuda_exception::build("svdJcb: cuda failed !", status2);
const hipsolverEigMode_t jobz = calcUV ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
int lda(m), ldu, ldv;
if(calcUV) {
ldu = pU->sizeAt(-2);
ldv = pV->sizeAt(-2);
}
// batched column-major layout: Ak(i,j) = A[i + lda*j + lda*n*k]; the original "A[i + 5*j + 25*k]" form comes from the 5x5 case in the cuSOLVER sample
// query working space of SVD
int lwork = 0;
if(A.dataType() == DataType::DOUBLE)
status = hipsolverDnDgesvdjBatched_bufferSize(handle, jobz, m, n, reinterpret_cast<double*>(pA->getSpecialBuffer()), lda, reinterpret_cast<double*>(pS->getSpecialBuffer()), reinterpret_cast<double*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<double*>(pV->getSpecialBuffer()), ldv, &lwork, gesvdjParams, bS);
else if(A.dataType() == DataType::FLOAT32)
status = hipsolverDnSgesvdjBatched_bufferSize(handle, jobz, m, n, reinterpret_cast<float*>(pA->getSpecialBuffer()), lda, reinterpret_cast<float*>(pS->getSpecialBuffer()), reinterpret_cast<float*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<float*>(pV->getSpecialBuffer()), ldv, &lwork, gesvdjParams, bS);
else
throw std::invalid_argument("svdBatched: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
// allocate memory for dWork
void* dWork = nullptr;
status2 = hipMalloc((void**)&dWork , A.sizeOfT() * lwork);
if(status2 != hipSuccess)
throw cuda_exception::build("svdBatched: cuda failed !", status2);
status2 = hipDeviceSynchronize();
if(status2 != hipSuccess)
throw cuda_exception::build("svdBatched: cuda failed !", status2);
PointersManager manager(context, "svdBatched");
NDArray::prepareSpecialUse({pS, pU, pV}, {pA});
// choose appropriate cuda svd api depending on data type
if(A.dataType() == DataType::DOUBLE) {
status = hipsolverDnDgesvdjBatched(handle, jobz, m, n, reinterpret_cast<double*>(pA->getSpecialBuffer()), lda, reinterpret_cast<double*>(pS->getSpecialBuffer()), reinterpret_cast<double*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<double*>(pV->getSpecialBuffer()), ldv, reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams, bS);
}
else if(A.dataType() == DataType::FLOAT32) {
status = hipsolverDnSgesvdjBatched(handle, jobz, m, n, reinterpret_cast<float*>(pA->getSpecialBuffer()), lda, reinterpret_cast<float*>(pS->getSpecialBuffer()), reinterpret_cast<float*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<float*>(pV->getSpecialBuffer()), ldv, reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams, bS);
}
else
throw std::invalid_argument("svdBatched: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pV}, {pA});
S.assign(pS);
if(calcUV) {
U.assign(pU);
V.assign(pV);
}
for (int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
if (devInfo)
hipFree(devInfo);
if (dWork )
hipFree(dWork);
if(handle)
hipsolverDnDestroy(handle);
if(gesvdjParams)
hipsolverDnDestroyGesvdjInfo(gesvdjParams);
// hipDeviceReset();
}
////////////////////////////////////////////////////////////////////
void svd(nd4j::LaunchContext* context, const NDArray* x, const std::vector<NDArray*>& outArrs, const bool fullUV, const bool calcUV, const int switchNum) {
NDArray* S = outArrs[0];
NDArray* U = outArrs[1];
// NDArray VT = outArrs[2]->transpose();
NDArray* V = outArrs[2];
if(x->rankOf() == 2) {
// svdQR(context, *x, *S, *U, VT, fullUV, calcUV);
svdJcb(context, *x, *S, *U, *V, fullUV, calcUV);
}
else {
// svdBatched(context, *x, *S, *U, *V, fullUV, calcUV);
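// the batched cusolver path above is disabled; instead each [m, n] slice along the last two dimensions gets its own svdJcb call via TADs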
ResultSet *tadsU(nullptr), *tadsV(nullptr);
auto tadsX = x->allTensorsAlongDimension({x->rankOf() - 2, x->rankOf() - 1});
auto tadsS = S->allTensorsAlongDimension({S->rankOf() - 1});
if(calcUV) {
tadsU = U->allTensorsAlongDimension({U->rankOf() - 2, U->rankOf() - 1});
tadsV = V->allTensorsAlongDimension({V->rankOf() - 2, V->rankOf() - 1});
}
for (int i = 0; i < tadsX->size(); ++i)
svdJcb(context, *tadsX->at(i), *tadsS->at(i), calcUV ? *tadsU->at(i) : *S, calcUV ? *tadsV->at(i) : *S, fullUV, calcUV);
delete tadsX;
delete tadsS;
if(calcUV) {
delete tadsU;
delete tadsV;
}
}
}
}
}
} | 9088941c349af45c43f75f9341bcbef0f2ef564b.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <svd.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ShapeUtils.h>
namespace nd4j {
namespace ops {
namespace helpers {
// FIXME -> we should optimize these helpers for the case when input matrices have c order (perform transpositions appropriately)
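// inverseColumnSignCuda negates, in place, every column of u except the first and the last-but-one, and every row of v except the first and the last-but-one (presumably to match the sign convention of the CPU svd path)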
template <typename T>
__global__ static void inverseColumnSignCuda(void* vu, const Nd4jLong* uShapeInfo, void* vv, const Nd4jLong* vShapeInfo) {
T* u = reinterpret_cast<T*>(vu);
T* v = reinterpret_cast<T*>(vv);
__shared__ int rank, uLastButOneColumn, vLastButOneColumn; // uRank = vRank
__shared__ Nd4jLong uLen, vLen;
__shared__ Nd4jLong *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(uShapeInfo);
uLen = shape::length(uShapeInfo);
vLen = shape::length(vShapeInfo);
uLastButOneColumn = uShapeInfo[rank] - 2;
vLastButOneColumn = vShapeInfo[rank - 1] - 2;
}
__syncthreads();
const auto ind = threadIdx.x + blockIdx.x * blockDim.x;
auto coords = sharedMem + threadIdx.x * rank;
// u
for (Nd4jLong i = ind; i < uLen; i += gridDim.x * blockDim.x) {
shape::index2coords(rank, uShapeInfo + 1, i, uLen, coords);
if(coords[rank - 1] == 0 || coords[rank - 1] == uLastButOneColumn) // do not change sign in first and last but one columns
continue;
const auto uOffset = shape::getOffset(0, uShapeInfo + 1, uShapeInfo + rank + 1, coords, rank);
u[uOffset] = -u[uOffset];
}
// v
for (Nd4jLong i = ind; i < vLen; i += gridDim.x * blockDim.x) {
shape::index2coords(rank, vShapeInfo + 1, i, vLen, coords);
if(coords[rank - 2] == 0 || coords[rank - 2] == vLastButOneColumn) // do not change sign in first and last but one columns
continue;
const auto vOffset = shape::getOffset(0, vShapeInfo + 1, vShapeInfo + rank + 1, coords, rank);
v[vOffset] = -v[vOffset];
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void inverseColumnSignCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
void* vu, const Nd4jLong* uShapeInfo,
void* vv, const Nd4jLong* vShapeInfo) {
inverseColumnSignCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vu, uShapeInfo, vv, vShapeInfo);
}
BUILD_SINGLE_TEMPLATE(template void inverseColumnSignCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t* stream, void* vu, const Nd4jLong* uShapeInfo, void* vv, const Nd4jLong* vShapeInfo), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
static void svdQR(nd4j::LaunchContext* context, const NDArray& A, NDArray& S, NDArray& U, NDArray& VT, const bool fullUV, const bool calcUV) {
// since cuda api cusolverDnDgesvd/cusolverDnSgesvd has the following constraint on input matrix A: A_rows >= A_columns && A_order = 'f'
// we make this function deal with 2 valid cases only:
// 1) A_rows >= A_columns and A_order = 'f'
// 2) A_rows <= A_columns and A_order = 'c' - in this case perform transposition to get f order
// if 1) or 2) are not met then throw exception
// A [m, n]
// S [n]
// U [m, m] or [m, n] if fullUV = false and m > n
// VT [n, n] or [m, n] if fullUV = false and m < n
if(A.rankOf() != 2)
throw std::runtime_error("svdQR: rank of A array is not equal 2 !");
auto m = A.sizeAt(0);
auto n = A.sizeAt(1);
const int minDim = m < n ? m : n;
const char orderA = A.ordering();
if(m < n)
throw std::runtime_error("svdQR: due to cuda api input constrains given shape of A array are not valid !");
if(ShapeUtils::shapeAsString({minDim}) != ShapeUtils::shapeAsString(&S))
throw std::runtime_error("svdQR: wrong shape of S array !");
if(calcUV) {
if(fullUV && ShapeUtils::shapeAsString({m,m}) != ShapeUtils::shapeAsString(&U))
throw std::runtime_error("svdQR: wrong shape of U array !");
else if(!fullUV && ShapeUtils::shapeAsString({m,minDim}) != ShapeUtils::shapeAsString(&U))
throw std::runtime_error("svdQR: wrong shape of U array !");
if(fullUV && ShapeUtils::shapeAsString({n,n}) != ShapeUtils::shapeAsString(&VT))
throw std::runtime_error("svdQR: wrong shape of VT array !");
else if(!fullUV && ShapeUtils::shapeAsString({minDim,n}) != ShapeUtils::shapeAsString(&VT))
throw std::runtime_error("svdQR: wrong shape of VT array !");
}
NDArray* pA = const_cast<NDArray*>(&A);
NDArray* pS = &S;
NDArray* pU = &U;
NDArray* pVT = &VT;
std::vector<NDArray*> toDelete;
if(pA->ews() != 1 || pA->ordering() == 'c') {
pA = A.dup('f');
toDelete.push_back(pA);
}
if(S.ews() != 1) {
pS = S.dup('f');
toDelete.push_back(pS);
}
if(calcUV) {
if(pU->ews() != 1 || pU->ordering() == 'c') {
pU = U.dup('f');
toDelete.push_back(pU);
}
if(pVT->ews() != 1 || pVT->ordering() == 'c') {
pVT = VT.dup('f');
toDelete.push_back(pVT);
}
}
// create cusolverDn handle
cusolverDnHandle_t handle = nullptr;
cusolverStatus_t status = cusolverDnCreate(&handle);
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdQR: cuda failed !", status);
// stream
status = cusolverDnSetStream(handle, *context->getCudaStream());
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdQR: cuda failed !", status);
// query working space of SVD
int lwork = 0;
if(A.dataType() == DataType::DOUBLE)
status = cusolverDnDgesvd_bufferSize(handle, m, n, &lwork);
else if(A.dataType() == DataType::FLOAT32)
status = cusolverDnSgesvd_bufferSize(handle, m, n, &lwork);
else
throw std::invalid_argument("svdQR: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdQR: cuda failed !", status);
// allocate memory for dWork
void* dWork = nullptr;
cudaError_t status2 = cudaMalloc((void**)&dWork , A.sizeOfT() * lwork);
if(status2 != cudaSuccess)
throw cuda_exception::build("svdQR: cuda failed !", status2);
signed char jobu, jobvt;
if(calcUV) {
if(fullUV)
jobu = jobvt = 'A';
else
jobu = jobvt = 'S';
}
else {
jobu = jobvt = 'N';
}
int *devInfo = nullptr;
void* rWork = nullptr;
int lda(m), ldu, ldvt;
if(calcUV) {
ldu = pU->sizeAt(0);
ldvt = pVT->sizeAt(0);
}
PointersManager manager(context, "svdQR");
NDArray::prepareSpecialUse({pS, pU, pVT}, {pA});
// choose appropriate cuda svd api depending on data type
if(A.dataType() == DataType::DOUBLE) {
status = cusolverDnDgesvd(handle, jobu, jobvt, m, n, reinterpret_cast<double*>(pA->getSpecialBuffer()), lda, reinterpret_cast<double*>(pS->getSpecialBuffer()), reinterpret_cast<double*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<double*>(pVT->getSpecialBuffer()), ldvt, reinterpret_cast<double*>(dWork), lwork, reinterpret_cast<double*>(rWork), devInfo);
}
else if(A.dataType() == DataType::FLOAT32) {
status = cusolverDnSgesvd(handle, jobu, jobvt, m, n, reinterpret_cast<float*>(pA->getSpecialBuffer()), lda, reinterpret_cast<float*>(pS->getSpecialBuffer()), reinterpret_cast<float*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<float*>(pVT->getSpecialBuffer()), ldvt, reinterpret_cast<float*>(dWork), lwork, reinterpret_cast<float*>(rWork), devInfo);
}
else
throw std::invalid_argument("svdQR: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdQR: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pVT}, {pA});
S.assign(pS);
if(calcUV) {
U.assign(pU);
VT.assign(pVT);
}
for (int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
if (devInfo)
cudaFree(devInfo);
if (dWork )
cudaFree(dWork);
if (rWork)
cudaFree(rWork);
if(handle)
cusolverDnDestroy(handle);
// cudaDeviceReset();
}
//////////////////////////////////////////////////////////////////////////
static void svdJcb(nd4j::LaunchContext* context, const NDArray& A, NDArray& S, NDArray& U, NDArray& V, const bool fullUV, const bool calcUV) {
// A [m, n]
// S [n]
// U [m, m] or [m, n] if fullUV = false and m > n
// V [n, n] or [n, m] if fullUV = false and m < n
if(A.rankOf() != 2)
throw std::runtime_error("svdJcb: rank of A array is not equal 2 !");
auto m = A.sizeAt(0);
auto n = A.sizeAt(1);
const int minDim = m < n ? m : n;
if(ShapeUtils::shapeAsString({minDim}) != ShapeUtils::shapeAsString(&S))
throw std::runtime_error("svdJcb: wrong shape of S array !");
if(calcUV) {
if(fullUV && ShapeUtils::shapeAsString({m,m}) != ShapeUtils::shapeAsString(&U))
throw std::runtime_error("svdJcb: wrong shape of U array !");
else if(!fullUV && ShapeUtils::shapeAsString({m,minDim}) != ShapeUtils::shapeAsString(&U))
throw std::runtime_error("svdJcb: wrong shape of U array !");
if(fullUV && ShapeUtils::shapeAsString({n,n}) != ShapeUtils::shapeAsString(&V))
throw std::runtime_error("svdJcb: wrong shape of V array !");
else if(!fullUV && ShapeUtils::shapeAsString({n,minDim}) != ShapeUtils::shapeAsString(&V))
throw std::runtime_error("svdJcb: wrong shape of V array !");
}
NDArray* pA = const_cast<NDArray*>(&A);
NDArray* pS = &S;
NDArray* pU = &U;
NDArray* pV = &V;
std::vector<NDArray*> toDelete;
if(pA->ews() != 1 || pA->ordering() == 'c') {
pA = A.dup('f');
toDelete.push_back(pA);
}
if(S.ews() != 1) {
pS = S.dup('f');
toDelete.push_back(pS);
}
if(calcUV) {
if(pU->ews() != 1 || pU->ordering() == 'c') {
pU = U.dup('f');
toDelete.push_back(pU);
}
if(pV->ews() != 1 || pV->ordering() == 'c') {
pV = V.dup('f');
toDelete.push_back(pV);
}
}
// create cusolverDn handle
cusolverDnHandle_t handle = nullptr;
cusolverStatus_t status = cusolverDnCreate(&handle);
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
// stream
status = cusolverDnSetStream(handle, *context->getCudaStream());
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
// set parameters
gesvdjInfo_t gesvdjParams = nullptr;
status = cusolverDnCreateGesvdjInfo(&gesvdjParams);
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
status = cusolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
status = cusolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
int *devInfo = nullptr;
const cusolverEigMode_t jobz = calcUV ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
const int econ = !fullUV;
int lda(m), ldu(m), ldv(m);
if(calcUV) {
ldu = pU->sizeAt(0);
ldv = pV->sizeAt(0);
}
// query working space of SVD
int lwork = 0;
if(A.dataType() == DataType::DOUBLE)
status = cusolverDnDgesvdj_bufferSize(handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->getSpecialBuffer()), lda, reinterpret_cast<double*>(pS->getSpecialBuffer()), reinterpret_cast<double*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<double*>(pV->getSpecialBuffer()), ldv, &lwork, gesvdjParams);
else if(A.dataType() == DataType::FLOAT32)
status = cusolverDnSgesvdj_bufferSize(handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->getSpecialBuffer()), lda, reinterpret_cast<float*>(pS->getSpecialBuffer()), reinterpret_cast<float*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<float*>(pV->getSpecialBuffer()), ldv, &lwork, gesvdjParams);
else
throw std::invalid_argument("svdJcb: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
// allocate memory for dWork
void* dWork = nullptr;
auto status2 = cudaMalloc((void**)&dWork , A.sizeOfT() * lwork);
if(status2 != cudaSuccess)
throw cuda_exception::build("svdJcb: cuda failed !", status2);
PointersManager manager(context, "svdJcb");
NDArray::prepareSpecialUse({pS, pU, pV}, {pA});
// choose appropriate cuda svd api depending on data type
if(A.dataType() == DataType::DOUBLE) {
status = cusolverDnDgesvdj(handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->getSpecialBuffer()), lda, reinterpret_cast<double*>(pS->getSpecialBuffer()), reinterpret_cast<double*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<double*>(pV->getSpecialBuffer()), ldv, reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams);
}
else if(A.dataType() == DataType::FLOAT32) {
status = cusolverDnSgesvdj(handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->getSpecialBuffer()), lda, reinterpret_cast<float*>(pS->getSpecialBuffer()), reinterpret_cast<float*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<float*>(pV->getSpecialBuffer()), ldv, reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams);
}
else
throw std::invalid_argument("svdJcb: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdJcb: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pV}, {pA});
S.assign(pS);
if(calcUV) {
U.assign(pU);
V.assign(pV);
}
for (int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
if (devInfo)
cudaFree(devInfo);
if (dWork )
cudaFree(dWork);
if(handle)
cusolverDnDestroy(handle);
if(gesvdjParams)
cusolverDnDestroyGesvdjInfo(gesvdjParams);
// cudaDeviceReset();
}
//////////////////////////////////////////////////////////////////////////
static void svdBatched(nd4j::LaunchContext* context, const NDArray& A, NDArray& S, NDArray& U, NDArray& V, const bool fullUV, const bool calcUV) {
// A [..., m, n]
// S [..., n]
// U [..., m, m] or [..., m, n] if fullUV = false and m > n
// V [..., n, n] or [..., n, m] if fullUV = false and m < n
auto m = A.sizeAt(-2);
auto n = A.sizeAt(-1);
const int minDim = m < n ? m : n;
const Nd4jLong bS = A.lengthOf() / (m * n);
if(m > 32 || n > 32)
throw std::runtime_error("svdBatched: numbers of rows and columns should be <= 32 !");
if(minDim != S.sizeAt(-1))
throw std::runtime_error("svdBatched: wrong shape of S array !");
if(calcUV) {
if(U.sizeAt(-2) != m)
throw std::runtime_error("svdBatched: wrong shape of U array !");
if(U.sizeAt(-1) != (fullUV ? m : minDim))
throw std::runtime_error("svdBatched: wrong shape of U array !");
if(U.lengthOf() / (U.sizeAt(-2) * U.sizeAt(-1)) != bS)
throw std::runtime_error("svdBatched: wrong shape of U array !");
if(V.sizeAt(-2) != n)
throw std::runtime_error("svdBatched: wrong shape of V array !");
if(V.sizeAt(-1) != (fullUV ? n : minDim))
throw std::runtime_error("svdBatched: wrong shape of V array !");
if(V.lengthOf() / (V.sizeAt(-2) * V.sizeAt(-1)) != bS)
throw std::runtime_error("svdBatched: wrong shape of V array !");
}
NDArray* pA = const_cast<NDArray*>(&A);
NDArray* pS = &S;
NDArray* pU = &U;
NDArray* pV = &V;
std::vector<NDArray*> toDelete;
if(pA->ews() != 1 || pA->ordering() == 'c') {
pA = A.dup('f');
toDelete.push_back(pA);
}
if(S.ews() != 1) {
pS = S.dup('f');
toDelete.push_back(pS);
}
if(calcUV) {
if(pU->ews() != 1 || pU->ordering() == 'c') {
pU = U.dup('f');
toDelete.push_back(pU);
}
if(pV->ews() != 1 || pV->ordering() == 'c') {
pV = V.dup('f');
toDelete.push_back(pV);
}
}
// create cusolverDn handle
cusolverDnHandle_t handle = nullptr;
cusolverStatus_t status = cusolverDnCreate(&handle);
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
// stream
status = cusolverDnSetStream(handle, *context->getCudaStream());
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
// set parameters
gesvdjInfo_t gesvdjParams = nullptr;
status = cusolverDnCreateGesvdjInfo(&gesvdjParams);
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
status = cusolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
status = cusolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
// devInfo
int *devInfo = nullptr;
auto status2 = cudaMalloc((void**)&devInfo, sizeof(int) * bS);
if(status2 != cudaSuccess)
throw cuda_exception::build("svdBatched: cuda failed !", status2);
status2 = cudaDeviceSynchronize();
if(status2 != cudaSuccess)
throw cuda_exception::build("svdJcb: cuda failed !", status2);
const cusolverEigMode_t jobz = calcUV ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
int lda(m), ldu, ldv;
if(calcUV) {
ldu = pU->sizeAt(-2);
ldv = pV->sizeAt(-2);
}
// batched column-major layout: Ak(i,j) = A[i + lda*j + lda*n*k]; the original "A[i + 5*j + 25*k]" form comes from the 5x5 case in the cuSOLVER sample
// query working space of SVD
int lwork = 0;
if(A.dataType() == DataType::DOUBLE)
status = cusolverDnDgesvdjBatched_bufferSize(handle, jobz, m, n, reinterpret_cast<double*>(pA->getSpecialBuffer()), lda, reinterpret_cast<double*>(pS->getSpecialBuffer()), reinterpret_cast<double*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<double*>(pV->getSpecialBuffer()), ldv, &lwork, gesvdjParams, bS);
else if(A.dataType() == DataType::FLOAT32)
status = cusolverDnSgesvdjBatched_bufferSize(handle, jobz, m, n, reinterpret_cast<float*>(pA->getSpecialBuffer()), lda, reinterpret_cast<float*>(pS->getSpecialBuffer()), reinterpret_cast<float*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<float*>(pV->getSpecialBuffer()), ldv, &lwork, gesvdjParams, bS);
else
throw std::invalid_argument("svdBatched: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
// allocate memory for dWork
void* dWork = nullptr;
status2 = cudaMalloc((void**)&dWork , A.sizeOfT() * lwork);
if(status2 != cudaSuccess)
throw cuda_exception::build("svdBatched: cuda failed !", status2);
status2 = cudaDeviceSynchronize();
if(status2 != cudaSuccess)
throw cuda_exception::build("svdBatched: cuda failed !", status2);
PointersManager manager(context, "svdBatched");
NDArray::prepareSpecialUse({pS, pU, pV}, {pA});
// choose appropriate cuda svd api depending on data type
if(A.dataType() == DataType::DOUBLE) {
status = cusolverDnDgesvdjBatched(handle, jobz, m, n, reinterpret_cast<double*>(pA->getSpecialBuffer()), lda, reinterpret_cast<double*>(pS->getSpecialBuffer()), reinterpret_cast<double*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<double*>(pV->getSpecialBuffer()), ldv, reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams, bS);
}
else if(A.dataType() == DataType::FLOAT32) {
status = cusolverDnSgesvdjBatched(handle, jobz, m, n, reinterpret_cast<float*>(pA->getSpecialBuffer()), lda, reinterpret_cast<float*>(pS->getSpecialBuffer()), reinterpret_cast<float*>(pU->getSpecialBuffer()), ldu, reinterpret_cast<float*>(pV->getSpecialBuffer()), ldv, reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams, bS);
}
else
throw std::invalid_argument("svdBatched: given data type is unsupported !");
if(status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("svdBatched: cuda failed !", status);
manager.synchronize();
NDArray::registerSpecialUse({pS, pU, pV}, {pA});
S.assign(pS);
if(calcUV) {
U.assign(pU);
V.assign(pV);
}
for (int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
if (devInfo)
cudaFree(devInfo);
if (dWork )
cudaFree(dWork);
if(handle)
cusolverDnDestroy(handle);
if(gesvdjParams)
cusolverDnDestroyGesvdjInfo(gesvdjParams);
// cudaDeviceReset();
}
////////////////////////////////////////////////////////////////////
void svd(nd4j::LaunchContext* context, const NDArray* x, const std::vector<NDArray*>& outArrs, const bool fullUV, const bool calcUV, const int switchNum) {
NDArray* S = outArrs[0];
NDArray* U = outArrs[1];
// NDArray VT = outArrs[2]->transpose();
NDArray* V = outArrs[2];
if(x->rankOf() == 2) {
// svdQR(context, *x, *S, *U, VT, fullUV, calcUV);
svdJcb(context, *x, *S, *U, *V, fullUV, calcUV);
}
else {
// svdBatched(context, *x, *S, *U, *V, fullUV, calcUV);
ResultSet *tadsU(nullptr), *tadsV(nullptr);
auto tadsX = x->allTensorsAlongDimension({x->rankOf() - 2, x->rankOf() - 1});
auto tadsS = S->allTensorsAlongDimension({S->rankOf() - 1});
if(calcUV) {
tadsU = U->allTensorsAlongDimension({U->rankOf() - 2, U->rankOf() - 1});
tadsV = V->allTensorsAlongDimension({V->rankOf() - 2, V->rankOf() - 1});
}
for (int i = 0; i < tadsX->size(); ++i)
svdJcb(context, *tadsX->at(i), *tadsS->at(i), calcUV ? *tadsU->at(i) : *S, calcUV ? *tadsV->at(i) : *S, fullUV, calcUV);
delete tadsX;
delete tadsS;
if(calcUV) {
delete tadsU;
delete tadsV;
}
}
}
}
}
} |
41b983cd33321897267192a2f01fbe02943ec9b2.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/LinearAlgebra.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
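// use_magma_ records at compile time whether MAGMA support was built in; it is presumably checked by the linear-algebra dispatch code before taking the MAGMA-backed paths below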
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info, magma_trans_t trans);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue, magma_trans_t trans);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
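// the explicit specializations below dispatch each scalar type to the matching s/d/c/z MAGMA routine; MagmaStreamSyncGuard presumably keeps MAGMA's default queue in sync with the current stream around the call, and AT_CUDA_CHECK(hipGetLastError()) surfaces any asynchronous launch error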
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
// magma [sd]geev wants two separate output arrays: wr and wi for the real
// and imaginary parts
double *wr = w;
double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info, magma_trans_t trans) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(trans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info, magma_trans_t trans) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(trans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info, magma_trans_t trans) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(trans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info, magma_trans_t trans) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(trans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue, magma_trans_t trans) {
info = magma_dgetrs_batched(trans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue, magma_trans_t trans) {
info = magma_sgetrs_batched(trans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue, magma_trans_t trans) {
info = magma_zgetrs_batched(trans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue, magma_trans_t trans) {
info = magma_cgetrs_batched(trans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
namespace {
/*
MAGMA can return errors both as a return value and in the info argument.
The return value and info should always be identical.
In general, the meaning is as given in this table.
Predefined error codes are large negative numbers. Using the symbolic
constants below is preferred, but the numeric values can be found in
include/magma_types.h.
Info | Description
----------- | -----------
info = 0 (MAGMA_SUCCESS) | Successful exit
info < 0, but small | For info = -i, the i-th argument had an illegal value
info > 0 | Function-specific error such as singular matrix
MAGMA_ERR_DEVICE_ALLOC | Could not allocate GPU device memory
MAGMA_ERR_HOST_ALLOC | Could not allocate CPU host memory
MAGMA_ERR_ILLEGAL_VALUE | An argument had an illegal value (deprecated; instead it should return -i to say the i-th argument was bad)
MAGMA_ERR_INVALID_PTR | Can't free pointer
MAGMA_ERR_NOT_IMPLEMENTED | Function or option not implemented
MAGMA_ERR_NOT_SUPPORTED | Function or option not supported on the current architecture
*/
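// Example (illustrative): info == -3 means the third argument was illegal and, like the
// predefined negative codes above, trips the TORCH_CHECK below; a positive info (e.g. a
// singular factor reported by a getrf-style routine) is left for the caller to report via
// batchCheckErrors/singleCheckErrors.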
void checkMagmaInternalError(magma_int_t info, const std::string& magma_function_name) {
// if info > 0 the error is function-specific, do nothing in this case
TORCH_CHECK(info >= 0,
"MAGMA error: ",
magma_strerror(info),
", info = ", info,
", when calling ", magma_function_name);
}
} // anonymous namespace
#endif // USE_MAGMA
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
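// Illustrative expansion of the macro above (names taken from the call sites below):
//   scalar_t** A_array;
//   ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
// becomes
//   auto storage_A_array = pin_memory<scalar_t*>(batch_size);
//   A_array = static_cast<scalar_t**>(storage_A_array.data());
// i.e. each pointer array lives in host memory (pinned, per the pin_memory helper) owned by the
// local storage_* object.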
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
// magmaSolve requires infos tensor to live on CPU
Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, infos.data_ptr<magma_int_t>());
infos_out.copy_(infos);
} else {
auto infos_data = infos_out.data_ptr<magma_int_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
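// Worked example: with batch_size = 70000 and batch_limit = 65535 the loop below runs one
// full mini-batch of 65535 solves and the tail call handles the remaining 70000 % 65535 = 4465.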
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &infos_data[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&infos_data[mini_idx], batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
// infos might not get filled for empty inputs, therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of the n-by-n matrix 'self'; the result is saved to 'self_inv'.
'infos_lu' and 'infos_getri' are int Tensors containing error codes for each matrix in the batched input:
'infos_lu' holds the magmaLu errors and 'infos_getri' holds the magmaGetri errors.
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix inversions
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& info_lu, Tensor& info_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
// magmaLu and magmaGetri require the info argument to live on CPU,
// but the info_lu and info_getri tensors are on the same device as self
magma_int_t info_lu_cpu = 0;
magma_int_t info_getri_cpu = 0;
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), &info_lu_cpu);
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_getri_cpu);
info_lu.fill_(info_lu_cpu);
info_getri.fill_(info_getri_cpu);
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
// magmaLu and magmaGetri require the infos tensors to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
} else {
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
}
#else
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.cholesky on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// magmaCholesky requires info to be on CPU
magma_int_t info_cpu = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info_cpu);
info.fill_(info_cpu);
} else {
TORCH_INTERNAL_ASSERT(info.is_cuda());
auto info_data = info.data_ptr<magma_int_t>();
// magmaCholeskyBatched supports only upper=false
uplo = MagmaLower;
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run without
// violating the maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
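// e.g. 300000 double matrices are processed in chunks of 262140 + 37860, while 300000 complex
// matrices need four chunks of 65535 plus a final chunk of 37860.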
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_data[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, nbatches, magma_queue);
}
}
#endif
}
void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) {
Tensor result = input;
if (input.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing IMA
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the cloneBatchedColumnMajor function; however, it pads the input with
// one extra element, utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way, if MAGMA
// reads out of bounds, it still touches valid user memory.
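// e.g. for a (B, n, n) input the buffer below holds B*n*n + 1 elements; resize_as_ keeps that
// storage, so a one-element overread by MAGMA still lands in memory we own.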
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).transpose_(-2, -1);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(result.transpose(-2, -1).is_contiguous());
// batched MAGMA doesn't support upper=true
// we transpose and conjugate the input as a workaround
result.copy_(upper ? input.conj().transpose(-2, -1) : input);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
input.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, upper, info);
});
if (input.dim() > 2) {
// if upper=true we need to transpose and conjugate the result tensor
// because the cholesky decomposition is stored in the lower triangular part
if (upper) {
input.copy_(result.conj().transpose(-2, -1));
} else {
input.copy_(result);
}
}
}
static void cholesky_kernel(const Tensor& input, const Tensor& info, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(input) == 1 || !use_magma_ || use_cusolver_potrf_batched_) {
cholesky_helper_cusolver(input, upper, info);
} else {
cholesky_helper_magma(input, upper, info);
}
#else
cholesky_helper_magma(input, upper, info);
#endif // USE_CUSOLVER
}
REGISTER_DISPATCH(cholesky_stub, &cholesky_kernel)
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite n-by-n matrix 'input' using the Cholesky solver.
This is an in-place routine; the content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls a LAPACK routine on the CPU;
// using magmaCholeskySolveBatched is a lot faster
// (note that magmaCholeskySolve is also slow)
// 'input' is modified in-place, so we need to clone it and replace it with an identity matrix
// for apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
// the 'input' tensor has to be a batch of identity matrices
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
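// 'input' now holds (batched) identity matrices, so the apply_cholesky_solve call below solves
// A X = I and leaves X = A^{-1} in 'input'.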
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take a per-matrix array of ints as its 'info' argument;
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful; if info = -i, the i-th parameter had an illegal value
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
#ifdef USE_CUSOLVER
if (batchCount(result) == 1 || !use_magma_) {
return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
} else {
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
}
#else
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the LU decomposition of an m-by-n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine; the content of 'input', 'pivots', and 'infos' is overwritten.
This is a "looped" variant for calling single input MAGMA function on batched input.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_gpu.
*/
template <typename scalar_t>
static void apply_lu_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
// magmaLu and magmaLuNoPiv require infos and pivots tensor to be on CPU
// the data is later copied back to the appropriate output tensor
Tensor infos_cpu = at::empty_like(infos, infos.options().device(kCPU).pinned_memory(true));
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos_cpu.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
auto pivots_stride = pivots.size(-1);
auto batch_size = batchCount(input);
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
if (compute_pivots) {
Tensor pivots_cpu = at::empty_like(pivots, pivots.options().device(kCPU).pinned_memory(true));
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
int* infos_working_ptr = &infos_data[i];
magmaLu<scalar_t>(m, n, input_working_ptr, leading_dimension, pivots_working_ptr, infos_working_ptr);
}
pivots.copy_(pivots_cpu, /*non_blocking=*/true);
} else {
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* infos_working_ptr = &infos_data[i];
magmaLuNoPiv<scalar_t>(m, n, input_working_ptr, leading_dimension, infos_working_ptr);
}
// fill the pivots tensor with indices using 1-based (Fortran) indexing
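// e.g. with k = min(m, n) = 4 every matrix gets pivots [1, 2, 3, 4], i.e. "no row interchanges"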
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
infos.copy_(infos_cpu, /*non_blocking=*/true);
#endif
}
/*
Computes the LU decomposition of an m-by-n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine; the content of 'input', 'pivots', and 'infos' is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_batched.
*/
template <typename scalar_t>
static void apply_lu_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
magma_int_t batch_size = magma_int_cast(batchCount(input), "batchCount");
// magmaLuBatched doesn't work with zero batch dimensions
// it gives CUDA error: invalid configuration argument
if (batch_size == 0) {
infos.fill_(0);
return;
}
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
scalar_t** input_array;
ALLOCATE_ARRAY(input_array, scalar_t*, batch_size);
// Set up array of pointers to matrices
for (int64_t i = 0; i < batch_size; i++) {
input_array[i] = &input_data[i * input_matrix_stride];
}
MAGMAQueue magma_queue(input.get_device());
if (compute_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_stride = pivots.size(-1);
// fill pivots with ones to avoid memory access violations inside magma kernels
// magmaLuBatched might not set the values for it
// see https://github.com/pytorch/pytorch/pull/53064
pivots.fill_(1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
}
magmaLuBatched<scalar_t>(m, n, input_array, leading_dimension, pivots_array, infos_data, batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(m, n, input_array, leading_dimension, infos_data, batch_size, magma_queue);
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
// block CPU until all operations on the queue are finished
// this explicit sync prevents garbage results from the subsequent magmaLuSolveBatched call from a different queue
magma_queue_sync(magma_queue.get_queue());
#endif
}
static void lu_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma_looped", [&]{
apply_lu_looped_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
}
static void lu_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma_batched", [&]{
apply_lu_batched_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
}
static void apply_lu(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
int64_t batch_size = batchCount(input);
#ifdef USE_CUSOLVER
// Use a heuristic to decide whether cuSOLVER is faster than MAGMA for the given sizes.
auto m = input.size(-2);
// exclude complex128 since nan_to_num_ does not work with it.
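// e.g. a single float 1024x1024 matrix (batch_size == 1) takes the cuSOLVER path, while a batch
// of 32 matrices of size 64x64 falls through to the MAGMA batched path below (assuming
// use_magma_ is true).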
if ((batch_size == 1 || (batch_size <= 8 && m <= 16) || !use_magma_ ) && !input.is_complex()) {
lu_looped_cusolver(input, pivots, infos, compute_pivots);
}
#else
if (batch_size == 1) {
lu_looped_magma(input, pivots, infos, compute_pivots);
}
#endif // USE_CUSOLVER
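// Note: the 'else' below pairs with whichever 'if' survived preprocessing above, so the MAGMA
// batched path is the shared fallback for both configurations.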
else {
lu_batched_magma(input, pivots, infos, compute_pivots);
}
}
REGISTER_DISPATCH(lu_stub, &apply_lu);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t n = magma_int_cast(A.size(-1), "A.size(-1)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
// magmaTriangularSolve calls cuBLAS, which prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use a proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, m);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
// For batch counts of at most 8 and matrices of size at least 64x64, the cuBLAS for-loop is faster than the batched version
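// e.g. 4 matrices of size 256x256 take the cuBLAS for-loop path, while 64 matrices of size
// 32x32 fall through to the batched implementations below.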
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up to 512x512; after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cusolver(result, tau); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
void ormqr_kernel(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
#if defined(USE_CUSOLVER)
ormqr_cusolver(input, tau, other, left, transpose);
#else
TORCH_CHECK(false,
"Calling torch.ormqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(ormqr_stub, &ormqr_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.geqrf on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto input_data = input.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau_cpu = at::empty(tau.sizes(), tau.options().device(at::kCPU).pinned_memory(true));
scalar_t* tau_data = tau_cpu.data_ptr<scalar_t>();
scalar_t* work_data = nullptr; // workspace is not needed for geqrf2_gpu
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
// now compute the actual QR and tau
// MAGMA's geqrf2_gpu function is used; this version has LAPACK-compliant arguments.
magmaGeqrf<scalar_t>(m, n, input_working_ptr, lda, tau_working_ptr, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
tau.copy_(tau_cpu, /*non_blocking=*/true);
#endif
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_magma(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_magma", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
// This is a backend library dispatching helper function for calling looped batch implementation
void geqrf_looped(const Tensor& input, const Tensor& tau) {
#if defined(USE_CUSOLVER)
return geqrf_cusolver(input, tau);
#else
return geqrf_magma(input, tau);
#endif
}
// This is a backend library dispatching helper function for calling specialized batched implementation
void geqrf_batched(const Tensor& input, const Tensor& tau) {
#ifdef CUDART_VERSION
// if cuBLAS is available
return geqrf_batched_cublas(input, tau);
#else
// TODO: implement MAGMA-based path using magma_zgeqrf_expert_batched
return geqrf_looped(input, tau);
#endif
}
void geqrf_kernel(const Tensor& input, const Tensor& tau) {
// If the number of rows is smaller than 32, the batched implementation is always faster for batch size > 1.
// For a larger number of rows the batched path is used only when the batch count is large enough
// relative to the number of rows (see the condition below).
if (input.size(-2) <= 256 && batchCount(input) >= std::max<int64_t>(2, input.size(-2) / 16)) {
return geqrf_batched(input, tau);
} else {
return geqrf_looped(input, tau);
}
}
REGISTER_DISPATCH(geqrf_stub, &geqrf_kernel);
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
checkMagmaInternalError(info, "geqrf");
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
checkMagmaInternalError(info, "orgqr");
}
#endif
}
std::tuple<Tensor, Tensor> linalg_qr_helper_magma(const Tensor& self, c10::string_view mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
auto r_shape = self.sizes().vec();
r_shape.end()[-2] = n_columns_q;
r_shape.end()[-1] = n;
r_working_copy = at::empty(r_shape, self.options());
if (compute_q) {
auto q_shape = q_sizes;
q_shape.end()[-1] = n_columns_q;
q_working_copy = at::zeros(q_shape, self.options());
q_working_copy.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q);
});
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
std::tuple<Tensor, Tensor> _linalg_qr_helper_cuda(const Tensor& input, c10::string_view mode) {
#if defined(USE_CUSOLVER)
// _linalg_qr_helper_default is a generic function that is implemented using
// geqrf_stub and orgqr_stub. It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return _linalg_qr_helper_default(input, mode);
#else
return linalg_qr_helper_magma(input, mode);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
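// syevd computes the eigendecomposition A = V * diag(w) * V^H of a symmetric/Hermitian matrix with
// a divide-and-conquer algorithm; 'jobz' controls whether eigenvectors are computed and 'uplo'
// selects which triangle of the input is referenced.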
template <typename scalar_t>
static void apply_magma_eigh(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
// The current behaviour for linear algebra functions is to raise an error if something goes wrong
// or the input doesn't satisfy some requirement,
// therefore we return early since further computations will be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// the appropriate dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs result is computed on CPU
void linalg_eigh_magma(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
// MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
// See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
// in addition, lda is ignored, which breaks 0x0 inputs
if (eigenvectors.size(-1) > 128) {
// MAGMA requires eigenvalues and infos tensors to reside on CPU
Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
Tensor infos_cpu = infos.to(kCPU);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
eigenvectors.scalar_type(), "linalg_eigh_magma", [&] {
apply_magma_eigh<scalar_t>(
eigenvalues_cpu, eigenvectors, infos_cpu, upper, compute_eigenvectors);
});
// Transfer computed by MAGMA results from CPU to GPU
eigenvalues.copy_(eigenvalues_cpu);
infos.copy_(infos_cpu);
} else { // eigenvectors.size(-1) <= 128
// transfer to CPU, compute the result and copy back to GPU
// this is faster than going through MAGMA that does the same
Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
if (compute_eigenvectors) {
Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
eigenvectors.copy_(eigenvectors_cpu);
} else {
at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
}
eigenvalues.copy_(eigenvalues_cpu);
}
}
void linalg_eigh_kernel(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
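// geev computes the eigenvalues and, optionally, the right eigenvectors of a general square matrix.
// For real input the eigenvalues may be complex, so eig_kernel_impl allocates an n-by-2 tensor of
// (real, imaginary) pairs; for complex input a length-n complex tensor is used.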
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
// call it a 2nd time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for GEEV routine.
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_vec_t jobvl = MagmaNoVec; // only right eigenvectors are computed
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(input);
auto input_matrix_stride = matrixStride(input);
auto values_stride = values.size(-1);
auto input_data = input.data_ptr<scalar_t>();
auto values_data = values.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
scalar_t* lvectors_data = nullptr; // only right eigenvectors are computed
int64_t ldvr = compute_eigenvectors ? lda : 1;
int64_t ldvl = 1;
Tensor rwork;
value_t* rwork_data = nullptr;
if (input.is_complex()) {
ScalarType real_dtype = toValueType(input.scalar_type());
rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
rwork_data = rwork.data_ptr<value_t>();
}
// call magmaEig once to get the optimal size of work_data
scalar_t work_query;
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
Tensor work = at::empty({lwork}, input.dtype());
auto work_data = work.data_ptr<scalar_t>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* values_working_ptr = &values_data[i * values_stride];
scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
int* info_working_ptr = &infos_data[i];
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
}
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place
// tensors should be in batched column major memory format
// the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
// MAGMA doesn't have a GPU interface for the eigendecomposition, which forces us to transfer 'input' to CPU
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
input_working_copy.transpose_(-2, -1); // make input_working_copy to have Fortran contiguous memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
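// gesdd computes the singular value decomposition A = U * diag(S) * VT with a divide-and-conquer
// algorithm. 'jobchar' selects how much of U and VT is computed: 'A' -> full matrices (MagmaAllVec),
// 'S' -> reduced/thin matrices (MagmaSomeVec), 'N' -> singular values only (MagmaNoVec).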
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = jobchar == 'N' ? 1 : matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = jobchar == 'N' ? 1 : matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto lda = std::max<magma_int_t>(1, m);
auto ldvt = std::max<magma_int_t>(1, jobchar == 'N' ? 1 : VT.size(-2));
auto mn = ::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace therefore use 1 as the default value
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = ::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
if (compute_uv) {
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
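// getrs solves op(A) X = B given the LU factorization P*A = L*U computed by getrf (see at::_lu_with_info),
// where op(A) is A, A^T or A^H depending on 'trans'; only row permutations and triangular solves are
// performed, so no additional factorization work is needed.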
#ifdef USE_MAGMA
magma_trans_t _get_magma_trans(char trans) {
switch (trans) {
case 'N':
return MagmaNoTrans;
case 'T':
return MagmaTrans;
case 'C':
return MagmaConjTrans;
default:
return MagmaNoTrans;
}
}
#endif
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a "looped" variant for calling single input MAGMA function on batched input.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_gpu.
*/
template <typename scalar_t>
static void apply_lu_solve_looped_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots, char lapack_trans) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto trans = _get_magma_trans(lapack_trans);
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
// MAGMA requires pivots to be a CPU tensor
Tensor pivots_cpu = pivots.cpu();
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots_cpu.size(-1);
auto batch_size = batchCount(b);
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
int info = 0;
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* b_working_ptr = &b_data[i * b_stride];
scalar_t* lu_working_ptr = &lu_data[i * lu_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
magmaLuSolve<scalar_t>(n, nrhs, lu_working_ptr, leading_dimension, pivots_working_ptr, b_working_ptr, leading_dimension, &info, trans);
// info from magmaLuSolve only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_batched.
*/
template <typename scalar_t>
static void apply_lu_solve_batched_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots, char lapack_trans) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto trans = _get_magma_trans(lapack_trans);
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Compute the result in batches of 65535
// that is the maximum allowed number for batch_size in MAGMA
constexpr int64_t batch_limit = 65535;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = ::min(batch_limit, batch_size - mini_idx);
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
int info;
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, leading_dimension,
pivots_array_cur, b_array_cur, leading_dimension,
info, nbatches, magma_queue, trans);
// info from magmaLuSolveBatched only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
static void lu_solve_batched_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots, char lapack_trans) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_batched_magma", [&]{
apply_lu_solve_batched_magma<scalar_t>(b, lu, pivots, lapack_trans);
});
}
static void lu_solve_looped_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots, char lapack_trans) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_looped_magma", [&]{
apply_lu_solve_looped_magma<scalar_t>(b, lu, pivots, lapack_trans);
});
}
#if defined(USE_CUSOLVER) || defined(CUDART_VERSION)
hipblasOperation_t _get_cublas_trans(char trans) {
switch (trans) {
case 'N':
return HIPBLAS_OP_N;
case 'T':
return HIPBLAS_OP_T;
case 'C':
return HIPBLAS_OP_C;
default:
return HIPBLAS_OP_N;
}
}
#endif
static void lu_solve_trans_dispatch(const Tensor& b, const Tensor& lu, const Tensor& pivots, char trans) {
auto batch_size = batchCount(lu);
auto m = lu.size(-2);
auto b2 = b.size(-1);
bool over_magma_dim_limit = b2 > 1024; // magma implementation of LU solve cannot handle a b tensor with last dim > 1024 (https://bitbucket.org/icl/magma/issues/19/dgesv_batched-dgetrs_batched-fails-for)
// heuristics determined from tests discussed in https://github.com/pytorch/pytorch/pull/59148
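// Dispatch summary: the looped cuSOLVER path is preferred for a single large matrix or small batches
// with wide right-hand sides, the batched cuBLAS path for many small matrices, and batched MAGMA is
// the fallback otherwise.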
#ifdef USE_CUSOLVER
if ((batch_size == 1 && m > 512) || (batch_size <= 8 && over_magma_dim_limit)) {
lu_solve_looped_cusolver(b, lu, pivots, _get_cublas_trans(trans));
}
#else
if (batch_size == 1) {
lu_solve_looped_magma(b, lu, pivots, trans);
}
#endif // ifdef USE_CUSOLVER
#ifdef CUDART_VERSION
else if ((batch_size > 2 && m <= 128) || (batch_size > 8 && over_magma_dim_limit)) {
lu_solve_batched_cublas(b, lu, pivots, _get_cublas_trans(trans));
}
#endif // ifdef CUDART_VERSION
else {
lu_solve_batched_magma(b, lu, pivots, trans);
}
}
REGISTER_DISPATCH(lu_solve_trans_stub, &lu_solve_trans_dispatch);
static void lu_solve_dispatch(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
lu_solve_trans_dispatch(b, lu, pivots, 'N');
}
REGISTER_DISPATCH(lu_solve_stub, &lu_solve_dispatch);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto trans = MagmaNoTrans;
auto m = magma_int_cast(a.size(-2), "m");
auto n = magma_int_cast(a.size(-1), "n");
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA");
auto nrhs = magma_int_cast(b.size(-1), "nrhs");
auto ldda = std::max<magma_int_t>(1, m);
auto lddb = std::max<magma_int_t>(1, ::max(m, n));
auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
auto* hwork_ptr = hwork.data_ptr<scalar_t>();
// MAGMA requires infos tensor to live on CPU
infos = infos.to(at::kCPU);
auto infos_data = infos.data_ptr<magma_int_t>();
batch_iterator_with_broadcasting<scalar_t>(a, b,
[&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
int64_t a_linear_batch_idx) {
magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
magmaGels<scalar_t>(trans, m, n, nrhs,
a_working_ptr, ldda, b_working_ptr, lddb,
hwork_ptr, lwork, infos_working_ptr);
}
);
#endif
}
void gels_magma(const Tensor& a, Tensor& b, Tensor& infos) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_magma", [&] {
apply_gels<scalar_t>(a, b, infos);
});
}
void linalg_lstsq_gels(const Tensor& A, const Tensor& B, const Tensor& infos) {
// The steps for using the QR decomposition for solving least squares problems
// are outlined here https://en.wikipedia.org/wiki/QR_decomposition#Using_for_solution_to_linear_inverse_problems
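// In short: for m >= n factor A = Q*R, then the least squares solution is X = R^{-1} (Q^H B);
// for m < n factor A^H = Q*R, solve R^H Z = B, and the minimum-norm solution is X = Q*Z.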
auto m = A.size(-2);
auto n = A.size(-1);
auto mn = ::min(m, n);
// explicitly broadcast the batch dimensions of A
// TODO: revisit this later to use batch_iterator_with_broadcasting in triangular_solve
IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2);
IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2);
std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes);
auto tau_shape = A.sizes().vec();
tau_shape.pop_back();
tau_shape.back() = mn;
Tensor tau = at::empty(tau_shape, A.options());
if (m >= n) {
// Step 1: compute QR factorization using geqrf
geqrf_kernel(A, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {A.size(-2), A.size(-1)});
Tensor A_expanded = A.expand({A_expand_batch});
bool is_fortran_contiguous = A_expanded.transpose(-2, -1).is_contiguous();
Tensor A_broadcasted = is_fortran_contiguous ? A_expanded : cloneBatchedColumnMajor(A_expanded);
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 2: B <- Q^H B
ormqr_kernel(A_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/true);
// Step 3: solve R X = B
bool upper = true;
bool transpose = false;
bool conjugate_transpose = false;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(A_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
} else { // underdetermined case
Tensor Ah = cloneBatchedColumnMajor(A.conj().transpose(-2, -1));
// Step 1: compute QR factorization of conjugate transpose of A using geqrf
geqrf_kernel(Ah, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {Ah.size(-2), Ah.size(-1)});
Tensor Ah_expanded = Ah.expand({A_expand_batch});
bool is_fortran_contiguous = Ah_expanded.transpose(-2, -1).is_contiguous();
Tensor Ah_broadcasted = is_fortran_contiguous ? Ah_expanded : cloneBatchedColumnMajor(Ah_expanded);
// Step 2: R^H Z = B
bool upper = true;
bool transpose = true;
bool conjugate_transpose = true;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(Ah_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
// B matrix has the size max(m, n) x nrhs
// triangular_solve_kernel writes its output into the first m rows of B leaving the rest untouched
// we need to set the rest of the rows to zero so that the multiplication from step 3 is correct
B.narrow(-2, m, n - m).zero_();
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 3: X <- Q Z
ormqr_kernel(Ah_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/false);
}
}
void gels_looped(const Tensor& a, Tensor& b, Tensor& infos) {
#if defined(USE_CUSOLVER)
// linalg_lstsq_gels is a generic function that is implemented using
// geqrf_stub, ormqr_stub, and triangular_solve_stub
// It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return linalg_lstsq_gels(a, b, infos);
#else
return gels_magma(a, b, infos);
#endif
}
void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& /*rank*/, Tensor& /*singular_values*/, Tensor& infos, double /*rcond*/, std::string /*driver_name*/) {
auto m = a.size(-2);
auto n = a.size(-1);
// first handle the underdetermined case (m < n)
// this case is not supported by MAGMA or cuBLAS
if (m < n) {
#if defined(USE_CUSOLVER)
linalg_lstsq_gels(a, b, infos);
#else
TORCH_CHECK(
false,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA. ",
"Please rebuild with cuSOLVER.");
#endif
} else { // m >= n
#ifndef USE_MAGMA
// MAGMA is not available, so we can either use cuBLAS or cuSOLVER here
// the batched vs looped dispatch is implemented based on the following performance results
// https://github.com/pytorch/pytorch/pull/54725#issuecomment-832234456
if (m <= 256 && batchCount(b) >= std::max<int64_t>(2, m / 16)) {
// if CUDART_VERSION is defined then cuBLAS is available
#ifdef CUDART_VERSION
gels_batched_cublas(a, b, infos);
#else
// this would either call cuSOLVER or MAGMA,
// if MAGMA is called a runtime error is thrown about not finding MAGMA in compilation
gels_looped(a, b, infos);
#endif // CUDART_VERSION
} else {
gels_looped(a, b, infos);
}
#else
// if both MAGMA and cuSOLVER are available this would call cuSOLVER
// MAGMA is called if cuSOLVER is not available
gels_looped(a, b, infos);
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(lstsq_stub, &lstsq_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ legacy_lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
std::tuple<Tensor, Tensor> legacy_lstsq_cuda(const Tensor &B, const Tensor &A) {
TORCH_WARN_ONCE(
"torch.lstsq is deprecated in favor of torch.linalg.lstsq and will be removed in a future PyTorch release.\n",
"torch.linalg.lstsq has reversed arguments and does not return the QR decomposition in "
"the returned tuple (although it returns other information about the problem).\n",
"To get the qr decomposition consider using torch.linalg.qr.\n",
"The returned solution in torch.lstsq stored the residuals of the solution in the ",
"last m - n columns of the returned value whenever m > n. In torch.linalg.lstsq, the ",
"residuals in the field 'residuals' of the returned named tuple.\n",
"The unpacking of the solution, as in\n",
"X, _ = torch.lstsq(B, A).solution[:A.size(1)]\n",
"should be replaced with\n",
"X = torch.linalg.lstsq(A, B).solution"
);
#ifndef USE_MAGMA
TORCH_CHECK(false, "solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
const auto dtype = A.scalar_type();
TORCH_CHECK(B.scalar_type() == dtype, "exepected A and B dtypes to match but found ",
dtype, " and ", B.scalar_type());
TORCH_CHECK(A.numel() > 0 && A.dim() == 2, "A should be (non-empty) 2 dimensional");
TORCH_CHECK(B.numel() > 0 && B.dim() == 2, "B should be (non-empty) 2 dimensional");
auto a_sizes = A.sizes();
auto b_sizes = B.sizes();
TORCH_CHECK(a_sizes[0] == b_sizes[0], "Expected A and b to have same size "
"at dim 0, but A has ", a_sizes[0], " rows and B has ", b_sizes[0], " rows");
TORCH_CHECK(a_sizes[0] >= a_sizes[1], "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
Tensor A_working = cloneBatchedColumnMajor(A);
Tensor B_working = cloneBatchedColumnMajor(B);
int64_t m = a_sizes[0];
int64_t n = a_sizes[1];
int64_t nrhs = b_sizes[1];
int info;
AT_DISPATCH_FLOATING_TYPES(A.scalar_type(), "legacy_lstsq_cuda", [&] {
scalar_t *a_data = A_working.data_ptr<scalar_t>();
scalar_t *b_data = B_working.data_ptr<scalar_t>();
scalar_t wkopt;
magmaGels(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
const auto hwork_size = static_cast<magma_int_t>(wkopt);
scalar_t *hwork = nullptr;
ALLOCATE_ARRAY(hwork, scalar_t, hwork_size);
magmaGels(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, hwork_size, &info);
});
TORCH_CHECK(info == 0, "MAGMA gels : Argument %d : illegal value", -info);
return std::tuple<Tensor, Tensor>(B_working, A_working);
#endif // USE_MAGMA
}
std::tuple<Tensor&, Tensor&> legacy_lstsq_out_cuda(
const Tensor& B, const Tensor& A, Tensor& B_out, Tensor& A_out) {
const auto dtype = A.scalar_type();
TORCH_CHECK(B.scalar_type() == dtype, "exepected A and B dtypes to match but found ",
A.scalar_type(), " and ", B.scalar_type());
TORCH_CHECK(A_out.scalar_type() == dtype, "A_out to have scalar type ", dtype,
" but found", A_out.scalar_type());
TORCH_CHECK(B_out.scalar_type() == dtype, "A_out to have scalar type ", dtype,
" but found", B_out.scalar_type());
Tensor A_tmp, B_tmp;
std::tie(B_tmp, A_tmp) = native::legacy_lstsq_cuda(B, A);
resize_output(A_out, A_tmp.sizes());
A_out.copy_(A_tmp);
resize_output(B_out, B_tmp.sizes());
B_out.copy_(B_tmp);
return std::tuple<Tensor&, Tensor&>(B_out, A_out);
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
| 41b983cd33321897267192a2f01fbe02943ec9b2.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/LinearAlgebra.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info, magma_trans_t trans);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue, magma_trans_t trans);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
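  // the literal 1 passed below is trsm's alpha scaling factor, i.e. solve op(A) * X = 1 * B with no extra scaling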
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
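  // the "v2" variant (magma_dgeqrf2_gpu) does not take the dT workspace array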
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
  // magma [sd]geev wants two separate output arrays: wr and wi for the real
  // and imaginary parts; here 'w' is expected to have room for 2 * n entries
double *wr = w;
double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info, magma_trans_t trans) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(trans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info, magma_trans_t trans) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(trans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info, magma_trans_t trans) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(trans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info, magma_trans_t trans) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(trans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue, magma_trans_t trans) {
info = magma_dgetrs_batched(trans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue, magma_trans_t trans) {
info = magma_sgetrs_batched(trans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue, magma_trans_t trans) {
info = magma_zgetrs_batched(trans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue, magma_trans_t trans) {
info = magma_cgetrs_batched(trans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
namespace {
/*
MAGMA can return errors both as a return value and in the info argument.
The return value and info should always be identical.
In general, the meaning is as given in this table.
Predefined error codes are large negative numbers. Using the symbolic
constants below is preferred, but the numeric values can be found in
include/magma_types.h.
Info | Description
----------- | -----------
info = 0 (MAGMA_SUCCESS) | Successful exit
info < 0, but small | For info = -i, the i-th argument had an illegal value
info > 0 | Function-specific error such as singular matrix
MAGMA_ERR_DEVICE_ALLOC | Could not allocate GPU device memory
MAGMA_ERR_HOST_ALLOC | Could not allocate CPU host memory
MAGMA_ERR_ILLEGAL_VALUE | An argument had an illegal value (deprecated; instead it should return -i to say the i-th argument was bad)
MAGMA_ERR_INVALID_PTR | Can't free pointer
MAGMA_ERR_NOT_IMPLEMENTED | Function or option not implemented
MAGMA_ERR_NOT_SUPPORTED | Function or option not supported on the current architecture
*/
void checkMagmaInternalError(magma_int_t info, const std::string& magma_function_name) {
// if info > 0 the error is function-specific, do nothing in this case
TORCH_CHECK(info >= 0,
"MAGMA error: ",
magma_strerror(info),
", info = ", info,
", when calling ", magma_function_name);
}
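// A typical (illustrative) call site wraps a MAGMA routine and forwards its info
// code along with the routine name, e.g.:
//   magma_int_t info = 0;
//   magmaCholesky<double>(MagmaLower, n, dA, ldda, &info);
//   checkMagmaInternalError(info, "magma_dpotrf_gpu");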
} // anonymous namespace
#endif // USE_MAGMA
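// ALLOCATE_ARRAY allocates 'size' elements of pinned host memory and points 'name'
// at it; the backing storage stays alive via the local 'storage_<name>' variable.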
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
// magmaSolve requires infos tensor to live on CPU
Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, infos.data_ptr<magma_int_t>());
infos_out.copy_(infos);
} else {
auto infos_data = infos_out.data_ptr<magma_int_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
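    // (for illustration: with batch_size = 150000 this loop runs for mini_idx = 0
    //  and 65535, covering 2 * 65535 = 131070 solves, and the tail call below
    //  handles the remaining 150000 % 65535 = 18930 solves)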
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &infos_data[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&infos_data[mini_idx], batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
  // infos might not get filled for empty inputs, therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 as possible
  // The number of "mini"-batches is floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix inversions
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& info_lu, Tensor& info_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
  // magmaLu and magmaGetri require the info argument to live on CPU,
  // but the info_lu and info_getri tensors are on the same device as self
magma_int_t info_lu_cpu = 0;
magma_int_t info_getri_cpu = 0;
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), &info_lu_cpu);
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_getri_cpu);
info_lu.fill_(info_lu_cpu);
info_getri.fill_(info_getri_cpu);
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
    // magmaLu and magmaGetri require the infos tensor to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'apply_single_inverse'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
} else {
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
}
#else
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.cholesky on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// magmaCholesky requires info to be on CPU
magma_int_t info_cpu = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info_cpu);
info.fill_(info_cpu);
} else {
TORCH_INTERNAL_ASSERT(info.is_cuda());
auto info_data = info.data_ptr<magma_int_t>();
// magmaCholeskyBatched supports only upper=false
uplo = MagmaLower;
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
    // Compute as many batches of 262140 as possible
    // 262140 is the size of the largest batch of matrices that can be run
    // without violating the maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
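    // (for illustration: a real-valued input with batch_size = 300000 is processed
    //  in chunks of 262140 and then 37860 matrices)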
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_data[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, nbatches, magma_queue);
}
}
#endif
}
void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) {
Tensor result = input;
if (input.dim() > 2) {
    // MAGMA's batched cholesky operator has an off-by-one error causing an
    // illegal memory access (see https://github.com/pytorch/pytorch/issues/42666).
    // This code is based on the cloneBatchedColumnMajor function, however it pads
    // the input with one extra element, utilizing the fact that the resize_as_
    // method preserves the storage even if it's larger than the new sizes. This
    // way, if MAGMA reads out of bounds, it will still be valid user memory.
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).transpose_(-2, -1);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(result.transpose(-2, -1).is_contiguous());
// batched MAGMA doesn't support upper=true
// we transpose and conjugate the input as a workaround
result.copy_(upper ? input.conj().transpose(-2, -1) : input);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
input.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, upper, info);
});
if (input.dim() > 2) {
    // if upper=true we need to transpose and conjugate the result tensor
    // because the Cholesky decomposition is stored in the lower triangular part
if (upper) {
input.copy_(result.conj().transpose(-2, -1));
} else {
input.copy_(result);
}
}
}
static void cholesky_kernel(const Tensor& input, const Tensor& info, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(input) == 1 || !use_magma_ || use_cusolver_potrf_batched_) {
cholesky_helper_cusolver(input, upper, info);
} else {
cholesky_helper_magma(input, upper, info);
}
#else
cholesky_helper_magma(input, upper, info);
#endif // USE_CUSOLVER
}
REGISTER_DISPATCH(cholesky_stub, &cholesky_kernel)
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite n-by-n matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls lapack routine on CPU
// using magmaCholeskySolveBatched is a lot faster
// note that magmaCholeskySolve is also slow
  // 'input' is modified in-place, so we clone it first and then replace 'input'
  // with an identity matrix for apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
  // the 'input' tensor has to be a batch of identity matrices
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
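  // at this point 'input' holds (a batch of) identity matrices and 'input_working_copy'
  // holds the Cholesky factor, so apply_cholesky_solve below computes inverse(A) = A \ I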
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
  // magma's potrs_batched doesn't take a per-matrix array of ints as the 'info' argument;
  // it returns a single 'magma_int_t':
  // if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
#ifdef USE_CUSOLVER
if (batchCount(result) == 1 || !use_magma_) {
return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
} else {
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
}
#else
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the LU decomposition of an m×n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine; the content of 'input', 'pivots', and 'infos' is overwritten.
This is a "looped" variant that calls the single-matrix MAGMA function once per batch entry.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_gpu.
*/
template <typename scalar_t>
static void apply_lu_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
  // magmaLu and magmaLuNoPiv require the infos and pivots tensors to be on CPU
// the data is later copied back to the appropriate output tensor
Tensor infos_cpu = at::empty_like(infos, infos.options().device(kCPU).pinned_memory(true));
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos_cpu.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
auto pivots_stride = pivots.size(-1);
auto batch_size = batchCount(input);
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
if (compute_pivots) {
Tensor pivots_cpu = at::empty_like(pivots, pivots.options().device(kCPU).pinned_memory(true));
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
int* infos_working_ptr = &infos_data[i];
magmaLu<scalar_t>(m, n, input_working_ptr, leading_dimension, pivots_working_ptr, infos_working_ptr);
}
pivots.copy_(pivots_cpu, /*non_blocking=*/true);
} else {
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* infos_working_ptr = &infos_data[i];
magmaLuNoPiv<scalar_t>(m, n, input_working_ptr, leading_dimension, infos_working_ptr);
}
// fill the pivots tensor with indices using 1-based (Fortran) indexing
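    // (e.g. for a 4x6 input, k = 4 and each pivot row becomes [1, 2, 3, 4],
    //  the identity permutation in LAPACK's convention)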
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
infos.copy_(infos_cpu, /*non_blocking=*/true);
#endif
}
/*
Computes the LU decomposition of an m×n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine; the content of 'input', 'pivots', and 'infos' is overwritten.
This is a specialized batched variant; it is expected to be faster than the "looped" version only for small inputs.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_batched.
*/
template <typename scalar_t>
static void apply_lu_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
magma_int_t batch_size = magma_int_cast(batchCount(input), "batchCount");
// magmaLuBatched doesn't work with zero batch dimensions
// it gives CUDA error: invalid configuration argument
if (batch_size == 0) {
infos.fill_(0);
return;
}
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
scalar_t** input_array;
ALLOCATE_ARRAY(input_array, scalar_t*, batch_size);
// Set up array of pointers to matrices
for (int64_t i = 0; i < batch_size; i++) {
input_array[i] = &input_data[i * input_matrix_stride];
}
MAGMAQueue magma_queue(input.get_device());
if (compute_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_stride = pivots.size(-1);
// fill pivots with ones to avoid memory access violations inside magma kernels
// magmaLuBatched might not set the values for it
// see https://github.com/pytorch/pytorch/pull/53064
pivots.fill_(1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
}
magmaLuBatched<scalar_t>(m, n, input_array, leading_dimension, pivots_array, infos_data, batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(m, n, input_array, leading_dimension, infos_data, batch_size, magma_queue);
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
// block CPU until all operations on the queue are finished
// this explicit sync prevents garbage results from the subsequent magmaLuSolveBatched call from a different queue
magma_queue_sync(magma_queue.get_queue());
#endif
}
static void lu_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma_looped", [&]{
apply_lu_looped_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
}
static void lu_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma_batched", [&]{
apply_lu_batched_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
}
static void apply_lu(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
int64_t batch_size = batchCount(input);
#ifdef USE_CUSOLVER
  // Use a heuristic to decide when cusolver is faster than MAGMA for the following sizes.
auto m = input.size(-2);
// exclude complex128 since nan_to_num_ does not work with it.
if ((batch_size == 1 || (batch_size <= 8 && m <= 16) || !use_magma_ ) && !input.is_complex()) {
lu_looped_cusolver(input, pivots, infos, compute_pivots);
}
#else
if (batch_size == 1) {
lu_looped_magma(input, pivots, infos, compute_pivots);
}
#endif // USE_CUSOLVER
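  // note: this 'else' attaches to whichever 'if' above survived preprocessing
  // (the cusolver heuristic when USE_CUSOLVER is defined, the batch_size == 1 check otherwise)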
else {
lu_batched_magma(input, pivots, infos, compute_pivots);
}
}
REGISTER_DISPATCH(lu_stub, &apply_lu);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t n = magma_int_cast(A.size(-1), "A.size(-1)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  // magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
  // magmaTriangularSolve calls cuBLAS, which prints
  //   ** On entry to DTRSM parameter number 9 had an illegal value
  // so let's use a proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, m);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 as possible
  // The number of "mini"-batches is floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
// For batches smaller than 8 and matrix sizes larger than 64x64 cuBLAS forloop is faster than batched version
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
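// Hedged illustration of the dispatch heuristic above (MAGMA + cuBLAS build; shapes are made up):
//   A.sizes() == {4, 128, 128}    -> batchCount <= 8 && size(-1) >= 64 -> triangular_solve_cublas (forloop)
//   A.sizes() == {64, 128, 128}   -> size(-1) <= 512                   -> triangular_solve_batched_cublas
//   A.sizes() == {64, 1024, 1024} -> size(-1) > 512                    -> triangular_solve_batched_magma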
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cusolver(result, tau); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
void ormqr_kernel(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
#if defined(USE_CUSOLVER)
ormqr_cusolver(input, tau, other, left, transpose);
#else
TORCH_CHECK(false,
"Calling torch.ormqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(ormqr_stub, &ormqr_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.geqrf on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto input_data = input.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau_cpu = at::empty(tau.sizes(), tau.options().device(at::kCPU).pinned_memory(true));
scalar_t* tau_data = tau_cpu.data_ptr<scalar_t>();
scalar_t* work_data = nullptr; // workspace is not needed for geqrf2_gpu
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
// now compute the actual QR and tau
// MAGMA's geqrf2_gpu function is used; this version has LAPACK-compliant arguments.
magmaGeqrf<scalar_t>(m, n, input_working_ptr, lda, tau_working_ptr, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
tau.copy_(tau_cpu, /*non_blocking=*/true);
#endif
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_magma(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_magma", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
// This is a backend library dispatching helper function for calling looped batch implementation
void geqrf_looped(const Tensor& input, const Tensor& tau) {
#if defined(USE_CUSOLVER)
return geqrf_cusolver(input, tau);
#else
return geqrf_magma(input, tau);
#endif
}
// This is a backend library dispatching helper function for calling specialized batched implementation
void geqrf_batched(const Tensor& input, const Tensor& tau) {
#ifdef CUDART_VERSION
// if cuBLAS is available
return geqrf_batched_cublas(input, tau);
#else
// TODO: implement MAGMA-based path using magma_zgeqrf_expert_batched
return geqrf_looped(input, tau);
#endif
}
void geqrf_kernel(const Tensor& input, const Tensor& tau) {
// if the number of rows is smaller than 32, the batched path is always faster for batch size > 1;
// for a larger number of rows the batch-count condition below decides between the two paths
if (input.size(-2) <= 256 && batchCount(input) >= std::max<int64_t>(2, input.size(-2) / 16)) {
return geqrf_batched(input, tau);
} else {
return geqrf_looped(input, tau);
}
}
REGISTER_DISPATCH(geqrf_stub, &geqrf_kernel);
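// Hedged illustration of the dispatch heuristic above (shapes are made up):
//   input.sizes() == {32, 64, 64}   -> 64 <= 256 && 32 >= max(2, 64 / 16) == 4 -> geqrf_batched (cuBLAS)
//   input.sizes() == {1, 1024, 512} -> condition fails                          -> geqrf_looped (cuSOLVER or MAGMA)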
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
checkMagmaInternalError(info, "geqrf");
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
checkMagmaInternalError(info, "orgqr");
}
#endif
}
std::tuple<Tensor, Tensor> linalg_qr_helper_magma(const Tensor& self, c10::string_view mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
auto r_shape = self.sizes().vec();
r_shape.end()[-2] = n_columns_q;
r_shape.end()[-1] = n;
r_working_copy = at::empty(r_shape, self.options());
if (compute_q) {
auto q_shape = q_sizes;
q_shape.end()[-1] = n_columns_q;
q_working_copy = at::zeros(q_shape, self.options());
q_working_copy.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q);
});
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
std::tuple<Tensor, Tensor> _linalg_qr_helper_cuda(const Tensor& input, c10::string_view mode) {
#if defined(USE_CUSOLVER)
// _linalg_qr_helper_default is a generic function that is implemented using
// geqrf_stub and orgqr_stub. It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return _linalg_qr_helper_default(input, mode);
#else
return linalg_qr_helper_magma(input, mode);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_magma_eigh(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
// The current behaviour for linear algebra functions is to raise an error if something goes wrong
// or the input doesn't satisfy some requirement,
// therefore we return early since further computations will be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of the
// appropriate dimensions already on CUDA (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs result is computed on CPU
void linalg_eigh_magma(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
// MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
// See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
// in addition, lda is ignored, which breaks 0x0 inputs
if (eigenvectors.size(-1) > 128) {
// MAGMA requires eigenvalues and infos tensors to reside on CPU
Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
Tensor infos_cpu = infos.to(kCPU);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
eigenvectors.scalar_type(), "linalg_eigh_magma", [&] {
apply_magma_eigh<scalar_t>(
eigenvalues_cpu, eigenvectors, infos_cpu, upper, compute_eigenvectors);
});
// Transfer computed by MAGMA results from CPU to GPU
eigenvalues.copy_(eigenvalues_cpu);
infos.copy_(infos_cpu);
} else { // eigenvectors.size(-1) <= 128
// transfer to CPU, compute the result and copy back to GPU
// this is faster than going through MAGMA that does the same
Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
if (compute_eigenvectors) {
Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
eigenvectors.copy_(eigenvectors_cpu);
} else {
at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
}
eigenvalues.copy_(eigenvalues_cpu);
}
}
void linalg_eigh_kernel(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
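// Hedged usage sketch for the MAGMA branch above (assumes a CUDA build without cuSOLVER; shapes are illustrative):
//   auto A = at::randn({512, 512}, at::device(at::kCUDA).dtype(at::kDouble));
//   A = at::matmul(A, A.transpose(-2, -1));   // make it symmetric
//   auto w_v = at::linalg_eigh(A, "L");       // size(-1) > 128 -> apply_magma_eigh
//   // a 64x64 input would instead take the CPU round-trip branch above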
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and returns CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
// call it a second time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
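// Hedged sketch of the pinned, column-major staging buffer used above (n and dtype are illustrative):
//   int64_t n = 4;
//   auto buf = at::empty_strided({n, n}, {1, n},
//       at::TensorOptions(at::kCPU).dtype(at::kDouble).pinned_memory(true));
//   // buf.stride(0) == 1 and buf.stride(1) == n, i.e. Fortran order as magmaEig expects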
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for GEEV routine.
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_vec_t jobvl = MagmaNoVec; // only right eigenvectors are computed
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(input);
auto input_matrix_stride = matrixStride(input);
auto values_stride = values.size(-1);
auto input_data = input.data_ptr<scalar_t>();
auto values_data = values.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
scalar_t* lvectors_data = nullptr; // only right eigenvectors are computed
int64_t ldvr = compute_eigenvectors ? lda : 1;
int64_t ldvl = 1;
Tensor rwork;
value_t* rwork_data = nullptr;
if (input.is_complex()) {
ScalarType real_dtype = toValueType(input.scalar_type());
rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
rwork_data = rwork.data_ptr<value_t>();
}
// call magmaEig once to get the optimal size of work_data
scalar_t work_query;
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
Tensor work = at::empty({lwork}, input.dtype());
auto work_data = work.data_ptr<scalar_t>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* values_working_ptr = &values_data[i * values_stride];
scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
int* info_working_ptr = &infos_data[i];
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
}
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place
// tensors should be in batched column major memory format
// the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
// MAGMA doesn't have a GPU interface for the eigendecomposition, which forces us to transfer 'input' to CPU
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
input_working_copy.transpose_(-2, -1); // make input_working_copy have a Fortran-contiguous memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = jobchar == 'N' ? 1 : matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = jobchar == 'N' ? 1 :matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto lda = std::max<magma_int_t>(1, m);
auto ldvt = std::max<magma_int_t>(1, jobchar == 'N' ? 1 : VT.size(-2));
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace therefore use 1 as the default value
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
if (compute_uv) {
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#ifdef USE_MAGMA
magma_trans_t _get_magma_trans(char trans) {
switch (trans) {
case 'N':
return MagmaNoTrans;
case 'T':
return MagmaTrans;
case 'C':
return MagmaConjTrans;
default:
return MagmaNoTrans;
}
}
#endif
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a "looped" variant for calling single input MAGMA function on batched input.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_gpu.
*/
template <typename scalar_t>
static void apply_lu_solve_looped_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots, char lapack_trans) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto trans = _get_magma_trans(lapack_trans);
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
// MAGMA requires pivots to be a CPU tensor
Tensor pivots_cpu = pivots.cpu();
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots_cpu.size(-1);
auto batch_size = batchCount(b);
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
int info = 0;
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* b_working_ptr = &b_data[i * b_stride];
scalar_t* lu_working_ptr = &lu_data[i * lu_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
magmaLuSolve<scalar_t>(n, nrhs, lu_working_ptr, leading_dimension, pivots_working_ptr, b_working_ptr, leading_dimension, &info, trans);
// info from magmaLuSolve only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_batched.
*/
template <typename scalar_t>
static void apply_lu_solve_batched_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots, char lapack_trans) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto trans = _get_magma_trans(lapack_trans);
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Compute the result in batches of 65535
// that is the maximum allowed number for batch_size in MAGMA
constexpr int64_t batch_limit = 65535;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
int info;
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, leading_dimension,
pivots_array_cur, b_array_cur, leading_dimension,
info, nbatches, magma_queue, trans);
// info from magmaLuSolveBatched only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
static void lu_solve_batched_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots, char lapack_trans) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_batched_magma", [&]{
apply_lu_solve_batched_magma<scalar_t>(b, lu, pivots, lapack_trans);
});
}
static void lu_solve_looped_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots, char lapack_trans) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_looped_magma", [&]{
apply_lu_solve_looped_magma<scalar_t>(b, lu, pivots, lapack_trans);
});
}
#if defined(USE_CUSOLVER) || defined(CUDART_VERSION)
cublasOperation_t _get_cublas_trans(char trans) {
switch (trans) {
case 'N':
return CUBLAS_OP_N;
case 'T':
return CUBLAS_OP_T;
case 'C':
return CUBLAS_OP_C;
default:
return CUBLAS_OP_N;
}
}
#endif
static void lu_solve_trans_dispatch(const Tensor& b, const Tensor& lu, const Tensor& pivots, char trans) {
auto batch_size = batchCount(lu);
auto m = lu.size(-2);
auto b2 = b.size(-1);
bool over_magma_dim_limit = b2 > 1024; // magma implementation of LU solve cannot handle a b tensor with last dim > 1024 (https://bitbucket.org/icl/magma/issues/19/dgesv_batched-dgetrs_batched-fails-for)
// heuristics determined from tests discussed in https://github.com/pytorch/pytorch/pull/59148
#ifdef USE_CUSOLVER
if ((batch_size == 1 && m > 512) || (batch_size <= 8 && over_magma_dim_limit)) {
lu_solve_looped_cusolver(b, lu, pivots, _get_cublas_trans(trans));
}
#else
if (batch_size == 1) {
lu_solve_looped_magma(b, lu, pivots, trans);
}
#endif // ifdef USE_CUSOLVER
#ifdef CUDART_VERSION
else if ((batch_size > 2 && m <= 128) || (batch_size > 8 && over_magma_dim_limit)) {
lu_solve_batched_cublas(b, lu, pivots, _get_cublas_trans(trans));
}
#endif // ifdef CUDART_VERSION
else {
lu_solve_batched_magma(b, lu, pivots, trans);
}
}
REGISTER_DISPATCH(lu_solve_trans_stub, &lu_solve_trans_dispatch);
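// Hedged illustration of the dispatch heuristic above (cuSOLVER + cuBLAS build; shapes are made up):
//   lu: {1, 1024, 1024},  b: {1, 1024, 8}   -> batch_size == 1 && m > 512 -> lu_solve_looped_cusolver
//   lu: {16, 64, 64},     b: {16, 64, 4}    -> batch_size > 2 && m <= 128 -> lu_solve_batched_cublas
//   lu: {16, 2048, 2048}, b: {16, 2048, 16} -> neither condition holds    -> lu_solve_batched_magma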
static void lu_solve_dispatch(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
lu_solve_trans_dispatch(b, lu, pivots, 'N');
}
REGISTER_DISPATCH(lu_solve_stub, &lu_solve_dispatch);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto trans = MagmaNoTrans;
auto m = magma_int_cast(a.size(-2), "m");
auto n = magma_int_cast(a.size(-1), "n");
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA");
auto nrhs = magma_int_cast(b.size(-1), "nrhs");
auto ldda = std::max<magma_int_t>(1, m);
auto lddb = std::max<magma_int_t>(1, std::max(m, n));
auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
auto* hwork_ptr = hwork.data_ptr<scalar_t>();
// MAGMA requires infos tensor to live on CPU
infos = infos.to(at::kCPU);
auto infos_data = infos.data_ptr<magma_int_t>();
batch_iterator_with_broadcasting<scalar_t>(a, b,
[&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
int64_t a_linear_batch_idx) {
magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
magmaGels<scalar_t>(trans, m, n, nrhs,
a_working_ptr, ldda, b_working_ptr, lddb,
hwork_ptr, lwork, infos_working_ptr);
}
);
#endif
}
void gels_magma(const Tensor& a, Tensor& b, Tensor& infos) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_magma", [&] {
apply_gels<scalar_t>(a, b, infos);
});
}
void linalg_lstsq_gels(const Tensor& A, const Tensor& B, const Tensor& infos) {
// The steps for using the QR decomposition for solving least squares problems
// are outlined here https://en.wikipedia.org/wiki/QR_decomposition#Using_for_solution_to_linear_inverse_problems
auto m = A.size(-2);
auto n = A.size(-1);
auto mn = std::min(m, n);
// explicitly broadcast the batch dimensions of A
// TODO: revisit this later to use batch_iterator_with_broadcasting in triangular_solve
IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2);
IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2);
std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes);
auto tau_shape = A.sizes().vec();
tau_shape.pop_back();
tau_shape.back() = mn;
Tensor tau = at::empty(tau_shape, A.options());
if (m >= n) {
// Step 1: compute QR factorization using geqrf
geqrf_kernel(A, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {A.size(-2), A.size(-1)});
Tensor A_expanded = A.expand({A_expand_batch});
bool is_fortran_contiguous = A_expanded.transpose(-2, -1).is_contiguous();
Tensor A_broadcasted = is_fortran_contiguous ? A_expanded : cloneBatchedColumnMajor(A_expanded);
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 2: B <- Q^H B
ormqr_kernel(A_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/true);
// Step 3: solve R X = B
bool upper = true;
bool transpose = false;
bool conjugate_transpose = false;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(A_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
} else { // underdetermined case
Tensor Ah = cloneBatchedColumnMajor(A.conj().transpose(-2, -1));
// Step 1: compute QR factorization of conjugate transpose of A using geqrf
geqrf_kernel(Ah, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {Ah.size(-2), Ah.size(-1)});
Tensor Ah_expanded = Ah.expand({A_expand_batch});
bool is_fortran_contiguous = Ah_expanded.transpose(-2, -1).is_contiguous();
Tensor Ah_broadcasted = is_fortran_contiguous ? Ah_expanded : cloneBatchedColumnMajor(Ah_expanded);
// Step 2: R^H Z = B
bool upper = true;
bool transpose = true;
bool conjugate_transpose = true;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(Ah_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
// B matrix has the size max(m, n) x nrhs
// triangular_solve_kernel writes its output into the first m rows of B leaving the rest untouched
// we need to set the rest of the rows to zero so that the multiplication from step 3 is correct
B.narrow(-2, m, n - m).zero_();
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 3: X <- Q Z
ormqr_kernel(Ah_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/false);
}
}
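// Hedged sketch of the two branches above (notation only, not executable):
//   m >= n: A = Q R (geqrf), B <- Q^H B (ormqr), solve R X = B (triangular_solve),
//           so X minimizes ||A X - B||_F over all X.
//   m < n:  A^H = Q R (geqrf on A^H), solve R^H Z = B, zero-pad Z, X <- Q Z (ormqr),
//           which yields the minimum-norm solution of the underdetermined system.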
void gels_looped(const Tensor& a, Tensor& b, Tensor& infos) {
#if defined(USE_CUSOLVER)
// linalg_lstsq_gels is a generic function that is implemented using
// geqrf_stub, ormqr_stub, and triangular_solve_stub
// It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return linalg_lstsq_gels(a, b, infos);
#else
return gels_magma(a, b, infos);
#endif
}
void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& /*rank*/, Tensor& /*singular_values*/, Tensor& infos, double /*rcond*/, std::string /*driver_name*/) {
auto m = a.size(-2);
auto n = a.size(-1);
// first handle the underdetermined case (m < n)
// this case is not supported by MAGMA or cuBLAS
if (m < n) {
#if defined(USE_CUSOLVER)
linalg_lstsq_gels(a, b, infos);
#else
TORCH_CHECK(
false,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA. ",
"Please rebuild with cuSOLVER.");
#endif
} else { // m >= n
#ifndef USE_MAGMA
// MAGMA is not available, so we can either use cuBLAS or cuSOLVER here
// the batched vs looped dispatch is implemented based on the following performance results
// https://github.com/pytorch/pytorch/pull/54725#issuecomment-832234456
if (m <= 256 && batchCount(b) >= std::max<int64_t>(2, m / 16)) {
// if CUDART_VERSION is defined then cuBLAS is available
#ifdef CUDART_VERSION
gels_batched_cublas(a, b, infos);
#else
// this would either call cuSOLVER or MAGMA,
// if MAGMA is called a runtime error is thrown about not finding MAGMA in compilation
gels_looped(a, b, infos);
#endif // CUDART_VERSION
} else {
gels_looped(a, b, infos);
}
#else
// if both MAGMA and cuSOLVER are available this would call cuSOLVER
// MAGMA is called if cuSOLVER is not available
gels_looped(a, b, infos);
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(lstsq_stub, &lstsq_kernel);
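// Hedged illustration of the dispatch heuristic above (shapes are made up):
//   a: {16, 128, 32}, b: {16, 128, 4} -> m <= 256 && batchCount >= max(2, m / 16) == 8
//                                        -> gels_batched_cublas in a build without MAGMA
//   a: {1, 4096, 512}                 -> otherwise -> gels_looped (cuSOLVER if available, else MAGMA)
//   with MAGMA compiled in, the m >= n case always goes through gels_looped.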
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ legacy_lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
std::tuple<Tensor, Tensor> legacy_lstsq_cuda(const Tensor &B, const Tensor &A) {
TORCH_WARN_ONCE(
"torch.lstsq is deprecated in favor of torch.linalg.lstsq and will be removed in a future PyTorch release.\n",
"torch.linalg.lstsq has reversed arguments and does not return the QR decomposition in "
"the returned tuple (although it returns other information about the problem).\n",
"To get the qr decomposition consider using torch.linalg.qr.\n",
"The returned solution in torch.lstsq stored the residuals of the solution in the ",
"last m - n columns of the returned value whenever m > n. In torch.linalg.lstsq, the ",
"residuals in the field 'residuals' of the returned named tuple.\n",
"The unpacking of the solution, as in\n",
"X, _ = torch.lstsq(B, A).solution[:A.size(1)]\n",
"should be replaced with\n",
"X = torch.linalg.lstsq(A, B).solution"
);
#ifndef USE_MAGMA
TORCH_CHECK(false, "solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
const auto dtype = A.scalar_type();
TORCH_CHECK(B.scalar_type() == dtype, "exepected A and B dtypes to match but found ",
dtype, " and ", B.scalar_type());
TORCH_CHECK(A.numel() > 0 && A.dim() == 2, "A should be (non-empty) 2 dimensional");
TORCH_CHECK(B.numel() > 0 && B.dim() == 2, "B should be (non-empty) 2 dimensional");
auto a_sizes = A.sizes();
auto b_sizes = B.sizes();
TORCH_CHECK(a_sizes[0] == b_sizes[0], "Expected A and b to have same size "
"at dim 0, but A has ", a_sizes[0], " rows and B has ", b_sizes[0], " rows");
TORCH_CHECK(a_sizes[0] >= a_sizes[1], "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
Tensor A_working = cloneBatchedColumnMajor(A);
Tensor B_working = cloneBatchedColumnMajor(B);
int64_t m = a_sizes[0];
int64_t n = a_sizes[1];
int64_t nrhs = b_sizes[1];
int info;
AT_DISPATCH_FLOATING_TYPES(A.scalar_type(), "legacy_lstsq_cuda", [&] {
scalar_t *a_data = A_working.data_ptr<scalar_t>();
scalar_t *b_data = B_working.data_ptr<scalar_t>();
scalar_t wkopt;
magmaGels(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
const auto hwork_size = static_cast<magma_int_t>(wkopt);
scalar_t *hwork = nullptr;
ALLOCATE_ARRAY(hwork, scalar_t, hwork_size);
magmaGels(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, hwork_size, &info);
});
TORCH_CHECK(info == 0, "MAGMA gels : Argument %d : illegal value", -info);
return std::tuple<Tensor, Tensor>(B_working, A_working);
#endif // USE_MAGMA
}
std::tuple<Tensor&, Tensor&> legacy_lstsq_out_cuda(
const Tensor& B, const Tensor& A, Tensor& B_out, Tensor& A_out) {
const auto dtype = A.scalar_type();
TORCH_CHECK(B.scalar_type() == dtype, "exepected A and B dtypes to match but found ",
A.scalar_type(), " and ", B.scalar_type());
TORCH_CHECK(A_out.scalar_type() == dtype, "Expected A_out to have scalar type ", dtype,
" but found ", A_out.scalar_type());
TORCH_CHECK(B_out.scalar_type() == dtype, "Expected B_out to have scalar type ", dtype,
" but found ", B_out.scalar_type());
Tensor A_tmp, B_tmp;
std::tie(B_tmp, A_tmp) = native::legacy_lstsq_cuda(B, A);
resize_output(A_out, A_tmp.sizes());
A_out.copy_(A_tmp);
resize_output(B_out, B_tmp.sizes());
B_out.copy_(B_tmp);
return std::tuple<Tensor&, Tensor&>(B_out, A_out);
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
ca9e94a0fb866f86272e2f9ef813dc745dfe05ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/DeviceGuard.h>
#include <THH/THHAtomics.cuh>
#include <cmath>
#include <vector>
#define WITH_CUDA
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
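// Worked example (numbers are illustrative): with CUDA_NUM_THREADS == 1024,
// GET_BLOCKS(3000) == (3000 + 1023) / 1024 == 3 blocks, and the result is capped at kMaxGridNum == 65535.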
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
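// Worked example (values are illustrative): for h == 1.25 and w == 2.5 the four neighbours are
// (1,2), (1,3), (2,2), (2,3); with lh == 0.25, lw == 0.5 the weights are
// w1 == 0.375, w2 == 0.375, w3 == 0.125, w4 == 0.125, which sum to 1.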
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err));
}
}
void modulated_deform_conv_cuda_forward(
at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones,
at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns,
int kernel_h, int kernel_w, const int stride_h, const int stride_w,
const int pad_h, const int pad_w, const int dilation_h,
const int dilation_w, const int group, const int deformable_group,
const bool with_bias)
{
TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
at::DeviceGuard guard(input.device());
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).",
kernel_h_, kernel_w, kernel_h_, kernel_w_);
if (channels != channels_kernel * group)
AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).",
channels, channels_kernel * group);
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
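// Worked example (numbers are illustrative): height == 64, pad_h == 1, dilation_h == 1,
// kernel_h == 3, stride_h == 1 gives height_out == (64 + 2 - 3) / 1 + 1 == 64,
// i.e. a "same"-sized output for a 3x3 kernel with padding 1 and stride 1.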
// if (ones.ndimension() != 2 ||
// ones.size(0) * ones.size(1) < height_out * width_out)
// {
// Resize plane and fill with ones...
ones = at::ones({height_out, width_out}, input.options());
// }
// resize output
output = output.view({batch, channels_out, height_out, width_out}).zero_();
// resize temporary columns
columns =
at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out},
input.options());
output = output.view({output.size(0), group, output.size(1) / group,
output.size(2), output.size(3)});
for (int b = 0; b < batch; b++)
{
modulated_deformable_im2col_cuda(
input[b], offset[b], mask[b], 1, channels, height, width, height_out,
width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group, columns);
// divide into group
weight = weight.view({group, weight.size(0) / group, weight.size(1),
weight.size(2), weight.size(3)});
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
for (int g = 0; g < group; g++)
{
output[b][g] = output[b][g]
.flatten(1)
.addmm_(weight[g].flatten(1), columns[g])
.view_as(output[b][g]);
}
weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
weight.size(3), weight.size(4)});
columns =
columns.view({columns.size(0) * columns.size(1), columns.size(2)});
}
output = output.view({output.size(0), output.size(1) * output.size(2),
output.size(3), output.size(4)});
if (with_bias)
{
output += bias.view({1, bias.size(0), 1, 1});
}
}
void modulated_deform_conv_forward(
at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones,
at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns,
int kernel_h, int kernel_w, const int stride_h, const int stride_w,
const int pad_h, const int pad_w, const int dilation_h,
const int dilation_w, const int group, const int deformable_group,
const bool with_bias)
{
if (input.device().is_cuda())
{
#ifdef WITH_CUDA
return modulated_deform_conv_cuda_forward(input, weight, bias, ones,
offset, mask, output, columns, kernel_h, kernel_w, stride_h,
stride_w, pad_h, pad_w, dilation_h, dilation_w, group,
deformable_group, with_bias);
#else
AT_ERROR("modulated deform conv is not compiled with GPU support");
#endif
}
AT_ERROR("modulated deform conv is not implemented on CPU");
}
//nvcc -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -isystem /usr/include/python3.6m/ -isystem /usr/local/lib/python3.6/dist-packages/torch/include -isystem /usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -L /usr/local/lib/python3.6/dist-packages/torch/lib/ -lc10 -ltorch -ltorch_cpu -ltorch_python -lcudart -lc10_cuda -ltorch_cuda deform_conv.cu
| ca9e94a0fb866f86272e2f9ef813dc745dfe05ed.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/DeviceGuard.h>
#include <THC/THCAtomics.cuh>
#include <cmath>
#include <vector>
#define WITH_CUDA
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
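/*
 * Illustration (not part of the original source): a minimal sketch of how the
 * CUDA_KERNEL_LOOP macro and the GET_BLOCKS helper above are typically combined.
 * The kernel name and launch below are hypothetical and kept compiled-out.
 */
#if 0
__global__ void example_scale_kernel(float *data, float alpha, int n)
{
  // grid-stride loop: each thread covers indices i, i + blockDim.x*gridDim.x, ...
  CUDA_KERNEL_LOOP(i, n) { data[i] *= alpha; }
}
// launch: example_scale_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(d_data, 2.f, n);
#endif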
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
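// The weights above implement standard bilinear interpolation:
//   val = (1-lh)(1-lw)*v1 + (1-lh)*lw*v2 + lh*(1-lw)*v3 + lh*lw*v4,
// where (lh, lw) are the fractional offsets of (h, w) inside the cell
// [h_low, h_low+1] x [w_low, w_low+1]; corners that fall outside the image
// contribute 0. For example h = 1.25, w = 2.5 gives lh = 0.25, lw = 0.5 and
// weights 0.375, 0.375, 0.125, 0.125 for v1..v4.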
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index: linear index into the output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
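/*
 * Layout note: each thread above writes one column entry per (i, j) kernel tap,
 * so data_col ends up shaped (channels * kernel_h * kernel_w) x
 * (batch_size * height_col * width_col), with the sampling position of every tap
 * shifted by the learned (offset_h, offset_w) and scaled by the modulation mask.
 * This matches the `columns` tensor allocated by the host code below.
 */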
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deform_conv_cuda_forward(
at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones,
at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns,
int kernel_h, int kernel_w, const int stride_h, const int stride_w,
const int pad_h, const int pad_w, const int dilation_h,
const int dilation_w, const int group, const int deformable_group,
const bool with_bias)
{
TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
at::DeviceGuard guard(input.device());
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).",
kernel_h_, kernel_w, kernel_h_, kernel_w_);
if (channels != channels_kernel * group)
AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).",
channels, channels_kernel * group);
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
// if (ones.ndimension() != 2 ||
// ones.size(0) * ones.size(1) < height_out * width_out)
// {
// Resize plane and fill with ones...
ones = at::ones({height_out, width_out}, input.options());
// }
// resize output
output = output.view({batch, channels_out, height_out, width_out}).zero_();
// resize temporary columns
columns =
at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out},
input.options());
output = output.view({output.size(0), group, output.size(1) / group,
output.size(2), output.size(3)});
for (int b = 0; b < batch; b++)
{
modulated_deformable_im2col_cuda(
input[b], offset[b], mask[b], 1, channels, height, width, height_out,
width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group, columns);
// divide into group
weight = weight.view({group, weight.size(0) / group, weight.size(1),
weight.size(2), weight.size(3)});
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
for (int g = 0; g < group; g++)
{
output[b][g] = output[b][g]
.flatten(1)
.addmm_(weight[g].flatten(1), columns[g])
.view_as(output[b][g]);
}
weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
weight.size(3), weight.size(4)});
columns =
columns.view({columns.size(0) * columns.size(1), columns.size(2)});
}
output = output.view({output.size(0), output.size(1) * output.size(2),
output.size(3), output.size(4)});
if (with_bias)
{
output += bias.view({1, bias.size(0), 1, 1});
}
}
void modulated_deform_conv_forward(
at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones,
at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns,
int kernel_h, int kernel_w, const int stride_h, const int stride_w,
const int pad_h, const int pad_w, const int dilation_h,
const int dilation_w, const int group, const int deformable_group,
const bool with_bias)
{
if (input.device().is_cuda())
{
#ifdef WITH_CUDA
return modulated_deform_conv_cuda_forward(input, weight, bias, ones,
offset, mask, output, columns, kernel_h, kernel_w, stride_h,
stride_w, pad_h, pad_w, dilation_h, dilation_w, group,
deformable_group, with_bias);
#else
AT_ERROR("modulated deform conv is not compiled with GPU support");
#endif
}
AT_ERROR("modulated deform conv is not implemented on CPU");
}
//nvcc -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -isystem /usr/include/python3.6m/ -isystem /usr/local/lib/python3.6/dist-packages/torch/include -isystem /usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include -L /usr/local/lib/python3.6/dist-packages/torch/lib/ -lc10 -ltorch -ltorch_cpu -ltorch_python -lcudart -lc10_cuda -ltorch_cuda deform_conv.cu
|
20bb7ea2dc37031e6cbcd86ea42dcc5eeb798583.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
void init_line(void)
{
int i, j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 1; j <= tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
/* Initialize old values array */
for (i = 1; i <= tpoints; i++)
oldval[i] = values[i];
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
void do_math(int i)
{
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
__device__ inline unsigned global_thread_id() {
/* Get global thread idx */
return blockIdx.x * blockDim.x + threadIdx.x;
}
__global__ void update_parallel(float *t_values, int nsteps, int tpoints) {
float l_value, lo_value, ln_value;
unsigned idx = global_thread_id();
/* Initialize */
lo_value = l_value = sin((2.0 * PI) * ((float)idx / (float)(tpoints - 1)));
for (int i = 0; i < nsteps; ++i) {
/* Calculate Math */
ln_value = 1.82 * l_value - lo_value;
lo_value = l_value;
l_value = ln_value;
}
if (idx == 0 || idx == tpoints - 1) {
t_values[idx] = 0;
} else if (idx < tpoints - 1 && idx > 0) {
t_values[idx] = l_value;
}
}
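/*
 * Note on the constant 1.82 above: with the parameters used in do_math() above
 * (dtime = 0.3, c = 1.0, dx = 1.0) we get tau = c*dtime/dx = 0.3 and sqtau = 0.09,
 * so
 *   newval = 2*v - old + sqtau*(-2.0)*v = (2 - 0.18)*v - old = 1.82*v - old,
 * which is the closed-form update each thread iterates nsteps times.
 */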
void update()
{
int i, j;
/* Update values for each time step */
for (i = 1; i<= nsteps; i++) {
/* Update points along line for this time step */
for (j = 1; j <= tpoints; j++) {
/* global endpoints */
if ((j == 1) || (j == tpoints))
newval[j] = 0.0;
else
do_math(j);
}
/* Update old values with new values */
for (j = 1; j <= tpoints; j++) {
oldval[j] = values[j];
values[j] = newval[j];
}
}
}
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 0; i < tpoints; i++) {
printf("%6.4f ", values[i]);
if (i % 10 == 9)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
float *t_values;
hipMalloc(&t_values, sizeof(values));
printf("Initializing points on the line...\n");
//init_line();
printf("Updating all points for all time steps...\n");
//update();
hipLaunchKernelGGL(( update_parallel), dim3(((tpoints + 1023) >> 10)), dim3(1024), 0, 0, t_values, nsteps, tpoints);
hipMemcpy(values, t_values, sizeof(values), hipMemcpyDeviceToHost);
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
}
| 20bb7ea2dc37031e6cbcd86ea42dcc5eeb798583.cu | /**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
void init_line(void)
{
int i, j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 1; j <= tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
/* Initialize old values array */
for (i = 1; i <= tpoints; i++)
oldval[i] = values[i];
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
void do_math(int i)
{
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
__device__ inline unsigned global_thread_id() {
/* Get global thread idx */
return blockIdx.x * blockDim.x + threadIdx.x;
}
__global__ void update_parallel(float *t_values, int nsteps, int tpoints) {
float l_value, lo_value, ln_value;
unsigned idx = global_thread_id();
/* Initialize */
lo_value = l_value = sin((2.0 * PI) * ((float)idx / (float)(tpoints - 1)));
for (int i = 0; i < nsteps; ++i) {
/* Calculate Math */
ln_value = 1.82 * l_value - lo_value;
lo_value = l_value;
l_value = ln_value;
}
if (idx == 0 || idx == tpoints - 1) {
t_values[idx] = 0;
} else if (idx < tpoints - 1 && idx > 0) {
t_values[idx] = l_value;
}
}
void update()
{
int i, j;
/* Update values for each time step */
for (i = 1; i<= nsteps; i++) {
/* Update points along line for this time step */
for (j = 1; j <= tpoints; j++) {
/* global endpoints */
if ((j == 1) || (j == tpoints))
newval[j] = 0.0;
else
do_math(j);
}
/* Update old values with new values */
for (j = 1; j <= tpoints; j++) {
oldval[j] = values[j];
values[j] = newval[j];
}
}
}
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 0; i < tpoints; i++) {
printf("%6.4f ", values[i]);
if (i % 10 == 9)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
float *t_values;
cudaMalloc(&t_values, sizeof(values));
printf("Initializing points on the line...\n");
//init_line();
printf("Updating all points for all time steps...\n");
//update();
update_parallel<<<((tpoints + 1023) >> 10), 1024>>>(t_values, nsteps, tpoints);
cudaMemcpy(values, t_values, sizeof(values), cudaMemcpyDeviceToHost);
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
}
|
28e1a45670eeaa4ed846014ee2a106d987712d4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "ATen/Dispatch.h"
#include "ATen/ExpandUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/hip/HIPApplyUtils.cuh"
#include "ATen/AccumulateType.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <nvfunctional>
#include "ATen/native/Distributions.h"
#include <THH/THHGeneral.h>
#include <THH/THHTensorRandom.h>
#include <THH/THHGenerator.hpp>
#include <THH/THHApply.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
THCGenerator* THCRandom_getGenerator(THCState* state);
namespace {
// increment should be at least the number of hiprand() random numbers used in
// each thread.
std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) {
auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState());
uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment);
return std::make_pair(gen_->state.initial_seed, offset);
}
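/*
 * Offset bookkeeping: fetch_add reserves `increment` Philox draws per thread for
 * this launch, so each caller below passes a per-thread budget meant to cover the
 * hiprand calls its kernel makes (e.g. 20 for the poisson kernel, 10 for the gamma
 * and bernoulli kernels), keeping successive launches from reusing the same
 * random subsequence.
 */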
template <typename scalar_t>
void poisson_cuda_kernel(
at::Tensor& ret,
const at::Tensor& lambda,
std::pair<uint64_t, uint64_t> seeds) {
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
ret,
lambda,
[seeds] __device__(
scalar_t & ret_val, const scalar_t& lambda) {
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
ret_val = static_cast<scalar_t>(hiprand_poisson(&state, lambda));
});
}
template <typename scalar_t>
void gamma_cuda_kernel(
at::Tensor& ret,
const at::Tensor& alpha,
std::pair<uint64_t, uint64_t> seeds) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
ret,
alpha,
[seeds] __device__(
scalar_t & ret_val, const scalar_t& alpha) {
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
BaseSampler<accscalar_t> standard_uniform([&state] __device__ () {
return hiprand_uniform(&state);
});
BaseSampler<accscalar_t> standard_normal([&state] __device__ () {
return hiprand_normal(&state);
});
auto sample = sample_gamma<scalar_t, accscalar_t>(alpha, standard_uniform, standard_normal);
auto min_value = std::numeric_limits<scalar_t>::lowest();
ret_val = (min_value > sample) ? min_value : sample;
});
}
template <typename scalar_t>
void gamma_grad_cuda_kernel(
at::Tensor& ret,
const at::Tensor& self,
const at::Tensor& output) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
ret, self, output,
[] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) {
ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
});
}
template<typename scalar_t, typename prob_t>
void bernoulli_tensor_cuda_kernel(
at::Tensor& ret, const at::Tensor& p,
std::pair<uint64_t, uint64_t> seeds) {
// The template argument `4` below indicates that we want to operate on four
// elements at a time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>(
ret, p,
[seeds] __device__(
int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
float4 rand = hiprand_uniform4(&state);
switch (n) {
case 4: {
assert(0 <= p4 && p4 <= 1);
v4 = static_cast<scalar_t>(rand.w <= p4);
// fallthrough
}
case 3: {
assert(0 <= p3 && p3 <= 1);
v3 = static_cast<scalar_t>(rand.z <= p3);
// fallthrough
}
case 2: {
assert(0 <= p2 && p2 <= 1);
v2 = static_cast<scalar_t>(rand.y <= p2);
// fallthrough
}
case 1: {
assert(0 <= p1 && p1 <= 1);
v1 = static_cast<scalar_t>(rand.x <= p1);
}
}
}
);
}
template<typename scalar_t>
void bernoulli_scalar_cuda_kernel(
at::Tensor& ret, double p_,
std::pair<uint64_t, uint64_t> seeds) {
float p = static_cast<float>(p_);
// The template argument `4` below indicates that we want to operate on four
// elements at a time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
at::cuda::CUDA_tensor_apply1<scalar_t, 4>(
ret, [seeds, p] __device__(
int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4) {
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
float4 rand = hiprand_uniform4(&state);
switch (n) {
case 4: {
v4 = static_cast<scalar_t>(rand.w <= p);
// fallthrough
}
case 3: {
v3 = static_cast<scalar_t>(rand.z <= p);
// fallthrough
}
case 2: {
v2 = static_cast<scalar_t>(rand.y <= p);
// fallthrough
}
case 1: {
v1 = static_cast<scalar_t>(rand.x <= p);
}
}
}
);
}
} // namespace
namespace at { namespace native {
Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen) {
Tensor ret = at::empty(lambda.sizes(), lambda.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "poisson", [&] {
poisson_cuda_kernel<scalar_t>(ret, lambda, next_philox_seed(gen, 20));
});
return ret;
}
Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen) {
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "gamma", [&] {
gamma_cuda_kernel<scalar_t>(ret, alpha, next_philox_seed(gen, 10));
});
return ret;
}
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
Tensor ret = at::empty(self.sizes(), self.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "_standard_gamma_grad", [&] {
gamma_grad_cuda_kernel<scalar_t>(ret, self, output);
});
return ret;
}
Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen) {
auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA)));
AT_DISPATCH_ALL_TYPES_AND_HALF(self.type(), "bernoulli_tensor_cuda_self_", [&] {
const at::Type& p_type = p.type();
using self_t = scalar_t;
auto seeds = next_philox_seed(gen, 10);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(p.type(), "bernoulli_tensor_cuda_p_", [&] {
using p_t = scalar_t;
return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, seeds);
});
});
return self;
}
Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) {
AT_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
AT_DISPATCH_ALL_TYPES(self.type(), "bernoulli_scalar_cuda_", [&] {
auto seeds = next_philox_seed(gen, 10);
bernoulli_scalar_cuda_kernel<scalar_t>(self, p, seeds);
});
return self;
}
}} // namespace at::native
| 28e1a45670eeaa4ed846014ee2a106d987712d4d.cu | #include "ATen/Dispatch.h"
#include "ATen/ExpandUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
#include "ATen/AccumulateType.h"
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <nvfunctional>
#include "ATen/native/Distributions.h"
#include <THC/THCGeneral.h>
#include <THC/THCTensorRandom.h>
#include <THC/THCGenerator.hpp>
#include <THC/THCApply.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
THCGenerator* THCRandom_getGenerator(THCState* state);
namespace {
// increment should be at least the number of curand() random numbers used in
// each thread.
std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) {
auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState());
uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment);
return std::make_pair(gen_->state.initial_seed, offset);
}
template <typename scalar_t>
void poisson_cuda_kernel(
at::Tensor& ret,
const at::Tensor& lambda,
std::pair<uint64_t, uint64_t> seeds) {
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
ret,
lambda,
[seeds] __device__(
scalar_t & ret_val, const scalar_t& lambda) {
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
ret_val = static_cast<scalar_t>(curand_poisson(&state, lambda));
});
}
template <typename scalar_t>
void gamma_cuda_kernel(
at::Tensor& ret,
const at::Tensor& alpha,
std::pair<uint64_t, uint64_t> seeds) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
ret,
alpha,
[seeds] __device__(
scalar_t & ret_val, const scalar_t& alpha) {
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
BaseSampler<accscalar_t> standard_uniform([&state] __device__ () {
return curand_uniform(&state);
});
BaseSampler<accscalar_t> standard_normal([&state] __device__ () {
return curand_normal(&state);
});
auto sample = sample_gamma<scalar_t, accscalar_t>(alpha, standard_uniform, standard_normal);
auto min_value = std::numeric_limits<scalar_t>::lowest();
ret_val = (min_value > sample) ? min_value : sample;
});
}
template <typename scalar_t>
void gamma_grad_cuda_kernel(
at::Tensor& ret,
const at::Tensor& self,
const at::Tensor& output) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
ret, self, output,
[] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) {
ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
});
}
template<typename scalar_t, typename prob_t>
void bernoulli_tensor_cuda_kernel(
at::Tensor& ret, const at::Tensor& p,
std::pair<uint64_t, uint64_t> seeds) {
// The template argument `4` below indicates that we want to operate on four
// elements at a time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>(
ret, p,
[seeds] __device__(
int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
float4 rand = curand_uniform4(&state);
switch (n) {
case 4: {
assert(0 <= p4 && p4 <= 1);
v4 = static_cast<scalar_t>(rand.w <= p4);
// fallthrough
}
case 3: {
assert(0 <= p3 && p3 <= 1);
v3 = static_cast<scalar_t>(rand.z <= p3);
// fallthrough
}
case 2: {
assert(0 <= p2 && p2 <= 1);
v2 = static_cast<scalar_t>(rand.y <= p2);
// fallthrough
}
case 1: {
assert(0 <= p1 && p1 <= 1);
v1 = static_cast<scalar_t>(rand.x <= p1);
}
}
}
);
}
template<typename scalar_t>
void bernoulli_scalar_cuda_kernel(
at::Tensor& ret, double p_,
std::pair<uint64_t, uint64_t> seeds) {
float p = static_cast<float>(p_);
// The template argument `4` below indicates that we want to operate on four
// elements at a time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
at::cuda::CUDA_tensor_apply1<scalar_t, 4>(
ret, [seeds, p] __device__(
int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4) {
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
float4 rand = curand_uniform4(&state);
switch (n) {
case 4: {
v4 = static_cast<scalar_t>(rand.w <= p);
// fallthrough
}
case 3: {
v3 = static_cast<scalar_t>(rand.z <= p);
// fallthrough
}
case 2: {
v2 = static_cast<scalar_t>(rand.y <= p);
// fallthrough
}
case 1: {
v1 = static_cast<scalar_t>(rand.x <= p);
}
}
}
);
}
} // namespace
namespace at { namespace native {
Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen) {
Tensor ret = at::empty(lambda.sizes(), lambda.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "poisson", [&] {
poisson_cuda_kernel<scalar_t>(ret, lambda, next_philox_seed(gen, 20));
});
return ret;
}
Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen) {
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "gamma", [&] {
gamma_cuda_kernel<scalar_t>(ret, alpha, next_philox_seed(gen, 10));
});
return ret;
}
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
Tensor ret = at::empty(self.sizes(), self.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "_standard_gamma_grad", [&] {
gamma_grad_cuda_kernel<scalar_t>(ret, self, output);
});
return ret;
}
Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen) {
auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA)));
AT_DISPATCH_ALL_TYPES_AND_HALF(self.type(), "bernoulli_tensor_cuda_self_", [&] {
const at::Type& p_type = p.type();
using self_t = scalar_t;
auto seeds = next_philox_seed(gen, 10);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(p.type(), "bernoulli_tensor_cuda_p_", [&] {
using p_t = scalar_t;
return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, seeds);
});
});
return self;
}
Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) {
AT_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
AT_DISPATCH_ALL_TYPES(self.type(), "bernoulli_scalar_cuda_", [&] {
auto seeds = next_philox_seed(gen, 10);
bernoulli_scalar_cuda_kernel<scalar_t>(self, p, seeds);
});
return self;
}
}} // namespace at::native
|
d7808387b565f123c795aebbbf42f7c6771399b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hashkernel.cuh"
template <typename T>
__host__ int getnumdistinctkeys(T* h_key_columns, int num_key_columns, int num_key_rows)
{
return num_key_rows; //for now return number of rows
}
template <typename T>
__device__ int comparekeyrows(T* d_key_columns, int num_key_columns, int num_key_rows,
int a, int b)
{
int equal = 1;
for (int i = 0; i < num_key_columns; i++) {
if (d_key_columns[i * num_key_rows + a] != d_key_columns[i * num_key_rows + b])
return 0;
}
return equal;
}
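/*
 * Data layout note: the key and value tables in this file are stored column-major,
 * i.e. element (row r, column c) lives at d_key_columns[c * num_key_rows + r].
 * Comparing two rows therefore strides by num_key_rows from one key column to the
 * next, as done in the loop above.
 */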
template <typename T>
__global__ void groupbykernel(T* d_key_columns, int num_key_columns, int num_key_rows,
T* d_value_columns, int num_value_columns, int num_value_rows,
reduction_op reduct_ops[], int num_ops,
hashbucket<T>* d_hashtable, int hash_table_rows, int* d_num_unique_keys)
{
int row = threadIdx.x + blockIdx.x * blockDim.x;
if (row < num_key_rows) {
int bucket_index = crc_x64_32_hash<T>(d_key_columns, num_key_columns, num_key_rows, row) % hash_table_rows;
int old_key_row, current_key_row;
int tryagain = 1;
while (tryagain) {
current_key_row = d_hashtable[bucket_index].key_row;
if (current_key_row == EMPTYMARKER) {
old_key_row = atomicCAS(&d_hashtable[bucket_index].key_row, current_key_row, row);
if (old_key_row != current_key_row) {
current_key_row = old_key_row;
} else {
tryagain = 0; //key was inserted, proceed to update reduction fields
current_key_row = row;
atomicAdd(d_num_unique_keys, 1); //update count of unique keys
}
}
if (current_key_row != row) {
//compare rows
if (comparekeyrows<T>(d_key_columns, num_key_rows, num_key_columns, current_key_row, row)) {
tryagain = 0; //found matching bucket, proceed to update reduction fields
} else {
tryagain = 1; //collision, try next bucket
bucket_index = (bucket_index + 1) % hash_table_rows;
}
}
__syncthreads();
}
//update reduction fields
for (int i = 0; i < num_ops; i++) {
if (reduct_ops[i] == max_op) {
atomicMax(&d_hashtable[bucket_index].max, d_value_columns[i * num_value_rows + row]);
} else if (reduct_ops[i] == min_op) {
atomicMin(&d_hashtable[bucket_index].min, d_value_columns[i * num_value_rows + row]);
} else if (reduct_ops[i] == sum) {
atomicAdd(&d_hashtable[bucket_index].sum, d_value_columns[i * num_value_rows + row]);
} else if (reduct_ops[i] == count) {
atomicAdd(&d_hashtable[bucket_index].count, 1);
}
}
}
}
template <typename T>
__global__ void getouputdatakernel(T* d_output_keys, int num_key_columns, int num_key_rows,
T* d_output_values, int num_value_columns, int num_value_rows,
hashbucket<T>* d_hashtable, int num_unique_keys, int hash_table_rows,
reduction_op reduct_ops[], int num_ops, T* d_key_columns)
{
int output_row = threadIdx.x + blockIdx.x * blockDim.x;
if (output_row < num_unique_keys) {
int scan_size = (hash_table_rows / num_unique_keys);
int num_scan_rows = scan_size;
if ((output_row == num_unique_keys - 1) && (num_key_rows % num_unique_keys)) {
num_scan_rows += 1;
}
int start_row = output_row * scan_size;
hashbucket<T> bucket;
for (int i = 0; i < num_scan_rows; i++) {
start_row += i;
bucket = d_hashtable[start_row];
if (bucket.key_row != EMPTYMARKER) {
//copy row
for (int j = 0; j < num_key_columns; j++) {
d_output_keys[j * num_unique_keys + output_row] = d_key_columns[j * num_key_rows + bucket.key_row];
}
//copy reduction values
for (int k = 0; k < num_ops; k++) {
if (reduct_ops[k] == max_op) {
d_output_values[k * num_unique_keys + output_row] = bucket.max;
} else if (reduct_ops[k] == min_op) {
d_output_values[k * num_unique_keys + output_row] = bucket.min;
} else if (reduct_ops[k] == sum) {
d_output_values[k * num_unique_keys + output_row] = bucket.sum;
} else if (reduct_ops[k] == count) {
d_output_values[k * num_unique_keys + output_row] = bucket.count;
}
}
}
}
}
}
template <typename T>
__host__ struct output_data<T> groupby(T* h_key_columns, int num_key_columns, int num_key_rows,
T* h_value_columns, int num_value_columns, int num_value_rows,
reduction_op ops[], int num_ops)
{
//get number of unique keys
int* h_num_unique_keys;
int* d_num_unique_keys;
hipHostMalloc(&h_num_unique_keys, sizeof(int));
hipMalloc((void **) &d_num_unique_keys, sizeof(int));
int hash_table_rows = getnumdistinctkeys<T>(h_key_columns, num_key_columns, num_key_rows);
//allocate memory for hash table on device
hashbucket<T>* d_hashtable;
int hashtablesize = hash_table_rows * sizeof(hashbucket<T>);
hipMalloc((void **) &d_hashtable, hashtablesize);
//initialize hash table
init_hash_table<T>(d_hashtable, hash_table_rows);
//transfer keys and values data to device
T* d_key_columns;
T* d_value_columns;
int key_data_size = num_key_rows * num_key_columns * sizeof(T);
hipMalloc((void **)&d_key_columns, key_data_size);
hipMemcpy(d_key_columns, h_key_columns, key_data_size, hipMemcpyHostToDevice);
int value_data_size = num_value_rows * num_value_columns * sizeof(T);
hipMalloc((void **)&d_value_columns, value_data_size);
hipMemcpy(d_value_columns, h_value_columns, value_data_size, hipMemcpyHostToDevice);
//copy reduction operations to device
reduction_op* d_reduct_ops;
int reduction_ops_size = num_ops * sizeof(reduction_op);
hipMalloc((void **)&d_reduct_ops, num_ops * sizeof(reduction_op));
hipMemcpy(d_reduct_ops, ops, reduction_ops_size, hipMemcpyHostToDevice);
//copy hash key tab to constant memory
hipMemcpyToSymbol(c_crc_x64_32_tab, crc_x64_32_tab, HASH_TAB_SIZE * sizeof(uint32_t));
//launch reduction kernel
dim3 dimBlock(BLOCK_SIZE, 1, 1);
dim3 dimGrid((num_key_rows + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( groupbykernel<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, d_key_columns, num_key_columns, num_key_rows,
d_value_columns, num_value_columns, num_value_rows,
d_reduct_ops, num_ops,
d_hashtable, hash_table_rows, d_num_unique_keys);
hipDeviceSynchronize();
//copy number of unique keys from device memory
hipMemcpy(h_num_unique_keys, d_num_unique_keys, sizeof(int), hipMemcpyDeviceToHost);
//allocate space on host memory for output keys and output values
int output_key_size = *(h_num_unique_keys) * num_key_columns * sizeof(T);
int output_values_size = *(h_num_unique_keys) * num_value_columns * sizeof(T);
T* h_output_keys;
T* h_output_values;
hipHostMalloc(&h_output_keys, output_key_size);
hipHostMalloc(&h_output_values, output_values_size);
T* d_output_keys;
T* d_output_values;
hipMalloc((void **) &d_output_keys, output_key_size);
hipMalloc((void **) &d_output_values, output_values_size);
//launch kernel to summarize results in output format
dimGrid.x = (*(h_num_unique_keys) + BLOCK_SIZE - 1) / BLOCK_SIZE; dimGrid.y = 1; dimGrid.z = 1;
hipLaunchKernelGGL(( getouputdatakernel<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, d_output_keys, num_key_columns, num_key_rows,
d_output_values, num_value_columns, num_value_rows,
d_hashtable, *(h_num_unique_keys), hash_table_rows,
d_reduct_ops, num_ops, d_key_columns);
//copy results back to host
hipMemcpy(h_output_keys, d_output_keys, output_key_size, hipMemcpyDeviceToHost);
hipMemcpy(h_output_values, d_output_values, output_values_size, hipMemcpyDeviceToHost);
struct output_data<T> output;
output.keys = h_output_keys;
output.values = h_output_values;
output.unique_keys = *(h_num_unique_keys);
//free device memory
hipFree(h_num_unique_keys);
hipFree(d_num_unique_keys);
hipFree(d_hashtable);
hipFree(d_key_columns);
hipFree(d_value_columns);
hipFree(d_output_keys);
hipFree(d_output_values);
return output;
}
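/*
 * Hypothetical usage sketch (not part of the original file; it assumes the
 * reduction_op enum and output_data<T> struct declared in hashkernel.cuh):
 *
 *   reduction_op ops[2] = {sum, count};
 *   // h_keys:   num_key_columns   x num_key_rows   values, column-major
 *   // h_values: num_value_columns x num_value_rows values, column-major
 *   output_data<int> res = groupby<int>(h_keys, num_key_columns, num_key_rows,
 *                                       h_values, num_value_columns, num_value_rows,
 *                                       ops, 2);
 *   // res.keys / res.values then hold res.unique_keys grouped rows in pinned
 *   // host memory (allocated with hipHostMalloc inside groupby).
 */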
| d7808387b565f123c795aebbbf42f7c6771399b4.cu | #include "hashkernel.cuh"
template <typename T>
__host__ int getnumdistinctkeys(T* h_key_columns, int num_key_columns, int num_key_rows)
{
return num_key_rows; //for now return number of rows
}
template <typename T>
__device__ int comparekeyrows(T* d_key_columns, int num_key_columns, int num_key_rows,
int a, int b)
{
int equal = 1;
for (int i = 0; i < num_key_columns; i++) {
if (d_key_columns[i * num_key_rows + a] != d_key_columns[i * num_key_rows + b])
return 0;
}
return equal;
}
template <typename T>
__global__ void groupbykernel(T* d_key_columns, int num_key_columns, int num_key_rows,
T* d_value_columns, int num_value_columns, int num_value_rows,
reduction_op reduct_ops[], int num_ops,
hashbucket<T>* d_hashtable, int hash_table_rows, int* d_num_unique_keys)
{
int row = threadIdx.x + blockIdx.x * blockDim.x;
if (row < num_key_rows) {
int bucket_index = crc_x64_32_hash<T>(d_key_columns, num_key_columns, num_key_rows, row) % hash_table_rows;
int old_key_row, current_key_row;
int tryagain = 1;
while (tryagain) {
current_key_row = d_hashtable[bucket_index].key_row;
if (current_key_row == EMPTYMARKER) {
old_key_row = atomicCAS(&d_hashtable[bucket_index].key_row, current_key_row, row);
if (old_key_row != current_key_row) {
current_key_row = old_key_row;
} else {
tryagain = 0; //key was inserted, proceed to update reduction fields
current_key_row = row;
atomicAdd(d_num_unique_keys, 1); //update count of unique keys
}
}
if (current_key_row != row) {
//compare rows
if (comparekeyrows<T>(d_key_columns, num_key_rows, num_key_columns, current_key_row, row)) {
tryagain = 0; //found matching bucket, proceed to update reduction fields
} else {
tryagain = 1; //collision, try next bucket
bucket_index = (bucket_index + 1) % hash_table_rows;
}
}
__syncthreads();
}
//update reduction fields
for (int i = 0; i < num_ops; i++) {
if (reduct_ops[i] == max_op) {
atomicMax(&d_hashtable[bucket_index].max, d_value_columns[i * num_value_rows + row]);
} else if (reduct_ops[i] == min_op) {
atomicMin(&d_hashtable[bucket_index].min, d_value_columns[i * num_value_rows + row]);
} else if (reduct_ops[i] == sum) {
atomicAdd(&d_hashtable[bucket_index].sum, d_value_columns[i * num_value_rows + row]);
} else if (reduct_ops[i] == count) {
atomicAdd(&d_hashtable[bucket_index].count, 1);
}
}
}
}
template <typename T>
__global__ void getouputdatakernel(T* d_output_keys, int num_key_columns, int num_key_rows,
T* d_output_values, int num_value_columns, int num_value_rows,
hashbucket<T>* d_hashtable, int num_unique_keys, int hash_table_rows,
reduction_op reduct_ops[], int num_ops, T* d_key_columns)
{
int output_row = threadIdx.x + blockIdx.x * blockDim.x;
if (output_row < num_unique_keys) {
int scan_size = (hash_table_rows / num_unique_keys);
int num_scan_rows = scan_size;
if ((output_row == num_unique_keys - 1) && (num_key_rows % num_unique_keys)) {
num_scan_rows += 1;
}
int start_row = output_row * scan_size;
hashbucket<T> bucket;
for (int i = 0; i < num_scan_rows; i++) {
start_row += i;
bucket = d_hashtable[start_row];
if (bucket.key_row != EMPTYMARKER) {
//copy row
for (int j = 0; j < num_key_columns; j++) {
d_output_keys[j * num_unique_keys + output_row] = d_key_columns[j * num_key_rows + bucket.key_row];
}
//copy reduction values
for (int k = 0; k < num_ops; k++) {
if (reduct_ops[k] == max_op) {
d_output_values[k * num_unique_keys + output_row] = bucket.max;
} else if (reduct_ops[k] == min_op) {
d_output_values[k * num_unique_keys + output_row] = bucket.min;
} else if (reduct_ops[k] == sum) {
d_output_values[k * num_unique_keys + output_row] = bucket.sum;
} else if (reduct_ops[k] == count) {
d_output_values[k * num_unique_keys + output_row] = bucket.count;
}
}
}
}
}
}
template <typename T>
__host__ struct output_data<T> groupby(T* h_key_columns, int num_key_columns, int num_key_rows,
T* h_value_columns, int num_value_columns, int num_value_rows,
reduction_op ops[], int num_ops)
{
//get number of unique keys
int* h_num_unique_keys;
int* d_num_unique_keys;
cudaMallocHost(&h_num_unique_keys, sizeof(int));
cudaMalloc((void **) &d_num_unique_keys, sizeof(int));
int hash_table_rows = getnumdistinctkeys<T>(h_key_columns, num_key_columns, num_key_rows);
//allocate memory for hash table on device
hashbucket<T>* d_hashtable;
int hashtablesize = hash_table_rows * sizeof(hashbucket<T>);
cudaMalloc((void **) &d_hashtable, hashtablesize);
//initialize hash table
init_hash_table<T>(d_hashtable, hash_table_rows);
//transfer keys and values data to device
T* d_key_columns;
T* d_value_columns;
int key_data_size = num_key_rows * num_key_columns * sizeof(T);
cudaMalloc((void **)&d_key_columns, key_data_size);
cudaMemcpy(d_key_columns, h_key_columns, key_data_size, cudaMemcpyHostToDevice);
int value_data_size = num_value_rows * num_value_columns * sizeof(T);
cudaMalloc((void **)&d_value_columns, value_data_size);
cudaMemcpy(d_value_columns, h_value_columns, value_data_size, cudaMemcpyHostToDevice);
//copy reduction operations to device
reduction_op* d_reduct_ops;
int reduction_ops_size = num_ops * sizeof(reduction_op);
cudaMalloc((void **)&d_reduct_ops, num_ops * sizeof(reduction_op));
cudaMemcpy(d_reduct_ops, ops, reduction_ops_size, cudaMemcpyHostToDevice);
//copy hash key tab to constant memory
cudaMemcpyToSymbol(c_crc_x64_32_tab, crc_x64_32_tab, HASH_TAB_SIZE * sizeof(uint32_t));
//launch reduction kernel
dim3 dimBlock(BLOCK_SIZE, 1, 1);
dim3 dimGrid((num_key_rows + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
groupbykernel<T><<<dimGrid, dimBlock>>>(d_key_columns, num_key_columns, num_key_rows,
d_value_columns, num_value_columns, num_value_rows,
d_reduct_ops, num_ops,
d_hashtable, hash_table_rows, d_num_unique_keys);
cudaDeviceSynchronize();
//copy number of unique keys from device memory
cudaMemcpy(h_num_unique_keys, d_num_unique_keys, sizeof(int), cudaMemcpyDeviceToHost);
//allocate space on host memory for output keys and output values
int output_key_size = *(h_num_unique_keys) * num_key_columns * sizeof(T);
int output_values_size = *(h_num_unique_keys) * num_value_columns * sizeof(T);
T* h_output_keys;
T* h_output_values;
cudaMallocHost(&h_output_keys, output_key_size);
cudaMallocHost(&h_output_values, output_values_size);
T* d_output_keys;
T* d_output_values;
cudaMalloc((void **) &d_output_keys, output_key_size);
cudaMalloc((void **) &d_output_values, output_values_size);
//launch kernel to summarize results in output format
dimGrid.x = (*(h_num_unique_keys) + BLOCK_SIZE - 1) / BLOCK_SIZE; dimGrid.y = 1; dimGrid.z = 1;
getouputdatakernel<T><<<dimGrid, dimBlock>>>(d_output_keys, num_key_columns, num_key_rows,
d_output_values, num_value_columns, num_value_rows,
d_hashtable, *(h_num_unique_keys), hash_table_rows,
d_reduct_ops, num_ops, d_key_columns);
//copy results back to host
cudaMemcpy(h_output_keys, d_output_keys, output_key_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_output_values, d_output_values, output_values_size, cudaMemcpyDeviceToHost);
struct output_data<T> output;
output.keys = h_output_keys;
output.values = h_output_values;
output.unique_keys = *(h_num_unique_keys);
//free device memory
cudaFree(h_num_unique_keys);
cudaFree(d_num_unique_keys);
cudaFree(d_hashtable);
cudaFree(d_key_columns);
cudaFree(d_value_columns);
cudaFree(d_output_keys);
cudaFree(d_output_values);
return output;
}
|
fa7726397c48e66e0525120c6d3dc23f9658eb49.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main() {
int num_dev;
hipGetDeviceCount(&num_dev);
printf("%d\n", num_dev);
return 0;
}
| fa7726397c48e66e0525120c6d3dc23f9658eb49.cu | #include <stdio.h>
int main() {
int num_dev;
cudaGetDeviceCount(&num_dev);
printf("%d\n", num_dev);
return 0;
}
|
99e6b56bdb74898b07295f77bd90154dd8cec543.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cu_minus(float *A, const float b, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fsub_rd(A[tid], b);
tid += stride;
}
} | 99e6b56bdb74898b07295f77bd90154dd8cec543.cu | #include "includes.h"
__global__ void cu_minus(float *A, const float b, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fsub_rd(A[tid], b);
tid += stride;
}
} |
6fb24896ef9dde8b6ead23e20d261555c69a349d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __REALTIME__
#define __REALTIME__
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <fstream>
#include "microMC_chem.h"
void printDevProp(int device)
// print out device properties
{
int devCount;
hipDeviceProp_t devProp;
// device properties
hipGetDeviceCount(&devCount);
cout << "Number of device: " << devCount << endl;
cout << "Using device #: " << device << endl;
hipGetDeviceProperties(&devProp, device);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %7.2f MB\n",
devProp.totalGlobalMem/1024.0/1024.0);
printf("Total shared memory per block: %5.2f kB\n",
devProp.sharedMemPerBlock/1024.0);
printf("Total registers per block: %u\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
printf("Maximum dimension of block: %d*%d*%d\n",
devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]);
printf("Maximum dimension of grid: %d*%d*%d\n",
devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]);
printf("Clock rate: %4.2f GHz\n", devProp.clockRate/1000000.0);
printf("Total constant memory: %5.2f kB\n", devProp.totalConstMem/1024.0);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
// obtain computing resource
}
void calDNAreact_radius(float* rDNA,float deltat)
{
float k[5]={6.1,9.2,6.4,6.1,1.8};
float tmp=sqrtf(PI*DiffusionOfOH*deltat*0.001);
for(int i=0;i<5;i++)
{
rDNA[i]=k[i]/(4*PI*DiffusionOfOH)*10/6.023;//k 10^9 L/(mol*s), Diffusion 10^9 nm^2/s. t ps
rDNA[i]=sqrtf(rDNA[i]*tmp+tmp*tmp*0.25)-tmp*0.5;
}
rDNA[5]=0;//histone protein absorption radius, assumed!!!
}
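/*
 * Formula used above (restated): with tmp = sqrt(pi * D_OH * dt * 1e-3) for dt in ps,
 *   r0   = k / (4*pi*D_OH) * 10/6.023   (k in 1e9 L mol^-1 s^-1, D in 1e9 nm^2/s;
 *                                        the 10/6.023 factor converts L/mol to nm^3
 *                                        per molecule, giving r0 in nm)
 *   rDNA = sqrt(r0*tmp + tmp^2/4) - tmp/2,
 * i.e. a time-step-corrected reaction radius; rDNA[5] is the assumed histone
 * absorption radius (set to 0 here).
 */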
__device__ float caldistance(float3 pos1, float3 pos2)
{
return (sqrtf((pos1.x -pos2.x)*(pos1.x -pos2.x)+(pos1.y -pos2.y)*(pos1.y -pos2.y)+(pos1.z -pos2.z)*(pos1.z -pos2.z)));
}
#if RANDGEO==0
__device__ float3 pos2local(int type, float3 pos, int index)
{
//do the coordinate transformation; index is the linear index of the box being referenced
//maps global XYZ to local XYZ so that the DNA base positions of the two basic types (Straight and Bend) can be reused
int x = index%NUCLEUS_DIM;//the x,y,z index of the box
int z = floorf(index/(NUCLEUS_DIM*NUCLEUS_DIM));
int y = floorf((index%(NUCLEUS_DIM*NUCLEUS_DIM))/NUCLEUS_DIM);
//printf("relative to type %d %d %d %d\n", type, x,y,z);
pos.x = pos.x-(2*x + 1 - NUCLEUS_DIM)*UNITLENGTH*0.5;//relative to its center
pos.y = pos.y-(2*y + 1 - NUCLEUS_DIM)*UNITLENGTH*0.5;
pos.z = pos.z-(2*z + 1 - NUCLEUS_DIM)*UNITLENGTH*0.5;
//printf("local coordinate %f %f %f\n", pos.x, pos.y, pos.z);
float xc, yc, zc;
switch(type)
{
//Straight type
case 1:////!!!!!the following needs to be revised and confirmed
{xc = pos.x;
yc = pos.y;
zc = pos.z;
break;}
case 2://-z
{xc = -pos.x;//Ry(pi)
yc = pos.y;
zc = -pos.z;
break;}
case 3://+y
{xc = pos.x;//Rx(pi/2)
yc = -pos.z;
zc = pos.y;
break;}
case 4:
{xc = pos.x;
yc = pos.z;
zc = -pos.y;
break;}
case 5://+x
{xc = -pos.z;//Ry(-pi/2)
yc = pos.y;
zc = pos.x;
break;}
case 6:
{xc = pos.z;
yc = pos.y;
zc = -pos.x;
break;}
case 7://Bend
{xc = pos.x;
yc = pos.y;
zc = pos.z;
break;}
case 8:
{xc = -pos.z;//Rz(pi)Ry(pi/2)
yc = -pos.y;
zc = -pos.x;
break;}
case 9:
{xc = -pos.x;//Rz(pi)
yc = -pos.y;
zc = pos.z;
break;}
case 10:
{xc = -pos.z;//Ry(-pi/2)
yc = pos.y;
zc = pos.x;
break;}
case 11:
{xc = -pos.x;//Ry(pi)
yc = pos.y;
zc = -pos.z;
break;}
case 12:
{xc = pos.z;//Rz(pi)Ry(-pi/2)
yc = -pos.y;
zc = pos.x;
break;}
case 13:
{xc = pos.x;//Rx(pi)
yc = -pos.y;
zc = -pos.z;
break;}
case 14:
{xc = pos.z;//Ry(pi/2)
yc = pos.y;
zc = -pos.x;
break;}
case 15:
{xc = pos.y;//Rz(-pi/2)
yc = -pos.x;
zc = pos.z;
break;}
case 16:
{xc = -pos.z;//Ry(-pi/2)Rz(pi/2)
yc = pos.x;
zc = -pos.y;
break;}
case 17:
{xc = -pos.y;//Rz(pi/2)
yc = pos.x;
zc = pos.z;
break;}
case 18:
{xc = -pos.z;//Rz(-pi/2)Rx(pi/2)
yc = -pos.x;
zc = pos.y;
break;}
case 19:
{xc = pos.y;//Rz(-pi/2)Ry(pi)
yc = pos.x;
zc = -pos.z;
break;}
case 20:
{xc = pos.z;//Rz(-pi/2)Rx(-pi/2)
yc = -pos.x;
zc = pos.y;
break;}
case 21:
{xc = -pos.y;//Rz(pi/2)Ry(pi)
yc = -pos.x;
zc = -pos.z;
break;}
case 22:
{xc = pos.z;//Rz(pi/2)Rx(pi/2)
yc = pos.x;
zc = pos.y;
break;}
case 23:
{xc = pos.x;//Rx(pi/2)
yc = -pos.z;
zc = pos.y;
break;}
case 24:
{xc = -pos.y;//Rz(pi/2)Ry(pi/2)
yc = pos.z;
zc = -pos.x;
break;}
case 25:
{xc = -pos.x;//Rx(pi/2)Ry(pi)
yc = pos.z;
zc = pos.y;
break;}
case 26:
{xc = -pos.y;//Rx(pi/2)Rz(pi/2)
yc = -pos.z;
zc = pos.x;
break;}
case 27:
{xc = pos.x;//Rx(-pi/2)
yc =pos.z;
zc = -pos.y;
break;}
case 28:
{xc = pos.y;//Rx(pi/2)Rz(-pi/2)
yc = -pos.z;
zc = -pos.x;
break;}
case 29:
{xc = -pos.x;//Rx(-pi/2)Ry(pi)
yc = -pos.z;
zc = -pos.y;
break;}
case 30:
{xc = pos.y;//Rz(-pi/2)Ry(-pi/2)
yc = pos.z;
zc = pos.x;
break;}
default:
{printf("wrong type\n"); // for test
break;}
}
pos.x=xc;
pos.y=yc;
pos.z=zc;//*/
return pos;
}
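/*
 * Summary of the mapping above: the hit position is first shifted to the center of
 * box `index`, then one of the axis-aligned rotations selected by `type` carries it
 * into the canonical frame of the straight (types 1-6) or bent (types 7-30)
 * chromatin segment. For example, type 3 applies Rx(pi/2): (x, y, z) -> (x, -z, y),
 * matching the comment on that branch.
 */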
__global__ void chemSearch(int num, Edeposit* d_edrop, int* dev_chromatinIndex,int* dev_chromatinStart,int* dev_chromatinType, CoorBasePair* dev_straightChrom,
CoorBasePair* dev_bendChrom,float3* dev_straightHistone,float3* dev_bendHistone, combinePhysics* d_recorde)
{
int id = blockIdx.x*blockDim.x+ threadIdx.x;
hiprandState_t localState = cuseed[id%MAXNUMPAR2];
float3 newpos, pos_cur_target;
int3 index;
CoorBasePair* chrom;
float3 *histone;
int chromNum, histoneNum,flag=0;
while(id<num)
{
d_recorde[id].site.x=-1;//initialize
d_recorde[id].site.y=-1;
d_recorde[id].site.z=-1;
d_recorde[id].site.w=-1;
d_recorde[id].prob2=d_edrop[id].e;
d_recorde[id].prob1=hiprand_uniform(&localState);
pos_cur_target=d_edrop[id].position;
index.x=floorf((pos_cur_target.x+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
index.y=floorf((pos_cur_target.y+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
index.z=floorf((pos_cur_target.z+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
int delta=index.x+index.y*NUCLEUS_DIM+index.z*NUCLEUS_DIM*NUCLEUS_DIM,minindex=-1;
float distance[3]={100},mindis=100;
for(int i=0;i<27;i++)
{
flag=0;
int newindex = delta+neighborindex[i];
if(newindex<0 || newindex > NUCLEUS_DIM*NUCLEUS_DIM*NUCLEUS_DIM-1) continue;
int type = dev_chromatinType[newindex];
if(type==-1 || type==0) continue;
newpos = pos2local(type, pos_cur_target, newindex);
if(type<7)
{
if(newpos.x<(min1-SPACETOBODER) || newpos.y<(min2-SPACETOBODER) || newpos.z<(min3-SPACETOBODER) ||newpos.x>(max1+SPACETOBODER)
|| newpos.y>(max2+SPACETOBODER) || newpos.z>(max3+SPACETOBODER))
continue;
chrom=dev_straightChrom;
chromNum=STRAIGHT_BP_NUM;
histone=dev_straightHistone;
histoneNum=STRAIGHT_HISTONE_NUM;
}
else
{
if(newpos.x<(min1-SPACETOBODER) || newpos.y<(min2-SPACETOBODER) || newpos.z<(min3-SPACETOBODER) ||newpos.x>(max3+SPACETOBODER)
|| newpos.y>(max2+SPACETOBODER) || newpos.z>(max1+SPACETOBODER))
continue;
chrom=dev_bendChrom;
chromNum=BEND_BP_NUM;
histone=dev_bendHistone;
histoneNum=BEND_HISTONE_NUM;
}
for(int j=0;j<histoneNum;j++)
{
mindis = caldistance(newpos, histone[j])-RHISTONE;
if(mindis < 0) flag=1;
}
if(flag) break;
for(int j=0;j<chromNum;j++)
{
// can take the size of base into consideration, distance should be distance-r;
mindis=100,minindex=-1;
distance[0] = caldistance(newpos, chrom[j].base)-RBASE;
distance[1] = caldistance(newpos,chrom[j].left)-RSUGAR;
distance[2] = caldistance(newpos,chrom[j].right)-RSUGAR;
for(int iii=0;iii<3;iii++)
{
if(mindis>distance[iii])
{
mindis=distance[iii];
minindex=iii;
}
}
if(mindis<0)
{
if(minindex>0)
{
d_recorde[id].site.x = dev_chromatinIndex[newindex];
d_recorde[id].site.y = dev_chromatinStart[newindex]+j;
d_recorde[id].site.z = 3+minindex;
d_recorde[id].site.w = 1;
}
flag=1;
break;
}
int tmp = floorf(hiprand_uniform(&localState)/0.25);
distance[0] = caldistance(newpos, chrom[j].base)-RBASE-d_rDNA[tmp];
distance[1] = caldistance(newpos,chrom[j].left)-RSUGAR- d_rDNA[4];
distance[2] = caldistance(newpos,chrom[j].right)-RSUGAR- d_rDNA[4];
for(int iii=0;iii<3;iii++)
{
if(mindis>distance[iii])
{
mindis=distance[iii];
minindex=iii;
}
}
if(mindis<0)
{
if(minindex>0)
{
d_recorde[id].site.x = dev_chromatinIndex[newindex];
d_recorde[id].site.y = dev_chromatinStart[newindex]+j;
d_recorde[id].site.z = 3+minindex;
d_recorde[id].site.w = 1;
}
flag=1;
break;
}
}
if(flag) break;
}
id+=blockDim.x*gridDim.x;
}
cuseed[id%MAXNUMPAR2]=localState;
}
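// Physics-stage search kernel: same neighbor-voxel search as chemSearch, but for energy-deposition
// events; the capture radius RPHYS is added to the base and sugar radii and a backbone hit is
// stored with site.w = 0. prob2 is drawn uniformly in [EMIN, EMAX] for the later energy test.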
__global__ void phySearch(int num, Edeposit* d_edrop, int* dev_chromatinIndex,int* dev_chromatinStart,int* dev_chromatinType, CoorBasePair* dev_straightChrom,
CoorBasePair* dev_bendChrom,float3* dev_straightHistone,float3* dev_bendHistone, combinePhysics* d_recorde)
{
int id = blockIdx.x*blockDim.x+ threadIdx.x;
hiprandState_t localState = cuseed[id%MAXNUMPAR2];
float3 newpos, pos_cur_target;
int3 index;
CoorBasePair* chrom;
float3 *histone;
int chromNum, histoneNum,flag=0;
while(id<num)
{
d_recorde[id].site.x=-1;//initialize
d_recorde[id].site.y=-1;
d_recorde[id].site.z=-1;
d_recorde[id].site.w=-1;
d_recorde[id].prob1=d_edrop[id].e;
d_recorde[id].prob2=hiprand_uniform(&localState)*(EMAX-EMIN)+EMIN;
pos_cur_target=d_edrop[id].position;
index.x=floorf((pos_cur_target.x+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
index.y=floorf((pos_cur_target.y+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
index.z=floorf((pos_cur_target.z+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
int delta=index.x+index.y*NUCLEUS_DIM+index.z*NUCLEUS_DIM*NUCLEUS_DIM,minindex=-1;
float distance[3]={100},mindis=100;
for(int i=0;i<27;i++)
{
flag=0;
int newindex = delta+neighborindex[i];
if(newindex<0 || newindex > NUCLEUS_DIM*NUCLEUS_DIM*NUCLEUS_DIM-1) continue;
int type = dev_chromatinType[newindex];
if(type==-1 || type==0) continue;
newpos = pos2local(type, pos_cur_target, newindex);
if(type<7)
{
if(newpos.x<(min1-SPACETOBODER) || newpos.y<(min2-SPACETOBODER) || newpos.z<(min3-SPACETOBODER) ||newpos.x>(max1+SPACETOBODER)
|| newpos.y>(max2+SPACETOBODER) || newpos.z>(max3+SPACETOBODER))
continue;
chrom=dev_straightChrom;
chromNum=STRAIGHT_BP_NUM;
histone=dev_straightHistone;
histoneNum=STRAIGHT_HISTONE_NUM;
}
else
{
if(newpos.x<(min1-SPACETOBODER) || newpos.y<(min2-SPACETOBODER) || newpos.z<(min3-SPACETOBODER) ||newpos.x>(max3+SPACETOBODER)
|| newpos.y>(max2+SPACETOBODER) || newpos.z>(max1+SPACETOBODER))
continue;
chrom=dev_bendChrom;
chromNum=BEND_BP_NUM;
histone=dev_bendHistone;
histoneNum=BEND_HISTONE_NUM;
}
for(int j=0;j<histoneNum;j++)
{
mindis = caldistance(newpos, histone[j])-RHISTONE;
if(mindis < 0) flag=1;
}
if(flag) break;
for(int j=0;j<chromNum;j++)
{
// take the size of the base/sugar into consideration: the centre-to-centre distance is reduced by the corresponding radius r;
mindis=100,minindex=-1;
distance[0] = caldistance(newpos, chrom[j].base)-RBASE-RPHYS;
distance[1] = caldistance(newpos,chrom[j].left)-RSUGAR- RPHYS;
distance[2] = caldistance(newpos,chrom[j].right)-RSUGAR- RPHYS;
for(int iii=0;iii<3;iii++)
{
if(mindis>distance[iii])
{
mindis=distance[iii];
minindex=iii;
}
}
if(mindis<0)
{
if(minindex>0)
{
d_recorde[id].site.x = dev_chromatinIndex[newindex];
d_recorde[id].site.y = dev_chromatinStart[newindex]+j;
d_recorde[id].site.z = 3+minindex;
d_recorde[id].site.w = 0;
}
flag=1;
}
}
if(flag) break;
}
//if(id%(blockDim.x*gridDim.x)==0) printf("id is %d\n", id);
id+=blockDim.x*gridDim.x;//*/
}
cuseed[id%MAXNUMPAR2]=localState;
}//*/
#endif
/***********************************************************************************/
Edeposit* readStage(int *numPhy,int mode)
/*******************************************************************
c* Reads electron reactive events from the physics stage result  *
c* Sets up the electron events as a list for the DNA damage search *
output: *numPhy
        the number of effective physics damage events
c******************************************************************/
{
int start,stop;
float data[4];
ifstream infile;
if(mode==0) {infile.open("./Results/totalphy.dat",ios::binary);printf("physics results: Reading ./Results/totalphy.dat\n");}
else {infile.open("./Results/totalchem.dat",ios::binary);printf("chemistry results: Reading ./Results/totalchem.dat\n");}
start=infile.tellg();
infile.seekg(0, ios::end);
stop=infile.tellg();
(*numPhy)=(stop-start)/16;
if(*numPhy==0) {infile.close();return NULL;}
infile.seekg(0, ios::beg);
Edeposit *hs = (Edeposit*)malloc(sizeof(Edeposit)*(*numPhy));
for(int i=0;i<(*numPhy);i++)
{
infile.read(reinterpret_cast <char*> (&data), sizeof(data));
if(i<8) printf("x y z e %f %f %f %f\n", data[0],data[1],data[2],data[3]);
hs[i].position.x=data[0];
hs[i].position.y=data[1];
hs[i].position.z=data[2];
if(mode==0) hs[i].e=data[3];
else hs[i].e=1-PROBCHEM;
}
infile.close();
return hs;
}
void quicksort(chemReact* hits,int start, int stop, int sorttype)
{
//CPU sort function for ordering chemReacts in cpu memory
switch(sorttype)
{
case 1:
{ sort(hits+start,hits+stop,compare1);
break;
}
case 2:
{ sort(hits+start,hits+stop,compare2);
break;
}
default:
{ sort(hits+start,hits+stop,compare1);
break;
}
}
}
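// Merge damage records that hit the same chromatin site: for the physics stage (mode==0) the
// deposited energies (prob1) are summed, otherwise the probabilities (prob2) are multiplied.
// Only records passing the acceptance test prob2 < prob1 are returned as chemReact entries,
// and *totalphy is updated to the number of accepted records.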
chemReact* combinePhy(int* totalphy, combinePhysics* recorde,int mode)
{
int counts=(*totalphy);
sort(recorde,recorde+counts,compare3);
int j,num=0;
for(int i=0; i<counts;)
{
if(recorde[i].site.z==-1) {i++;continue;}
j=i+1;
while(recorde[j].site.x==recorde[i].site.x)
{
if(recorde[j].site.y==recorde[i].site.y && recorde[j].site.z==recorde[i].site.z)
{
if(mode==0) recorde[i].prob1 +=recorde[j].prob1;
else recorde[i].prob2 *= recorde[j].prob2;
recorde[j].site.z=-1;
}
j++;
if(j==counts) break;
}
i++;
}
for(int i=0;i<counts;i++)
{
if(recorde[i].site.z!=-1 && recorde[i].prob2<recorde[i].prob1)
{
num++;
}
}
if(num==0) {(*totalphy)=0;return NULL;}
chemReact* recordPhy=(chemReact*) malloc(sizeof(chemReact)*num);
int index=0;
for(int i=0;i<counts;i++)
{
if(recorde[i].site.z!=-1 && recorde[i].prob2<recorde[i].prob1)
{
recordPhy[index].x=recorde[i].site.x;
recordPhy[index].y=recorde[i].site.y;
recordPhy[index].z=recorde[i].site.z;
recordPhy[index].w=recorde[i].site.w;
index++;
}
}
(*totalphy)=num;
return recordPhy;
}
void damageAnalysis(int counts, chemReact* recordpos)
{
// Classify the recorded break sites into SSB/DSB damage categories and append the counts to ./Results/finalstat.txt.
// Note: it seems that currently only the total SSB and DSB counts are correct;
// be careful when using the number in each category!!
if(counts==0) return;
char buffer[256];
int complexity[7]={0};//SSB,2xSSB, SSB+, 2SSB, DSB, DSB+, DSB++
int results[7]={0};//SSBd, SSbi, SSbm, DSBd, DSBi, DSBm, DSBh.
quicksort(recordpos,0,counts,1);
int start=0,m,numofstrand,numoftype,k,cur_dsb;
for(int i=0; i<counts;)
{
if(recordpos[i].z==-1) {i++;continue;}
start=i;
while(i<counts)
{
if(recordpos[i].x==recordpos[start].x) i++;
else break;
}
if(i==start+1)//only one break on the DNA
{
complexity[0]++;
results[recordpos[start].w]++;
continue;//find breaks in another DNA
}
if(i>start+1) quicksort(recordpos,start,i,2);//order damage sites so that search can be done
cur_dsb=0;
for(k=start;k<i-1;)//more than one break
{
if(recordpos[k+1].y-recordpos[k].y>dS)
{
complexity[1]++;
results[recordpos[k].w]++;
k++;
continue;
}
else
{
m=k+1;
numoftype=0;
numofstrand=0;
int flag=0;//means SSB, 1 for DSB
while(m<i)
{
if( recordpos[m].z!=recordpos[m-1].z)//recordpos[m].y-recordpos[m-1].y<dDSB &&
{
numofstrand++;
if(recordpos[m].w!=recordpos[k].w) numoftype++;
int j=m;
int tmptype=0;
for(;j>k-1;j--)
{
if(recordpos[m].y-recordpos[j].y>dDSB) break;
if(recordpos[j].w!=recordpos[k].w) tmptype++;
}
if(j==k-1) flag=1;//DSB
else if(j==k && m==k+1) flag=2;//2SSB
else {m=j+1;numoftype-=tmptype;}
break;
}
if(recordpos[m].y-recordpos[k].y>dS) {m--;break;}//SSB+
if(recordpos[m].w!=recordpos[k].w) numoftype++;
m++;
}
if(flag==0)
{
complexity[2]++;
if(numoftype!=0) results[2]++;
else results[recordpos[k].w]++;//=m-k;
}
else if(flag==2)
{
complexity[3]++;
if(numoftype!=0) results[2]++;
else results[recordpos[k].w]++;
}
else
{//if flag=1,m must be k+1 and from k there must be a DSB
m=k;//consistent with the calculation of the chem type
numoftype=0;
int numofchem=0;
while(m<i)
{
if(recordpos[m].y-recordpos[k].y<dDSB)
{
if(recordpos[m].w!=recordpos[k].w) numoftype++;
if(recordpos[m].w==1) numofchem++;
m++;
}
else
break;
}
if(numofchem==1) results[6]++;
else if(numoftype!=0) results[5]++;
else results[3+recordpos[k].w]++;
if(m-k==2) complexity[4]++;
else complexity[5]++;
cur_dsb++;
}
k=m;
}
}
if(cur_dsb>1) complexity[6]++;
if(k==i-1)//deal with the last one in a segment
{
complexity[1]++;
results[recordpos[k].w]++;
}
}
FILE* fp= fopen("./Results/finalstat.txt","a");
fprintf(fp, "SSBd SSbi SSbm DSBd DSBi DSBm DSBh\n");
for(int index=0;index<7;index++)
fprintf(fp, "%d ", results[index]);
fprintf(fp, "\n");
fprintf(fp, "SSB 2xSSB SSB+ 2SSB DSB DSB+ DSB++\n");
for(int index=0;index<7;index++)
fprintf(fp, "%d ", complexity[index]);
fprintf(fp, "\n");
fclose(fp);//*/
}
#endif | 6fb24896ef9dde8b6ead23e20d261555c69a349d.cu | #ifndef __REALTIME__
#define __REALTIME__
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <fstream>
#include "microMC_chem.h"
void printDevProp(int device)
// print out device properties
{
int devCount;
cudaDeviceProp devProp;
// device properties
cudaGetDeviceCount(&devCount);
cout << "Number of device: " << devCount << endl;
cout << "Using device #: " << device << endl;
cudaGetDeviceProperties(&devProp, device);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %7.2f MB\n",
devProp.totalGlobalMem/1024.0/1024.0);
printf("Total shared memory per block: %5.2f kB\n",
devProp.sharedMemPerBlock/1024.0);
printf("Total registers per block: %u\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
printf("Maximum dimension of block: %d*%d*%d\n",
devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]);
printf("Maximum dimension of grid: %d*%d*%d\n",
devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]);
printf("Clock rate: %4.2f GHz\n", devProp.clockRate/1000000.0);
printf("Total constant memory: %5.2f kB\n", devProp.totalConstMem/1024.0);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
// obtain computing resource
}
void calDNAreact_radius(float* rDNA,float deltat)
{
float k[5]={6.1,9.2,6.4,6.1,1.8};
float tmp=sqrtf(PI*DiffusionOfOH*deltat*0.001);
for(int i=0;i<5;i++)
{
rDNA[i]=k[i]/(4*PI*DiffusionOfOH)*10/6.023;//k 10^9 L/(mol*s), Diffusion 10^9 nm^2/s. t ps
rDNA[i]=sqrtf(rDNA[i]*tmp+tmp*tmp*0.25)-tmp*0.5;
}
rDNA[5]=0;//histone protein absorption radius, assumed!!!
}
__device__ float caldistance(float3 pos1, float3 pos2)
{
return (sqrtf((pos1.x -pos2.x)*(pos1.x -pos2.x)+(pos1.y -pos2.y)*(pos1.y -pos2.y)+(pos1.z -pos2.z)*(pos1.z -pos2.z)));
}
#if RANDGEO==0
__device__ float3 pos2local(int type, float3 pos, int index)
{
//Coordinate transformation from the global XYZ frame to the local frame of the chromatin box;
//index is the linear index of the referenced box. This lets us reuse the DNA base positions of the two basic types (Straight and Bend).
int x = index%NUCLEUS_DIM;//the x,y,z index of the box
int z = floorf(index/(NUCLEUS_DIM*NUCLEUS_DIM));
int y = floorf((index%(NUCLEUS_DIM*NUCLEUS_DIM))/NUCLEUS_DIM);
//printf("relative to type %d %d %d %d\n", type, x,y,z);
pos.x = pos.x-(2*x + 1 - NUCLEUS_DIM)*UNITLENGTH*0.5;//relative to its center
pos.y = pos.y-(2*y + 1 - NUCLEUS_DIM)*UNITLENGTH*0.5;
pos.z = pos.z-(2*z + 1 - NUCLEUS_DIM)*UNITLENGTH*0.5;
//printf("local coordinate %f %f %f\n", pos.x, pos.y, pos.z);
float xc, yc, zc;
switch(type)
{
//Straight type
case 1:////!!!!!the following needs to be revised and confirmed
{xc = pos.x;
yc = pos.y;
zc = pos.z;
break;}
case 2://-z
{xc = -pos.x;//Ry(pi)
yc = pos.y;
zc = -pos.z;
break;}
case 3://+y
{xc = pos.x;//Rx(pi/2)
yc = -pos.z;
zc = pos.y;
break;}
case 4:
{xc = pos.x;
yc = pos.z;
zc = -pos.y;
break;}
case 5://+x
{xc = -pos.z;//Ry(-pi/2)
yc = pos.y;
zc = pos.x;
break;}
case 6:
{xc = pos.z;
yc = pos.y;
zc = -pos.x;
break;}
case 7://Bend
{xc = pos.x;
yc = pos.y;
zc = pos.z;
break;}
case 8:
{xc = -pos.z;//Rz(pi)Ry(pi/2)
yc = -pos.y;
zc = -pos.x;
break;}
case 9:
{xc = -pos.x;//Rz(pi)
yc = -pos.y;
zc = pos.z;
break;}
case 10:
{xc = -pos.z;//Ry(-pi/2)
yc = pos.y;
zc = pos.x;
break;}
case 11:
{xc = -pos.x;//Ry(pi)
yc = pos.y;
zc = -pos.z;
break;}
case 12:
{xc = pos.z;//Rz(pi)Ry(-pi/2)
yc = -pos.y;
zc = pos.x;
break;}
case 13:
{xc = pos.x;//Rx(pi)
yc = -pos.y;
zc = -pos.z;
break;}
case 14:
{xc = pos.z;//Ry(pi/2)
yc = pos.y;
zc = -pos.x;
break;}
case 15:
{xc = pos.y;//Rz(-pi/2)
yc = -pos.x;
zc = pos.z;
break;}
case 16:
{xc = -pos.z;//Ry(-pi/2)Rz(pi/2)
yc = pos.x;
zc = -pos.y;
break;}
case 17:
{xc = -pos.y;//Rz(pi/2)
yc = pos.x;
zc = pos.z;
break;}
case 18:
{xc = -pos.z;//Rz(-pi/2)Rx(pi/2)
yc = -pos.x;
zc = pos.y;
break;}
case 19:
{xc = pos.y;//Rz(-pi/2)Ry(pi)
yc = pos.x;
zc = -pos.z;
break;}
case 20:
{xc = pos.z;//Rz(-pi/2)Rx(-pi/2)
yc = -pos.x;
zc = pos.y;
break;}
case 21:
{xc = -pos.y;//Rz(pi/2)Ry(pi)
yc = -pos.x;
zc = -pos.z;
break;}
case 22:
{xc = pos.z;//Rz(pi/2)Rx(pi/2)
yc = pos.x;
zc = pos.y;
break;}
case 23:
{xc = pos.x;//Rx(pi/2)
yc = -pos.z;
zc = pos.y;
break;}
case 24:
{xc = -pos.y;//Rz(pi/2)Ry(pi/2)
yc = pos.z;
zc = -pos.x;
break;}
case 25:
{xc = -pos.x;//Rx(pi/2)Ry(pi)
yc = pos.z;
zc = pos.y;
break;}
case 26:
{xc = -pos.y;//Rx(pi/2)Rz(pi/2)
yc = -pos.z;
zc = pos.x;
break;}
case 27:
{xc = pos.x;//Rx(-pi/2)
yc =pos.z;
zc = -pos.y;
break;}
case 28:
{xc = pos.y;//Rx(pi/2)Rz(-pi/2)
yc = -pos.z;
zc = -pos.x;
break;}
case 29:
{xc = -pos.x;//Rx(-pi/2)Ry(pi)
yc = -pos.z;
zc = -pos.y;
break;}
case 30:
{xc = pos.y;//Rz(-pi/2)Ry(-pi/2)
yc = pos.z;
zc = pos.x;
break;}
default:
{printf("wrong type\n"); // for test
break;}
}
pos.x=xc;
pos.y=yc;
pos.z=zc;//*/
return pos;
}
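// Chemistry-stage search kernel: for each reactive event, find its nucleus voxel and scan the 27
// neighboring boxes; the position is transformed into the local frame of the chromatin unit and
// tested against the histone, base and sugar spheres (enlarged by the chemical reaction radii).
// A backbone (sugar) hit is stored in d_recorde with site.w = 1.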
__global__ void chemSearch(int num, Edeposit* d_edrop, int* dev_chromatinIndex,int* dev_chromatinStart,int* dev_chromatinType, CoorBasePair* dev_straightChrom,
CoorBasePair* dev_bendChrom,float3* dev_straightHistone,float3* dev_bendHistone, combinePhysics* d_recorde)
{
int id = blockIdx.x*blockDim.x+ threadIdx.x;
curandState localState = cuseed[id%MAXNUMPAR2];
float3 newpos, pos_cur_target;
int3 index;
CoorBasePair* chrom;
float3 *histone;
int chromNum, histoneNum,flag=0;
while(id<num)
{
d_recorde[id].site.x=-1;//initialize
d_recorde[id].site.y=-1;
d_recorde[id].site.z=-1;
d_recorde[id].site.w=-1;
d_recorde[id].prob2=d_edrop[id].e;
d_recorde[id].prob1=curand_uniform(&localState);
pos_cur_target=d_edrop[id].position;
index.x=floorf((pos_cur_target.x+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
index.y=floorf((pos_cur_target.y+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
index.z=floorf((pos_cur_target.z+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
int delta=index.x+index.y*NUCLEUS_DIM+index.z*NUCLEUS_DIM*NUCLEUS_DIM,minindex=-1;
float distance[3]={100},mindis=100;
for(int i=0;i<27;i++)
{
flag=0;
int newindex = delta+neighborindex[i];
if(newindex<0 || newindex > NUCLEUS_DIM*NUCLEUS_DIM*NUCLEUS_DIM-1) continue;
int type = dev_chromatinType[newindex];
if(type==-1 || type==0) continue;
newpos = pos2local(type, pos_cur_target, newindex);
if(type<7)
{
if(newpos.x<(min1-SPACETOBODER) || newpos.y<(min2-SPACETOBODER) || newpos.z<(min3-SPACETOBODER) ||newpos.x>(max1+SPACETOBODER)
|| newpos.y>(max2+SPACETOBODER) || newpos.z>(max3+SPACETOBODER))
continue;
chrom=dev_straightChrom;
chromNum=STRAIGHT_BP_NUM;
histone=dev_straightHistone;
histoneNum=STRAIGHT_HISTONE_NUM;
}
else
{
if(newpos.x<(min1-SPACETOBODER) || newpos.y<(min2-SPACETOBODER) || newpos.z<(min3-SPACETOBODER) ||newpos.x>(max3+SPACETOBODER)
|| newpos.y>(max2+SPACETOBODER) || newpos.z>(max1+SPACETOBODER))
continue;
chrom=dev_bendChrom;
chromNum=BEND_BP_NUM;
histone=dev_bendHistone;
histoneNum=BEND_HISTONE_NUM;
}
for(int j=0;j<histoneNum;j++)
{
mindis = caldistance(newpos, histone[j])-RHISTONE;
if(mindis < 0) flag=1;
}
if(flag) break;
for(int j=0;j<chromNum;j++)
{
// take the size of the base/sugar into consideration: the centre-to-centre distance is reduced by the corresponding radius r;
mindis=100,minindex=-1;
distance[0] = caldistance(newpos, chrom[j].base)-RBASE;
distance[1] = caldistance(newpos,chrom[j].left)-RSUGAR;
distance[2] = caldistance(newpos,chrom[j].right)-RSUGAR;
for(int iii=0;iii<3;iii++)
{
if(mindis>distance[iii])
{
mindis=distance[iii];
minindex=iii;
}
}
if(mindis<0)
{
if(minindex>0)
{
d_recorde[id].site.x = dev_chromatinIndex[newindex];
d_recorde[id].site.y = dev_chromatinStart[newindex]+j;
d_recorde[id].site.z = 3+minindex;
d_recorde[id].site.w = 1;
}
flag=1;
break;
}
int tmp = floorf(curand_uniform(&localState)/0.25);
distance[0] = caldistance(newpos, chrom[j].base)-RBASE-d_rDNA[tmp];
distance[1] = caldistance(newpos,chrom[j].left)-RSUGAR- d_rDNA[4];
distance[2] = caldistance(newpos,chrom[j].right)-RSUGAR- d_rDNA[4];
for(int iii=0;iii<3;iii++)
{
if(mindis>distance[iii])
{
mindis=distance[iii];
minindex=iii;
}
}
if(mindis<0)
{
if(minindex>0)
{
d_recorde[id].site.x = dev_chromatinIndex[newindex];
d_recorde[id].site.y = dev_chromatinStart[newindex]+j;
d_recorde[id].site.z = 3+minindex;
d_recorde[id].site.w = 1;
}
flag=1;
break;
}
}
if(flag) break;
}
id+=blockDim.x*gridDim.x;
}
cuseed[id%MAXNUMPAR2]=localState;
}
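// Physics-stage search kernel: same neighbor-voxel search as chemSearch, but for energy-deposition
// events; the capture radius RPHYS is added to the base and sugar radii and a backbone hit is
// stored with site.w = 0. prob2 is drawn uniformly in [EMIN, EMAX] for the later energy test.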
__global__ void phySearch(int num, Edeposit* d_edrop, int* dev_chromatinIndex,int* dev_chromatinStart,int* dev_chromatinType, CoorBasePair* dev_straightChrom,
CoorBasePair* dev_bendChrom,float3* dev_straightHistone,float3* dev_bendHistone, combinePhysics* d_recorde)
{
int id = blockIdx.x*blockDim.x+ threadIdx.x;
curandState localState = cuseed[id%MAXNUMPAR2];
float3 newpos, pos_cur_target;
int3 index;
CoorBasePair* chrom;
float3 *histone;
int chromNum, histoneNum,flag=0;
while(id<num)
{
d_recorde[id].site.x=-1;//initialize
d_recorde[id].site.y=-1;
d_recorde[id].site.z=-1;
d_recorde[id].site.w=-1;
d_recorde[id].prob1=d_edrop[id].e;
d_recorde[id].prob2=curand_uniform(&localState)*(EMAX-EMIN)+EMIN;
pos_cur_target=d_edrop[id].position;
index.x=floorf((pos_cur_target.x+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
index.y=floorf((pos_cur_target.y+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
index.z=floorf((pos_cur_target.z+UNITLENGTH*NUCLEUS_DIM/2)/UNITLENGTH);
int delta=index.x+index.y*NUCLEUS_DIM+index.z*NUCLEUS_DIM*NUCLEUS_DIM,minindex=-1;
float distance[3]={100},mindis=100;
for(int i=0;i<27;i++)
{
flag=0;
int newindex = delta+neighborindex[i];
if(newindex<0 || newindex > NUCLEUS_DIM*NUCLEUS_DIM*NUCLEUS_DIM-1) continue;
int type = dev_chromatinType[newindex];
if(type==-1 || type==0) continue;
newpos = pos2local(type, pos_cur_target, newindex);
if(type<7)
{
if(newpos.x<(min1-SPACETOBODER) || newpos.y<(min2-SPACETOBODER) || newpos.z<(min3-SPACETOBODER) ||newpos.x>(max1+SPACETOBODER)
|| newpos.y>(max2+SPACETOBODER) || newpos.z>(max3+SPACETOBODER))
continue;
chrom=dev_straightChrom;
chromNum=STRAIGHT_BP_NUM;
histone=dev_straightHistone;
histoneNum=STRAIGHT_HISTONE_NUM;
}
else
{
if(newpos.x<(min1-SPACETOBODER) || newpos.y<(min2-SPACETOBODER) || newpos.z<(min3-SPACETOBODER) ||newpos.x>(max3+SPACETOBODER)
|| newpos.y>(max2+SPACETOBODER) || newpos.z>(max1+SPACETOBODER))
continue;
chrom=dev_bendChrom;
chromNum=BEND_BP_NUM;
histone=dev_bendHistone;
histoneNum=BEND_HISTONE_NUM;
}
for(int j=0;j<histoneNum;j++)
{
mindis = caldistance(newpos, histone[j])-RHISTONE;
if(mindis < 0) flag=1;
}
if(flag) break;
for(int j=0;j<chromNum;j++)
{
// take the size of the base/sugar into consideration: the centre-to-centre distance is reduced by the corresponding radius r;
mindis=100,minindex=-1;
distance[0] = caldistance(newpos, chrom[j].base)-RBASE-RPHYS;
distance[1] = caldistance(newpos,chrom[j].left)-RSUGAR- RPHYS;
distance[2] = caldistance(newpos,chrom[j].right)-RSUGAR- RPHYS;
for(int iii=0;iii<3;iii++)
{
if(mindis>distance[iii])
{
mindis=distance[iii];
minindex=iii;
}
}
if(mindis<0)
{
if(minindex>0)
{
d_recorde[id].site.x = dev_chromatinIndex[newindex];
d_recorde[id].site.y = dev_chromatinStart[newindex]+j;
d_recorde[id].site.z = 3+minindex;
d_recorde[id].site.w = 0;
}
flag=1;
}
}
if(flag) break;
}
//if(id%(blockDim.x*gridDim.x)==0) printf("id is %d\n", id);
id+=blockDim.x*gridDim.x;//*/
}
cuseed[id%MAXNUMPAR2]=localState;
}//*/
#endif
/***********************************************************************************/
Edeposit* readStage(int *numPhy,int mode)
/*******************************************************************
c* Reads electron reactive events from the physics stage result  *
c* Sets up the electron events as a list for the DNA damage search *
output: *numPhy
        the number of effective physics damage events
c******************************************************************/
{
int start,stop;
float data[4];
ifstream infile;
if(mode==0) {infile.open("./Results/totalphy.dat",ios::binary);printf("physics results: Reading ./Results/totalphy.dat\n");}
else {infile.open("./Results/totalchem.dat",ios::binary);printf("chemistry results: Reading ./Results/totalchem.dat\n");}
start=infile.tellg();
infile.seekg(0, ios::end);
stop=infile.tellg();
(*numPhy)=(stop-start)/16;
if(*numPhy==0) {infile.close();return NULL;}
infile.seekg(0, ios::beg);
Edeposit *hs = (Edeposit*)malloc(sizeof(Edeposit)*(*numPhy));
for(int i=0;i<(*numPhy);i++)
{
infile.read(reinterpret_cast <char*> (&data), sizeof(data));
if(i<8) printf("x y z e %f %f %f %f\n", data[0],data[1],data[2],data[3]);
hs[i].position.x=data[0];
hs[i].position.y=data[1];
hs[i].position.z=data[2];
if(mode==0) hs[i].e=data[3];
else hs[i].e=1-PROBCHEM;
}
infile.close();
return hs;
}
void quicksort(chemReact* hits,int start, int stop, int sorttype)
{
//CPU sort function for ordering chemReacts in cpu memory
switch(sorttype)
{
case 1:
{ sort(hits+start,hits+stop,compare1);
break;
}
case 2:
{ sort(hits+start,hits+stop,compare2);
break;
}
default:
{ sort(hits+start,hits+stop,compare1);
break;
}
}
}
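// Merge damage records that hit the same chromatin site: for the physics stage (mode==0) the
// deposited energies (prob1) are summed, otherwise the probabilities (prob2) are multiplied.
// Only records passing the acceptance test prob2 < prob1 are returned as chemReact entries,
// and *totalphy is updated to the number of accepted records.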
chemReact* combinePhy(int* totalphy, combinePhysics* recorde,int mode)
{
int counts=(*totalphy);
sort(recorde,recorde+counts,compare3);
int j,num=0;
for(int i=0; i<counts;)
{
if(recorde[i].site.z==-1) {i++;continue;}
j=i+1;
while(recorde[j].site.x==recorde[i].site.x)
{
if(recorde[j].site.y==recorde[i].site.y && recorde[j].site.z==recorde[i].site.z)
{
if(mode==0) recorde[i].prob1 +=recorde[j].prob1;
else recorde[i].prob2 *= recorde[j].prob2;
recorde[j].site.z=-1;
}
j++;
if(j==counts) break;
}
i++;
}
for(int i=0;i<counts;i++)
{
if(recorde[i].site.z!=-1 && recorde[i].prob2<recorde[i].prob1)
{
num++;
}
}
if(num==0) {(*totalphy)=0;return NULL;}
chemReact* recordPhy=(chemReact*) malloc(sizeof(chemReact)*num);
int index=0;
for(int i=0;i<counts;i++)
{
if(recorde[i].site.z!=-1 && recorde[i].prob2<recorde[i].prob1)
{
recordPhy[index].x=recorde[i].site.x;
recordPhy[index].y=recorde[i].site.y;
recordPhy[index].z=recorde[i].site.z;
recordPhy[index].w=recorde[i].site.w;
index++;
}
}
(*totalphy)=num;
return recordPhy;
}
void damageAnalysis(int counts, chemReact* recordpos)
{
// Classify the recorded break sites into SSB/DSB damage categories and append the counts to ./Results/finalstat.txt.
// Note: it seems that currently only the total SSB and DSB counts are correct;
// be careful when using the number in each category!!
if(counts==0) return;
char buffer[256];
int complexity[7]={0};//SSB,2xSSB, SSB+, 2SSB, DSB, DSB+, DSB++
int results[7]={0};//SSBd, SSbi, SSbm, DSBd, DSBi, DSBm, DSBh.
quicksort(recordpos,0,counts,1);
int start=0,m,numofstrand,numoftype,k,cur_dsb;
for(int i=0; i<counts;)
{
if(recordpos[i].z==-1) {i++;continue;}
start=i;
while(i<counts)
{
if(recordpos[i].x==recordpos[start].x) i++;
else break;
}
if(i==start+1)//only one break on the DNA
{
complexity[0]++;
results[recordpos[start].w]++;
continue;//find breaks in another DNA
}
if(i>start+1) quicksort(recordpos,start,i,2);//order damage sites so that search can be done
cur_dsb=0;
for(k=start;k<i-1;)//more than one break
{
if(recordpos[k+1].y-recordpos[k].y>dS)
{
complexity[1]++;
results[recordpos[k].w]++;
k++;
continue;
}
else
{
m=k+1;
numoftype=0;
numofstrand=0;
int flag=0;//means SSB, 1 for DSB
while(m<i)
{
if( recordpos[m].z!=recordpos[m-1].z)//recordpos[m].y-recordpos[m-1].y<dDSB &&
{
numofstrand++;
if(recordpos[m].w!=recordpos[k].w) numoftype++;
int j=m;
int tmptype=0;
for(;j>k-1;j--)
{
if(recordpos[m].y-recordpos[j].y>dDSB) break;
if(recordpos[j].w!=recordpos[k].w) tmptype++;
}
if(j==k-1) flag=1;//DSB
else if(j==k && m==k+1) flag=2;//2SSB
else {m=j+1;numoftype-=tmptype;}
break;
}
if(recordpos[m].y-recordpos[k].y>dS) {m--;break;}//SSB+
if(recordpos[m].w!=recordpos[k].w) numoftype++;
m++;
}
if(flag==0)
{
complexity[2]++;
if(numoftype!=0) results[2]++;
else results[recordpos[k].w]++;//=m-k;
}
else if(flag==2)
{
complexity[3]++;
if(numoftype!=0) results[2]++;
else results[recordpos[k].w]++;
}
else
{//if flag=1,m must be k+1 and from k there must be a DSB
m=k;//consistent with the calculation of the chem type
numoftype=0;
int numofchem=0;
while(m<i)
{
if(recordpos[m].y-recordpos[k].y<dDSB)
{
if(recordpos[m].w!=recordpos[k].w) numoftype++;
if(recordpos[m].w==1) numofchem++;
m++;
}
else
break;
}
if(numofchem==1) results[6]++;
else if(numoftype!=0) results[5]++;
else results[3+recordpos[k].w]++;
if(m-k==2) complexity[4]++;
else complexity[5]++;
cur_dsb++;
}
k=m;
}
}
if(cur_dsb>1) complexity[6]++;
if(k==i-1)//deal with the last one in a segment
{
complexity[1]++;
results[recordpos[k].w]++;
}
}
FILE* fp= fopen("./Results/finalstat.txt","a");
fprintf(fp, "SSBd SSbi SSbm DSBd DSBi DSBm DSBh\n");
for(int index=0;index<7;index++)
fprintf(fp, "%d ", results[index]);
fprintf(fp, "\n");
fprintf(fp, "SSB 2xSSB SSB+ 2SSB DSB DSB+ DSB++\n");
for(int index=0;index<7;index++)
fprintf(fp, "%d ", complexity[index]);
fprintf(fp, "\n");
fclose(fp);//*/
}
#endif |
0626c6c908b4c9649ea5776836c85c5bb1edf069.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "RoIAlign.h"
#include "../../../include/common_gpu.h"
#include "fp16.h"
using std::max;
using std::min;
//CUDA_1D_KERNEL_LOOP(i, nboxes) {
//CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) {
//CUDA_2D_KERNEL_LOOP(box_idx, KA, img_idx, num_images) {
namespace nvinfer1 {
RoIAlignLayerPlugin::RoIAlignLayerPlugin(const int cudaThread /*= 512*/) :
mThreadCount(cudaThread) {
/*mClassCount = CLASS_NUM;
mRoIAlignKernel.clear();
mRoIAlignKernel.push_back(yolo1);
mRoIAlignKernel.push_back(yolo2);
mRoIAlignKernel.push_back(yolo3);
mKernelCount = mRoIAlignKernel.size();*/
}
RoIAlignLayerPlugin::~RoIAlignLayerPlugin() {
if (mInputBuffer)
CUDA_CHECK(hipHostFree(mInputBuffer));
if (mOutputBuffer)
CUDA_CHECK(hipHostFree(mOutputBuffer));
}
// create the plugin at runtime from a byte stream
RoIAlignLayerPlugin::RoIAlignLayerPlugin(const void* data, size_t length) {
}
void RoIAlignLayerPlugin::serialize(void* buffer)
{
}
size_t RoIAlignLayerPlugin::getSerializationSize()
{
return 0;
}
int RoIAlignLayerPlugin::initialize()
{
return 0;
}
Dims RoIAlignLayerPlugin::getOutputDimensions(int index, const Dims * inputs, int nbInputDims)
{
assert(nbInputDims == 4);
mFeatureMap_C = inputs[0].d[0];
mFeatureMap_H = inputs[0].d[1];
mFeatureMap_W = inputs[0].d[2];
mRois_H = inputs[0].d[0];
mRois_W = inputs[0].d[1];
//X shape : 1, 256, 56, 56 R shape : 841, 5
//X shape : 1, 256, 56, 56 R shape : 131, 5
//X shape : 1, 256, 56, 56 R shape : 27, 5
//X shape : 1, 256, 56, 56 R shape : 1, 5
// 841, 256, 7,7
// 131, 256, 7,7
// 27, 256, 7,7
// 1, 256, 7,7
// Concat -> 1000, 256, 7,7?
return DimsCHW(mRois_H, pooled_height, pooled_width);
}
/*
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
*/
//==============================================================================================
template <typename T>
__device__ T bilinear_interpolate(const T * bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
}
else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
}
else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(const int nthreads,
const T * bottom_data,
const T spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
const T * bottom_rois, T * top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T * offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = (int)offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T * offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename DType>
void RoIAlignLayerPlugin::forwardGpu( const DType* features,
const DType* rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
hipStream_t stream,
const int num_rois,
const int channels,
const int height,
const int width,
DType* output) {
auto output_size = num_rois * pooled_height * pooled_width * channels;
// 841, 256, 7,7
// 131, 256, 7,7
// 27, 256, 7,7
// 1, 256, 7,7
// Concat -> 1000, 256, 7,7?
//hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(GET_BLOCKS_COUNT_IN_GRID(output_size));//std::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(CUDA_NUM_THREADS);
//if (output.numel() == 0) {
// hipGetLastError();
// return output;
//}
RoIAlignForward<DType> << <grid, block, 0, stream >> > (output_size,
features,
spatial_scale, channels,
height, width,
pooled_height, pooled_width,
sampling_ratio,
rois,
output);// .data<scalar_t>());
hipGetLastError();
return;
}
int RoIAlignLayerPlugin::enqueue(int batchSize,
const void*const * inputs,
void** outputs,
void* workspace, hipStream_t stream){
assert(batchSize == 1);
switch (mDataType){
// bottom: "fpn_resXf_sum" <- Featuremap from Conv
// bottom: "rois_fpnX" <- RoIs
case DataType::kFLOAT:
forwardGpu<float>((const float*)inputs[0],
(const float*)inputs[1],
spatial_scale,
pooled_height,
pooled_width,
sampling_ratio,
stream,
mRois_H,
mFeatureMap_C,
mFeatureMap_H,
mFeatureMap_W,
(float*)outputs[0]);
break;
/*
case DataType::kHALF:
forwardGpu<__half>((const __half*)inputs[0],
(const __half*)inputs[1],
spatial_scale,
pooled_height,
pooled_width,
sampling_ratio,
stream,
mRois_H,
mFeatureMap_C,
mFeatureMap_H,
mFeatureMap_W,
(__half*)outputs[0]);
break;*/
case DataType::kINT8:
forwardGpu<u_int8_t>((const u_int8_t*)inputs[0],
(const u_int8_t*)inputs[1],
spatial_scale,
pooled_height,
pooled_width,
sampling_ratio,
stream,
mRois_H,
mFeatureMap_C,
mFeatureMap_H,
mFeatureMap_W,
(u_int8_t*)outputs[0]);
/*
const DType * features,
const DType * rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
hipStream_t stream,
const int num_rois,
const int channels,
const int height,
const int width,
DType* output */
break;
default:
std::cerr << "error data type" << std::endl;
}
return 0;
};
}
| 0626c6c908b4c9649ea5776836c85c5bb1edf069.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "RoIAlign.h"
#include "../../../include/common_gpu.h"
#include "fp16.h"
using std::max;
using std::min;
//CUDA_1D_KERNEL_LOOP(i, nboxes) {
//CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) {
//CUDA_2D_KERNEL_LOOP(box_idx, KA, img_idx, num_images) {
namespace nvinfer1 {
RoIAlignLayerPlugin::RoIAlignLayerPlugin(const int cudaThread /*= 512*/) :
mThreadCount(cudaThread) {
/*mClassCount = CLASS_NUM;
mRoIAlignKernel.clear();
mRoIAlignKernel.push_back(yolo1);
mRoIAlignKernel.push_back(yolo2);
mRoIAlignKernel.push_back(yolo3);
mKernelCount = mRoIAlignKernel.size();*/
}
RoIAlignLayerPlugin::~RoIAlignLayerPlugin() {
if (mInputBuffer)
CUDA_CHECK(cudaFreeHost(mInputBuffer));
if (mOutputBuffer)
CUDA_CHECK(cudaFreeHost(mOutputBuffer));
}
// create the plugin at runtime from a byte stream
RoIAlignLayerPlugin::RoIAlignLayerPlugin(const void* data, size_t length) {
}
void RoIAlignLayerPlugin::serialize(void* buffer)
{
}
size_t RoIAlignLayerPlugin::getSerializationSize()
{
return 0;
}
int RoIAlignLayerPlugin::initialize()
{
return 0;
}
Dims RoIAlignLayerPlugin::getOutputDimensions(int index, const Dims * inputs, int nbInputDims)
{
assert(nbInputDims == 4);
mFeatureMap_C = inputs[0].d[0];
mFeatureMap_H = inputs[0].d[1];
mFeatureMap_W = inputs[0].d[2];
mRois_H = inputs[0].d[0];
mRois_W = inputs[0].d[1];
//X shape : 1, 256, 56, 56 R shape : 841, 5
//X shape : 1, 256, 56, 56 R shape : 131, 5
//X shape : 1, 256, 56, 56 R shape : 27, 5
//X shape : 1, 256, 56, 56 R shape : 1, 5
// 841, 256, 7,7
// 131, 256, 7,7
// 27, 256, 7,7
// 1, 256, 7,7
// Concat -> 1000, 256, 7,7?
return DimsCHW(mRois_H, pooled_height, pooled_width);
}
/*
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
*/
//==============================================================================================
template <typename T>
__device__ T bilinear_interpolate(const T * bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
}
else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
}
else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(const int nthreads,
const T * bottom_data,
const T spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
const T * bottom_rois, T * top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T * offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = (int)offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T * offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename DType>
void RoIAlignLayerPlugin::forwardGpu( const DType* features,
const DType* rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
cudaStream_t stream,
const int num_rois,
const int channels,
const int height,
const int width,
DType* output) {
auto output_size = num_rois * pooled_height * pooled_width * channels;
// 841, 256, 7,7
// 131, 256, 7,7
// 27, 256, 7,7
// 1, 256, 7,7
// Concat -> 1000, 256, 7,7?
//cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(GET_BLOCKS_COUNT_IN_GRID(output_size));//std::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(CUDA_NUM_THREADS);
//if (output.numel() == 0) {
// cudaGetLastError();
// return output;
//}
RoIAlignForward<DType> << <grid, block, 0, stream >> > (output_size,
features,
spatial_scale, channels,
height, width,
pooled_height, pooled_width,
sampling_ratio,
rois,
output);// .data<scalar_t>());
cudaGetLastError();
return;
}
int RoIAlignLayerPlugin::enqueue(int batchSize,
const void*const * inputs,
void** outputs,
void* workspace, cudaStream_t stream){
assert(batchSize == 1);
switch (mDataType){
// bottom: "fpn_resXf_sum" <- Featuremap from Conv
// bottom: "rois_fpnX" <- RoIs
case DataType::kFLOAT:
forwardGpu<float>((const float*)inputs[0],
(const float*)inputs[1],
spatial_scale,
pooled_height,
pooled_width,
sampling_ratio,
stream,
mRois_H,
mFeatureMap_C,
mFeatureMap_H,
mFeatureMap_W,
(float*)outputs[0]);
break;
/*
case DataType::kHALF:
forwardGpu<__half>((const __half*)inputs[0],
(const __half*)inputs[1],
spatial_scale,
pooled_height,
pooled_width,
sampling_ratio,
stream,
mRois_H,
mFeatureMap_C,
mFeatureMap_H,
mFeatureMap_W,
(__half*)outputs[0]);
break;*/
case DataType::kINT8:
forwardGpu<u_int8_t>((const u_int8_t*)inputs[0],
(const u_int8_t*)inputs[1],
spatial_scale,
pooled_height,
pooled_width,
sampling_ratio,
stream,
mRois_H,
mFeatureMap_C,
mFeatureMap_H,
mFeatureMap_W,
(u_int8_t*)outputs[0]);
/*
const DType * features,
const DType * rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
cudaStream_t stream,
const int num_rois,
const int channels,
const int height,
const int width,
DType* output */
break;
default:
std::cerr << "error data type" << std::endl;
}
return 0;
};
}
|
d274118521e4b4c47deaf6a6643b85e1720c8374.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
//#include <ctime>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
//#include "../lib/cuPrintf.cu"
using namespace std;
typedef double TNum;
#define CSC(call) do { \
hipError_t e = call; \
if (e != hipSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n"\
, __FILE__, __LINE__, hipGetErrorString(e)); \
exit(0); \
} \
} while(0)
//#define EPS .0000001;
//const int32_t BLOCK_DIM = 32;
struct Comparator {
__host__ __device__ bool operator()(TNum a, TNum b) {
return a < b;
}
};
__constant__ int32_t SIZE_N[1];
__constant__ int32_t SIZE_M[1];
__constant__ int32_t SIZE_K[1];
struct Position {
int32_t Row;
int32_t Col;
};
#define IsCorrectPos(i, j, height, width) (i < height && j < width)
#define GetLinearPosition(i, j, height, width) (IsCorrectPos(i, j, height, width) ? \
(j * height + i) : -1)
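// Swap rows row1 and row2 of the column-major matrix a (only columns >= shift) and of the
// right-hand-side block b; used for partial pivoting.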
__global__ void SwapRows(TNum *a, TNum *b, int32_t row1, int32_t row2, int32_t shift) {
int32_t begin = blockDim.x * blockIdx.x + threadIdx.x;
int32_t offset = gridDim.x * blockDim.x;
int32_t col;
TNum tmp;
for (col = begin + shift; col < *SIZE_M; col += offset) {
tmp = a[GetLinearPosition(row1, col, *SIZE_N, *SIZE_M)];
a[GetLinearPosition(row1, col, *SIZE_N, *SIZE_M)] = a[GetLinearPosition(row2, col, *SIZE_N, *SIZE_M)];
a[GetLinearPosition(row2, col, *SIZE_N, *SIZE_M)] = tmp;
}
for (col = begin; col < *SIZE_K; col += offset) {
tmp = b[GetLinearPosition(row1, col, *SIZE_N, *SIZE_K)];
b[GetLinearPosition(row1, col, *SIZE_N, *SIZE_K)] = b[GetLinearPosition(row2, col, *SIZE_N, *SIZE_K)];
b[GetLinearPosition(row2, col, *SIZE_N, *SIZE_K)] = tmp;
}
}
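// Divide the entries of row `row` to the right of the pivot column `shift`, and the whole
// corresponding row of b, by the pivot a(row, shift); skipped when the pivot is (near) zero.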
__global__ void Normalize(TNum *a, TNum *b, int32_t row, int32_t shift) {
if (!(abs(a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)]) > .0000001)) {
return;
}
int32_t begin = blockDim.x * blockIdx.x + threadIdx.x;
int32_t offset = gridDim.x * blockDim.x;
int32_t col;
for (col = begin + shift + 1; col < *SIZE_M; col += offset) {
a[GetLinearPosition(row, col, *SIZE_N, *SIZE_M)] /=
a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)];
}
for (col = begin; col < *SIZE_K; col += offset) {
b[GetLinearPosition(row, col, *SIZE_N, *SIZE_K)] /=
a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)];
}
}
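// Forward elimination: for every row below the pivot row, subtract the pivot row scaled by that
// row's entry in column `shift`, updating both a and the right-hand side b.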
__global__ void GaussFirst(TNum *a, TNum *b, int32_t row, int32_t shift) {
if (!(abs(a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)]) > .0000001)) {
return;
}
/*Position begin = SetPosition(blockDim.x * blockIdx.x + threadIdx.x,
blockDim.y * blockIdx.y + threadIdx.y);
Position offset = SetPosition(blockDim.x * gridDim.x, blockDim.y * gridDim.y);*/
//Position curr = begin;
int32_t beginRow = blockDim.x * blockIdx.x + threadIdx.x;
int32_t beginCol = blockDim.y * blockIdx.y + threadIdx.y;
int32_t offsetRow = blockDim.x * gridDim.x;
int32_t offsetCol = blockDim.y * gridDim.y;
Position curr;
//TNum head;
for (curr.Row = beginRow + row + 1; curr.Row < *SIZE_N; curr.Row += offsetRow) {
//head = a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
if (!(abs(a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)]) > .0000001)) {
continue;
}
for (curr.Col = beginCol + shift + 1; curr.Col < *SIZE_M; curr.Col += offsetCol) {
//cuPrintf("\nA\n");
a[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
for (curr.Col = beginCol; curr.Col < *SIZE_K; curr.Col += offsetCol) {
//cuPrintf("\nB\n");
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
//cuPrintf("\nMAX = %ld\n", max(*SIZE_M, *SIZE_K));
/*for (curr.Col = beginCol; curr.Col < max(*SIZE_M - shift - 1, *SIZE_K); curr.Col += offsetCol) {
//cuPrintf("\nSTEP %d\n", curr.Col);
//cuPrintf("%d >= %d + %d + 1 && %d < %d\n", curr.Col, beginCol, shift, curr.Col, *SIZE_M);
if (curr.Col < *SIZE_M - shift - 1) {
//cuPrintf("\nA\n");
a[GetLinearPosition(curr.Row, (curr.Col + shift + 1), *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, (curr.Col + shift + 1), *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
if (curr.Col < *SIZE_K) {
//cuPrintf("\nB\n");
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
}*/
}
}
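// Backward elimination: for every row above the pivot row, subtract the pivot row scaled by that
// row's entry in column `shift`; only the right-hand side b is updated here.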
__global__ void GaussSecond(TNum *a, TNum *b, int32_t row, int32_t shift) {
/*Position begin = SetPosition(blockDim.x * blockIdx.x + threadIdx.x,
blockDim.y * blockIdx.y + threadIdx.y);
Position offset = SetPosition(blockDim.x * gridDim.x, blockDim.y * gridDim.y);*/
int32_t beginRow = blockDim.x * blockIdx.x + threadIdx.x;
int32_t beginCol = blockDim.y * blockIdx.y + threadIdx.y;
int32_t offsetRow = blockDim.x * gridDim.x;
int32_t offsetCol = blockDim.y * gridDim.y;
Position curr;
for (curr.Row = row - 1 - beginRow; curr.Row >= 0; curr.Row -= offsetRow) {
/*for (curr.Col = begin.Col + shift; curr.Col < *SIZE_M; curr.Col += offset.Col) {
a[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}*/
for (curr.Col = beginCol; curr.Col < *SIZE_K; curr.Col += offsetCol) {
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
}
}
/*__host__ void GaussSecondCPU(TNum *a, TNum *b, int32_t row, int32_t shift) {
Position curr;
for (curr.Row = row - 1; curr.Row >= 0; curr.Row--) {
for (curr.Col = shift; curr.Col >= 0; curr.Col -= offset.Col) {
a[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
for (curr.Col = begin.Col; curr.Col >= 0; curr.Col -= offset.Col) {
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
}
}*/
__host__ void InputMatrix(TNum *matrix, int32_t height, int32_t width) {
for (int32_t i = 0; i < height; i++) {
for (int32_t j = 0; j < width; j++) {
//cin >> matrix[GetLinearPosition(i, j, height, width)];
scanf("%le", matrix + GetLinearPosition(i, j, height, width));
}
}
}
__host__ void PrintMatrix(TNum *matrix, int32_t height, int32_t width) {
for (int32_t i = 0; i < height; i++) {
for (int32_t j = 0; j < width; j++) {
if (j > 0) {
//cout << " ";
printf(" ");
}
//cout << scientific << matrix[GetLinearPosition(i, j, height, width)];
printf("%e", matrix[GetLinearPosition(i, j, height, width)]);
}
cout << endl;
}
}
__host__ int main(void) {
Comparator cmp;
int32_t n, m, k;
//cin >> n >> m >> k;
//scanf("%d%d%d", &n, &m, &k);
scanf("%d", &n);
scanf("%d", &m);
scanf("%d", &k);
///cout << n << " " << m << " " << k << endl;
CSC(hipMemcpyToSymbol(SIZE_N, &n, sizeof(int32_t)));
CSC(hipMemcpyToSymbol(SIZE_M, &m, sizeof(int32_t)));
CSC(hipMemcpyToSymbol(SIZE_K, &k, sizeof(int32_t)));
TNum *a = new TNum[n * m];
TNum *b = new TNum[n * k];
//bool *is_success = new bool;
InputMatrix(a, n, m);
InputMatrix(b, n, k);
TNum *cuda_a;
TNum *cuda_b;
//bool *cuda_is_success;
CSC(hipMalloc((void**) &cuda_a, sizeof(TNum) * n * m));
CSC(hipMalloc((void**) &cuda_b, sizeof(TNum) * n * k));
//CSC(hipMalloc((void**) &cuda_is_success, sizeof(bool)));
CSC(hipMemcpy(cuda_a, a, sizeof(TNum) * n * m, hipMemcpyHostToDevice));
CSC(hipMemcpy(cuda_b, b, sizeof(TNum) * n * k, hipMemcpyHostToDevice));
int32_t row = 0;
int32_t *shifts = new int32_t[n];
//cudaPrintfInit();
memset(shifts, 0, n * sizeof(int32_t));
/*dim3 threads_per_block(n, m);
dim3 blocks_per_grid(1, 1);
if (n * m > BLOCK_DIM * BLOCK_DIM){
threads_per_block.x = BLOCK_DIM;
threads_per_block.y = BLOCK_DIM;
blocks_per_grid.x = ceil((double) (n) / (double)(threads_per_block.x));
blocks_per_grid.y = ceil((double) (m) / (double)(threads_per_block.y));
}*/
for (int32_t col = 0; col < m && row < n; col++) {
/*CSC(hipMemcpy(a, cuda_a, sizeof(TNum) * n * m, hipMemcpyDeviceToHost));
CSC(hipMemcpy(b, cuda_b, sizeof(TNum) * n * k, hipMemcpyDeviceToHost));
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "___" << endl;*/
if (row < n - 1) {
thrust::device_ptr <TNum> cuda_a_begin = thrust::device_pointer_cast(cuda_a);
thrust::device_ptr <TNum> cuda_a_max = thrust::max_element(
cuda_a_begin + GetLinearPosition(row, col, n, m),
cuda_a_begin + (col + 1) * n, cmp);
int32_t row_max_pos = cuda_a_max - cuda_a_begin - GetLinearPosition(0, col, n, m);
//TNum row_value, max_value;
//cout << sizeof(TNum) << endl;
//cout << cuda_a << " : " << cuda_a + n * m * sizeof(TNum) << endl;
//cout << cuda_a + sizeof(TNum) * GetLinearPosition(row, col, n, m) << " : " <<
//cuda_a + sizeof(TNum) * GetLinearPosition(row_max_pos, col, n, m) << endl;
/*CSC(hipMemcpy(&row_value, cuda_a + GetLinearPosition(row, col, n, m),
sizeof(TNum), hipMemcpyDeviceToHost));
CSC(hipMemcpy(&max_value, cuda_a + GetLinearPosition(row_max_pos, col, n, m),
sizeof(TNum), hipMemcpyDeviceToHost));
TNum curr = row_value;*/
//cout << curr << " : " << max_value << endl;
if (row_max_pos != row) {
hipLaunchKernelGGL(( SwapRows), dim3(dim3(1024)), dim3(dim3(1024)), 0, 0, cuda_a, cuda_b, row, row_max_pos, col);
//curr = max_value;
}
/*if (!(abs(curr) > .0000001)) {
//cout << "CURR = " << curr << endl;
//cout << "OUT1" << endl;
continue;
}*/
}/* else {
TNum curr;
//cout << GetLinearPosition(row, col, n, m) << endl;
//cout << row << ":" << col << endl;
CSC(hipMemcpy(&curr, cuda_a + GetLinearPosition(row, col, n, m),
sizeof(TNum), hipMemcpyDeviceToHost));
if (!(abs(curr) > .0000001)) {
//cout << "OUT2" << endl;
continue;
}
}*/
/*CSC(hipMemcpy(a, cuda_a, sizeof(TNum) * n * m, hipMemcpyDeviceToHost));
CSC(hipMemcpy(b, cuda_b, sizeof(TNum) * n * k, hipMemcpyDeviceToHost));
cout << "Col: " << col << endl;
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "~~~" << endl;*/
//cudaPrintfInit();
hipLaunchKernelGGL(( Normalize), dim3(dim3(1024)), dim3(dim3(1024)), 0, 0, cuda_a, cuda_b, row, col);
//bool is_success;
TNum curr;
CSC(hipMemcpy(&curr, cuda_a + GetLinearPosition(row, col, n, m),
sizeof(TNum), hipMemcpyDeviceToHost));
if (!(abs(curr) > .0000001)) {
//cout << "OUT2" << endl;
continue;
}
//cout << (*is_success ? "true" : "false") << endl;
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
/*CSC(hipMemcpy(a, cuda_a, sizeof(TNum) * n * m, hipMemcpyDeviceToHost));
CSC(hipMemcpy(b, cuda_b, sizeof(TNum) * n * k, hipMemcpyDeviceToHost));
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "+++" << endl;*/
if (row < n - 1) {
hipLaunchKernelGGL(( GaussFirst), dim3(dim3(32, 32)), dim3(dim3(32, 32)), 0, 0, cuda_a, cuda_b, row, col);
}
//cout << shifts[row] << " -> " << col << endl;
shifts[row] = col;
row++;
/*CSC(hipMemcpy(a, cuda_a, sizeof(TNum) * n * m, hipMemcpyDeviceToHost));
CSC(hipMemcpy(b, cuda_b, sizeof(TNum) * n * k, hipMemcpyDeviceToHost));
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "===" << endl << endl;*/
}
/*cout << "NEXT!!" << endl;
CSC(hipMemcpy(a, cuda_a, sizeof(TNum) * n * m, hipMemcpyDeviceToHost));
CSC(hipMemcpy(b, cuda_b, sizeof(TNum) * n * k, hipMemcpyDeviceToHost));
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "===" << endl << endl;*/
for (int32_t row_curr = row - 1; row_curr >= 0; row_curr--) {
if (row_curr > 0) {
hipLaunchKernelGGL(( GaussSecond), dim3(dim3(32, 32)), dim3(dim3(32, 32)), 0, 0, cuda_a, cuda_b, row_curr, shifts[row_curr]);
}
/*CSC(hipMemcpy(a, cuda_a, sizeof(TNum) * n * m, hipMemcpyDeviceToHost));
CSC(hipMemcpy(b, cuda_b, sizeof(TNum) * n * k, hipMemcpyDeviceToHost));
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "===" << endl << endl;*/
}
//int32_t *cuda_shifts;
//hipMalloc((void**) &cuda_shifts, sizeof(int32_t) * row);
//hipMemcpy(cuda_shifts, shifts, sizeof(int32_t) * row, hipMemcpyHostToDevice);
//GetResult<<<dim3(32, 32), dim3(32, 32)>>>(cuda_b, cuda_x, cuda_shifts, row, );
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
/*hipEvent_t syncEvent;
hipEventCreate(&syncEvent);
hipEventRecord(syncEvent, 0);
hipEventSynchronize(syncEvent);
hipEventDestroy(syncEvent);*/
//Calculating end
CSC(hipMemcpy(b, cuda_b, sizeof(TNum) * n * k, hipMemcpyDeviceToHost));
CSC(hipFree(cuda_a));
CSC(hipFree(cuda_b));
//hipFree(cuda_x);
//PrintMatrix(cuda_b, shifts, m, k);
TNum zero = 0.;
int32_t untill = 0;
if (row > 0) {
untill = shifts[0];
}
int32_t rows_cnt = 0;
for (int32_t i = 0; i < untill; i++) {
for (int32_t j = 0; j < k; j++) {
//cout << "1: " << shifts[0] << "::" << i << ":" << j << endl;
if (j > 0) {
//cout << " ";
printf(" ");
}
//cout << scientific << zero;
printf("%e", zero);
}
rows_cnt++;
//cout << endl;
printf("\n");
}
//cout << row << endl;
for (int32_t i = 0; i < row; i++) {
if (i > 0) {
for (int32_t ii = 0; ii < shifts[i] - shifts[i - 1] - 1; ii++) {
for (int32_t j = 0; j < k; j++) {
if (j > 0) {
//cout << " ";
printf(" ");
}
//cout << "2: " << i << ":" << j << endl;
//cout << scientific << zero;
printf("%e", zero);
}
rows_cnt++;
//cout << endl;
printf("\n");
}
}
for (int32_t j = 0; j < k; j++) {
if (j > 0) {
//cout << " ";
printf(" ");
}
//cout << "3: " << i << ":" << j << endl;
//cout << scientific << b[GetLinearPosition(i, j, n, k)];
printf("%e", b[GetLinearPosition(i, j, n, k)]);
}
rows_cnt++;
//cout << endl;
printf("\n");
}
//cout << "TEST0" << endl;
//cout << shifts[0] << endl;
//untill = m - shifts[max(0, (int32_t) row - 1)];
for (int32_t i = 0; i < m - rows_cnt; i++) {
for (int32_t j = 0; j < k; j++) {
if (j > 0) {
//cout << " ";
printf(" ");
}
//cout << "4: " << i << ":" << j << endl;
//cout << scientific << zero;
printf("%e", zero);
}
//cout << endl;
printf("\n");
}
//cout << "TEST1" << endl;
/*cout << "SHIFTS:\n";
for (int32_t i = 0; i < row; i++) {
cout << shifts[i] << endl;
}*/
delete [] shifts;
delete [] a;
delete [] b;
//delete [] cuda_x;
return 0;
} | d274118521e4b4c47deaf6a6643b85e1720c8374.cu | #include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
//#include <ctime>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
//#include "../lib/cuPrintf.cu"
using namespace std;
typedef double TNum;
#define CSC(call) do { \
cudaError_t e = call; \
if (e != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n"\
, __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(0); \
} \
} while(0)
//#define EPS .0000001;
//const int32_t BLOCK_DIM = 32;
struct Comparator {
__host__ __device__ bool operator()(TNum a, TNum b) {
return a < b;
}
};
__constant__ int32_t SIZE_N[1];
__constant__ int32_t SIZE_M[1];
__constant__ int32_t SIZE_K[1];
struct Position {
int32_t Row;
int32_t Col;
};
#define IsCorrectPos(i, j, height, width) (i < height && j < width)
#define GetLinearPosition(i, j, height, width) (IsCorrectPos(i, j, height, width) ? \
(j * height + i) : -1)
__global__ void SwapRows(TNum *a, TNum *b, int32_t row1, int32_t row2, int32_t shift) {
int32_t begin = blockDim.x * blockIdx.x + threadIdx.x;
int32_t offset = gridDim.x * blockDim.x;
int32_t col;
TNum tmp;
for (col = begin + shift; col < *SIZE_M; col += offset) {
tmp = a[GetLinearPosition(row1, col, *SIZE_N, *SIZE_M)];
a[GetLinearPosition(row1, col, *SIZE_N, *SIZE_M)] = a[GetLinearPosition(row2, col, *SIZE_N, *SIZE_M)];
a[GetLinearPosition(row2, col, *SIZE_N, *SIZE_M)] = tmp;
}
for (col = begin; col < *SIZE_K; col += offset) {
tmp = b[GetLinearPosition(row1, col, *SIZE_N, *SIZE_K)];
b[GetLinearPosition(row1, col, *SIZE_N, *SIZE_K)] = b[GetLinearPosition(row2, col, *SIZE_N, *SIZE_K)];
b[GetLinearPosition(row2, col, *SIZE_N, *SIZE_K)] = tmp;
}
}
__global__ void Normalize(TNum *a, TNum *b, int32_t row, int32_t shift) {
if (!(abs(a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)]) > .0000001)) {
return;
}
int32_t begin = blockDim.x * blockIdx.x + threadIdx.x;
int32_t offset = gridDim.x * blockDim.x;
int32_t col;
for (col = begin + shift + 1; col < *SIZE_M; col += offset) {
a[GetLinearPosition(row, col, *SIZE_N, *SIZE_M)] /=
a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)];
}
for (col = begin; col < *SIZE_K; col += offset) {
b[GetLinearPosition(row, col, *SIZE_N, *SIZE_K)] /=
a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)];
}
}
__global__ void GaussFirst(TNum *a, TNum *b, int32_t row, int32_t shift) {
if (!(abs(a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)]) > .0000001)) {
return;
}
/*Position begin = SetPosition(blockDim.x * blockIdx.x + threadIdx.x,
blockDim.y * blockIdx.y + threadIdx.y);
Position offset = SetPosition(blockDim.x * gridDim.x, blockDim.y * gridDim.y);*/
//Position curr = begin;
int32_t beginRow = blockDim.x * blockIdx.x + threadIdx.x;
int32_t beginCol = blockDim.y * blockIdx.y + threadIdx.y;
int32_t offsetRow = blockDim.x * gridDim.x;
int32_t offsetCol = blockDim.y * gridDim.y;
Position curr;
//TNum head;
for (curr.Row = beginRow + row + 1; curr.Row < *SIZE_N; curr.Row += offsetRow) {
//head = a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
if (!(abs(a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)]) > .0000001)) {
continue;
}
for (curr.Col = beginCol + shift + 1; curr.Col < *SIZE_M; curr.Col += offsetCol) {
//cuPrintf("\nA\n");
a[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
for (curr.Col = beginCol; curr.Col < *SIZE_K; curr.Col += offsetCol) {
//cuPrintf("\nB\n");
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
//cuPrintf("\nMAX = %ld\n", max(*SIZE_M, *SIZE_K));
/*for (curr.Col = beginCol; curr.Col < max(*SIZE_M - shift - 1, *SIZE_K); curr.Col += offsetCol) {
//cuPrintf("\nSTEP %d\n", curr.Col);
//cuPrintf("%d >= %d + %d + 1 && %d < %d\n", curr.Col, beginCol, shift, curr.Col, *SIZE_M);
if (curr.Col < *SIZE_M - shift - 1) {
//cuPrintf("\nA\n");
a[GetLinearPosition(curr.Row, (curr.Col + shift + 1), *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, (curr.Col + shift + 1), *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
if (curr.Col < *SIZE_K) {
//cuPrintf("\nB\n");
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
}*/
}
}
__global__ void GaussSecond(TNum *a, TNum *b, int32_t row, int32_t shift) {
/*Position begin = SetPosition(blockDim.x * blockIdx.x + threadIdx.x,
blockDim.y * blockIdx.y + threadIdx.y);
Position offset = SetPosition(blockDim.x * gridDim.x, blockDim.y * gridDim.y);*/
int32_t beginRow = blockDim.x * blockIdx.x + threadIdx.x;
int32_t beginCol = blockDim.y * blockIdx.y + threadIdx.y;
int32_t offsetRow = blockDim.x * gridDim.x;
int32_t offsetCol = blockDim.y * gridDim.y;
Position curr;
for (curr.Row = row - 1 - beginRow; curr.Row >= 0; curr.Row -= offsetRow) {
/*for (curr.Col = begin.Col + shift; curr.Col < *SIZE_M; curr.Col += offset.Col) {
a[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}*/
for (curr.Col = beginCol; curr.Col < *SIZE_K; curr.Col += offsetCol) {
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
}
}
/*__host__ void GaussSecondCPU(TNum *a, TNum *b, int32_t row, int32_t shift) {
Position curr;
for (curr.Row = row - 1; curr.Row >= 0; curr.Row--) {
for (curr.Col = shift; curr.Col >= 0; curr.Col -= offset.Col) {
a[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
for (curr.Col = begin.Col; curr.Col >= 0; curr.Col -= offset.Col) {
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
}
}*/
__host__ void InputMatrix(TNum *matrix, int32_t height, int32_t width) {
for (int32_t i = 0; i < height; i++) {
for (int32_t j = 0; j < width; j++) {
//cin >> matrix[GetLinearPosition(i, j, height, width)];
scanf("%le", matrix + GetLinearPosition(i, j, height, width));
}
}
}
__host__ void PrintMatrix(TNum *matrix, int32_t height, int32_t width) {
for (int32_t i = 0; i < height; i++) {
for (int32_t j = 0; j < width; j++) {
if (j > 0) {
//cout << " ";
printf(" ");
}
//cout << scientific << matrix[GetLinearPosition(i, j, height, width)];
printf("%e", matrix[GetLinearPosition(i, j, height, width)]);
}
cout << endl;
}
}
__host__ int main(void) {
Comparator cmp;
int32_t n, m, k;
//cin >> n >> m >> k;
//scanf("%d%d%d", &n, &m, &k);
scanf("%d", &n);
scanf("%d", &m);
scanf("%d", &k);
///cout << n << " " << m << " " << k << endl;
CSC(cudaMemcpyToSymbol(SIZE_N, &n, sizeof(int32_t)));
CSC(cudaMemcpyToSymbol(SIZE_M, &m, sizeof(int32_t)));
CSC(cudaMemcpyToSymbol(SIZE_K, &k, sizeof(int32_t)));
TNum *a = new TNum[n * m];
TNum *b = new TNum[n * k];
//bool *is_success = new bool;
InputMatrix(a, n, m);
InputMatrix(b, n, k);
TNum *cuda_a;
TNum *cuda_b;
//bool *cuda_is_success;
CSC(cudaMalloc((void**) &cuda_a, sizeof(TNum) * n * m));
CSC(cudaMalloc((void**) &cuda_b, sizeof(TNum) * n * k));
//CSC(cudaMalloc((void**) &cuda_is_success, sizeof(bool)));
CSC(cudaMemcpy(cuda_a, a, sizeof(TNum) * n * m, cudaMemcpyHostToDevice));
CSC(cudaMemcpy(cuda_b, b, sizeof(TNum) * n * k, cudaMemcpyHostToDevice));
int32_t row = 0;
int32_t *shifts = new int32_t[n];
//cudaPrintfInit();
memset(shifts, 0, n * sizeof(int32_t));
/*dim3 threads_per_block(n, m);
dim3 blocks_per_grid(1, 1);
if (n * m > BLOCK_DIM * BLOCK_DIM){
threads_per_block.x = BLOCK_DIM;
threads_per_block.y = BLOCK_DIM;
blocks_per_grid.x = ceil((double) (n) / (double)(threads_per_block.x));
blocks_per_grid.y = ceil((double) (m) / (double)(threads_per_block.y));
}*/
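// Forward elimination: for every column, thrust::max_element picks the pivot row in the
// remaining part of that column, SwapRows moves it into place, Normalize scales the pivot
// row, and GaussFirst eliminates the entries below the pivot. shifts[] records which column
// each pivot row ends up on; near-zero pivots are skipped without advancing the row counter.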
for (int32_t col = 0; col < m && row < n; col++) {
/*CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "___" << endl;*/
if (row < n - 1) {
thrust::device_ptr <TNum> cuda_a_begin = thrust::device_pointer_cast(cuda_a);
thrust::device_ptr <TNum> cuda_a_max = thrust::max_element(
cuda_a_begin + GetLinearPosition(row, col, n, m),
cuda_a_begin + (col + 1) * n, cmp);
int32_t row_max_pos = cuda_a_max - cuda_a_begin - GetLinearPosition(0, col, n, m);
//TNum row_value, max_value;
//cout << sizeof(TNum) << endl;
//cout << cuda_a << " : " << cuda_a + n * m * sizeof(TNum) << endl;
//cout << cuda_a + sizeof(TNum) * GetLinearPosition(row, col, n, m) << " : " <<
//cuda_a + sizeof(TNum) * GetLinearPosition(row_max_pos, col, n, m) << endl;
/*CSC(cudaMemcpy(&row_value, cuda_a + GetLinearPosition(row, col, n, m),
sizeof(TNum), cudaMemcpyDeviceToHost));
CSC(cudaMemcpy(&max_value, cuda_a + GetLinearPosition(row_max_pos, col, n, m),
sizeof(TNum), cudaMemcpyDeviceToHost));
TNum curr = row_value;*/
//cout << curr << " : " << max_value << endl;
if (row_max_pos != row) {
SwapRows<<<dim3(1024), dim3(1024)>>>(cuda_a, cuda_b, row, row_max_pos, col);
//curr = max_value;
}
/*if (!(abs(curr) > .0000001)) {
//cout << "CURR = " << curr << endl;
//cout << "OUT1" << endl;
continue;
}*/
}/* else {
TNum curr;
//cout << GetLinearPosition(row, col, n, m) << endl;
//cout << row << ":" << col << endl;
CSC(cudaMemcpy(&curr, cuda_a + GetLinearPosition(row, col, n, m),
sizeof(TNum), cudaMemcpyDeviceToHost));
if (!(abs(curr) > .0000001)) {
//cout << "OUT2" << endl;
continue;
}
}*/
/*CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
cout << "Col: " << col << endl;
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "~~~" << endl;*/
//cudaPrintfInit();
Normalize<<<dim3(1024), dim3(1024)>>>(cuda_a, cuda_b, row, col);
//bool is_success;
TNum curr;
CSC(cudaMemcpy(&curr, cuda_a + GetLinearPosition(row, col, n, m),
sizeof(TNum), cudaMemcpyDeviceToHost));
if (!(abs(curr) > .0000001)) {
//cout << "OUT2" << endl;
continue;
}
//cout << (*is_success ? "true" : "false") << endl;
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
/*CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "+++" << endl;*/
if (row < n - 1) {
GaussFirst<<<dim3(32, 32), dim3(32, 32)>>>(cuda_a, cuda_b, row, col);
}
//cout << shifts[row] << " -> " << col << endl;
shifts[row] = col;
row++;
/*CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "===" << endl << endl;*/
}
/*cout << "NEXT!!" << endl;
CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "===" << endl << endl;*/
for (int32_t row_curr = row - 1; row_curr >= 0; row_curr--) {
if (row_curr > 0) {
GaussSecond<<<dim3(32, 32), dim3(32, 32)>>>(cuda_a, cuda_b, row_curr, shifts[row_curr]);
}
/*CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
PrintMatrix(a, n, m);
cout << "---" << endl;
PrintMatrix(b, n, k);
cout << "===" << endl << endl;*/
}
//int32_t *cuda_shifts;
//cudaMalloc((void**) &cuda_shifts, sizeof(int32_t) * row);
//cudaMemcpy(cuda_shifts, shifts, sizeof(int32_t) * row, cudaMemcpyHostToDevice);
//GetResult<<<dim3(32, 32), dim3(32, 32)>>>(cuda_b, cuda_x, cuda_shifts, row, );
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
/*cudaEvent_t syncEvent;
cudaEventCreate(&syncEvent);
cudaEventRecord(syncEvent, 0);
cudaEventSynchronize(syncEvent);
cudaEventDestroy(syncEvent);*/
//Calculating end
CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
CSC(cudaFree(cuda_a));
CSC(cudaFree(cuda_b));
//cudaFree(cuda_x);
//PrintMatrix(cuda_b, shifts, m, k);
TNum zero = 0.;
int32_t untill = 0;
if (row > 0) {
untill = shifts[0];
}
int32_t rows_cnt = 0;
for (int32_t i = 0; i < untill; i++) {
for (int32_t j = 0; j < k; j++) {
//cout << "1: " << shifts[0] << "::" << i << ":" << j << endl;
if (j > 0) {
//cout << " ";
printf(" ");
}
//cout << scientific << zero;
printf("%e", zero);
}
rows_cnt++;
//cout << endl;
printf("\n");
}
//cout << row << endl;
for (int32_t i = 0; i < row; i++) {
if (i > 0) {
for (int32_t ii = 0; ii < shifts[i] - shifts[i - 1] - 1; ii++) {
for (int32_t j = 0; j < k; j++) {
if (j > 0) {
//cout << " ";
printf(" ");
}
//cout << "2: " << i << ":" << j << endl;
//cout << scientific << zero;
printf("%e", zero);
}
rows_cnt++;
//cout << endl;
printf("\n");
}
}
for (int32_t j = 0; j < k; j++) {
if (j > 0) {
//cout << " ";
printf(" ");
}
//cout << "3: " << i << ":" << j << endl;
//cout << scientific << b[GetLinearPosition(i, j, n, k)];
printf("%e", b[GetLinearPosition(i, j, n, k)]);
}
rows_cnt++;
//cout << endl;
printf("\n");
}
//cout << "TEST0" << endl;
//cout << shifts[0] << endl;
//untill = m - shifts[max(0, (int32_t) row - 1)];
for (int32_t i = 0; i < m - rows_cnt; i++) {
for (int32_t j = 0; j < k; j++) {
if (j > 0) {
//cout << " ";
printf(" ");
}
//cout << "4: " << i << ":" << j << endl;
//cout << scientific << zero;
printf("%e", zero);
}
//cout << endl;
printf("\n");
}
//cout << "TEST1" << endl;
/*cout << "SHIFTS:\n";
for (int32_t i = 0; i < row; i++) {
cout << shifts[i] << endl;
}*/
delete [] shifts;
delete [] a;
delete [] b;
//delete [] cuda_x;
return 0;
} |
f93da9da1be4db1c662054d90c9506c5cc085d81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __CUDNN__
#include "NagOptimizer.hpp"
// template class NagOptimizer<int>;
template class NagOptimizer<float>;
// template class NagOptimizer<double>;
/*!
@brief Kernel that updates the parameter values with the NAG rule.
@details Invoked from UpdateParameterOnGPU.
@details Operates on blocks and threads laid out in one dimension.
@param pDevWeight GPU data of the parameter being updated.
@param pDevAccGradient Accumulated gradient of the parameter being updated.
@param weightDim Number of elements of the parameter being updated.
@param signed_learning_rate The optimizer's learning rate with its sign applied.
@param momentum Momentum factor that adjusts the step size.
@param weightDecayRate Penalty factor applied when the weight parameters grow large.
@param pDevVelocity Velocity buffer pDevVelocity, updated in place.
@see int NagOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pVelocity)
*/
__global__ void NagUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float momentum, float weightDecayRate, float *pDevVelocity) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < weightDim; idx += blockDim.x * gridDim.x) {
float g = pDevAccGradient[idx];
float pre_velo = pDevVelocity[idx];
pDevVelocity[idx] = (momentum * pDevVelocity[idx]) + (signed_learning_rate * g);
pDevWeight[idx] += signed_learning_rate * weightDecayRate * pDevWeight[idx];
pDevWeight[idx] += -momentum * pre_velo + ((1.f + momentum) * pDevVelocity[idx]);
pDevAccGradient[idx] = 0.F;
}
}
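// A minimal worked example of the update above, with illustrative values that are not taken
// from the library: momentum = 0.9, signed_learning_rate = -0.01, weightDecayRate = 0,
// gradient g = 2.0, previous velocity 0, weight 1.0:
//   velocity <- 0.9 * 0 + (-0.01) * 2.0 = -0.02
//   weight   <- weight + (-0.01) * 0 * weight = 1.0 (no weight decay)
//   weight   <- weight - 0.9 * 0 + (1 + 0.9) * (-0.02) = 1.0 - 0.038 = 0.962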
/*!
@brief UpdateParameterOnGPU method of NagOptimizer.
@details Prepares the GPU variables and launches the kernel function.
@details noBlock is the number of blocks used for the GPU computation.
@details threadsPerBlock is the number of threads created per block.
@details m_parameterDim is the number of elements of the parameter being updated.
@details m_pDevData, m_pDevGrad and m_pDevVelocity are the GPU buffers used by the kernel; each is obtained from its CPU-side data through GetGPUData().
@see template<typename DTYPE> DTYPE *LongArray<DTYPE>::GetGPUData(unsigned int pTime)
@details Launches NagUpdate_kernel, passing the number of blocks, the threads per block and the GPU data in the form shown below.
@see __global__ void NagUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float momentum, float weightDecayRate, float *pDevVelocity)
@param *pParameter Pointer to the Operator that holds the Tensor to update.
@param pVelocity Velocity tensor pVelocity to update.
@return TRUE on success.
*/
template<typename DTYPE> int NagOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pVelocity) {
int noBlock = 3, threadsPerBlock = 128;
int m_parameterDim = pParameter->GetResult()->GetCapacity();
GetKernelParameters(m_parameterDim, &noBlock, &threadsPerBlock);
float signed_learning_rate = this->GetOptimizeDirection() * this->GetLearningRate();
float weightDecayRate = this->GetWeightDecayRate();
Tensor<DTYPE> *trainable_data = pParameter->GetResult();
Tensor<DTYPE> *gradient = pParameter->GetGradient();
DTYPE *m_pDevData = trainable_data->GetGPUData();
DTYPE *m_pDevGrad = gradient->GetGPUData();
DTYPE *m_pDevVelocity = pVelocity->GetGPUData();
NagUpdate_kernel << < noBlock, threadsPerBlock >> > (m_pDevData, m_pDevGrad, m_parameterDim, signed_learning_rate, m_momentum, weightDecayRate, m_pDevVelocity);
return TRUE;
}
#endif // ifdef __CUDNN__
| f93da9da1be4db1c662054d90c9506c5cc085d81.cu | #ifdef __CUDNN__
#include "NagOptimizer.hpp"
// template class NagOptimizer<int>;
template class NagOptimizer<float>;
// template class NagOptimizer<double>;
/*!
@brief Kernel that updates the parameter values with the NAG rule.
@details Invoked from UpdateParameterOnGPU.
@details Operates on blocks and threads laid out in one dimension.
@param pDevWeight GPU data of the parameter being updated.
@param pDevAccGradient Accumulated gradient of the parameter being updated.
@param weightDim Number of elements of the parameter being updated.
@param signed_learning_rate The optimizer's learning rate with its sign applied.
@param momentum Momentum factor that adjusts the step size.
@param weightDecayRate Penalty factor applied when the weight parameters grow large.
@param pDevVelocity Velocity buffer pDevVelocity, updated in place.
@see int NagOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pVelocity)
*/
__global__ void NagUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float momentum, float weightDecayRate, float *pDevVelocity) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < weightDim; idx += blockDim.x * gridDim.x) {
float g = pDevAccGradient[idx];
float pre_velo = pDevVelocity[idx];
pDevVelocity[idx] = (momentum * pDevVelocity[idx]) + (signed_learning_rate * g);
pDevWeight[idx] += signed_learning_rate * weightDecayRate * pDevWeight[idx];
pDevWeight[idx] += -momentum * pre_velo + ((1.f + momentum) * pDevVelocity[idx]);
pDevAccGradient[idx] = 0.F;
}
}
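// A minimal worked example of the update above, with illustrative values that are not taken
// from the library: momentum = 0.9, signed_learning_rate = -0.01, weightDecayRate = 0,
// gradient g = 2.0, previous velocity 0, weight 1.0:
//   velocity <- 0.9 * 0 + (-0.01) * 2.0 = -0.02
//   weight   <- weight + (-0.01) * 0 * weight = 1.0 (no weight decay)
//   weight   <- weight - 0.9 * 0 + (1 + 0.9) * (-0.02) = 1.0 - 0.038 = 0.962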
/*!
@brief UpdateParameterOnGPU method of NagOptimizer.
@details Prepares the GPU variables and launches the kernel function.
@details noBlock is the number of blocks used for the GPU computation.
@details threadsPerBlock is the number of threads created per block.
@details m_parameterDim is the number of elements of the parameter being updated.
@details m_pDevData, m_pDevGrad and m_pDevVelocity are the GPU buffers used by the kernel; each is obtained from its CPU-side data through GetGPUData().
@see template<typename DTYPE> DTYPE *LongArray<DTYPE>::GetGPUData(unsigned int pTime)
@details Launches NagUpdate_kernel, passing the number of blocks, the threads per block and the GPU data in the form shown below.
@see __global__ void NagUpdate_kernel(float *pDevWeight, float *pDevAccGradient, int weightDim, float signed_learning_rate, float momentum, float weightDecayRate, float *pDevVelocity)
@param *pParameter Pointer to the Operator that holds the Tensor to update.
@param pVelocity Velocity tensor pVelocity to update.
@return TRUE on success.
*/
template<typename DTYPE> int NagOptimizer<DTYPE>::UpdateParameterOnGPU(Operator<DTYPE> *pParameter, Tensor<DTYPE> *pVelocity) {
int noBlock = 3, threadsPerBlock = 128;
int m_parameterDim = pParameter->GetResult()->GetCapacity();
GetKernelParameters(m_parameterDim, &noBlock, &threadsPerBlock);
float signed_learning_rate = this->GetOptimizeDirection() * this->GetLearningRate();
float weightDecayRate = this->GetWeightDecayRate();
Tensor<DTYPE> *trainable_data = pParameter->GetResult();
Tensor<DTYPE> *gradient = pParameter->GetGradient();
DTYPE *m_pDevData = trainable_data->GetGPUData();
DTYPE *m_pDevGrad = gradient->GetGPUData();
DTYPE *m_pDevVelocity = pVelocity->GetGPUData();
NagUpdate_kernel << < noBlock, threadsPerBlock >> > (m_pDevData, m_pDevGrad, m_parameterDim, signed_learning_rate, m_momentum, weightDecayRate, m_pDevVelocity);
return TRUE;
}
#endif // ifdef __CUDNN__
|
7770a80b0580b18e2403275dc2ce5f21ea2dc164.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <inttypes.h>
#include <vector>
#include "chronoCPU.hpp"
#include "chronoGPU.hpp"
__global__ void addMatricesCUDA(float* a, const float* b, uint w, uint h) {
for(uint y = blockIdx.y * blockDim.y + threadIdx.y ; y < h ; y += gridDim.y * blockDim.y) {
for(uint x = blockIdx.x * blockDim.x + threadIdx.x ; x < w ; x += gridDim.x * blockDim.x) {
const uint i = y * w + x;
a[i] += b[i];
}
}
}
void addMatricesCPU(float* a, const float* b, uint w, uint h) {
for(uint y=0 ; y<h ; ++y) {
for(uint x=0 ; x<w ; ++x) {
const uint i = y * w + x;
a[i] += b[i];
}
}
}
bool eqMatrices(const float* cpu_m, const float* gpu_m, uint w, uint h) {
for(uint y=0 ; y<h ; ++y) {
for(uint x=0 ; x<w ; ++x) {
uint i = y*w + x;
if(fabsf(cpu_m[i] - gpu_m[i]) > 0.0001f) {
printf(
"Results do not match (at x=%u, y=%u): %f (CPU) vs. %f (GPU)\n",
x, y, cpu_m[i], gpu_m[i]
);
return false;
}
}
}
return true;
}
// Convenience helper to align the timing measurements horizontally
void printTimeMs(const char *s, float t) {
printf("%-24s : %f ms\n", s, t);
}
int main(int argc, char *argv[]) {
if(argc < 3) {
printf("Usage: %s <width> <height> [<nthreads_x> <nthreads_y>]\n", argv[0]);
return EXIT_FAILURE;
}
long long llw = strtoll(argv[1], NULL, 0);
long long llh = strtoll(argv[2], NULL, 0);
assert(llw >= 0 && llh >= 0); // This should not really be an assert() here, but it will do
uint w = llw;
uint h = llh;
const size_t nbytes = w*h*sizeof(float);
// 16*16 = 256 threads/tile
// 32*32 = 1024 threads/tile
dim3 tile_size(32, 32);
if(argc >= 5) {
long long tx = strtoll(argv[3], NULL, 0);
long long ty = strtoll(argv[4], NULL, 0);
assert(tx >= 0 && ty >= 0);
tile_size = dim3(tx, ty);
}
dim3 n_tiles(
min(65535, (w + tile_size.x - 1) / tile_size.x),
min(65535, (h + tile_size.y - 1) / tile_size.y)
);
printf(
"Matrix size: %ux%u, Threads: %ux%u, Blocks: %ux%u\n",
w, h, tile_size.x, tile_size.y, n_tiles.x, n_tiles.y
);
ChronoGPU chrGPU;
ChronoCPU chrCPU;
std::vector<float> downloaded_dev_a(w*h);
chrCPU.start();
std::vector<float> a(w*h);
std::vector<float> b(w*h);
chrCPU.stop();
printTimeMs("CPU alloc (2 matrices)", chrCPU.elapsedTime());
float* dev_a = NULL;
float* dev_b = NULL;
chrGPU.start();
hipError_t status_a = hipMalloc((void**) &dev_a, nbytes);
hipError_t status_b = hipMalloc((void**) &dev_b, nbytes);
chrGPU.stop();
printTimeMs("GPU alloc (2 matrices)", chrGPU.elapsedTime());
assert(status_a == hipSuccess);
assert(status_b == hipSuccess);
srand(time(NULL));
for(uint y=0 ; y<h ; ++y) {
for(uint x=0 ; x<w ; ++x) {
uint i = y*w + x;
a[i] = (rand()%100) / 100.f;
b[i] = (rand()%100) / 100.f;
}
}
chrGPU.start();
hipMemcpy(dev_a, a.data(), nbytes, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b.data(), nbytes, hipMemcpyHostToDevice);
chrGPU.stop();
printTimeMs("GPU upload (2 matrices)", chrGPU.elapsedTime());
chrCPU.start();
addMatricesCPU(a.data(), b.data(), w, h);
chrCPU.stop();
printTimeMs("CPU addMatrices", chrCPU.elapsedTime());
chrGPU.start();
hipLaunchKernelGGL(( addMatricesCUDA), dim3(n_tiles), dim3(tile_size), 0, 0, dev_a, dev_b, w, h);
chrGPU.stop();
printTimeMs("GPU addMatrices", chrGPU.elapsedTime());
chrGPU.start();
hipMemcpy(downloaded_dev_a.data(), dev_a, nbytes, hipMemcpyDeviceToHost);
chrGPU.stop();
printTimeMs("GPU download (1 matrix)", chrGPU.elapsedTime());
hipFree(dev_a);
hipFree(dev_b);
if(!eqMatrices(a.data(), downloaded_dev_a.data(), w, h)) {
puts("Failure: matrices don't match!");
return EXIT_FAILURE;
}
puts("Success: matrices do match!");
return EXIT_SUCCESS;
}
| 7770a80b0580b18e2403275dc2ce5f21ea2dc164.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <inttypes.h>
#include <vector>
#include "chronoCPU.hpp"
#include "chronoGPU.hpp"
__global__ void addMatricesCUDA(float* a, const float* b, uint w, uint h) {
for(uint y = blockIdx.y * blockDim.y + threadIdx.y ; y < h ; y += gridDim.y * blockDim.y) {
for(uint x = blockIdx.x * blockDim.x + threadIdx.x ; x < w ; x += gridDim.x * blockDim.x) {
const uint i = y * w + x;
a[i] += b[i];
}
}
}
void addMatricesCPU(float* a, const float* b, uint w, uint h) {
for(uint y=0 ; y<h ; ++y) {
for(uint x=0 ; x<w ; ++x) {
const uint i = y * w + x;
a[i] += b[i];
}
}
}
bool eqMatrices(const float* cpu_m, const float* gpu_m, uint w, uint h) {
for(uint y=0 ; y<h ; ++y) {
for(uint x=0 ; x<w ; ++x) {
uint i = y*w + x;
if(fabsf(cpu_m[i] - gpu_m[i]) > 0.0001f) {
printf(
"Results do not match (at x=%u, y=%u): %f (CPU) vs. %f (GPU)\n",
x, y, cpu_m[i], gpu_m[i]
);
return false;
}
}
}
return true;
}
// Convenience helper to align the timing measurements horizontally
void printTimeMs(const char *s, float t) {
printf("%-24s : %f ms\n", s, t);
}
int main(int argc, char *argv[]) {
if(argc < 3) {
printf("Usage: %s <width> <height> [<nthreads_x> <nthreads_y>]\n", argv[0]);
return EXIT_FAILURE;
}
long long llw = strtoll(argv[1], NULL, 0);
long long llh = strtoll(argv[2], NULL, 0);
assert(llw >= 0 && llh >= 0); // This should not really be an assert() here, but it will do
uint w = llw;
uint h = llh;
const size_t nbytes = w*h*sizeof(float);
// 16*16 = 256 threads/tile
// 32*32 = 1024 threads/tile
dim3 tile_size(32, 32);
if(argc >= 5) {
long long tx = strtoll(argv[3], NULL, 0);
long long ty = strtoll(argv[4], NULL, 0);
assert(tx >= 0 && ty >= 0);
tile_size = dim3(tx, ty);
}
dim3 n_tiles(
min(65535, (w + tile_size.x - 1) / tile_size.x),
min(65535, (h + tile_size.y - 1) / tile_size.y)
);
printf(
"Matrix size: %ux%u, Threads: %ux%u, Blocks: %ux%u\n",
w, h, tile_size.x, tile_size.y, n_tiles.x, n_tiles.y
);
ChronoGPU chrGPU;
ChronoCPU chrCPU;
std::vector<float> downloaded_dev_a(w*h);
chrCPU.start();
std::vector<float> a(w*h);
std::vector<float> b(w*h);
chrCPU.stop();
printTimeMs("CPU alloc (2 matrices)", chrCPU.elapsedTime());
float* dev_a = NULL;
float* dev_b = NULL;
chrGPU.start();
cudaError status_a = cudaMalloc((void**) &dev_a, nbytes);
cudaError status_b = cudaMalloc((void**) &dev_b, nbytes);
chrGPU.stop();
printTimeMs("GPU alloc (2 matrices)", chrGPU.elapsedTime());
assert(status_a == cudaSuccess);
assert(status_b == cudaSuccess);
srand(time(NULL));
for(uint y=0 ; y<h ; ++y) {
for(uint x=0 ; x<w ; ++x) {
uint i = y*w + x;
a[i] = (rand()%100) / 100.f;
b[i] = (rand()%100) / 100.f;
}
}
chrGPU.start();
cudaMemcpy(dev_a, a.data(), nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b.data(), nbytes, cudaMemcpyHostToDevice);
chrGPU.stop();
printTimeMs("GPU upload (2 matrices)", chrGPU.elapsedTime());
chrCPU.start();
addMatricesCPU(a.data(), b.data(), w, h);
chrCPU.stop();
printTimeMs("CPU addMatrices", chrCPU.elapsedTime());
chrGPU.start();
addMatricesCUDA<<<n_tiles, tile_size>>>(dev_a, dev_b, w, h);
chrGPU.stop();
printTimeMs("GPU addMatrices", chrGPU.elapsedTime());
chrGPU.start();
cudaMemcpy(downloaded_dev_a.data(), dev_a, nbytes, cudaMemcpyDeviceToHost);
chrGPU.stop();
printTimeMs("GPU download (1 matrix)", chrGPU.elapsedTime());
cudaFree(dev_a);
cudaFree(dev_b);
if(!eqMatrices(a.data(), downloaded_dev_a.data(), w, h)) {
puts("Failure: matrices don't match!");
return EXIT_FAILURE;
}
puts("Success: matrices do match!");
return EXIT_SUCCESS;
}
|
5f905162b560f003b327407cabf28539a42ec819.hip | // !!! This is a file automatically generated by hipify!!!
/*
Adds the elements of two vectors element by element.
Demonstrates the use of hipHostMalloc() to allocate pinned (page-locked) memory on the host
and hipHostFree() to free it.
To compile: nvcc 01-soma-vet-pinned.cu -o 01-soma-vet-pinned
To run: ./01-soma-vet-pinned
NOTE: the vector size and the vector contents
are fixed in the code
*/
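/*
The program below issues plain blocking hipMemcpy calls. A minimal sketch of what pinned host
memory additionally enables, namely asynchronous stream-ordered copies, could look like the
following. This sketch is illustrative only and reuses the variable names declared in main():

hipStream_t stream;
hipStreamCreate(&stream);
hipMemcpyAsync(vetorA_d, vetorA, tam * sizeof(int), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(vetorB_d, vetorB, tam * sizeof(int), hipMemcpyHostToDevice, stream);
hipLaunchKernelGGL(soma, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, stream,
vetorA_d, vetorB_d, vetorC_d, tam);
hipMemcpyAsync(vetorC, vetorC_d, tam * sizeof(int), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
hipStreamDestroy(stream);
*/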
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
__global__ void soma(int *vetorA, int *vetorB,int *vetorC,int tam)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < tam)
{
vetorC[idx]=vetorA[idx]+vetorB[idx];
}
}
int main(int argc,char **argv)
{
int i,*vetorA,*vetorB,*vetorC,threadsPerBlock,blocksPerGrid;
int *vetorA_d,*vetorB_d,*vetorC_d;
int tam= 16; // 5000;
//Set the number of threads per block
threadsPerBlock = 256;
//Allocate the vectors on the host (pinned memory)
hipHostMalloc((void**)&vetorA,tam*(sizeof(int)));
hipHostMalloc((void**)&vetorB,tam*(sizeof(int)));
hipHostMalloc((void**)&vetorC,tam*(sizeof(int)));
//Allocate the vectors on the device
hipMalloc((void**)&vetorA_d,tam*(sizeof(int)));
hipMalloc((void**)&vetorB_d,tam*(sizeof(int)));
hipMalloc((void**)&vetorC_d,tam*(sizeof(int)));
//Fill the vectors on the host
for(i=0;i<tam;i++)
{
vetorA[i] = i;
vetorB[i] = 0; //-i;
}
//Set the number of blocks per grid
blocksPerGrid=(tam+threadsPerBlock-1)/threadsPerBlock;
//Copy the vector contents to the device
hipMemcpy(vetorA_d,vetorA,tam*(sizeof(int)), hipMemcpyHostToDevice);
hipMemcpy(vetorB_d,vetorB,tam*(sizeof(int)), hipMemcpyHostToDevice);
//Launch the kernel with blocksPerGrid blocks and threadsPerBlock threads
hipLaunchKernelGGL(( soma) , dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, vetorA_d,vetorB_d,vetorC_d,tam);
//Copy the result of the sum back to the host
hipMemcpy(vetorC,vetorC_d,tam*(sizeof(int)), hipMemcpyDeviceToHost);
//Print the result on the host
for(i=0;i<tam;i++)
{
printf("%d ",vetorC[i]);
}
printf("\n");
//Free the vectors on the host
hipHostFree(vetorA);
hipHostFree(vetorB);
hipHostFree(vetorC);
//Free the vectors on the device
hipFree(vetorA_d);
hipFree(vetorB_d);
hipFree(vetorC_d);
} | 5f905162b560f003b327407cabf28539a42ec819.cu | /*
Adds the elements of two vectors element by element.
Demonstrates the use of cudaMallocHost() to allocate pinned (page-locked) memory on the host
and cudaFreeHost() to free it.
To compile: nvcc 01-soma-vet-pinned.cu -o 01-soma-vet-pinned
To run: ./01-soma-vet-pinned
NOTE: the vector size and the vector contents
are fixed in the code
*/
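/*
The program below issues plain blocking cudaMemcpy calls. A minimal sketch of what pinned host
memory additionally enables, namely asynchronous stream-ordered copies, could look like the
following. This sketch is illustrative only and reuses the variable names declared in main():

cudaStream_t stream;
cudaStreamCreate(&stream);
cudaMemcpyAsync(vetorA_d, vetorA, tam * sizeof(int), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(vetorB_d, vetorB, tam * sizeof(int), cudaMemcpyHostToDevice, stream);
soma<<<blocksPerGrid, threadsPerBlock, 0, stream>>>(vetorA_d, vetorB_d, vetorC_d, tam);
cudaMemcpyAsync(vetorC, vetorC_d, tam * sizeof(int), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
*/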
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
__global__ void soma(int *vetorA, int *vetorB,int *vetorC,int tam)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < tam)
{
vetorC[idx]=vetorA[idx]+vetorB[idx];
}
}
int main(int argc,char **argv)
{
int i,*vetorA,*vetorB,*vetorC,threadsPerBlock,blocksPerGrid;
int *vetorA_d,*vetorB_d,*vetorC_d;
int tam= 16; // 5000;
//Set the number of threads per block
threadsPerBlock = 256;
//Allocate the vectors on the host (pinned memory)
cudaMallocHost((void**)&vetorA,tam*(sizeof(int)));
cudaMallocHost((void**)&vetorB,tam*(sizeof(int)));
cudaMallocHost((void**)&vetorC,tam*(sizeof(int)));
//Allocate the vectors on the device
cudaMalloc((void**)&vetorA_d,tam*(sizeof(int)));
cudaMalloc((void**)&vetorB_d,tam*(sizeof(int)));
cudaMalloc((void**)&vetorC_d,tam*(sizeof(int)));
//Fill the vectors on the host
for(i=0;i<tam;i++)
{
vetorA[i] = i;
vetorB[i] = 0; //-i;
}
//Set the number of blocks per grid
blocksPerGrid=(tam+threadsPerBlock-1)/threadsPerBlock;
//Copy the vector contents to the device
cudaMemcpy(vetorA_d,vetorA,tam*(sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(vetorB_d,vetorB,tam*(sizeof(int)), cudaMemcpyHostToDevice);
//Launch the kernel with blocksPerGrid blocks and threadsPerBlock threads
soma <<<blocksPerGrid,threadsPerBlock>>> (vetorA_d,vetorB_d,vetorC_d,tam);
//Copy the result of the sum back to the host
cudaMemcpy(vetorC,vetorC_d,tam*(sizeof(int)), cudaMemcpyDeviceToHost);
//Print the result on the host
for(i=0;i<tam;i++)
{
printf("%d ",vetorC[i]);
}
printf("\n");
//Free the vectors on the host
cudaFreeHost(vetorA);
cudaFreeHost(vetorB);
cudaFreeHost(vetorC);
//Free the vectors on the device
cudaFree(vetorA_d);
cudaFree(vetorB_d);
cudaFree(vetorC_d);
} |
f1f1ce337c0464b06203b19f7248dfee84bf7e93.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include <GL/glew.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <string>
#include <sstream>
#include "../Vector/Vector.h"
#include "../Point/Point.h"
#include "ParallelSteering.cuh"
#include "../Constant/Constant.h"
#pragma once
#ifdef NVCC
#define CU_LAUNCH(...) <<<__VA_ARGS__>>>
#else
#define CU_LAUNCH(...)
#define __launch_bounds__(...)
#define __syncwarp()
#define __syncthreads()
#define __any_sync() (0)
#define __all_sync() (0)
#define __ballot_sync() (0)
#endif
#pragma region Cuda error
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#pragma endregion
#pragma region Cuda Vector
__device__ struct CudaVector
{
public:
float X, Y;
__device__ CudaVector(float x, float y);
};
__device__ CudaVector::CudaVector(float x, float y)
{
this->X = x;
this->Y = y;
}
#pragma endregion
#pragma region Kernel
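// One thread per fish. Each thread applies the classic flocking rules, alignment with nearby
// fish's headings, cohesion towards their average position and separation from fish that are
// too close (subject to each fish's Settings flags), adds avoidance of the mouse cursor, then
// moves its fish and wraps it around the window borders.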
__global__ void MoveFish(Fish *fish, float mouseX, float mouseY)
{
int threadIndex = threadIdx.x;
int blockIndex = blockIdx.x;
threadIndex = (blockIndex * 1024) + threadIndex;
if (threadIndex < FISH_COUNT)
{
float fishPositionX = fish[threadIndex].Position.X;
float fishPositionY = fish[threadIndex].Position.Y;
float fishSize = fish[threadIndex].Settings.size;
CudaVector averageDirection = CudaVector(fish[threadIndex].Direction.X, fish[threadIndex].Direction.Y);
CudaVector groupingDirection = CudaVector(0, 0);
CudaVector antyCrowdingVector = CudaVector(0, 0);
#pragma region Steering
{
CudaVector groupingPoint = CudaVector(fishPositionX, fishPositionY);
int n = 1;
for (int i = 0; i < FISH_COUNT; i++)
{
if (i == threadIndex) continue;
float x = fish[i].Position.X;
float y = fish[i].Position.Y;
float distance_X = (fishPositionX - x);
float distance_Y = (fishPositionY - y);
float distance = sqrt(distance_X*distance_X+distance_Y*distance_Y);
if ( distance < FISH_VIEW_RANGE)
{
#pragma region SteerToTheAverageHeadingOfLocalFlockmates
if (!fish[threadIndex].Settings.independence)
{
averageDirection.X += fish[i].Direction.X;
averageDirection.Y += fish[i].Direction.Y;
}
#pragma endregion
#pragma region SteerToTheAveragePositionOfLocalFlockmates
if (fish[threadIndex].Settings.grouping)
{
groupingPoint.X += x;
groupingPoint.Y += y;
n++;
}
#pragma endregion
}
#pragma region SteerToAvoidCrowdingLocalFlockmates
if (fish[threadIndex].Settings.grouping)
{
float colisionRange = (FISH_COLISION_RANGE * fish[i].Settings.size) + (FISH_COLISION_RANGE * fishSize);
if (distance < colisionRange)
{
antyCrowdingVector.X += (((colisionRange*distance_X) / distance) - distance_X);
antyCrowdingVector.Y += (((colisionRange*distance_Y) / distance) - distance_Y);
}
}
#pragma endregion
}
#pragma region SteerToTheAverageHeadingOfLocalFlockmates
float length = sqrt(averageDirection.X*averageDirection.X+averageDirection.Y*averageDirection.Y);
if (!fish[threadIndex].Settings.independence)
{
if (length > 0.0001)
{
averageDirection.X /= length;
averageDirection.Y /= length;
}
else
{
averageDirection.X = fish[threadIndex].Direction.X;
averageDirection.Y = fish[threadIndex].Direction.Y;
}
}
#pragma endregion
#pragma region SteerToTheAveragePositionOfLocalFlockmates
if (fish[threadIndex].Settings.grouping)
{
groupingPoint.X /= n;
groupingPoint.Y /= n;
groupingDirection.X = (groupingPoint.X - fishPositionX);
groupingDirection.Y = (groupingPoint.Y - fishPositionY);
length = sqrt(groupingDirection.X*groupingDirection.X + groupingDirection.Y*groupingDirection.Y);
if (length > 0.001)
{
groupingDirection.X /= length;
groupingDirection.Y /= length;
}
}
#pragma endregion
}
#pragma endregion
CudaVector avoidMouse = CudaVector(0, 0);
#pragma region AvoidingMouse
float dist = sqrt((mouseX - fishPositionX)*(mouseX - fishPositionX) + (mouseY - fishPositionY)*(mouseY - fishPositionY));
if (dist <= MOUSE_FEAR_DISTANCE)
{
avoidMouse.X = ((fishPositionX - mouseX) / dist) * (MOUSE_FEAR_DISTANCE - dist);
avoidMouse.Y = ((fishPositionY - mouseY) / dist) * (MOUSE_FEAR_DISTANCE - dist);
}
#pragma endregion
__syncthreads();
float x_move = groupingDirection.X + antyCrowdingVector.X * 0.3 + averageDirection.X * fish[threadIndex].Settings.speed + avoidMouse.X;
float y_move = groupingDirection.Y + antyCrowdingVector.Y * 0.3 + averageDirection.Y * fish[threadIndex].Settings.speed + avoidMouse.Y;
fish[threadIndex].Position.X += x_move;
fish[threadIndex].Position.Y += y_move;
#pragma region Window frame
if (fish[threadIndex].Position.X > MATRIX_HALF_WIDTH)
{
fish[threadIndex].Position.X = -MATRIX_HALF_WIDTH;
fish[threadIndex].Position.Y *= -1;
}
if (fish[threadIndex].Position.X < -MATRIX_HALF_WIDTH)
{
fish[threadIndex].Position.X = MATRIX_HALF_WIDTH;
fish[threadIndex].Position.Y *= -1;
}
if (fish[threadIndex].Position.Y > MATRIX_HALF_HEIGHT)
{
fish[threadIndex].Position.X *= -1;
fish[threadIndex].Position.Y = -MATRIX_HALF_HEIGHT;
}
if (fish[threadIndex].Position.Y < -MATRIX_HALF_HEIGHT)
{
fish[threadIndex].Position.X *= -1;
fish[threadIndex].Position.Y = MATRIX_HALF_HEIGHT;
}
#pragma endregion
float directionLength = sqrt((averageDirection.X + avoidMouse.X) * (averageDirection.X + avoidMouse.X) + (averageDirection.Y + avoidMouse.Y)*(averageDirection.Y + avoidMouse.Y));
if (directionLength > 0.0001)
{
fish[threadIndex].Direction.X = (averageDirection.X + avoidMouse.X) / directionLength;
fish[threadIndex].Direction.Y = (averageDirection.Y + avoidMouse.Y) / directionLength;
}
else
{
fish[threadIndex].Direction.X = averageDirection.X;
fish[threadIndex].Direction.Y = averageDirection.Y;
}
}
}
#pragma endregion
Fish* d_fish;
extern "C" void InitParallelSteering()
{
hipMalloc((void**)&d_fish, FISH_COUNT * sizeof(Fish));
}
extern "C" void FinalizeParallelSteering()
{
hipFree(d_fish);
}
extern "C" std::string ParallelSteering(Fish* h_fish, float MouseX, float MouseY)
{
double mallocTime = 0;
double calcualtionTime = 0;
auto start = std::chrono::high_resolution_clock::now();
#pragma region HostToDevice
hipMemcpy(d_fish, h_fish, FISH_COUNT * sizeof(Fish), hipMemcpyHostToDevice);
#pragma endregion
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsedMem = (finish - start);
start = std::chrono::high_resolution_clock::now();
#pragma region Calcualtion
MoveFish << <1 + (FISH_COUNT / 1024), 1024 >> > (d_fish, MouseX, MouseY);
gpuErrchk(hipPeekAtLastError());
hipDeviceSynchronize();
#pragma endregion
finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsedCal = (finish - start);
start = std::chrono::high_resolution_clock::now();
#pragma region DeviceToHost
hipMemcpy(h_fish, d_fish, FISH_COUNT * sizeof(Fish), hipMemcpyDeviceToHost);
#pragma endregion
finish = std::chrono::high_resolution_clock::now();
elapsedMem += (finish - start);
std::stringstream streams;
streams << std::fixed << "| Calcualtion time : " << elapsedCal.count() << "s | Data copying time: " << elapsedMem.count() << "s ";
return streams.str();
}
| f1f1ce337c0464b06203b19f7248dfee84bf7e93.cu | #include <iostream>
#include <chrono>
#include <GL/glew.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <string>
#include <sstream>
#include "../Vector/Vector.h"
#include "../Point/Point.h"
#include "ParallelSteering.cuh"
#include "../Constant/Constant.h"
#pragma once
#ifdef NVCC
#define CU_LAUNCH(...) <<<__VA_ARGS__>>>
#else
#define CU_LAUNCH(...)
#define __launch_bounds__(...)
#define __syncwarp()
#define __syncthreads()
#define __any_sync() (0)
#define __all_sync() (0)
#define __ballot_sync() (0)
#endif
#pragma region Cuda error
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#pragma endregion
#pragma region Cuda Vector
__device__ struct CudaVector
{
public:
float X, Y;
__device__ CudaVector(float x, float y);
};
__device__ CudaVector::CudaVector(float x, float y)
{
this->X = x;
this->Y = y;
}
#pragma endregion
#pragma region Kernel
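// One thread per fish. Each thread applies the classic flocking rules, alignment with nearby
// fish's headings, cohesion towards their average position and separation from fish that are
// too close (subject to each fish's Settings flags), adds avoidance of the mouse cursor, then
// moves its fish and wraps it around the window borders.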
__global__ void MoveFish(Fish *fish, float mouseX, float mouseY)
{
int threadIndex = threadIdx.x;
int blockIndex = blockIdx.x;
threadIndex = (blockIndex * 1024) + threadIndex;
if (threadIndex < FISH_COUNT)
{
float fishPositionX = fish[threadIndex].Position.X;
float fishPositionY = fish[threadIndex].Position.Y;
float fishSize = fish[threadIndex].Settings.size;
CudaVector averageDirection = CudaVector(fish[threadIndex].Direction.X, fish[threadIndex].Direction.Y);
CudaVector groupingDirection = CudaVector(0, 0);
CudaVector antyCrowdingVector = CudaVector(0, 0);
#pragma region Steering
{
CudaVector groupingPoint = CudaVector(fishPositionX, fishPositionY);
int n = 1;
for (int i = 0; i < FISH_COUNT; i++)
{
if (i == threadIndex) continue;
float x = fish[i].Position.X;
float y = fish[i].Position.Y;
float distance_X = (fishPositionX - x);
float distance_Y = (fishPositionY - y);
float distance = sqrt(distance_X*distance_X+distance_Y*distance_Y);
if ( distance < FISH_VIEW_RANGE)
{
#pragma region SteerToTheAverageHeadingOfLocalFlockmates
if (!fish[threadIndex].Settings.independence)
{
averageDirection.X += fish[i].Direction.X;
averageDirection.Y += fish[i].Direction.Y;
}
#pragma endregion
#pragma region SteerToTheAveragePositionOfLocalFlockmates
if (fish[threadIndex].Settings.grouping)
{
groupingPoint.X += x;
groupingPoint.Y += y;
n++;
}
#pragma endregion
}
#pragma region SteerToAvoidCrowdingLocalFlockmates
if (fish[threadIndex].Settings.grouping)
{
float colisionRange = (FISH_COLISION_RANGE * fish[i].Settings.size) + (FISH_COLISION_RANGE * fishSize);
if (distance < colisionRange)
{
antyCrowdingVector.X += (((colisionRange*distance_X) / distance) - distance_X);
antyCrowdingVector.Y += (((colisionRange*distance_Y) / distance) - distance_Y);
}
}
#pragma endregion
}
#pragma region SteerToTheAverageHeadingOfLocalFlockmates
float length = sqrt(averageDirection.X*averageDirection.X+averageDirection.Y*averageDirection.Y);
if (!fish[threadIndex].Settings.independence)
{
if (length > 0.0001)
{
averageDirection.X /= length;
averageDirection.Y /= length;
}
else
{
averageDirection.X = fish[threadIndex].Direction.X;
averageDirection.Y = fish[threadIndex].Direction.Y;
}
}
#pragma endregion
#pragma region SteerToTheAveragePositionOfLocalFlockmates
if (fish[threadIndex].Settings.grouping)
{
groupingPoint.X /= n;
groupingPoint.Y /= n;
groupingDirection.X = (groupingPoint.X - fishPositionX);
groupingDirection.Y = (groupingPoint.Y - fishPositionY);
length = sqrt(groupingDirection.X*groupingDirection.X + groupingDirection.Y*groupingDirection.Y);
if (length > 0.001)
{
groupingDirection.X /= length;
groupingDirection.Y /= length;
}
}
#pragma endregion
}
#pragma endregion
CudaVector avoidMouse = CudaVector(0, 0);
#pragma region AvoidingMouse
float dist = sqrt((mouseX - fishPositionX)*(mouseX - fishPositionX) + (mouseY - fishPositionY)*(mouseY - fishPositionY));
if (dist <= MOUSE_FEAR_DISTANCE)
{
avoidMouse.X = ((fishPositionX - mouseX) / dist) * (MOUSE_FEAR_DISTANCE - dist);
avoidMouse.Y = ((fishPositionY - mouseY) / dist) * (MOUSE_FEAR_DISTANCE - dist);
}
#pragma endregion
__syncthreads();
float x_move = groupingDirection.X + antyCrowdingVector.X * 0.3 + averageDirection.X * fish[threadIndex].Settings.speed + avoidMouse.X;
float y_move = groupingDirection.Y + antyCrowdingVector.Y * 0.3 + averageDirection.Y * fish[threadIndex].Settings.speed + avoidMouse.Y;
fish[threadIndex].Position.X += x_move;
fish[threadIndex].Position.Y += y_move;
#pragma region Window frame
if (fish[threadIndex].Position.X > MATRIX_HALF_WIDTH)
{
fish[threadIndex].Position.X = -MATRIX_HALF_WIDTH;
fish[threadIndex].Position.Y *= -1;
}
if (fish[threadIndex].Position.X < -MATRIX_HALF_WIDTH)
{
fish[threadIndex].Position.X = MATRIX_HALF_WIDTH;
fish[threadIndex].Position.Y *= -1;
}
if (fish[threadIndex].Position.Y > MATRIX_HALF_HEIGHT)
{
fish[threadIndex].Position.X *= -1;
fish[threadIndex].Position.Y = -MATRIX_HALF_HEIGHT;
}
if (fish[threadIndex].Position.Y < -MATRIX_HALF_HEIGHT)
{
fish[threadIndex].Position.X *= -1;
fish[threadIndex].Position.Y = MATRIX_HALF_HEIGHT;
}
#pragma endregion
float directionLength = sqrt((averageDirection.X + avoidMouse.X) * (averageDirection.X + avoidMouse.X) + (averageDirection.Y + avoidMouse.Y)*(averageDirection.Y + avoidMouse.Y));
if (directionLength > 0.0001)
{
fish[threadIndex].Direction.X = (averageDirection.X + avoidMouse.X) / directionLength;
fish[threadIndex].Direction.Y = (averageDirection.Y + avoidMouse.Y) / directionLength;
}
else
{
fish[threadIndex].Direction.X = averageDirection.X;
fish[threadIndex].Direction.Y = averageDirection.Y;
}
}
}
#pragma endregion
Fish* d_fish;
extern "C" void InitParallelSteering()
{
cudaMalloc((void**)&d_fish, FISH_COUNT * sizeof(Fish));
}
extern "C" void FinalizeParallelSteering()
{
cudaFree(d_fish);
}
extern "C" std::string ParallelSteering(Fish* h_fish, float MouseX, float MouseY)
{
double mallocTime = 0;
double calcualtionTime = 0;
auto start = std::chrono::high_resolution_clock::now();
#pragma region HostToDevice
cudaMemcpy(d_fish, h_fish, FISH_COUNT * sizeof(Fish), cudaMemcpyHostToDevice);
#pragma endregion
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsedMem = (finish - start);
start = std::chrono::high_resolution_clock::now();
#pragma region Calcualtion
MoveFish << <1 + (FISH_COUNT / 1024), 1024 >> > (d_fish, MouseX, MouseY);
gpuErrchk(cudaPeekAtLastError());
cudaThreadSynchronize();
#pragma endregion
finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsedCal = (finish - start);
start = std::chrono::high_resolution_clock::now();
#pragma region DeviceToHost
cudaMemcpy(h_fish, d_fish, FISH_COUNT * sizeof(Fish), cudaMemcpyDeviceToHost);
#pragma endregion
finish = std::chrono::high_resolution_clock::now();
elapsedMem += (finish - start);
std::stringstream streams;
streams << std::fixed << "| Calcualtion time : " << elapsedCal.count() << "s | Data copying time: " << elapsedMem.count() << "s ";
return streams.str();
}
|
811c54fddc460cae1fc51e02fcbc1387a36b9c8c.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2018 XIAOLIN WANG ([email protected]; [email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "ParamsMt.h"
#include "Global.h"
namespace cytonMt
{
ParamsMt::ParamsMt()
{
const Option options[] = {
{"mode", "", "train/translate"},
{"saveModel", "", ""},
{"loadModel", "", "load model for continue training or translate"},
{"maxSaveModels", "10","maximum number of saved models"},
{"train", "trainSrc:trainTrg", "source-side and target-side training files, one sentences per line. trainSrc:trainTrg[:weight:trainSrc2:trainSrc2:weight2]"},
{"dev", "devSrc:devTrg", "source-side and target-side development files, one sentences per line"},
{"testInput", "testInput", "input file for translating"},
{"testOutput", "testOutput", "output file for translating"},
{"vocab", "vocabSrc:vocabTrg", "source-side and target-side vocabulary files, one word per line"},
{"srcTrgShareEmbed", "1","share the embedding weight between the source side and the target side"},
{"srcVocabSize", "0", "size of source-side vocabulary, 0 means using whole vocabulary in vocabSrc file"},
{"trgVocabSize", "0", "size of source-side vocabulary, 0 means using whole vocabulary in vocabTrg file"},
{"ignoreUnk", "1", "0/1, 1 means ignoring unknown words"},
{"initParam", "0.1", "initialize weights uniformly in (-initParam, initParam)"},
{"optimization", "SGD", "SGD/Adam"},
{"learningRate", "1", "learning rate"},
{"decayRate", "0.7", "decay factor of learning rate"},
{"decayStart", "1000", "learning rate start to decay from the epoch of decayStart"},
{"decayConti", "0", "0/1, 1 means that learning rate keeps decaying per check once it decays, OpenNMT's mode, "},
{"decayStatus", "0", "0/1, 1 means that learning rate is in a status of decaying, useful for continue training."},
{"epochs", "100", "max epochs of training"},
{"epochStart", "1", "the number of first epoch, useful for continue training"},
{"batchSize", "64", "batch size"},
{"maxSeqLen", "100", "max length of source and target sentence"},
{"embedSize", "512", "size of word embedding"},
{"hiddenSize", "512", "size of hidden states"},
{"numLayers", "2", "number of encoder/decoder layers"},
{"dropout", "0.2", "dropout rate, 0 means disabling dropout"},
{"clipGradient", "5", "threshold for clip gradient"},
{"labelSmooth", "0.1", "factor of smoothing the target labels"},
{"probeFreq", "1", "number of times probing the development likelihood per epoch"},
{"probeMargin", "0.01", "margin for checking whether the development likelihood has increased"},
{"patience", "1", "threshold for decaying the learning rate and restart training from the best model"},
{"beamSize", "10", "size of beam search in translating"},
{"lenPenalty", "0.6", "length penalty"},
{"","",""}
};
addOptions(options);
}
void ParamsMt::init_members()
{
mode=get("mode");
saveModel=get("saveModel");
if(!saveModel.empty())
{
saveModel+="/";
}
loadModel=get("loadModel");
maxSaveModels=geti("maxSaveModels");
trainData=get("train");
devData=get("dev");
testInput=get("testInput");
testOutput=get("testOutput");
vector<string> ts;
XLLib::str2list(get("vocab"),":", ts);
srcTrgShareEmbed=geti("srcTrgShareEmbed");
if(!ts.empty())
{
if(ts.size()==2)
{
srcVocab=ts.at(0);
trgVocab=ts.at(1);
}
else if(ts.size()==1 && srcTrgShareEmbed)
{
srcVocab=ts.at(0);
trgVocab=ts.at(0);
}
else
{
XLLib::printfln("the parameter of vocab is wrong: %s", get("vocab"));
exit(1);
}
}
srcVocabSize=geti("srcVocabSize");
trgVocabSize=geti("trgVocabSize");
ignoreUnk=geti("ignoreUnk");
initParam=getf("initParam");
optimization=get("optimization");
learningRate=getf("learningRate");
decayRate=getf("decayRate");
decayStart=getf("decayStart");
decayConti=geti("decayConti");
decayStatus=geti("decayStatus");
epochs=geti("epochs");
epochStart=geti("epochStart");
cytonLib::batchSize=geti("batchSize");
maxSeqLen=geti("maxSeqLen");
embedSize=geti("embedSize");
XLLib::str2ints(get("hiddenSize"), ":", hiddenSize);
numLayers=geti("numLayers");
dropout=getf("dropout");
clipGradient=getf("clipGradient");
labelSmooth=getf("labelSmooth");
probeFreq=getf("probeFreq");
probeMargin=getf("probeMargin");
patience=geti("patience");
beamSize=geti("beamSize");
lenPenalty=getf("lenPenalty");
if(!loadModel.empty())
{
XLLib::str2list(loadModel, ":", ts);
string tModel=ts.at(0);
int i=tModel.rfind("/");
string tDir=tModel.substr(0,i+1);
string tFile=tDir+"/settings";
XLLib::printfln(os, "load arguments from %s", tFile.c_str());
loadModelParams(tFile);
}
}
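// The "settings" file saved next to a model stores, in this order: numLayers, embedSize,
// hiddenSize (colon-separated), srcVocabSize, trgVocabSize, srcTrgShareEmbed, batchSize and
// maxSeqLen. loadModelParams() reads the fields back in the same order.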
void ParamsMt::saveModelParams(std::string fileName)
{
std::ofstream f(fileName.c_str());
f<<numLayers<<"\n";
f<<embedSize<<"\n";
f<<XLLib::toString_vec_ostream(hiddenSize,":")<<"\n";
f<<srcVocabSize<<"\n";
f<<trgVocabSize<<"\n";
f<< (srcTrgShareEmbed?"1":"0") <<"\n";
f<<cytonLib::batchSize<<"\n";
f<<maxSeqLen<<"\n";
f.close();
}
void ParamsMt::loadModelParams(std::string fileName)
{
std::ifstream f(fileName.c_str());
string t;
f>>numLayers;
f>>embedSize;
getline(f, t);
if(t.empty())
{
getline(f, t);
}
hiddenSize.clear();
XLLib::str2ints(t, ":", hiddenSize);
f>>srcVocabSize;
f>>trgVocabSize;
int td;
f>>td;
srcTrgShareEmbed=td;
f>>cytonLib::batchSize;
int tn;
f>>tn;
maxSeqLen=::max(maxSeqLen, tn);
XLLib::printfln(os, " numLayers %d", numLayers);
XLLib::printfln(os, " embedSize %d", embedSize);
XLLib::printfln(os, " hiddenSize %s", XLLib::toString_vec_ostream(hiddenSize,":").c_str());
XLLib::printfln(os, " srcVocabSize %d", srcVocabSize);
XLLib::printfln(os, " trgVocabSize %d", trgVocabSize);
XLLib::printfln(os, " srcTrgShareEmbed %d", srcTrgShareEmbed);
XLLib::printfln(os, " maxSeqLen %d", maxSeqLen);
f.close();
}
ParamsMt params;
}
| 811c54fddc460cae1fc51e02fcbc1387a36b9c8c.cu | /*
Copyright 2018 XIAOLIN WANG ([email protected]; [email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "ParamsMt.h"
#include "Global.h"
namespace cytonMt
{
ParamsMt::ParamsMt()
{
const Option options[] = {
{"mode", "", "train/translate"},
{"saveModel", "", ""},
{"loadModel", "", "load model for continue training or translate"},
{"maxSaveModels", "10","maximum number of saved models"},
{"train", "trainSrc:trainTrg", "source-side and target-side training files, one sentences per line. trainSrc:trainTrg[:weight:trainSrc2:trainSrc2:weight2]"},
{"dev", "devSrc:devTrg", "source-side and target-side development files, one sentences per line"},
{"testInput", "testInput", "input file for translating"},
{"testOutput", "testOutput", "output file for translating"},
{"vocab", "vocabSrc:vocabTrg", "source-side and target-side vocabulary files, one word per line"},
{"srcTrgShareEmbed", "1","share the embedding weight between the source side and the target side"},
{"srcVocabSize", "0", "size of source-side vocabulary, 0 means using whole vocabulary in vocabSrc file"},
{"trgVocabSize", "0", "size of source-side vocabulary, 0 means using whole vocabulary in vocabTrg file"},
{"ignoreUnk", "1", "0/1, 1 means ignoring unknown words"},
{"initParam", "0.1", "initialize weights uniformly in (-initParam, initParam)"},
{"optimization", "SGD", "SGD/Adam"},
{"learningRate", "1", "learning rate"},
{"decayRate", "0.7", "decay factor of learning rate"},
{"decayStart", "1000", "learning rate start to decay from the epoch of decayStart"},
{"decayConti", "0", "0/1, 1 means that learning rate keeps decaying per check once it decays, OpenNMT's mode, "},
{"decayStatus", "0", "0/1, 1 means that learning rate is in a status of decaying, useful for continue training."},
{"epochs", "100", "max epochs of training"},
{"epochStart", "1", "the number of first epoch, useful for continue training"},
{"batchSize", "64", "batch size"},
{"maxSeqLen", "100", "max length of source and target sentence"},
{"embedSize", "512", "size of word embedding"},
{"hiddenSize", "512", "size of hidden states"},
{"numLayers", "2", "number of encoder/decoder layers"},
{"dropout", "0.2", "dropout rate, 0 means disabling dropout"},
{"clipGradient", "5", "threshold for clip gradient"},
{"labelSmooth", "0.1", "factor of smoothing the target labels"},
{"probeFreq", "1", "number of times probing the development likelihood per epoch"},
{"probeMargin", "0.01", "margin for checking whether the development likelihood has increased"},
{"patience", "1", "threshold for decaying the learning rate and restart training from the best model"},
{"beamSize", "10", "size of beam search in translating"},
{"lenPenalty", "0.6", "length penalty"},
{"","",""}
};
addOptions(options);
}
void ParamsMt::init_members()
{
mode=get("mode");
saveModel=get("saveModel");
if(!saveModel.empty())
{
saveModel+="/";
}
loadModel=get("loadModel");
maxSaveModels=geti("maxSaveModels");
trainData=get("train");
devData=get("dev");
testInput=get("testInput");
testOutput=get("testOutput");
vector<string> ts;
XLLib::str2list(get("vocab"),":", ts);
srcTrgShareEmbed=geti("srcTrgShareEmbed");
if(!ts.empty())
{
if(ts.size()==2)
{
srcVocab=ts.at(0);
trgVocab=ts.at(1);
}
else if(ts.size()==1 && srcTrgShareEmbed)
{
srcVocab=ts.at(0);
trgVocab=ts.at(0);
}
else
{
XLLib::printfln("the parameter of vocab is wrong: %s", get("vocab"));
exit(1);
}
}
srcVocabSize=geti("srcVocabSize");
trgVocabSize=geti("trgVocabSize");
ignoreUnk=geti("ignoreUnk");
initParam=getf("initParam");
optimization=get("optimization");
learningRate=getf("learningRate");
decayRate=getf("decayRate");
decayStart=getf("decayStart");
decayConti=geti("decayConti");
decayStatus=geti("decayStatus");
epochs=geti("epochs");
epochStart=geti("epochStart");
cytonLib::batchSize=geti("batchSize");
maxSeqLen=geti("maxSeqLen");
embedSize=geti("embedSize");
XLLib::str2ints(get("hiddenSize"), ":", hiddenSize);
numLayers=geti("numLayers");
dropout=getf("dropout");
clipGradient=getf("clipGradient");
labelSmooth=getf("labelSmooth");
probeFreq=getf("probeFreq");
probeMargin=getf("probeMargin");
patience=geti("patience");
beamSize=geti("beamSize");
lenPenalty=getf("lenPenalty");
if(!loadModel.empty())
{
XLLib::str2list(loadModel, ":", ts);
string tModel=ts.at(0);
int i=tModel.rfind("/");
string tDir=tModel.substr(0,i+1);
string tFile=tDir+"/settings";
XLLib::printfln(os, "load arguments from %s", tFile.c_str());
loadModelParams(tFile);
}
}
void ParamsMt::saveModelParams(std::string fileName)
{
std::ofstream f(fileName.c_str());
f<<numLayers<<"\n";
f<<embedSize<<"\n";
f<<XLLib::toString_vec_ostream(hiddenSize,":")<<"\n";
f<<srcVocabSize<<"\n";
f<<trgVocabSize<<"\n";
f<< (srcTrgShareEmbed?"1":"0") <<"\n";
f<<cytonLib::batchSize<<"\n";
f<<maxSeqLen<<"\n";
f.close();
}
void ParamsMt::loadModelParams(std::string fileName)
{
std::ifstream f(fileName.c_str());
string t;
f>>numLayers;
f>>embedSize;
getline(f, t);
if(t.empty())
{
getline(f, t);
}
hiddenSize.clear();
XLLib::str2ints(t, ":", hiddenSize);
f>>srcVocabSize;
f>>trgVocabSize;
int td;
f>>td;
srcTrgShareEmbed=td;
f>>cytonLib::batchSize;
int tn;
f>>tn;
maxSeqLen=std::max(maxSeqLen, tn);
XLLib::printfln(os, " numLayers %d", numLayers);
XLLib::printfln(os, " embedSize %d", embedSize);
XLLib::printfln(os, " hiddenSize %s", XLLib::toString_vec_ostream(hiddenSize,":").c_str());
XLLib::printfln(os, " srcVocabSize %d", srcVocabSize);
XLLib::printfln(os, " trgVocabSize %d", trgVocabSize);
XLLib::printfln(os, " srcTrgShareEmbed %d", srcTrgShareEmbed);
XLLib::printfln(os, " maxSeqLen %d", maxSeqLen);
f.close();
}
ParamsMt params;
}
|
3c431b18da6a6615f58224fbacea6446699cfb30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
extern "C" __global__ void generate(const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_0)
{
const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 tmp_2 = tmp_1 / shOut_1;
const Int64 sh2 = tmp_2 % shOut_2;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
const Int64 v0 = (Int64) 0;
const Word8 v1 = v0 == sh2 && (v0 == sh1 && v0 == sh0);
double lv20;
if (v1) {
lv20 = 3.948746e7;
} else {
lv20 = 0.0;
}
arrOut_0[ix] = lv20;
}
}
| 3c431b18da6a6615f58224fbacea6446699cfb30.cu | #include <accelerate_cuda.h>
extern "C" __global__ void generate(const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_0)
{
const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 tmp_2 = tmp_1 / shOut_1;
const Int64 sh2 = tmp_2 % shOut_2;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
const Int64 v0 = (Int64) 0;
const Word8 v1 = v0 == sh2 && (v0 == sh1 && v0 == sh0);
double lv20;
if (v1) {
lv20 = 3.948746e7;
} else {
lv20 = 0.0;
}
arrOut_0[ix] = lv20;
}
}
|
dafd7564bbb9743637d6192a00b092a9174bdd2d.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
#include <helper_math.h>
typedef unsigned int uint;
__device__ float4 calculatePressure(float4* positions, uint index, uint numberOfParticles, float smoothingWidth);
__global__ void particleUpdate(
float4* positions,
float4* velocity,
const float dt,
const float smoothingWidth,
const float4 gravity,
const float4 dimension,
const uint numberOfParticles)
{
const uint tid = threadIdx.x;
positions[tid] += gravity * dt;
}
__device__ float4 calculatePressure(float4* positions, uint index, uint numberOfParticles, float smoothingWidth)
{
float4 particlePosition = positions[index];
float4 pressureVec;
for (uint i = 0; i < numberOfParticles; i++)
{
if (index == i)
continue;
//float3 dirVec = particlePosition - positions[i];
//float dist = length(dirVec);//TODO: maybe use half_length
//if (dist > smoothingWidth * 1.0f)
// continue;
//float pressure = 1.f - (dist / smoothingWidth);
////float pressure = amplitude * exp(-dist / smoothingWidth);
//pressureVec += (float4)(pressure * normalize(dirVec), 0.f);
//// pressureVec += vec4(dirVec, 0.f);
//pressureVec.w = dist;
//break;
}
return pressureVec;
}
extern "C" void cudaUpdate(
float4* positions,
float4* velocity,
const float dt,
const float smoothingWidth,
const float4 gravity,
const float4 dimension,
const unsigned int numberOfParticles)
{
dim3 grid(1, 1, 1);
dim3 threads(numberOfParticles, 1, 1);
hipLaunchKernelGGL(( particleUpdate), dim3(grid), dim3(threads) , 0, 0, positions, velocity, dt, smoothingWidth, gravity, dimension, numberOfParticles);
}
| dafd7564bbb9743637d6192a00b092a9174bdd2d.cu | // System includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
#include <helper_math.h>
typedef unsigned int uint;
__device__ float4 calculatePressure(float4* positions, uint index, uint numberOfParticles, float smoothingWidth);
__global__ void particleUpdate(
float4* positions,
float4* velocity,
const float dt,
const float smoothingWidth,
const float4 gravity,
const float4 dimension,
const uint numberOfParticles)
{
const uint tid = threadIdx.x;
positions[tid] += gravity * dt;
}
__device__ float4 calculatePressure(float4* positions, uint index, uint numberOfParticles, float smoothingWidth)
{
float4 particlePosition = positions[index];
float4 pressureVec;
for (uint i = 0; i < numberOfParticles; i++)
{
if (index == i)
continue;
//float3 dirVec = particlePosition - positions[i];
//float dist = length(dirVec);//TODO: maybe use half_length
//if (dist > smoothingWidth * 1.0f)
// continue;
//float pressure = 1.f - (dist / smoothingWidth);
////float pressure = amplitude * exp(-dist / smoothingWidth);
//pressureVec += (float4)(pressure * normalize(dirVec), 0.f);
//// pressureVec += vec4(dirVec, 0.f);
//pressureVec.w = dist;
//break;
}
return pressureVec;
}
extern "C" void cudaUpdate(
float4* positions,
float4* velocity,
const float dt,
const float smoothingWidth,
const float4 gravity,
const float4 dimension,
const unsigned int numberOfParticles)
{
dim3 grid(1, 1, 1);
dim3 threads(numberOfParticles, 1, 1);
particleUpdate<<< grid, threads >>>(positions, velocity, dt, smoothingWidth, gravity, dimension, numberOfParticles);
}
|
c6fa1d561db7a75e77aa1f67bcf01a847bd11f57.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <math.h>
#include <gloop/gloop.h>
#include <gloop/benchmark.h>
__device__ void gpuMain(gloop::DeviceLoop<>* loop, char* src, int, int, int);
#define MAIN_FS_FILE
void stdavg(double *avg_time, double *avg_thpt, double* std_time, double *std_thpt, const double* times, const double total_data, int arr_len)
{
*avg_time=*avg_thpt=*std_time=*std_thpt=0;
int counter=0;
for( int i=0;i<arr_len;i++){
if (times[i]<=0) continue;
*avg_time+=times[i];
*avg_thpt+=((double)total_data)/times[i];
counter++;
}
if (counter==0) return;
*avg_time/=(double)counter;
*avg_thpt/=(double)counter;
for( int i=0;i<arr_len;i++){
if (times[i]<=0) continue;
*std_time+=(times[i]-*avg_time)*(times[i]-*avg_time);
double tmp=(((double)total_data)/times[i])-*avg_thpt;
*std_thpt+=tmp*tmp;
}
*std_time/=(double)counter;
*std_thpt/=(double)counter;
*std_time=sqrt(*std_time);
*std_thpt=sqrt(*std_thpt);
}
char* update_filename(const char* h_filename){
int n=strlen(h_filename);
assert(n>0);
if (n>GLOOP_FILENAME_SIZE) {
fprintf(stderr,"Filname %s too long, should be only %d symbols including \\0",h_filename,GLOOP_FILENAME_SIZE);
exit (-1);
}
char* d_filename;
CUDA_SAFE_CALL(hipMalloc(&d_filename,n+1));
CUDA_SAFE_CALL(hipMemcpy(d_filename, h_filename, n+1,hipMemcpyHostToDevice));
return d_filename;
}
#include <assert.h>
#define MAX_TRIALS (10)
double time_res[MAX_TRIALS];
double match_threshold;
int global_devicenum;
int main( int argc, char** argv)
{
char* threshold=getenv("GREPTH");
match_threshold=0.01;
if(threshold!=0) match_threshold=strtof(threshold,NULL);
fprintf(stderr,"Match threshold is %f\n",match_threshold);
char* gpudev=getenv("GPUDEVICE");
global_devicenum=0;
if (gpudev!=NULL) global_devicenum=atoi(gpudev);
fprintf(stderr,"GPU device chosen %d\n",global_devicenum);
if(argc<9) {
fprintf(stderr,"<trials> <vblocks> <pblocks> <nthreads> <id> <ioSize> <loopCount> <file>\n");
return -1;
}
int trials=atoi(argv[1]);
assert(trials<=MAX_TRIALS);
int vblocks=atoi(argv[2]);
int pblocks=atoi(argv[3]);
int nthreads=atoi(argv[4]);
int id = atoi(argv[5]);
int ioSize = atoi(argv[6]);
int loopCount = atoi(argv[7]);
fprintf(stderr, " trials:(%d),vblocks:(%d),pblocks:(%d),threads:(%d),id:(%d),ioSize:(%d),loops:(%d),file:(%s)\n", trials, vblocks, pblocks, nthreads, id, ioSize, loopCount, argv[8]);
int num_files=1;
char** d_filenames=NULL;
double total_time=0;
size_t total_size;
std::memset(time_res,0,MAX_TRIALS*sizeof(double));
{
dim3 virtualBlocks(vblocks);
dim3 physicalBlocks(pblocks);
std::unique_ptr<gloop::HostLoop> hostLoop = gloop::HostLoop::create(global_devicenum);
std::unique_ptr<gloop::HostContext> hostContext = gloop::HostContext::create(*hostLoop, physicalBlocks);
{
std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop->kernelLock());
CUDA_SAFE_CALL(hipDeviceSetLimit(hipLimitMallocHeapSize, (256UL << 23)));
if (num_files>0){
d_filenames=(char**)malloc(sizeof(char*)*num_files);
for(int i=0;i<num_files;i++){
d_filenames[i]=update_filename(argv[i+8]);
fprintf(stderr,"file -%s\n",argv[i+8]);
}
}
}
gloop::Benchmark benchmark;
benchmark.begin();
{
hostLoop->launch(*hostContext, virtualBlocks, nthreads, [] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<>* loop, int trials, int ioSize, int loopCount, char* src) {
gpuMain(loop, src, trials, ioSize, loopCount);
}, trials, ioSize, loopCount, d_filenames[0]);
}
benchmark.end();
printf("[%d] ", id);
benchmark.report();
}
return 0;
}
| c6fa1d561db7a75e77aa1f67bcf01a847bd11f57.cu |
#include <cuda.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <math.h>
#include <gloop/gloop.h>
#include <gloop/benchmark.h>
__device__ void gpuMain(gloop::DeviceLoop<>* loop, char* src, int, int, int);
#define MAIN_FS_FILE
void stdavg(double *avg_time, double *avg_thpt, double* std_time, double *std_thpt, const double* times, const double total_data, int arr_len)
{
*avg_time=*avg_thpt=*std_time=*std_thpt=0;
int counter=0;
for( int i=0;i<arr_len;i++){
if (times[i]<=0) continue;
*avg_time+=times[i];
*avg_thpt+=((double)total_data)/times[i];
counter++;
}
if (counter==0) return;
*avg_time/=(double)counter;
*avg_thpt/=(double)counter;
for( int i=0;i<arr_len;i++){
if (times[i]<=0) continue;
*std_time+=(times[i]-*avg_time)*(times[i]-*avg_time);
double tmp=(((double)total_data)/times[i])-*avg_thpt;
*std_thpt+=tmp*tmp;
}
*std_time/=(double)counter;
*std_thpt/=(double)counter;
*std_time=sqrt(*std_time);
*std_thpt=sqrt(*std_thpt);
}
char* update_filename(const char* h_filename){
int n=strlen(h_filename);
assert(n>0);
if (n>GLOOP_FILENAME_SIZE) {
fprintf(stderr,"Filname %s too long, should be only %d symbols including \\0",h_filename,GLOOP_FILENAME_SIZE);
exit (-1);
}
char* d_filename;
CUDA_SAFE_CALL(cudaMalloc(&d_filename,n+1));
CUDA_SAFE_CALL(cudaMemcpy(d_filename, h_filename, n+1,cudaMemcpyHostToDevice));
return d_filename;
}
#include <assert.h>
#define MAX_TRIALS (10)
double time_res[MAX_TRIALS];
double match_threshold;
int global_devicenum;
int main( int argc, char** argv)
{
char* threshold=getenv("GREPTH");
match_threshold=0.01;
if(threshold!=0) match_threshold=strtof(threshold,NULL);
fprintf(stderr,"Match threshold is %f\n",match_threshold);
char* gpudev=getenv("GPUDEVICE");
global_devicenum=0;
if (gpudev!=NULL) global_devicenum=atoi(gpudev);
fprintf(stderr,"GPU device chosen %d\n",global_devicenum);
if(argc<9) {
fprintf(stderr,"<trials> <vblocks> <pblocks> <nthreads> <id> <ioSize> <loopCount> <file>\n");
return -1;
}
int trials=atoi(argv[1]);
assert(trials<=MAX_TRIALS);
int vblocks=atoi(argv[2]);
int pblocks=atoi(argv[3]);
int nthreads=atoi(argv[4]);
int id = atoi(argv[5]);
int ioSize = atoi(argv[6]);
int loopCount = atoi(argv[7]);
fprintf(stderr, " trials:(%d),vblocks:(%d),pblocks:(%d),threads:(%d),id:(%d),ioSize:(%d),loops:(%d),file:(%s)\n", trials, vblocks, pblocks, nthreads, id, ioSize, loopCount, argv[8]);
int num_files=1;
char** d_filenames=NULL;
double total_time=0;
size_t total_size;
std::memset(time_res,0,MAX_TRIALS*sizeof(double));
{
dim3 virtualBlocks(vblocks);
dim3 physicalBlocks(pblocks);
std::unique_ptr<gloop::HostLoop> hostLoop = gloop::HostLoop::create(global_devicenum);
std::unique_ptr<gloop::HostContext> hostContext = gloop::HostContext::create(*hostLoop, physicalBlocks);
{
std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop->kernelLock());
CUDA_SAFE_CALL(cudaDeviceSetLimit(cudaLimitMallocHeapSize, (256UL << 23)));
if (num_files>0){
d_filenames=(char**)malloc(sizeof(char*)*num_files);
for(int i=0;i<num_files;i++){
d_filenames[i]=update_filename(argv[i+8]);
fprintf(stderr,"file -%s\n",argv[i+8]);
}
}
}
gloop::Benchmark benchmark;
benchmark.begin();
{
hostLoop->launch(*hostContext, virtualBlocks, nthreads, [] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<>* loop, int trials, int ioSize, int loopCount, char* src) {
gpuMain(loop, src, trials, ioSize, loopCount);
}, trials, ioSize, loopCount, d_filenames[0]);
}
benchmark.end();
printf("[%d] ", id);
benchmark.report();
}
return 0;
}
|
8b9ba169f6d5a71dec916c8f547f56cfa569b2d7.hip | // !!! This is a file automatically generated by hipify!!!
/**************************************************************************
* Copyright (c) 2017-2019 by the mfmg authors *
* All rights reserved. *
* *
* This file is part of the mfmg library. mfmg is distributed under a BSD *
* 3-clause license. For the licensing terms see the LICENSE file in the *
* top-level directory *
* *
* SPDX-License-Identifier: BSD-3-Clause *
*************************************************************************/
#define BOOST_TEST_MODULE utils
#include <mfmg/cuda/sparse_matrix_device.cuh>
#include <mfmg/cuda/utils.cuh>
#include <deal.II/lac/la_parallel_vector.h>
#include <random>
#include <set>
#include "main.cc"
BOOST_AUTO_TEST_CASE(serial_mv)
{
MPI_Comm comm = MPI_COMM_WORLD;
unsigned int const comm_size = dealii::Utilities::MPI::n_mpi_processes(comm);
if (comm_size == 1)
{
hipsparseHandle_t cusparse_handle = nullptr;
hipsparseStatus_t cusparse_error_code;
cusparse_error_code = hipsparseCreate(&cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
// Build the sparse matrix on the host
unsigned int const size = 10;
dealii::IndexSet parallel_partitioning(size);
for (unsigned int i = 0; i < size; ++i)
parallel_partitioning.add_index(i);
parallel_partitioning.compress();
dealii::TrilinosWrappers::SparseMatrix sparse_matrix(parallel_partitioning);
unsigned int nnz = 0;
for (unsigned int i = 0; i < size; ++i)
{
std::default_random_engine generator(i);
std::uniform_int_distribution<int> distribution(0, size - 1);
std::set<int> column_indices;
for (unsigned int j = 0; j < 5; ++j)
{
int column_index = distribution(generator);
sparse_matrix.set(i, column_index, static_cast<double>(i + j));
column_indices.insert(column_index);
}
nnz += column_indices.size();
}
sparse_matrix.compress(dealii::VectorOperation::insert);
// Move the sparse matrix to the device and change the format to a regular
// CSR
mfmg::SparseMatrixDevice<double> sparse_matrix_dev =
mfmg::convert_matrix(sparse_matrix);
hipsparseMatDescr_t descr;
cusparse_error_code = hipsparseCreateMatDescr(&descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
sparse_matrix_dev.descr = descr;
sparse_matrix_dev.cusparse_handle = cusparse_handle;
// Build a vector on the host
dealii::LinearAlgebra::distributed::Vector<double> vector(
parallel_partitioning, comm);
unsigned int vector_local_size = vector.local_size();
for (unsigned int i = 0; i < vector_local_size; ++i)
vector[i] = i;
// Move the vector to the device
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
vector_dev(vector.get_partitioner());
hipError_t cuda_error_code;
cuda_error_code =
hipMemcpy(vector_dev.get_values(), vector.begin(),
vector_local_size * sizeof(double), hipMemcpyHostToDevice);
mfmg::ASSERT_CUDA(cuda_error_code);
// Perform the matrix-vector multiplication on the host
dealii::LinearAlgebra::distributed::Vector<double> result(vector);
sparse_matrix.vmult(result, vector);
// Perform the matrix-vector multiplication on the device
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
result_dev(vector.get_partitioner());
sparse_matrix_dev.vmult(result_dev, vector_dev);
// Check the result
std::vector<double> result_host(vector_local_size);
mfmg::cuda_mem_copy_to_host(result_dev.get_values(), result_host);
for (unsigned int i = 0; i < vector_local_size; ++i)
BOOST_CHECK_CLOSE(result[i], result_host[i], 1e-14);
// Destroy cusparse_handle
cusparse_error_code = hipsparseDestroy(cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_handle = nullptr;
}
}
BOOST_AUTO_TEST_CASE(distributed_mv)
{
// We assume that the user launched as many processes as there are GPUs,
// that each node has the same number of GPUs, and that each node has at least
// two GPUs. The reason for the last assumption is to make sure that the test
// runs on the tester but not on desktops or laptops that have only one GPU.
int n_devices = 0;
hipError_t cuda_error_code = hipGetDeviceCount(&n_devices);
mfmg::ASSERT_CUDA(cuda_error_code);
hipsparseHandle_t cusparse_handle = nullptr;
hipsparseStatus_t cusparse_error_code;
cusparse_error_code = hipsparseCreate(&cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
if (n_devices > 1)
{
MPI_Comm comm = MPI_COMM_WORLD;
unsigned int const comm_size =
dealii::Utilities::MPI::n_mpi_processes(comm);
unsigned int const rank = dealii::Utilities::MPI::this_mpi_process(comm);
// Set the device for each process
int device_id = rank % n_devices;
cuda_error_code = hipSetDevice(device_id);
// Build the sparse matrix on the host
unsigned int const n_local_rows = 10;
unsigned int const row_offset = rank * n_local_rows;
unsigned int const size = comm_size * n_local_rows;
dealii::IndexSet parallel_partitioning(size);
for (unsigned int i = 0; i < n_local_rows; ++i)
parallel_partitioning.add_index(row_offset + i);
parallel_partitioning.compress();
dealii::TrilinosWrappers::SparseMatrix sparse_matrix(parallel_partitioning);
unsigned int nnz = 0;
for (unsigned int i = 0; i < n_local_rows; ++i)
{
std::default_random_engine generator(i);
std::uniform_int_distribution<int> distribution(0, size - 1);
std::set<int> column_indices;
for (unsigned int j = 0; j < 5; ++j)
{
int column_index = distribution(generator);
sparse_matrix.set(row_offset + i, column_index,
static_cast<double>(i + j));
column_indices.insert(column_index);
}
nnz += column_indices.size();
}
sparse_matrix.compress(dealii::VectorOperation::insert);
// Move the sparse matrix to the device and change the format to a regular
// CSR
mfmg::SparseMatrixDevice<double> sparse_matrix_dev =
mfmg::convert_matrix(sparse_matrix);
hipsparseMatDescr_t descr;
cusparse_error_code = hipsparseCreateMatDescr(&descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
sparse_matrix_dev.descr = descr;
sparse_matrix_dev.cusparse_handle = cusparse_handle;
// Build a vector on the host
dealii::LinearAlgebra::distributed::Vector<double> vector(
parallel_partitioning, comm);
unsigned int vector_local_size = vector.local_size();
for (unsigned int i = 0; i < vector_local_size; ++i)
vector.local_element(i) = i;
// Move the vector to the device
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
vector_dev(vector.get_partitioner());
cuda_error_code =
hipMemcpy(vector_dev.get_values(), vector.begin(),
vector_local_size * sizeof(double), hipMemcpyHostToDevice);
mfmg::ASSERT_CUDA(cuda_error_code);
// Perform the matrix-vector multiplication on the host
dealii::LinearAlgebra::distributed::Vector<double> result(vector);
sparse_matrix.vmult(result, vector);
// Perform the matrix-vector multiplication on the device
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
result_dev(vector.get_partitioner());
sparse_matrix_dev.vmult(result_dev, vector_dev);
// Check the result
std::vector<double> result_host(vector_local_size);
mfmg::cuda_mem_copy_to_host(result_dev.get_values(), result_host);
for (unsigned int i = 0; i < vector_local_size; ++i)
BOOST_CHECK_CLOSE(result.local_element(i), result_host[i], 1e-14);
}
// Destroy cusparse_handle
cusparse_error_code = hipsparseDestroy(cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_handle = nullptr;
}
template <typename ScalarType>
std::tuple<std::vector<ScalarType>, std::vector<int>, std::vector<int>>
copy_sparse_matrix_to_host(
mfmg::SparseMatrixDevice<ScalarType> const &sparse_matrix_dev)
{
std::vector<ScalarType> val(sparse_matrix_dev.local_nnz());
mfmg::cuda_mem_copy_to_host(sparse_matrix_dev.val_dev, val);
std::vector<int> column_index(sparse_matrix_dev.local_nnz());
mfmg::cuda_mem_copy_to_host(sparse_matrix_dev.column_index_dev, column_index);
std::vector<int> row_ptr(sparse_matrix_dev.m() + 1);
mfmg::cuda_mem_copy_to_host(sparse_matrix_dev.row_ptr_dev, row_ptr);
return std::make_tuple(val, column_index, row_ptr);
}
BOOST_AUTO_TEST_CASE(mmult)
{
MPI_Comm comm = MPI_COMM_WORLD;
unsigned int const comm_size = dealii::Utilities::MPI::n_mpi_processes(comm);
int n_devices = 0;
hipError_t cuda_error_code = hipGetDeviceCount(&n_devices);
mfmg::ASSERT_CUDA(cuda_error_code);
if ((comm_size == 1) || ((comm_size == 2) && (n_devices == 2)))
{
int const rank = dealii::Utilities::MPI::this_mpi_process(comm);
cuda_error_code = hipSetDevice(rank);
mfmg::ASSERT_CUDA(cuda_error_code);
hipsparseHandle_t cusparse_handle = nullptr;
hipsparseStatus_t cusparse_error_code;
cusparse_error_code = hipsparseCreate(&cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
// Build the sparsity pattern
dealii::SparsityPattern sparsity_pattern;
unsigned int const size = 30;
std::vector<std::vector<unsigned int>> column_indices(size);
for (unsigned int i = 0; i < size; ++i)
{
std::vector<unsigned int> indices;
std::default_random_engine generator(i);
std::uniform_int_distribution<int> distribution(0, size - 1);
for (unsigned int j = 0; j < 5; ++j)
indices.push_back(distribution(generator));
indices.push_back(i);
std::sort(indices.begin(), indices.end());
indices.erase(std::unique(indices.begin(), indices.end()), indices.end());
column_indices[i] = indices;
}
sparsity_pattern.copy_from(size, size, column_indices.begin(),
column_indices.end());
// Build the sparse matrix
dealii::SparseMatrix<double> A(sparsity_pattern);
dealii::SparseMatrix<double> B(sparsity_pattern);
for (unsigned int i = 0; i < size; ++i)
for (unsigned int j = 0; j < size; ++j)
if (sparsity_pattern.exists(i, j))
{
A.set(i, j, static_cast<double>(i + j));
B.set(i, j, static_cast<double>(i - j));
}
dealii::SparsityPattern sparsity_pattern_c;
dealii::SparseMatrix<double> C(sparsity_pattern_c);
A.mmult(C, B);
// Move the sparse matrices to the device and change the format to a regular
// CSR
mfmg::SparseMatrixDevice<double> A_dev = mfmg::convert_matrix(A);
mfmg::SparseMatrixDevice<double> B_dev = mfmg::convert_matrix(B);
mfmg::SparseMatrixDevice<double> C_dev = mfmg::convert_matrix(B);
hipsparseMatDescr_t A_descr;
cusparse_error_code = hipsparseCreateMatDescr(&A_descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatType(A_descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatIndexBase(A_descr, HIPSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
A_dev.descr = A_descr;
A_dev.cusparse_handle = cusparse_handle;
hipsparseMatDescr_t B_descr;
cusparse_error_code = hipsparseCreateMatDescr(&B_descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatType(B_descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatIndexBase(B_descr, HIPSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
B_dev.descr = B_descr;
B_dev.cusparse_handle = cusparse_handle;
hipsparseMatDescr_t C_descr;
cusparse_error_code = hipsparseCreateMatDescr(&C_descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatType(C_descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatIndexBase(C_descr, HIPSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
C_dev.descr = C_descr;
C_dev.cusparse_handle = cusparse_handle;
A_dev.mmult(C_dev, B_dev);
// Move C_dev to the host
std::vector<double> val_host;
std::vector<int> column_index_host;
std::vector<int> row_ptr_host;
std::tie(val_host, column_index_host, row_ptr_host) =
copy_sparse_matrix_to_host(C_dev);
// Check the result
unsigned int const n_rows = C_dev.m();
unsigned int pos = 0;
for (unsigned int i = 0; i < n_rows; ++i)
for (unsigned int j = row_ptr_host[i]; j < row_ptr_host[i + 1];
++j, ++pos)
BOOST_CHECK_EQUAL(val_host[pos], C(i, column_index_host[j]));
// Destroy cusparse_handle
cusparse_error_code = hipsparseDestroy(cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_handle = nullptr;
}
}
| 8b9ba169f6d5a71dec916c8f547f56cfa569b2d7.cu | /**************************************************************************
* Copyright (c) 2017-2019 by the mfmg authors *
* All rights reserved. *
* *
* This file is part of the mfmg library. mfmg is distributed under a BSD *
* 3-clause license. For the licensing terms see the LICENSE file in the *
* top-level directory *
* *
* SPDX-License-Identifier: BSD-3-Clause *
*************************************************************************/
#define BOOST_TEST_MODULE utils
#include <mfmg/cuda/sparse_matrix_device.cuh>
#include <mfmg/cuda/utils.cuh>
#include <deal.II/lac/la_parallel_vector.h>
#include <random>
#include <set>
#include "main.cc"
BOOST_AUTO_TEST_CASE(serial_mv)
{
MPI_Comm comm = MPI_COMM_WORLD;
unsigned int const comm_size = dealii::Utilities::MPI::n_mpi_processes(comm);
if (comm_size == 1)
{
cusparseHandle_t cusparse_handle = nullptr;
cusparseStatus_t cusparse_error_code;
cusparse_error_code = cusparseCreate(&cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
// Build the sparse matrix on the host
unsigned int const size = 10;
dealii::IndexSet parallel_partitioning(size);
for (unsigned int i = 0; i < size; ++i)
parallel_partitioning.add_index(i);
parallel_partitioning.compress();
dealii::TrilinosWrappers::SparseMatrix sparse_matrix(parallel_partitioning);
unsigned int nnz = 0;
for (unsigned int i = 0; i < size; ++i)
{
std::default_random_engine generator(i);
std::uniform_int_distribution<int> distribution(0, size - 1);
std::set<int> column_indices;
for (unsigned int j = 0; j < 5; ++j)
{
int column_index = distribution(generator);
sparse_matrix.set(i, column_index, static_cast<double>(i + j));
column_indices.insert(column_index);
}
nnz += column_indices.size();
}
sparse_matrix.compress(dealii::VectorOperation::insert);
// Move the sparse matrix to the device and change the format to a regular
// CSR
mfmg::SparseMatrixDevice<double> sparse_matrix_dev =
mfmg::convert_matrix(sparse_matrix);
cusparseMatDescr_t descr;
cusparse_error_code = cusparseCreateMatDescr(&descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
sparse_matrix_dev.descr = descr;
sparse_matrix_dev.cusparse_handle = cusparse_handle;
// Build a vector on the host
dealii::LinearAlgebra::distributed::Vector<double> vector(
parallel_partitioning, comm);
unsigned int vector_local_size = vector.local_size();
for (unsigned int i = 0; i < vector_local_size; ++i)
vector[i] = i;
// Move the vector to the device
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
vector_dev(vector.get_partitioner());
cudaError_t cuda_error_code;
cuda_error_code =
cudaMemcpy(vector_dev.get_values(), vector.begin(),
vector_local_size * sizeof(double), cudaMemcpyHostToDevice);
mfmg::ASSERT_CUDA(cuda_error_code);
// Perform the matrix-vector multiplication on the host
dealii::LinearAlgebra::distributed::Vector<double> result(vector);
sparse_matrix.vmult(result, vector);
// Perform the matrix-vector multiplication on the device
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
result_dev(vector.get_partitioner());
sparse_matrix_dev.vmult(result_dev, vector_dev);
// Check the result
std::vector<double> result_host(vector_local_size);
mfmg::cuda_mem_copy_to_host(result_dev.get_values(), result_host);
for (unsigned int i = 0; i < vector_local_size; ++i)
BOOST_CHECK_CLOSE(result[i], result_host[i], 1e-14);
// Destroy cusparse_handle
cusparse_error_code = cusparseDestroy(cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_handle = nullptr;
}
}
BOOST_AUTO_TEST_CASE(distributed_mv)
{
// We assume that the user launched as many processes as there are GPUs,
// that each node has the same number of GPUs, and that each node has at least
// two GPUs. The reason for the last assumption is to make sure that the test
// runs on the tester but not on desktops or laptops that have only one GPU.
int n_devices = 0;
cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
mfmg::ASSERT_CUDA(cuda_error_code);
cusparseHandle_t cusparse_handle = nullptr;
cusparseStatus_t cusparse_error_code;
cusparse_error_code = cusparseCreate(&cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
if (n_devices > 1)
{
MPI_Comm comm = MPI_COMM_WORLD;
unsigned int const comm_size =
dealii::Utilities::MPI::n_mpi_processes(comm);
unsigned int const rank = dealii::Utilities::MPI::this_mpi_process(comm);
// Set the device for each process
int device_id = rank % n_devices;
cuda_error_code = cudaSetDevice(device_id);
// Build the sparse matrix on the host
unsigned int const n_local_rows = 10;
unsigned int const row_offset = rank * n_local_rows;
unsigned int const size = comm_size * n_local_rows;
dealii::IndexSet parallel_partitioning(size);
for (unsigned int i = 0; i < n_local_rows; ++i)
parallel_partitioning.add_index(row_offset + i);
parallel_partitioning.compress();
dealii::TrilinosWrappers::SparseMatrix sparse_matrix(parallel_partitioning);
unsigned int nnz = 0;
for (unsigned int i = 0; i < n_local_rows; ++i)
{
std::default_random_engine generator(i);
std::uniform_int_distribution<int> distribution(0, size - 1);
std::set<int> column_indices;
for (unsigned int j = 0; j < 5; ++j)
{
int column_index = distribution(generator);
sparse_matrix.set(row_offset + i, column_index,
static_cast<double>(i + j));
column_indices.insert(column_index);
}
nnz += column_indices.size();
}
sparse_matrix.compress(dealii::VectorOperation::insert);
// Move the sparse matrix to the device and change the format to a regular
// CSR
mfmg::SparseMatrixDevice<double> sparse_matrix_dev =
mfmg::convert_matrix(sparse_matrix);
cusparseMatDescr_t descr;
cusparse_error_code = cusparseCreateMatDescr(&descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
sparse_matrix_dev.descr = descr;
sparse_matrix_dev.cusparse_handle = cusparse_handle;
// Build a vector on the host
dealii::LinearAlgebra::distributed::Vector<double> vector(
parallel_partitioning, comm);
unsigned int vector_local_size = vector.local_size();
for (unsigned int i = 0; i < vector_local_size; ++i)
vector.local_element(i) = i;
// Move the vector to the device
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
vector_dev(vector.get_partitioner());
cuda_error_code =
cudaMemcpy(vector_dev.get_values(), vector.begin(),
vector_local_size * sizeof(double), cudaMemcpyHostToDevice);
mfmg::ASSERT_CUDA(cuda_error_code);
// Perform the matrix-vector multiplication on the host
dealii::LinearAlgebra::distributed::Vector<double> result(vector);
sparse_matrix.vmult(result, vector);
// Perform the matrix-vector multiplication on the device
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
result_dev(vector.get_partitioner());
sparse_matrix_dev.vmult(result_dev, vector_dev);
// Check the result
std::vector<double> result_host(vector_local_size);
mfmg::cuda_mem_copy_to_host(result_dev.get_values(), result_host);
for (unsigned int i = 0; i < vector_local_size; ++i)
BOOST_CHECK_CLOSE(result.local_element(i), result_host[i], 1e-14);
}
// Destroy cusparse_handle
cusparse_error_code = cusparseDestroy(cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_handle = nullptr;
}
template <typename ScalarType>
std::tuple<std::vector<ScalarType>, std::vector<int>, std::vector<int>>
copy_sparse_matrix_to_host(
mfmg::SparseMatrixDevice<ScalarType> const &sparse_matrix_dev)
{
std::vector<ScalarType> val(sparse_matrix_dev.local_nnz());
mfmg::cuda_mem_copy_to_host(sparse_matrix_dev.val_dev, val);
std::vector<int> column_index(sparse_matrix_dev.local_nnz());
mfmg::cuda_mem_copy_to_host(sparse_matrix_dev.column_index_dev, column_index);
std::vector<int> row_ptr(sparse_matrix_dev.m() + 1);
mfmg::cuda_mem_copy_to_host(sparse_matrix_dev.row_ptr_dev, row_ptr);
return std::make_tuple(val, column_index, row_ptr);
}
BOOST_AUTO_TEST_CASE(mmult)
{
MPI_Comm comm = MPI_COMM_WORLD;
unsigned int const comm_size = dealii::Utilities::MPI::n_mpi_processes(comm);
int n_devices = 0;
cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
mfmg::ASSERT_CUDA(cuda_error_code);
if ((comm_size == 1) || ((comm_size == 2) && (n_devices == 2)))
{
int const rank = dealii::Utilities::MPI::this_mpi_process(comm);
cuda_error_code = cudaSetDevice(rank);
mfmg::ASSERT_CUDA(cuda_error_code);
cusparseHandle_t cusparse_handle = nullptr;
cusparseStatus_t cusparse_error_code;
cusparse_error_code = cusparseCreate(&cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
// Build the sparsity pattern
dealii::SparsityPattern sparsity_pattern;
unsigned int const size = 30;
std::vector<std::vector<unsigned int>> column_indices(size);
for (unsigned int i = 0; i < size; ++i)
{
std::vector<unsigned int> indices;
std::default_random_engine generator(i);
std::uniform_int_distribution<int> distribution(0, size - 1);
for (unsigned int j = 0; j < 5; ++j)
indices.push_back(distribution(generator));
indices.push_back(i);
std::sort(indices.begin(), indices.end());
indices.erase(std::unique(indices.begin(), indices.end()), indices.end());
column_indices[i] = indices;
}
sparsity_pattern.copy_from(size, size, column_indices.begin(),
column_indices.end());
// Build the sparse matrix
dealii::SparseMatrix<double> A(sparsity_pattern);
dealii::SparseMatrix<double> B(sparsity_pattern);
for (unsigned int i = 0; i < size; ++i)
for (unsigned int j = 0; j < size; ++j)
if (sparsity_pattern.exists(i, j))
{
A.set(i, j, static_cast<double>(i + j));
B.set(i, j, static_cast<double>(i - j));
}
dealii::SparsityPattern sparsity_pattern_c;
dealii::SparseMatrix<double> C(sparsity_pattern_c);
A.mmult(C, B);
// Move the sparse matrices to the device and change the format to a regular
// CSR
mfmg::SparseMatrixDevice<double> A_dev = mfmg::convert_matrix(A);
mfmg::SparseMatrixDevice<double> B_dev = mfmg::convert_matrix(B);
mfmg::SparseMatrixDevice<double> C_dev = mfmg::convert_matrix(B);
cusparseMatDescr_t A_descr;
cusparse_error_code = cusparseCreateMatDescr(&A_descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatType(A_descr, CUSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatIndexBase(A_descr, CUSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
A_dev.descr = A_descr;
A_dev.cusparse_handle = cusparse_handle;
cusparseMatDescr_t B_descr;
cusparse_error_code = cusparseCreateMatDescr(&B_descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatType(B_descr, CUSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatIndexBase(B_descr, CUSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
B_dev.descr = B_descr;
B_dev.cusparse_handle = cusparse_handle;
cusparseMatDescr_t C_descr;
cusparse_error_code = cusparseCreateMatDescr(&C_descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatType(C_descr, CUSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatIndexBase(C_descr, CUSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
C_dev.descr = C_descr;
C_dev.cusparse_handle = cusparse_handle;
A_dev.mmult(C_dev, B_dev);
// Move C_dev to the host
std::vector<double> val_host;
std::vector<int> column_index_host;
std::vector<int> row_ptr_host;
std::tie(val_host, column_index_host, row_ptr_host) =
copy_sparse_matrix_to_host(C_dev);
// Check the result
unsigned int const n_rows = C_dev.m();
unsigned int pos = 0;
for (unsigned int i = 0; i < n_rows; ++i)
for (unsigned int j = row_ptr_host[i]; j < row_ptr_host[i + 1];
++j, ++pos)
BOOST_CHECK_EQUAL(val_host[pos], C(i, column_index_host[j]));
// Destroy cusparse_handle
cusparse_error_code = cusparseDestroy(cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_handle = nullptr;
}
}
|
98777dbf81319a72aaac5373f9b20d3327955632.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hiprand/hiprand.h>
#include<memory.h>
#include<iostream>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include <cmath>
#include<string>
#include<algorithm>
#include "sfm.h"
#include<cudaSift/cudaSift.h>
#include<cudaSift/cudaImage.h>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include <thrust/remove.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include<iomanip>
#include <hip/hip_runtime.h>
#include <random>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <glm/glm.hpp>
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg)
#define scene_scale 100.0f
#define blockSize 128
#define TILE_DIM 32
#define BLOCK_ROWS 8
hipblasHandle_t handle;
hipsolverDnHandle_t cusolverH = NULL;
hipStream_t stream = NULL;
hipsolverGesvdjInfo_t gesvdj_params = NULL;
cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS;
hipError_t cudaStat1 = hipSuccess;
hipError_t cudaStat2 = hipSuccess;
hipError_t cudaStat3 = hipSuccess;
hipError_t cudaStat4 = hipSuccess;
hipError_t cudaStat5 = hipSuccess;
float residual = 0;
int executed_sweeps = 0;
const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_VECTOR;
const float tol = 1.e-7;
const int max_sweeps = 15;
const int sort_svd = 1;
using namespace std;
glm::vec3 *dev_pos;
glm::vec3 *dev_correspond;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace SFM {
structure_from_motion::structure_from_motion(){
}
structure_from_motion::structure_from_motion(int num_images, int num_points) {
hipblasCreate(&handle);
status = hipsolverDnCreate(&cusolverH);
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
checkCUDAError("Could not create flags");
hipsolverDnSetStream(cusolverH, stream);
checkCUDAError("Could not Set strea,");
hipsolverDnCreateGesvdjInfo(&gesvdj_params);
checkCUDAError("Could not create GesvdjInfo");
hipsolverDnXgesvdjSetTolerance(
gesvdj_params,
tol);
checkCUDAError("Could not SetTolerance");
hipsolverDnXgesvdjSetMaxSweeps(
gesvdj_params,
max_sweeps);
checkCUDAError("Could not SetMaxSweeps");
hipsolverDnXgesvdjSetSortEig(
gesvdj_params,
sort_svd);
checkCUDAError("Could not SetSortEigs");
this->num_points = num_points;
float *normalized_pts1;
float *normalized_pts2;
float *norm1;
float *norm2;
hipMalloc((void **)&d_E, 3 * 3 * sizeof(float));
// Canidate R, T
hipMalloc((void **)&d_P, 4 * 4 * 4 * sizeof(float));
for (int i = 0; i < num_images; i++) {
hipMalloc((void**)&normalized_pts1, 3 * num_points * sizeof(float));
norm_pts1.push_back(normalized_pts1);
hipMalloc((void**)&normalized_pts2, 3 * num_points * sizeof(float));
norm_pts2.push_back(normalized_pts2);
hipMalloc((void**)&norm1, 3 * num_points * sizeof(float));
norms1.push_back(norm1);
hipMalloc((void**)&norm2, 3 * num_points * sizeof(float));
norms2.push_back(norm2);
}
hipMalloc((void **)&d_final_points, 4 * num_points * sizeof(float));
}
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__
void kernCopyPositionsToVBO(int N, float *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = s_scale;
if (index < N) {
vbo[access2(index, x_pos, 4)] = pos[access2(x_pos, index, N)] * c_scale;
vbo[access2(index, y_pos, 4)] = pos[access2(y_pos, index, N)] * c_scale;
vbo[access2(index, z_pos, 4)] = pos[access2(z_pos, index, N)] * c_scale;
vbo[access2(index, 3, 4)] = 1.0f;
}
}
__global__
void kernCopyVelocitiesToVBO(int N, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = 1;//vel[index].x + 0.3f;
vbo[4 * index + 1] = 1;//vel[index].y + 0.3f;
vbo[4 * index + 2] = 1;//vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
void structure_from_motion::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((num_points + blockSize - 1) / blockSize);
checkCUDAErrorWithLine("Not copyBoidsToVBO failed!");
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> > (num_points, d_final_points, vbodptr_positions, 1);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> > (num_points, vbodptr_velocities, 1);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
__global__ void copy_point(SiftPoint* data, int numPoints, float *U1, float *U2) {
const int index_col = blockIdx.x*blockDim.x + threadIdx.x; // col is x to prevent warp divergence as much as possible in this naive implementation
const int index_row = blockIdx.y*blockDim.y + threadIdx.y;
if (index_row >= 3 || index_col >= numPoints)
return;
if (!index_row) {
U1[access2(index_row, index_col, numPoints)] = data[index_col].xpos;
U2[access2(index_row, index_col, numPoints)] = data[index_col].match_xpos;
}
else if (index_row == 1) {
U1[access2(index_row, index_col, numPoints)] = data[index_col].ypos;
U2[access2(index_row, index_col, numPoints)] = data[index_col].match_ypos;
}
else {
U1[access2(index_row, index_col, numPoints)] = 1;
U2[access2(index_row, index_col, numPoints)] = 1;
}
}
__global__ void normalizeE(float *E, int ransac_iterations) {
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= ransac_iterations)
return;
float u[9], d[9], v[9];
svd(&(E[index * 3 * 3]), u, d, v);
d[2 * 3 + 2] = 0;
d[1 * 3 + 1] = 1;
d[0] = 1;
// E = U * D * V'
float tmp_u[9];
multAB(u, d, tmp_u);
multABt(tmp_u, v, &(E[index * 3 * 3]));
}
__global__ void element_wise_mult(float *A, float *B, int size) {
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= size)
return;
A[index] *= B[index];
}
__global__ void element_wise_div(float *A, float *B, int size) {
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= size)
return;
float val = B[index];
if (val == 0)
A[index] = 0;
else
A[index] /= val;
}
__global__ void element_wise_sum(float *A, float *B, int size) {
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= size)
return;
A[index] += B[index];
}
__global__
void vecnorm(float *A, float *res, int row, int col, float exp, float final_pow) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= col)
return;
float tmp_vlaue = 0;
#pragma unroll
for (int i = 0; i < row; i++) {
tmp_vlaue += powf(A[access2(i, index, col)], exp);
}
// Now we can take the sqrt of exp and then raise to the final_pow
if (exp == final_pow) {
res[index] = tmp_vlaue;
return;
}
res[index] = powf(tmp_vlaue, final_pow / exp);
}
__global__
void threshold_count(float *A, int *count_res, int batch_size, int ransac_count, float threshold) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= ransac_count)
return;
int count = 0;
#pragma unroll
for (int i = 0; i < batch_size; i++) {
if (A[i + index * batch_size] < threshold)
count++;
}
count_res[index] = count;
}
__global__ void canidate_kernels(float *d_P, const float *u, const float *v) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= 4) // only 4 canidate positions exist so fixed value
return;
float W[9] = { 0, -1, 0, 1, 0, 0, 0, 0, 1 }; // rotation about z axis
float Wt[9]; transpose_copy3x3(W, Wt, 3, 3);
float canidate_P[4 * 4];
float tmp_prod[9], tmp_prod2[9], T[9];
// T
canidate_P[access2(x_pos, 3, 4)] = ((!index || index == 2) ? -1 : 1) * u[access2(x_pos, 2, 3)];
canidate_P[access2(y_pos, 3, 4)] = ((!index || index == 2) ? -1 : 1) * u[access2(y_pos, 2, 3)];
canidate_P[access2(z_pos, 3, 4)] = ((!index || index == 2) ? -1 : 1) * u[access2(z_pos, 2, 3)];
// R
if (index < 2)
multABt(W, v, tmp_prod);
else
multABt(Wt, v, tmp_prod);
multAB(u, tmp_prod, tmp_prod2); // 3x3 transpose
transpose_copy3x3(tmp_prod2, canidate_P, 3, 4);
// Now we copy from 2d to 3d into d_P
//d_P[index] = index;
memcpy(&(d_P[access3(0, 0, index, 4, 4)]), canidate_P, 4 * 4 * sizeof(float));
d_P[access3(3, 0, index, 4, 4)] = 0; // Set last row maually
d_P[access3(3, 1, index, 4, 4)] = 0;
d_P[access3(3, 2, index, 4, 4)] = 0;
d_P[access3(3, 3, index, 4, 4)] = 1;
}
__global__ void compute_linear_triangulation_A(float *A, const float *pt1, const float *pt2, const int count, const int num_points, const float *m1, const float *m2, int P_ind, bool canidate_m2) {
// if canidate_m2, we are computing 4 A's for different m2
// Points are 3xN and Projection matrices are 4x4
int index = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y; // 2 rows, x, y
if (index >= count || row >= 2)
return;
float tmp_A[2 * 4], valx, valy;
const float *correct_pt, *correct_m;
if (canidate_m2) {
assert(count == 4);
if (!row) { // Slightly help with the warp divergence here
correct_pt = pt1;
correct_m = m1;
}
else {
correct_pt = pt2;
correct_m = &(m2[access3(0, 0, index, 4, 4)]);
}
valx = correct_pt[access2(x_pos, 0, num_points)]; // we only use the first point
valy = correct_pt[access2(y_pos, 0, num_points)];
}
else {
assert(P_ind < 4 && P_ind >= 0);
if (!row) { // Slightly help with the warp divergence here
correct_pt = pt1;
correct_m = m1;
}
else {
correct_pt = pt2;
correct_m = &(m2[access3(0, 0, P_ind, 4, 4)]);
}
valx = correct_pt[access2(x_pos, index, num_points)];
valy = correct_pt[access2(y_pos, index, num_points)]; // Num points does not need to be the same as count
}
#pragma unroll
for (int i = 0; i < 4; i++) {
tmp_A[access2(x_pos, i, 4)] = valx * correct_m[access2(2, i, 4)] - correct_m[access2(x_pos, i, 4)];
tmp_A[access2(y_pos, i, 4)] = valy * correct_m[access2(2, i, 4)] - correct_m[access2(y_pos, i, 4)];
}
memcpy(&(A[access3(((!row) ? 0 : 2), 0, index, 4, 4)]), tmp_A, 4 * 2 * sizeof(float));
}
__global__
void normalize_pt_kernal(float *v, float *converted_pt, int number_points) { // assumes size of converted_pt is 4xnum_points and v is 4x4xnum_points
int index = blockIdx.x*blockDim.x + threadIdx.x; // one per num_points
if (index >= number_points)
return;
float norm_value = v[access3(3, 3, index, 4, 4)];
if (norm_value == 0 || abs(norm_value) > 10) {
converted_pt[access2(x_pos, index, number_points)] = 1;
converted_pt[access2(y_pos, index, number_points)] = 1;
converted_pt[access2(z_pos, index, number_points)] = 1;
}
else {
converted_pt[access2(x_pos, index, number_points)] = v[access3(3, x_pos, index, 4, 4)] / norm_value;
converted_pt[access2(y_pos, index, number_points)] = v[access3(3, y_pos, index, 4, 4)] / norm_value;
converted_pt[access2(z_pos, index, number_points)] = v[access3(3, z_pos, index, 4, 4)] / norm_value;
}
converted_pt[access2(3, index, number_points)] = 1;
}
template<typename T>
int printMatrix(const T*A, int row, int col, int print_col, const char* name)
{
/// Prints first and last print_col values of A if A is a 2d matrix
T *print_a = new T[col*row];
hipMemcpy(print_a, A, row* col * sizeof(T), hipMemcpyDeviceToHost);
cout << name << endl;
cout << "{" << endl;
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
if (j < print_col || j > col - print_col - 1) {
float Areg = print_a[i * col + j];
cout << "\t" << Areg;
}
else if (j == print_col) {
cout << "\t....";
}
}
cout << endl;
}
cout << "}" << endl;
delete[]print_a;
return 0;
}
__global__ void fillData(float *normalized_pts, SiftPoint * sift1, int num_points, int match) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= 3 || col >= num_points)
return;
if (row == 0)
normalized_pts[row*num_points + col] = match == 0 ? sift1[col].xpos : sift1[col].match_xpos;
else if (row == 1)
normalized_pts[row*num_points + col] = match == 0 ? sift1[col].ypos : sift1[col].match_ypos;
else
normalized_pts[row*num_points + col] = 1;
}
__global__
void kron_kernal(float*d1, float*d2, float *A, int *indices, const int ransac_iterations, int num_points) {
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int A_row = 8;
const int A_col = 9;
if (index > ransac_iterations)
return;
#pragma unroll
for (int i = 0; i < A_row; i++) {
// begin
A[access3(i, 0, index, A_row, A_col)] = d1[access2(x_pos, indices[index * A_row + i], num_points)] * d2[access2(x_pos, indices[index * A_row + i], num_points)];
A[access3(i, 1, index, A_row, A_col)] = d1[access2(x_pos, indices[index * A_row + i], num_points)] * d2[access2(y_pos, indices[index * A_row + i], num_points)];
A[access3(i, 2, index, A_row, A_col)] = d1[access2(x_pos, indices[index * A_row + i], num_points)] * d2[access2(z_pos, indices[index * A_row + i], num_points)];
// second
A[access3(i, 3, index, A_row, A_col)] = d1[access2(y_pos, indices[index * A_row + i], num_points)] * d2[access2(x_pos, indices[index * A_row + i], num_points)];
A[access3(i, 4, index, A_row, A_col)] = d1[access2(y_pos, indices[index * A_row + i], num_points)] * d2[access2(y_pos, indices[index * A_row + i], num_points)];
A[access3(i, 5, index, A_row, A_col)] = d1[access2(y_pos, indices[index * A_row + i], num_points)] * d2[access2(z_pos, indices[index * A_row + i], num_points)];
//third
A[access3(i, 6, index, A_row, A_col)] = d1[access2(z_pos, indices[index * A_row + i], num_points)] * d2[access2(x_pos, indices[index * A_row + i], num_points)];
A[access3(i, 7, index, A_row, A_col)] = d1[access2(z_pos, indices[index * A_row + i], num_points)] * d2[access2(y_pos, indices[index * A_row + i], num_points)];
A[access3(i, 8, index, A_row, A_col)] = d1[access2(z_pos, indices[index * A_row + i], num_points)] * d2[access2(z_pos, indices[index * A_row + i], num_points)];
}
}
void printCuda(float *a1, int n, std::string name) {
float *print_a = new float[n];
std::cout << name.c_str() << std::endl;
std::cout << "{" << std::endl;
hipMemcpy(print_a, a1, n * sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++) {
std::cout << "\t" << print_a[i] << std::endl;
}
std::cout << "}" << std::endl;
delete[]print_a;
}
void svd_device(float *src, float *VT, float *S, float *U, int m, int n, const int batchSize, int *d_info) {
const int minmn = (m < n) ? m : n;
const int lda = m;
const int ldu = m;
const int ldv = n;
int lwork = 0; /* size of workspace */
float *d_work = NULL; /* device workspace for gesvdjBatched */
hipDeviceSynchronize();
checkCUDAError("Could not Synchronize");
hipsolverDnSgesvdjBatched_bufferSize(cusolverH, jobz, m, n, src, lda, S, VT, ldu, U, ldv, &lwork, gesvdj_params, batchSize);
checkCUDAError("Could not SgesvdjBatched_bufferSize");
hipMalloc((void**)&d_work, sizeof(float)*lwork);
hipsolverDnSgesvdjBatched(cusolverH, jobz, m, n, src, lda, S, VT, ldu, U, ldv, d_work, lwork, d_info, gesvdj_params, batchSize);
checkCUDAError("Could not SgesvdjBatched");
hipDeviceSynchronize();
}
__global__ void transpose(float *odata, float* idata, int width, int height)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
if (xIndex < width && yIndex < height)
{
unsigned int index_in = xIndex + width * yIndex;
unsigned int index_out = yIndex + height * xIndex;
odata[index_out] = idata[index_in];
}
}
void svd_device_transpose(float *src, float *UT, float *S, float *VT, int m, int n, const int batchSize, int *d_info) {
float *d_A_trans = NULL;
hipMalloc((void **)&d_A_trans, 8 * 9 * batchSize * sizeof(float));
for (int i = 0; i < batchSize; i++) {
dim3 blocks(10, 10);
dim3 fullBlocksPerGrid(1, 1);
transpose << < fullBlocksPerGrid, blocks >> > (d_A_trans + i * 8 * 9, src + i * 8 * 9, 9, 8);
}
const int minmn = (m < n) ? m : n;
const int lda = m;
const int ldu = m;
const int ldv = n;
int lwork = 0; /* size of workspace */
float *d_work = NULL; /* device workspace for gesvdjBatched */
hipDeviceSynchronize();
checkCUDAError("Could not Synchronize");
hipsolverDnSgesvdjBatched_bufferSize(cusolverH, jobz, m, n, d_A_trans, lda, S, UT, ldu, VT, ldv, &lwork, gesvdj_params, batchSize);
checkCUDAError("Could not SgesvdjBatched_bufferSize");
hipMalloc((void**)&d_work, sizeof(float)*lwork);
hipsolverDnSgesvdjBatched(cusolverH, jobz, m, n, d_A_trans, lda, S, UT, ldu, VT, ldv, d_work, lwork, d_info, gesvdj_params, batchSize);
checkCUDAError("Could not SgesvdjBatched");
hipDeviceSynchronize();
}
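// Note: the gesvdjBatched solver (hipsolver/cusolver) assumes column-major
// input, while kron_kernal fills each 8x9 block of A row by row; the
// element-wise transpose above rewrites every block so that its memory layout
// matches what the batched SVD expects.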
void invert_device(float *src, float *dst, int n) {
int batchSize = 1;
int *P, *INFO;
hipMalloc<int>(&P, n * batchSize * sizeof(int));
hipMalloc<int>(&INFO, batchSize * sizeof(int));
int lda = n;
float *A[] = {src};
float ** A_d;
hipMalloc<float*>(&A_d, sizeof(A));
hipMemcpy(A_d, A, sizeof(A), hipMemcpyHostToDevice);
hipblasSgetrfBatched(handle, n, A_d, lda, P, INFO, batchSize);
int INFOh = 0;
hipMemcpy(&INFOh, INFO, sizeof(int), hipMemcpyDeviceToHost);
if (INFOh == 17) {
fprintf(stderr, "Factorization Failed: Matrix is singular\n");
hipDeviceReset();
exit(EXIT_FAILURE);
}
float* C[] = { dst };
float** C_d;
hipMalloc<float*>(&C_d, sizeof(C));
hipMemcpy(C_d, C, sizeof(C), hipMemcpyHostToDevice);
hipblasSgetriBatched(handle, n, A_d, lda, P, C_d, n, INFO, batchSize);
hipMemcpy(&INFOh, INFO, sizeof(int), hipMemcpyDeviceToHost);
if (INFOh != 0)
{
fprintf(stderr, "Inversion Failed: Matrix is singular\n");
hipDeviceReset();
exit(EXIT_FAILURE);
}
hipFree(P), hipFree(INFO);
}
void invert(float *s, float *d, int n) {
float *src;
hipMalloc<float>(&src, n * n * sizeof(float));
hipMemcpy(src, s, n * n * sizeof(float), hipMemcpyHostToDevice);
invert_device(src, d, n);
hipFree(src);
}
void mmul(const float* A, const float* B, float* C, const int m, const int k, const int n) {
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, alpha, B, n, A, k, beta, C, n);
}
// For A'B
// A is M by N and B is M by N
//lda = num_col_A = num_row_AT = N;
//ldb = num_col_B = num_row_BT = N;
//ldc = num_row_C = N;
//m = num_row_C = num_row_AT = num_col_A = N;
//n = num_col_C = num_row_BT = num_col_B = N;
//k = num_col_AT = num_row_B = M;
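// In short: the BLAS routines used here assume column-major storage, and a
// row-major M x N matrix occupies the same memory as its column-major N x M
// transpose. A row-major product C = A * B (A: m x k, B: k x n) is therefore
// obtained by computing C' = B' * A', i.e. by swapping the operands, which is
// exactly what mmul() above does:
// hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
// n, m, k, alpha, B, n, A, k, beta, C, n);
// The transposed branches below adjust the op flags and leading dimensions in
// the same spirit.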
void gpu_blas_mmul_batched(const float *A, const float *B, float *C, const int m, const int k, const int n, const int stride_A, const int stride_B, const int stride_C, const int batches,
bool trans_a, bool trans_b) {
assert(stride_A == 0 || stride_A == m * k);
assert(stride_B == 0 || stride_B == n * k);
assert(stride_C == 0 || stride_C == m * n);
hipblasHandle_t handle;
hipblasCreate(&handle);
const float alf = 1; // gpu vs cpu
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
if(trans_a == 0 && trans_b == 0)
hipblasSgemmStridedBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, alpha, B, n, stride_B, A, k, stride_A, beta, C, n, stride_C, batches);
else if(trans_a == 1 && trans_b == 0)
hipblasSgemmStridedBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, n, m, k, alpha, B, n, stride_B, A, m, stride_A, beta, C, n, stride_C, batches);
else if(trans_a == 0 && trans_b == 1)
hipblasSgemmStridedBatched(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, n, m, k, alpha, B, k, stride_B, A, m, stride_A, beta, C, k, stride_C, batches);
hipblasDestroy(handle);
}
template<typename T>
T* cuda_alloc_copy(const T* host, int size) {
T* data;
hipMalloc((void**)&data, size * sizeof(T));
hipMemcpy(data, host, size * sizeof(T), hipMemcpyHostToDevice);
return data;
}
int * structure_from_motion::calculateInliers(float *d_E_canidate, int ransac_iter) {
/// This function calculates n1, d1, n2, d2 and then finds the number of residuals per E candidate in X[0] and X[1]
// Init E1
float E1[9] = { 0, -1, 0, 1, 0, 0, 0, 0, 0 };
float *d_E1;
hipMalloc((void **)&d_E1, 9 * sizeof(float));
hipMemcpy(d_E1, E1, 9 * sizeof(float), hipMemcpyHostToDevice);
// Allocs
float *x1_transformed, *x2_transformed;
hipMalloc((void**)&x1_transformed, 3 * num_points * ransac_iter * sizeof(float));
hipMalloc((void**)&x2_transformed, 3 * num_points * ransac_iter * sizeof(float));
float *d1, *d2;
hipMalloc((void**)&d1, 3 * num_points * ransac_iter * sizeof(float));
hipMalloc((void**)&d2, 3 * num_points * ransac_iter * sizeof(float));
float *n1, *n2;
hipMalloc((void **)&n1, 3 * num_points * ransac_iter * sizeof(float));
hipMalloc((void **)&n2, 3 * num_points * ransac_iter * sizeof(float));
// Calculate x1 (from matlab code) {
int m = 3, k = 3, n = num_points;
gpu_blas_mmul_batched(d_E_canidate, norms1[0], x1_transformed, m, k, n, m * k, 0, m * n, ransac_iter, 0,0);
//Compute n1
m = num_points, k = 3, n = 3; // these probably need to change because we need to transpose X[1]
gpu_blas_mmul_batched(norms2[0], d_E_canidate, n1, m, k, n, 0, 3 * 3, m * n, ransac_iter, 1,0); // transpose X[1]
int blocks = ceil((3 * num_points + blockSize - 1) / blockSize); // BUG!!! we need to make this batched
element_wise_mult << <blocks, blockSize >> > (n1, norms1[0], 3 * num_points);
// Compute d1
// d1 = E1 * x1_transformed
m = 3, k = 3, n = num_points;
gpu_blas_mmul_batched(d_E_canidate, x1_transformed, d1, m, k, n, m*k, 0, m* n, ransac_iter, 0,0);
// }
// Now calculate x2_transformed, n2 and d2 {
m = 3, k = 3, n = num_points;
gpu_blas_mmul_batched(d_E_canidate, norms2[0], x2_transformed, m, k, n, m*k, 0, m* n, ransac_iter, 0,0);
//Compute n2
m = num_points, k = 3, n = 3; // these probably need to change because we need to transpose X[0]
gpu_blas_mmul_batched(norms1[0], d_E_canidate, n2, m, k, n, 0, 3 * 3, m * n, ransac_iter, 1,0); // transpose X[0]
blocks = ceil((3 * num_points + blockSize - 1) / blockSize);
element_wise_mult << <blocks, blockSize >> > (n2, norms2[0], 3 * num_points);
// Compute d2
m = 3, k = 3, n = num_points;
gpu_blas_mmul_batched(d_E_canidate, x2_transformed, d2, m, k, n, m*k, 0, m* n, ransac_iter, 0,0);
// }
// Now calculate the residual per canidate E{
float *norm_n1, *norm_n2, *norm_d1, *norm_d2;
int *inliers;
int size = num_points * ransac_iter;
hipMalloc((void**)&norm_n1, size * sizeof(float));
hipMalloc((void**)&norm_n2, size * sizeof(float));
hipMalloc((void**)&norm_d1, size * sizeof(float));
hipMalloc((void**)&norm_d2, size * sizeof(float));
hipMalloc((void**)&inliers, ransac_iter * sizeof(int));
blocks = ceil((num_points * ransac_iter + blockSize - 1) / blockSize);
vecnorm << <blocks, blockSize >> > (n1, norm_n1, 3, size, 1, 2);
vecnorm << <blocks, blockSize >> > (n2, norm_n2, 3, size, 1, 2);
vecnorm << <blocks, blockSize >> > (d1, norm_d1, 3, size, 2, 2);
vecnorm << <blocks, blockSize >> > (d2, norm_d2, 3, size, 2, 2);
element_wise_div << <blocks, blockSize >> > (norm_n1, norm_d1, size);
element_wise_div << <blocks, blockSize >> > (norm_n2, norm_d2, size);
// have the residuals in norm_n1
element_wise_sum << <blocks, blockSize >> > (norm_n1, norm_n2, size);
// Calculate inliers per cell
blocks = ceil((ransac_iter + blockSize - 1) / blockSize);
threshold_count << <blocks, blockSize >> > (norm_n1, inliers, num_points, ransac_iter, 1e-3); // tested
//}
// Not sure if we should free
hipFree(n1);
hipFree(n2);
hipFree(d1);
hipFree(d2);
hipFree(x1_transformed);
hipFree(x2_transformed);
// Free the norms!!!
hipFree(norm_n1);
hipFree(norm_n2);
hipFree(norm_d1);
hipFree(norm_d2);
// 100% free
hipFree(d_E1);
return inliers;
}
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
// Create a pseudo-random number generator
hiprandGenerator_t prng;
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
// Set the seed for the random number generator using the system clock
hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
// Fill the array with random numbers on the device
hiprandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
}
void structure_from_motion::computePoseCandidates() {
// Tested
float E[9];// = { -0.211 , -0.798 , -0.561, -0.967 , 0.252 , 0.009, 0.046 , 0.047 , 0.039 }; // TODO remove this once testing is done
hipMemcpy(E, d_E, 3 * 3 * sizeof(float), hipMemcpyDeviceToHost);
float u[9], d[9], v[9], tmp[9];
svd(E, u, d, v); // v is not transposed
multABt(u, v, tmp); // u * v'
if (det(tmp) < 0)
neg(v);
float *d_u, *d_v;
d_u = cuda_alloc_copy(u, 3 * 3);
d_v = cuda_alloc_copy(v, 3 * 3);
canidate_kernels << <1, 32 >> > (d_P, d_u, d_v);
hipFree(d_u);
hipFree(d_v);
}
void structure_from_motion::linear_triangulation() {
float P1[16] = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 }; // I(4)
float *d_P1 = cuda_alloc_copy(P1, 16);
float *d_A, *d_u, *d_d, *d_vt;
hipMalloc((void **)&d_A, 4 * 4 * num_points * sizeof(float));
hipMalloc((void **)&d_u, 4 * 4 * num_points * sizeof(float));
hipMalloc((void **)&d_d, 4 * num_points * sizeof(float));
hipMalloc((void **)&d_vt, 4 * 4 * num_points * sizeof(float));
// Create A
dim3 grids(ceil((num_points * 2 + blockSize - 1) / blockSize), 1);
dim3 block_sizes(blockSize / 2, 2);
compute_linear_triangulation_A << <grids, block_sizes >> > (d_A, norms1[0], norms2[0], num_points, num_points, d_P1, d_P, P_ind, false);
checkCUDAError("A computation error");
// Assumes V isn't transposed; we need to take the last column
int *d_info = NULL;
hipMalloc((void**)&d_info, 4 * sizeof(int));
svd_device(d_A, d_vt, d_d, d_u, 4, 4, num_points, d_info);
checkCUDAError("SVD error");
dim3 grids2(ceil((num_points + blockSize - 1) / blockSize), 1);
dim3 block_sizes2(blockSize, 4);
// Normalize by using the last row of v'
normalize_pt_kernal << <grids2, block_sizes2 >> > (d_vt, d_final_points, num_points);
printMatrix(d_final_points, 3, num_points, 5, "Transformed points");
hipFree(d_P1);
hipFree(d_A);
hipFree(d_u);
hipFree(d_d);
hipFree(d_vt);
hipFree(d_info);
}
void structure_from_motion::choosePose() {
float P1[16] = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 }; // I(4)
float *d_P1 = cuda_alloc_copy(P1, 16);
float *d_A, *d_u, *d_d, *d_vt;
hipMalloc((void **)&d_A, 4 * 4 * 4 * sizeof(float));
hipMalloc((void **)&d_u, 4 * 4 * 4 * sizeof(float));
hipMalloc((void **)&d_d, 4 * 4 * sizeof(float));
hipMalloc((void **)&d_vt, 4 * 4 * 4 * sizeof(float));
// Create A
dim3 blocks(1, 1);
dim3 block_sizes(4, 2);
compute_linear_triangulation_A << <blocks, block_sizes >> > (d_A, norms1[0], norms2[0], 4, num_points, d_P1, d_P, -1, true);
// We only care about V
float *d_d1, *d_d2; // 3x4 batched
hipMalloc((void **)&d_d1, 4 * 4 * sizeof(float));
hipMalloc((void **)&d_d2, 4 * 4 * sizeof(float));
// Assumes V isn't transposed; we need to take the last row
// svd(d_A, d_u, d_d, d_v, 4 batches)
checkCUDAErrorWithLine("Before SVD");
int *d_info = NULL;
hipMalloc((void**)&d_info, 4 * sizeof(int));
svd_device(d_A, d_vt, d_d, d_u, 4, 4, 4, d_info);
checkCUDAErrorWithLine("SVD");
normalize_pt_kernal << <1, 4 >> > (d_vt, d_d1, 4);
printMatrix(d_d1, 4, 4, 4, "d1");
float val_d1, val_d2;
P_ind = 0;
for (int i = 0; i < 4; i++) { // batched doesn't work for inverse + it is only 4, 4x4 matrices, should be easy
invert(d_P + i * 4 * 4, d_P + i * 4 * 4, 4);
int m = 4, k = 4, n = 4;
mmul(d_P + i * 4 * 4, d_d1, d_d2, m, k, n);
// Do the final testing on the host
hipMemcpy(&val_d1, &(d_d1[access2(2, i, 4)]), sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&val_d2, &(d_d2[access2(2, i, 4)]), sizeof(float), hipMemcpyDeviceToHost);
// Now we do the final check on the CPU as well, since it is just as easy there
if (val_d1 > 0 && val_d2 > 0)
P_ind = i;
}
hipFree(d_P1);
hipFree(d_A);
hipFree(d_u);
hipFree(d_d);
hipFree(d_vt);
hipFree(d_d1);
hipFree(d_d2);
hipFree(d_info);
}
void structure_from_motion::struct_from_motion(SiftData siftData1, float *intrinsic, int n, int num_images){
float *invert_intrinsic;
hipMalloc<float>(&invert_intrinsic, n * n * sizeof(float));
invert(intrinsic, invert_intrinsic, n);
// TODO: Improve this
printMatrix(invert_intrinsic, 3, 3, 3, "inver");
SiftPoint *sift1 = siftData1.d_data;
for (int i = 0; i < num_images-1; i++) {
dim3 block(blockSize, 3);
dim3 fullBlocksPerGrid((siftData1.numPts + blockSize - 1) / blockSize, 1);
fillData << <fullBlocksPerGrid, block >> > (norm_pts1[i], sift1, siftData1.numPts, 0);
fillData << <fullBlocksPerGrid, block >> > (norm_pts2[i], sift1, siftData1.numPts, 1);
mmul(invert_intrinsic, norm_pts1[i], norms1[i], 3, 3, siftData1.numPts);
mmul(invert_intrinsic, norm_pts2[i], norms2[i], 3, 3, siftData1.numPts);
}
const int ransac_count = floor(num_points / 8);
// Create random order of points (on cpu using std::shuffle)
int *indices = new int[num_points];
int *d_indices;
hipMalloc((void **)&d_indices, num_points * sizeof(int));
for (int i = 0; i < num_points; indices[i] = i, i++);
// Shuffle data
std::random_device rd;
std::mt19937 g(rd());
//shuffle(indices, indices + num_points, g); todo enable this
// Copy data to gpu
hipMemcpy(d_indices, indices, num_points * sizeof(int), hipMemcpyHostToDevice);
// Calculate all kron products correctly
float *d_A;
hipMalloc((void **)&d_A, 8 * 9 * ransac_count * sizeof(float));
checkCUDAErrorWithLine("A malloc failed!");
int grids = ceil((ransac_count + blockSize - 1) / blockSize);
kron_kernal << <grids, blockSize >> > (norms1[0], norms2[0], d_A, d_indices, ransac_count, num_points);
checkCUDAErrorWithLine("Kron failed!");
float *d_E_canidate;
hipMalloc((void **)&d_E_canidate, 3 * 3 * ransac_count * sizeof(float));
// Calculate batch SVD of d_A
float *d_ut, *d_vt, *d_s;
hipMalloc((void **)&d_ut, 8 * 8 * ransac_count * sizeof(float));
hipMalloc((void **)&d_vt, 9 * 9 * ransac_count * sizeof(float));
hipMalloc((void **)&d_s, 8 * ransac_count * sizeof(float));
int *d_info = NULL;
hipMalloc((void**)&d_info, 4 * sizeof(int));
svd_device_transpose(d_A, d_ut, d_s, d_vt, 8, 9, ransac_count, d_info);
// Last column of V becomes E (row of v' in our case)
int blocks = ceil((ransac_count + blockSize - 1) / blockSize);
for (int i = 0; i < ransac_count; i++) {
hipMemcpy(d_E_canidate + 3 * 3 * i, d_vt + 9 * 9 * i + 8 * 9, 9 * sizeof(float), hipMemcpyDeviceToDevice);
} // Calculate target E's
normalizeE << <grids, blockSize >> > (d_E_canidate, ransac_count);
// Calculate number of inliers for each E
int *inliers = calculateInliers(d_E_canidate, ransac_count);
// Pick best E and allocate d_E and E using thrust
thrust::device_ptr<int> dv_in(inliers);
auto iter = thrust::max_element(dv_in, dv_in + ransac_count);
unsigned int best_pos = iter - dv_in;
// Assign d_E
hipMemcpy(d_E, &(d_E_canidate[access3(0, 0, best_pos, 3, 3)]), 3 * 3 * sizeof(float), hipMemcpyDeviceToDevice);
// Free stuff
hipFree(inliers);
hipFree(d_A);
// svd free
hipFree(d_ut);
hipFree(d_s);
hipFree(d_vt);
hipFree(d_info);
hipFree(d_indices);
free(indices);
hipFree(d_E_canidate);
}
structure_from_motion::~structure_from_motion() {
hipblasDestroy(handle);
}
} | 98777dbf81319a72aaac5373f9b20d3327955632.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <curand.h>
#include<memory.h>
#include<iostream>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include <cmath>
#include<string>
#include<algorithm>
#include "sfm.h"
#include<cudaSift/cudaSift.h>
#include<cudaSift/cudaImage.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <thrust/remove.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include<iomanip>
#include <cuda_runtime.h>
#include <random>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <glm/glm.hpp>
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
#define checkCUDAErrorWithLine(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
#define scene_scale 100.0f
#define blockSize 128
#define TILE_DIM 32
#define BLOCK_ROWS 8
cublasHandle_t handle;
cusolverDnHandle_t cusolverH = NULL;
cudaStream_t stream = NULL;
gesvdjInfo_t gesvdj_params = NULL;
cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS;
cudaError_t cudaStat1 = cudaSuccess;
cudaError_t cudaStat2 = cudaSuccess;
cudaError_t cudaStat3 = cudaSuccess;
cudaError_t cudaStat4 = cudaSuccess;
cudaError_t cudaStat5 = cudaSuccess;
float residual = 0;
int executed_sweeps = 0;
const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR;
const float tol = 1.e-7;
const int max_sweeps = 15;
const int sort_svd = 1;
using namespace std;
glm::vec3 *dev_pos;
glm::vec3 *dev_correspond;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace SFM {
structure_from_motion::structure_from_motion(){
}
structure_from_motion::structure_from_motion(int num_images, int num_points) {
cublasCreate_v2(&handle);
status = cusolverDnCreate(&cusolverH);
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
checkCUDAError("Could not create flags");
cusolverDnSetStream(cusolverH, stream);
checkCUDAError("Could not Set strea,");
cusolverDnCreateGesvdjInfo(&gesvdj_params);
checkCUDAError("Could not create GesvdjInfo");
cusolverDnXgesvdjSetTolerance(
gesvdj_params,
tol);
checkCUDAError("Could not SetTolerance");
cusolverDnXgesvdjSetMaxSweeps(
gesvdj_params,
max_sweeps);
checkCUDAError("Could not SetMaxSweeps");
cusolverDnXgesvdjSetSortEig(
gesvdj_params,
sort_svd);
checkCUDAError("Could not SetSortEigs");
this->num_points = num_points;
float *normalized_pts1;
float *normalized_pts2;
float *norm1;
float *norm2;
cudaMalloc((void **)&d_E, 3 * 3 * sizeof(float));
// Canidate R, T
cudaMalloc((void **)&d_P, 4 * 4 * 4 * sizeof(float));
for (int i = 0; i < num_images; i++) {
cudaMalloc((void**)&normalized_pts1, 3 * num_points * sizeof(float));
norm_pts1.push_back(normalized_pts1);
cudaMalloc((void**)&normalized_pts2, 3 * num_points * sizeof(float));
norm_pts2.push_back(normalized_pts2);
cudaMalloc((void**)&norm1, 3 * num_points * sizeof(float));
norms1.push_back(norm1);
cudaMalloc((void**)&norm2, 3 * num_points * sizeof(float));
norms2.push_back(norm2);
}
cudaMalloc((void **)&d_final_points, 4 * num_points * sizeof(float));
}
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__
void kernCopyPositionsToVBO(int N, float *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = s_scale;
if (index < N) {
vbo[access2(index, x_pos, 4)] = pos[access2(x_pos, index, N)] * c_scale;
vbo[access2(index, y_pos, 4)] = pos[access2(y_pos, index, N)] * c_scale;
vbo[access2(index, z_pos, 4)] = pos[access2(z_pos, index, N)] * c_scale;
vbo[access2(index, 3, 4)] = 1.0f;
}
}
__global__
void kernCopyVelocitiesToVBO(int N, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = 1;//vel[index].x + 0.3f;
vbo[4 * index + 1] = 1;//vel[index].y + 0.3f;
vbo[4 * index + 2] = 1;//vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
void structure_from_motion::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((num_points + blockSize - 1) / blockSize);
checkCUDAErrorWithLine("Not copyBoidsToVBO failed!");
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> > (num_points, d_final_points, vbodptr_positions, 1);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> > (num_points, vbodptr_velocities, 1);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
__global__ void copy_point(SiftPoint* data, int numPoints, float *U1, float *U2) {
const int index_col = blockIdx.x*blockDim.x + threadIdx.x; // col is x to prevent warp divergence as much as possible in this naive implementation
const int index_row = blockIdx.y*blockDim.y + threadIdx.y;
if (index_row >= 3 || index_col >= numPoints)
return;
if (!index_row) {
U1[access2(index_row, index_col, numPoints)] = data[index_col].xpos;
U2[access2(index_row, index_col, numPoints)] = data[index_col].match_xpos;
}
else if (index_row == 1) {
U1[access2(index_row, index_col, numPoints)] = data[index_col].ypos;
U2[access2(index_row, index_col, numPoints)] = data[index_col].match_ypos;
}
else {
U1[access2(index_row, index_col, numPoints)] = 1;
U2[access2(index_row, index_col, numPoints)] = 1;
}
}
__global__ void normalizeE(float *E, int ransac_iterations) {
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= ransac_iterations)
return;
float u[9], d[9], v[9];
svd(&(E[index * 3 * 3]), u, d, v);
d[2 * 3 + 2] = 0;
d[1 * 3 + 1] = 1;
d[0] = 1;
// E = U * D * V'
float tmp_u[9];
multAB(u, d, tmp_u);
multABt(tmp_u, v, &(E[index * 3 * 3]));
}
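// Note: the kernel above projects each candidate E onto the set of valid
// essential matrices by recomposing it from its SVD with the singular values
// forced to (1, 1, 0).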
__global__ void element_wise_mult(float *A, float *B, int size) {
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= size)
return;
A[index] *= B[index];
}
__global__ void element_wise_div(float *A, float *B, int size) {
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= size)
return;
float val = B[index];
if (val == 0)
A[index] = 0;
else
A[index] /= val;
}
__global__ void element_wise_sum(float *A, float *B, int size) {
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= size)
return;
A[index] += B[index];
}
__global__
void vecnorm(float *A, float *res, int row, int col, float exp, float final_pow) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= col)
return;
float tmp_vlaue = 0;
#pragma unroll
for (int i = 0; i < row; i++) {
tmp_vlaue += powf(A[access2(i, index, col)], exp);
}
// Otherwise take the exp-th root and then raise to final_pow (i.e. exponent final_pow / exp)
if (exp == final_pow) {
res[index] = tmp_vlaue;
return;
}
res[index] = powf(tmp_vlaue, final_pow / exp);
}
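// Note: the kernel above computes, per column j,
// res[j] = ( sum_i A[i][j]^exp )^(final_pow / exp),
// with a shortcut when exp == final_pow. It is used below with
// (exp, final_pow) = (1, 2) for squared column sums and (2, 2) for squared L2 norms.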
__global__
void threshold_count(float *A, int *count_res, int batch_size, int ransac_count, float threshold) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= ransac_count)
return;
int count = 0;
#pragma unroll
for (int i = 0; i < batch_size; i++) {
if (A[i + index * batch_size] < threshold)
count++;
}
count_res[index] = count;
}
__global__ void canidate_kernels(float *d_P, const float *u, const float *v) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= 4) // only 4 candidate positions exist so fixed value
return;
float W[9] = { 0, -1, 0, 1, 0, 0, 0, 0, 1 }; // rotation about z axis
float Wt[9]; transpose_copy3x3(W, Wt, 3, 3);
float canidate_P[4 * 4];
float tmp_prod[9], tmp_prod2[9], T[9];
// T
canidate_P[access2(x_pos, 3, 4)] = ((!index || index == 2) ? -1 : 1) * u[access2(x_pos, 2, 3)];
canidate_P[access2(y_pos, 3, 4)] = ((!index || index == 2) ? -1 : 1) * u[access2(y_pos, 2, 3)];
canidate_P[access2(z_pos, 3, 4)] = ((!index || index == 2) ? -1 : 1) * u[access2(z_pos, 2, 3)];
// R
if (index < 2)
multABt(W, v, tmp_prod);
else
multABt(Wt, v, tmp_prod);
multAB(u, tmp_prod, tmp_prod2); // 3x3 transpose
transpose_copy3x3(tmp_prod2, canidate_P, 3, 4);
// Now we copy from 2d to 3d into d_P
//d_P[index] = index;
memcpy(&(d_P[access3(0, 0, index, 4, 4)]), canidate_P, 4 * 4 * sizeof(float));
d_P[access3(3, 0, index, 4, 4)] = 0; // Set last row manually
d_P[access3(3, 1, index, 4, 4)] = 0;
d_P[access3(3, 2, index, 4, 4)] = 0;
d_P[access3(3, 3, index, 4, 4)] = 1;
}
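// Note: these are the four standard relative-pose candidates obtained from an
// essential matrix E = U * diag(1, 1, 0) * V'. With W the z-axis rotation used
// above, the rotations are R = U*W*V' (index 0, 1) or R = U*W'*V' (index 2, 3),
// combined with translation t = -u3 (index 0, 2) or t = +u3 (index 1, 3),
// where u3 is the third column of U. The physically correct candidate is
// selected later (choosePose) by checking that a triangulated point lies in
// front of both cameras.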
__global__ void compute_linear_triangulation_A(float *A, const float *pt1, const float *pt2, const int count, const int num_points, const float *m1, const float *m2, int P_ind, bool canidate_m2) {
// if canidate_m2, we are computing 4 A's for different m2
// Points are 3xN and Projection matrices are 4x4
int index = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y; // 2 rows, x, y
if (index >= count || row >= 2)
return;
float tmp_A[2 * 4], valx, valy;
const float *correct_pt, *correct_m;
if (canidate_m2) {
assert(count == 4);
if (!row) { // Slightly help with the warp divergence here
correct_pt = pt1;
correct_m = m1;
}
else {
correct_pt = pt2;
correct_m = &(m2[access3(0, 0, index, 4, 4)]);
}
valx = correct_pt[access2(x_pos, 0, num_points)]; // we only use the first point
valy = correct_pt[access2(y_pos, 0, num_points)];
}
else {
assert(P_ind < 4 && P_ind >= 0);
if (!row) { // Slightly help with the warp divergence here
correct_pt = pt1;
correct_m = m1;
}
else {
correct_pt = pt2;
correct_m = &(m2[access3(0, 0, P_ind, 4, 4)]);;
}
valx = correct_pt[access2(x_pos, index, num_points)];
valy = correct_pt[access2(y_pos, index, num_points)]; // Num points does not need to be the same as count
}
#pragma unroll
for (int i = 0; i < 4; i++) {
tmp_A[access2(x_pos, i, 4)] = valx * correct_m[access2(2, i, 4)] - correct_m[access2(x_pos, i, 4)];
tmp_A[access2(y_pos, i, 4)] = valy * correct_m[access2(2, i, 4)] - correct_m[access2(y_pos, i, 4)];
}
memcpy(&(A[access3(((!row) ? 0 : 2), 0, index, 4, 4)]), tmp_A, 4 * 2 * sizeof(float));
}
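// Note: this builds the standard DLT linear-triangulation system. For an
// observation x = (valx, valy, 1) under a 4x4 projection matrix M, the
// constraint x ~ M*X yields the two rows
// valx * M(2,:) - M(0,:) and valy * M(2,:) - M(1,:),
// so the two views together contribute a 4x4 block A with A*X = 0. The
// homogeneous solution is the singular vector for the smallest singular value,
// which is why the callers read the last row/column of the SVD factor.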
__global__
void normalize_pt_kernal(float *v, float *converted_pt, int number_points) { // assumes size of converted_pt is 4xnum_points and v is 4x4xnum_points
int index = blockIdx.x*blockDim.x + threadIdx.x; // one per num_points
if (index >= number_points)
return;
float norm_value = v[access3(3, 3, index, 4, 4)];
if (norm_value == 0 || abs(norm_value) > 10) {
converted_pt[access2(x_pos, index, number_points)] = 1;
converted_pt[access2(y_pos, index, number_points)] = 1;
converted_pt[access2(z_pos, index, number_points)] = 1;
}
else {
converted_pt[access2(x_pos, index, number_points)] = v[access3(3, x_pos, index, 4, 4)] / norm_value;
converted_pt[access2(y_pos, index, number_points)] = v[access3(3, y_pos, index, 4, 4)] / norm_value;
converted_pt[access2(z_pos, index, number_points)] = v[access3(3, z_pos, index, 4, 4)] / norm_value;
}
converted_pt[access2(3, index, number_points)] = 1;
}
template<typename T>
int printMatrix(const T*A, int row, int col, int print_col, const char* name)
{
/// Prints first and last print_col values of A if A is a 2d matrix
T *print_a = new T[col*row];
cudaMemcpy(print_a, A, row* col * sizeof(T), cudaMemcpyDeviceToHost);
cout << name << endl;
cout << "{" << endl;
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
if (j < print_col || j > col - print_col - 1) {
float Areg = print_a[i * col + j];
cout << "\t" << Areg;
}
else if (j == print_col) {
cout << "\t....";
}
}
cout << endl;
}
cout << "}" << endl;
delete[]print_a;
return 0;
}
__global__ void fillData(float *normalized_pts, SiftPoint * sift1, int num_points, int match) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= 3 || col >= num_points)
return;
if (row == 0)
normalized_pts[row*num_points + col] = match == 0 ? sift1[col].xpos : sift1[col].match_xpos;
else if (row == 1)
normalized_pts[row*num_points + col] = match == 0 ? sift1[col].ypos : sift1[col].match_ypos;
else
normalized_pts[row*num_points + col] = 1;
}
__global__
void kron_kernal(float*d1, float*d2, float *A, int *indices, const int ransac_iterations, int num_points) {
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int A_row = 8;
const int A_col = 9;
if (index > ransac_iterations)
return;
#pragma unroll
for (int i = 0; i < A_row; i++) {
// begin
A[access3(i, 0, index, A_row, A_col)] = d1[access2(x_pos, indices[index * A_row + i], num_points)] * d2[access2(x_pos, indices[index * A_row + i], num_points)];
A[access3(i, 1, index, A_row, A_col)] = d1[access2(x_pos, indices[index * A_row + i], num_points)] * d2[access2(y_pos, indices[index * A_row + i], num_points)];
A[access3(i, 2, index, A_row, A_col)] = d1[access2(x_pos, indices[index * A_row + i], num_points)] * d2[access2(z_pos, indices[index * A_row + i], num_points)];
// second
A[access3(i, 3, index, A_row, A_col)] = d1[access2(y_pos, indices[index * A_row + i], num_points)] * d2[access2(x_pos, indices[index * A_row + i], num_points)];
A[access3(i, 4, index, A_row, A_col)] = d1[access2(y_pos, indices[index * A_row + i], num_points)] * d2[access2(y_pos, indices[index * A_row + i], num_points)];
A[access3(i, 5, index, A_row, A_col)] = d1[access2(y_pos, indices[index * A_row + i], num_points)] * d2[access2(z_pos, indices[index * A_row + i], num_points)];
//third
A[access3(i, 6, index, A_row, A_col)] = d1[access2(z_pos, indices[index * A_row + i], num_points)] * d2[access2(x_pos, indices[index * A_row + i], num_points)];
A[access3(i, 7, index, A_row, A_col)] = d1[access2(z_pos, indices[index * A_row + i], num_points)] * d2[access2(y_pos, indices[index * A_row + i], num_points)];
A[access3(i, 8, index, A_row, A_col)] = d1[access2(z_pos, indices[index * A_row + i], num_points)] * d2[access2(z_pos, indices[index * A_row + i], num_points)];
}
}
void printCuda(float *a1, int n, std::string name) {
float *print_a = new float[n];
std::cout << name.c_str() << std::endl;
std::cout << "{" << std::endl;
cudaMemcpy(print_a, a1, n * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++) {
std::cout << "\t" << print_a[i] << std::endl;
}
std::cout << "}" << std::endl;
delete[]print_a;
}
void svd_device(float *src, float *VT, float *S, float *U, int m, int n, const int batchSize, int *d_info) {
const int minmn = (m < n) ? m : n;
const int lda = m;
const int ldu = m;
const int ldv = n;
int lwork = 0; /* size of workspace */
float *d_work = NULL; /* device workspace for gesvdjBatched */
cudaDeviceSynchronize();
checkCUDAError("Could not Synchronize");
cusolverDnSgesvdjBatched_bufferSize(cusolverH, jobz, m, n, src, lda, S, VT, ldu, U, ldv, &lwork, gesvdj_params, batchSize);
checkCUDAError("Could not SgesvdjBatched_bufferSize");
cudaMalloc((void**)&d_work, sizeof(float)*lwork);
cusolverDnSgesvdjBatched(cusolverH, jobz, m, n, src, lda, S, VT, ldu, U, ldv, d_work, lwork, d_info, gesvdj_params, batchSize);
checkCUDAError("Could not SgesvdjBatched");
cudaDeviceSynchronize();
}
__global__ void transpose(float *odata, float* idata, int width, int height)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
if (xIndex < width && yIndex < height)
{
unsigned int index_in = xIndex + width * yIndex;
unsigned int index_out = yIndex + height * xIndex;
odata[index_out] = idata[index_in];
}
}
void svd_device_transpose(float *src, float *UT, float *S, float *VT, int m, int n, const int batchSize, int *d_info) {
float *d_A_trans = NULL;
cudaMalloc((void **)&d_A_trans, 8 * 9 * batchSize * sizeof(float));
for (int i = 0; i < batchSize; i++) {
dim3 blocks(10, 10);
dim3 fullBlocksPerGrid(1, 1);
transpose << < fullBlocksPerGrid, blocks >> > (d_A_trans + i * 8 * 9, src + i * 8 * 9, 9, 8);
}
const int minmn = (m < n) ? m : n;
const int lda = m;
const int ldu = m;
const int ldv = n;
int lwork = 0; /* size of workspace */
float *d_work = NULL; /* device workspace for gesvdjBatched */
cudaDeviceSynchronize();
checkCUDAError("Could not Synchronize");
cusolverDnSgesvdjBatched_bufferSize(cusolverH, jobz, m, n, d_A_trans, lda, S, UT, ldu, VT, ldv, &lwork, gesvdj_params, batchSize);
checkCUDAError("Could not SgesvdjBatched_bufferSize");
cudaMalloc((void**)&d_work, sizeof(float)*lwork);
cusolverDnSgesvdjBatched(cusolverH, jobz, m, n, d_A_trans, lda, S, UT, ldu, VT, ldv, d_work, lwork, d_info, gesvdj_params, batchSize);
checkCUDAError("Could not SgesvdjBatched");
cudaDeviceSynchronize();
}
void invert_device(float *src, float *dst, int n) {
int batchSize = 1;
int *P, *INFO;
cudaMalloc<int>(&P, n * batchSize * sizeof(int));
cudaMalloc<int>(&INFO, batchSize * sizeof(int));
int lda = n;
float *A[] = {src};
float ** A_d;
cudaMalloc<float*>(&A_d, sizeof(A));
cudaMemcpy(A_d, A, sizeof(A), cudaMemcpyHostToDevice);
cublasSgetrfBatched(handle, n, A_d, lda, P, INFO, batchSize);
int INFOh = 0;
cudaMemcpy(&INFOh, INFO, sizeof(int), cudaMemcpyDeviceToHost);
if (INFOh == 17) {
fprintf(stderr, "Factorization Failed: Matrix is singular\n");
cudaDeviceReset();
exit(EXIT_FAILURE);
}
float* C[] = { dst };
float** C_d;
cudaMalloc<float*>(&C_d, sizeof(C));
cudaMemcpy(C_d, C, sizeof(C), cudaMemcpyHostToDevice);
cublasSgetriBatched(handle, n, A_d, lda, P, C_d, n, INFO, batchSize);
cudaMemcpy(&INFOh, INFO, sizeof(int), cudaMemcpyDeviceToHost);
if (INFOh != 0)
{
fprintf(stderr, "Inversion Failed: Matrix is singular\n");
cudaDeviceReset();
exit(EXIT_FAILURE);
}
cudaFree(P), cudaFree(INFO);
}
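// Note: inversion above is done as a batch of one: cublasSgetrfBatched computes
// an LU factorization with partial pivoting (pivots in P, status in INFO), and
// cublasSgetriBatched then assembles the inverse from those factors; a nonzero
// INFO indicates a singular (non-invertible) matrix.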
void invert(float *s, float *d, int n) {
float *src;
cudaMalloc<float>(&src, n * n * sizeof(float));
cudaMemcpy(src, s, n * n * sizeof(float), cudaMemcpyHostToDevice);
invert_device(src, d, n);
cudaFree(src);
}
void mmul(const float* A, const float* B, float* C, const int m, const int k, const int n) {
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, alpha, B, n, A, k, beta, C, n);
}
// For A'B
// A is M by N and B is M by N
//lda = num_col_A = num_row_AT = N;
//ldb = num_col_B = num_row_BT = N;
//ldc = num_row_C = N;
//m = num_row_C = num_row_AT = num_col_A = N;
//n = num_col_C = num_row_BT = num_col_B = N;
//k = num_col_AT = num_row_B = M;
void gpu_blas_mmul_batched(const float *A, const float *B, float *C, const int m, const int k, const int n, const int stride_A, const int stride_B, const int stride_C, const int batches,
bool trans_a, bool trans_b) {
assert(stride_A == 0 || stride_A == m * k);
assert(stride_B == 0 || stride_B == n * k);
assert(stride_C == 0 || stride_C == m * n);
cublasHandle_t handle;
cublasCreate(&handle);
const float alf = 1; // gpu vs cpu
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
if(trans_a == 0 && trans_b == 0)
cublasSgemmStridedBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, alpha, B, n, stride_B, A, k, stride_A, beta, C, n, stride_C, batches);
else if(trans_a == 1 && trans_b == 0)
cublasSgemmStridedBatched(handle, CUBLAS_OP_N, CUBLAS_OP_T, n, m, k, alpha, B, n, stride_B, A, m, stride_A, beta, C, n, stride_C, batches);
else if(trans_a == 0 && trans_b == 1)
cublasSgemmStridedBatched(handle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, k, alpha, B, k, stride_B, A, m, stride_A, beta, C, k, stride_C, batches);
cublasDestroy(handle);
}
template<typename T>
T* cuda_alloc_copy(const T* host, int size) {
T* data;
cudaMalloc((void**)&data, size * sizeof(T));
cudaMemcpy(data, host, size * sizeof(T), cudaMemcpyHostToDevice);
return data;
}
int * structure_from_motion::calculateInliers(float *d_E_canidate, int ransac_iter) {
/// This function calculates n1, d1, n2, d2 and then finds the number of residuals per E candidate in X[0] and X[1]
// Init E1
float E1[9] = { 0, -1, 0, 1, 0, 0, 0, 0, 0 };
float *d_E1;
cudaMalloc((void **)&d_E1, 9 * sizeof(float));
cudaMemcpy(d_E1, E1, 9 * sizeof(float), cudaMemcpyHostToDevice);
// Allocs
float *x1_transformed, *x2_transformed;
cudaMalloc((void**)&x1_transformed, 3 * num_points * ransac_iter * sizeof(float));
cudaMalloc((void**)&x2_transformed, 3 * num_points * ransac_iter * sizeof(float));
float *d1, *d2;
cudaMalloc((void**)&d1, 3 * num_points * ransac_iter * sizeof(float));
cudaMalloc((void**)&d2, 3 * num_points * ransac_iter * sizeof(float));
float *n1, *n2;
cudaMalloc((void **)&n1, 3 * num_points * ransac_iter * sizeof(float));
cudaMalloc((void **)&n2, 3 * num_points * ransac_iter * sizeof(float));
// Calculate x1 (from matlab code) {
int m = 3, k = 3, n = num_points;
gpu_blas_mmul_batched(d_E_canidate, norms1[0], x1_transformed, m, k, n, m * k, 0, m * n, ransac_iter, 0,0);
//Compute n1
m = num_points, k = 3, n = 3; // these probably need to change because we need to transpose X[1]
gpu_blas_mmul_batched(norms2[0], d_E_canidate, n1, m, k, n, 0, 3 * 3, m * n, ransac_iter, 1,0); // transpose X[1]
int blocks = ceil((3 * num_points + blockSize - 1) / blockSize); // BUG!!! we need to make this batched
element_wise_mult << <blocks, blockSize >> > (n1, norms1[0], 3 * num_points);
// Compute d1
// d1 = E1 * x1_transformed
m = 3, k = 3, n = num_points;
gpu_blas_mmul_batched(d_E_canidate, x1_transformed, d1, m, k, n, m*k, 0, m* n, ransac_iter, 0,0);
// }
// Now calculate x2_transformed, n2 and d2 {
m = 3, k = 3, n = num_points;
gpu_blas_mmul_batched(d_E_canidate, norms2[0], x2_transformed, m, k, n, m*k, 0, m* n, ransac_iter, 0,0);
//Compute n2
m = num_points, k = 3, n = 3; // these probably need to change because we need to transpose X[0]
gpu_blas_mmul_batched(norms1[0], d_E_canidate, n2, m, k, n, 0, 3 * 3, m * n, ransac_iter, 1,0); // transpose X[0]
blocks = ceil((3 * num_points + blockSize - 1) / blockSize);
element_wise_mult << <blocks, blockSize >> > (n2, norms2[0], 3 * num_points);
// Compute d2
m = 3, k = 3, n = num_points;
gpu_blas_mmul_batched(d_E_canidate, x2_transformed, d2, m, k, n, m*k, 0, m* n, ransac_iter, 0,0);
// }
// Now calculate the residual per canidate E{
float *norm_n1, *norm_n2, *norm_d1, *norm_d2;
int *inliers;
int size = num_points * ransac_iter;
cudaMalloc((void**)&norm_n1, size * sizeof(float));
cudaMalloc((void**)&norm_n2, size * sizeof(float));
cudaMalloc((void**)&norm_d1, size * sizeof(float));
cudaMalloc((void**)&norm_d2, size * sizeof(float));
cudaMalloc((void**)&inliers, ransac_iter * sizeof(int));
blocks = ceil((num_points * ransac_iter + blockSize - 1) / blockSize);
vecnorm << <blocks, blockSize >> > (n1, norm_n1, 3, size, 1, 2);
vecnorm << <blocks, blockSize >> > (n2, norm_n2, 3, size, 1, 2);
vecnorm << <blocks, blockSize >> > (d1, norm_d1, 3, size, 2, 2);
vecnorm << <blocks, blockSize >> > (d2, norm_d2, 3, size, 2, 2);
element_wise_div << <blocks, blockSize >> > (norm_n1, norm_d1, size);
element_wise_div << <blocks, blockSize >> > (norm_n2, norm_d2, size);
// have the residuals in norm_n1
element_wise_sum << <blocks, blockSize >> > (norm_n1, norm_n2, size);
// Calculate inliers per cell
blocks = ceil((ransac_iter + blockSize - 1) / blockSize);
threshold_count << <blocks, blockSize >> > (norm_n1, inliers, num_points, ransac_iter, 1e-3); // tested
//}
// Not sure if we should free
cudaFree(n1);
cudaFree(n2);
cudaFree(d1);
cudaFree(d2);
cudaFree(x1_transformed);
cudaFree(x2_transformed);
// Free the norms!!!
cudaFree(norm_n1);
cudaFree(norm_n2);
cudaFree(norm_d1);
cudaFree(norm_d2);
// 100% free
cudaFree(d_E1);
return inliers;
}
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
// Create a pseudo-random number generator
curandGenerator_t prng;
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
// Fill the array with random numbers on the device
curandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
}
void structure_from_motion::computePoseCandidates() {
// Tested
float E[9];// = { -0.211 , -0.798 , -0.561, -0.967 , 0.252 , 0.009, 0.046 , 0.047 , 0.039 }; // TODO remove this once testing is done
cudaMemcpy(E, d_E, 3 * 3 * sizeof(float), cudaMemcpyDeviceToHost);
float u[9], d[9], v[9], tmp[9];
svd(E, u, d, v); // v is not transposed
multABt(u, v, tmp); // u * v'
if (det(tmp) < 0)
neg(v);
float *d_u, *d_v;
d_u = cuda_alloc_copy(u, 3 * 3);
d_v = cuda_alloc_copy(v, 3 * 3);
canidate_kernels << <1, 32 >> > (d_P, d_u, d_v);
cudaFree(d_u);
cudaFree(d_v);
}
void structure_from_motion::linear_triangulation() {
float P1[16] = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 }; // I(4)
float *d_P1 = cuda_alloc_copy(P1, 16);
float *d_A, *d_u, *d_d, *d_vt;
cudaMalloc((void **)&d_A, 4 * 4 * num_points * sizeof(float));
cudaMalloc((void **)&d_u, 4 * 4 * num_points * sizeof(float));
cudaMalloc((void **)&d_d, 4 * num_points * sizeof(float));
cudaMalloc((void **)&d_vt, 4 * 4 * num_points * sizeof(float));
// Create A
dim3 grids(ceil((num_points * 2 + blockSize - 1) / blockSize), 1);
dim3 block_sizes(blockSize / 2, 2);
compute_linear_triangulation_A << <grids, block_sizes >> > (d_A, norms1[0], norms2[0], num_points, num_points, d_P1, d_P, P_ind, false);
checkCUDAError("A computation error");
// Assumes V isn't transposed; we need to take the last column
int *d_info = NULL;
cudaMalloc((void**)&d_info, 4 * sizeof(int));
svd_device(d_A, d_vt, d_d, d_u, 4, 4, num_points, d_info);
checkCUDAError("SVD error");
dim3 grids2(ceil((num_points + blockSize - 1) / blockSize), 1);
dim3 block_sizes2(blockSize, 4);
// Normalize by using the last row of v'
normalize_pt_kernal << <grids2, block_sizes2 >> > (d_vt, d_final_points, num_points);
printMatrix(d_final_points, 3, num_points, 5, "Transformed points");
cudaFree(d_P1);
cudaFree(d_A);
cudaFree(d_u);
cudaFree(d_d);
cudaFree(d_vt);
cudaFree(d_info);
}
void structure_from_motion::choosePose() {
float P1[16] = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 }; // I(4)
float *d_P1 = cuda_alloc_copy(P1, 16);
float *d_A, *d_u, *d_d, *d_vt;
cudaMalloc((void **)&d_A, 4 * 4 * 4 * sizeof(float));
cudaMalloc((void **)&d_u, 4 * 4 * 4 * sizeof(float));
cudaMalloc((void **)&d_d, 4 * 4 * sizeof(float));
cudaMalloc((void **)&d_vt, 4 * 4 * 4 * sizeof(float));
// Create A
dim3 blocks(1, 1);
dim3 block_sizes(4, 2);
compute_linear_triangulation_A << <blocks, block_sizes >> > (d_A, norms1[0], norms2[0], 4, num_points, d_P1, d_P, -1, true);
// We only care about V
float *d_d1, *d_d2; // 3x4 batched
cudaMalloc((void **)&d_d1, 4 * 4 * sizeof(float));
cudaMalloc((void **)&d_d2, 4 * 4 * sizeof(float));
// Assumes V isn't transposed; we need to take the last row
// svd(d_A, d_u, d_d, d_v, 4 batches)
checkCUDAErrorWithLine("Before SVD");
int *d_info = NULL;
cudaMalloc((void**)&d_info, 4 * sizeof(int));
svd_device(d_A, d_vt, d_d, d_u, 4, 4, 4, d_info);
checkCUDAErrorWithLine("SVD");
normalize_pt_kernal << <1, 4 >> > (d_vt, d_d1, 4);
printMatrix(d_d1, 4, 4, 4, "d1");
float val_d1, val_d2;
P_ind = 0;
for (int i = 0; i < 4; i++) { // batched doesn't work for inverse + it is only 4, 4x4 matrices, should be easy
invert(d_P + i * 4 * 4, d_P + i * 4 * 4, 4);
int m = 4, k = 4, n = 4;
mmul(d_P + i * 4 * 4, d_d1, d_d2, m, k, n);
// Do the final testing on the host
cudaMemcpy(&val_d1, &(d_d1[access2(2, i, 4)]), sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&val_d2, &(d_d2[access2(2, i, 4)]), sizeof(float), cudaMemcpyDeviceToHost);
// Now we do the final check on the CPU as well, since it is just as easy there
if (val_d1 > 0 && val_d2 > 0)
P_ind = i;
}
cudaFree(d_P1);
cudaFree(d_A);
cudaFree(d_u);
cudaFree(d_d);
cudaFree(d_vt);
cudaFree(d_d1);
cudaFree(d_d2);
cudaFree(d_info);
}
void structure_from_motion::struct_from_motion(SiftData siftData1, float *intrinsic, int n, int num_images){
float *invert_intrinsic;
cudaMalloc<float>(&invert_intrinsic, n * n * sizeof(float));
invert(intrinsic, invert_intrinsic, n);
// TODO: Improve this
printMatrix(invert_intrinsic, 3, 3, 3, "inver");
SiftPoint *sift1 = siftData1.d_data;
for (int i = 0; i < num_images-1; i++) {
dim3 block(blockSize, 3);
dim3 fullBlocksPerGrid((siftData1.numPts + blockSize - 1) / blockSize, 1);
fillData << <fullBlocksPerGrid, block >> > (norm_pts1[i], sift1, siftData1.numPts, 0);
fillData << <fullBlocksPerGrid, block >> > (norm_pts2[i], sift1, siftData1.numPts, 1);
mmul(invert_intrinsic, norm_pts1[i], norms1[i], 3, 3, siftData1.numPts);
mmul(invert_intrinsic, norm_pts2[i], norms2[i], 3, 3, siftData1.numPts);
}
const int ransac_count = floor(num_points / 8);
// Create random order of points (on cpu using std::shuffle)
int *indices = new int[num_points];
int *d_indices;
cudaMalloc((void **)&d_indices, num_points * sizeof(int));
for (int i = 0; i < num_points; indices[i] = i, i++);
// Shuffle data
std::random_device rd;
std::mt19937 g(rd());
//shuffle(indices, indices + num_points, g); todo enable this
// Copy data to gpu
cudaMemcpy(d_indices, indices, num_points * sizeof(int), cudaMemcpyHostToDevice);
// Calculate all kron products correctly
float *d_A;
cudaMalloc((void **)&d_A, 8 * 9 * ransac_count * sizeof(float));
checkCUDAErrorWithLine("A malloc failed!");
int grids = ceil((ransac_count + blockSize - 1) / blockSize);
kron_kernal << <grids, blockSize >> > (norms1[0], norms2[0], d_A, d_indices, ransac_count, num_points);
checkCUDAErrorWithLine("Kron failed!");
float *d_E_canidate;
cudaMalloc((void **)&d_E_canidate, 3 * 3 * ransac_count * sizeof(float));
// Calculate batch SVD of d_A
float *d_ut, *d_vt, *d_s;
cudaMalloc((void **)&d_ut, 8 * 8 * ransac_count * sizeof(float));
cudaMalloc((void **)&d_vt, 9 * 9 * ransac_count * sizeof(float));
cudaMalloc((void **)&d_s, 8 * ransac_count * sizeof(float));
int *d_info = NULL;
cudaMalloc((void**)&d_info, 4 * sizeof(int));
svd_device_transpose(d_A, d_ut, d_s, d_vt, 8, 9, ransac_count, d_info);
// Last column of V becomes E (row of v' in our case)
int blocks = ceil((ransac_count + blockSize - 1) / blockSize);
for (int i = 0; i < ransac_count; i++) {
cudaMemcpy(d_E_canidate + 3 * 3 * i, d_vt + 9 * 9 * i + 8 * 9, 9 * sizeof(float), cudaMemcpyDeviceToDevice);
} // Calculate target E's
normalizeE << <grids, blockSize >> > (d_E_canidate, ransac_count);
// Calculate number of inliers for each E
int *inliers = calculateInliers(d_E_canidate, ransac_count);
// Pick best E and allocate d_E and E using thrust
thrust::device_ptr<int> dv_in(inliers);
auto iter = thrust::max_element(dv_in, dv_in + ransac_count);
unsigned int best_pos = iter - dv_in;
// Assign d_E
cudaMemcpy(d_E, &(d_E_canidate[access3(0, 0, best_pos, 3, 3)]), 3 * 3 * sizeof(float), cudaMemcpyDeviceToDevice);
// Free stuff
cudaFree(inliers);
cudaFree(d_A);
// svd free
cudaFree(d_ut);
cudaFree(d_s);
cudaFree(d_vt);
cudaFree(d_info);
cudaFree(d_indices);
free(indices);
cudaFree(d_E_canidate);
}
structure_from_motion::~structure_from_motion() {
cublasDestroy(handle);
}
} |
31e4c426802a98c82729928fba905704537c1977.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zgeelltmv.cu normal z -> c, Sat Nov 15 19:54:21 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// ELL SpMV kernel
//Michael Garland
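// Layout assumed by this kernel: ELL/ELLPACK keeps a fixed number of entries
// per row (num_cols_per_row), padded with explicit zeros, and addresses them
// "slot-major": entry n of row r sits at dval[num_rows*n + r], with its column
// index in dcolind[num_rows*n + r]. For example, the 3x3 matrix
// [10 0 20]
// [ 0 30 0]
// [40 50 0]
// with num_cols_per_row = 2 could be stored (one possible padding) as
// dval = {10, 30, 40, 20, 0, 50}
// dcolind = { 0, 1, 0, 2, 0, 1}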
__global__ void
cgeelltmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_rows * n + row ];
magmaFloatComplex val = dval [ num_rows * n + row ];
if( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELL SpMV kernel
//Michael Garland
__global__ void
cgeelltmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
int offset,
int blocksize,
magmaIndex_ptr addrows,
magmaFloatComplex_ptr dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_rows * n + row ];
magmaFloatComplex val = dval [ num_rows * n + row ];
if( val != 0)
dot += val * dx[col ];
}
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELL
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( cgeelltmv_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
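/*
Usage sketch (illustrative only; assumes m, n, nnz_per_row, the device arrays
dval/dcolind/dx/dy in the ELL layout described above, and queue have already
been set up by the caller):
magmaFloatComplex alpha = MAGMA_C_MAKE( 2.0, 0.0 );
magmaFloatComplex beta = MAGMA_C_MAKE( 0.0, 0.0 );
// dy := alpha * A * dx + beta * dy, using the ELL kernel above
magma_cgeelltmv( MagmaNoTrans, m, n, nnz_per_row,
alpha, dval, dcolind, dx, beta, dy, queue );
*/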
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
lambda magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELL
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[in]
offset magma_int_t
in case a diagonal other than the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magma_index_t*
in case the matrix powers kernel is used
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgeelltmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
int offset,
int blocksize,
magma_index_t *addrows,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
magmaFloatComplex tmp_shift;
//magma_csetvector(1,&lambda,1,&tmp_shift,1);
tmp_shift = lambda;
hipLaunchKernelGGL(( cgeelltmv_kernel_shift), dim3(grid), dim3(threads), 0, queue ,
m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
| 31e4c426802a98c82729928fba905704537c1977.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zgeelltmv.cu normal z -> c, Sat Nov 15 19:54:21 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// ELL SpMV kernel
//Michael Garland
__global__ void
cgeelltmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_rows * n + row ];
magmaFloatComplex val = dval [ num_rows * n + row ];
if( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELL SpMV kernel
//Michael Garland
__global__ void
cgeelltmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
int offset,
int blocksize,
magmaIndex_ptr addrows,
magmaFloatComplex_ptr dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_rows * n + row ];
magmaFloatComplex val = dval [ num_rows * n + row ];
if( val != 0)
dot += val * dx[col ];
}
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELL
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
cgeelltmv_kernel<<< grid, threads, 0, queue >>>
( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
lambda magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELL
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[in]
offset magma_int_t
in case a diagonal other than the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magma_index_t*
in case the matrix powers kernel is used
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgeelltmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
int offset,
int blocksize,
magma_index_t *addrows,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
magmaFloatComplex tmp_shift;
//magma_csetvector(1,&lambda,1,&tmp_shift,1);
tmp_shift = lambda;
cgeelltmv_kernel_shift<<< grid, threads, 0, queue >>>
( m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
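// Note added for clarity (not part of the original file): the shift kernel
// computes dy[row] = alpha*(A*dx)[row] - lambda*dx[s] + beta*dy[row], where the
// shifted entry s is offset+row for rows below blocksize and
// addrows[row-blocksize] otherwise; lambda is applied exactly as passed in, i.e.
// it is not additionally scaled by alpha inside the kernel.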
|
885a831a57accb52f10114f97f4fd85dd018887c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Macro.h"
#include "CUFLU.h"
#if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == WAF )
// check before compiling anything else
#if ( NCOMP_PASSIVE != 0 )
# error : WAF scheme does NOT support passive scalars !!
#endif
#define to1D1(z,y,x) ( __umul24(z, FLU_NXT*FLU_NXT) + __umul24(y, FLU_NXT) + x )
#define to1D2(z,y,x) ( __umul24(z-FLU_GHOST_SIZE, PS2*PS2) + __umul24(y-FLU_GHOST_SIZE, PS2) + x-FLU_GHOST_SIZE )
#include "CUFLU_Shared_FluUtility.cu"
#if( RSOLVER == EXACT )
#include "CUFLU_Shared_RiemannSolver_Exact.cu"
#elif ( RSOLVER == ROE )
static __device__ void Solve_StarRoe( real eival[5], real L_star[5], real R_star[5], const real L[5],
const real R[5], const real Gamma, const real MinPres );
#endif
#ifdef WAF_DISSIPATE
static __device__ void Dis_Stru( real flux[5], const real L[5], const real R[5], const real L_star[5],
const real R_star[5], const real limit[5], const real theta[5],
const real Gamma );
#else
static __device__ void Undis_Stru( real flux[5], const real L[5], const real R[5], const real L_star[5],
const real R_star[5], const real limit[5], const real theta[5],
const real Gamma );
#endif
static __device__ void CUFLU_Advance( real g_Fluid_In [][5][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[][5][ PS2*PS2*PS2 ],
real g_Flux[][9][5][ PS2*PS2 ],
const real dt, const real _dh, const real Gamma, const bool StoreFlux,
const int j_gap, const int k_gap, real s_u[][FLU_NXT][5],
real s_flux[][PS2+1][5], real s_Lstar[][PS2+3][5], real s_Rstar[][PS2+3][5],
const bool FinalOut, const int XYZ, const WAF_Limiter_t WAF_Limiter,
const real MinDens, const real MinPres );
static __device__ void Solve_Flux( real flux[5], const real lL_star[5], const real lR_star[5],
const real cL_star[5], const real cR_star[5], const real rL_star[5],
const real rR_star[5], const real eival[5] ,const real L_2[5],
const real L_1[5], const real R_1[5],const real R_2[5],
const real Gamma, const real ratio, const WAF_Limiter_t WAF_Limiter );
static __device__ void set_flux( real flux[5], const real val[5], const real Gamma );
static __device__ real set_limit( const real r, const real c, const WAF_Limiter_t WAF_Limiter );
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_FluidSolver_WAF
// Description : GPU fluid solver based on the Weighted-Average-Flux (WAF) scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The three-dimensional evolution is achieved by using the dimensional-split method
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output flux
// g_Corner : Global memory array storing the physical corner coordinates of each patch group (USELESS CURRENTLY)
// g_Pot_USG : Global memory array storing the input potential for UNSPLIT_GRAVITY (NOT SUPPORTED in WAF)
// dt : Time interval to advance solution
// _dh : 1 / grid size
// Gamma : Ratio of specific heats
// StoreFlux : true --> store the coarse-fine fluxes
// XYZ : true : x->y->z ( forward sweep)
// false : z->y->x (backward sweep)
// WAF_Limiter : Selection of the limit function
// 0 : superbee
// 1 : van-Leer
// 2 : van-Albada
// 3 : minbee
// MinDens/Pres : Minimum allowed density and pressure
//-------------------------------------------------------------------------------------------------------
__global__ void CUFLU_FluidSolver_WAF( real g_Fluid_In [] [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[] [NCOMP_TOTAL][ PS2*PS2*PS2 ],
real g_Flux [][9][NCOMP_TOTAL][ PS2*PS2 ],
const double g_Corner[][3],
const real g_Pot_USG[][ USG_NXT_F*USG_NXT_F*USG_NXT_F ],
const real dt, const real _dh, const real Gamma, const bool StoreFlux,
const bool XYZ, const WAF_Limiter_t WAF_Limiter,
const real MinDens, const real MinPres )
{
__shared__ real s_u [FLU_BLOCK_SIZE_Y][FLU_NXT][5];
__shared__ real s_flux [FLU_BLOCK_SIZE_Y][PS2+1][5];
__shared__ real s_L_st [FLU_BLOCK_SIZE_Y][PS2+3][5];
__shared__ real s_R_st [FLU_BLOCK_SIZE_Y][PS2+3][5];
if ( XYZ )
{
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, 0, 0,
s_u, s_flux, s_L_st, s_R_st, false, 0, WAF_Limiter, MinDens, MinPres );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, FLU_GHOST_SIZE, 0,
s_u, s_flux, s_L_st, s_R_st, false, 3, WAF_Limiter, MinDens, MinPres );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_u, s_flux, s_L_st, s_R_st, true, 6, WAF_Limiter, MinDens, MinPres );
}
else
{
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, 0, 0,
s_u, s_flux, s_L_st, s_R_st, false, 6, WAF_Limiter, MinDens, MinPres );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, 0, FLU_GHOST_SIZE,
s_u, s_flux, s_L_st, s_R_st, false, 3, WAF_Limiter, MinDens, MinPres );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_u, s_flux, s_L_st, s_R_st, true, 0, WAF_Limiter, MinDens, MinPres );
}
} // FUNCTION : CUFLU_FluidSolver_WAF
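// Note added for clarity (not part of the original source): the two branches above
// apply the same three one-dimensional sweeps in opposite order,
//     XYZ == true  :  x -> y -> z  (forward sweep, final output written in the z pass)
//     XYZ == false :  z -> y -> x  (backward sweep, final output written in the x pass)
// Alternating the two orderings between successive calls is the usual way to keep
// the dimensional splitting close to second-order accurate.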
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_Advance
// Description : GPU device function, which performs a one-dimensional sweep based on the WAF scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The direction of the one dimensional sweep is determined by the input parameter "XYZ"
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output flux
// dt : Time interval to advance solution
// _dh : 1 / grid size
// Gamma : ratio of specific heats
// StoreFlux : true --> store the coarse-fine fluxes
// j_gap : Number of useless grids in each side in the j direction (j may not be equal to y)
// k_gap : Number of useless grids in each side in the k direction (k may not be equal to z)
// s_u : Shared memory array storing the fluid variables used to compute the intercell flux
// s_flux : Shared memory array storing the final flux used to update the fluid variables
// s_Lstar : Shared memory array storing the left region in the solution of Riemann problem
// s_Rstar : Shared memory array storing the right region in the solution of Riemann problem
// FinalOut : true : output data
// false : don't output data
// XYZ : 0 : Update the solution in the x direction
// 3 : Update the solution in the y direction
// 6 : Update the solution in the z direction
// --> This parameter is also used to determine the place to store the output flux
// WAF_Limiter : Selection of the limit function
// 0 : superbee
// 1 : van-Leer
// 2 : van-Albada
// 3 : minbee
// MinDens/Pres : Minimum allowed density and pressure
//-------------------------------------------------------------------------------------------------------
__device__ void CUFLU_Advance( real g_Fluid_In [][5][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[][5][ PS2*PS2*PS2 ],
real g_Flux[][9][5][ PS2*PS2 ],
const real dt, const real _dh, const real Gamma, const bool StoreFlux,
const int j_gap, const int k_gap, real s_u[][FLU_NXT][5], real s_flux[][PS2+1][5],
real s_Lstar[][PS2+3][5], real s_Rstar[][PS2+3][5], const bool FinalOut,
const int XYZ, const WAF_Limiter_t WAF_Limiter, const real MinDens, const real MinPres )
{
const uint bx = blockIdx.x;
const uint tx = threadIdx.x;
const uint ty = threadIdx.y;
const uint dj = blockDim.y;
const uint size_j = FLU_NXT - (j_gap<<1);
const uint size_k = FLU_NXT - (k_gap<<1);
const uint NColumn = __umul24( size_j, size_k );
const uint i = tx; // (i,j) the element in shared memory under evaluation
uint j = j_gap + ty%size_j;
uint k = k_gap + ty/size_j;
uint Column0 = 0; // the total number of columns that have been updated
const uint j_end = FLU_NXT - j_gap;
const uint k_end = FLU_NXT - k_gap;
const real ratio = dt*_dh; // dt over dx
const real Gamma_m1 = Gamma - (real)1.0;
const real _Gamma_m1 = (real)1.0 / Gamma_m1;
bool RuleOut = false;
real Fluid[5], eval[5];
int ID1, ID2, ID3, ii, delta_k, Comp[5];
FluVar ConVar;
// set the order of component for update in different directions
switch ( XYZ )
{
case 0: Comp[0] = 0; Comp[1] = 1; Comp[2] = 2; Comp[3] = 3; Comp[4] = 4; break;
case 3: Comp[0] = 0; Comp[1] = 2; Comp[2] = 1; Comp[3] = 3; Comp[4] = 4; break;
case 6: Comp[0] = 0; Comp[1] = 3; Comp[2] = 2; Comp[3] = 1; Comp[4] = 4; break;
}
// start the WAF scheme
do
{
// determine the array indices for updating in different directions
switch ( XYZ )
{
case 0: ID1 = to1D1( k, j, i ); break;
case 3: ID1 = to1D1( k, i, j ); break;
case 6: ID1 = to1D1( i, k, j ); break;
}
// load data into per-thread registers
for (int v=0; v<5; v++) Fluid[v] = g_Fluid_In[bx][ Comp[v] ][ID1];
// load the primitive variables into shared memory
s_u[ty][i][0] = Fluid[0];
s_u[ty][i][1] = Fluid[1] / Fluid[0];
s_u[ty][i][2] = Fluid[2] / Fluid[0];
s_u[ty][i][3] = Fluid[3] / Fluid[0];
s_u[ty][i][4] = Gamma_m1*( Fluid[4] - (real)0.5*( Fluid[1]*Fluid[1] + Fluid[2]*Fluid[2] +
Fluid[3]*Fluid[3] ) / Fluid[0] );
s_u[ty][i][4] = CUFLU_CheckMinPres( s_u[ty][i][4], MinPres );
__syncthreads();
// solve the Riemann problem
if ( i >= 1 && i <= FLU_GHOST_SIZE + PS2 + 1 )
{
ii = i - 1;
# if ( RSOLVER == EXACT )
FluVar5 eival_st, L_star_st, R_star_st;
FluVar L_st, R_st;
L_st.Rho = s_u[ty][ii][0];
L_st.Px = s_u[ty][ii][1];
L_st.Py = s_u[ty][ii][2];
L_st.Pz = s_u[ty][ii][3];
L_st.Egy = s_u[ty][ii][4];
R_st.Rho = s_u[ty][ i][0];
R_st.Px = s_u[ty][ i][1];
R_st.Py = s_u[ty][ i][2];
R_st.Pz = s_u[ty][ i][3];
R_st.Egy = s_u[ty][ i][4];
CUFLU_RiemannSolver_Exact( 0, &eival_st, &L_star_st, &R_star_st, L_st, R_st, Gamma );
eval[0] = eival_st.Rho;
eval[1] = eival_st.Px;
eval[2] = eival_st.Py;
eval[3] = eival_st.Pz;
eval[4] = eival_st.Egy;
s_Lstar[ty][ii][0] = L_star_st.Rho;
s_Lstar[ty][ii][1] = L_star_st.Px;
s_Lstar[ty][ii][2] = L_star_st.Py;
s_Lstar[ty][ii][3] = L_star_st.Pz;
s_Lstar[ty][ii][4] = L_star_st.Egy;
s_Rstar[ty][ii][0] = R_star_st.Rho;
s_Rstar[ty][ii][1] = R_star_st.Px;
s_Rstar[ty][ii][2] = R_star_st.Py;
s_Rstar[ty][ii][3] = R_star_st.Pz;
s_Rstar[ty][ii][4] = R_star_st.Egy;
# elif ( RSOLVER == ROE )
Solve_StarRoe( eval, s_Lstar[ty][ii], s_Rstar[ty][ii], s_u[ty][ii], s_u[ty][i], Gamma, MinPres );
# else
# error : ERROR : unsupported Riemann solver (EXACT/ROE) !!
# endif
}
__syncthreads();
// solve the intercell flux
if ( i >= FLU_GHOST_SIZE && i <= FLU_GHOST_SIZE+PS2 )
{
ii = i - FLU_GHOST_SIZE;
int ii_p1 = ii + 1;
Solve_Flux( s_flux[ty][ii], s_Lstar[ty][ii], s_Rstar[ty][ii],
s_Lstar[ty][ii_p1], s_Rstar[ty][ii_p1], s_Lstar[ty][i], s_Rstar[ty][i], eval,
s_u[ty][i-2], s_u[ty][i-1], s_u[ty][i], s_u[ty][i+1],
Gamma, ratio, WAF_Limiter );
}
__syncthreads();
// update the conservative variables
if ( i >= FLU_GHOST_SIZE && i < FLU_GHOST_SIZE+PS2 && RuleOut == false )
{
ii = i - FLU_GHOST_SIZE;
for (int v=0; v<5; v++) Fluid[v] += ratio*( s_flux[ty][ii][v] - s_flux[ty][ii+1][v] );
// enforce positive density and pressure
ConVar.Rho = Fluid[0];
ConVar.Px = Fluid[1];
ConVar.Py = Fluid[2];
ConVar.Pz = Fluid[3];
ConVar.Egy = Fluid[4];
ConVar.Rho = FMAX( ConVar.Rho, MinDens );
Fluid[0] = ConVar.Rho;
Fluid[4] = CUFLU_CheckMinPresInEngy( ConVar, Gamma_m1, _Gamma_m1, MinPres );
// check negative density and energy
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( CUFLU_CheckNegative(Fluid[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Fluid[0], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(Fluid[4]) )
printf( "ERROR : negative energy (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Fluid[4], __FILE__, __LINE__, __FUNCTION__ );
# endif
// store the updated data back to the global memory
if ( FinalOut )
{
switch ( XYZ )
{
case 0: ID2 = to1D2( k, j, i ); break;
case 3: ID2 = to1D2( k, i, j ); break;
case 6: ID2 = to1D2( i, k, j ); break;
}
for (int v=0; v<5; v++) g_Fluid_Out[bx][ Comp[v] ][ID2] = Fluid[v];
}
else
for (int v=0; v<5; v++) g_Fluid_In [bx][ Comp[v] ][ID1] = Fluid[v];
}
// paste the s_flux into g_Flux
if ( StoreFlux )
if ( k >= FLU_GHOST_SIZE && k < FLU_NXT-FLU_GHOST_SIZE )
if ( j >= FLU_GHOST_SIZE && j < FLU_NXT-FLU_GHOST_SIZE )
if ( i == 0 )
{
ID3 = __umul24( k-FLU_GHOST_SIZE, PS2 ) + (j-FLU_GHOST_SIZE);
for (int v=0; v<5; v++)
{
g_Flux[bx][XYZ+0][v][ID3] = s_flux[ty][ 0 ][ Comp[v] ];
g_Flux[bx][XYZ+1][v][ID3] = s_flux[ty][ PS2/2 ][ Comp[v] ];
g_Flux[bx][XYZ+2][v][ID3] = s_flux[ty][ PS2 ][ Comp[v] ];
}
}
// reset the target array indices
j += dj;
if ( j >= j_end )
{
delta_k = ( j - j_end )/size_j + 1;
k += delta_k;
j -= __umul24( size_j, delta_k );
}
Column0 += dj;
// if the index k exceeds the maximum allowed value --> reset (j,k) to harmless values and wait for other
// threads (all threads must exit the while loop "at the same time", otherwise __syncthreads will fail !!)
if ( k >= k_end )
{
j = 0;
k = 0;
RuleOut = true;
}
__syncthreads();
}
while ( Column0 < NColumn );
} // CUFLU_Advance
//-------------------------------------------------------------------------------------------------------
// Function : Solve_Flux
// Description : Solve the intercell flux
//
// Parameter : flux : Intercell flux
// lL_star : Primitive variables in the left star region of the left region
// lR_star : Primitive variables in the right star region of the left region
// cL_star : Primitive variables in the left star region of the center region
// cR_star : Primitive variables in the right star region of the center region
// rL_star : Primitive variables in the left star region of the right region
// rR_star : Primitive variables in the right star region of the right region
// eival : Eigenvalue
// L_2 : Primitive variables in the region left to the left region
// L_1 : Primitive variables in the left region
// R_1 : Primitive variables in the right region
// R_2 : Primitive variables in the region right to the right region
// ratio : dt over dx
// Gamma : Ratio of specific heats
// WAF_Limiter : Selection of the limit function
// 0 : superbee
// 1 : van-Leer
// 2 : van-Albada
// 3 : minbee
//-------------------------------------------------------------------------------------------------------
__device__ void Solve_Flux( real flux[5], const real lL_star[5], const real lR_star[5],
const real cL_star[5], const real cR_star[5], const real rL_star[5],
const real rR_star[5], const real eival[5], const real L_2[5], const real L_1[5],
const real R_1[5],const real R_2[5], const real Gamma, const real ratio,
const WAF_Limiter_t WAF_Limiter )
{
real theta[5]; // the sign of speed of waves
real limit[5]; // limit functions
real mean [3][5];
real delta[3][5];
delta[0][0] = lL_star[0] - L_2[0];
delta[0][1] = lR_star[0] - lL_star[0];
delta[0][2] = lR_star[2] - lL_star[2];
delta[0][3] = lR_star[3] - lL_star[3];
delta[0][4] = L_1[0] - lR_star[0];
mean[0][0] = (real)0.5*( FABS( lL_star[0] ) + FABS( L_2[0] ) );
mean[0][1] = (real)0.5*( FABS( lR_star[0] ) + FABS( lL_star[0] ) );
mean[0][2] = (real)0.5*( FABS( lR_star[2] ) + FABS( lL_star[2] ) );
mean[0][3] = (real)0.5*( FABS( lR_star[3] ) + FABS( lL_star[3] ) );
mean[0][4] = (real)0.5*( FABS( L_1[0] ) + FABS( lR_star[0] ) );
delta[1][0] = cL_star[0] - L_1[0];
delta[1][1] = cR_star[0] - cL_star[0];
delta[1][2] = cR_star[2] - cL_star[2];
delta[1][3] = cR_star[3] - cL_star[3];
delta[1][4] = R_1[0] - cR_star[0];
mean[1][0] = (real)0.5*( FABS( cL_star[0] ) + FABS( L_1[0] ) );
mean[1][1] = (real)0.5*( FABS( cR_star[0] ) + FABS( cL_star[0] ) );
mean[1][2] = (real)0.5*( FABS( cR_star[2] ) + FABS( cL_star[2] ) );
mean[1][3] = (real)0.5*( FABS( cR_star[3] ) + FABS( cL_star[3] ) );
mean[1][4] = (real)0.5*( FABS( R_1[0] ) + FABS( cR_star[0] ) );
delta[2][0] = rL_star[0] - R_1[0];
delta[2][1] = rR_star[0] - rL_star[0];
delta[2][2] = rR_star[2] - rL_star[2];
delta[2][3] = rR_star[3] - rL_star[3];
delta[2][4] = R_2[0] - rR_star[0];
mean[2][0] = (real)0.5*( FABS( rL_star[0] ) + FABS( R_1[0] ) );
mean[2][1] = (real)0.5*( FABS( rR_star[0] ) + FABS( rL_star[0] ) );
mean[2][2] = (real)0.5*( FABS( rR_star[2] ) + FABS( rL_star[2] ) );
mean[2][3] = (real)0.5*( FABS( rR_star[3] ) + FABS( rL_star[3] ) );
mean[2][4] = (real)0.5*( FABS( R_2[0] ) + FABS( rR_star[0] ) );
// set limit function
for (int i=0; i<5; i++)
{
if ( FABS( eival[i] ) < MAX_ERROR ) limit[i] = (real)1.0;
else
{
if ( eival[i] > (real)0.0 )
{
if ( mean[0][i] == (real)0.0 || mean[1][i] == (real)0.0 ) limit[i] = (real)1.0;
else
{
if ( ( delta[0][i]*delta[1][i] ) / ( mean[0][i]*mean[1][i] ) < MAX_ERROR*MAX_ERROR )
limit[i] = (real)1.0;
else
{
real r = delta[0][i] / delta[1][i];
limit[i] = set_limit( r, eival[i] * ratio, WAF_Limiter );
}
}
}
else
{
if ( mean[2][i] == (real)0.0 || mean[1][i] == (real)0.0 ) limit[i] = (real)1.0;
else
{
if ( ( delta[2][i]*delta[1][i] ) / ( mean[2][i]*mean[1][i] ) < MAX_ERROR*MAX_ERROR )
limit[i] = (real)1.0;
else
{
real r = delta[2][i] / delta[1][i];
limit[i] = set_limit( r, eival[i] * ratio, WAF_Limiter );
}
}
}
}
} // for (int i=0; i<5; i++)
// solve the sign of waves
for (int i=0; i<5; i++)
{
if ( FABS( eival[i] ) < MAX_ERROR ) theta[i] = (real)0.0;
else if ( eival[i] > (real)0.0 ) theta[i] = (real)1.0;
else theta[i] = -(real)1.0;
}
// solve the intercell flux
# ifdef WAF_DISSIPATE
Dis_Stru ( flux, L_1, R_1, cL_star, cR_star, limit, theta, Gamma );
# else
Undis_Stru( flux, L_1, R_1, cL_star, cR_star, limit, theta, Gamma );
# endif
} // FUNCTION : Solve_Flux
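// Worked form of the flux assembled above (editorial note, not in the original
// source): with F^(0)..F^(5) the fluxes of the states separated by the five waves,
// c_k = eival[k]*dt/dx the wave Courant numbers and A_k the limited weights,
//
//     F_WAF = 0.5*( F^(0) + F^(5) )
//           - 0.5*sum_{k=0..4} sign(c_k) * A_k * ( F^(k+1) - F^(k) )
//
// which is what Dis_Stru() evaluates below; Undis_Stru() is the non-dissipative
// variant that reuses the star-region fluxes for the shear waves.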
//-----------------------------------------------------------------------------------------------------
// Function : set_limit
// Description : set the limit function
//
// parameter : r : flow variable
// c : Courant number
// WAF_Limiter : Selection of the limit function
// 0 : superbee
// 1 : van-Leer
// 2 : van-Albada
// 3 : minbee
//-------------------------------------------------------------------------------------------------------
__device__ real set_limit( const real r, const real c, const WAF_Limiter_t WAF_Limiter )
{
real limit;
// choose the limit function
switch ( WAF_Limiter )
{
case WAF_SUPERBEE :
{
if ( r > (real)0.0 && r <= (real)0.5 ) limit = (real)1.0 - (real)2.0*r*( (real)1.0 - FABS(c) );
else if ( r <= (real)1.0 ) limit = FABS(c);
else if ( r <= (real)2.0 ) limit = (real)1.0 - r*( (real)1.0 - FABS(c) );
else limit = (real)2.0*FABS(c) - (real)1.0;
break;
}
case WAF_VANLEER :
{
limit = (real)1.0 - (real)2.0*r*( (real)1.0 - FABS(c) ) / ( (real)1.0 + r );
break;
}
case WAF_ALBADA :
{
limit = (real)1.0 - r*( (real)1.0 + r )*( (real)1.0 - FABS(c) ) / ( (real)1.0 + r*r );
break;
}
case WAF_MINBEE :
{
if ( r > (real)0.0 && r <= (real)1.0 ) limit = (real)1.0 - r*( (real)1.0 - FABS(c) );
else limit = FABS(c);
break;
}
default:
break;
}
return limit;
} // FUNCTION : set_limit
#ifdef WAF_DISSIPATE
//------------------------------------------------------------------------------------------------------
// Function : Dis_Stru
// Description : Set the intercell flux by dissipative wave structure
//
// Parameter : flux : Intercell flux
// L : Primitive variables in the left region
// R : Primitive variables in the right region
// L_star : Primitive variables in the left star region
// R_star : Primitive variables in the right star region
// limit : Limit functions
// theta : Sign of wave speed
// Gamma : Ratio of specific heats
//-------------------------------------------------------------------------------------------------------
__device__ void Dis_Stru( real flux[5], const real L[5], const real R[5], const real L_star[5],
const real R_star[5], const real limit[5], const real theta[5], const real Gamma )
{
real iflux[6][5];
real lim[5];
for (int i=0; i<5; i++) lim[i] = limit[i];
// flux function evaluated at the given states
set_flux( iflux[0], L, Gamma );
set_flux( iflux[1], L_star, Gamma );
set_flux( iflux[4], R_star, Gamma );
set_flux( iflux[5], R, Gamma );
// determine the ghost states
real stat[2][5];
if ( limit[1] <= limit[2] )
{
if ( limit[3] <= limit[1] )
{
stat[0][0] = L_star[0];
stat[0][1] = L_star[1];
stat[0][2] = L_star[2];
stat[0][3] = R_star[3];
stat[0][4] = L_star[4];
stat[1][0] = R_star[0];
stat[1][1] = L_star[1];
stat[1][2] = L_star[2];
stat[1][3] = R_star[3];
stat[1][4] = L_star[4];
}
else if ( limit[3] <= limit[2] )
{
stat[0][0] = R_star[0];
stat[0][1] = L_star[1];
stat[0][2] = L_star[2];
stat[0][3] = L_star[3];
stat[0][4] = L_star[4];
stat[1][0] = R_star[0];
stat[1][1] = L_star[1];
stat[1][2] = L_star[2];
stat[1][3] = R_star[3];
stat[1][4] = L_star[4];
}
else
{
stat[0][0] = R_star[0];
stat[0][1] = L_star[1];
stat[0][2] = L_star[2];
stat[0][3] = L_star[3];
stat[0][4] = L_star[4];
stat[1][0] = R_star[0];
stat[1][1] = L_star[1];
stat[1][2] = R_star[2];
stat[1][3] = L_star[3];
stat[1][4] = L_star[4];
}
} // if ( limit[1] <= limit[2] )
else // limit[1] > limit[2]
{
if ( limit[3] <= limit[2] )
{
stat[0][0] = L_star[0];
stat[0][1] = L_star[1];
stat[0][2] = L_star[2];
stat[0][3] = R_star[3];
stat[0][4] = L_star[4];
stat[1][0] = L_star[0];
stat[1][1] = L_star[1];
stat[1][2] = R_star[2];
stat[1][3] = R_star[3];
stat[1][4] = L_star[4];
}
else if ( limit[3] <= limit[1] )
{
stat[0][0] = L_star[0];
stat[0][1] = L_star[1];
stat[0][2] = R_star[2];
stat[0][3] = L_star[3];
stat[0][4] = L_star[4];
stat[1][0] = L_star[0];
stat[1][1] = L_star[1];
stat[1][2] = R_star[2];
stat[1][3] = R_star[3];
stat[1][4] = L_star[4];
}
else
{
stat[0][0] = L_star[0];
stat[0][1] = L_star[1];
stat[0][2] = R_star[2];
stat[0][3] = L_star[3];
stat[0][4] = L_star[4];
stat[1][0] = R_star[0];
stat[1][1] = L_star[1];
stat[1][2] = R_star[2];
stat[1][3] = L_star[3];
stat[1][4] = L_star[4];
}
} // if ( limit[1] <= limit[2] ) ... else ...
// set flux in ghost region
set_flux( iflux[2], stat[0], Gamma );
set_flux( iflux[3], stat[1], Gamma );
// reorder the limit values
for (int i=1; i<3; i++)
{
if ( lim[i] > lim[i+1] )
{
real tmp = lim[i+1];
lim[i+1] = lim[i ];
lim[i ] = tmp;
}
}
if ( lim[1] > lim[2] )
{
real tmp = lim[2];
lim[2] = lim[1];
lim[1] = tmp;
}
// set the intercell flux
for (int i=0; i<5; i++)
{
flux[i] = (real)0.5*( iflux[0][i] + iflux[5][i] )
- (real)0.5*( theta[0]*lim[0]*( iflux[1][i] - iflux[0][i] ) +
theta[1]*lim[1]*( iflux[2][i] - iflux[1][i] ) +
theta[2]*lim[2]*( iflux[3][i] - iflux[2][i] ) +
theta[3]*lim[3]*( iflux[4][i] - iflux[3][i] ) +
theta[4]*lim[4]*( iflux[5][i] - iflux[4][i] ) );
}
} // FUNCTION : Dis_Stru
#endif // #ifdef WAF_DISSIPATE
#ifndef WAF_DISSIPATE
//------------------------------------------------------------------------------------------------------
// Function : Undis_Stru
// Description : Set the intercell flux by non-dissipative wave structure
//
// Parameter : flux : Intercell flux
// L : Primitive variables in the left region
// R : Primitive variables in the right region
// L_star : Primitive variables in the left star region
// R_star : Primitive variables in the right star region
// limit : Limit functions
// theta : Sign of wave speed
// Gamma : Ratio of specific heats
//-------------------------------------------------------------------------------------------------------
__device__ void Undis_Stru( real flux[5], const real L[5], const real R[5], const real L_star[5],
const real R_star[5], const real limit[5], const real theta[5], const real Gamma )
{
// flux function evaluated at the given states
real iflux[4][5];
set_flux( iflux[0], L, Gamma );
set_flux( iflux[1], L_star, Gamma );
set_flux( iflux[2], R_star, Gamma );
set_flux( iflux[3], R, Gamma );
// set the intercell flux
flux[0] = (real)0.5*( iflux[0][0] + iflux[3][0] )
- (real)0.5*( theta[0]*limit[0]*( iflux[1][0] - iflux[0][0] ) +
theta[1]*limit[1]*( iflux[2][0] - iflux[1][0] ) +
theta[4]*limit[4]*( iflux[3][0] - iflux[2][0] ) );
flux[1] = (real)0.5*( iflux[0][1] + iflux[3][1] )
- (real)0.5*( theta[0]*limit[0]*( iflux[1][1] - iflux[0][1] ) +
theta[1]*limit[1]*( iflux[2][1] - iflux[1][1] ) +
theta[4]*limit[4]*( iflux[3][1] - iflux[2][1] ) );
flux[4] = (real)0.5*( iflux[0][4] + iflux[3][4] )
- (real)0.5*( theta[0]*limit[0]*( iflux[1][4] - iflux[0][4] ) +
theta[1]*limit[1]*( iflux[2][4] - iflux[1][4] ) +
theta[4]*limit[4]*( iflux[3][4] - iflux[2][4] ) );
flux[2] = (real)0.5*( iflux[0][2] + iflux[3][2] )
- (real)0.5*( theta[0]*limit[0]*( iflux[1][2] - iflux[0][2] ) +
theta[2]*limit[2]*( iflux[2][2] - iflux[1][2] ) +
theta[4]*limit[4]*( iflux[3][2] - iflux[2][2] ) );
flux[3] = (real)0.5*( iflux[0][3] + iflux[3][3] )
- (real)0.5*( theta[0]*limit[0]*( iflux[1][3] - iflux[0][3] ) +
theta[3]*limit[3]*( iflux[2][3] - iflux[1][3] ) +
theta[4]*limit[4]*( iflux[3][3] - iflux[2][3] ) );
} // FUNCTION : Undis_Stru
#endif // #ifndef WAF_DISSIPATE
//-------------------------------------------------------------------------------------------------------
// Function : set_flux
// Description : Set the flux function evaluated at the given state
//
// Parameter : flux : Flux function
// val : Primitive variables
// Gamma : Ratio of specific heats
//-------------------------------------------------------------------------------------------------------
__device__ void set_flux( real flux[5], const real val[5], const real Gamma )
{
const real Gamma_m1 = Gamma -(real)1.0;
// set flux
flux[0] = val[0]*val[1];
flux[1] = val[0]*val[1]*val[1] + val[4];
flux[2] = val[0]*val[1]*val[2];
flux[3] = val[0]*val[1]*val[3];
flux[4] = val[1]*( (real)0.5*val[0]*( val[1]*val[1] + val[2]*val[2] + val[3]*val[3] )
+ val[4]/Gamma_m1 + val[4] );
} // FUNCTION : set_flux
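// Worked form of the flux above (editorial note, not in the original source): with
// the primitive state (rho, u, v, w, p) = (val[0], ..., val[4]) and total energy
// E = 0.5*rho*(u*u + v*v + w*w) + p/(Gamma-1), this is the x-directed Euler flux
//
//     F = ( rho*u, rho*u*u + p, rho*u*v, rho*u*w, u*(E + p) )
//
// so that flux[4] = u*( 0.5*rho*(u*u+v*v+w*w) + p/(Gamma-1) + p ).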
#if ( RSOLVER == ROE )
//-------------------------------------------------------------------------------------------------------
// Function : Solve_StarRoe
// Description : Solve the star region and speed of waves by Roe's method
//
// Parameter : eival : Speed of waves
// L_star : Primitive variables in the left star region
// R_star : Primitive variables in the right star region
// L : Primitive variables in the left region
// R : Primitive variables in the right region
// Gamma : Ratio of specific heats
// MinPres : Minimum allowed pressure
//-------------------------------------------------------------------------------------------------------
__device__ void Solve_StarRoe( real eival[5], real L_star[5], real R_star[5], const real L[5], const real R[5],
const real Gamma, const real MinPres )
{
const real Gamma_m1 = Gamma - (real)1.0; // for evaluating pressure and sound speed
real u_bar, v_bar, w_bar, h_bar, a_bar, a_bar_inv; // Roe's average of vx, vy, vz, enthalpy, sound speed, and
// one over a_bar
real coef[5]; // Roe's coefficients
real TempPres, TempRho, _TempRho;
// solve Roe's average
{
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( CUFLU_CheckNegative(L[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
L[0], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(R[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
R[0], __FILE__, __LINE__, __FUNCTION__ );
# endif
real n_L_sq = SQRT( L[0] ); // square root of the left density
real n_R_sq = SQRT( R[0] ); // square root of the right density
real h_L = (real)0.5*( L[1]*L[1] + L[2]*L[2] + L[3]*L[3] ) + Gamma/Gamma_m1*L[4]/L[0]; // left enthalpy
real h_R = (real)0.5*( R[1]*R[1] + R[2]*R[2] + R[3]*R[3] ) + Gamma/Gamma_m1*R[4]/R[0]; // right enthalpy
real n_bar_inv = (real)1.0 / ( n_L_sq + n_R_sq ); // one over (n_L_sq plus n_R_sq)
u_bar = ( n_L_sq*L[1] + n_R_sq*R[1] )*n_bar_inv;
v_bar = ( n_L_sq*L[2] + n_R_sq*R[2] )*n_bar_inv;
w_bar = ( n_L_sq*L[3] + n_R_sq*R[3] )*n_bar_inv;
h_bar = ( n_L_sq*h_L + n_R_sq*h_R )*n_bar_inv;
real GammaP_Rho = Gamma_m1*( h_bar - (real)0.5*( u_bar*u_bar + v_bar*v_bar + w_bar*w_bar ) );
TempRho = (real)0.5*( L[0] + R[0] );
_TempRho = (real)1.0/TempRho;
TempPres = GammaP_Rho*TempRho/Gamma;
TempPres = CUFLU_CheckMinPres( TempPres, MinPres );
GammaP_Rho = Gamma*TempPres*_TempRho;
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( CUFLU_CheckNegative(GammaP_Rho) )
printf( "ERROR : negative GammaP_Rho (%14.7e) at file <%s>, line <%d>, function <%s>\n",
GammaP_Rho, __FILE__, __LINE__, __FUNCTION__ );
# endif
a_bar = SQRT( GammaP_Rho );
a_bar_inv = (real)1.0 / a_bar;
}
// solve Roe's coefficients
{
// the difference of conservative variables
real du_1 = R[0] - L[0];
real du_2 = R[0]*R[1] - L[0]*L[1];
real du_3 = R[0]*R[2] - L[0]*L[2];
real du_4 = R[0]*R[3] - L[0]*L[3];
real du_5 = + (real)0.5*R[0]*( R[1]*R[1] + R[2]*R[2] + R[3]*R[3] ) + R[4]/Gamma_m1
- (real)0.5*L[0]*( L[1]*L[1] + L[2]*L[2] + L[3]*L[3] ) - L[4]/Gamma_m1;
coef[2] = du_3 - v_bar*du_1;
coef[3] = du_4 - w_bar*du_1;
coef[1] = Gamma_m1*a_bar_inv*a_bar_inv*( du_1*( h_bar - u_bar*u_bar ) + u_bar*du_2 - du_5
+ coef[2]*v_bar + coef[3]*w_bar );
coef[0] = (real)0.5*a_bar_inv*( du_1*( u_bar + a_bar ) - du_2 - a_bar*coef[1] );
coef[4] = du_1 - ( coef[0] + coef[1] );
}
// solve the star region
{
L_star[0] = L[0] + coef[0];
R_star[0] = R[0] - coef[4];
L_star[1] = (real)0.5*( ( L[0]*L[1] + coef[0]*( u_bar - a_bar ) ) / L_star[0]
+ ( R[0]*R[1] - coef[4]*( u_bar + a_bar ) ) / R_star[0] );
R_star[1] = L_star[1];
L_star[2] = L[2];
R_star[2] = R[2];
L_star[3] = L[3];
R_star[3] = R[3];
real E_L = (real)0.5*L[0]*( L[1]*L[1] + L[2]*L[2] + L[3]*L[3] );
real E_R = (real)0.5*R[0]*( R[1]*R[1] + R[2]*R[2] + R[3]*R[3] );
real e_L_star = (real)0.5*L_star[0]*( L_star[1]*L_star[1] + L_star[2]*L_star[2] + L_star[3]*L_star[3] );
real e_R_star = (real)0.5*R_star[0]*( R_star[1]*R_star[1] + R_star[2]*R_star[2] + R_star[3]*R_star[3] );
L_star[4] = (real)0.5*Gamma_m1*( E_L - e_L_star + L[4]/Gamma_m1 + coef[0]*( h_bar - u_bar*a_bar )
+ E_R - e_R_star + R[4]/Gamma_m1 - coef[4]*( h_bar + u_bar*a_bar ) );
L_star[4] = CUFLU_CheckMinPres( L_star[4], MinPres );
R_star[4] = L_star[4];
}
// solve the speed of waves
{
real eigen[2];
eival[1] = L_star[1];
eival[2] = L_star[1];
eival[3] = L_star[1];
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( CUFLU_CheckNegative(L[4]) )
printf( "ERROR : negative pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
L[4], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(L[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
L[0], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(L_star[4]) )
printf( "ERROR : negative pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
L_star[4], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(L_star[0]) )
printf( "ERROR : negative density(%14.7e) at file <%s>, line <%d>, function <%s>\n",
L_star[0], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(R[4]) )
printf( "ERROR : negative pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
R[4], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(R[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
R[0], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(R_star[4]) )
printf( "ERROR : negative pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
R_star[4], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(R_star[0]) )
printf( "ERROR : negative density(%14.7e) at file <%s>, line <%d>, function <%s>\n",
R_star[0], __FILE__, __LINE__, __FUNCTION__ );
# endif
eigen[0] = L [1] - SQRT( Gamma*L [4]/L [0] );
eigen[1] = L_star[1] - SQRT( Gamma*L_star[4]/L_star[0] );
if ( eigen[0] <= eigen[1] ) eival[0] = eigen[0];
else eival[0] = eigen[1];
eigen[0] = R [1] + SQRT( Gamma*R [4]/R [0] );
eigen[1] = R_star[1] + SQRT( Gamma*R_star[4]/R_star[0] );
if ( eigen[0] <= eigen[1] ) eival[4] = eigen[1];
else eival[4] = eigen[0];
}
} // FUNCTION : Solve_StarRoe
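// Worked form of the Roe averages used above (editorial note, not in the original
// source): with s_L = sqrt(rho_L) and s_R = sqrt(rho_R),
//
//     u_bar = ( s_L*u_L + s_R*u_R ) / ( s_L + s_R )      (and likewise v_bar, w_bar)
//     h_bar = ( s_L*h_L + s_R*h_R ) / ( s_L + s_R ),  h = 0.5*|v|^2 + Gamma/(Gamma-1)*p/rho
//     a_bar = sqrt( (Gamma-1)*( h_bar - 0.5*(u_bar^2 + v_bar^2 + w_bar^2) ) )
//
// with the pressure floor (MinPres) applied before the square root is taken.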
#endif // #if ( RSOLVER == ROE )
#endif // #if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == WAF )
| 885a831a57accb52f10114f97f4fd85dd018887c.cu | #include "Macro.h"
#include "CUFLU.h"
#if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == WAF )
// check before compiling anything else
#if ( NCOMP_PASSIVE != 0 )
# error : WAF scheme does NOT support passive scalars !!
#endif
#define to1D1(z,y,x) ( __umul24(z, FLU_NXT*FLU_NXT) + __umul24(y, FLU_NXT) + x )
#define to1D2(z,y,x) ( __umul24(z-FLU_GHOST_SIZE, PS2*PS2) + __umul24(y-FLU_GHOST_SIZE, PS2) + x-FLU_GHOST_SIZE )
#include "CUFLU_Shared_FluUtility.cu"
#if( RSOLVER == EXACT )
#include "CUFLU_Shared_RiemannSolver_Exact.cu"
#elif ( RSOLVER == ROE )
static __device__ void Solve_StarRoe( real eival[5], real L_star[5], real R_star[5], const real L[5],
const real R[5], const real Gamma, const real MinPres );
#endif
#ifdef WAF_DISSIPATE
static __device__ void Dis_Stru( real flux[5], const real L[5], const real R[5], const real L_star[5],
const real R_star[5], const real limit[5], const real theta[5],
const real Gamma );
#else
static __device__ void Undis_Stru( real flux[5], const real L[5], const real R[5], const real L_star[5],
const real R_star[5], const real limit[5], const real theta[5],
const real Gamma );
#endif
static __device__ void CUFLU_Advance( real g_Fluid_In [][5][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[][5][ PS2*PS2*PS2 ],
real g_Flux[][9][5][ PS2*PS2 ],
const real dt, const real _dh, const real Gamma, const bool StoreFlux,
const int j_gap, const int k_gap, real s_u[][FLU_NXT][5],
real s_flux[][PS2+1][5], real s_Lstar[][PS2+3][5], real s_Rstar[][PS2+3][5],
const bool FinalOut, const int XYZ, const WAF_Limiter_t WAF_Limiter,
const real MinDens, const real MinPres );
static __device__ void Solve_Flux( real flux[5], const real lL_star[5], const real lR_star[5],
const real cL_star[5], const real cR_star[5], const real rL_star[5],
const real rR_star[5], const real eival[5] ,const real L_2[5],
const real L_1[5], const real R_1[5],const real R_2[5],
const real Gamma, const real ratio, const WAF_Limiter_t WAF_Limiter );
static __device__ void set_flux( real flux[5], const real val[5], const real Gamma );
static __device__ real set_limit( const real r, const real c, const WAF_Limiter_t WAF_Limiter );
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_FluidSolver_WAF
// Description : GPU fluid solver based on the Weighted-Average-Flux (WAF) scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The three-dimensional evolution is achieved by using the dimensional-split method
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output flux
// g_Corner : Global memory array storing the physical corner coordinates of each patch group (USELESS CURRENTLY)
// g_Pot_USG : Global memory array storing the input potential for UNSPLIT_GRAVITY (NOT SUPPORTED in WAF)
// dt : Time interval to advance solution
// _dh : 1 / grid size
// Gamma : Ratio of specific heats
// StoreFlux : true --> store the coarse-fine fluxes
// XYZ : true : x->y->z ( forward sweep)
// false : z->y->x (backward sweep)
// WAF_Limiter : Selection of the limit function
// 0 : superbee
// 1 : van-Leer
// 2 : van-Albada
// 3 : minbee
// MinDens/Pres : Minimum allowed density and pressure
//-------------------------------------------------------------------------------------------------------
__global__ void CUFLU_FluidSolver_WAF( real g_Fluid_In [] [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[] [NCOMP_TOTAL][ PS2*PS2*PS2 ],
real g_Flux [][9][NCOMP_TOTAL][ PS2*PS2 ],
const double g_Corner[][3],
const real g_Pot_USG[][ USG_NXT_F*USG_NXT_F*USG_NXT_F ],
const real dt, const real _dh, const real Gamma, const bool StoreFlux,
const bool XYZ, const WAF_Limiter_t WAF_Limiter,
const real MinDens, const real MinPres )
{
__shared__ real s_u [FLU_BLOCK_SIZE_Y][FLU_NXT][5];
__shared__ real s_flux [FLU_BLOCK_SIZE_Y][PS2+1][5];
__shared__ real s_L_st [FLU_BLOCK_SIZE_Y][PS2+3][5];
__shared__ real s_R_st [FLU_BLOCK_SIZE_Y][PS2+3][5];
if ( XYZ )
{
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, 0, 0,
s_u, s_flux, s_L_st, s_R_st, false, 0, WAF_Limiter, MinDens, MinPres );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, FLU_GHOST_SIZE, 0,
s_u, s_flux, s_L_st, s_R_st, false, 3, WAF_Limiter, MinDens, MinPres );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_u, s_flux, s_L_st, s_R_st, true, 6, WAF_Limiter, MinDens, MinPres );
}
else
{
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, 0, 0,
s_u, s_flux, s_L_st, s_R_st, false, 6, WAF_Limiter, MinDens, MinPres );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, 0, FLU_GHOST_SIZE,
s_u, s_flux, s_L_st, s_R_st, false, 3, WAF_Limiter, MinDens, MinPres );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Gamma, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_u, s_flux, s_L_st, s_R_st, true, 0, WAF_Limiter, MinDens, MinPres );
}
} // FUNCTION : CUFLU_FluidSolver_WAF
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_Advance
// Description : GPU device function, which performs a one-dimensional sweep based on the WAF scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The direction of the one dimensional sweep is determined by the input parameter "XYZ"
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output flux
// dt : Time interval to advance solution
// _dh : 1 / grid size
// Gamma : ratio of specific heats
// StoreFlux : true --> store the coarse-fine fluxes
// j_gap : Number of useless grids in each side in the j direction (j may not be equal to y)
// k_gap : Number of useless grids in each side in the k direction (k may not be equal to z)
// s_u : Shared memory array storing the fluid variables used to compute the intercell flux
// s_flux : Shared memory array storing the final flux used to update the fluid variables
// s_Lstar : Shared memory array storing the left region in the solution of Riemann problem
// s_Rstar : Shared memory array storing the right region in the solution of Riemann problem
// FinalOut : true : output data
// false : don't output data
// XYZ : 0 : Update the solution in the x direction
// 3 : Update the solution in the y direction
// 6 : Update the solution in the z direction
// --> This parameter is also used to determine the place to store the output flux
// WAF_Limiter : Selection of the limit function
// 0 : superbee
// 1 : van-Leer
// 2 : van-Albada
// 3 : minbee
// MinDens/Pres : Minimum allowed density and pressure
//-------------------------------------------------------------------------------------------------------
__device__ void CUFLU_Advance( real g_Fluid_In [][5][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[][5][ PS2*PS2*PS2 ],
real g_Flux[][9][5][ PS2*PS2 ],
const real dt, const real _dh, const real Gamma, const bool StoreFlux,
const int j_gap, const int k_gap, real s_u[][FLU_NXT][5], real s_flux[][PS2+1][5],
real s_Lstar[][PS2+3][5], real s_Rstar[][PS2+3][5], const bool FinalOut,
const int XYZ, const WAF_Limiter_t WAF_Limiter, const real MinDens, const real MinPres )
{
const uint bx = blockIdx.x;
const uint tx = threadIdx.x;
const uint ty = threadIdx.y;
const uint dj = blockDim.y;
const uint size_j = FLU_NXT - (j_gap<<1);
const uint size_k = FLU_NXT - (k_gap<<1);
const uint NColumn = __umul24( size_j, size_k );
const uint i = tx; // (i,j) the element in shared memory under evaluation
uint j = j_gap + ty%size_j;
uint k = k_gap + ty/size_j;
uint Column0 = 0; // the total number of columns that have been updated
const uint j_end = FLU_NXT - j_gap;
const uint k_end = FLU_NXT - k_gap;
const real ratio = dt*_dh; // dt over dx
const real Gamma_m1 = Gamma - (real)1.0;
const real _Gamma_m1 = (real)1.0 / Gamma_m1;
bool RuleOut = false;
real Fluid[5], eval[5];
int ID1, ID2, ID3, ii, delta_k, Comp[5];
FluVar ConVar;
// set the order of component for update in different directions
switch ( XYZ )
{
case 0: Comp[0] = 0; Comp[1] = 1; Comp[2] = 2; Comp[3] = 3; Comp[4] = 4; break;
case 3: Comp[0] = 0; Comp[1] = 2; Comp[2] = 1; Comp[3] = 3; Comp[4] = 4; break;
case 6: Comp[0] = 0; Comp[1] = 3; Comp[2] = 2; Comp[3] = 1; Comp[4] = 4; break;
}
// start the WAF scheme
do
{
// determine the array indices for updating in different directions
switch ( XYZ )
{
case 0: ID1 = to1D1( k, j, i ); break;
case 3: ID1 = to1D1( k, i, j ); break;
case 6: ID1 = to1D1( i, k, j ); break;
}
// load data into per-thread registers
for (int v=0; v<5; v++) Fluid[v] = g_Fluid_In[bx][ Comp[v] ][ID1];
// load the primitive variables into shared memory
s_u[ty][i][0] = Fluid[0];
s_u[ty][i][1] = Fluid[1] / Fluid[0];
s_u[ty][i][2] = Fluid[2] / Fluid[0];
s_u[ty][i][3] = Fluid[3] / Fluid[0];
s_u[ty][i][4] = Gamma_m1*( Fluid[4] - (real)0.5*( Fluid[1]*Fluid[1] + Fluid[2]*Fluid[2] +
Fluid[3]*Fluid[3] ) / Fluid[0] );
s_u[ty][i][4] = CUFLU_CheckMinPres( s_u[ty][i][4], MinPres );
__syncthreads();
// solve the Riemann problem
if ( i >= 1 && i <= FLU_GHOST_SIZE + PS2 + 1 )
{
ii = i - 1;
# if ( RSOLVER == EXACT )
FluVar5 eival_st, L_star_st, R_star_st;
FluVar L_st, R_st;
L_st.Rho = s_u[ty][ii][0];
L_st.Px = s_u[ty][ii][1];
L_st.Py = s_u[ty][ii][2];
L_st.Pz = s_u[ty][ii][3];
L_st.Egy = s_u[ty][ii][4];
R_st.Rho = s_u[ty][ i][0];
R_st.Px = s_u[ty][ i][1];
R_st.Py = s_u[ty][ i][2];
R_st.Pz = s_u[ty][ i][3];
R_st.Egy = s_u[ty][ i][4];
CUFLU_RiemannSolver_Exact( 0, &eival_st, &L_star_st, &R_star_st, L_st, R_st, Gamma );
eval[0] = eival_st.Rho;
eval[1] = eival_st.Px;
eval[2] = eival_st.Py;
eval[3] = eival_st.Pz;
eval[4] = eival_st.Egy;
s_Lstar[ty][ii][0] = L_star_st.Rho;
s_Lstar[ty][ii][1] = L_star_st.Px;
s_Lstar[ty][ii][2] = L_star_st.Py;
s_Lstar[ty][ii][3] = L_star_st.Pz;
s_Lstar[ty][ii][4] = L_star_st.Egy;
s_Rstar[ty][ii][0] = R_star_st.Rho;
s_Rstar[ty][ii][1] = R_star_st.Px;
s_Rstar[ty][ii][2] = R_star_st.Py;
s_Rstar[ty][ii][3] = R_star_st.Pz;
s_Rstar[ty][ii][4] = R_star_st.Egy;
# elif ( RSOLVER == ROE )
Solve_StarRoe( eval, s_Lstar[ty][ii], s_Rstar[ty][ii], s_u[ty][ii], s_u[ty][i], Gamma, MinPres );
# else
# error : ERROR : unsupported Riemann solver (EXACT/ROE) !!
# endif
}
__syncthreads();
// solve the intercell flux
if ( i >= FLU_GHOST_SIZE && i <= FLU_GHOST_SIZE+PS2 )
{
ii = i - FLU_GHOST_SIZE;
int ii_p1 = ii + 1;
Solve_Flux( s_flux[ty][ii], s_Lstar[ty][ii], s_Rstar[ty][ii],
s_Lstar[ty][ii_p1], s_Rstar[ty][ii_p1], s_Lstar[ty][i], s_Rstar[ty][i], eval,
s_u[ty][i-2], s_u[ty][i-1], s_u[ty][i], s_u[ty][i+1],
Gamma, ratio, WAF_Limiter );
}
__syncthreads();
// update the conservative variables
if ( i >= FLU_GHOST_SIZE && i < FLU_GHOST_SIZE+PS2 && RuleOut == false )
{
ii = i - FLU_GHOST_SIZE;
for (int v=0; v<5; v++) Fluid[v] += ratio*( s_flux[ty][ii][v] - s_flux[ty][ii+1][v] );
// enforce positive density and pressure
ConVar.Rho = Fluid[0];
ConVar.Px = Fluid[1];
ConVar.Py = Fluid[2];
ConVar.Pz = Fluid[3];
ConVar.Egy = Fluid[4];
ConVar.Rho = FMAX( ConVar.Rho, MinDens );
Fluid[0] = ConVar.Rho;
Fluid[4] = CUFLU_CheckMinPresInEngy( ConVar, Gamma_m1, _Gamma_m1, MinPres );
// check negative density and energy
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( CUFLU_CheckNegative(Fluid[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Fluid[0], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(Fluid[4]) )
printf( "ERROR : negative energy (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Fluid[4], __FILE__, __LINE__, __FUNCTION__ );
# endif
// store the updated data back to the global memory
if ( FinalOut )
{
switch ( XYZ )
{
case 0: ID2 = to1D2( k, j, i ); break;
case 3: ID2 = to1D2( k, i, j ); break;
case 6: ID2 = to1D2( i, k, j ); break;
}
for (int v=0; v<5; v++) g_Fluid_Out[bx][ Comp[v] ][ID2] = Fluid[v];
}
else
for (int v=0; v<5; v++) g_Fluid_In [bx][ Comp[v] ][ID1] = Fluid[v];
}
// paste the s_flux into g_Flux
if ( StoreFlux )
if ( k >= FLU_GHOST_SIZE && k < FLU_NXT-FLU_GHOST_SIZE )
if ( j >= FLU_GHOST_SIZE && j < FLU_NXT-FLU_GHOST_SIZE )
if ( i == 0 )
{
ID3 = __umul24( k-FLU_GHOST_SIZE, PS2 ) + (j-FLU_GHOST_SIZE);
for (int v=0; v<5; v++)
{
g_Flux[bx][XYZ+0][v][ID3] = s_flux[ty][ 0 ][ Comp[v] ];
g_Flux[bx][XYZ+1][v][ID3] = s_flux[ty][ PS2/2 ][ Comp[v] ];
g_Flux[bx][XYZ+2][v][ID3] = s_flux[ty][ PS2 ][ Comp[v] ];
}
}
// reset the target array indices
j += dj;
if ( j >= j_end )
{
delta_k = ( j - j_end )/size_j + 1;
k += delta_k;
j -= __umul24( size_j, delta_k );
}
Column0 += dj;
// if the index k exceeds the maximum allowed value --> reset (j,k) to harmless values and wait for other
// threads (all threads must exit the while loop "at the same time", otherwise __syncthreads will fail !!)
if ( k >= k_end )
{
j = 0;
k = 0;
RuleOut = true;
}
__syncthreads();
}
while ( Column0 < NColumn );
} // CUFLU_Advance
//-------------------------------------------------------------------------------------------------------
// Function : Solve_Flux
// Description : Solve the intercell flux
//
// Parameter : flux : Intercell flux
// lL_star : Primitive variables in the left star region of the left region
// lR_star : Primitive variables in the right star region of the left region
// cL_star : Primitive variables in the left star region of the center region
// cR_star : Primitive variables in the right star region of the center region
// rL_star : Primitive variables in the left star region of the right region
// rR_star : Primitive variables in the right star region of the right region
// eival : Eigenvalue
// L_2 : Primitive variables in the region left to the left region
// L_1 : Primitive variables in the left region
// R_1 : Primitive variables in the right region
// R_2 : Primitive variables in the region right to the right region
// ratio : dt over dx
// Gamma : Ratio of specific heats
// WAF_Limiter : Selection of the limit function
// 0 : superbee
// 1 : van-Leer
// 2 : van-Albada
// 3 : minbee
//-------------------------------------------------------------------------------------------------------
__device__ void Solve_Flux( real flux[5], const real lL_star[5], const real lR_star[5],
const real cL_star[5], const real cR_star[5], const real rL_star[5],
const real rR_star[5], const real eival[5], const real L_2[5], const real L_1[5],
const real R_1[5],const real R_2[5], const real Gamma, const real ratio,
const WAF_Limiter_t WAF_Limiter )
{
real theta[5]; // the sign of speed of waves
real limit[5]; // limit functions
real mean [3][5];
real delta[3][5];
delta[0][0] = lL_star[0] - L_2[0];
delta[0][1] = lR_star[0] - lL_star[0];
delta[0][2] = lR_star[2] - lL_star[2];
delta[0][3] = lR_star[3] - lL_star[3];
delta[0][4] = L_1[0] - lR_star[0];
mean[0][0] = (real)0.5*( FABS( lL_star[0] ) + FABS( L_2[0] ) );
mean[0][1] = (real)0.5*( FABS( lR_star[0] ) + FABS( lL_star[0] ) );
mean[0][2] = (real)0.5*( FABS( lR_star[2] ) + FABS( lL_star[2] ) );
mean[0][3] = (real)0.5*( FABS( lR_star[3] ) + FABS( lL_star[3] ) );
mean[0][4] = (real)0.5*( FABS( L_1[0] ) + FABS( lR_star[0] ) );
delta[1][0] = cL_star[0] - L_1[0];
delta[1][1] = cR_star[0] - cL_star[0];
delta[1][2] = cR_star[2] - cL_star[2];
delta[1][3] = cR_star[3] - cL_star[3];
delta[1][4] = R_1[0] - cR_star[0];
mean[1][0] = (real)0.5*( FABS( cL_star[0] ) + FABS( L_1[0] ) );
mean[1][1] = (real)0.5*( FABS( cR_star[0] ) + FABS( cL_star[0] ) );
mean[1][2] = (real)0.5*( FABS( cR_star[2] ) + FABS( cL_star[2] ) );
mean[1][3] = (real)0.5*( FABS( cR_star[3] ) + FABS( cL_star[3] ) );
mean[1][4] = (real)0.5*( FABS( R_1[0] ) + FABS( cR_star[0] ) );
delta[2][0] = rL_star[0] - R_1[0];
delta[2][1] = rR_star[0] - rL_star[0];
delta[2][2] = rR_star[2] - rL_star[2];
delta[2][3] = rR_star[3] - rL_star[3];
delta[2][4] = R_2[0] - rR_star[0];
mean[2][0] = (real)0.5*( FABS( rL_star[0] ) + FABS( R_1[0] ) );
mean[2][1] = (real)0.5*( FABS( rR_star[0] ) + FABS( rL_star[0] ) );
mean[2][2] = (real)0.5*( FABS( rR_star[2] ) + FABS( rL_star[2] ) );
mean[2][3] = (real)0.5*( FABS( rR_star[3] ) + FABS( rL_star[3] ) );
mean[2][4] = (real)0.5*( FABS( R_2[0] ) + FABS( rR_star[0] ) );
// set limit function
for (int i=0; i<5; i++)
{
if ( FABS( eival[i] ) < MAX_ERROR ) limit[i] = (real)1.0;
else
{
if ( eival[i] > (real)0.0 )
{
if ( mean[0][i] == (real)0.0 || mean[1][i] == (real)0.0 ) limit[i] = (real)1.0;
else
{
if ( ( delta[0][i]*delta[1][i] ) / ( mean[0][i]*mean[1][i] ) < MAX_ERROR*MAX_ERROR )
limit[i] = (real)1.0;
else
{
real r = delta[0][i] / delta[1][i];
limit[i] = set_limit( r, eival[i] * ratio, WAF_Limiter );
}
}
}
else
{
if ( mean[2][i] == (real)0.0 || mean[1][i] == (real)0.0 ) limit[i] = (real)1.0;
else
{
if ( ( delta[2][i]*delta[1][i] ) / ( mean[2][i]*mean[1][i] ) < MAX_ERROR*MAX_ERROR )
limit[i] = (real)1.0;
else
{
real r = delta[2][i] / delta[1][i];
limit[i] = set_limit( r, eival[i] * ratio, WAF_Limiter );
}
}
}
}
} // for (int i=0; i<5; i++)
// solve the sign of waves
for (int i=0; i<5; i++)
{
if ( FABS( eival[i] ) < MAX_ERROR ) theta[i] = (real)0.0;
else if ( eival[i] > (real)0.0 ) theta[i] = (real)1.0;
else theta[i] = -(real)1.0;
}
// solve the intercell flux
# ifdef WAF_DISSIPATE
Dis_Stru ( flux, L_1, R_1, cL_star, cR_star, limit, theta, Gamma );
# else
Undis_Stru( flux, L_1, R_1, cL_star, cR_star, limit, theta, Gamma );
# endif
} // FUNCTION : Solve_Flux
//-----------------------------------------------------------------------------------------------------
// Function : set_limit
// Description : set the limit function
//
// parameter : r : flow variable
// c : Courant number
// WAF_Limiter : Selection of the limit function
// 0 : superbee
// 1 : van-Leer
// 2 : van-Albada
// 3 : minbee
//-------------------------------------------------------------------------------------------------------
__device__ real set_limit( const real r, const real c, const WAF_Limiter_t WAF_Limiter )
{
real limit;
// choose the limit function
switch ( WAF_Limiter )
{
case WAF_SUPERBEE :
{
if ( r > (real)0.0 && r <= (real)0.5 ) limit = (real)1.0 - (real)2.0*r*( (real)1.0 - FABS(c) );
else if ( r <= (real)1.0 ) limit = FABS(c);
else if ( r <= (real)2.0 ) limit = (real)1.0 - r*( (real)1.0 - FABS(c) );
else limit = (real)2.0*FABS(c) - (real)1.0;
break;
}
case WAF_VANLEER :
{
limit = (real)1.0 - (real)2.0*r*( (real)1.0 - FABS(c) ) / ( (real)1.0 + r );
break;
}
case WAF_ALBADA :
{
limit = (real)1.0 - r*( (real)1.0 + r )*( (real)1.0 - FABS(c) ) / ( (real)1.0 + r*r );
break;
}
case WAF_MINBEE :
{
if ( r > (real)0.0 && r <= (real)1.0 ) limit = (real)1.0 - r*( (real)1.0 - FABS(c) );
else limit = FABS(c);
break;
}
default:
break;
}
return limit;
} // FUNCTION : set_limit
#ifdef WAF_DISSIPATE
//------------------------------------------------------------------------------------------------------
// Function : Dis_Stru
// Description : Set the intercell flux by dissipative wave structure
//
// Parameter : flux : Intercell flux
// L : Primitive variables in the left region
// R : Primitive variables in the right region
// L_star : Primitive variables in the left star region
// R_star : Primitive variables in the right star region
// limit : Limit functions
// theta : Sign of wave speed
// Gamma : Ratio of specific heats
//-------------------------------------------------------------------------------------------------------
__device__ void Dis_Stru( real flux[5], const real L[5], const real R[5], const real L_star[5],
const real R_star[5], const real limit[5], const real theta[5], const real Gamma )
{
real iflux[6][5];
real lim[5];
for (int i=0; i<5; i++) lim[i] = limit[i];
// flux function evaluated at the given states
set_flux( iflux[0], L, Gamma );
set_flux( iflux[1], L_star, Gamma );
set_flux( iflux[4], R_star, Gamma );
set_flux( iflux[5], R, Gamma );
// determine the ghost states
real stat[2][5];
if ( limit[1] <= limit[2] )
{
if ( limit[3] <= limit[1] )
{
stat[0][0] = L_star[0];
stat[0][1] = L_star[1];
stat[0][2] = L_star[2];
stat[0][3] = R_star[3];
stat[0][4] = L_star[4];
stat[1][0] = R_star[0];
stat[1][1] = L_star[1];
stat[1][2] = L_star[2];
stat[1][3] = R_star[3];
stat[1][4] = L_star[4];
}
else if ( limit[3] <= limit[2] )
{
stat[0][0] = R_star[0];
stat[0][1] = L_star[1];
stat[0][2] = L_star[2];
stat[0][3] = L_star[3];
stat[0][4] = L_star[4];
stat[1][0] = R_star[0];
stat[1][1] = L_star[1];
stat[1][2] = L_star[2];
stat[1][3] = R_star[3];
stat[1][4] = L_star[4];
}
else
{
stat[0][0] = R_star[0];
stat[0][1] = L_star[1];
stat[0][2] = L_star[2];
stat[0][3] = L_star[3];
stat[0][4] = L_star[4];
stat[1][0] = R_star[0];
stat[1][1] = L_star[1];
stat[1][2] = R_star[2];
stat[1][3] = L_star[3];
stat[1][4] = L_star[4];
}
} // if ( limit[1] <= limit[2] )
else // limit[1] > limit[2]
{
if ( limit[3] <= limit[2] )
{
stat[0][0] = L_star[0];
stat[0][1] = L_star[1];
stat[0][2] = L_star[2];
stat[0][3] = R_star[3];
stat[0][4] = L_star[4];
stat[1][0] = L_star[0];
stat[1][1] = L_star[1];
stat[1][2] = R_star[2];
stat[1][3] = R_star[3];
stat[1][4] = L_star[4];
}
else if ( limit[3] <= limit[1] )
{
stat[0][0] = L_star[0];
stat[0][1] = L_star[1];
stat[0][2] = R_star[2];
stat[0][3] = L_star[3];
stat[0][4] = L_star[4];
stat[1][0] = L_star[0];
stat[1][1] = L_star[1];
stat[1][2] = R_star[2];
stat[1][3] = R_star[3];
stat[1][4] = L_star[4];
}
else
{
stat[0][0] = L_star[0];
stat[0][1] = L_star[1];
stat[0][2] = R_star[2];
stat[0][3] = L_star[3];
stat[0][4] = L_star[4];
stat[1][0] = R_star[0];
stat[1][1] = L_star[1];
stat[1][2] = R_star[2];
stat[1][3] = L_star[3];
stat[1][4] = L_star[4];
}
} // if ( limit[1] <= limit[2] ) ... else ...
// set flux in ghost region
set_flux( iflux[2], stat[0], Gamma );
set_flux( iflux[3], stat[1], Gamma );
// reorder the limit values
for (int i=1; i<3; i++)
{
if ( lim[i] > lim[i+1] )
{
real tmp = lim[i+1];
lim[i+1] = lim[i ];
lim[i ] = tmp;
}
}
if ( lim[1] > lim[2] )
{
real tmp = lim[2];
lim[2] = lim[1];
lim[1] = tmp;
}
// set the intercell flux
for (int i=0; i<5; i++)
{
flux[i] = (real)0.5*( iflux[0][i] + iflux[5][i] )
- (real)0.5*( theta[0]*lim[0]*( iflux[1][i] - iflux[0][i] ) +
theta[1]*lim[1]*( iflux[2][i] - iflux[1][i] ) +
theta[2]*lim[2]*( iflux[3][i] - iflux[2][i] ) +
theta[3]*lim[3]*( iflux[4][i] - iflux[3][i] ) +
theta[4]*lim[4]*( iflux[5][i] - iflux[4][i] ) );
}
} // FUNCTION : Dis_Stru
#endif // #ifdef WAF_DISSIPATE
#ifndef WAF_DISSIPATE
//------------------------------------------------------------------------------------------------------
// Function : Undis_Stru
// Description : Set the intercell flux by non-dissipative wave structure
//
// Parameter : flux : Intercell flux
// L : Primitive variables in the left region
// R : Primitive variables in the right region
// L_star : Primitive variables in the left star region
// R_star : Primitive variables in the right star region
// limit : Limit functions
// theta : Sign of wave speed
// Gamma : Ratio of specific heats
//-------------------------------------------------------------------------------------------------------
__device__ void Undis_Stru( real flux[5], const real L[5], const real R[5], const real L_star[5],
const real R_star[5], const real limit[5], const real theta[5], const real Gamma )
{
// flux function evaluated at the given states
real iflux[4][5];
set_flux( iflux[0], L, Gamma );
set_flux( iflux[1], L_star, Gamma );
set_flux( iflux[2], R_star, Gamma );
set_flux( iflux[3], R, Gamma );
// set the intercell flux
flux[0] = (real)0.5*( iflux[0][0] + iflux[3][0] )
- (real)0.5*( theta[0]*limit[0]*( iflux[1][0] - iflux[0][0] ) +
theta[1]*limit[1]*( iflux[2][0] - iflux[1][0] ) +
theta[4]*limit[4]*( iflux[3][0] - iflux[2][0] ) );
flux[1] = (real)0.5*( iflux[0][1] + iflux[3][1] )
- (real)0.5*( theta[0]*limit[0]*( iflux[1][1] - iflux[0][1] ) +
theta[1]*limit[1]*( iflux[2][1] - iflux[1][1] ) +
theta[4]*limit[4]*( iflux[3][1] - iflux[2][1] ) );
flux[4] = (real)0.5*( iflux[0][4] + iflux[3][4] )
- (real)0.5*( theta[0]*limit[0]*( iflux[1][4] - iflux[0][4] ) +
theta[1]*limit[1]*( iflux[2][4] - iflux[1][4] ) +
theta[4]*limit[4]*( iflux[3][4] - iflux[2][4] ) );
flux[2] = (real)0.5*( iflux[0][2] + iflux[3][2] )
- (real)0.5*( theta[0]*limit[0]*( iflux[1][2] - iflux[0][2] ) +
theta[2]*limit[2]*( iflux[2][2] - iflux[1][2] ) +
theta[4]*limit[4]*( iflux[3][2] - iflux[2][2] ) );
flux[3] = (real)0.5*( iflux[0][3] + iflux[3][3] )
- (real)0.5*( theta[0]*limit[0]*( iflux[1][3] - iflux[0][3] ) +
theta[3]*limit[3]*( iflux[2][3] - iflux[1][3] ) +
theta[4]*limit[4]*( iflux[3][3] - iflux[2][3] ) );
} // FUNCTION : Undis_Stru
#endif // #ifndef WAF_DISSIPATE
//-------------------------------------------------------------------------------------------------------
// Function : set_flux
// Description : Set the flux function evaluated at the given state
//
// Parameter : flux : Flux function
// val : Primitive variables
// Gamma : Ratio of specific heats
//-------------------------------------------------------------------------------------------------------
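// For orientation (a sketch of what the assignments below evaluate): with primitive
// variables val = ( rho, u, v, w, p ) this is the x-direction Euler flux
// F = ( rho*u,
// rho*u*u + p,
// rho*u*v,
// rho*u*w,
// u*( 0.5*rho*( u*u + v*v + w*w ) + p/(Gamma-1) + p ) )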
__device__ void set_flux( real flux[5], const real val[5], const real Gamma )
{
const real Gamma_m1 = Gamma -(real)1.0;
// set flux
flux[0] = val[0]*val[1];
flux[1] = val[0]*val[1]*val[1] + val[4];
flux[2] = val[0]*val[1]*val[2];
flux[3] = val[0]*val[1]*val[3];
flux[4] = val[1]*( (real)0.5*val[0]*( val[1]*val[1] + val[2]*val[2] + val[3]*val[3] )
+ val[4]/Gamma_m1 + val[4] );
} // FUNCTION : set_flux
#if ( RSOLVER == ROE )
//-------------------------------------------------------------------------------------------------------
// Function : Solve_StarRoe
// Description : Solve the star region and speed of waves by Roe's method
//
// Parameter : eival : Speed of waves
// L_star : Primitive variables in the left star region
// R_star : Primitive variables in the right star region
// L : Primitive variables in the left region
// R : Primitive variables in the right region
// Gamma : Ratio of specific heats
// MinPres : Minimum allowed pressure
//-------------------------------------------------------------------------------------------------------
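// For orientation (a sketch of the Roe averages formed below, with n_L = sqrt(rho_L),
// n_R = sqrt(rho_R) and h the specific enthalpy):
// u_bar = ( n_L*u_L + n_R*u_R ) / ( n_L + n_R ) (and likewise for v_bar, w_bar, h_bar)
// a_bar = sqrt( (Gamma-1)*( h_bar - 0.5*( u_bar^2 + v_bar^2 + w_bar^2 ) ) )
// except that the code first converts the sound-speed argument to a pressure, applies the
// MinPres floor, and only then takes the square root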
__device__ void Solve_StarRoe( real eival[5], real L_star[5], real R_star[5], const real L[5], const real R[5],
const real Gamma, const real MinPres )
{
const real Gamma_m1 = Gamma - (real)1.0; // for evaluating pressure and sound speed
real u_bar, v_bar, w_bar, h_bar, a_bar, a_bar_inv; // Roe's average of vx, vy, vz, enthalpy, sound speed, and
// one over a_bar
real coef[5]; // Roe's coefficients
real TempPres, TempRho, _TempRho;
// solve Roe's average
{
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( CUFLU_CheckNegative(L[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
L[0], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(R[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
R[0], __FILE__, __LINE__, __FUNCTION__ );
# endif
real n_L_sq = SQRT( L[0] ); // square root of the left density
real n_R_sq = SQRT( R[0] ); // square root of the right density
real h_L = (real)0.5*( L[1]*L[1] + L[2]*L[2] + L[3]*L[3] ) + Gamma/Gamma_m1*L[4]/L[0]; // left specific enthalpy
real h_R = (real)0.5*( R[1]*R[1] + R[2]*R[2] + R[3]*R[3] ) + Gamma/Gamma_m1*R[4]/R[0]; // right specific enthalpy
real n_bar_inv = (real)1.0 / ( n_L_sq + n_R_sq ); // one over ( n_L_sq plus n_R_sq )
u_bar = ( n_L_sq*L[1] + n_R_sq*R[1] )*n_bar_inv;
v_bar = ( n_L_sq*L[2] + n_R_sq*R[2] )*n_bar_inv;
w_bar = ( n_L_sq*L[3] + n_R_sq*R[3] )*n_bar_inv;
h_bar = ( n_L_sq*h_L + n_R_sq*h_R )*n_bar_inv;
real GammaP_Rho = Gamma_m1*( h_bar - (real)0.5*( u_bar*u_bar + v_bar*v_bar + w_bar*w_bar ) );
TempRho = (real)0.5*( L[0] + R[0] );
_TempRho = (real)1.0/TempRho;
TempPres = GammaP_Rho*TempRho/Gamma;
TempPres = CUFLU_CheckMinPres( TempPres, MinPres );
GammaP_Rho = Gamma*TempPres*_TempRho;
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( CUFLU_CheckNegative(GammaP_Rho) )
printf( "ERROR : negative GammaP_Rho (%14.7e) at file <%s>, line <%d>, function <%s>\n",
GammaP_Rho, __FILE__, __LINE__, __FUNCTION__ );
# endif
a_bar = SQRT( GammaP_Rho );
a_bar_inv = (real)1.0 / a_bar;
}
// solve Roe's coefficients
{
// the difference of conservative variables
real du_1 = R[0] - L[0];
real du_2 = R[0]*R[1] - L[0]*L[1];
real du_3 = R[0]*R[2] - L[0]*L[2];
real du_4 = R[0]*R[3] - L[0]*L[3];
real du_5 = + (real)0.5*R[0]*( R[1]*R[1] + R[2]*R[2] + R[3]*R[3] ) + R[4]/Gamma_m1
- (real)0.5*L[0]*( L[1]*L[1] + L[2]*L[2] + L[3]*L[3] ) - L[4]/Gamma_m1;
coef[2] = du_3 - v_bar*du_1;
coef[3] = du_4 - w_bar*du_1;
coef[1] = Gamma_m1*a_bar_inv*a_bar_inv*( du_1*( h_bar - u_bar*u_bar ) + u_bar*du_2 - du_5
+ coef[2]*v_bar + coef[3]*w_bar );
coef[0] = (real)0.5*a_bar_inv*( du_1*( u_bar + a_bar ) - du_2 - a_bar*coef[1] );
coef[4] = du_1 - ( coef[0] + coef[1] );
}
// solve the star region
{
L_star[0] = L[0] + coef[0];
R_star[0] = R[0] - coef[4];
L_star[1] = (real)0.5*( ( L[0]*L[1] + coef[0]*( u_bar - a_bar ) ) / L_star[0]
+ ( R[0]*R[1] - coef[4]*( u_bar + a_bar ) ) / R_star[0] );
R_star[1] = L_star[1];
L_star[2] = L[2];
R_star[2] = R[2];
L_star[3] = L[3];
R_star[3] = R[3];
real E_L = (real)0.5*L[0]*( L[1]*L[1] + L[2]*L[2] + L[3]*L[3] );
real E_R = (real)0.5*R[0]*( R[1]*R[1] + R[2]*R[2] + R[3]*R[3] );
real e_L_star = (real)0.5*L_star[0]*( L_star[1]*L_star[1] + L_star[2]*L_star[2] + L_star[3]*L_star[3] );
real e_R_star = (real)0.5*R_star[0]*( R_star[1]*R_star[1] + R_star[2]*R_star[2] + R_star[3]*R_star[3] );
L_star[4] = (real)0.5*Gamma_m1*( E_L - e_L_star + L[4]/Gamma_m1 + coef[0]*( h_bar - u_bar*a_bar )
+ E_R - e_R_star + R[4]/Gamma_m1 - coef[4]*( h_bar + u_bar*a_bar ) );
L_star[4] = CUFLU_CheckMinPres( L_star[4], MinPres );
R_star[4] = L_star[4];
}
// solve the speed of waves
{
real eigen[2];
eival[1] = L_star[1];
eival[2] = L_star[1];
eival[3] = L_star[1];
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( CUFLU_CheckNegative(L[4]) )
printf( "ERROR : negative pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
L[4], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(L[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
L[0], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(L_star[4]) )
printf( "ERROR : negative pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
L_star[4], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(L_star[0]) )
printf( "ERROR : negative density(%14.7e) at file <%s>, line <%d>, function <%s>\n",
L_star[0], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(R[4]) )
printf( "ERROR : negative pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
R[4], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(R[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
R[0], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(R_star[4]) )
printf( "ERROR : negative pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
R_star[4], __FILE__, __LINE__, __FUNCTION__ );
if ( CUFLU_CheckNegative(R_star[0]) )
printf( "ERROR : negative density(%14.7e) at file <%s>, line <%d>, function <%s>\n",
R_star[0], __FILE__, __LINE__, __FUNCTION__ );
# endif
eigen[0] = L [1] - SQRT( Gamma*L [4]/L [0] );
eigen[1] = L_star[1] - SQRT( Gamma*L_star[4]/L_star[0] );
if ( eigen[0] <= eigen[1] ) eival[0] = eigen[0];
else eival[0] = eigen[1];
eigen[0] = R [1] + SQRT( Gamma*R [4]/R [0] );
eigen[1] = R_star[1] + SQRT( Gamma*R_star[4]/R_star[0] );
if ( eigen[0] <= eigen[1] ) eival[4] = eigen[1];
else eival[4] = eigen[0];
}
} // FUNCTION : Solve_StarRoe
#endif // #if ( RSOLVER == ROE )
#endif // #if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == WAF )
|
de4a7e370f043df802d4fadad95c9b7e84e5b376.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.hpp"
namespace filter
{
template void linearColumn<float3, int3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| de4a7e370f043df802d4fadad95c9b7e84e5b376.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.hpp"
namespace filter
{
template void linearColumn<float3, int3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
5cde17e5cd8ded7c456a697d6ebcc8422c9eb015.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "kernels.hpp"
#define CHECK_CUDA(func) { \
hipError_t err = (func); \
if (err != hipSuccess) { \
fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, hipGetErrorString(err)); \
abort(); \
} \
}
__global__ void init_kernel(double *A, double *B, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
A[tid] = tid;
B[tid] = 2 * tid;
}
}
void initialize_device_arrays(double *dA, double *dB, int N) {
int threads_per_block = 128;
int blocks_per_grid = (N + threads_per_block - 1) / threads_per_block;
hipLaunchKernelGGL(( init_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, dA, dB, N);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipGetLastError());
}
__global__ void vecadd_kernel(double *A, double *B, double *C, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
C[tid] = A[tid] + B[tid];
}
}
void gpu_vector_sum(double *dA, double *dB, double *dC, int start, int end) {
int N = end - start;
int threads_per_block = 128;
int blocks_per_grid = (N + threads_per_block - 1) / threads_per_block;
hipLaunchKernelGGL(( vecadd_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, dA + start, dB + start,
dC + start, N);
CHECK_CUDA(hipDeviceSynchronize());
CHECK_CUDA(hipGetLastError());
}
| 5cde17e5cd8ded7c456a697d6ebcc8422c9eb015.cu | #include <stdio.h>
#include "kernels.hpp"
#define CHECK_CUDA(func) { \
cudaError_t err = (func); \
if (err != cudaSuccess) { \
fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); \
abort(); \
} \
}
__global__ void init_kernel(double *A, double *B, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
A[tid] = tid;
B[tid] = 2 * tid;
}
}
void initialize_device_arrays(double *dA, double *dB, int N) {
int threads_per_block = 128;
int blocks_per_grid = (N + threads_per_block - 1) / threads_per_block;
init_kernel<<<blocks_per_grid, threads_per_block>>>(dA, dB, N);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaGetLastError());
}
__global__ void vecadd_kernel(double *A, double *B, double *C, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
C[tid] = A[tid] + B[tid];
}
}
void gpu_vector_sum(double *dA, double *dB, double *dC, int start, int end) {
int N = end - start;
int threads_per_block = 128;
int blocks_per_grid = (N + threads_per_block - 1) / threads_per_block;
vecadd_kernel<<<blocks_per_grid, threads_per_block>>>(dA + start, dB + start,
dC + start, N);
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaGetLastError());
}
|
9199c3ab5ec87f1e1f9426a0ebe252505b9493a2.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2018 Carsten Burstedde, Donna Calhoun, Melody Shih, Scott Aiton,
Xinsheng Qin.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "../fc2d_cudaclaw_cuda.h"
#include "../fc2d_cudaclaw_check.cu"
#include <fclaw2d_global.h>
#if defined(FCLAW_ENABLE_MPI)
#endif
#include <fclaw_mpi.h>
void fc2d_cudaclaw_initialize_GPUs(fclaw2d_global_t *glob)
{
hipDeviceProp_t prop;
int mpirank, count, device_num;
fclaw_global_essentialf("Block-size (FC2D_CUDACLAW_BLOCK_SIZE) set to %d\n",
FC2D_CUDACLAW_BLOCK_SIZE);
mpirank = glob->mpirank;
CHECK(hipGetDeviceCount(&count));
device_num = mpirank % count;
CHECK(hipSetDevice(device_num));
/* Print out info */
#if defined(FCLAW_ENABLE_MPI)
char name[MPI_MAX_PROCESSOR_NAME];
int len;
MPI_Get_processor_name(name, &len);
#else
const char *name = "unknown00";
#endif
fclaw_mpi_serialization_enter (glob);
hipGetDeviceProperties(&prop, device_num);
printf("[fclaw] Rank %2d (%s) assigned to GPU %d (%s)\n",mpirank, name,
device_num,prop.name);
fclaw_mpi_serialization_leave (glob);
fflush(stdout);
hipDeviceReset();
}
| 9199c3ab5ec87f1e1f9426a0ebe252505b9493a2.cu | /*
Copyright (c) 2018 Carsten Burstedde, Donna Calhoun, Melody Shih, Scott Aiton,
Xinsheng Qin.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "../fc2d_cudaclaw_cuda.h"
#include "../fc2d_cudaclaw_check.cu"
#include <fclaw2d_global.h>
#if defined(FCLAW_ENABLE_MPI)
#endif
#include <fclaw_mpi.h>
void fc2d_cudaclaw_initialize_GPUs(fclaw2d_global_t *glob)
{
cudaDeviceProp prop;
int mpirank, count, device_num;
fclaw_global_essentialf("Block-size (FC2D_CUDACLAW_BLOCK_SIZE) set to %d\n",
FC2D_CUDACLAW_BLOCK_SIZE);
mpirank = glob->mpirank;
CHECK(cudaGetDeviceCount(&count));
device_num = mpirank % count;
CHECK(cudaSetDevice(device_num));
/* Print out info */
#if defined(FCLAW_ENABLE_MPI)
char name[MPI_MAX_PROCESSOR_NAME];
int len;
MPI_Get_processor_name(name, &len);
#else
const char *name = "unknown00";
#endif
fclaw_mpi_serialization_enter (glob);
cudaGetDeviceProperties(&prop, device_num);
printf("[fclaw] Rank %2d (%s) assigned to GPU %d (%s)\n",mpirank, name,
device_num,prop.name);
fclaw_mpi_serialization_leave (glob);
fflush(stdout);
cudaDeviceReset();
}
|
4736cfe53bf041fe456f19abcb8c656fd8befc07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
zgeellrtmv_kernel_32(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaDoubleComplex shared[];
if (i < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 16 ) {
shared[idb] += shared[idb+16];
if ( idp < 8 ) shared[idb] += shared[idb+8];
if ( idp < 4 ) shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
zgeellrtmv_kernel_16(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaDoubleComplex shared[];
if (i < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 8 ) {
shared[idb] += shared[idb+8];
if ( idp < 4 ) shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
zgeellrtmv_kernel_8(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaDoubleComplex shared[];
if (i < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 4 ) {
shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLRT. The ideas are taken from
"Improving the performance of the sparse matrix
vector product with GPUs", (CIT 2010),
and modified to provide correct values.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows
@param[in]
n magma_int_t
number of columns
@param[in]
nnz_per_row magma_int_t
max number of nonzeros in a row
@param[in]
alpha magmaDoubleComplex
scalar alpha
@param[in]
dval magmaDoubleComplex_ptr
val array
@param[in]
dcolind magmaIndex_ptr
col indices
@param[in]
drowlength magmaIndex_ptr
number of elements in each row
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar beta
@param[out]
dy magmaDoubleComplex_ptr
output vector y
@param[in]
blocksize magma_int_t
threads per block
@param[in]
alignment magma_int_t
threads assigned to each row
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
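// A minimal call sketch for orientation (the buffer names d_val, d_colind, d_rowlength,
// d_x, d_y below are placeholders, not symbols from this file; error handling omitted):
//
// magma_int_t alignment = 32; // threads cooperating on each row
// magma_int_t blocksize = 8; // alignment*blocksize threads per block
// magma_zgeellrtmv( MagmaNoTrans, m, n, nnz_per_row,
// MAGMA_Z_ONE, d_val, d_colind, d_rowlength,
// d_x, MAGMA_Z_ZERO, d_y,
// alignment, blocksize, queue );
//
// which computes d_y = A*d_x for an m-by-n matrix stored in ELLRT format.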
extern "C" magma_int_t
magma_zgeellrtmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowlength,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_int_t alignment,
magma_int_t blocksize,
magma_queue_t queue )
{
int num_blocks = magma_ceildiv( m, blocksize );
magma_int_t num_threads = alignment*blocksize;
magma_int_t threads = alignment*blocksize;
int real_row_length = magma_roundup( nnz_per_row, alignment );
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( double( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = alignment * blocksize * sizeof( magmaDoubleComplex );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads, Ms);
if ( alignment == 32 ) {
hipLaunchKernelGGL(( zgeellrtmv_kernel_32), dim3(grid), dim3(threads), Ms, queue->cuda_stream() ,
m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else if ( alignment == 16 ) {
hipLaunchKernelGGL(( zgeellrtmv_kernel_16), dim3(grid), dim3(threads), Ms, queue->cuda_stream() ,
m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else if ( alignment == 8 ) {
hipLaunchKernelGGL(( zgeellrtmv_kernel_8), dim3(grid), dim3(threads), Ms, queue->cuda_stream() ,
m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
return MAGMA_SUCCESS;
}
| 4736cfe53bf041fe456f19abcb8c656fd8befc07.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
zgeellrtmv_kernel_32(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaDoubleComplex shared[];
if (i < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 16 ) {
shared[idb] += shared[idb+16];
if ( idp < 8 ) shared[idb] += shared[idb+8];
if ( idp < 4 ) shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
zgeellrtmv_kernel_16(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaDoubleComplex shared[];
if (i < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 8 ) {
shared[idb] += shared[idb+8];
if ( idp < 4 ) shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
zgeellrtmv_kernel_8(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaDoubleComplex shared[];
if (i < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * dx[ col ];
}
shared[idb] = dot;
if ( idp < 4 ) {
shared[idb] += shared[idb+4];
if ( idp < 2 ) shared[idb] += shared[idb+2];
if ( idp == 0 ) {
dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i];
}
}
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLRT. The ideas are taken from
"Improving the performance of the sparse matrix
vector product with GPUs", (CIT 2010),
and modified to provide correct values.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows
@param[in]
n magma_int_t
number of columns
@param[in]
nnz_per_row magma_int_t
max number of nonzeros in a row
@param[in]
alpha magmaDoubleComplex
scalar alpha
@param[in]
dval magmaDoubleComplex_ptr
val array
@param[in]
dcolind magmaIndex_ptr
col indices
@param[in]
drowlength magmaIndex_ptr
number of elements in each row
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar beta
@param[out]
dy magmaDoubleComplex_ptr
output vector y
@param[in]
blocksize magma_int_t
threads per block
@param[in]
alignment magma_int_t
threads assigned to each row
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zgeellrtmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowlength,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_int_t alignment,
magma_int_t blocksize,
magma_queue_t queue )
{
int num_blocks = magma_ceildiv( m, blocksize );
magma_int_t num_threads = alignment*blocksize;
magma_int_t threads = alignment*blocksize;
int real_row_length = magma_roundup( nnz_per_row, alignment );
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( double( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = alignment * blocksize * sizeof( magmaDoubleComplex );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads, Ms);
if ( alignment == 32 ) {
zgeellrtmv_kernel_32<<< grid, threads, Ms, queue->cuda_stream() >>>
( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else if ( alignment == 16 ) {
zgeellrtmv_kernel_16<<< grid, threads, Ms, queue->cuda_stream() >>>
( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else if ( alignment == 8 ) {
zgeellrtmv_kernel_8<<< grid, threads, Ms, queue->cuda_stream() >>>
( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy,
alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
return MAGMA_SUCCESS;
}
|
51bbba956d2d0332f97b11084290dc146f257b9c.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include "rocblas.h"
#include <hiprand/hiprand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
mwSize *size = (mwSize *)mxGetPr(mxGetField(prhs[0],0,KC_ARRAY_SIZE));
KC_FP_TYPE *d_a;
d_a = kcGetArrayData(prhs[0]);
mwSize *size2 = (mwSize*)malloc(sizeof(mwSize)*2);
size2[0] = size[0];
size2[1] = 1;
int cNum = (int)mxGetScalar(prhs[1]);
if(cNum < size[1]) {
plhs[0] = kcSetupEmptyArray(2,size2);
unsigned KC_PTR_SIZE int * ptr = (unsigned KC_PTR_SIZE int*)mxGetPr(mxGetField(plhs[0],0,KC_ARRAY_PTR));
*ptr = (unsigned KC_PTR_SIZE int)(&(d_a[cNum*(size[0])]));
}
else {
plhs[0] = mxCreateNumericMatrix(1,1,mxDOUBLE_CLASS,mxREAL);
mexPrintf("Index out-of-bounds\n");
}
}
| 51bbba956d2d0332f97b11084290dc146f257b9c.cu |
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include "cublas_v2.h"
#include <curand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
mwSize *size = (mwSize *)mxGetPr(mxGetField(prhs[0],0,KC_ARRAY_SIZE));
KC_FP_TYPE *d_a;
d_a = kcGetArrayData(prhs[0]);
mwSize *size2 = (mwSize*)malloc(sizeof(mwSize)*2);
size2[0] = size[0];
size2[1] = 1;
int cNum = (int)mxGetScalar(prhs[1]);
if(cNum < size[1]) {
plhs[0] = kcSetupEmptyArray(2,size2);
unsigned KC_PTR_SIZE int * ptr = (unsigned KC_PTR_SIZE int*)mxGetPr(mxGetField(plhs[0],0,KC_ARRAY_PTR));
*ptr = (unsigned KC_PTR_SIZE int)(&(d_a[cNum*(size[0])]));
}
else {
plhs[0] = mxCreateNumericMatrix(1,1,mxDOUBLE_CLASS,mxREAL);
mexPrintf("Index out-of-bounds\n");
}
}
|
8eb806ea04164d8f69a7e9cd5c644d1eeae84b4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
// Array access macros
#define INPUT(i,j) A[(i) + (m)*(j)]
#define OUTPUT(i,j) B[(i) + (m)*(j)]
#define H(i,j) Gauss[(i) + (Reg_x)*(j)]
/*----------------------------------------- 9 arguments passed to the kernel ---------------------------------------- */
__global__ void myKernel(float const* const A, float *B, float const* const Gauss, int const m, int const n, int const thrds, float const s, int const Reg_x, int const Reg_y)
{
/* Coordinates of each thread and of each block in the grid. */
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.x;
int l = blockIdx.y;
int a,b,e,g,c,d;
int xx, yy, xk, yk, xxk, yyk;
float w,w1,z1;
float norm=0;
float Z_total = 0;
float subs;
if(k<1 && l<1)
{
w1 = m/thrds ;
z1 = n/thrds ;
for(e=0 ; e<w1 ; e++)
{
for(g=0 ; g<z1 ; g++)
{
Z_total = 0 ;
xx = threadIdx.x*w1 + e; // coordinates of the centre of each element (pixel)
yy = threadIdx.y*z1 + g;
for(a=0; a<m ; a++)
{
for(b=0; b<n ; b++)
{
norm = 0 ;
for(c=0 ; c<Reg_x ; c++)
{
for(d=0 ; d<Reg_y ; d++)
{
xk = xx - Reg_x/2 + c ; // neighbours of the pixel currently being processed
yk = yy - Reg_y/2 + d ;
xxk = a - Reg_x/2 + c ;
yyk = b - Reg_y/2 + d ; // coordinates of the neighbours of each pixel
/* Boundary check in case we step outside the image; mirror (reflective) conditions are used. */
if(xk<0)
xk = fabsf(xk);
if(xk>m)
xk = m-(xk-m);
if(yk<0)
yk = fabsf(yk);
if(yk>n)
yk = n-(yk-n);
/* Check whether a neighbouring pixel of the one currently examined falls outside the image. */
if(xxk<0)
xxk = fabsf(xxk);
if(xxk>m)
xxk = m-(xxk-m);
if(yyk<0)
yyk = fabsf(yyk);
if(yyk>n)
yyk = n-(yyk-n);
subs = INPUT(xk,yk) - INPUT(xxk,yyk); /* element-wise difference of the two neighbourhoods */
/* now multiply element-wise by the fspecial kernel, i.e. the Gaussian weights */
// for speed the multiplication is applied directly here
subs = H(c,d)*subs ;
subs = powf(subs,2); // square the element; the terms are summed below to form the norm
norm += subs ; // accumulated norm between this pixel's patch and a single other patch -- note!
}
}
Z_total += expf(-norm/s);
w = expf(-norm/s) ;
OUTPUT(xx,yy) += w*INPUT(a,b) ;
}
}
OUTPUT(xx,yy) = (OUTPUT(xx,yy)/Z_total) ;
__syncthreads() ;
}
}
}
}
| 8eb806ea04164d8f69a7e9cd5c644d1eeae84b4c.cu | #include <stdio.h>
#include <math.h>
// Array access macros
#define INPUT(i,j) A[(i) + (m)*(j)]
#define OUTPUT(i,j) B[(i) + (m)*(j)]
#define H(i,j) Gauss[(i) + (Reg_x)*(j)]
/*----------------------------------------- 9 arguments passed to the kernel ---------------------------------------- */
__global__ void myKernel(float const* const A, float *B, float const* const Gauss, int const m, int const n, int const thrds, float const s, int const Reg_x, int const Reg_y)
{
/* Coordinates of each thread and of each block in the grid. */
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.x;
int l = blockIdx.y;
int a,b,e,g,c,d;
int xx, yy, xk, yk, xxk, yyk;
float w,w1,z1;
float norm=0;
float Z_total = 0;
float subs;
if(k<1 && l<1)
{
w1 = m/thrds ;
z1 = n/thrds ;
for(e=0 ; e<w1 ; e++)
{
for(g=0 ; g<z1 ; g++)
{
Z_total = 0 ;
xx = threadIdx.x*w1 + e; // coordinates of the centre of each element (pixel)
yy = threadIdx.y*z1 + g;
for(a=0; a<m ; a++)
{
for(b=0; b<n ; b++)
{
norm = 0 ;
for(c=0 ; c<Reg_x ; c++)
{
for(d=0 ; d<Reg_y ; d++)
{
xk = xx - Reg_x/2 + c ; // neighbours of the pixel currently being processed
yk = yy - Reg_y/2 + d ;
xxk = a - Reg_x/2 + c ;
yyk = b - Reg_y/2 + d ; // coordinates of the neighbours of each pixel
/* Boundary check in case we step outside the image; mirror (reflective) conditions are used. */
if(xk<0)
xk = fabsf(xk);
if(xk>m)
xk = m-(xk-m);
if(yk<0)
yk = fabsf(yk);
if(yk>n)
yk = n-(yk-n);
/* Check whether a neighbouring pixel of the one currently examined falls outside the image. */
if(xxk<0)
xxk = fabsf(xxk);
if(xxk>m)
xxk = m-(xxk-m);
if(yyk<0)
yyk = fabsf(yyk);
if(yyk>n)
yyk = n-(yyk-n);
subs = INPUT(xk,yk) - INPUT(xxk,yyk); /* element-wise difference of the two neighbourhoods */
/* now multiply element-wise by the fspecial kernel, i.e. the Gaussian weights */
// for speed the multiplication is applied directly here
subs = H(c,d)*subs ;
subs = powf(subs,2); // square the element; the terms are summed below to form the norm
norm += subs ; // accumulated norm between this pixel's patch and a single other patch -- note!
}
}
Z_total += expf(-norm/s);
w = expf(-norm/s) ;
OUTPUT(xx,yy) += w*INPUT(a,b) ;
}
}
OUTPUT(xx,yy) = (OUTPUT(xx,yy)/Z_total) ;
__syncthreads() ;
}
}
}
}
|
1ce91c80d8ee5a08f6e3501dae606fb95c77b3bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/slice_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype* in_data,
const bool forward, const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index = slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Forward_const_gpu(bottom,top);
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_const_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) const {
if (top.size() == 1) { return; }
int offset_slice_axis = 0;
const Dtype* bottom_data = bottom[0]->gpu_data();
const int slice_axis=get_slice_axis(bottom);
const int bottom_slice_axis = bottom[0]->shape(slice_axis);
const bool kForward = true;
const int num_slices = bottom[0]->count(0, slice_axis);
const int slice_size = bottom[0]->count(slice_axis + 1);
for (int i = 0; i < top.size(); ++i) {
Dtype* top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis);
const int top_slice_size = top_slice_axis * slice_size;
const int nthreads = top_slice_size * num_slices;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(Slice<Dtype>, dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, bottom_data, kForward, num_slices, slice_size,
bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
offset_slice_axis += top_slice_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS_CONST(SliceLayer);
} // namespace caffe
| 1ce91c80d8ee5a08f6e3501dae606fb95c77b3bb.cu | #include <vector>
#include "caffe/layers/slice_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype* in_data,
const bool forward, const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index = slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Forward_const_gpu(bottom,top);
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_const_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) const {
if (top.size() == 1) { return; }
int offset_slice_axis = 0;
const Dtype* bottom_data = bottom[0]->gpu_data();
const int slice_axis=get_slice_axis(bottom);
const int bottom_slice_axis = bottom[0]->shape(slice_axis);
const bool kForward = true;
const int num_slices = bottom[0]->count(0, slice_axis);
const int slice_size = bottom[0]->count(slice_axis + 1);
for (int i = 0; i < top.size(); ++i) {
Dtype* top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis);
const int top_slice_size = top_slice_axis * slice_size;
const int nthreads = top_slice_size * num_slices;
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom_data, kForward, num_slices, slice_size,
bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
offset_slice_axis += top_slice_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS_CONST(SliceLayer);
} // namespace caffe
|
a8e327edd760df08ca07f50e7ef5f1d20c7cf59f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mpi.h>
#include <cstdio>
__global__ void GPU_Kernel() {
printf(" GPU block : %d / %d GPU thread : %d / %d\n",
blockIdx.x, gridDim.x, threadIdx.x, blockDim.x);
}
int main(int argc, char **argv) {
char hostname[256];
int mpisize, mpirank, gpusize, gpurank, len;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
MPI_Get_processor_name(hostname, &len);
hipGetDeviceCount(&gpusize);
hipSetDevice(mpirank % gpusize);
hipGetDevice(&gpurank);
for (int irank=0; irank<mpisize; irank++) {
MPI_Barrier(MPI_COMM_WORLD);
if (mpirank == irank) {
printf("Hostname : %s\n", hostname);
printf("MPI rank : %d / %d GPU device : %d / %d\n",
mpirank, mpisize, gpurank, gpusize);
hipLaunchKernelGGL(( GPU_Kernel), dim3(2),dim3(2), 0, 0, );
hipDeviceSynchronize();
}
}
MPI_Finalize();
}
| a8e327edd760df08ca07f50e7ef5f1d20c7cf59f.cu | #include <mpi.h>
#include <cstdio>
__global__ void GPU_Kernel() {
printf(" GPU block : %d / %d GPU thread : %d / %d\n",
blockIdx.x, gridDim.x, threadIdx.x, blockDim.x);
}
int main(int argc, char **argv) {
char hostname[256];
int mpisize, mpirank, gpusize, gpurank, len;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
MPI_Get_processor_name(hostname, &len);
cudaGetDeviceCount(&gpusize);
cudaSetDevice(mpirank % gpusize);
cudaGetDevice(&gpurank);
for (int irank=0; irank<mpisize; irank++) {
MPI_Barrier(MPI_COMM_WORLD);
if (mpirank == irank) {
printf("Hostname : %s\n", hostname);
printf("MPI rank : %d / %d GPU device : %d / %d\n",
mpirank, mpisize, gpurank, gpusize);
GPU_Kernel<<<2,2>>>();
cudaDeviceSynchronize();
}
}
MPI_Finalize();
}
|
88052b0be28ce9e42287cc2c4066baa52d2dc0cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#define N (1024*1024)
__global__ void kernel ( float * data )
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float x = 2.0f * 3.1415926f * (float) idx / (float) N;
data [idx] = sinf ( sqrtf ( x ) );
}
int main ( int argc, char * argv [] )
{
float a [N];
float * dev = NULL;
hipMalloc ( (void**)&dev, N * sizeof ( float ) );
hipLaunchKernelGGL(( kernel), dim3(dim3((N/512),1)), dim3(dim3(512,1)), 0, 0, dev );
hipMemcpy ( a, dev, N * sizeof ( float ), hipMemcpyDeviceToHost );
hipFree ( dev );
for (int idx = 0; idx < N; idx++) printf("a[%d] = %.5f\n", idx, a[idx]);
return 0;
}
88052b0be28ce9e42287cc2c4066baa52d2dc0cf.cu | #include <cstdio>
#define N (1024*1024)
__global__ void kernel ( float * data )
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float x = 2.0f * 3.1415926f * (float) idx / (float) N;
data [idx] = sinf ( sqrtf ( x ) );
}
int main ( int argc, char * argv [] )
{
float a [N];
float * dev = NULL;
cudaMalloc ( (void**)&dev, N * sizeof ( float ) );
kernel<<<dim3((N/512),1), dim3(512,1)>>> ( dev );
cudaMemcpy ( a, dev, N * sizeof ( float ), cudaMemcpyDeviceToHost );
cudaFree ( dev );
for (int idx = 0; idx < N; idx++) printf("a[%d] = %.5f\n", idx, a[idx]);
return 0;
}
|
49e979176f77e7c8b3289cd2fc84b1c5c54b9a0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "aux.h"
#include <iostream>
#include "math.h"
using namespace std;
// uncomment to use the camera
//#define CAMERA
//CPU Computation
__global__ void mykernel(float *d_imgIn, int *d_hist, int w, int h, int nc)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int z = threadIdx.z + blockDim.z * blockIdx.z;
if (x < w && y < h && z < nc)
{
int ind = x + w*y + w*h*z;
int index = d_imgIn[ind]*255.f;
atomicAdd(&d_hist[index], 1);
}
}
__global__ void mykernel_shared(float *d_imgIn, int *d_hist, int w, int h, int nc)
{
__shared__
int shared_hist[256];
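// per-block (privatized) copy of the 256-bin histogram: each block accumulates its own
// counts here first and merges them into d_hist once at the end, which cuts down the
// number of atomics issued to global memory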
// the first 256 threads of each block zero the per-block shared histogram
if (threadIdx.x < 256) {
shared_hist[threadIdx.x] = 0;
}
__syncthreads();
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int z = threadIdx.z + blockDim.z * blockIdx.z;
if (x < w && y < h && z < nc)
{
int ind = x + w*y + w*h*z;
int index = d_imgIn[ind]*255.f;
atomicAdd(&shared_hist[index], 1);
}
__syncthreads();
// the first 256 threads of each block merge the per-block histogram into the global histogram
if (threadIdx.x < 256) {
atomicAdd(&d_hist[threadIdx.x],shared_hist[threadIdx.x]);
}
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so-called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
float gamma = 1;
getParam("gamma", gamma, argc, argv);
cout << "gamma: " << gamma << endl;
float blockX = 32;
getParam("blockX", blockX, argc, argv);
cout << "blockX: " << blockX << endl;
float blockY = 8;
getParam("blockY", blockY, argc, argv);
cout << "blockY: " << blockY << endl;
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
int hist_size = 256;
int *hist = new int[hist_size];
//allocate memory on device
float *d_imgIn = NULL;
int *d_hist = NULL;
int imgSize = (size_t)w*h*nc;
hipMalloc(&d_imgIn, imgSize*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_hist, hist_size*sizeof(int)); CUDA_CHECK;
dim3 block = dim3(32, 8, 1);
dim3 grid = dim3((w + block.x - 1) / block.x,
(h + block.y - 1) / block.y, (nc));
Timer timer; float t = 0;
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
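// For reference, a sketch of the two index conventions for a pixel (x,y) and channel c
// (0-based, w*h pixels, nc channels):
// interleaved (cv::Mat) : value = data[ (x + w*y)*nc + c ]
// layered (CUDA arrays) : value = data[ x + w*y + w*h*c ]
// which is exactly the ind = x + w*y + w*h*z indexing used by the kernels above.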
convert_mat_to_layered (imgIn, mIn);
for(int i=0;i<hist_size;i++)
hist[i] = 0;
//copy host memory to device
hipMemcpy(d_imgIn, imgIn, imgSize*sizeof(float), hipMemcpyHostToDevice); CUDA_CHECK;
hipMemcpy(d_hist, hist, hist_size*sizeof(int), hipMemcpyHostToDevice); CUDA_CHECK;
timer.start();
for (int i=0; i< repeats ; i++)
{
hipLaunchKernelGGL(( mykernel_shared) , dim3(grid),dim3(block), 0, 0, d_imgIn, d_hist, w, h, nc);
}
timer.end();
t = timer.get(); // elapsed time in seconds
cout << "Average time (shared) for " << repeats << " repeat(s): " << t * 1000 / repeats << " ms" << endl;
hipMemcpy(d_hist, hist, hist_size*sizeof(int), hipMemcpyHostToDevice); CUDA_CHECK;
timer.start();
for (int i=0; i< repeats ; i++)
{
hipLaunchKernelGGL(( mykernel) , dim3(grid),dim3(block), 0, 0, d_imgIn, d_hist, w, h, nc);
}
timer.end();
t = timer.get(); // elapsed time in seconds
cout << "Average time (naive) for " << repeats << " repeat(s): " << t * 1000 / repeats << " ms" << endl;
// for(int i=0;i<hist_size;i++)
// hist[i] = 0;
//
// t = 0;
// for (int i=0; i< repeats ; i++)
// {
// timer.start();
// mykernel_shared <<<grid,block>>> (d_imgIn, d_hist, w, h, nc);
// timer.end();
// t += timer.get(); // elapsed time in seconds
// }
// cout << "SHARED - Average time for " << repeats << " repeat(s): " << t * 1000 / repeats << " ms" << endl;
//copy result back to host memory
hipMemcpy(hist, d_hist, hist_size * sizeof(int), hipMemcpyDeviceToHost); CUDA_CHECK;
showHistogram256("Histogram", hist, 1000, 100);
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
//convert_layered_to_mat(mOut, imgOut);
//showImage("Histogram", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
//free memory
hipFree(d_imgIn);
hipFree(d_hist);
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
delete[] hist;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| 49e979176f77e7c8b3289cd2fc84b1c5c54b9a0e.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "aux.h"
#include <iostream>
#include "math.h"
using namespace std;
// uncomment to use the camera
//#define CAMERA
// GPU histogram kernels
__global__ void mykernel(float *d_imgIn, int *d_hist, int w, int h, int nc)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int z = threadIdx.z + blockDim.z * blockIdx.z;
if (x < w && y < h && z < nc)
{
int ind = x + w*y + w*h*z;
int index = d_imgIn[ind]*255.f;
atomicAdd(&d_hist[index], 1);
}
}
__global__ void mykernel_shared(float *d_imgIn, int *d_hist, int w, int h, int nc)
{
__shared__
int shared_hist[256];
// first threads initialize the shared memory
if (threadIdx.x < 256) {
shared_hist[threadIdx.x] = 0;
}
__syncthreads();
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int z = threadIdx.z + blockDim.z * blockIdx.z;
if (x < w && y < h && z < nc)
{
int ind = x + w*y + w*h*z;
int index = d_imgIn[ind]*255.f;
atomicAdd(&shared_hist[index], 1);
}
__syncthreads();
// first threads of the block update the global histogram
if (threadIdx.x < 256) {
atomicAdd(&d_hist[threadIdx.x],shared_hist[threadIdx.x]);
}
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
cudaDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specified
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
float gamma = 1;
getParam("gamma", gamma, argc, argv);
cout << "gamma: " << gamma << endl;
float blockX = 32;
getParam("blockX", blockX, argc, argv);
cout << "blockX: " << blockX << endl;
float blockY = 8;
getParam("blockY", blockY, argc, argv);
cout << "blockY: " << blockY << endl;
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
int hist_size = 256;
int *hist = new int[hist_size];
//allocate memory on device
float *d_imgIn = NULL;
int *d_hist = NULL;
int imgSize = (size_t)w*h*nc;
cudaMalloc(&d_imgIn, imgSize*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_hist, hist_size*sizeof(int)); CUDA_CHECK;
dim3 block = dim3(32, 8, 1);
dim3 grid = dim3((w + block.x - 1) / block.x,
(h + block.y - 1) / block.y, (nc));
Timer timer; float t = 0;
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
for(int i=0;i<hist_size;i++)
hist[i] = 0;
//copy host memory to device
cudaMemcpy(d_imgIn, imgIn, imgSize*sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMemcpy(d_hist, hist, hist_size*sizeof(int), cudaMemcpyHostToDevice); CUDA_CHECK;
timer.start();
for (int i=0; i< repeats ; i++)
{
mykernel_shared <<<grid,block>>> (d_imgIn, d_hist, w, h, nc);
}
timer.end();
t = timer.get(); // elapsed time in seconds
cout << "Average time (shared) for " << repeats << " repeat(s): " << t * 1000 / repeats << " ms" << endl;
cudaMemcpy(d_hist, hist, hist_size*sizeof(int), cudaMemcpyHostToDevice); CUDA_CHECK;
timer.start();
for (int i=0; i< repeats ; i++)
{
mykernel <<<grid,block>>> (d_imgIn, d_hist, w, h, nc);
}
timer.end();
t = timer.get(); // elapsed time in seconds
cout << "Average time (naive) for " << repeats << " repeat(s): " << t * 1000 / repeats << " ms" << endl;
// for(int i=0;i<hist_size;i++)
// hist[i] = 0;
//
// t = 0;
// for (int i=0; i< repeats ; i++)
// {
// timer.start();
// mykernel_shared <<<grid,block>>> (d_imgIn, d_hist, w, h, nc);
// timer.end();
// t += timer.get(); // elapsed time in seconds
// }
// cout << "SHARED - Average time for " << repeats << " repeat(s): " << t * 1000 / repeats << " ms" << endl;
//copy result back to host memory
cudaMemcpy(hist, d_hist, hist_size * sizeof(int), cudaMemcpyDeviceToHost); CUDA_CHECK;
showHistogram256("Histogram", hist, 1000, 100);
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
//convert_layered_to_mat(mOut, imgOut);
//showImage("Histogram", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
//free memory
cudaFree(d_imgIn);
cudaFree(d_hist);
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
delete[] hist;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
a69bcc41410b9e073f4e5211c888426e34d4986e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<stdlib.h>
#include<stdio.h>
using namespace std;
#define BUF_LEN 16
#define N 2
typedef unsigned long(*pFdummy)(void);
__device__ __noinline__ unsigned long dummy1()
{
return 0x1111111111111111;
}
__device__ __noinline__ unsigned long dummy2()
{
return 0x2222222222222222;
}
__device__ __noinline__ unsigned long dummy3()
{
return 0x3333333333333333;
}
__device__ __noinline__ unsigned long dummy4()
{
return 0x4444444444444444;
}
__device__ __noinline__ unsigned long dummy5()
{
return 0x5555555555555555;
}
__device__ __noinline__ unsigned long dummy6()
{
return 0x6666666666666666;
}
__device__ __noinline__ unsigned long dummy7()
{
return 0x7777777777777777;
}
__device__ __noinline__ unsigned long dummy8()
{
return 0x8888888888888888;
}
__device__ __noinline__ unsigned long dummy9()
{
return 0x9999999999999999;
}
__device__ unsigned long __noinline__ unsafe(unsigned int *input,int len)
{
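// Note: copies 'len' words into the fixed 16-word shared buffer without a bounds check,
// then dispatches through a shared function-pointer table indexed by a djb2 hash of the
// buffer -- apparently a deliberate demo of corrupting shared memory to redirect the call.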
__shared__ unsigned int buf[BUF_LEN];
__shared__ pFdummy fp[8];
fp[0]=dummy1;
fp[1]=dummy2;
fp[2]=dummy3;
fp[3]=dummy4;
fp[4]=dummy5;
fp[5]=dummy6;
fp[6]=dummy7;
fp[7]=dummy8;
unsigned int hash=5381;
//copy input to buf
//printf("%x %x %x");
printf("%d\n",len);
//printf("%p\n",&buf[21]);
//printf("%p\n",&fp[5]);
printf("%p\n",dummy9);
if(blockIdx.x==0)
for(int i=0;i<len;i++)
{
buf[i]=input[i];
}
//buf[-6]=input[0]; // in shared memory
//djb2
for(int i=0;i<BUF_LEN;i++)
{
hash=((hash<<5)+hash)+buf[i];
printf("%d\n", hash%8 );
}
return (unsigned long) (fp[hash%8])();
}
__global__ void test_kernel(unsigned long *hashes,unsigned int *input,int len,int admin)
{
unsigned long my_hash;
//int m;
//m=*len;
int idx=blockDim.x*blockIdx.x+threadIdx.x;
printf("blockid: %d,idx: %d, len: %d\n",blockIdx.x, idx, len);
if(admin)
{ my_hash=dummy9();
//my_hash=dummy8();
// printf("%p\n",&idx);
}
else
{
if(idx==0)
my_hash=unsafe(input+(len*idx),len);
else
my_hash=unsafe(input+(len*idx),len-1);
}
hashes[idx]=my_hash;
}
static void checkCudaErrorAux(const char*file,unsigned line,const char*statement,hipError_t error)
{
if(error==hipSuccess)
return;
cout<<statement<<"returned:"<<hipGetErrorString(error)<<"at file:"<<file<<"line:"<<line<<endl;
exit(1);
}
#define CUDA_CHECK_RETURN(value) checkCudaErrorAux(__FILE__,__LINE__,#value,value)
int main()
{
unsigned int input[100];
int len=27,admin=0;
unsigned long hashes[N];
unsigned long *dev_hashes;
unsigned int *dev_input;
unsigned int m=0;
m=0x2d8;
//m=0x450;
//cout<<"start!"<<endl;
for(int i=0;i<len;i++)
input[i]=m;
for(int i=len;i<N*len-1;i++)
input[i]=0;
CUDA_CHECK_RETURN(hipMalloc((void**)&dev_hashes,N*sizeof(unsigned long)));
CUDA_CHECK_RETURN(hipMalloc((void**)&dev_input,100*sizeof(unsigned int)));
CUDA_CHECK_RETURN(hipMemcpy(dev_input,input,100*sizeof(unsigned int),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( test_kernel), dim3(N),dim3(1), 0, 0, dev_hashes,dev_input,len,admin);
CUDA_CHECK_RETURN(hipMemcpy(hashes,dev_hashes,N*sizeof(unsigned long),hipMemcpyDeviceToHost));
for(int i=0;i<N;i++)
{
printf("%lx\n", hashes[i]);
}
CUDA_CHECK_RETURN(hipFree(dev_input));
CUDA_CHECK_RETURN(hipFree(dev_hashes));
return 0;
}
| a69bcc41410b9e073f4e5211c888426e34d4986e.cu | #include<iostream>
#include<stdlib.h>
#include<stdio.h>
using namespace std;
#define BUF_LEN 16
#define N 2
typedef unsigned long(*pFdummy)(void);
__device__ __noinline__ unsigned long dummy1()
{
return 0x1111111111111111;
}
__device__ __noinline__ unsigned long dummy2()
{
return 0x2222222222222222;
}
__device__ __noinline__ unsigned long dummy3()
{
return 0x3333333333333333;
}
__device__ __noinline__ unsigned long dummy4()
{
return 0x4444444444444444;
}
__device__ __noinline__ unsigned long dummy5()
{
return 0x5555555555555555;
}
__device__ __noinline__ unsigned long dummy6()
{
return 0x6666666666666666;
}
__device__ __noinline__ unsigned long dummy7()
{
return 0x7777777777777777;
}
__device__ __noinline__ unsigned long dummy8()
{
return 0x8888888888888888;
}
__device__ __noinline__ unsigned long dummy9()
{
return 0x9999999999999999;
}
__device__ unsigned long __noinline__ unsafe(unsigned int *input,int len)
{
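// Note: copies 'len' words into the fixed 16-word shared buffer without a bounds check,
// then dispatches through a shared function-pointer table indexed by a djb2 hash of the
// buffer -- apparently a deliberate demo of corrupting shared memory to redirect the call.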
__shared__ unsigned int buf[BUF_LEN];
__shared__ pFdummy fp[8];
fp[0]=dummy1;
fp[1]=dummy2;
fp[2]=dummy3;
fp[3]=dummy4;
fp[4]=dummy5;
fp[5]=dummy6;
fp[6]=dummy7;
fp[7]=dummy8;
unsigned int hash=5381;
//copy input to buf
//printf("%x %x %x");
printf("%d\n",len);
//printf("%p\n",&buf[21]);
//printf("%p\n",&fp[5]);
printf("%p\n",dummy9);
if(blockIdx.x==0)
for(int i=0;i<len;i++)
{
buf[i]=input[i];
}
//buf[-6]=input[0]; // in shared memory
//djb2
for(int i=0;i<BUF_LEN;i++)
{
hash=((hash<<5)+hash)+buf[i];
printf("%d\n", hash%8 );
}
return (unsigned long) (fp[hash%8])();
}
__global__ void test_kernel(unsigned long *hashes,unsigned int *input,int len,int admin)
{
unsigned long my_hash;
//int m;
//m=*len;
int idx=blockDim.x*blockIdx.x+threadIdx.x;
printf("blockid: %d,idx: %d, len: %d\n",blockIdx.x, idx, len);
if(admin)
{ my_hash=dummy9();
//my_hash=dummy8();
// printf("%p\n",&idx);
}
else
{
if(idx==0)
my_hash=unsafe(input+(len*idx),len);
else
my_hash=unsafe(input+(len*idx),len-1);
}
hashes[idx]=my_hash;
}
static void checkCudaErrorAux(const char*file,unsigned line,const char*statement,cudaError_t error)
{
if(error==cudaSuccess)
return;
cout<<statement<<"returned:"<<cudaGetErrorString(error)<<"at file:"<<file<<"line:"<<line<<endl;
exit(1);
}
#define CUDA_CHECK_RETURN(value) checkCudaErrorAux(__FILE__,__LINE__,#value,value)
int main()
{
unsigned int input[100];
int len=27,admin=0;
unsigned long hashes[N];
unsigned long *dev_hashes;
unsigned int *dev_input;
unsigned int m=0;
m=0x2d8;
//m=0x450;
//cout<<"start!"<<endl;
for(int i=0;i<len;i++)
input[i]=m;
for(int i=len;i<N*len-1;i++)
input[i]=0;
CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_hashes,N*sizeof(unsigned long)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_input,100*sizeof(unsigned int)));
CUDA_CHECK_RETURN(cudaMemcpy(dev_input,input,100*sizeof(unsigned int),cudaMemcpyHostToDevice));
test_kernel<<<N,1>>>(dev_hashes,dev_input,len,admin);
CUDA_CHECK_RETURN(cudaMemcpy(hashes,dev_hashes,N*sizeof(unsigned long),cudaMemcpyDeviceToHost));
for(int i=0;i<N;i++)
{
printf("%lx\n", hashes[i]);
}
CUDA_CHECK_RETURN(cudaFree(dev_input));
CUDA_CHECK_RETURN(cudaFree(dev_hashes));
return 0;
}
|
64b10eab71f85b32ceed5b92b1109288670cfbea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
////////////////////////////////////////////////////////////////////////////////
#include "lbann/execution_algorithms/kfac/kfac_block_bn.hpp"
namespace lbann {
namespace {
template <typename TensorDataType>
__global__ void kfac_compute_bn_factor_data2col_kernel(
const TensorDataType * __restrict__ activations,
const TensorDataType * __restrict__ errors,
const TensorDataType * __restrict__ scales,
const TensorDataType * __restrict__ biases,
TensorDataType * __restrict__ cols,
const size_t batch_size,
const size_t num_channels,
const size_t spatial_prod,
const size_t num_threads) { // = batch_size*num_channels*spatial_prod
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if(gid < num_threads) {
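// Each thread handles one (spatial, sample, channel) element and emits two entries:
// error * normalized activation (the scale-factor term) and the raw error (the bias term),
// laid out as two adjacent channel blocks per sample/spatial position in the output columns.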
const size_t i_c = gid%num_channels;
const size_t i_n = (gid/num_channels)%batch_size;
const size_t i_s = gid/num_channels/batch_size;
const auto scale = scales[i_c];
const auto bias = biases[i_c];
const auto i_act = i_s+i_c*spatial_prod+i_n*spatial_prod*num_channels;
const auto error = errors[i_act];
const auto act = (activations[i_act]-bias)/scale;
const auto i_out = i_c+i_n*num_channels*2 + i_s*(num_channels*2*batch_size);
cols[i_out] = error*act;
cols[i_out+num_channels] = error;
}
}
} // namespace
template <>
void kfac_bn_util::compute_bn_factor_data2col(
const El::Matrix<DataType, El::Device::GPU>& activations,
const El::Matrix<DataType, El::Device::GPU>& errors,
const El::Matrix<DataType, El::Device::GPU>& scales,
const El::Matrix<DataType, El::Device::GPU>& biases,
El::Matrix<DataType, El::Device::GPU>& cols,
const size_t batch_size,
const size_t num_channels,
const size_t spatial_prod,
const El::SyncInfo<El::Device::GPU>& sync_info) {
constexpr size_t block_size = 256;
const size_t num_threads = batch_size * num_channels * spatial_prod;
const size_t grid_size = (num_threads + block_size - 1) / block_size;
if (grid_size > 0) {
hydrogen::gpu::LaunchKernel(
kfac_compute_bn_factor_data2col_kernel<DataType>,
grid_size, block_size, 0, sync_info,
activations.LockedBuffer(),
errors.LockedBuffer(),
scales.LockedBuffer(),
biases.LockedBuffer(),
cols.Buffer(),
batch_size, num_channels, spatial_prod,
num_threads);
}
}
} // namespace lbann
| 64b10eab71f85b32ceed5b92b1109288670cfbea.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
////////////////////////////////////////////////////////////////////////////////
#include "lbann/execution_algorithms/kfac/kfac_block_bn.hpp"
namespace lbann {
namespace {
template <typename TensorDataType>
__global__ void kfac_compute_bn_factor_data2col_kernel(
const TensorDataType * __restrict__ activations,
const TensorDataType * __restrict__ errors,
const TensorDataType * __restrict__ scales,
const TensorDataType * __restrict__ biases,
TensorDataType * __restrict__ cols,
const size_t batch_size,
const size_t num_channels,
const size_t spatial_prod,
const size_t num_threads) { // = batch_size*num_channels*spatial_prod
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if(gid < num_threads) {
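// Each thread handles one (spatial, sample, channel) element and emits two entries:
// error * normalized activation (the scale-factor term) and the raw error (the bias term),
// laid out as two adjacent channel blocks per sample/spatial position in the output columns.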
const size_t i_c = gid%num_channels;
const size_t i_n = (gid/num_channels)%batch_size;
const size_t i_s = gid/num_channels/batch_size;
const auto scale = scales[i_c];
const auto bias = biases[i_c];
const auto i_act = i_s+i_c*spatial_prod+i_n*spatial_prod*num_channels;
const auto error = errors[i_act];
const auto act = (activations[i_act]-bias)/scale;
const auto i_out = i_c+i_n*num_channels*2 + i_s*(num_channels*2*batch_size);
cols[i_out] = error*act;
cols[i_out+num_channels] = error;
}
}
} // namespace
template <>
void kfac_bn_util::compute_bn_factor_data2col(
const El::Matrix<DataType, El::Device::GPU>& activations,
const El::Matrix<DataType, El::Device::GPU>& errors,
const El::Matrix<DataType, El::Device::GPU>& scales,
const El::Matrix<DataType, El::Device::GPU>& biases,
El::Matrix<DataType, El::Device::GPU>& cols,
const size_t batch_size,
const size_t num_channels,
const size_t spatial_prod,
const El::SyncInfo<El::Device::GPU>& sync_info) {
constexpr size_t block_size = 256;
const size_t num_threads = batch_size * num_channels * spatial_prod;
const size_t grid_size = (num_threads + block_size - 1) / block_size;
if (grid_size > 0) {
hydrogen::gpu::LaunchKernel(
kfac_compute_bn_factor_data2col_kernel<DataType>,
grid_size, block_size, 0, sync_info,
activations.LockedBuffer(),
errors.LockedBuffer(),
scales.LockedBuffer(),
biases.LockedBuffer(),
cols.Buffer(),
batch_size, num_channels, spatial_prod,
num_threads);
}
}
} // namespace lbann
|
73b6d8c44d02665bd1c355b7a720545c1363fb06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdio>
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(void)
{
printf("Hello World from CPU!\n");
std::cout << "hello c++" << std::endl;
hipLaunchKernelGGL(( helloFromGPU) , dim3(1), dim3(10), 0, 0, );
hipDeviceReset();
return 0;
}
| 73b6d8c44d02665bd1c355b7a720545c1363fb06.cu | #include <iostream>
#include <cstdio>
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(void)
{
printf("Hello World from CPU!\n");
std::cout << "hello c++" << std::endl;
helloFromGPU <<<1, 10>>>();
cudaDeviceReset();
return 0;
}
|
1609cfe5f6e91b2f10818c26e8a3d129141befb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file kernel.cu
*
* @brief The CUDA kernel that will run.
*/
#include "kernel.h"
#include "mpz.h" // multiple precision cuda code
#include "cuda_string.h"
#include <stdio.h>
char *devA;
char *devB;
char *devC;
/**
* for i in 0...count:
* devC[i] = devA[i] + devB[i]
*/
__global__ void additionKernel(char *devA, char *devB, char *devC, unsigned count) {
char str[STRING_MAX_SIZE];
char *global_str;
int threadId = threadIdx.x;
int numThreads = blockDim.x;
int index;
mpz_t sum;
mpz_t op1;
mpz_t op2;
mpz_init(&op1);
mpz_init(&op2);
mpz_init(&sum);
for (index = threadId; index < count; index += numThreads) {
mpz_set_str(&op1, devA + (index * STRING_MAX_SIZE));
mpz_set_str(&op2, devB + (index * STRING_MAX_SIZE));
mpz_add(&sum, &op1, &op2);
mpz_get_str(&sum, str, STRING_MAX_SIZE);
global_str = devC + (index * STRING_MAX_SIZE);
memcpy(global_str, str, cuda_strlen(str) + 1);
}
mpz_destroy(&sum);
mpz_destroy(&op1);
mpz_destroy(&op2);
}
void run_addition_kernel(char *A, char *B, char *C, unsigned num_strings) {
size_t size = num_strings * STRING_MAX_SIZE;
hipMalloc(&devA, size);
hipMalloc(&devB, size);
hipMalloc(&devC, size);
hipMemset(&devA, 0, size);
hipMemset(&devB, 0, size);
hipMemset(&devC, 0, size);
hipMemcpy(devA, A, size, hipMemcpyHostToDevice);
hipMemcpy(devB, B, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( additionKernel), dim3(1),dim3(32), 0, 0, devA, devB, devC, num_strings);
hipMemcpy(C, devC, size, hipMemcpyDeviceToHost);
}
| 1609cfe5f6e91b2f10818c26e8a3d129141befb8.cu | /**
* @file kernel.cu
*
* @brief The CUDA kernel that will run.
*/
#include "kernel.h"
#include "mpz.h" // multiple precision cuda code
#include "cuda_string.h"
#include <stdio.h>
char *devA;
char *devB;
char *devC;
/**
* for i in 0...count:
* devC[i] = devA[i] + devB[i]
*/
__global__ void additionKernel(char *devA, char *devB, char *devC, unsigned count) {
char str[STRING_MAX_SIZE];
char *global_str;
int threadId = threadIdx.x;
int numThreads = blockDim.x;
int index;
mpz_t sum;
mpz_t op1;
mpz_t op2;
mpz_init(&op1);
mpz_init(&op2);
mpz_init(&sum);
for (index = threadId; index < count; index += numThreads) {
mpz_set_str(&op1, devA + (index * STRING_MAX_SIZE));
mpz_set_str(&op2, devB + (index * STRING_MAX_SIZE));
mpz_add(&sum, &op1, &op2);
mpz_get_str(&sum, str, STRING_MAX_SIZE);
global_str = devC + (index * STRING_MAX_SIZE);
memcpy(global_str, str, cuda_strlen(str) + 1);
}
mpz_destroy(&sum);
mpz_destroy(&op1);
mpz_destroy(&op2);
}
void run_addition_kernel(char *A, char *B, char *C, unsigned num_strings) {
size_t size = num_strings * STRING_MAX_SIZE;
cudaMalloc(&devA, size);
cudaMalloc(&devB, size);
cudaMalloc(&devC, size);
cudaMemset(&devA, 0, size);
cudaMemset(&devB, 0, size);
cudaMemset(&devC, 0, size);
cudaMemcpy(devA, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(devB, B, size, cudaMemcpyHostToDevice);
additionKernel<<<1,32>>>(devA, devB, devC, num_strings);
cudaMemcpy(C, devC, size, cudaMemcpyDeviceToHost);
}
|
35fdab8fae7daece9a402118c2da52289f2aadf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "headers/myHeaders.h"
#include "headers/myUtilityFunctions.h"
using namespace std;
int main(int argc, char const *argv[])
{
//create array at host : initialize accordingly
cellType *h_array;
h_array = create_array_host();
//initialize base row arguments (cellType *h_array, int rowNumber, int mode, int value)
// : mode =1 for random initialization, put any value in that case
initialize_this_row(h_array, 0, 0, 0);
initialize_this_col(h_array, 0, 0, 0);
//Create array at device
cellType *d_array;
hipMalloc((void**) &d_array, sizeof(cellType)*(nRows*TOTAL_COLS));
//copy host array to device array, if needed
copy_host_to_device(h_array, d_array);
// create/initialize and transfer other resources and pass to the function
//int W = nRows;
/*int h_v[5] = {0, 10, 40, 30, 50};
int h_w[5] = {0, 5, 4, 6, 3};*/
int *h_v = create_array_host_1D(nRows);
initialize_this_1D_array(h_v, nRows);
int *d_v;
hipMalloc((void**) &d_v, sizeof(int)*(nRows));
copy_host_to_device_1D(h_v, d_v, nRows);
int *h_w = create_array_host_1D(nRows);
initialize_this_1D_array(h_w, nRows);
int *d_w;
hipMalloc((void**) &d_w, sizeof(int)*(nRows));
copy_host_to_device_1D(h_w, d_w, nRows);
//configure kernel
configure_kernal(TOTAL_COLS);
GpuTimer phase1;
phase1.Start();
//execute on GPU, row by row
for (int i = 1; i < nRows; ++i)
{
hipLaunchKernelGGL(( update_array_gpu), dim3(dim3(g,1,1)), dim3(dim3(x,1,1)), 0, 0, i, nCols, d_array, d_v, d_w);
}
phase1.Stop();
cout <<"Time (Basic GPU): " <<phase1.Elapsed()<< " Milli Seconds\n";
//copy back to cpu
copy_device_to_host(h_array, d_array);
//Access the resultant matrix : dump into output file
//write_array_console(h_array);
ofstream myfile ("files_output/o_gpu_basic.txt");
write_array_file(h_array, myfile);
return 0;
}
__global__ void update_array_gpu(int i, int numberOfThreadsRequired, cellType *d_array, int *d_v, int *d_w)
{
long j=blockIdx.x *blockDim.x + threadIdx.x + 1;
if (j>= numberOfThreadsRequired || j < 1)
{}
else
{
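// Looks like the classic 0/1 knapsack recurrence: 'a' is the best value without item i
// (row i-1, same capacity j), 'b' adds item i's value to the best value at the reduced
// capacity j - w[i]; the ternary below keeps 'a' when item i does not fit or is not better.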
int j_ext = j - d_w[i];
if (j_ext <= 0)
j_ext = 0;
int a = d_array(i-1,j);
int b = d_v[i] + d_array(i-1,j_ext);
(( (d_w[i]) > j || (a >= b)) ? d_array(i,j) = a : d_array(i,j) = b );
}
}
| 35fdab8fae7daece9a402118c2da52289f2aadf8.cu |
#include "headers/myHeaders.h"
#include "headers/myUtilityFunctions.h"
using namespace std;
int main(int argc, char const *argv[])
{
//create array at host : initialize accordingly
cellType *h_array;
h_array = create_array_host();
//initialize base row arguments (cellType *h_array, int rowNumber, int mode, int value)
// : mode =1 for random initialization, put any value in that case
initialize_this_row(h_array, 0, 0, 0);
initialize_this_col(h_array, 0, 0, 0);
//Create array at device
cellType *d_array;
cudaMalloc((void**) &d_array, sizeof(cellType)*(nRows*TOTAL_COLS));
//copy host array to device array, if needed
copy_host_to_device(h_array, d_array);
// create/initialize and transfer other resources and pass to the function
//int W = nRows;
/*int h_v[5] = {0, 10, 40, 30, 50};
int h_w[5] = {0, 5, 4, 6, 3};*/
int *h_v = create_array_host_1D(nRows);
initialize_this_1D_array(h_v, nRows);
int *d_v;
cudaMalloc((void**) &d_v, sizeof(int)*(nRows));
copy_host_to_device_1D(h_v, d_v, nRows);
int *h_w = create_array_host_1D(nRows);
initialize_this_1D_array(h_w, nRows);
int *d_w;
cudaMalloc((void**) &d_w, sizeof(int)*(nRows));
copy_host_to_device_1D(h_w, d_w, nRows);
//configure kernel
configure_kernal(TOTAL_COLS);
GpuTimer phase1;
phase1.Start();
//execute on GPU, row by row
for (int i = 1; i < nRows; ++i)
{
update_array_gpu<<<dim3(g,1,1), dim3(x,1,1)>>>(i, nCols, d_array, d_v, d_w);
}
phase1.Stop();
cout <<"Time (Basic GPU): " <<phase1.Elapsed()<< " Milli Seconds\n";
//copy back to cpu
copy_device_to_host(h_array, d_array);
//Access the resultant matrix : dump into output file
//write_array_console(h_array);
ofstream myfile ("files_output/o_gpu_basic.txt");
write_array_file(h_array, myfile);
return 0;
}
__global__ void update_array_gpu(int i, int numberOfThreadsRequired, cellType *d_array, int *d_v, int *d_w)
{
long j=blockIdx.x *blockDim.x + threadIdx.x + 1;
if (j>= numberOfThreadsRequired || j < 1)
{}
else
{
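// Looks like the classic 0/1 knapsack recurrence: 'a' is the best value without item i
// (row i-1, same capacity j), 'b' adds item i's value to the best value at the reduced
// capacity j - w[i]; the ternary below keeps 'a' when item i does not fit or is not better.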
int j_ext = j - d_w[i];
if (j_ext <= 0)
j_ext = 0;
int a = d_array(i-1,j);
int b = d_v[i] + d_array(i-1,j_ext);
(( (d_w[i]) > j || (a >= b)) ? d_array(i,j) = a : d_array(i,j) = b );
}
}
|
3140bf4e0a237c1627f5579cc53f84a1c28ee425.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cusparse_v2.h>
#include <hipsparse.h>
#include <hip/hip_runtime.h>
#include "loadMatrixMarket.h"
//#define FLOAT
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cusparseErrCheck(stat) { cusparseErrCheck_((stat), __FILE__, __LINE__); }
void cusparseErrCheck_(hipsparseStatus_t stat, const char *file, int line) {
if (stat != HIPSPARSE_STATUS_SUCCESS) {
fprintf(stderr, "CUSPARSE Error: %d %s %d\n", stat, file, line);
}
}
int main(int argc, char **argv)
{
if (argc < 2) {
fprintf(
stderr,
"-- Usage examples --\n"
" %s inline_1.mtx type: run with inline_1 matrix in matrix market format\n",
argv[0]);
return -1;
}
CSR matrixA;
int outputbase = 0;
loadMatrixMarket(argv[1], &matrixA, outputbase, 0 /*transpose =false*/);
int n = matrixA.n;
int *CsrRowPtrA = matrixA.rowptr;
int *CsrColIndA = matrixA.colidx;
// index pointer on device
int *dCsrRowPtrA;
int *dCsrColIndA;
#ifdef FLOAT
float alpha = (float)1.0;
float *CsrValA = (float*)malloc(matrixA.nnz*sizeof(float));
for(int i =0; i < matrixA.nnz; i++){
CsrValA[i] = (float) matrixA.values[i];
}
float *Y = (float*)malloc(n*sizeof(float));
float *X = (float*)malloc(n*sizeof(float));
for (int i = 0; i < n; i++) X[i] = (float) 1.0;
// device
float *dCsrValA;
float *dX;
float *dZ; // intermediate solution Lz =b
float *dY;
#else
double alpha = (double)1.0;
double *CsrValA = matrixA.values;
double *Y = (double*)malloc(n*sizeof(double));
double *X = (double*)malloc(n*sizeof(double));
for (int i = 0; i < n; i++) X[i] = (double)1.0;
//device
double *dCsrValA;
double *dX;
double *dZ; // intermediate solution Lz =b
double *dY;
#endif
hipsparseHandle_t handle = 0;
// Create the cuSPARSE handle
cusparseErrCheck(hipsparseCreate(&handle));
// Allocate device memory to store the sparse CSR representation of A
cudaErrCheck(hipMalloc((void **)&dCsrRowPtrA, sizeof(int) * (n+1)));
cudaErrCheck(hipMalloc((void **)&dCsrColIndA, sizeof(int) * matrixA.nnz));
#ifdef FLOAT
cudaErrCheck(hipMalloc((void **)&dCsrValA, sizeof(float) * matrixA.nnz));
#else
cudaErrCheck(hipMalloc((void **)&dCsrValA, sizeof(double) * matrixA.nnz));
#endif
// Allocate device memory to store the X and Y
#ifdef FLOAT
cudaErrCheck(hipMalloc((void **)&dX, sizeof(float) * n));
cudaErrCheck(hipMalloc((void **)&dY, sizeof(float) * n));
cudaErrCheck(hipMalloc((void **)&dZ, sizeof(float) * n));
#else
cudaErrCheck(hipMalloc((void **)&dX, sizeof(double) * n));
cudaErrCheck(hipMalloc((void **)&dY, sizeof(double) * n));
cudaErrCheck(hipMalloc((void **)&dZ, sizeof(double) * n));
#endif
// transfer data to device
// Transfer the input vectors and dense matrix A to the device
cudaErrCheck(hipMemcpy(dCsrRowPtrA, CsrRowPtrA, sizeof(int) * (n+1), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(dCsrColIndA, CsrColIndA, sizeof(int) * matrixA.nnz, hipMemcpyHostToDevice));
#ifdef FLOAT
cudaErrCheck(hipMemcpy(dCsrValA, CsrValA, sizeof(float) * matrixA.nnz, hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(dX, X, sizeof(float) * n, hipMemcpyHostToDevice));
#else
cudaErrCheck(hipMemcpy(dCsrValA, CsrValA, sizeof(double) * matrixA.nnz, hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(dX, X, sizeof(double) * n, hipMemcpyHostToDevice));
#endif
// Create descriptor A
hipsparseMatDescr_t desc_A = 0;
cusparseErrCheck(hipsparseCreateMatDescr(&desc_A));
cusparseErrCheck(hipsparseSetMatType(desc_A, HIPSPARSE_MATRIX_TYPE_GENERAL));
cusparseErrCheck(hipsparseSetMatIndexBase(desc_A, HIPSPARSE_INDEX_BASE_ZERO));
// create descriptor L
hipsparseMatDescr_t desc_L = 0;
hipsparseCreateMatDescr(&desc_L);
hipsparseSetMatIndexBase(desc_L, HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetMatType(desc_L, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatFillMode(desc_L, HIPSPARSE_FILL_MODE_LOWER);
hipsparseSetMatDiagType(desc_L, HIPSPARSE_DIAG_TYPE_UNIT);
hipsparseMatDescr_t desc_U = 0;
hipsparseCreateMatDescr(&desc_U);
hipsparseSetMatIndexBase(desc_U, HIPSPARSE_INDEX_BASE_ONE);
hipsparseSetMatType(desc_U, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatFillMode(desc_U, HIPSPARSE_FILL_MODE_UPPER);
hipsparseSetMatDiagType(desc_U, HIPSPARSE_DIAG_TYPE_NON_UNIT);
// Create an empty info structure
csrilu02Info_t info_A = 0;
csrsv2Info_t info_L = 0;
csrsv2Info_t info_U = 0;
hipsparseCreateCsrilu02Info(&info_A);
hipsparseCreateCsrsv2Info(&info_L);
hipsparseCreateCsrsv2Info(&info_U);
// Query how much memory is used by csrilu02 and csrsv2, and allocate the buffer
int pBufferSize_A;
int pBufferSize_L;
int pBufferSize_U;
// Timing variables
hipEvent_t start;
hipEvent_t stop;
cudaErrCheck(hipEventCreate(&start));
cudaErrCheck(hipEventCreate(&stop));
#ifdef FLOAT
hipsparseScsrilu02_bufferSize(handle, n, matrixA.nnz,
desc_A, dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_A, &pBufferSize_A);
hipsparseScsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz,
desc_L, dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_L, &pBufferSize_L);
hipsparseScsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz,
desc_U, dCsrValA, dCsrRowPtrA, dCsrColIndA, info_U,&pBufferSize_U);
#else
hipsparseDcsrilu02_bufferSize(handle, n, matrixA.nnz,
desc_A, dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_A, &pBufferSize_A);
hipsparseDcsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz,
desc_L, dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_L, &pBufferSize_L);
hipsparseDcsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz,
desc_U, dCsrValA, dCsrRowPtrA, dCsrColIndA, info_U,&pBufferSize_U);
#endif
int pBufferSize = max(pBufferSize_A, max(pBufferSize_L, pBufferSize_U));
// pBuffer returned by hipMalloc is automatically aligned to 128 bytes.
void *pBuffer = 0;
hipMalloc((void**)&pBuffer, pBufferSize);
// Timing the analysis
hipEventRecord(start);
// Perform analysis of ILU0 on A
const hipsparseSolvePolicy_t policy_A = HIPSPARSE_SOLVE_POLICY_NO_LEVEL;
#ifdef FLOAT
hipsparseScsrilu02_analysis(handle, n, matrixA.nnz, desc_A,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_A,
policy_A, pBuffer);
#else
hipsparseDcsrilu02_analysis(handle, n, matrixA.nnz, desc_A,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_A,
policy_A, pBuffer);
#endif
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
float time_symbolique = milliseconds;
int structural_zero;
hipsparseStatus_t status = hipsparseXcsrilu02_zeroPivot(handle, info_A, &structural_zero);
if (HIPSPARSE_STATUS_ZERO_PIVOT == status){
printf("A(%d,%d) is missing\n", structural_zero, structural_zero);
return 0;
}
// Perform analysis of triangular solve on L
const hipsparseSolvePolicy_t policy_L = HIPSPARSE_SOLVE_POLICY_NO_LEVEL;
#ifdef FLOAT
hipsparseScsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_L, policy_L, pBuffer);
#else
hipsparseDcsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_L, policy_L, pBuffer);
#endif
// Perform analysis of triangular solve on U
const hipsparseSolvePolicy_t policy_U = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
#ifdef FLOAT
hipsparseScsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_U, policy_U, pBuffer);
#else
hipsparseDcsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_U, policy_U, pBuffer);
#endif
// Numerical factorization
int numerical_zero;
// Timing the numerical factorization
hipEventRecord(start);
#ifdef FLOAT
hipsparseScsrilu02(handle, n, matrixA.nnz, desc_A,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_A, policy_A, pBuffer);
#else
hipsparseDcsrilu02(handle, n, matrixA.nnz, desc_A,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_A, policy_A, pBuffer);
#endif
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
float time_numeric = milliseconds;
status = hipsparseXcsrilu02_zeroPivot(handle, info_A, &numerical_zero);
if (HIPSPARSE_STATUS_ZERO_PIVOT == status){
printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero);
return 0;
}
hipEventRecord(start);
#ifdef FLOAT
// Solve L*z = x
hipsparseScsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, &alpha, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_L,
dX, dZ, policy_L, pBuffer);
// Solve L'*y = z
hipsparseScsrsv2_solve(handle, HIPSPARSE_OPERATION_TRANSPOSE, n, matrixA.nnz, &alpha, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_U,
dZ, dY, policy_U, pBuffer);
#else
// Solve L*z = x
hipsparseDcsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, &alpha, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_L,
dX, dZ, policy_L, pBuffer);
// Solve L'*y = z
hipsparseDcsrsv2_solve(handle, HIPSPARSE_OPERATION_TRANSPOSE, n, matrixA.nnz, &alpha, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_U,
dZ, dY, policy_U, pBuffer);
#endif
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
float time_solve = milliseconds;
#ifdef FLOAT
printf ("SINGLE PRECISION SOLVE IN MILLISECONDS\n ");
#else
printf ("DOUBLE PRECISION SOLVE IN MILLISECONDS\n ");
#endif
printf ("Symbolic = %f\n Numeric = %f \n Symbolic+ Numeric = %f\n Solve = %f\n", time_symbolique, time_numeric, time_symbolique + time_numeric, time_solve);
cudaErrCheck(hipEventDestroy(start));
cudaErrCheck(hipEventDestroy(stop));
free(CsrValA);
free(CsrRowPtrA);
free(CsrColIndA);
free(X);
free(Y);
cudaErrCheck(hipFree(dY));
cudaErrCheck(hipFree(dX));
cudaErrCheck(hipFree(dCsrValA));
cudaErrCheck(hipFree(dCsrRowPtrA));
cudaErrCheck(hipFree(dCsrColIndA));
cudaErrCheck(hipFree(pBuffer));
hipsparseDestroyCsrilu02Info(info_A);
hipsparseDestroyCsrsv2Info(info_L);
hipsparseDestroyCsrsv2Info(info_U);
cusparseErrCheck(hipsparseDestroyMatDescr(desc_A));
cusparseErrCheck(hipsparseDestroyMatDescr(desc_L));
cusparseErrCheck(hipsparseDestroyMatDescr(desc_U));
cusparseErrCheck(hipsparseDestroy(handle));
return 0;
}
| 3140bf4e0a237c1627f5579cc53f84a1c28ee425.cu | #include <stdio.h>
#include <stdlib.h>
#include <cusparse_v2.h>
#include <cusparse.h>
#include <cuda.h>
#include "loadMatrixMarket.h"
//#define FLOAT
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cusparseErrCheck(stat) { cusparseErrCheck_((stat), __FILE__, __LINE__); }
void cusparseErrCheck_(cusparseStatus_t stat, const char *file, int line) {
if (stat != CUSPARSE_STATUS_SUCCESS) {
fprintf(stderr, "CUSPARSE Error: %d %s %d\n", stat, file, line);
}
}
int main(int argc, char **argv)
{
if (argc < 2) {
fprintf(
stderr,
"-- Usage examples --\n"
" %s inline_1.mtx type: run with inline_1 matrix in matrix market format\n",
argv[0]);
return -1;
}
CSR matrixA;
int outputbase = 0;
loadMatrixMarket(argv[1], &matrixA, outputbase, 0 /*transpose =false*/);
int n = matrixA.n;
int *CsrRowPtrA = matrixA.rowptr;
int *CsrColIndA = matrixA.colidx;
// index pointer on device
int *dCsrRowPtrA;
int *dCsrColIndA;
#ifdef FLOAT
float alpha = (float)1.0;
float *CsrValA = (float*)malloc(matrixA.nnz*sizeof(float));
for(int i =0; i < matrixA.nnz; i++){
CsrValA[i] = (float) matrixA.values[i];
}
float *Y = (float*)malloc(n*sizeof(float));
float *X = (float*)malloc(n*sizeof(float));
for (int i = 0; i < n; i++) X[i] = (float) 1.0;
// device
float *dCsrValA;
float *dX;
float *dZ; // intermediate solution Lz =b
float *dY;
#else
double alpha = (double)1.0;
double *CsrValA = matrixA.values;
double *Y = (double*)malloc(n*sizeof(double));
double *X = (double*)malloc(n*sizeof(double));
for (int i = 0; i < n; i++) X[i] = (double)1.0;
//device
double *dCsrValA;
double *dX;
double *dZ; // intermediate solution Lz =b
double *dY;
#endif
cusparseHandle_t handle = 0;
// Create the cuSPARSE handle
cusparseErrCheck(cusparseCreate(&handle));
// Allocate device memory to store the sparse CSR representation of A
cudaErrCheck(cudaMalloc((void **)&dCsrRowPtrA, sizeof(int) * (n+1)));
cudaErrCheck(cudaMalloc((void **)&dCsrColIndA, sizeof(int) * matrixA.nnz));
#ifdef FLOAT
cudaErrCheck(cudaMalloc((void **)&dCsrValA, sizeof(float) * matrixA.nnz));
#else
cudaErrCheck(cudaMalloc((void **)&dCsrValA, sizeof(double) * matrixA.nnz));
#endif
// Allocate device memory to store the X and Y
#ifdef FLOAT
cudaErrCheck(cudaMalloc((void **)&dX, sizeof(float) * n));
cudaErrCheck(cudaMalloc((void **)&dY, sizeof(float) * n));
cudaErrCheck(cudaMalloc((void **)&dZ, sizeof(float) * n));
#else
cudaErrCheck(cudaMalloc((void **)&dX, sizeof(double) * n));
cudaErrCheck(cudaMalloc((void **)&dY, sizeof(double) * n));
cudaErrCheck(cudaMalloc((void **)&dZ, sizeof(double) * n));
#endif
// transfer data to device
// Transfer the input vectors and dense matrix A to the device
cudaErrCheck(cudaMemcpy(dCsrRowPtrA, CsrRowPtrA, sizeof(int) * (n+1), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(dCsrColIndA, CsrColIndA, sizeof(int) * matrixA.nnz, cudaMemcpyHostToDevice));
#ifdef FLOAT
cudaErrCheck(cudaMemcpy(dCsrValA, CsrValA, sizeof(float) * matrixA.nnz, cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(dX, X, sizeof(float) * n, cudaMemcpyHostToDevice));
#else
cudaErrCheck(cudaMemcpy(dCsrValA, CsrValA, sizeof(double) * matrixA.nnz, cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(dX, X, sizeof(double) * n, cudaMemcpyHostToDevice));
#endif
// Create descriptor A
cusparseMatDescr_t desc_A = 0;
cusparseErrCheck(cusparseCreateMatDescr(&desc_A));
cusparseErrCheck(cusparseSetMatType(desc_A, CUSPARSE_MATRIX_TYPE_GENERAL));
cusparseErrCheck(cusparseSetMatIndexBase(desc_A, CUSPARSE_INDEX_BASE_ZERO));
// create descriptor L
cusparseMatDescr_t desc_L = 0;
cusparseCreateMatDescr(&desc_L);
cusparseSetMatIndexBase(desc_L, CUSPARSE_INDEX_BASE_ZERO);
cusparseSetMatType(desc_L, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatFillMode(desc_L, CUSPARSE_FILL_MODE_LOWER);
cusparseSetMatDiagType(desc_L, CUSPARSE_DIAG_TYPE_UNIT);
cusparseMatDescr_t desc_U = 0;
cusparseCreateMatDescr(&desc_U);
cusparseSetMatIndexBase(desc_U, CUSPARSE_INDEX_BASE_ONE);
cusparseSetMatType(desc_U, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatFillMode(desc_U, CUSPARSE_FILL_MODE_UPPER);
cusparseSetMatDiagType(desc_U, CUSPARSE_DIAG_TYPE_NON_UNIT);
// Create an empty info structure
csrilu02Info_t info_A = 0;
csrsv2Info_t info_L = 0;
csrsv2Info_t info_U = 0;
cusparseCreateCsrilu02Info(&info_A);
cusparseCreateCsrsv2Info(&info_L);
cusparseCreateCsrsv2Info(&info_U);
// Query how much memory is used by csrilu02 and csrsv2, and allocate the buffer
int pBufferSize_A;
int pBufferSize_L;
int pBufferSize_U;
// Timing variables
cudaEvent_t start;
cudaEvent_t stop;
cudaErrCheck(cudaEventCreate(&start));
cudaErrCheck(cudaEventCreate(&stop));
#ifdef FLOAT
cusparseScsrilu02_bufferSize(handle, n, matrixA.nnz,
desc_A, dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_A, &pBufferSize_A);
cusparseScsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz,
desc_L, dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_L, &pBufferSize_L);
cusparseScsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz,
desc_U, dCsrValA, dCsrRowPtrA, dCsrColIndA, info_U,&pBufferSize_U);
#else
cusparseDcsrilu02_bufferSize(handle, n, matrixA.nnz,
desc_A, dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_A, &pBufferSize_A);
cusparseDcsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz,
desc_L, dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_L, &pBufferSize_L);
cusparseDcsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz,
desc_U, dCsrValA, dCsrRowPtrA, dCsrColIndA, info_U,&pBufferSize_U);
#endif
int pBufferSize = max(pBufferSize_A, max(pBufferSize_L, pBufferSize_U));
// pBuffer returned by cudaMalloc is automatically aligned to 128 bytes.
void *pBuffer = 0;
cudaMalloc((void**)&pBuffer, pBufferSize);
// Timing the analysis
cudaEventRecord(start);
// Perform analysis of ILU0 on A
const cusparseSolvePolicy_t policy_A = CUSPARSE_SOLVE_POLICY_NO_LEVEL;
#ifdef FLOAT
cusparseScsrilu02_analysis(handle, n, matrixA.nnz, desc_A,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_A,
policy_A, pBuffer);
#else
cusparseDcsrilu02_analysis(handle, n, matrixA.nnz, desc_A,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_A,
policy_A, pBuffer);
#endif
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
float time_symbolique = milliseconds;
int structural_zero;
cusparseStatus_t status = cusparseXcsrilu02_zeroPivot(handle, info_A, &structural_zero);
if (CUSPARSE_STATUS_ZERO_PIVOT == status){
printf("A(%d,%d) is missing\n", structural_zero, structural_zero);
return 0;
}
// Perform analysis of triangular solve on L
const cusparseSolvePolicy_t policy_L = CUSPARSE_SOLVE_POLICY_NO_LEVEL;
#ifdef FLOAT
cusparseScsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_L, policy_L, pBuffer);
#else
cusparseDcsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_L, policy_L, pBuffer);
#endif
// Perform analysis of triangular solve on U
const cusparseSolvePolicy_t policy_U = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
#ifdef FLOAT
cusparseScsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_U, policy_U, pBuffer);
#else
cusparseDcsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA,
info_U, policy_U, pBuffer);
#endif
// Numerical factorization
int numerical_zero;
// Timing the numerical factorization
cudaEventRecord(start);
#ifdef FLOAT
cusparseScsrilu02(handle, n, matrixA.nnz, desc_A,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_A, policy_A, pBuffer);
#else
cusparseDcsrilu02(handle, n, matrixA.nnz, desc_A,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_A, policy_A, pBuffer);
#endif
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
float time_numeric = milliseconds;
status = cusparseXcsrilu02_zeroPivot(handle, info_A, &numerical_zero);
if (CUSPARSE_STATUS_ZERO_PIVOT == status){
printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero);
return 0;
}
cudaEventRecord(start);
#ifdef FLOAT
// Solve L*z = x
cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, &alpha, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_L,
dX, dZ, policy_L, pBuffer);
// Solve L'*y = z
cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_TRANSPOSE, n, matrixA.nnz, &alpha, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_U,
dZ, dY, policy_U, pBuffer);
#else
// Solve L*z = x
cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, matrixA.nnz, &alpha, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_L,
dX, dZ, policy_L, pBuffer);
// Solve L'*y = z
cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_TRANSPOSE, n, matrixA.nnz, &alpha, desc_L,
dCsrValA, dCsrRowPtrA, dCsrColIndA, info_U,
dZ, dY, policy_U, pBuffer);
#endif
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
float time_solve = milliseconds;
#ifdef FLOAT
printf ("SINGLE PRECISION SOLVE IN MILLISECONDS\n ");
#else
printf ("DOUBLE PRECISION SOLVE IN MILLISECONDS\n ");
#endif
printf ("Symbolic = %f\n Numeric = %f \n Symbolic+ Numeric = %f\n Solve = %f\n", time_symbolique, time_numeric, time_symbolique + time_numeric, time_solve);
cudaErrCheck(cudaEventDestroy(start));
cudaErrCheck(cudaEventDestroy(stop));
free(CsrValA);
free(CsrRowPtrA);
free(CsrColIndA);
free(X);
free(Y);
cudaErrCheck(cudaFree(dY));
cudaErrCheck(cudaFree(dX));
cudaErrCheck(cudaFree(dCsrValA));
cudaErrCheck(cudaFree(dCsrRowPtrA));
cudaErrCheck(cudaFree(dCsrColIndA));
cudaErrCheck(cudaFree(pBuffer));
cusparseDestroyCsrilu02Info(info_A);
cusparseDestroyCsrsv2Info(info_L);
cusparseDestroyCsrsv2Info(info_U);
cusparseErrCheck(cusparseDestroyMatDescr(desc_A));
cusparseErrCheck(cusparseDestroyMatDescr(desc_L));
cusparseErrCheck(cusparseDestroyMatDescr(desc_U));
cusparseErrCheck(cusparseDestroy(handle));
return 0;
}
|
3a40b3bc8a75a111d581b8bc0b1ac78b3df58060.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHTensor.hpp"
#include "THHStorage.hpp"
#define MULTIMARGIN_THREADS 128
template <int P, typename Dtype, typename Acctype>
__global__ void cunn_MultiMarginCriterion_updateOutput_kernel(Dtype *output, Dtype *input, THCIndex_t *target, Dtype *weights, int nframe, int dim, bool sizeAverage, Dtype margin)
{
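// One block per sample: threads stride over the classes and accumulate
// max(0, margin - x[target] + x[i])^P (optionally weighted) for i != target into shared
// memory; thread 0 reduces the partial sums and averages over dim (and nframe if sizeAverage).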
__shared__ Acctype buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
Dtype *input_k = input + k*dim;
Dtype *output_k = output + k;
int target_k = ((int)target[k]) - TH_INDEX_BASE;
Dtype input_target_k = input_k[target_k];
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i = i_start; i < i_end; i += i_step)
{
Dtype z = margin - input_target_k + input_k[i];
if (i == target_k)
continue;
if (z > 0) {
Dtype h = (P==1) ? z : z*z;
if(weights)
h *= weights[target_k];
buffer[threadIdx.x] += h;
}
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
Acctype sum = 0;
for (int i=0; i < blockDim.x; i++)
sum += buffer[i];
*output_k = ScalarConvert<Acctype, Dtype>::to(sum/dim);
if(sizeAverage)
*output_k /= nframe;
}
}
template <int P, typename Dtype, typename Acctype>
__global__ void cunn_MultiMarginCriterion_updateGradInput_kernel(Dtype *gradInput,
Dtype *gradOutput,
Dtype *input,
THCIndex_t *target,
Dtype *weights,
int nframe,
int dim,
bool sizeAverage,
Dtype margin,
int reduce)
{
__shared__ Acctype buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
Dtype *input_k = input + k*dim;
Dtype *gradInput_k = gradInput + k*dim;
int target_k = ((int)target[k]) - TH_INDEX_BASE;
Dtype input_target_k = input_k[target_k];
Dtype *gradOutput_k = gradOutput;
if (!reduce) {
gradOutput_k += k;
}
Acctype g = (sizeAverage && reduce ? 1./((Acctype)(nframe*dim)) : 1./((Acctype)dim));
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step)
{
Dtype z = margin - input_target_k + input_k[i];
if (i == target_k)
continue;
if (z > 0)
{
Dtype h = ScalarConvert<Acctype, Dtype>::to((P == 1) ? g : 2*g*z);
if(weights)
h *= weights[target_k];
buffer[threadIdx.x] -= h;
gradInput_k[i] = h;
}
else
gradInput_k[i] = ScalarConvert<int, Dtype>::to(0);
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
Acctype gradInput_target_k = 0;
for (int i=0; i<blockDim.x; i++)
gradInput_target_k += buffer[i];
gradInput_k[target_k] = ScalarConvert<Acctype, Dtype>::to(gradInput_target_k);
}
for (int i=i_start; i<i_end; i+= i_step)
{
gradInput_k[i] *= * gradOutput_k;
}
}
#include "generic/MultiMarginCriterion.cu"
#include "THHGenerateFloatTypes.h"
#undef MULTIMARGIN_THREADS
| 3a40b3bc8a75a111d581b8bc0b1ac78b3df58060.cu | #include "THCUNN.h"
#include "common.h"
#include "TH/THHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCTensor.hpp"
#include "THCStorage.hpp"
#define MULTIMARGIN_THREADS 128
template <int P, typename Dtype, typename Acctype>
__global__ void cunn_MultiMarginCriterion_updateOutput_kernel(Dtype *output, Dtype *input, THCIndex_t *target, Dtype *weights, int nframe, int dim, bool sizeAverage, Dtype margin)
{
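// One block per sample: threads stride over the classes and accumulate
// max(0, margin - x[target] + x[i])^P (optionally weighted) for i != target into shared
// memory; thread 0 reduces the partial sums and averages over dim (and nframe if sizeAverage).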
__shared__ Acctype buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
Dtype *input_k = input + k*dim;
Dtype *output_k = output + k;
int target_k = ((int)target[k]) - TH_INDEX_BASE;
Dtype input_target_k = input_k[target_k];
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i = i_start; i < i_end; i += i_step)
{
Dtype z = margin - input_target_k + input_k[i];
if (i == target_k)
continue;
if (z > 0) {
Dtype h = (P==1) ? z : z*z;
if(weights)
h *= weights[target_k];
buffer[threadIdx.x] += h;
}
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
Acctype sum = 0;
for (int i=0; i < blockDim.x; i++)
sum += buffer[i];
*output_k = ScalarConvert<Acctype, Dtype>::to(sum/dim);
if(sizeAverage)
*output_k /= nframe;
}
}
template <int P, typename Dtype, typename Acctype>
__global__ void cunn_MultiMarginCriterion_updateGradInput_kernel(Dtype *gradInput,
Dtype *gradOutput,
Dtype *input,
THCIndex_t *target,
Dtype *weights,
int nframe,
int dim,
bool sizeAverage,
Dtype margin,
int reduce)
{
__shared__ Acctype buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
Dtype *input_k = input + k*dim;
Dtype *gradInput_k = gradInput + k*dim;
int target_k = ((int)target[k]) - TH_INDEX_BASE;
Dtype input_target_k = input_k[target_k];
Dtype *gradOutput_k = gradOutput;
if (!reduce) {
gradOutput_k += k;
}
Acctype g = (sizeAverage && reduce ? 1./((Acctype)(nframe*dim)) : 1./((Acctype)dim));
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step)
{
Dtype z = margin - input_target_k + input_k[i];
if (i == target_k)
continue;
if (z > 0)
{
Dtype h = ScalarConvert<Acctype, Dtype>::to((P == 1) ? g : 2*g*z);
if(weights)
h *= weights[target_k];
buffer[threadIdx.x] -= h;
gradInput_k[i] = h;
}
else
gradInput_k[i] = ScalarConvert<int, Dtype>::to(0);
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
Acctype gradInput_target_k = 0;
for (int i=0; i<blockDim.x; i++)
gradInput_target_k += buffer[i];
gradInput_k[target_k] = ScalarConvert<Acctype, Dtype>::to(gradInput_target_k);
}
for (int i=i_start; i<i_end; i+= i_step)
{
gradInput_k[i] *= * gradOutput_k;
}
}
#include "generic/MultiMarginCriterion.cu"
#include "THCGenerateFloatTypes.h"
#undef MULTIMARGIN_THREADS
|