hip_filename: string (length 5 to 84)
hip_content: string (length 79 to 9.69M)
cuda_filename: string (length 4 to 83)
cuda_content: string (length 19 to 9.69M)
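Each row pairs a CUDA source file (cuda_filename, cuda_content) with the HIP version produced by hipify (hip_filename, hip_content). The rows below show the characteristic rewrites: runtime headers become hip/hip_runtime.h, cudaMalloc/cudaMemcpy*/cudaFree and the stream and event calls get hip* equivalents, and triple-chevron kernel launches become hipLaunchKernelGGL calls. The following minimal CUDA program is a sketch written for this card, not taken from any row; the trailing comments indicate the HIP call that hipify would substitute.

#include <cuda_runtime.h>                                  // -> #include <hip/hip_runtime.h>

__global__ void scale(const double *a, double *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;         // one thread per element
    if (i < n) b[i] = 10.0 * a[i];
}

int main() {
    const int n = 1 << 20;
    double *aDev, *bDev;
    cudaMalloc(&aDev, n * sizeof(double));                 // -> hipMalloc(&aDev, ...)
    cudaMalloc(&bDev, n * sizeof(double));                 // -> hipMalloc(&bDev, ...)
    scale<<<(n + 255) / 256, 256>>>(aDev, bDev, n);        // -> hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, aDev, bDev, n)
    cudaDeviceSynchronize();                               // -> hipDeviceSynchronize()
    cudaFree(aDev);                                        // -> hipFree(aDev)
    cudaFree(bDev);                                        // -> hipFree(bDev)
    return 0;
}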
693c4ba22b13bf9703ab60ad1e07f8827e045244.hip
// !!! This is a file automatically generated by hipify!!! #include "utils.h" #include <hip/hip_runtime_api.h> #include <algorithm> #include <vector> /// Memory-bound dummy kernel. Do not edit. __global__ void fastKernel(const double *a, double *b, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= M) return; b[idx] = 10.0 * a[idx]; } /// Compute-bound dummy kernel. Do not edit. __global__ void slowKernel(const double *a, double *b, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= M) return; double x = a[idx]; for (int i = 0; i < 10000; ++i) x *= 1.01; b[idx] = (x != 0.1231 ? 10.0 : -1.0) * a[idx]; } /// Check whether `bHost` contains the correct result. Do not edit. void checkResults(const double *bHost, int N) { for (int i = 0; i < N; ++i) { if (bHost[i] != 100.0 * i) { printf("Incorrect value for i=%d: value before kernel=%.1f " "expected after=%.1f now=%.1f\n", i, 10.0 * i, 100. * i, bHost[i]); exit(1); } } } /// Asynchronously, and in chunks, copy the array to the device, execute the /// kernel and copy the result back. template <typename Kernel> void runAsync(const char *kernelName, Kernel kernel, int N, int chunkSize, int numStreams) { double *aHost, *aDev; double *bHost, *bDev; CUDA_CHECK(hipHostMalloc(&aHost, N * sizeof(double))); CUDA_CHECK(hipHostMalloc(&bHost, N * sizeof(double))); for (int i = 0; i < N; ++i) aHost[i] = 10.0 * i; hipStream_t* streams = new hipStream_t[numStreams]; for (int i = 0; i < numStreams; ++i) hipStreamCreate(streams + i); // TODO 3.a) Allocate chunks and create streams. // hipStream_t stream; // Declaring the stream variable // hipStreamCreate(&stream); // Creating the stream // // Assigning Stream to kernel launch // myKernel<<grid, shmem, stream>>(args); // // Checking if the stream has finished // if (hipStreamQuery(stream) == hipSuccess) cout << "Finished"; // // Waiting for finalization // hipStreamSynchronize(stream); // // De-allocating memory // hipStreamDestroy(stream); CUDA_CHECK(hipMalloc(&aDev, N * sizeof(double))); CUDA_CHECK(hipMalloc(&bDev, N * sizeof(double))); // Instead of benchmark() we use a simplified measure() which invokes the // function only once (to get a cleaner profiling information). double dt = measure([&]() { // TODO 3.b) (1) Upload `a`, (2) launch the kernel, and (3) download // `b` in chunks of size `chunkSize`. // Use streams in a cyclic fashion. // // Note: you can use CUDA_CHECK and CUDA_LAUNCH_EX from // utils.h for error checking. int curStream = 0; for (int j = 0; j < N; j += chunkSize) { int curSize = ::min(chunkSize, N - j); CUDA_CHECK(hipMemcpyAsync(aDev + j, aHost + j, curSize * sizeof(double), hipMemcpyHostToDevice, streams[curStream])); int threads = 1024; int maxBlocks = 65'536; int blocks = (curSize + threads - 1) / threads; for (int i = 0; i < blocks; i += maxBlocks) { int curN = ::min(maxBlocks*threads, N -j - i*threads); int curStart = j + i * threads; int curBlocks = ::min(maxBlocks, blocks - i); // printf("Launching kernel in stream %3d to compute from %12d to %12d with %4d threads and %5d blocks on data of size %12d\n", // curStream, curStart, curStart + curN - 1, threads, curBlocks, curN); CUDA_LAUNCH_EX(kernel, curBlocks, threads, 0, streams[curStream], aDev + curStart, bDev + curStart, curN); } CUDA_CHECK(hipMemcpyAsync(bHost + j, bDev + j, curSize * sizeof(double), hipMemcpyDeviceToHost, streams[curStream])); curStream = (curStream + 1) % numStreams; } for (int i = 0; i < numStreams; ++i) hipStreamSynchronize(streams[i]); // TODO 3.b) Synchronize the streams. 
}); checkResults(bHost, N); printf("async %s N=%9d chunkSize=%9d numStreams=%d time=%fs\n", kernelName, N, chunkSize, numStreams, dt); // TODO: 3.a) Deallocate chunks and destroy streams. for (int i = 0; i < numStreams; ++i) hipStreamDestroy(streams[i]); delete[] streams; CUDA_CHECK(hipFree(aDev)); CUDA_CHECK(hipFree(bDev)); CUDA_CHECK(hipHostFree(bHost)); CUDA_CHECK(hipHostFree(aHost)); } /// Synchronously copy the whole array to the device, execute the kernel and /// copy the result back. Do not edit. template <typename Kernel> void runSync(const char *kernelName, Kernel kernel, int N) { double *aHost; double *bHost; double *aDev; double *bDev; CUDA_CHECK(hipHostMalloc(&aHost, N * sizeof(double))); CUDA_CHECK(hipHostMalloc(&bHost, N * sizeof(double))); CUDA_CHECK(hipMalloc(&aDev, N * sizeof(double))); CUDA_CHECK(hipMalloc(&bDev, N * sizeof(double))); for (int i = 0; i < N; ++i) aHost[i] = 10.0 * i; // Host -> device. double dt1 = measure([&]() { CUDA_CHECK(hipMemcpy(aDev, aHost, N * sizeof(double), hipMemcpyHostToDevice)); }); // Kernel. double dt2 = measure([&]() { // We cannot execute more than maxBlocks blocks, so we split the work // into multiple launches. That's another reason for using chunks. int threads = 1024; int maxBlocks = 65'536; int blocks = (N + threads - 1) / threads; for (int i = 0; i < blocks; i += maxBlocks) { CUDA_LAUNCH(kernel, ::min(maxBlocks, blocks - i), threads, aDev + i * threads, bDev + i * threads, ::min(maxBlocks*threads, N - i*threads)); } }); // Device -> host. double dt3 = measure([&]() { CUDA_CHECK(hipMemcpy(bHost, bDev, N * sizeof(double), hipMemcpyDeviceToHost)); }); checkResults(bHost, N); printf("sync %s N=%9d upload=%fs kernel=%fs download=%fs total=%fs\n", kernelName, N, dt1, dt2, dt3, dt1 + dt2 + dt3); CUDA_CHECK(hipFree(bDev)); CUDA_CHECK(hipFree(aDev)); CUDA_CHECK(hipHostFree(bHost)); CUDA_CHECK(hipHostFree(aHost)); } /// Selection of runs to use for profiling. void profile() { runSync("fastKernel", fastKernel, 100'000'000); runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 4); runSync("slowKernel", slowKernel, 100'000'000); runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 4); hipProfilerStop(); } /// Selection of runs to use for benchmarking. void runBenchmarks() { runSync("fastKernel", fastKernel, 1'000'000); runSync("fastKernel", fastKernel, 100'000'000); runAsync("fastKernel", fastKernel, 100'000'000, 100'000'000, 1); runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 4); runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 8); runAsync("fastKernel", fastKernel, 100'000'000, 1'000'000, 4); runAsync("fastKernel", fastKernel, 100'000'000, 1'000'000, 8); printf("\n"); runSync("slowKernel", slowKernel, 1'000'000); runSync("slowKernel", slowKernel, 100'000'000); runAsync("slowKernel", slowKernel, 100'000'000, 100'000'000, 1); runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 4); runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 8); runAsync("slowKernel", slowKernel, 100'000'000, 1'000'000, 4); runAsync("slowKernel", slowKernel, 100'000'000, 1'000'000, 8); } int main() { // TODO: 3.c.) Enable `profile` and disable `runBenchmarks` to get a // cleaner profiling information. // profile(); runBenchmarks(); }
693c4ba22b13bf9703ab60ad1e07f8827e045244.cu
#include "utils.h" #include <cuda_profiler_api.h> #include <algorithm> #include <vector> /// Memory-bound dummy kernel. Do not edit. __global__ void fastKernel(const double *a, double *b, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= M) return; b[idx] = 10.0 * a[idx]; } /// Compute-bound dummy kernel. Do not edit. __global__ void slowKernel(const double *a, double *b, int M) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= M) return; double x = a[idx]; for (int i = 0; i < 10000; ++i) x *= 1.01; b[idx] = (x != 0.1231 ? 10.0 : -1.0) * a[idx]; } /// Check whether `bHost` contains the correct result. Do not edit. void checkResults(const double *bHost, int N) { for (int i = 0; i < N; ++i) { if (bHost[i] != 100.0 * i) { printf("Incorrect value for i=%d: value before kernel=%.1f " "expected after=%.1f now=%.1f\n", i, 10.0 * i, 100. * i, bHost[i]); exit(1); } } } /// Asynchronously, and in chunks, copy the array to the device, execute the /// kernel and copy the result back. template <typename Kernel> void runAsync(const char *kernelName, Kernel kernel, int N, int chunkSize, int numStreams) { double *aHost, *aDev; double *bHost, *bDev; CUDA_CHECK(cudaMallocHost(&aHost, N * sizeof(double))); CUDA_CHECK(cudaMallocHost(&bHost, N * sizeof(double))); for (int i = 0; i < N; ++i) aHost[i] = 10.0 * i; cudaStream_t* streams = new cudaStream_t[numStreams]; for (int i = 0; i < numStreams; ++i) cudaStreamCreate(streams + i); // TODO 3.a) Allocate chunks and create streams. // cudaStream_t stream; // Declaring the stream variable // cudaStreamCreate(&stream); // Creating the stream // // Assigning Stream to kernel launch // myKernel<<grid, shmem, stream>>(args); // // Checking if the stream has finished // if (cudaStreamQuery(stream) == cudaSuccess) cout << "Finished"; // // Waiting for finalization // cudaStreamSynchronize(stream); // // De-allocating memory // cudaStreamDestroy(stream); CUDA_CHECK(cudaMalloc(&aDev, N * sizeof(double))); CUDA_CHECK(cudaMalloc(&bDev, N * sizeof(double))); // Instead of benchmark() we use a simplified measure() which invokes the // function only once (to get a cleaner profiling information). double dt = measure([&]() { // TODO 3.b) (1) Upload `a`, (2) launch the kernel, and (3) download // `b` in chunks of size `chunkSize`. // Use streams in a cyclic fashion. // // Note: you can use CUDA_CHECK and CUDA_LAUNCH_EX from // utils.h for error checking. int curStream = 0; for (int j = 0; j < N; j += chunkSize) { int curSize = std::min(chunkSize, N - j); CUDA_CHECK(cudaMemcpyAsync(aDev + j, aHost + j, curSize * sizeof(double), cudaMemcpyHostToDevice, streams[curStream])); int threads = 1024; int maxBlocks = 65'536; int blocks = (curSize + threads - 1) / threads; for (int i = 0; i < blocks; i += maxBlocks) { int curN = std::min(maxBlocks*threads, N -j - i*threads); int curStart = j + i * threads; int curBlocks = std::min(maxBlocks, blocks - i); // printf("Launching kernel in stream %3d to compute from %12d to %12d with %4d threads and %5d blocks on data of size %12d\n", // curStream, curStart, curStart + curN - 1, threads, curBlocks, curN); CUDA_LAUNCH_EX(kernel, curBlocks, threads, 0, streams[curStream], aDev + curStart, bDev + curStart, curN); } CUDA_CHECK(cudaMemcpyAsync(bHost + j, bDev + j, curSize * sizeof(double), cudaMemcpyDeviceToHost, streams[curStream])); curStream = (curStream + 1) % numStreams; } for (int i = 0; i < numStreams; ++i) cudaStreamSynchronize(streams[i]); // TODO 3.b) Synchronize the streams. 
}); checkResults(bHost, N); printf("async %s N=%9d chunkSize=%9d numStreams=%d time=%fs\n", kernelName, N, chunkSize, numStreams, dt); // TODO: 3.a) Deallocate chunks and destroy streams. for (int i = 0; i < numStreams; ++i) cudaStreamDestroy(streams[i]); delete[] streams; CUDA_CHECK(cudaFree(aDev)); CUDA_CHECK(cudaFree(bDev)); CUDA_CHECK(cudaFreeHost(bHost)); CUDA_CHECK(cudaFreeHost(aHost)); } /// Synchronously copy the whole array to the device, execute the kernel and /// copy the result back. Do not edit. template <typename Kernel> void runSync(const char *kernelName, Kernel kernel, int N) { double *aHost; double *bHost; double *aDev; double *bDev; CUDA_CHECK(cudaMallocHost(&aHost, N * sizeof(double))); CUDA_CHECK(cudaMallocHost(&bHost, N * sizeof(double))); CUDA_CHECK(cudaMalloc(&aDev, N * sizeof(double))); CUDA_CHECK(cudaMalloc(&bDev, N * sizeof(double))); for (int i = 0; i < N; ++i) aHost[i] = 10.0 * i; // Host -> device. double dt1 = measure([&]() { CUDA_CHECK(cudaMemcpy(aDev, aHost, N * sizeof(double), cudaMemcpyHostToDevice)); }); // Kernel. double dt2 = measure([&]() { // We cannot execute more than maxBlocks blocks, so we split the work // into multiple launches. That's another reason for using chunks. int threads = 1024; int maxBlocks = 65'536; int blocks = (N + threads - 1) / threads; for (int i = 0; i < blocks; i += maxBlocks) { CUDA_LAUNCH(kernel, std::min(maxBlocks, blocks - i), threads, aDev + i * threads, bDev + i * threads, std::min(maxBlocks*threads, N - i*threads)); } }); // Device -> host. double dt3 = measure([&]() { CUDA_CHECK(cudaMemcpy(bHost, bDev, N * sizeof(double), cudaMemcpyDeviceToHost)); }); checkResults(bHost, N); printf("sync %s N=%9d upload=%fs kernel=%fs download=%fs total=%fs\n", kernelName, N, dt1, dt2, dt3, dt1 + dt2 + dt3); CUDA_CHECK(cudaFree(bDev)); CUDA_CHECK(cudaFree(aDev)); CUDA_CHECK(cudaFreeHost(bHost)); CUDA_CHECK(cudaFreeHost(aHost)); } /// Selection of runs to use for profiling. void profile() { runSync("fastKernel", fastKernel, 100'000'000); runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 4); runSync("slowKernel", slowKernel, 100'000'000); runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 4); cudaProfilerStop(); } /// Selection of runs to use for benchmarking. void runBenchmarks() { runSync("fastKernel", fastKernel, 1'000'000); runSync("fastKernel", fastKernel, 100'000'000); runAsync("fastKernel", fastKernel, 100'000'000, 100'000'000, 1); runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 4); runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 8); runAsync("fastKernel", fastKernel, 100'000'000, 1'000'000, 4); runAsync("fastKernel", fastKernel, 100'000'000, 1'000'000, 8); printf("\n"); runSync("slowKernel", slowKernel, 1'000'000); runSync("slowKernel", slowKernel, 100'000'000); runAsync("slowKernel", slowKernel, 100'000'000, 100'000'000, 1); runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 4); runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 8); runAsync("slowKernel", slowKernel, 100'000'000, 1'000'000, 4); runAsync("slowKernel", slowKernel, 100'000'000, 1'000'000, 8); } int main() { // TODO: 3.c.) Enable `profile` and disable `runBenchmarks` to get a // cleaner profiling information. // profile(); runBenchmarks(); }
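The pair above overlaps transfers and compute by cycling chunks over a pool of streams. Below is a condensed restatement of that loop (CUDA side) as a sketch, not the row's exact code: the CUDA_CHECK/CUDA_LAUNCH_EX macros and the 65'536-block splitting are omitted, and pinned allocation plus stream creation are assumed to happen in the caller, as in runAsync above.

#include <cuda_runtime.h>
#include <algorithm>

// Round-robin chunking over a stream pool: upload a chunk, launch on the same
// stream, download the result, then move on to the next stream.
template <typename Kernel>
void runChunked(Kernel kernel, const double *aHost, double *bHost,
                double *aDev, double *bDev,
                cudaStream_t *streams, int numStreams, int N, int chunkSize) {
    int cur = 0;
    for (int j = 0; j < N; j += chunkSize) {
        int size = std::min(chunkSize, N - j);
        cudaMemcpyAsync(aDev + j, aHost + j, size * sizeof(double),
                        cudaMemcpyHostToDevice, streams[cur]);
        int threads = 1024;
        int blocks = (size + threads - 1) / threads;
        kernel<<<blocks, threads, 0, streams[cur]>>>(aDev + j, bDev + j, size);
        cudaMemcpyAsync(bHost + j, bDev + j, size * sizeof(double),
                        cudaMemcpyDeviceToHost, streams[cur]);
        cur = (cur + 1) % numStreams;
    }
    for (int i = 0; i < numStreams; ++i)      // wait for all in-flight chunks
        cudaStreamSynchronize(streams[i]);
}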
8d11afaed2650aeec1dc4047d07b32c6126acd04.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "collisionCheck.cuh" // TODO: fix to only check collision in the spatial dimensions __global__ void freeEdges(float *obstacles, int obstaclesCount, float *samples, bool *isFreeSamples, int numDisc, float *discMotions, bool *isFreeEdges, int numEdges, float *debugOutput) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= numEdges) return; float v[DIM], w[DIM]; float bbMin[DIM], bbMax[DIM]; bool motionValid = true; for (int i = 0; i < numDisc; ++i) { if (!motionValid) break; int baseIdx = tid*(numDisc+1)*DIM + i*DIM; for (int d = 0; d < DIM; ++d) { v[d] = discMotions[baseIdx + d]; w[d] = discMotions[baseIdx + d + DIM]; if (v[d] > w[d]) { bbMin[d] = w[d]; bbMax[d] = v[d]; } else { bbMin[d] = v[d]; bbMax[d] = w[d]; } } motionValid = motionValid && isMotionValid(v, w, bbMin, bbMax, obstaclesCount, obstacles, debugOutput); } isFreeEdges[tid] = motionValid; } __device__ void waypointCollisionCheck(int v_idx, int w_idx, int obstaclesCount, float* obstacles, int *nnIdxs, float *discMotions, int discIdx, int numDisc, bool *isCollision, int tid, float *debugOutput) { // motion from w_idx to v_idx int discMotionsIdx = nnIdxs[v_idx*NUM + w_idx]; // calculate bounds of the bounding box float v[DIM], w[DIM]; // TODO: do v and w need ot be vectors? float bbMin[DIM], bbMax[DIM]; for (int d = 0; d < DIM; ++d) { v[d] = discMotions[discMotionsIdx*(numDisc+1)*DIM + discIdx*DIM + d]; w[d] = discMotions[discMotionsIdx*(numDisc+1)*DIM + (discIdx+1)*DIM + d]; if (v[d] > w[d]) { bbMin[d] = w[d]; bbMax[d] = v[d]; } else { bbMin[d] = v[d]; bbMax[d] = w[d]; } } isCollision[tid] = !isMotionValid(v, w, bbMin, bbMax, obstaclesCount, obstacles, debugOutput); } __device__ bool isMotionValid(float *v, float *w, float *bbMin, float *bbMax, int obstaclesCount, float* obstacles, float *debugOutput) { // TODO: eventually put each point (v, w) into shared memory // TODO: read http://http.developer.nvidia.com/GPUGems3/gpugems3_ch32.html // identify which obstacle this processor is checking against // I don't think necessary, but routine to check if point is within an obstacle // for (int obsIdx = 0; obsIdx < obstaclesCount; ++obsIdx) { // bool notFree = true; // for (int d = 0; d < DIM/2; ++d) { // notFree = notFree && // v[d] > obstacles[obsIdx*2*DIM + d] && // v[d] < obstacles[obsIdx*2*DIM + DIM + d]; // if (!notFree) // break; // } // if (notFree) { // return false; // } // } // go through each obstacle and do broad then narrow phase collision checking for (int obsIdx = 0; obsIdx < obstaclesCount; ++obsIdx) { float obs[DIM*2]; for (int d = 0; d < DIM; ++d) { obs[d] = obstacles[obsIdx*2*DIM + d]; obs[DIM+d] = obstacles[obsIdx*2*DIM + DIM + d]; } if (!broadphaseValidQ(bbMin, bbMax, obs, debugOutput)) { bool motionValid = motionValidQ(v, w, obs, debugOutput); if (!motionValid) { return false; } } } return true; } __device__ bool broadphaseValidQ(float *bbMin, float *bbMax, float *obs, float *debugOutput) { for (int d = 0; d < DIM/2; ++d) { if (bbMax[d] <= obs[d] || obs[DIM+d] <= bbMin[d]) return true; } return false; } __device__ bool motionValidQ(float *v, float *w, float *obs, float *debugOutput) { float v_to_w[DIM/2]; for (int d = 0; d < DIM/2; ++d) { float lambda; v_to_w[d] = w[d] - v[d]; if (v[d] < obs[d]) { lambda = (obs[d] - v[d])/v_to_w[d]; } else { lambda = (obs[DIM + d] - v[d])/v_to_w[d]; } if (faceContainsProjection(v, w, lambda, d, obs, debugOutput)) return false; } return true; } __device__ bool 
faceContainsProjection(float *v, float *w, float lambda, int j, float *obs, float* debugOutput) { for (int d = 0; d < DIM/2; ++d) { float projection = v[d] + (w[d] - v[d])*lambda; if (d != j && !(obs[d] <= projection && projection <= obs[DIM+d])) return false; } return true; } // odd bug when called with v_to_w where the value is not passed correctly // resulting in v_to_w[d] = -2e+30 (for example), and collisions being allowed through // this code is left here to remind me of the error/so I can figure it out later __device__ bool faceContainsProjectionError(float *v, float *v_to_w, float lambda, int j, float *obs, float* debugOutput) { for (int d = 0; d < DIM/2; ++d) { float projection = v[d] + v_to_w[d]*lambda; if (d != j && !(obs[d] <= projection && projection <= obs[DIM+d])) return false; } return true; }
8d11afaed2650aeec1dc4047d07b32c6126acd04.cu
#include "collisionCheck.cuh" // TODO: fix to only check collision in the spatial dimensions __global__ void freeEdges(float *obstacles, int obstaclesCount, float *samples, bool *isFreeSamples, int numDisc, float *discMotions, bool *isFreeEdges, int numEdges, float *debugOutput) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= numEdges) return; float v[DIM], w[DIM]; float bbMin[DIM], bbMax[DIM]; bool motionValid = true; for (int i = 0; i < numDisc; ++i) { if (!motionValid) break; int baseIdx = tid*(numDisc+1)*DIM + i*DIM; for (int d = 0; d < DIM; ++d) { v[d] = discMotions[baseIdx + d]; w[d] = discMotions[baseIdx + d + DIM]; if (v[d] > w[d]) { bbMin[d] = w[d]; bbMax[d] = v[d]; } else { bbMin[d] = v[d]; bbMax[d] = w[d]; } } motionValid = motionValid && isMotionValid(v, w, bbMin, bbMax, obstaclesCount, obstacles, debugOutput); } isFreeEdges[tid] = motionValid; } __device__ void waypointCollisionCheck(int v_idx, int w_idx, int obstaclesCount, float* obstacles, int *nnIdxs, float *discMotions, int discIdx, int numDisc, bool *isCollision, int tid, float *debugOutput) { // motion from w_idx to v_idx int discMotionsIdx = nnIdxs[v_idx*NUM + w_idx]; // calculate bounds of the bounding box float v[DIM], w[DIM]; // TODO: do v and w need ot be vectors? float bbMin[DIM], bbMax[DIM]; for (int d = 0; d < DIM; ++d) { v[d] = discMotions[discMotionsIdx*(numDisc+1)*DIM + discIdx*DIM + d]; w[d] = discMotions[discMotionsIdx*(numDisc+1)*DIM + (discIdx+1)*DIM + d]; if (v[d] > w[d]) { bbMin[d] = w[d]; bbMax[d] = v[d]; } else { bbMin[d] = v[d]; bbMax[d] = w[d]; } } isCollision[tid] = !isMotionValid(v, w, bbMin, bbMax, obstaclesCount, obstacles, debugOutput); } __device__ bool isMotionValid(float *v, float *w, float *bbMin, float *bbMax, int obstaclesCount, float* obstacles, float *debugOutput) { // TODO: eventually put each point (v, w) into shared memory // TODO: read http://http.developer.nvidia.com/GPUGems3/gpugems3_ch32.html // identify which obstacle this processor is checking against // I don't think necessary, but routine to check if point is within an obstacle // for (int obsIdx = 0; obsIdx < obstaclesCount; ++obsIdx) { // bool notFree = true; // for (int d = 0; d < DIM/2; ++d) { // notFree = notFree && // v[d] > obstacles[obsIdx*2*DIM + d] && // v[d] < obstacles[obsIdx*2*DIM + DIM + d]; // if (!notFree) // break; // } // if (notFree) { // return false; // } // } // go through each obstacle and do broad then narrow phase collision checking for (int obsIdx = 0; obsIdx < obstaclesCount; ++obsIdx) { float obs[DIM*2]; for (int d = 0; d < DIM; ++d) { obs[d] = obstacles[obsIdx*2*DIM + d]; obs[DIM+d] = obstacles[obsIdx*2*DIM + DIM + d]; } if (!broadphaseValidQ(bbMin, bbMax, obs, debugOutput)) { bool motionValid = motionValidQ(v, w, obs, debugOutput); if (!motionValid) { return false; } } } return true; } __device__ bool broadphaseValidQ(float *bbMin, float *bbMax, float *obs, float *debugOutput) { for (int d = 0; d < DIM/2; ++d) { if (bbMax[d] <= obs[d] || obs[DIM+d] <= bbMin[d]) return true; } return false; } __device__ bool motionValidQ(float *v, float *w, float *obs, float *debugOutput) { float v_to_w[DIM/2]; for (int d = 0; d < DIM/2; ++d) { float lambda; v_to_w[d] = w[d] - v[d]; if (v[d] < obs[d]) { lambda = (obs[d] - v[d])/v_to_w[d]; } else { lambda = (obs[DIM + d] - v[d])/v_to_w[d]; } if (faceContainsProjection(v, w, lambda, d, obs, debugOutput)) return false; } return true; } __device__ bool faceContainsProjection(float *v, float *w, float lambda, int j, float *obs, float* debugOutput) { for 
(int d = 0; d < DIM/2; ++d) { float projection = v[d] + (w[d] - v[d])*lambda; if (d != j && !(obs[d] <= projection && projection <= obs[DIM+d])) return false; } return true; } // odd bug when called with v_to_w where the value is not passed correctly // resulting in v_to_w[d] = -2e+30 (for example), and collisions being allowed through // this code is left here to remind me of the error/so I can figure it out later __device__ bool faceContainsProjectionError(float *v, float *v_to_w, float lambda, int j, float *obs, float* debugOutput) { for (int d = 0; d < DIM/2; ++d) { float projection = v[d] + v_to_w[d]*lambda; if (d != j && !(obs[d] <= projection && projection <= obs[DIM+d])) return false; } return true; }
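Both files above check collisions in two phases: a cheap broad-phase test compares the motion's axis-aligned bounding box against each obstacle box, and the narrow-phase segment test runs only when those boxes overlap. The host-side sketch below illustrates the broad-phase rejection; the [min..., max...] box layout mirrors the row, while the three-dimension count and the sample coordinates are made up for illustration.

#include <cstdio>

const int D = 3;  // number of spatial dimensions checked (the row loops over DIM/2)

// An obstacle box is stored as [min_0..min_{D-1}, max_0..max_{D-1}], as in the row.
// If the motion's bounding box and the obstacle are separated along any axis,
// no collision is possible and the narrow-phase segment test can be skipped.
bool broadphaseValid(const float *bbMin, const float *bbMax, const float *obs) {
    for (int d = 0; d < D; ++d)
        if (bbMax[d] <= obs[d] || obs[D + d] <= bbMin[d])
            return true;   // separating axis found
    return false;          // boxes overlap: run the exact test
}

int main() {
    float bbMin[D] = {0.f, 0.f, 0.f}, bbMax[D] = {1.f, 1.f, 1.f};
    float obs[2 * D] = {2.f, 0.f, 0.f, 3.f, 1.f, 1.f};   // min triple, then max triple
    printf("motion can skip narrow phase: %s\n",
           broadphaseValid(bbMin, bbMax, obs) ? "yes" : "no");
    return 0;
}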
6009b505872cd13aa81ff87763e7c98c7d71f1a1.hip
// !!! This is a file automatically generated by hipify!!! #include "cuda_error_hadling.h" #include "bfs_gpu.cuh" #include <limits> #include <iostream> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <getopt.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include <math.h> #include <assert.h> #include <float.h> #include "const.h" using namespace std; ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // init levels ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void __global__ init_kernel(int *_levels, int _vertices_count, int _source_vertex) { register const int idx = (blockIdx.x * blockDim.x + threadIdx.x) + blockIdx.y * blockDim.x * gridDim.x; // all vertices except the source are still unvisited if (idx < _vertices_count) _levels[idx] = UNVISITED_VERTEX; _levels[_source_vertex] = 1; // the source vertex is placed on the first "level" } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // main computational algorithm ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void __global__ bfs_kernel(int *_levels, long long *_outgoing_ptrs, int *_outgoing_ids, int _vertices_count, long long _edges_count, int *_changes, int _current_level) { // register const int src_id = blockIdx.x * blockDim.x + threadIdx.x; register const int src_id = (blockIdx.x * blockDim.x + threadIdx.x) / 64; if (src_id < _vertices_count) // for every graph vertex do the following { if(_levels[src_id] == _current_level) // if the vertex belongs to the current (previously visited) level { const long long edge_start = _outgoing_ptrs[src_id]; // get the position of the vertex's first edge const int connections_count = _outgoing_ptrs[src_id + 1] - _outgoing_ptrs[src_id]; // get the number of edges adjacent to the vertex for(int edge_pos = threadIdx.x % 64; edge_pos < connections_count; edge_pos += 64) // for each adjacent edge: { int dst_id = _outgoing_ids[edge_start + edge_pos]; // load the ID of the edge's destination vertex if (_levels[dst_id] == UNVISITED_VERTEX) // if the destination vertex has not been visited yet { _levels[dst_id] = _current_level + 1; // mark it with the next level _changes[0] = 1; } } } } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // single GPU implementation ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void gpu_bfs_wrapper(long long *_outgoing_ptrs, int *_outgoing_ids, int _vertices_count, long long _edges_count, int _source_vertex, int *_levels) { dim3 init_threads(1024); dim3 init_blocks((_vertices_count - 1) / init_threads.x + 1); // call init kernel hipLaunchKernelGGL(( SAFE_KERNEL_CALL((init_kernel) , dim3(init_blocks), dim3(init_threads) , 0, 0, _levels, _vertices_count, _source_vertex))); // device variable to stop iterations, for each source vertex int *changes; SAFE_CALL(hipMallocManaged((void**)&changes, sizeof(int))); // set grid size dim3 compute_threads(1024); dim3 compute_blocks(64 * (_vertices_count - 1) / compute_threads.x + 1); int current_level = 1; // compute shortest paths do { changes[0] = 0; hipLaunchKernelGGL(( SAFE_KERNEL_CALL((bfs_kernel) , dim3(compute_blocks), dim3(compute_threads) , 0, 0, _levels, _outgoing_ptrs, _outgoing_ids, _vertices_count, _edges_count, changes, current_level))); current_level++; } while(changes[0] > 0); SAFE_CALL(hipFree(changes)); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
6009b505872cd13aa81ff87763e7c98c7d71f1a1.cu
#include "cuda_error_hadling.h" #include "bfs_gpu.cuh" #include <limits> #include <iostream> #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #include <getopt.h> #include <cuda.h> #include <sys/time.h> #include <math.h> #include <assert.h> #include <float.h> #include "const.h" using namespace std; ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // init levels ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void __global__ init_kernel(int *_levels, int _vertices_count, int _source_vertex) { register const int idx = (blockIdx.x * blockDim.x + threadIdx.x) + blockIdx.y * blockDim.x * gridDim.x; // все вершины кроме источника еще не посещены if (idx < _vertices_count) _levels[idx] = UNVISITED_VERTEX; _levels[_source_vertex] = 1; // вершина-источник помещается на первый "уровень" } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // main computational algorithm ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void __global__ bfs_kernel(int *_levels, long long *_outgoing_ptrs, int *_outgoing_ids, int _vertices_count, long long _edges_count, int *_changes, int _current_level) { // register const int src_id = blockIdx.x * blockDim.x + threadIdx.x; register const int src_id = (blockIdx.x * blockDim.x + threadIdx.x) / 64; if (src_id < _vertices_count) // для всех графовых вершин выполнить следующее { if(_levels[src_id] == _current_level) // если графовая вершина принадлежит текущему (ранее посещенному уровню) { const long long edge_start = _outgoing_ptrs[src_id]; // получаем положение первого ребра вершины const int connections_count = _outgoing_ptrs[src_id + 1] - _outgoing_ptrs[src_id]; // получаем число смежных ребер вершины for(int edge_pos = threadIdx.x % 64; edge_pos < connections_count; edge_pos += 64) // для каждого смежного ребра делаем: { int dst_id = _outgoing_ids[edge_start + edge_pos]; // загружаем ID напарвляющей вершины ребра if (_levels[dst_id] == UNVISITED_VERTEX) // если направляющая вершина - не посещенная { _levels[dst_id] = _current_level + 1; // то помечаем её следующим уровнем _changes[0] = 1; } } } } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // single GPU implememntation ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void gpu_bfs_wrapper(long long *_outgoing_ptrs, int *_outgoing_ids, int _vertices_count, long long _edges_count, int _source_vertex, int *_levels) { dim3 init_threads(1024); dim3 init_blocks((_vertices_count - 1) / init_threads.x + 1); // call init kernel SAFE_KERNEL_CALL((init_kernel <<< init_blocks, init_threads >>> (_levels, _vertices_count, _source_vertex))); // device variable to stop iterations, for each source vertex int *changes; SAFE_CALL(cudaMallocManaged((void**)&changes, sizeof(int))); // set grid size dim3 compute_threads(1024); dim3 compute_blocks(64 * (_vertices_count - 1) / compute_threads.x + 1); int current_level = 1; // compute shortest paths do { changes[0] = 0; SAFE_KERNEL_CALL((bfs_kernel <<< compute_blocks, compute_threads >>> (_levels, _outgoing_ptrs, _outgoing_ids, _vertices_count, _edges_count, changes, current_level))); current_level++; } while(changes[0] > 0); SAFE_CALL(cudaFree(changes)); } 
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
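The wrapper in the pair above drives the level-synchronous BFS with a single flag in managed memory: the host clears it before each level, the kernel sets it whenever a vertex is newly labeled, and the loop stops after a level with no changes. The sketch below isolates that flag pattern; the kernel here is a stand-in that simply stops reporting changes after five rounds, not the BFS kernel.

#include <cuda_runtime.h>
#include <cstdio>

// Stand-in worker: reports "something changed" for the first five rounds only.
__global__ void one_round(int *changes, int round) {
    if (blockIdx.x == 0 && threadIdx.x == 0 && round < 5)
        changes[0] = 1;
}

int main() {
    int *changes;
    cudaMallocManaged(&changes, sizeof(int));  // flag visible to host and device
    int round = 0;
    do {
        changes[0] = 0;                        // host clears the flag
        one_round<<<1, 32>>>(changes, round);
        cudaDeviceSynchronize();               // finish before reading managed memory
        ++round;
    } while (changes[0] > 0);
    printf("converged after %d rounds\n", round);
    cudaFree(changes);
    return 0;
}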
6a2282561216bd79e3812d47ef86edfd216a2c1f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cube(float * d_out, float * d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f * f * f; }
6a2282561216bd79e3812d47ef86edfd216a2c1f.cu
#include "includes.h" __global__ void cube(float * d_out, float * d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f * f * f; }
f292ad708a744447c3a7a5e9c51b6a0a0b752f79.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> __device__ int value; __global__ void child() { printf("in child %d\n", threadIdx.x); } __device__ void dchild() { hipLaunchKernelGGL(( child), dim3(1), dim3(10), 0, 0, ); hipDeviceSynchronize(); } __global__ void parent() { dchild(); } int main() { hipLaunchKernelGGL(( parent), dim3(1), dim3(2), 0, 0, ); hipDeviceSynchronize(); }
f292ad708a744447c3a7a5e9c51b6a0a0b752f79.cu
#include <stdio.h> #include <cuda.h> __device__ int value; __global__ void child() { printf("in child %d\n", threadIdx.x); } __device__ void dchild() { child<<<1, 10>>>(); cudaDeviceSynchronize(); } __global__ void parent() { dchild(); } int main() { parent<<<1, 2>>>(); cudaDeviceSynchronize(); }
b132a676302004a93d2bfc3c3ad072046b1fbe2c.hip
// !!! This is a file automatically generated by hipify!!! /********************************************************************** * * main.cu -- main function for the NN in CUDA * * Frank Blanning <[email protected]> * John Flionis <[email protected]> * **********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <string.h> #include "cuRandFloat.h" #include "hashing3D.h" #include "cuNearestNeighbor.h" #include "cuNearestNeighbor2ndPass.h" #include "cpuValidation.h" #include "gpuValidation.h" #define DIM 3 int main (int argc, char *argv[]) { float *Q, *C, *d_Q, *d_C; int *S, *d_S, *P, *d_P, *QBoxIdToCheck, *d_QBoxIdToCheck; int NC, NQ, d, SDim; hipError_t err; char verboseFlag = 0; char noValidationFlag = 0; char cpuValidationFlag = 0; // Parsing input arguments if (argc < 4) { printf("Usage: %s [flags] arg1 arg2 arg3\n where NC=2^arg1, NQ=2^arg2 and d=2^arg3\n", argv[0]); exit(1); } for(int i=1; i<argc; i++) { if (strcmp(argv[i], "-v") == 0) { verboseFlag = 1; // use only with small NC NQ and d } if (strcmp(argv[i], "--novalidation") == 0) { noValidationFlag = 1; // Do not run the slow validation in the end } if (strcmp(argv[i], "--cpuvalidation") == 0) { cpuValidationFlag = 1; // Run the validation with the cpu code } if (strncmp(argv[i], "-", 1) != 0) { NC = 1<<atoi(argv[i]); NQ = 1<<atoi(argv[i+1]); d = 1<<atoi(argv[i+2]); break; } } // Initializing int d3 = d*d*d; size_t QSize = DIM * NQ * sizeof(float); size_t CSize = DIM * NC * sizeof(float); size_t QBoxIdToCheckSize = NQ * sizeof(int); size_t SSize = (d3+1) * sizeof(int); // CUDA Device setup size_t threadsPerBlock, warp; size_t numberOfBlocks, multiP; int deviceId; hipDeviceProp_t props; hipGetDevice(&deviceId); hipGetDeviceProperties(&props, deviceId); warp = props.warpSize; multiP = props.multiProcessorCount; threadsPerBlock = 8*warp; numberOfBlocks = 5*multiP; // Timers setup hipEvent_t startOfHashing, startOfFirstRun, startOfSecondRun, stop; hipEventCreate(&startOfHashing); hipEventCreate(&startOfFirstRun); hipEventCreate(&startOfSecondRun); hipEventCreate(&stop); // Create input Data randFloat(&Q, &d_Q, NQ); randFloat(&C, &d_C, NC); CUDA_CALL(hipDeviceSynchronize()); if(verboseFlag == 1) { /* Show result */ printf(" ======Q vector====== \n"); for(int i = 0; i < NQ ; i++){ for (int d=0; d<DIM; d++) printf("%1.4f ", Q[i*DIM+d]); printf("\n"); } printf(" ======C vector====== \n"); for(int i = 0; i < NC ; i++){ for (int d=0; d<DIM; d++) printf("%1.4f ", C[i*DIM+d]); printf("\n"); } } // Aranging C and Q into d*d*d boxes hipEventRecord(startOfHashing); hashing3D(C, &d_C, CSize, NC, d, &S, &d_S, numberOfBlocks, threadsPerBlock); hashing3D(Q, &d_Q, QSize, NQ, d, &P, &d_P, &QBoxIdToCheck, &d_QBoxIdToCheck, numberOfBlocks, threadsPerBlock); if(verboseFlag == 1){ /* Show result */ CUDA_CALL(hipMemcpy(Q, d_Q, QSize, hipMemcpyDeviceToHost)); CUDA_CALL(hipMemcpy(QBoxIdToCheck, d_QBoxIdToCheck, QBoxIdToCheckSize, hipMemcpyDeviceToHost)); printf("\nd=%d\n\n",d); printf(" ====new Q vector==== \n"); for(int i = 0; i < NQ ; i++){ for (int d=0; d<DIM; d++) printf("%1.4f ", Q[i*DIM+d]); printf("| Belongs to box:%d\n",QBoxIdToCheck[i]); } CUDA_CALL(hipMemcpy(C, d_C, CSize, hipMemcpyDeviceToHost)); CUDA_CALL(hipMemcpy(S, d_S, SSize, hipMemcpyDeviceToHost)); printf(" ======S vector====== \n"); for(int boxid=0;boxid<d*d*d;boxid++){ SDim = S[boxid+1]-S[boxid]; printf("Box%d size=%d\n", boxid, SDim); for(int i = S[boxid] ; i < S[boxid+1] ; i++){ for (int d=0; 
d<DIM; d++) printf("%1.4f ", C[ i*DIM +d ]); printf("\n"); } } } // First Run of Nearest neighbor function hipEventRecord(startOfFirstRun); int *neighbor, *d_neighbor; int *d_checkOutside; size_t neighborSize = NQ * sizeof(int); size_t checkOutsideSize = (NQ+1) * sizeof(int); CUDA_CALL(hipMalloc(&d_neighbor,neighborSize)); neighbor = (int *)malloc(neighborSize); if(neighbor == NULL) { printf("Error allocating neighbor"); exit(1); } CUDA_CALL(hipMalloc(&d_checkOutside,checkOutsideSize)); hipLaunchKernelGGL(( cuNearestNeighbor), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, d_C,d_S,d_Q,NQ,d_QBoxIdToCheck,d,d_neighbor,d_checkOutside); err = hipGetLastError(); if (err != hipSuccess) { printf("Error \"%s\" at %s:%d\n", hipGetErrorString(err), __FILE__,__LINE__); return EXIT_FAILURE; } CUDA_CALL(hipDeviceSynchronize()); if(verboseFlag == 1) { CUDA_CALL(hipMemcpy(neighbor, d_neighbor, neighborSize, hipMemcpyDeviceToHost)); printf(" ==== Neighbors! ==== \n"); for(int i = 0; i < NQ ; i++) printf("> Q[%d] -> C[%d]\n",i,neighbor[i]); } // Second Run of Nearest neighbor function hipEventRecord(startOfSecondRun); hipLaunchKernelGGL(( cuNearestNeighbor2ndPass), dim3(numberOfBlocks), dim3(threadsPerBlock/8), 0, 0, d_C,d_S,d_Q,NQ,d_QBoxIdToCheck,d,d_neighbor,d_checkOutside); err = hipGetLastError(); if (err != hipSuccess) { printf("Error \"%s\" at %s:%d\n", hipGetErrorString(err), __FILE__,__LINE__); return EXIT_FAILURE; } CUDA_CALL(hipDeviceSynchronize()); if(verboseFlag == 1) { CUDA_CALL(hipMemcpy(neighbor, d_neighbor, neighborSize, hipMemcpyDeviceToHost)); printf(" ==== Neighbors! ==== \n"); for(int i = 0; i < NQ ; i++) printf(">> Q[%d] -> C[%d]\n",i,neighbor[i]); } // THE END hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, startOfHashing, startOfFirstRun); printf("Duration of Q and C hashing: %1.6fms\n",milliseconds); milliseconds = 0; hipEventElapsedTime(&milliseconds, startOfFirstRun, startOfSecondRun); printf("Duration of the first run of the kernel: %1.6fms\n",milliseconds); milliseconds = 0; hipEventElapsedTime(&milliseconds, startOfSecondRun, stop); printf("Duration of the second run of the kernel: %1.6fms\n",milliseconds); if(noValidationFlag==0) { /* Validating the NN results */ if(cpuValidationFlag==1) { CUDA_CALL(hipMemcpy(neighbor, d_neighbor, neighborSize, hipMemcpyDeviceToHost)); CUDA_CALL(hipMemcpy(C, d_C, CSize, hipMemcpyDeviceToHost)); CUDA_CALL(hipMemcpy(Q, d_Q, QSize, hipMemcpyDeviceToHost)); cpuValidation(Q, NQ, C, NC, neighbor, verboseFlag); } else { gpuValidation(d_Q, NQ, d_C, NC, d_neighbor, verboseFlag, numberOfBlocks, threadsPerBlock); } } /* Cleanup */ CUDA_CALL(hipFree(d_Q)); CUDA_CALL(hipFree(d_C)); CUDA_CALL(hipFree(d_S)); CUDA_CALL(hipFree(d_QBoxIdToCheck)); free(Q); free(C); free(S); free(QBoxIdToCheck); return 0; }
b132a676302004a93d2bfc3c3ad072046b1fbe2c.cu
/********************************************************************** * * main.cu -- main function for the NN in CUDA * * Frank Blanning <[email protected]> * John Flionis <[email protected]> * **********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <string.h> #include "cuRandFloat.h" #include "hashing3D.h" #include "cuNearestNeighbor.h" #include "cuNearestNeighbor2ndPass.h" #include "cpuValidation.h" #include "gpuValidation.h" #define DIM 3 int main (int argc, char *argv[]) { float *Q, *C, *d_Q, *d_C; int *S, *d_S, *P, *d_P, *QBoxIdToCheck, *d_QBoxIdToCheck; int NC, NQ, d, SDim; cudaError_t err; char verboseFlag = 0; char noValidationFlag = 0; char cpuValidationFlag = 0; // Parsing input arguments if (argc < 4) { printf("Usage: %s [flags] arg1 arg2 arg3\n where NC=2^arg1, NQ=2^arg2 and d=2^arg3\n", argv[0]); exit(1); } for(int i=1; i<argc; i++) { if (strcmp(argv[i], "-v") == 0) { verboseFlag = 1; // use only with small NC NQ and d } if (strcmp(argv[i], "--novalidation") == 0) { noValidationFlag = 1; // Do not run the slow validation in the end } if (strcmp(argv[i], "--cpuvalidation") == 0) { cpuValidationFlag = 1; // Run the validation with the cpu code } if (strncmp(argv[i], "-", 1) != 0) { NC = 1<<atoi(argv[i]); NQ = 1<<atoi(argv[i+1]); d = 1<<atoi(argv[i+2]); break; } } // Initializing int d3 = d*d*d; size_t QSize = DIM * NQ * sizeof(float); size_t CSize = DIM * NC * sizeof(float); size_t QBoxIdToCheckSize = NQ * sizeof(int); size_t SSize = (d3+1) * sizeof(int); // CUDA Device setup size_t threadsPerBlock, warp; size_t numberOfBlocks, multiP; int deviceId; cudaDeviceProp props; cudaGetDevice(&deviceId); cudaGetDeviceProperties(&props, deviceId); warp = props.warpSize; multiP = props.multiProcessorCount; threadsPerBlock = 8*warp; numberOfBlocks = 5*multiP; // Timers setup cudaEvent_t startOfHashing, startOfFirstRun, startOfSecondRun, stop; cudaEventCreate(&startOfHashing); cudaEventCreate(&startOfFirstRun); cudaEventCreate(&startOfSecondRun); cudaEventCreate(&stop); // Create input Data randFloat(&Q, &d_Q, NQ); randFloat(&C, &d_C, NC); CUDA_CALL(cudaDeviceSynchronize()); if(verboseFlag == 1) { /* Show result */ printf(" ======Q vector====== \n"); for(int i = 0; i < NQ ; i++){ for (int d=0; d<DIM; d++) printf("%1.4f ", Q[i*DIM+d]); printf("\n"); } printf(" ======C vector====== \n"); for(int i = 0; i < NC ; i++){ for (int d=0; d<DIM; d++) printf("%1.4f ", C[i*DIM+d]); printf("\n"); } } // Aranging C and Q into d*d*d boxes cudaEventRecord(startOfHashing); hashing3D(C, &d_C, CSize, NC, d, &S, &d_S, numberOfBlocks, threadsPerBlock); hashing3D(Q, &d_Q, QSize, NQ, d, &P, &d_P, &QBoxIdToCheck, &d_QBoxIdToCheck, numberOfBlocks, threadsPerBlock); if(verboseFlag == 1){ /* Show result */ CUDA_CALL(cudaMemcpy(Q, d_Q, QSize, cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(QBoxIdToCheck, d_QBoxIdToCheck, QBoxIdToCheckSize, cudaMemcpyDeviceToHost)); printf("\nd=%d\n\n",d); printf(" ====new Q vector==== \n"); for(int i = 0; i < NQ ; i++){ for (int d=0; d<DIM; d++) printf("%1.4f ", Q[i*DIM+d]); printf("| Belongs to box:%d\n",QBoxIdToCheck[i]); } CUDA_CALL(cudaMemcpy(C, d_C, CSize, cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(S, d_S, SSize, cudaMemcpyDeviceToHost)); printf(" ======S vector====== \n"); for(int boxid=0;boxid<d*d*d;boxid++){ SDim = S[boxid+1]-S[boxid]; printf("Box%d size=%d\n", boxid, SDim); for(int i = S[boxid] ; i < S[boxid+1] ; i++){ for (int d=0; d<DIM; d++) printf("%1.4f ", C[ i*DIM +d ]); printf("\n"); 
} } } // First Run of Nearest neighbor function cudaEventRecord(startOfFirstRun); int *neighbor, *d_neighbor; int *d_checkOutside; size_t neighborSize = NQ * sizeof(int); size_t checkOutsideSize = (NQ+1) * sizeof(int); CUDA_CALL(cudaMalloc(&d_neighbor,neighborSize)); neighbor = (int *)malloc(neighborSize); if(neighbor == NULL) { printf("Error allocating neighbor"); exit(1); } CUDA_CALL(cudaMalloc(&d_checkOutside,checkOutsideSize)); cuNearestNeighbor<<<numberOfBlocks, threadsPerBlock>>> (d_C,d_S,d_Q,NQ,d_QBoxIdToCheck,d,d_neighbor,d_checkOutside); err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error \"%s\" at %s:%d\n", cudaGetErrorString(err), __FILE__,__LINE__); return EXIT_FAILURE; } CUDA_CALL(cudaDeviceSynchronize()); if(verboseFlag == 1) { CUDA_CALL(cudaMemcpy(neighbor, d_neighbor, neighborSize, cudaMemcpyDeviceToHost)); printf(" ==== Neighbors! ==== \n"); for(int i = 0; i < NQ ; i++) printf("> Q[%d] -> C[%d]\n",i,neighbor[i]); } // Second Run of Nearest neighbor function cudaEventRecord(startOfSecondRun); cuNearestNeighbor2ndPass<<<numberOfBlocks, threadsPerBlock/8>>> (d_C,d_S,d_Q,NQ,d_QBoxIdToCheck,d,d_neighbor,d_checkOutside); err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error \"%s\" at %s:%d\n", cudaGetErrorString(err), __FILE__,__LINE__); return EXIT_FAILURE; } CUDA_CALL(cudaDeviceSynchronize()); if(verboseFlag == 1) { CUDA_CALL(cudaMemcpy(neighbor, d_neighbor, neighborSize, cudaMemcpyDeviceToHost)); printf(" ==== Neighbors! ==== \n"); for(int i = 0; i < NQ ; i++) printf(">> Q[%d] -> C[%d]\n",i,neighbor[i]); } // THE END cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, startOfHashing, startOfFirstRun); printf("Duration of Q and C hashing: %1.6fms\n",milliseconds); milliseconds = 0; cudaEventElapsedTime(&milliseconds, startOfFirstRun, startOfSecondRun); printf("Duration of the first run of the kernel: %1.6fms\n",milliseconds); milliseconds = 0; cudaEventElapsedTime(&milliseconds, startOfSecondRun, stop); printf("Duration of the second run of the kernel: %1.6fms\n",milliseconds); if(noValidationFlag==0) { /* Validating the NN results */ if(cpuValidationFlag==1) { CUDA_CALL(cudaMemcpy(neighbor, d_neighbor, neighborSize, cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(C, d_C, CSize, cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(Q, d_Q, QSize, cudaMemcpyDeviceToHost)); cpuValidation(Q, NQ, C, NC, neighbor, verboseFlag); } else { gpuValidation(d_Q, NQ, d_C, NC, d_neighbor, verboseFlag, numberOfBlocks, threadsPerBlock); } } /* Cleanup */ CUDA_CALL(cudaFree(d_Q)); CUDA_CALL(cudaFree(d_C)); CUDA_CALL(cudaFree(d_S)); CUDA_CALL(cudaFree(d_QBoxIdToCheck)); free(Q); free(C); free(S); free(QBoxIdToCheck); return 0; }
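The pair above derives its launch configuration from the device instead of hard-coding it: eight warps per block and five blocks per multiprocessor, queried through cudaGetDeviceProperties. The sketch below isolates that query with the same multipliers as the row; printing the values is added only for illustration.

#include <cuda_runtime.h>
#include <cstdio>

int main() {
    int deviceId = 0;
    cudaDeviceProp props;
    cudaGetDevice(&deviceId);
    cudaGetDeviceProperties(&props, deviceId);

    // Same multipliers as the pair above: 8 warps per block, 5 blocks per SM.
    size_t threadsPerBlock = 8 * props.warpSize;
    size_t numberOfBlocks = 5 * props.multiProcessorCount;

    printf("%s: %zu threads per block, %zu blocks\n",
           props.name, threadsPerBlock, numberOfBlocks);
    return 0;
}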
bc10ba43a8f0bbbda32f6dcd8d7d071fb01fbc16.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/layers/softmax_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template <typename Dtype> __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } channel_dot[index] = dot; } } template <typename Dtype> void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = bottom[0]->count(); int channels = top[0]->shape(softmax_axis_); caffe_copy(count, bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. 
// compute max // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_, scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_data, top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_, scale_data, top_data); // const Dtype* embs = top[0]->cpu_data(); // for (int i=0; i < 10; i++){ // LOG(ERROR) << "for example, soft[" << i << "] = " << embs[i]; // } } template <typename Dtype> void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = top[0]->count(); int channels = top[0]->shape(softmax_axis_); caffe_copy(count, top_diff, bottom_diff); // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_diff, top_data, scale_data); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_, scale_data, bottom_diff); // elementwise multiplication caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); } // namespace caffe
bc10ba43a8f0bbbda32f6dcd8d7d071fb01fbc16.cu
#include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/layers/softmax_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template <typename Dtype> __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } channel_dot[index] = dot; } } template <typename Dtype> void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = bottom[0]->count(); int channels = top[0]->shape(softmax_axis_); caffe_copy(count, bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. 
// compute max // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_, scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_data, top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_, scale_data, top_data); // const Dtype* embs = top[0]->cpu_data(); // for (int i=0; i < 10; i++){ // LOG(ERROR) << "for example, soft[" << i << "] = " << embs[i]; // } } template <typename Dtype> void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = top[0]->count(); int channels = top[0]->shape(softmax_axis_); caffe_copy(count, top_diff, bottom_diff); // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_diff, top_data, scale_data); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_, scale_data, bottom_diff); // elementwise multiplication caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); } // namespace caffe
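The kernels in the pair above are written with Caffe's CUDA_KERNEL_LOOP and CAFFE_GET_BLOCKS helpers, which implement a grid-stride loop over the index range. The sketch below reproduces that idiom with a locally defined macro rather than Caffe's headers; the 512-thread block size is an assumption, not taken from the row.

#include <cuda_runtime.h>
#include <cstdio>

// Locally defined grid-stride loop, the idiom behind CUDA_KERNEL_LOOP.
#define KERNEL_LOOP(i, n)                                        \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
         i += blockDim.x * gridDim.x)

__global__ void scale_inplace(float *data, int n, float s) {
    KERNEL_LOOP(i, n) {
        data[i] *= s;   // each thread strides over the whole index range
    }
}

int main() {
    const int n = 1 << 20, threads = 512;            // assumed block size
    const int blocks = (n + threads - 1) / threads;  // CAFFE_GET_BLOCKS-style count
    float *d;
    cudaMalloc(&d, n * sizeof(float));
    scale_inplace<<<blocks, threads>>>(d, n, 2.0f);
    cudaDeviceSynchronize();
    cudaFree(d);
    printf("launched %d blocks of %d threads\n", blocks, threads);
    return 0;
}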
74458f32c468033e9336cb44cb71ee30f9352214.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <raft/linalg/cusolver_wrappers.h> #include <test_utils.h> #include <matrix/matrix.cuh> #include <solver/sgd.cuh> namespace ML { namespace Solver { using namespace MLCommon; using namespace MLCommon::LinAlg; template <typename T> struct SgdInputs { T tol; int n_row; int n_col; int n_row2; int n_col2; int batch_size; }; template <typename T> class SgdTest : public ::testing::TestWithParam<SgdInputs<T>> { protected: void linearRegressionTest() { params = ::testing::TestWithParam<SgdInputs<T>>::GetParam(); int len = params.n_row * params.n_col; allocate(data, len); allocate(labels, params.n_row); allocate(coef, params.n_col, true); allocate(coef2, params.n_col, true); allocate(coef_ref, params.n_col); allocate(coef2_ref, params.n_col); T data_h[len] = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0}; updateDevice(data, data_h, len, stream); T labels_h[params.n_row] = {6.0, 8.0, 9.0, 11.0}; updateDevice(labels, labels_h, params.n_row, stream); T coef_ref_h[params.n_col] = {2.087, 2.5454557}; updateDevice(coef_ref, coef_ref_h, params.n_col, stream); T coef2_ref_h[params.n_col] = {1.000001, 1.9999998}; updateDevice(coef2_ref, coef2_ref_h, params.n_col, stream); bool fit_intercept = false; intercept = T(0); int epochs = 2000; T lr = T(0.01); ML::lr_type lr_type = ML::lr_type::ADAPTIVE; T power_t = T(0.5); T alpha = T(0.0001); T l1_ratio = T(0.15); bool shuffle = true; T tol = T(1e-10); ML::loss_funct loss = ML::loss_funct::SQRD_LOSS; MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE; int n_iter_no_change = 10; sgdFit(handle, data, params.n_row, params.n_col, labels, coef, &intercept, fit_intercept, params.batch_size, epochs, lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change, stream); fit_intercept = true; intercept2 = T(0); sgdFit(handle, data, params.n_row, params.n_col, labels, coef2, &intercept2, fit_intercept, params.batch_size, epochs, ML::lr_type::CONSTANT, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change, stream); } void logisticRegressionTest() { params = ::testing::TestWithParam<SgdInputs<T>>::GetParam(); int len = params.n_row2 * params.n_col2; T *coef_class; allocate(data_logreg, len); allocate(data_logreg_test, len); allocate(labels_logreg, params.n_row2); allocate(coef_class, params.n_col2, true); allocate(pred_log, params.n_row2); allocate(pred_log_ref, params.n_row2); T data_h[len] = {0.1, -2.1, 5.4, 5.4, -1.5, -2.15, 2.65, 2.65, 3.25, -0.15, -7.35, -7.35}; updateDevice(data_logreg, data_h, len, stream); T data_test_h[len] = {0.3, 1.1, 2.1, -10.1, 0.5, 2.5, -3.55, -20.5, -1.3, 3.0, -5.0, 15.0}; updateDevice(data_logreg_test, data_test_h, len, stream); T labels_logreg_h[params.n_row2] = {0.0, 1.0, 1.0, 0.0}; updateDevice(labels_logreg, labels_logreg_h, params.n_row2, stream); T pred_log_ref_h[params.n_row2] = {1.0, 
0.0, 1.0, 1.0}; updateDevice(pred_log_ref, pred_log_ref_h, params.n_row2, stream); bool fit_intercept = true; T intercept_class = T(0); int epochs = 1000; T lr = T(0.05); ML::lr_type lr_type = ML::lr_type::CONSTANT; T power_t = T(0.5); T alpha = T(0.0); T l1_ratio = T(0.0); bool shuffle = false; T tol = T(0.0); ML::loss_funct loss = ML::loss_funct::LOG; MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE; int n_iter_no_change = 10; sgdFit(handle, data_logreg, params.n_row2, params.n_col2, labels_logreg, coef_class, &intercept_class, fit_intercept, params.batch_size, epochs, lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change, stream); sgdPredictBinaryClass(handle, data_logreg_test, params.n_row2, params.n_col2, coef_class, intercept_class, pred_log, loss, stream); CUDA_CHECK(hipFree(coef_class)); } void svmTest() { params = ::testing::TestWithParam<SgdInputs<T>>::GetParam(); int len = params.n_row2 * params.n_col2; T *coef_class; allocate(data_svmreg, len); allocate(data_svmreg_test, len); allocate(labels_svmreg, params.n_row2); allocate(coef_class, params.n_col2, true); allocate(pred_svm, params.n_row2); allocate(pred_svm_ref, params.n_row2); T data_h[len] = {0.1, -2.1, 5.4, 5.4, -1.5, -2.15, 2.65, 2.65, 3.25, -0.15, -7.35, -7.35}; updateDevice(data_svmreg, data_h, len, stream); T data_test_h[len] = {0.3, 1.1, 2.1, -10.1, 0.5, 2.5, -3.55, -20.5, -1.3, 3.0, -5.0, 15.0}; updateDevice(data_svmreg_test, data_test_h, len, stream); T labels_svmreg_h[params.n_row2] = {0.0, 1.0, 1.0, 0.0}; updateDevice(labels_svmreg, labels_svmreg_h, params.n_row2, stream); T pred_svm_ref_h[params.n_row2] = {1.0, 0.0, 1.0, 1.0}; updateDevice(pred_svm_ref, pred_svm_ref_h, params.n_row2, stream); bool fit_intercept = true; T intercept_class = T(0); int epochs = 1000; T lr = T(0.05); ML::lr_type lr_type = ML::lr_type::CONSTANT; T power_t = T(0.5); T alpha = T(1) / T(epochs); T l1_ratio = T(0.0); bool shuffle = false; T tol = T(0.0); ML::loss_funct loss = ML::loss_funct::HINGE; MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::L2; int n_iter_no_change = 10; sgdFit(handle, data_svmreg, params.n_row2, params.n_col2, labels_svmreg, coef_class, &intercept_class, fit_intercept, params.batch_size, epochs, lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change, stream); sgdPredictBinaryClass(handle, data_svmreg_test, params.n_row2, params.n_col2, coef_class, intercept_class, pred_svm, loss, stream); CUDA_CHECK(hipFree(coef_class)); } void SetUp() override { CUDA_CHECK(hipStreamCreate(&stream)); handle.set_stream(stream); linearRegressionTest(); logisticRegressionTest(); svmTest(); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(labels)); CUDA_CHECK(hipFree(coef)); CUDA_CHECK(hipFree(coef_ref)); CUDA_CHECK(hipFree(coef2)); CUDA_CHECK(hipFree(coef2_ref)); CUDA_CHECK(hipFree(data_logreg)); CUDA_CHECK(hipFree(data_logreg_test)); CUDA_CHECK(hipFree(labels_logreg)); CUDA_CHECK(hipFree(data_svmreg)); CUDA_CHECK(hipFree(data_svmreg_test)); CUDA_CHECK(hipFree(labels_svmreg)); CUDA_CHECK(hipFree(pred_svm)); CUDA_CHECK(hipFree(pred_svm_ref)); CUDA_CHECK(hipFree(pred_log)); CUDA_CHECK(hipFree(pred_log_ref)); CUDA_CHECK(hipStreamDestroy(stream)); } protected: SgdInputs<T> params; T *data, *labels, *coef, *coef_ref; T *coef2, *coef2_ref; T *data_logreg, *data_logreg_test, *labels_logreg; T *data_svmreg, *data_svmreg_test, *labels_svmreg; T *pred_svm, *pred_svm_ref, *pred_log, *pred_log_ref; T intercept, intercept2; 
hipStream_t stream; raft::handle_t handle; }; const std::vector<SgdInputs<float>> inputsf2 = {{0.01f, 4, 2, 4, 3, 2}}; const std::vector<SgdInputs<double>> inputsd2 = {{0.01, 4, 2, 4, 3, 2}}; typedef SgdTest<float> SgdTestF; TEST_P(SgdTestF, Fit) { ASSERT_TRUE(devArrMatch(coef_ref, coef, params.n_col, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(devArrMatch(coef2_ref, coef2, params.n_col, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(devArrMatch(pred_log_ref, pred_log, params.n_row, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(devArrMatch(pred_svm_ref, pred_svm, params.n_row, CompareApproxAbs<float>(params.tol))); } typedef SgdTest<double> SgdTestD; TEST_P(SgdTestD, Fit) { ASSERT_TRUE(devArrMatch(coef_ref, coef, params.n_col, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE(devArrMatch(coef2_ref, coef2, params.n_col, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE(devArrMatch(pred_log_ref, pred_log, params.n_row, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE(devArrMatch(pred_svm_ref, pred_svm, params.n_row, CompareApproxAbs<double>(params.tol))); } INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestD, ::testing::ValuesIn(inputsd2)); } // namespace Solver } // end namespace ML
74458f32c468033e9336cb44cb71ee30f9352214.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <raft/linalg/cusolver_wrappers.h> #include <test_utils.h> #include <matrix/matrix.cuh> #include <solver/sgd.cuh> namespace ML { namespace Solver { using namespace MLCommon; using namespace MLCommon::LinAlg; template <typename T> struct SgdInputs { T tol; int n_row; int n_col; int n_row2; int n_col2; int batch_size; }; template <typename T> class SgdTest : public ::testing::TestWithParam<SgdInputs<T>> { protected: void linearRegressionTest() { params = ::testing::TestWithParam<SgdInputs<T>>::GetParam(); int len = params.n_row * params.n_col; allocate(data, len); allocate(labels, params.n_row); allocate(coef, params.n_col, true); allocate(coef2, params.n_col, true); allocate(coef_ref, params.n_col); allocate(coef2_ref, params.n_col); T data_h[len] = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0}; updateDevice(data, data_h, len, stream); T labels_h[params.n_row] = {6.0, 8.0, 9.0, 11.0}; updateDevice(labels, labels_h, params.n_row, stream); T coef_ref_h[params.n_col] = {2.087, 2.5454557}; updateDevice(coef_ref, coef_ref_h, params.n_col, stream); T coef2_ref_h[params.n_col] = {1.000001, 1.9999998}; updateDevice(coef2_ref, coef2_ref_h, params.n_col, stream); bool fit_intercept = false; intercept = T(0); int epochs = 2000; T lr = T(0.01); ML::lr_type lr_type = ML::lr_type::ADAPTIVE; T power_t = T(0.5); T alpha = T(0.0001); T l1_ratio = T(0.15); bool shuffle = true; T tol = T(1e-10); ML::loss_funct loss = ML::loss_funct::SQRD_LOSS; MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE; int n_iter_no_change = 10; sgdFit(handle, data, params.n_row, params.n_col, labels, coef, &intercept, fit_intercept, params.batch_size, epochs, lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change, stream); fit_intercept = true; intercept2 = T(0); sgdFit(handle, data, params.n_row, params.n_col, labels, coef2, &intercept2, fit_intercept, params.batch_size, epochs, ML::lr_type::CONSTANT, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change, stream); } void logisticRegressionTest() { params = ::testing::TestWithParam<SgdInputs<T>>::GetParam(); int len = params.n_row2 * params.n_col2; T *coef_class; allocate(data_logreg, len); allocate(data_logreg_test, len); allocate(labels_logreg, params.n_row2); allocate(coef_class, params.n_col2, true); allocate(pred_log, params.n_row2); allocate(pred_log_ref, params.n_row2); T data_h[len] = {0.1, -2.1, 5.4, 5.4, -1.5, -2.15, 2.65, 2.65, 3.25, -0.15, -7.35, -7.35}; updateDevice(data_logreg, data_h, len, stream); T data_test_h[len] = {0.3, 1.1, 2.1, -10.1, 0.5, 2.5, -3.55, -20.5, -1.3, 3.0, -5.0, 15.0}; updateDevice(data_logreg_test, data_test_h, len, stream); T labels_logreg_h[params.n_row2] = {0.0, 1.0, 1.0, 0.0}; updateDevice(labels_logreg, labels_logreg_h, params.n_row2, stream); T pred_log_ref_h[params.n_row2] = {1.0, 0.0, 1.0, 1.0}; updateDevice(pred_log_ref, pred_log_ref_h, 
params.n_row2, stream); bool fit_intercept = true; T intercept_class = T(0); int epochs = 1000; T lr = T(0.05); ML::lr_type lr_type = ML::lr_type::CONSTANT; T power_t = T(0.5); T alpha = T(0.0); T l1_ratio = T(0.0); bool shuffle = false; T tol = T(0.0); ML::loss_funct loss = ML::loss_funct::LOG; MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE; int n_iter_no_change = 10; sgdFit(handle, data_logreg, params.n_row2, params.n_col2, labels_logreg, coef_class, &intercept_class, fit_intercept, params.batch_size, epochs, lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change, stream); sgdPredictBinaryClass(handle, data_logreg_test, params.n_row2, params.n_col2, coef_class, intercept_class, pred_log, loss, stream); CUDA_CHECK(cudaFree(coef_class)); } void svmTest() { params = ::testing::TestWithParam<SgdInputs<T>>::GetParam(); int len = params.n_row2 * params.n_col2; T *coef_class; allocate(data_svmreg, len); allocate(data_svmreg_test, len); allocate(labels_svmreg, params.n_row2); allocate(coef_class, params.n_col2, true); allocate(pred_svm, params.n_row2); allocate(pred_svm_ref, params.n_row2); T data_h[len] = {0.1, -2.1, 5.4, 5.4, -1.5, -2.15, 2.65, 2.65, 3.25, -0.15, -7.35, -7.35}; updateDevice(data_svmreg, data_h, len, stream); T data_test_h[len] = {0.3, 1.1, 2.1, -10.1, 0.5, 2.5, -3.55, -20.5, -1.3, 3.0, -5.0, 15.0}; updateDevice(data_svmreg_test, data_test_h, len, stream); T labels_svmreg_h[params.n_row2] = {0.0, 1.0, 1.0, 0.0}; updateDevice(labels_svmreg, labels_svmreg_h, params.n_row2, stream); T pred_svm_ref_h[params.n_row2] = {1.0, 0.0, 1.0, 1.0}; updateDevice(pred_svm_ref, pred_svm_ref_h, params.n_row2, stream); bool fit_intercept = true; T intercept_class = T(0); int epochs = 1000; T lr = T(0.05); ML::lr_type lr_type = ML::lr_type::CONSTANT; T power_t = T(0.5); T alpha = T(1) / T(epochs); T l1_ratio = T(0.0); bool shuffle = false; T tol = T(0.0); ML::loss_funct loss = ML::loss_funct::HINGE; MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::L2; int n_iter_no_change = 10; sgdFit(handle, data_svmreg, params.n_row2, params.n_col2, labels_svmreg, coef_class, &intercept_class, fit_intercept, params.batch_size, epochs, lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change, stream); sgdPredictBinaryClass(handle, data_svmreg_test, params.n_row2, params.n_col2, coef_class, intercept_class, pred_svm, loss, stream); CUDA_CHECK(cudaFree(coef_class)); } void SetUp() override { CUDA_CHECK(cudaStreamCreate(&stream)); handle.set_stream(stream); linearRegressionTest(); logisticRegressionTest(); svmTest(); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(labels)); CUDA_CHECK(cudaFree(coef)); CUDA_CHECK(cudaFree(coef_ref)); CUDA_CHECK(cudaFree(coef2)); CUDA_CHECK(cudaFree(coef2_ref)); CUDA_CHECK(cudaFree(data_logreg)); CUDA_CHECK(cudaFree(data_logreg_test)); CUDA_CHECK(cudaFree(labels_logreg)); CUDA_CHECK(cudaFree(data_svmreg)); CUDA_CHECK(cudaFree(data_svmreg_test)); CUDA_CHECK(cudaFree(labels_svmreg)); CUDA_CHECK(cudaFree(pred_svm)); CUDA_CHECK(cudaFree(pred_svm_ref)); CUDA_CHECK(cudaFree(pred_log)); CUDA_CHECK(cudaFree(pred_log_ref)); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: SgdInputs<T> params; T *data, *labels, *coef, *coef_ref; T *coef2, *coef2_ref; T *data_logreg, *data_logreg_test, *labels_logreg; T *data_svmreg, *data_svmreg_test, *labels_svmreg; T *pred_svm, *pred_svm_ref, *pred_log, *pred_log_ref; T intercept, intercept2; cudaStream_t stream; raft::handle_t handle; }; 
const std::vector<SgdInputs<float>> inputsf2 = {{0.01f, 4, 2, 4, 3, 2}}; const std::vector<SgdInputs<double>> inputsd2 = {{0.01, 4, 2, 4, 3, 2}}; typedef SgdTest<float> SgdTestF; TEST_P(SgdTestF, Fit) { ASSERT_TRUE(devArrMatch(coef_ref, coef, params.n_col, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(devArrMatch(coef2_ref, coef2, params.n_col, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(devArrMatch(pred_log_ref, pred_log, params.n_row, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(devArrMatch(pred_svm_ref, pred_svm, params.n_row, CompareApproxAbs<float>(params.tol))); } typedef SgdTest<double> SgdTestD; TEST_P(SgdTestD, Fit) { ASSERT_TRUE(devArrMatch(coef_ref, coef, params.n_col, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE(devArrMatch(coef2_ref, coef2, params.n_col, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE(devArrMatch(pred_log_ref, pred_log, params.n_row, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE(devArrMatch(pred_svm_ref, pred_svm, params.n_row, CompareApproxAbs<double>(params.tol))); } INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestD, ::testing::ValuesIn(inputsd2)); } // namespace Solver } // end namespace ML
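The SGD test pair above validates fits by comparing device-resident coefficient and prediction arrays against reference values within an absolute tolerance (devArrMatch with CompareApproxAbs). Below is a minimal, stand-alone sketch of that comparison pattern in plain CUDA: copy both device arrays to the host and check element-wise absolute error. The helper name approxMatchOnHost is illustrative only and is not part of the original test utilities.

// Minimal sketch (illustrative only): the cuML test above relies on
// devArrMatch/CompareApproxAbs; this stand-alone version copies the device
// arrays back and checks |actual - expected| <= tol element-wise.
#include <cuda_runtime.h>
#include <cmath>
#include <cstdio>
#include <vector>

// Hypothetical helper, not taken from the original test code.
bool approxMatchOnHost(const float *dExpected, const float *dActual, int n, float tol) {
  std::vector<float> e(n), a(n);
  cudaMemcpy(e.data(), dExpected, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(a.data(), dActual,   n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) {
    if (std::fabs(e[i] - a[i]) > tol) {
      std::printf("mismatch at %d: expected %f, got %f\n", i, e[i], a[i]);
      return false;
    }
  }
  return true;
}

With the parameters used above ({0.01f, 4, 2, 4, 3, 2}), this corresponds to accepting coefficients and predictions that agree with the references to within an absolute tolerance of 0.01, which is what CompareApproxAbs<float>(params.tol) encodes in the test.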
a1c847c59720eda0ac1e506dd74a7c11a735b5c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "lab3.h" #include <cstdio> //#include "Timer.h" #include <iostream> using namespace std; __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } __global__ void SimpleClone( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; if (yt < ht and xt < wt and mask[curt] > 127.0f) { const int yb = oy+yt, xb = ox+xt; const int curb = wb*yb+xb; if (0 <= yb and yb < hb and 0 <= xb and xb < wb) { output[curb*3+0] = target[curt*3+0]; output[curb*3+1] = target[curt*3+1]; output[curb*3+2] = target[curt*3+2]; } } } // ref: https://github.com/gdoggg2032/GPGPU_Programming_2016S/blob/master/lab3/lab3.cu __global__ void CalculateFixed( const float *background, const float *target, const float *mask, float *fixed, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; if (yt < ht and xt < wt) { const int curt = wt*yt+xt; const int yb = oy+yt, xb = ox+xt; if (0 < yb and yb < hb and 0 < xb and xb < wb) { const int curb = wb*yb+xb; fixed[curt*3+0] = 0; fixed[curt*3+1] = 0; fixed[curt*3+2] = 0; if (0 < yt) { fixed[curt*3+0] += target[curt*3+0]-target[(curt-wt)*3+0]; fixed[curt*3+1] += target[curt*3+1]-target[(curt-wt)*3+1]; fixed[curt*3+2] += target[curt*3+2]-target[(curt-wt)*3+2]; } if(yt < ht-1) { fixed[curt*3+0] += target[curt*3+0]-target[(curt+wt)*3+0]; fixed[curt*3+1] += target[curt*3+1]-target[(curt+wt)*3+1]; fixed[curt*3+2] += target[curt*3+2]-target[(curt+wt)*3+2]; } if(0 < xt) { fixed[curt*3+0] += target[curt*3+0]-target[(curt-1)*3+0]; fixed[curt*3+1] += target[curt*3+1]-target[(curt-1)*3+1]; fixed[curt*3+2] += target[curt*3+2]-target[(curt-1)*3+2]; } if(xt < wt-1) { fixed[curt*3+0] += target[curt*3+0]-target[(curt+1)*3+0]; fixed[curt*3+1] += target[curt*3+1]-target[(curt+1)*3+1]; fixed[curt*3+2] += target[curt*3+2]-target[(curt+1)*3+2]; } // 0 < yb and // yb < hb-1 and // 0 < xb and // xb < wb-1 and // yt == 0 || // yt == ht-1 || // xt == 0 || // xt == wt-1 || if(yt == 0 || mask[curt-wt] < 127.0f) { fixed[curt*3+0] += background[(curb-wb)*3+0]; fixed[curt*3+1] += background[(curb-wb)*3+1]; fixed[curt*3+2] += background[(curb-wb)*3+2]; } if(yt == ht-1 || mask[curt+wt] < 127.0f) { fixed[curt*3+0] += background[(curb+wb)*3+0]; fixed[curt*3+1] += background[(curb+wb)*3+1]; fixed[curt*3+2] += background[(curb+wb)*3+2]; } if(xt == 0 || mask[curt-1] < 127.0f) { fixed[curt*3+0] += background[(curb-1)*3+0]; fixed[curt*3+1] += background[(curb-1)*3+1]; fixed[curt*3+2] += background[(curb-1)*3+2]; } if(xt == wt-1 || mask[curt+1] < 127.0f) { fixed[curt*3+0] += background[(curb+1)*3+0]; fixed[curt*3+1] += background[(curb+1)*3+1]; fixed[curt*3+2] += background[(curb+1)*3+2]; } if( mask[curt] < 127.0f ) { fixed[curt*3+0] = background[curb*3+0]; fixed[curt*3+1] = background[curb*3+1]; fixed[curt*3+2] = background[curb*3+2]; } } } } __global__ void PoissonImageCloningIteration( const float *background, float *fixed, const float *mask, float *buf1, float *buf2, // buf1 -> buf2 int wt, int ht ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; 
const int xt = blockIdx.x * blockDim.x + threadIdx.x; if (yt < ht and xt < wt) { const int curt = wt*yt+xt; if(mask[curt] > 127.0f) { buf2[curt*3+0] = 0; buf2[curt*3+1] = 0; buf2[curt*3+2] = 0; if (0 < yt and mask[curt-wt] > 127.0f) { buf2[curt*3+0] += buf1[(curt-wt)*3+0]; buf2[curt*3+1] += buf1[(curt-wt)*3+1]; buf2[curt*3+2] += buf1[(curt-wt)*3+2]; } if(yt+1 < ht and mask[curt+wt] > 127.0f) { buf2[curt*3+0] += buf1[(curt+wt)*3+0]; buf2[curt*3+1] += buf1[(curt+wt)*3+1]; buf2[curt*3+2] += buf1[(curt+wt)*3+2]; } if(0 < xt and mask[curt-1] > 127.0f) { buf2[curt*3+0] += buf1[(curt-1)*3+0]; buf2[curt*3+1] += buf1[(curt-1)*3+1]; buf2[curt*3+2] += buf1[(curt-1)*3+2]; } if(xt+1 < wt and mask[curt+1] > 127.0f) { buf2[curt*3+0] += buf1[(curt+1)*3+0]; buf2[curt*3+1] += buf1[(curt+1)*3+1]; buf2[curt*3+2] += buf1[(curt+1)*3+2]; } buf2[curt*3+0] += fixed[curt*3+0]; buf2[curt*3+1] += fixed[curt*3+1]; buf2[curt*3+2] += fixed[curt*3+2]; buf2[curt*3+0] /= 4; buf2[curt*3+1] /= 4; buf2[curt*3+2] /= 4; } else { buf2[curt*3+0] = fixed[curt*3+0]; buf2[curt*3+1] = fixed[curt*3+1]; buf2[curt*3+2] = fixed[curt*3+2]; } } } __global__ void Downsample( const float *original, float *sampled, const int wt, const int ht, int scale ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; if (yt < ht/scale and xt < wt/scale) { const int curt = wt*yt+xt; sampled[curt] = original[curt*scale]; } } __global__ void Upsample( float *sampled, float *original, const int wt, const int ht, int scale ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; if (yt < ht and xt < wt) { const int curt = wt*yt+xt; const int curs = wt*yt/scale+xt/scale; original[curt] = sampled[curs]; } } void PoissonImageCloning( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { //Timer timer_count_position; //timer_count_position.Start(); // set up float *fixed, *buf1, *buf2; hipMalloc(&fixed, 3*wt*ht*sizeof(float)); hipMalloc(&buf1, 3*wt*ht*sizeof(float)); hipMalloc(&buf2, 3*wt*ht*sizeof(float)); float *fixed_scaled, *buf1_scaled, *buf2_scaled, *mask_scaled; hipMalloc(&fixed_scaled, 3*wt*ht*sizeof(float)); hipMalloc(&buf1_scaled, 3*wt*ht*sizeof(float)); hipMalloc(&buf2_scaled, 3*wt*ht*sizeof(float)); hipMalloc(&mask_scaled, wt*ht*sizeof(float)); // initialize the iteration dim3 gdim(CeilDiv(wt,32), CeilDiv(ht,16)), bdim(32,16); /* float* fixed = new float[3*wt*ht*sizeof(float)]; float* buf1 = new float[3*wt*ht*sizeof(float)]; float* buf2 = new float[3*wt*ht*sizeof(float)]; */ //printf("debug0\n"); hipLaunchKernelGGL(( CalculateFixed), dim3(gdim), dim3(bdim), 0, 0, background, target, mask, fixed, wb, hb, wt, ht, oy, ox ); hipMemcpy(buf1, target, sizeof(float)*3*wt*ht, hipMemcpyDeviceToDevice); /* for(int i=0; i<3*wt*ht; i++) buf1[i] = target[i]; */ //printf("debug1\n"); int level = 8; //int iter_num = 5000; //dim3 gdim(CeilDiv(wt, 32*level), CeilDiv(ht, 16*level)), bdim(32, 16); //?????????????????????????????????????????????????? 
hipLaunchKernelGGL(( Downsample), dim3(gdim), dim3(bdim), 0, 0, mask, mask_scaled, wt, ht, level); hipLaunchKernelGGL(( Downsample), dim3(gdim), dim3(bdim), 0, 0, fixed, fixed_scaled, wt, ht, level); hipLaunchKernelGGL(( Downsample), dim3(gdim), dim3(bdim), 0, 0, buf1, buf1_scaled, wt, ht, level); // iterate for (int i = 0; i < 235; ++i) { hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, background, fixed_scaled, mask_scaled, buf1_scaled, buf2, wt/level, ht/level ); hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, background, fixed_scaled, mask_scaled, buf2, buf1_scaled, wt/level, ht/level ); } level /= 2; //dim3 gdim(CeilDiv(wt, 32*level), CeilDiv(ht, 16*level)), bdim(32, 16); hipLaunchKernelGGL(( Downsample), dim3(gdim), dim3(bdim), 0, 0, mask, mask_scaled, wt, ht, level); hipLaunchKernelGGL(( Downsample), dim3(gdim), dim3(bdim), 0, 0, fixed, fixed_scaled, wt, ht, level); hipLaunchKernelGGL(( Upsample), dim3(gdim), dim3(bdim), 0, 0, buf1, buf1_scaled, wt, ht, 2); for (int i = 0; i < 941; ++i) { hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, background, fixed_scaled, mask_scaled, buf1_scaled, buf2, wt/level, ht/level ); hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, background, fixed_scaled, mask_scaled, buf2, buf1_scaled, wt/level, ht/level ); } level /= 2; //dim3 gdim(CeilDiv(wt, 32*level), CeilDiv(ht, 16*level)), bdim(32, 16); hipLaunchKernelGGL(( Downsample), dim3(gdim), dim3(bdim), 0, 0, mask, mask_scaled, wt, ht, level); hipLaunchKernelGGL(( Downsample), dim3(gdim), dim3(bdim), 0, 0, fixed, fixed_scaled, wt, ht, level); hipLaunchKernelGGL(( Upsample), dim3(gdim), dim3(bdim), 0, 0, buf1, buf1_scaled, wt, ht, 2); for (int i = 0; i < 3764; ++i) { hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, background, fixed_scaled, mask_scaled, buf1_scaled, buf2, wt/level, ht/level ); hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, background, fixed_scaled, mask_scaled, buf2, buf1_scaled, wt/level, ht/level ); } hipLaunchKernelGGL(( Upsample), dim3(gdim), dim3(bdim), 0, 0, buf1, buf1_scaled, wt, ht, 2); for (int i = 0; i < 15060; ++i) { hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, background, fixed, mask, buf1, buf2, wt, ht ); hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, background, fixed, mask, buf2, buf1, wt, ht ); } // copy the image back hipMemcpy(output, background, wb*hb*sizeof(float)*3, hipMemcpyDeviceToDevice); /* for(int i=0; i<3*wb*hb; i++) output[i] = background[i]; */ hipLaunchKernelGGL(( SimpleClone), dim3(gdim), dim3(bdim), 0, 0, background, buf1, mask, output, wb, hb, wt, ht, oy, ox ); /* clone( background, buf1, mask, output, wb, hb, wt, ht, oy, ox ); */ // clean up hipFree(fixed); hipFree(buf1); hipFree(buf2); hipFree(fixed_scaled); hipFree(buf1_scaled); hipFree(buf2_scaled); hipFree(mask_scaled); //timer_count_position.Pause(); //printf_timer(timer_count_position); }
a1c847c59720eda0ac1e506dd74a7c11a735b5c9.cu
#include "lab3.h" #include <cstdio> //#include "Timer.h" #include <iostream> using namespace std; __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } __global__ void SimpleClone( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; if (yt < ht and xt < wt and mask[curt] > 127.0f) { const int yb = oy+yt, xb = ox+xt; const int curb = wb*yb+xb; if (0 <= yb and yb < hb and 0 <= xb and xb < wb) { output[curb*3+0] = target[curt*3+0]; output[curb*3+1] = target[curt*3+1]; output[curb*3+2] = target[curt*3+2]; } } } // ref: https://github.com/gdoggg2032/GPGPU_Programming_2016S/blob/master/lab3/lab3.cu __global__ void CalculateFixed( const float *background, const float *target, const float *mask, float *fixed, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; if (yt < ht and xt < wt) { const int curt = wt*yt+xt; const int yb = oy+yt, xb = ox+xt; if (0 < yb and yb < hb and 0 < xb and xb < wb) { const int curb = wb*yb+xb; fixed[curt*3+0] = 0; fixed[curt*3+1] = 0; fixed[curt*3+2] = 0; if (0 < yt) { fixed[curt*3+0] += target[curt*3+0]-target[(curt-wt)*3+0]; fixed[curt*3+1] += target[curt*3+1]-target[(curt-wt)*3+1]; fixed[curt*3+2] += target[curt*3+2]-target[(curt-wt)*3+2]; } if(yt < ht-1) { fixed[curt*3+0] += target[curt*3+0]-target[(curt+wt)*3+0]; fixed[curt*3+1] += target[curt*3+1]-target[(curt+wt)*3+1]; fixed[curt*3+2] += target[curt*3+2]-target[(curt+wt)*3+2]; } if(0 < xt) { fixed[curt*3+0] += target[curt*3+0]-target[(curt-1)*3+0]; fixed[curt*3+1] += target[curt*3+1]-target[(curt-1)*3+1]; fixed[curt*3+2] += target[curt*3+2]-target[(curt-1)*3+2]; } if(xt < wt-1) { fixed[curt*3+0] += target[curt*3+0]-target[(curt+1)*3+0]; fixed[curt*3+1] += target[curt*3+1]-target[(curt+1)*3+1]; fixed[curt*3+2] += target[curt*3+2]-target[(curt+1)*3+2]; } // 0 < yb and // yb < hb-1 and // 0 < xb and // xb < wb-1 and // yt == 0 || // yt == ht-1 || // xt == 0 || // xt == wt-1 || if(yt == 0 || mask[curt-wt] < 127.0f) { fixed[curt*3+0] += background[(curb-wb)*3+0]; fixed[curt*3+1] += background[(curb-wb)*3+1]; fixed[curt*3+2] += background[(curb-wb)*3+2]; } if(yt == ht-1 || mask[curt+wt] < 127.0f) { fixed[curt*3+0] += background[(curb+wb)*3+0]; fixed[curt*3+1] += background[(curb+wb)*3+1]; fixed[curt*3+2] += background[(curb+wb)*3+2]; } if(xt == 0 || mask[curt-1] < 127.0f) { fixed[curt*3+0] += background[(curb-1)*3+0]; fixed[curt*3+1] += background[(curb-1)*3+1]; fixed[curt*3+2] += background[(curb-1)*3+2]; } if(xt == wt-1 || mask[curt+1] < 127.0f) { fixed[curt*3+0] += background[(curb+1)*3+0]; fixed[curt*3+1] += background[(curb+1)*3+1]; fixed[curt*3+2] += background[(curb+1)*3+2]; } if( mask[curt] < 127.0f ) { fixed[curt*3+0] = background[curb*3+0]; fixed[curt*3+1] = background[curb*3+1]; fixed[curt*3+2] = background[curb*3+2]; } } } } __global__ void PoissonImageCloningIteration( const float *background, float *fixed, const float *mask, float *buf1, float *buf2, // buf1 -> buf2 int wt, int ht ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; if (yt < ht and xt < wt) { const int 
curt = wt*yt+xt; if(mask[curt] > 127.0f) { buf2[curt*3+0] = 0; buf2[curt*3+1] = 0; buf2[curt*3+2] = 0; if (0 < yt and mask[curt-wt] > 127.0f) { buf2[curt*3+0] += buf1[(curt-wt)*3+0]; buf2[curt*3+1] += buf1[(curt-wt)*3+1]; buf2[curt*3+2] += buf1[(curt-wt)*3+2]; } if(yt+1 < ht and mask[curt+wt] > 127.0f) { buf2[curt*3+0] += buf1[(curt+wt)*3+0]; buf2[curt*3+1] += buf1[(curt+wt)*3+1]; buf2[curt*3+2] += buf1[(curt+wt)*3+2]; } if(0 < xt and mask[curt-1] > 127.0f) { buf2[curt*3+0] += buf1[(curt-1)*3+0]; buf2[curt*3+1] += buf1[(curt-1)*3+1]; buf2[curt*3+2] += buf1[(curt-1)*3+2]; } if(xt+1 < wt and mask[curt+1] > 127.0f) { buf2[curt*3+0] += buf1[(curt+1)*3+0]; buf2[curt*3+1] += buf1[(curt+1)*3+1]; buf2[curt*3+2] += buf1[(curt+1)*3+2]; } buf2[curt*3+0] += fixed[curt*3+0]; buf2[curt*3+1] += fixed[curt*3+1]; buf2[curt*3+2] += fixed[curt*3+2]; buf2[curt*3+0] /= 4; buf2[curt*3+1] /= 4; buf2[curt*3+2] /= 4; } else { buf2[curt*3+0] = fixed[curt*3+0]; buf2[curt*3+1] = fixed[curt*3+1]; buf2[curt*3+2] = fixed[curt*3+2]; } } } __global__ void Downsample( const float *original, float *sampled, const int wt, const int ht, int scale ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; if (yt < ht/scale and xt < wt/scale) { const int curt = wt*yt+xt; sampled[curt] = original[curt*scale]; } } __global__ void Upsample( float *sampled, float *original, const int wt, const int ht, int scale ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; if (yt < ht and xt < wt) { const int curt = wt*yt+xt; const int curs = wt*yt/scale+xt/scale; original[curt] = sampled[curs]; } } void PoissonImageCloning( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { //Timer timer_count_position; //timer_count_position.Start(); // set up float *fixed, *buf1, *buf2; cudaMalloc(&fixed, 3*wt*ht*sizeof(float)); cudaMalloc(&buf1, 3*wt*ht*sizeof(float)); cudaMalloc(&buf2, 3*wt*ht*sizeof(float)); float *fixed_scaled, *buf1_scaled, *buf2_scaled, *mask_scaled; cudaMalloc(&fixed_scaled, 3*wt*ht*sizeof(float)); cudaMalloc(&buf1_scaled, 3*wt*ht*sizeof(float)); cudaMalloc(&buf2_scaled, 3*wt*ht*sizeof(float)); cudaMalloc(&mask_scaled, wt*ht*sizeof(float)); // initialize the iteration dim3 gdim(CeilDiv(wt,32), CeilDiv(ht,16)), bdim(32,16); /* float* fixed = new float[3*wt*ht*sizeof(float)]; float* buf1 = new float[3*wt*ht*sizeof(float)]; float* buf2 = new float[3*wt*ht*sizeof(float)]; */ //printf("debug0\n"); CalculateFixed<<<gdim, bdim>>>( background, target, mask, fixed, wb, hb, wt, ht, oy, ox ); cudaMemcpy(buf1, target, sizeof(float)*3*wt*ht, cudaMemcpyDeviceToDevice); /* for(int i=0; i<3*wt*ht; i++) buf1[i] = target[i]; */ //printf("debug1\n"); int level = 8; //int iter_num = 5000; //dim3 gdim(CeilDiv(wt, 32*level), CeilDiv(ht, 16*level)), bdim(32, 16); //?????????????????????????????????????????????????? 
Downsample<<<gdim, bdim>>>(mask, mask_scaled, wt, ht, level); Downsample<<<gdim, bdim>>>(fixed, fixed_scaled, wt, ht, level); Downsample<<<gdim, bdim>>>(buf1, buf1_scaled, wt, ht, level); // iterate for (int i = 0; i < 235; ++i) { PoissonImageCloningIteration<<<gdim, bdim>>>( background, fixed_scaled, mask_scaled, buf1_scaled, buf2, wt/level, ht/level ); PoissonImageCloningIteration<<<gdim, bdim>>>( background, fixed_scaled, mask_scaled, buf2, buf1_scaled, wt/level, ht/level ); } level /= 2; //dim3 gdim(CeilDiv(wt, 32*level), CeilDiv(ht, 16*level)), bdim(32, 16); Downsample<<<gdim, bdim>>>(mask, mask_scaled, wt, ht, level); Downsample<<<gdim, bdim>>>(fixed, fixed_scaled, wt, ht, level); Upsample<<<gdim, bdim>>>(buf1, buf1_scaled, wt, ht, 2); for (int i = 0; i < 941; ++i) { PoissonImageCloningIteration<<<gdim, bdim>>>( background, fixed_scaled, mask_scaled, buf1_scaled, buf2, wt/level, ht/level ); PoissonImageCloningIteration<<<gdim, bdim>>>( background, fixed_scaled, mask_scaled, buf2, buf1_scaled, wt/level, ht/level ); } level /= 2; //dim3 gdim(CeilDiv(wt, 32*level), CeilDiv(ht, 16*level)), bdim(32, 16); Downsample<<<gdim, bdim>>>(mask, mask_scaled, wt, ht, level); Downsample<<<gdim, bdim>>>(fixed, fixed_scaled, wt, ht, level); Upsample<<<gdim, bdim>>>(buf1, buf1_scaled, wt, ht, 2); for (int i = 0; i < 3764; ++i) { PoissonImageCloningIteration<<<gdim, bdim>>>( background, fixed_scaled, mask_scaled, buf1_scaled, buf2, wt/level, ht/level ); PoissonImageCloningIteration<<<gdim, bdim>>>( background, fixed_scaled, mask_scaled, buf2, buf1_scaled, wt/level, ht/level ); } Upsample<<<gdim, bdim>>>(buf1, buf1_scaled, wt, ht, 2); for (int i = 0; i < 15060; ++i) { PoissonImageCloningIteration<<<gdim, bdim>>>( background, fixed, mask, buf1, buf2, wt, ht ); PoissonImageCloningIteration<<<gdim, bdim>>>( background, fixed, mask, buf2, buf1, wt, ht ); } // copy the image back cudaMemcpy(output, background, wb*hb*sizeof(float)*3, cudaMemcpyDeviceToDevice); /* for(int i=0; i<3*wb*hb; i++) output[i] = background[i]; */ SimpleClone<<<gdim, bdim>>>( background, buf1, mask, output, wb, hb, wt, ht, oy, ox ); /* clone( background, buf1, mask, output, wb, hb, wt, ht, oy, ox ); */ // clean up cudaFree(fixed); cudaFree(buf1); cudaFree(buf2); cudaFree(fixed_scaled); cudaFree(buf1_scaled); cudaFree(buf2_scaled); cudaFree(mask_scaled); //timer_count_position.Pause(); //printf_timer(timer_count_position); }
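The lab3 pair above launches PoissonImageCloningIteration twice per loop iteration so that buf1 and buf2 alternate as source and destination (a Jacobi-style ping-pong), and runs this scheme coarse-to-fine over downsampled buffers. The fragment below is a minimal sketch of the same ping-pong pattern on a toy 1-D averaging problem; the kernel name jacobiStep1D and the launch configuration are illustrative assumptions, not taken from the original code.

// Minimal sketch (illustrative): the two-launch ping-pong idea used by
// PoissonImageCloningIteration above, applied to a toy 1-D averaging problem.
#include <cuda_runtime.h>

__global__ void jacobiStep1D(const float *in, float *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  if (i == 0 || i == n - 1) { out[i] = in[i]; return; }  // carry boundary values over
  out[i] = 0.5f * (in[i - 1] + in[i + 1]);               // simple neighbour average
}

// bufA and bufB are device buffers of length n holding the same initial data.
void runPingPong(float *bufA, float *bufB, int n, int iters) {
  int threads = 256, blocks = (n + threads - 1) / threads;
  for (int it = 0; it < iters; ++it) {
    jacobiStep1D<<<blocks, threads>>>(bufA, bufB, n);  // A -> B
    jacobiStep1D<<<blocks, threads>>>(bufB, bufA, n);  // B -> A, result ends in A
  }
  cudaDeviceSynchronize();
}

Pairing the two launches keeps the final result in the first buffer without an explicit pointer swap, which is why the original code always issues the buf1->buf2 and buf2->buf1 launches together inside each loop.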
0ae60475b3512056a58f8ee8266794760cd2b5e0.hip
// !!! This is a file automatically generated by hipify!!! #include <cudnn.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 1 #define TW 7 #define TC 16 #define C 64 #define N 32 #define H 224 #define W 224 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(hipError_t code) { if (code != hipSuccess) { std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[128]; __shared__ float pad_temp_shared[2088]; __shared__ float kernel_shared[288]; float pad_temp_shared_local[48]; float kernel_shared_local[12]; for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) { for (int yy_c_init = 0; yy_c_init < 4; ++yy_c_init) { compute_local[(((ff_c_init * 4) + yy_c_init))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 64))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 8))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 72))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 16))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 80))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 24))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 88))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 32))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 96))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 40))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 104))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 48))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 112))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 56))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 120))] = 0.000000e+00f; } } for (int rc_outer = 0; rc_outer < 32; ++rc_outer) { __syncthreads(); for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 19; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) { if (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) < 116) { if (((((((int)threadIdx.z) * 522) + (((int)threadIdx.y) * 38)) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 2088) { if ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 522) { pad_temp_shared[(((((((int)threadIdx.z) * 522) + (((int)threadIdx.y) * 38)) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= ((((int)blockIdx.y) * 56) + (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58))) && 
(((((int)blockIdx.y) * 56) + (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58)) < 225)) && (1 <= ((((int)blockIdx.x) * 16) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)))) && (((((int)blockIdx.x) * 16) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)) < 225)) ? data[((((((((rc_outer * 100352) + ((((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) / 58) * 50176)) + (((int)blockIdx.y) * 12544)) + ((((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58) * 224)) + (((int)blockIdx.x) * 16)) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)) - 225))] : 0.000000e+00f); } } } } for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 3; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) { if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6)) < 16) { if (((((int)threadIdx.z) * 8) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 3)) < 32) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.y) * 2)) + ((int)threadIdx.x)) < 96) { if (((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 288) { if ((((((int)threadIdx.y) * 6) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 72) { if ((((((int)blockIdx.z) * 16) + (((int)threadIdx.z) * 4)) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6)) < 32) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6) * 576)) + (rc_outer * 18)) + ((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) % 6) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))]; } } } } } } } __syncthreads(); for (int rc_inner_outer = 0; rc_inner_outer < 2; ++rc_inner_outer) { for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) { for (int ax2 = 0; ax2 < 6; ++ax2) { pad_temp_shared_local[(ax2)] = pad_temp_shared[((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer))]; pad_temp_shared_local[((ax2 + 6))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 2))]; pad_temp_shared_local[((ax2 + 12))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 4))]; pad_temp_shared_local[((ax2 + 18))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 6))]; pad_temp_shared_local[((ax2 + 24))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 8))]; pad_temp_shared_local[((ax2 + 30))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + 
(((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 10))]; pad_temp_shared_local[((ax2 + 36))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 12))]; pad_temp_shared_local[((ax2 + 42))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 14))]; } for (int ax0 = 0; ax0 < 2; ++ax0) { for (int ax21 = 0; ax21 < 3; ++ax21) { kernel_shared_local[(((ax0 * 3) + ax21))] = kernel_shared[((((((((int)threadIdx.z) * 36) + (ax0 * 18)) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer))]; kernel_shared_local[((((ax0 * 3) + ax21) + 6))] = kernel_shared[(((((((((int)threadIdx.z) * 36) + (ax0 * 18)) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer) + 144))]; } } for (int ry_inner_inner = 0; ry_inner_inner < 3; ++ry_inner_inner) { for (int ff_c = 0; ff_c < 2; ++ff_c) { for (int yy_c = 0; yy_c < 4; ++yy_c) { compute_local[(((ff_c * 4) + yy_c))] = (compute_local[(((ff_c * 4) + yy_c))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 64))] = (compute_local[((((ff_c * 4) + yy_c) + 64))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 8))] = (compute_local[((((ff_c * 4) + yy_c) + 8))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 6))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 72))] = (compute_local[((((ff_c * 4) + yy_c) + 72))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 6))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 16))] = (compute_local[((((ff_c * 4) + yy_c) + 16))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 12))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 80))] = (compute_local[((((ff_c * 4) + yy_c) + 80))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 12))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 24))] = (compute_local[((((ff_c * 4) + yy_c) + 24))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 18))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 88))] = (compute_local[((((ff_c * 4) + yy_c) + 88))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 18))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 32))] = (compute_local[((((ff_c * 4) + yy_c) + 32))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 24))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 96))] = (compute_local[((((ff_c * 4) + yy_c) + 96))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 24))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 40))] = (compute_local[((((ff_c * 4) + yy_c) + 40))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 30))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 104))] = (compute_local[((((ff_c * 4) + yy_c) + 104))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 30))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 48))] 
= (compute_local[((((ff_c * 4) + yy_c) + 48))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 36))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 112))] = (compute_local[((((ff_c * 4) + yy_c) + 112))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 36))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 56))] = (compute_local[((((ff_c * 4) + yy_c) + 56))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 42))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 120))] = (compute_local[((((ff_c * 4) + yy_c) + 120))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 42))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); } } } } } } for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) { for (int yy_inner_inner_inner = 0; yy_inner_inner_inner < 4; ++yy_inner_inner_inner) { compute[(((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)))] = compute_local[(((ff_inner_inner_inner * 4) + yy_inner_inner_inner))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401408))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 64))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 2))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 8))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401410))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 72))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 4))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 16))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401412))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 80))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 6))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 24))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 
896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401414))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 88))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 8))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 32))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401416))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 96))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 10))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 40))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401418))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 104))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 12))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 48))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401420))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 112))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 14))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 56))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401422))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 120))]; } } } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); 
cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); 
cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } 
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 5: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 5; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 6: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 6; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 7: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 7; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); 
int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[0]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[0]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[1]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[0]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[1]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[2]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 7]*data_array[1]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 7]*data_array[2]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 8]*data_array[2]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[3]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[3]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) 
+ 1 * WPAD + tw_id * TW + 5]*data_array[4]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[3]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[4]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[5]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[4]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[5]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 8]*data_array[5]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[6]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[6]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[7]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[7]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[8]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[7]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[8]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 8]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; hipMalloc(&device_input,C*H*W*sizeof(float)); hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice); float *K = new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; convGemm.initialize(); ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize(); ConvFFT convFFT; convFFT.initialize(); float 
*out_cudnn; float *out_cudnn_host = new float[N*H*W]; hipEvent_t event_start; hipEvent_t event_stop; hipEventCreate(&event_start); hipEventCreate(&event_stop); out_cudnn = convGemm.forward(device_input); hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost); out_cudnn = convFFT.forward(device_input); out_cudnn = convWinogradeNon.forward(device_input); float *device_K; float *device_out; hipMalloc(&device_out,H*W*N*sizeof(float)); hipMemset(device_out,0,H*W*N*sizeof(float)); hipMalloc(&device_K,C*N*9*sizeof(float)); hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice); hipEventRecord(event_start); convGemm.forward(device_input); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float cudnnGemmTime; hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop); hipEventRecord(event_start); convWinogradeNon.forward(device_input); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float cudnnWinogradeTimeNon; hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop); hipEventRecord(event_start); convFFT.forward(device_input); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float cudnnFFTTime; hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop); dim3 grid(14,4,2); dim3 block(2,14,4); hipEventRecord(event_start); hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float time_tvm; hipEventElapsedTime(&time_tvm, event_start, event_stop); float *out_tvm = new float[N*H*W]; hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost); hipMemset(device_out, 0, sizeof(float)*N*H*W); chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4)); hipEventRecord(event_start); hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float time_tdc; hipEventElapsedTime(&time_tdc, event_start, event_stop); float *out_tdc = new float[N*H*W]; hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost); ofstream outfile; char buffer[1000]; int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W, cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc, cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc); outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app); outfile << buffer; float difference = check_diff(out_cudnn_host, out_tdc, N*H*W); cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<< time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<< cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl; return 0; }
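Both sides of this pair raise the conv2d kernel's dynamic shared-memory limit (hipFuncSetAttribute above, cudaFuncSetAttribute in the CUDA original below) before launching it with TC*(TH+2)*(WPAD)*4 bytes of extern shared storage. The following is a minimal sketch of that opt-in pattern in plain CUDA C++; sumKernel, its sizes, and all names are illustrative stand-ins rather than code from this file, and the attribute call only matters when the requested bytes exceed the device's default per-block limit.

#include <cstdio>
#include <cuda_runtime.h>

// Toy kernel that reduces its input in dynamically sized shared memory.
__global__ void sumKernel(const float *in, float *out, int n) {
    extern __shared__ float buf[];            // size supplied at launch time
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    buf[threadIdx.x] = (i < n) ? in[i] : 0.0f;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) buf[threadIdx.x] += buf[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0) atomicAdd(out, buf[0]);
}

int main() {
    const int n = 1 << 20, threads = 256;
    const int shmemBytes = threads * (int)sizeof(float);   // illustrative size
    float *in, *out;
    cudaMalloc(&in, n * sizeof(float));
    cudaMalloc(&out, sizeof(float));
    cudaMemset(in, 0, n * sizeof(float));
    cudaMemset(out, 0, sizeof(float));
    // Opt in to a larger dynamic shared-memory cap before launching; this is
    // the same call the benchmark makes for conv2d, and it is only required
    // when shmemBytes exceeds the default per-block limit.
    cudaFuncSetAttribute(sumKernel, cudaFuncAttributeMaxDynamicSharedMemorySize, shmemBytes);
    sumKernel<<<(n + threads - 1) / threads, threads, shmemBytes>>>(in, out, n);
    cudaDeviceSynchronize();
    printf("done: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(in);
    cudaFree(out);
    return 0;
}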
0ae60475b3512056a58f8ee8266794760cd2b5e0.cu
#include <cudnn.h> #include <stdio.h> #include <cuda.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 1 #define TW 7 #define TC 16 #define C 64 #define N 32 #define H 224 #define W 224 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(cudaError_t code) { if (code != cudaSuccess) { std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[128]; __shared__ float pad_temp_shared[2088]; __shared__ float kernel_shared[288]; float pad_temp_shared_local[48]; float kernel_shared_local[12]; for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) { for (int yy_c_init = 0; yy_c_init < 4; ++yy_c_init) { compute_local[(((ff_c_init * 4) + yy_c_init))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 64))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 8))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 72))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 16))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 80))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 24))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 88))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 32))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 96))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 40))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 104))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 48))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 112))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 56))] = 0.000000e+00f; compute_local[((((ff_c_init * 4) + yy_c_init) + 120))] = 0.000000e+00f; } } for (int rc_outer = 0; rc_outer < 32; ++rc_outer) { __syncthreads(); for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 19; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) { if (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) < 116) { if (((((((int)threadIdx.z) * 522) + (((int)threadIdx.y) * 38)) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 2088) { if ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 522) { pad_temp_shared[(((((((int)threadIdx.z) * 522) + (((int)threadIdx.y) * 38)) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= ((((int)blockIdx.y) * 56) + (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58))) && (((((int)blockIdx.y) * 56) + (((((int)threadIdx.z) * 29) + 
((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58)) < 225)) && (1 <= ((((int)blockIdx.x) * 16) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)))) && (((((int)blockIdx.x) * 16) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)) < 225)) ? data[((((((((rc_outer * 100352) + ((((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) / 58) * 50176)) + (((int)blockIdx.y) * 12544)) + ((((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58) * 224)) + (((int)blockIdx.x) * 16)) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)) - 225))] : 0.000000e+00f); } } } } for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 3; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) { if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6)) < 16) { if (((((int)threadIdx.z) * 8) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 3)) < 32) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.y) * 2)) + ((int)threadIdx.x)) < 96) { if (((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 288) { if ((((((int)threadIdx.y) * 6) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 72) { if ((((((int)blockIdx.z) * 16) + (((int)threadIdx.z) * 4)) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6)) < 32) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6) * 576)) + (rc_outer * 18)) + ((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) % 6) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))]; } } } } } } } __syncthreads(); for (int rc_inner_outer = 0; rc_inner_outer < 2; ++rc_inner_outer) { for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) { for (int ax2 = 0; ax2 < 6; ++ax2) { pad_temp_shared_local[(ax2)] = pad_temp_shared[((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer))]; pad_temp_shared_local[((ax2 + 6))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 2))]; pad_temp_shared_local[((ax2 + 12))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 4))]; pad_temp_shared_local[((ax2 + 18))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 6))]; pad_temp_shared_local[((ax2 + 24))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 8))]; pad_temp_shared_local[((ax2 + 30))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + 
((int)threadIdx.x)) + rx_inner_outer) + 10))]; pad_temp_shared_local[((ax2 + 36))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 12))]; pad_temp_shared_local[((ax2 + 42))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 14))]; } for (int ax0 = 0; ax0 < 2; ++ax0) { for (int ax21 = 0; ax21 < 3; ++ax21) { kernel_shared_local[(((ax0 * 3) + ax21))] = kernel_shared[((((((((int)threadIdx.z) * 36) + (ax0 * 18)) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer))]; kernel_shared_local[((((ax0 * 3) + ax21) + 6))] = kernel_shared[(((((((((int)threadIdx.z) * 36) + (ax0 * 18)) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer) + 144))]; } } for (int ry_inner_inner = 0; ry_inner_inner < 3; ++ry_inner_inner) { for (int ff_c = 0; ff_c < 2; ++ff_c) { for (int yy_c = 0; yy_c < 4; ++yy_c) { compute_local[(((ff_c * 4) + yy_c))] = (compute_local[(((ff_c * 4) + yy_c))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 64))] = (compute_local[((((ff_c * 4) + yy_c) + 64))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 8))] = (compute_local[((((ff_c * 4) + yy_c) + 8))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 6))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 72))] = (compute_local[((((ff_c * 4) + yy_c) + 72))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 6))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 16))] = (compute_local[((((ff_c * 4) + yy_c) + 16))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 12))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 80))] = (compute_local[((((ff_c * 4) + yy_c) + 80))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 12))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 24))] = (compute_local[((((ff_c * 4) + yy_c) + 24))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 18))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 88))] = (compute_local[((((ff_c * 4) + yy_c) + 88))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 18))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 32))] = (compute_local[((((ff_c * 4) + yy_c) + 32))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 24))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 96))] = (compute_local[((((ff_c * 4) + yy_c) + 96))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 24))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 40))] = (compute_local[((((ff_c * 4) + yy_c) + 40))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 30))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 104))] = (compute_local[((((ff_c * 4) + yy_c) + 104))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 30))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 48))] = (compute_local[((((ff_c * 4) + yy_c) + 
48))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 36))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 112))] = (compute_local[((((ff_c * 4) + yy_c) + 112))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 36))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); compute_local[((((ff_c * 4) + yy_c) + 56))] = (compute_local[((((ff_c * 4) + yy_c) + 56))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 42))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))])); compute_local[((((ff_c * 4) + yy_c) + 120))] = (compute_local[((((ff_c * 4) + yy_c) + 120))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 42))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))])); } } } } } } for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) { for (int yy_inner_inner_inner = 0; yy_inner_inner_inner < 4; ++yy_inner_inner_inner) { compute[(((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)))] = compute_local[(((ff_inner_inner_inner * 4) + yy_inner_inner_inner))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401408))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 64))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 2))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 8))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401410))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 72))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 4))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 16))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401412))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 80))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 6))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 24))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + 
(((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401414))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 88))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 8))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 32))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401416))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 96))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 10))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 40))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401418))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 104))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 12))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 48))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401420))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 112))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 14))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 56))]; compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401422))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 120))]; } } } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); 
cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, 
/*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void 
load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 5: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 5; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 6: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 6; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 7: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 7; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int 
h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[0]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[0]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[1]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[0]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[1]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[2]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 7]*data_array[1]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 7]*data_array[2]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 8]*data_array[2]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[3]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[3]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 
* WPAD + tw_id * TW + 5]*data_array[4]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[3]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[4]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[5]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[4]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[5]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 8]*data_array[5]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[6]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[6]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[7]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[7]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[8]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[7]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[8]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 8]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; cudaMalloc(&device_input,C*H*W*sizeof(float)); cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice); float *K = new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; convGemm.initialize(); ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize(); ConvFFT convFFT; convFFT.initialize(); float 
*out_cudnn; float *out_cudnn_host = new float[N*H*W]; cudaEvent_t event_start; cudaEvent_t event_stop; cudaEventCreate(&event_start); cudaEventCreate(&event_stop); out_cudnn = convGemm.forward(device_input); cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); out_cudnn = convFFT.forward(device_input); out_cudnn = convWinogradeNon.forward(device_input); float *device_K; float *device_out; cudaMalloc(&device_out,H*W*N*sizeof(float)); cudaMemset(device_out,0,H*W*N*sizeof(float)); cudaMalloc(&device_K,C*N*9*sizeof(float)); cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice); cudaEventRecord(event_start); convGemm.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnGemmTime; cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop); cudaEventRecord(event_start); convWinogradeNon.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnWinogradeTimeNon; cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop); cudaEventRecord(event_start); convFFT.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnFFTTime; cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop); dim3 grid(14,4,2); dim3 block(2,14,4); cudaEventRecord(event_start); default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tvm; cudaEventElapsedTime(&time_tvm, event_start, event_stop); float *out_tvm = new float[N*H*W]; cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); cudaMemset(device_out, 0, sizeof(float)*N*H*W); chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4)); cudaEventRecord(event_start); conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tdc; cudaEventElapsedTime(&time_tdc, event_start, event_stop); float *out_tdc = new float[N*H*W]; cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); ofstream outfile; char buffer[1000]; int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W, cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc, cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc); outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app); outfile << buffer; float difference = check_diff(out_cudnn_host, out_tdc, N*H*W); cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<< time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<< cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl; return 0; }
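Every timing in this benchmark follows the same event pattern: record a start event, launch the work, record a stop event, synchronize on the stop event, then read the elapsed milliseconds. A minimal self-contained sketch of that pattern is shown below; dummyKernel and its sizes are illustrative and are not part of the benchmark. Note that a single timed launch, as used here, can be noisy; averaging over repeated launches (as the cuRAND benchmark further down does) is the usual refinement.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(float *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = 2.0f * x[i];
}

// Times a single kernel launch with CUDA events, the same pattern the
// benchmark wraps around convGemm.forward(), the TVM kernel, and conv2d.
int main() {
    const int n = 1 << 20, threads = 256, blocks = (n + threads - 1) / threads;
    float *x;
    cudaMalloc(&x, n * sizeof(float));
    cudaMemset(x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dummyKernel<<<blocks, threads>>>(x, n);   // warm-up launch, not timed

    cudaEventRecord(start);
    dummyKernel<<<blocks, threads>>>(x, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);               // wait until the stop event is reached

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // elapsed time in milliseconds
    printf("kernel took %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(x);
    return 0;
}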
e83aafd596a95d06186283a9ed5cc22b41cb5ccf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_ROCM #include "dragon/core/context_cuda.h" #include "dragon/core/workspace.h" #include "dragon/utils/device/common_cub.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernel { namespace { template <typename IndexType, typename CoordType, int D> __global__ void _UnravelIndex( const int nthreads, const int num_dims, const SimpleArray<int, D> dims, const IndexType* index, CoordType* coord) { CUDA_1D_KERNEL_LOOP(i, nthreads) { IndexType tmp = index[i]; CoordType* offset_coord = coord + i * num_dims; for (int d = num_dims - 1; d >= 0; --d) { FIXED_DIVISOR_DIV_MOD(dims.data[d], tmp, &tmp, (offset_coord + d)); } } } } // namespace /* ------------------- Launcher Separator ------------------- */ #define DEFINE_KERNEL_LAUNCHER(IndexType) \ template <> \ void Flagged<IndexType, CUDAContext>( \ const int count, \ const uint8_t* mask, \ IndexType* index, \ int* num_selected, \ CUDAContext* ctx) { \ IndexType num_selected_host; \ auto* num_selected_dev = index + count; \ size_t ws_nbytes = 0; \ hipcub::CountingInputIterator<int> itr(0); \ hipcub::DeviceSelect::Flagged( \ nullptr, \ ws_nbytes, \ itr, \ mask, \ index, \ static_cast<int64_t*>(nullptr), \ count, \ ctx->cuda_stream()); \ hipcub::DeviceSelect::Flagged( \ ctx->workspace()->template data<CUDAContext>( \ {ws_nbytes}, "data:1")[0], \ ws_nbytes, \ itr, \ mask, \ index, \ num_selected_dev, \ count, \ ctx->cuda_stream()); \ CUDA_CHECK(hipMemcpyAsync( \ &num_selected_host, \ num_selected_dev, \ sizeof(IndexType), \ hipMemcpyDefault, \ ctx->cuda_stream())); \ ctx->FinishDeviceComputation(); \ num_selected[0] = num_selected_host; \ } DEFINE_KERNEL_LAUNCHER(int); DEFINE_KERNEL_LAUNCHER(int64_t); #undef DEFINE_KERNEL_LAUNCHER #define DEFINE_KERNEL_LAUNCHER(IndexType, CoordType) \ template <> \ void UnravelIndex<IndexType, CoordType, CUDAContext>( \ const int count, \ const int num_dims, \ const int64_t* dims, \ const IndexType* index, \ CoordType* coord, \ CUDAContext* ctx) { \ CUDA_TENSOR_DIMS_CHECK(num_dims); \ SimpleArray<int, CUDA_TENSOR_MAX_DIMS> X_dims; \ for (int i = 0; i < num_dims; ++i) \ X_dims.data[i] = dims[i]; \ hipLaunchKernelGGL(( _UnravelIndex), \ CUDA_BLOCKS(count), \ CUDA_THREADS, \ 0, \ ctx->cuda_stream(), count, num_dims, X_dims, index, coord); \ } DEFINE_KERNEL_LAUNCHER(int, int); DEFINE_KERNEL_LAUNCHER(int, int64_t); DEFINE_KERNEL_LAUNCHER(int64_t, int); DEFINE_KERNEL_LAUNCHER(int64_t, int64_t); #undef DEFINE_KERNEL_LAUNCHER } // namespace kernel } // namespace dragon #endif // USE_ROCM
e83aafd596a95d06186283a9ed5cc22b41cb5ccf.cu
#ifdef USE_CUDA #include "dragon/core/context_cuda.h" #include "dragon/core/workspace.h" #include "dragon/utils/device/common_cub.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernel { namespace { template <typename IndexType, typename CoordType, int D> __global__ void _UnravelIndex( const int nthreads, const int num_dims, const SimpleArray<int, D> dims, const IndexType* index, CoordType* coord) { CUDA_1D_KERNEL_LOOP(i, nthreads) { IndexType tmp = index[i]; CoordType* offset_coord = coord + i * num_dims; for (int d = num_dims - 1; d >= 0; --d) { FIXED_DIVISOR_DIV_MOD(dims.data[d], tmp, &tmp, (offset_coord + d)); } } } } // namespace /* ------------------- Launcher Separator ------------------- */ #define DEFINE_KERNEL_LAUNCHER(IndexType) \ template <> \ void Flagged<IndexType, CUDAContext>( \ const int count, \ const uint8_t* mask, \ IndexType* index, \ int* num_selected, \ CUDAContext* ctx) { \ IndexType num_selected_host; \ auto* num_selected_dev = index + count; \ size_t ws_nbytes = 0; \ cub::CountingInputIterator<int> itr(0); \ cub::DeviceSelect::Flagged( \ nullptr, \ ws_nbytes, \ itr, \ mask, \ index, \ static_cast<int64_t*>(nullptr), \ count, \ ctx->cuda_stream()); \ cub::DeviceSelect::Flagged( \ ctx->workspace()->template data<CUDAContext>( \ {ws_nbytes}, "data:1")[0], \ ws_nbytes, \ itr, \ mask, \ index, \ num_selected_dev, \ count, \ ctx->cuda_stream()); \ CUDA_CHECK(cudaMemcpyAsync( \ &num_selected_host, \ num_selected_dev, \ sizeof(IndexType), \ cudaMemcpyDefault, \ ctx->cuda_stream())); \ ctx->FinishDeviceComputation(); \ num_selected[0] = num_selected_host; \ } DEFINE_KERNEL_LAUNCHER(int); DEFINE_KERNEL_LAUNCHER(int64_t); #undef DEFINE_KERNEL_LAUNCHER #define DEFINE_KERNEL_LAUNCHER(IndexType, CoordType) \ template <> \ void UnravelIndex<IndexType, CoordType, CUDAContext>( \ const int count, \ const int num_dims, \ const int64_t* dims, \ const IndexType* index, \ CoordType* coord, \ CUDAContext* ctx) { \ CUDA_TENSOR_DIMS_CHECK(num_dims); \ SimpleArray<int, CUDA_TENSOR_MAX_DIMS> X_dims; \ for (int i = 0; i < num_dims; ++i) \ X_dims.data[i] = dims[i]; \ _UnravelIndex<<< \ CUDA_BLOCKS(count), \ CUDA_THREADS, \ 0, \ ctx->cuda_stream()>>>(count, num_dims, X_dims, index, coord); \ } DEFINE_KERNEL_LAUNCHER(int, int); DEFINE_KERNEL_LAUNCHER(int, int64_t); DEFINE_KERNEL_LAUNCHER(int64_t, int); DEFINE_KERNEL_LAUNCHER(int64_t, int64_t); #undef DEFINE_KERNEL_LAUNCHER } // namespace kernel } // namespace dragon #endif // USE_CUDA
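The Flagged<> launcher above relies on CUB's two-pass convention: the first DeviceSelect::Flagged call, made with a null temporary-storage pointer, only reports how many scratch bytes are needed; the second call performs the selection. Below is a minimal sketch of that pattern with illustrative sizes and flag values (error checking omitted); in the launcher itself the scratch buffer comes from the workspace and the selected count is copied back from a device-side counter.

#include <cstdio>
#include <cub/cub.cuh>
#include <cuda_runtime.h>

// Compacts the indices of set flags, mirroring the two-pass
// cub::DeviceSelect::Flagged usage in the Flagged<> launcher above.
int main() {
    const int n = 8;
    const unsigned char h_flags[n] = {0, 1, 1, 0, 0, 1, 0, 1};

    unsigned char *d_flags;
    int *d_out, *d_num_selected;
    cudaMalloc(&d_flags, n * sizeof(unsigned char));
    cudaMalloc(&d_out, n * sizeof(int));
    cudaMalloc(&d_num_selected, sizeof(int));
    cudaMemcpy(d_flags, h_flags, n * sizeof(unsigned char), cudaMemcpyHostToDevice);

    cub::CountingInputIterator<int> indices(0);   // 0, 1, 2, ... as the "input"

    // Pass 1: query the temporary-storage size (d_temp_storage == nullptr).
    void *d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;
    cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes,
                               indices, d_flags, d_out, d_num_selected, n);

    // Pass 2: allocate that much scratch space and do the actual selection.
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes,
                               indices, d_flags, d_out, d_num_selected, n);

    int h_num = 0;
    cudaMemcpy(&h_num, d_num_selected, sizeof(int), cudaMemcpyDeviceToHost);
    printf("selected %d indices\n", h_num);   // expect 4 for the flags above

    cudaFree(d_temp_storage);
    cudaFree(d_flags);
    cudaFree(d_out);
    cudaFree(d_num_selected);
    return 0;
}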
eb70e07db26e0af198333753039406fb44f9403c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "initCurand.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            hiprandState_t *state = NULL;
            hipMalloc(&state, XSIZE * YSIZE);
            unsigned long seed = 1;
            int n_rows = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(initCurand, dim3(gridBlock), dim3(threadBlock), 0, 0, state, seed, n_rows);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(initCurand, dim3(gridBlock), dim3(threadBlock), 0, 0, state, seed, n_rows);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(initCurand, dim3(gridBlock), dim3(threadBlock), 0, 0, state, seed, n_rows);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
                 << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
eb70e07db26e0af198333753039406fb44f9403c.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "initCurand.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            curandState *state = NULL;
            cudaMalloc(&state, XSIZE * YSIZE);
            unsigned long seed = 1;
            int n_rows = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            initCurand<<<gridBlock, threadBlock>>>(state, seed, n_rows);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                initCurand<<<gridBlock, threadBlock>>>(state, seed, n_rows);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                initCurand<<<gridBlock, threadBlock>>>(state, seed, n_rows);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
                 << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
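The benchmark above includes "initCurand.cu", which is not part of this pair, so the kernel body is an assumption here: a kernel with this name and signature typically just calls curand_init once per thread. A sketch of such an implementation in the CUDA form (n_rows is ignored in this sketch, and note the sizeof factor in the allocation):

#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <cstdio>

// Assumed implementation: seed one curandState per thread of a 2-D launch.
__global__ void initCurand(curandState *state, unsigned long seed, int n_rows) {
    // n_rows is unused in this sketch.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int id = y * gridDim.x * blockDim.x + x;
    curand_init(seed, id, 0, &state[id]);
}

int main() {
    const int XSIZE = 240, YSIZE = 240;
    curandState *state = nullptr;
    // One curandState per element, hence the sizeof factor.
    cudaMalloc(&state, (size_t)XSIZE * YSIZE * sizeof(curandState));
    dim3 block(16, 16);
    dim3 grid(XSIZE / block.x, YSIZE / block.y);
    initCurand<<<grid, block>>>(state, 1UL, 1);
    cudaDeviceSynchronize();
    printf("last CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(state);
    return 0;
}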
794c3495380a8c77c33fabb07334f049bf85522e.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> __global__ void averagePoolNCHW(const float *A, float *C, int batchSize, int channels, int width, int height, int stride, int kernelSize) { int tid = blockDim.x * blockIdx.x + threadIdx.x;// Get global thread ID int tpr = (width+stride-1)/stride;// Threads to work in a row int tpc = (height+stride-1)/stride;// Threads to work in a col int tgrpSize = tpr * tpc;// Number of threads to work on a image int tgrpNum = tid/tgrpSize;// Image number int tgrpLocaltid = (tid % tgrpSize);// local id in working image int startRow = tgrpNum * height;// starting row value of Image int localRow = (tgrpLocaltid/tpr)*stride;// local row number in the working image int col = (tid%tpr) * stride; //check row boundaries if(startRow + localRow > (channels * batchSize * height -1)) return; float outTemp = 0.0f; //Access elements from pooling window to compute Average pooling for(int i = 0 ; i < kernelSize && localRow + i < height ; i++) for( int j =0; j < kernelSize && col+ j < width; j++) { outTemp = outTemp + A[(startRow + localRow + i)*width+ col+ j]; } C[tid] = outTemp/(kernelSize * kernelSize);// Store output } __global__ void averagePoolShared(const float *A, float *C, int batchSize, int channels, int width, int height, int stride, int kernelSize) { extern __shared__ float sArr[];// Shared memory to store image data int size = height * width;// Image size int tpr = (width+stride-1)/stride;// Threads to work in a row int tpc = (height+stride-1)/stride;// Threads to work in a col int startOutIndex = blockIdx.x * tpr * tpc;// Starting output index of image //load image data into shared memory for(int i = threadIdx.x; i < size ; i += blockDim.x) { sArr[i] = A[blockIdx.x * size + i]; } __syncthreads(); if(threadIdx.x > tpr*tpc) return; // loop over image output indices for(int k = threadIdx.x ; k < tpr * tpc ; k += blockDim.x) { float outTemp = 0.0f; int row = (k/tpr) * stride; int col = (k%tpr) * stride; // Compute average pooling for(int i = 0 ; i < kernelSize && row + i < height ; i++) for( int j =0; j < kernelSize && col+ j < width; j++) { outTemp = outTemp+ sArr[(row + i)*width+ col+ j]; } C[startOutIndex+k] = outTemp/(kernelSize * kernelSize); // Store output } } int avgPool(int N, const float* inputs, float* outputs, int C, int H, int W, int kernelSize, int begPad, int endPad, int stride, hipStream_t stream) { //size_t sharedMemorySize = sizeof(float)*((H * W) + (((W + stride -1)/stride)*((H + stride - 1)/stride))); size_t sharedMemorySize = sizeof(float)*((H * W)); float* inArr = (float*)malloc(sizeof(float)*H*W); float* outArr = (float*)malloc(sizeof(float)*((W+stride-1)/stride)*((H+stride-1)/stride)); hipMemcpy(inArr,inputs,sizeof(float)*H*W,hipMemcpyDeviceToHost); if (stride < kernelSize) { unsigned int blocksPerGrid = N * C; unsigned int threadsPerBlock = (W * H < 1024)?(W * H):1024; hipLaunchKernelGGL(( averagePoolShared), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMemorySize,stream, inputs, outputs, N, C, W, H, stride, kernelSize); } else { 
unsigned int threadsPerBlock = 1024; unsigned int blocksPerGrid =((N*C*H*W) + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( averagePoolNCHW), dim3(blocksPerGrid), dim3(threadsPerBlock),0,stream, inputs, outputs, N, C, W, H, 1, kernelSize); } hipError_t err = hipGetLastError(); if ( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", __FILE__, __LINE__, hipGetErrorString( err ) ); } return 0; }
794c3495380a8c77c33fabb07334f049bf85522e.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> __global__ void averagePoolNCHW(const float *A, float *C, int batchSize, int channels, int width, int height, int stride, int kernelSize) { int tid = blockDim.x * blockIdx.x + threadIdx.x;// Get global thread ID int tpr = (width+stride-1)/stride;// Threads to work in a row int tpc = (height+stride-1)/stride;// Threads to work in a col int tgrpSize = tpr * tpc;// Number of threads to work on a image int tgrpNum = tid/tgrpSize;// Image number int tgrpLocaltid = (tid % tgrpSize);// local id in working image int startRow = tgrpNum * height;// starting row value of Image int localRow = (tgrpLocaltid/tpr)*stride;// local row number in the working image int col = (tid%tpr) * stride; //check row boundaries if(startRow + localRow > (channels * batchSize * height -1)) return; float outTemp = 0.0f; //Access elements from pooling window to compute Average pooling for(int i = 0 ; i < kernelSize && localRow + i < height ; i++) for( int j =0; j < kernelSize && col+ j < width; j++) { outTemp = outTemp + A[(startRow + localRow + i)*width+ col+ j]; } C[tid] = outTemp/(kernelSize * kernelSize);// Store output } __global__ void averagePoolShared(const float *A, float *C, int batchSize, int channels, int width, int height, int stride, int kernelSize) { extern __shared__ float sArr[];// Shared memory to store image data int size = height * width;// Image size int tpr = (width+stride-1)/stride;// Threads to work in a row int tpc = (height+stride-1)/stride;// Threads to work in a col int startOutIndex = blockIdx.x * tpr * tpc;// Starting output index of image //load image data into shared memory for(int i = threadIdx.x; i < size ; i += blockDim.x) { sArr[i] = A[blockIdx.x * size + i]; } __syncthreads(); if(threadIdx.x > tpr*tpc) return; // loop over image output indices for(int k = threadIdx.x ; k < tpr * tpc ; k += blockDim.x) { float outTemp = 0.0f; int row = (k/tpr) * stride; int col = (k%tpr) * stride; // Compute average pooling for(int i = 0 ; i < kernelSize && row + i < height ; i++) for( int j =0; j < kernelSize && col+ j < width; j++) { outTemp = outTemp+ sArr[(row + i)*width+ col+ j]; } C[startOutIndex+k] = outTemp/(kernelSize * kernelSize); // Store output } } int avgPool(int N, const float* inputs, float* outputs, int C, int H, int W, int kernelSize, int begPad, int endPad, int stride, cudaStream_t stream) { //size_t sharedMemorySize = sizeof(float)*((H * W) + (((W + stride -1)/stride)*((H + stride - 1)/stride))); size_t sharedMemorySize = sizeof(float)*((H * W)); float* inArr = (float*)malloc(sizeof(float)*H*W); float* outArr = (float*)malloc(sizeof(float)*((W+stride-1)/stride)*((H+stride-1)/stride)); cudaMemcpy(inArr,inputs,sizeof(float)*H*W,cudaMemcpyDeviceToHost); if (stride < kernelSize) { unsigned int blocksPerGrid = N * C; unsigned int threadsPerBlock = (W * H < 1024)?(W * H):1024; averagePoolShared<<<blocksPerGrid, threadsPerBlock, sharedMemorySize,stream>>>(inputs, outputs, N, C, W, H, stride, kernelSize); } else { unsigned int threadsPerBlock = 1024; unsigned int blocksPerGrid =((N*C*H*W) + 
threadsPerBlock - 1) / threadsPerBlock; averagePoolNCHW<<<blocksPerGrid, threadsPerBlock,0,stream>>>(inputs, outputs, N, C, W, H, 1, kernelSize); } cudaError_t err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", __FILE__, __LINE__, cudaGetErrorString( err ) ); } return 0; }
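Both pooling kernels above divide by kernelSize*kernelSize even when the window is clipped at the right or bottom edge. A small host-side reference with the same convention can be used to check the device output; the function and variable names below are illustrative only:

#include <vector>
#include <cstdio>

// Reference NCHW average pooling matching the kernels' convention:
// no padding, stride-sized steps, fixed divisor of kernelSize^2 even
// for windows clipped at the right/bottom edge.
void avgPoolHost(const std::vector<float> &in, std::vector<float> &out,
                 int N, int C, int H, int W, int kernelSize, int stride) {
    int outH = (H + stride - 1) / stride;
    int outW = (W + stride - 1) / stride;
    out.assign((size_t)N * C * outH * outW, 0.0f);
    for (int img = 0; img < N * C; ++img)
        for (int oy = 0; oy < outH; ++oy)
            for (int ox = 0; ox < outW; ++ox) {
                float sum = 0.0f;
                for (int ky = 0; ky < kernelSize && oy * stride + ky < H; ++ky)
                    for (int kx = 0; kx < kernelSize && ox * stride + kx < W; ++kx)
                        sum += in[((size_t)img * H + oy * stride + ky) * W + ox * stride + kx];
                out[((size_t)img * outH + oy) * outW + ox] = sum / (kernelSize * kernelSize);
            }
}

int main() {
    int N = 1, C = 1, H = 4, W = 4, k = 2, stride = 2;
    std::vector<float> in(H * W);
    for (int i = 0; i < H * W; ++i) in[i] = (float)i;
    std::vector<float> out;
    avgPoolHost(in, out, N, C, H, W, k, stride);
    for (float v : out) std::printf("%.1f ", v);   // expected: 2.5 4.5 10.5 12.5
    std::printf("\n");
    return 0;
}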
67b535aa8a1ffc76cd52474ca650cf70ac50a624.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/ztranspose_conj.cu, normal z -> c, Tue Aug 30 09:38:34 2016 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_c #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // nearly same code in ctranspose.cu // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void ctranspose_conj_device( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { __shared__ magmaFloatComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = MAGMA_C_CONJ( A[j2*lda] ); } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void ctranspose_conj_kernel( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { ctranspose_conj_device(m, n, A, lda, AT, ldat); } __global__ void ctranspose_conj_kernel_batched( int m, int n, magmaFloatComplex **dA_array, int lda, magmaFloatComplex **dAT_array, int ldat) { int batchid = blockIdx.z; ctranspose_conj_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /***************************************************************************//** Purpose ------- ctranspose_conj_q copies and conjugate-transposes a matrix dA to matrix dAT. Same as ctranspose_conj, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT COMPLEX array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_ctranspose_conj_q( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); hipLaunchKernelGGL(( ctranspose_conj_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dAT, lddat ); } /***************************************************************************//** Purpose ------- ctranspose_conj_batched_q copies and conjugate-transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as ctranspose_conj_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_transpose_batched *******************************************************************************/ extern "C" void magmablas_ctranspose_conj_batched( magma_int_t m, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); hipLaunchKernelGGL(( ctranspose_conj_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA_array, ldda, dAT_array, lddat ); }
67b535aa8a1ffc76cd52474ca650cf70ac50a624.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/ztranspose_conj.cu, normal z -> c, Tue Aug 30 09:38:34 2016 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_c #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // nearly same code in ctranspose.cu // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void ctranspose_conj_device( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { __shared__ magmaFloatComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = MAGMA_C_CONJ( A[j2*lda] ); } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void ctranspose_conj_kernel( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { ctranspose_conj_device(m, n, A, lda, AT, ldat); } __global__ void ctranspose_conj_kernel_batched( int m, int n, magmaFloatComplex **dA_array, int lda, magmaFloatComplex **dAT_array, int ldat) { int batchid = blockIdx.z; ctranspose_conj_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /***************************************************************************//** Purpose ------- ctranspose_conj_q copies and conjugate-transposes a matrix dA to matrix dAT. Same as ctranspose_conj, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT COMPLEX array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_ctranspose_conj_q( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); ctranspose_conj_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dAT, lddat ); } /***************************************************************************//** Purpose ------- ctranspose_conj_batched_q copies and conjugate-transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as ctranspose_conj_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_transpose_batched *******************************************************************************/ extern "C" void magmablas_ctranspose_conj_batched( magma_int_t m, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); ctranspose_conj_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA_array, ldda, dAT_array, lddat ); }
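The MAGMA kernel above stages each NB-by-NB tile through shared memory, padding the tile by one column to avoid bank conflicts. A stripped-down float transpose built on the same idea, without the conjugation or the NX/NY sub-tiling, purely for illustration:

#include <cuda_runtime.h>
#include <cstdio>

#define TILE 32
#define BLOCK_ROWS 8

__global__ void transposeTiled(float *out, const float *in, int m, int n) {
    __shared__ float tile[TILE][TILE + 1];   // +1 column avoids shared-memory bank conflicts
    int x = blockIdx.x * TILE + threadIdx.x; // column in the input
    int y = blockIdx.y * TILE + threadIdx.y; // row in the input
    for (int j = 0; j < TILE; j += BLOCK_ROWS)
        if (x < n && y + j < m)
            tile[threadIdx.y + j][threadIdx.x] = in[(size_t)(y + j) * n + x];
    __syncthreads();
    x = blockIdx.y * TILE + threadIdx.x;     // column in the output (= input row block)
    y = blockIdx.x * TILE + threadIdx.y;     // row in the output (= input column block)
    for (int j = 0; j < TILE; j += BLOCK_ROWS)
        if (x < m && y + j < n)
            out[(size_t)(y + j) * m + x] = tile[threadIdx.x][threadIdx.y + j];
}

int main() {
    const int m = 64, n = 48;                // input is m x n, output is n x m
    float *in, *out;
    cudaMallocManaged(&in, m * n * sizeof(float));
    cudaMallocManaged(&out, m * n * sizeof(float));
    for (int i = 0; i < m * n; ++i) in[i] = (float)i;
    dim3 block(TILE, BLOCK_ROWS);
    dim3 grid((n + TILE - 1) / TILE, (m + TILE - 1) / TILE);
    transposeTiled<<<grid, block>>>(out, in, m, n);
    cudaDeviceSynchronize();
    printf("out[1*m + 0] = %.0f (expected %.0f)\n", out[1 * m + 0], in[0 * n + 1]);
    cudaFree(in); cudaFree(out);
    return 0;
}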
24b94f01b0f3782e10979c09c6b988f198e582b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************** * * File: pape_kernels.c * Author: Alex Stivala * Created: March 2011 * * $Id: pape_kernels.cu 668 2011-09-08 04:40:08Z astivala $ * * CUDa implementation ofsingle-source shortest paths using the * d'Esopo-Pape algorithm [Pape 1974 "Implementation and Efficiency of * Moore-Algorithms for the shortest route problem" * Math. Prorgam. 7:212-222]. This algorithm is well suited to * shorest paths in road networks (see Klunder & Post 2006 "The * Shortest Path Problem on Large-Scale Real-Road Networks" Networks * 48(4):182-194). * * The d'Esopo-Pape is termed a "label correcting" rather than "label setting" * (Dijkstra type) algorithm. A node is always removed from head of queue, * and is placed at the tail of the queue if it has never been in the queue * before, otehrwised placed at the head. * * TODO SLF and LLL variations (as per sssp_pape.c) and bidirectional. * ****************************************************************************/ #include <cutil_inline.h> /* CUDA SDK */ #include "sssp.h" #include "pape_kernels.h" /**************************************************************************** * * constants and type definitions * ****************************************************************************/ #define INVALID -1 #define WAS_IN_QUEUE -2 /**************************************************************************** * * __global__ functions: GPU kernels, callable from host * ****************************************************************************/ /* * pape_init_arrays() - kernel to init arrays for pape_kernel() * * Parameters: * num_nodes - number of nodes (elemnts in dist, queue_next) * num_start_nodes - number of source nodes * start_nodes - source nodes * dist - (OUT) distance array, dist from each source to each node * dist[i,s] is dist from s to i * must be intiizlize dto all INVALID except start_node=0 * prev - (OUT) predecessor array, * must have space for num_nodes*num_setart_nodes entries * Each prev[i,s] is predecessor of node i for startnode s * in shortest path to node i from s * intiizlied to all INVALID * queue_next (OUT]) - array of length num_nodes for queue * initilized to all INVALID */ __global__ void pape_init_arrays(long num_nodes, long num_start_nodes, long start_nodes[], double dist[], long prev[], long queue_next[]) { // each thread does as many iterations as necessary to cover all nodes // (usually we would want each thread to only do a single node but // this way any number of nodes can be handled with any number of threads) for (long v = blockIdx.x; v < num_nodes; v += gridDim.x) { for (long i = threadIdx.x; i < num_start_nodes; i += blockDim.x) { long s = start_nodes[i]; #if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA)) fprintf(stderr, "node %d source %d\n", v, s); #endif prev[v * num_start_nodes + s] = INVALID; queue_next[v * num_start_nodes + s] = INVALID; if (v == s) dist[v * num_start_nodes + s] = 0; else dist[v * num_start_nodes + s] = FLOATINF; } } } /* * pape_kernel() - single-source shortest path by d'Esopo-Pape algorithm * * Parameters: * Va, Ea, Wa - graph in packed adjacency list represention * num_nodes - number of nodes (elemnts in Va) * num_edges - number of edges (elements in Ea, Wa) * num_start_nodes - number of source nodes * start_node - source nodes * first_thru_node - first node number that is allowed in a path * (earlier ones are actually 
'zones' for origin/dest). * dist - (in/OUT) distance array, dist from source to each node * dist[s,i] is dist from s to i * must be intiizlize dto all INVALID except start_node=0 * prev - (OUT) predecessor array, * must have space for num_nodes entries * Each prev[i,s] is predecessor of node i * in shortest path to node i from source s * must be intiizlied to all INVALID * queue_next ([workspace]) - array of length num_nodes for queue * must be initilized to all INVALID */ __global__ void pape_kernel(long Va[],long Ea[], double Wa[], long num_nodes, long num_edges, long num_start_nodes, long start_nodes[], long first_thru_node, double dist[], long prev[], long queue_next[]) { long u,v; long i; long queue_first, queue_last; double uvdist, newcost; // each thread does one start node (not caring about block/grid) long idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < num_start_nodes) { long s = start_nodes[idx]; // assert (!(start_node >= num_nodes)); queue_first = INVALID; queue_last = INVALID; u = s; while (u != INVALID && u != WAS_IN_QUEUE) { if (u >= first_thru_node || u == s) { for (i = Va[u]; i < Va[u+1]; i++) /* all neighbours of u */ { v = Ea[i]; /* v is adjacent to u */ // assert(v >= 0 && v < num_nodes); uvdist = Wa[i]; /* with this cost on edge */ newcost = dist[u * num_start_nodes + s] + uvdist; if (newcost < dist[v * num_start_nodes + s]) { dist[v * num_start_nodes + s] = newcost; prev[v * num_start_nodes + s] = u; if (queue_next[v * num_start_nodes + s] == WAS_IN_QUEUE) { queue_next[v * num_start_nodes + s] = queue_first; queue_first = v; if (queue_last == INVALID) { queue_last = v; } } else if (queue_next[v * num_start_nodes + s] == INVALID && v != queue_last) { if (queue_last != INVALID) { queue_next[queue_last * num_start_nodes + s] = v; queue_last = v; } else { queue_first = v; queue_last = v; queue_next[queue_last * num_start_nodes + s] = INVALID; } } } } } u = queue_first; if (u == INVALID || u == WAS_IN_QUEUE) break; // assert(u >=0 && u < num_nodes); queue_first = queue_next[u * num_start_nodes + s]; queue_next[u * num_start_nodes + s] = WAS_IN_QUEUE; if (queue_last == u) queue_last = INVALID; } } }
24b94f01b0f3782e10979c09c6b988f198e582b7.cu
/***************************************************************************** * * File: pape_kernels.c * Author: Alex Stivala * Created: March 2011 * * $Id: pape_kernels.cu 668 2011-09-08 04:40:08Z astivala $ * * CUDa implementation ofsingle-source shortest paths using the * d'Esopo-Pape algorithm [Pape 1974 "Implementation and Efficiency of * Moore-Algorithms for the shortest route problem" * Math. Prorgam. 7:212-222]. This algorithm is well suited to * shorest paths in road networks (see Klunder & Post 2006 "The * Shortest Path Problem on Large-Scale Real-Road Networks" Networks * 48(4):182-194). * * The d'Esopo-Pape is termed a "label correcting" rather than "label setting" * (Dijkstra type) algorithm. A node is always removed from head of queue, * and is placed at the tail of the queue if it has never been in the queue * before, otehrwised placed at the head. * * TODO SLF and LLL variations (as per sssp_pape.c) and bidirectional. * ****************************************************************************/ #include <cutil_inline.h> /* CUDA SDK */ #include "sssp.h" #include "pape_kernels.h" /**************************************************************************** * * constants and type definitions * ****************************************************************************/ #define INVALID -1 #define WAS_IN_QUEUE -2 /**************************************************************************** * * __global__ functions: GPU kernels, callable from host * ****************************************************************************/ /* * pape_init_arrays() - kernel to init arrays for pape_kernel() * * Parameters: * num_nodes - number of nodes (elemnts in dist, queue_next) * num_start_nodes - number of source nodes * start_nodes - source nodes * dist - (OUT) distance array, dist from each source to each node * dist[i,s] is dist from s to i * must be intiizlize dto all INVALID except start_node=0 * prev - (OUT) predecessor array, * must have space for num_nodes*num_setart_nodes entries * Each prev[i,s] is predecessor of node i for startnode s * in shortest path to node i from s * intiizlied to all INVALID * queue_next (OUT]) - array of length num_nodes for queue * initilized to all INVALID */ __global__ void pape_init_arrays(long num_nodes, long num_start_nodes, long start_nodes[], double dist[], long prev[], long queue_next[]) { // each thread does as many iterations as necessary to cover all nodes // (usually we would want each thread to only do a single node but // this way any number of nodes can be handled with any number of threads) for (long v = blockIdx.x; v < num_nodes; v += gridDim.x) { for (long i = threadIdx.x; i < num_start_nodes; i += blockDim.x) { long s = start_nodes[i]; #if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA)) fprintf(stderr, "node %d source %d\n", v, s); #endif prev[v * num_start_nodes + s] = INVALID; queue_next[v * num_start_nodes + s] = INVALID; if (v == s) dist[v * num_start_nodes + s] = 0; else dist[v * num_start_nodes + s] = FLOATINF; } } } /* * pape_kernel() - single-source shortest path by d'Esopo-Pape algorithm * * Parameters: * Va, Ea, Wa - graph in packed adjacency list represention * num_nodes - number of nodes (elemnts in Va) * num_edges - number of edges (elements in Ea, Wa) * num_start_nodes - number of source nodes * start_node - source nodes * first_thru_node - first node number that is allowed in a path * (earlier ones are actually 'zones' for origin/dest). 
* dist - (in/OUT) distance array, dist from source to each node * dist[s,i] is dist from s to i * must be intiizlize dto all INVALID except start_node=0 * prev - (OUT) predecessor array, * must have space for num_nodes entries * Each prev[i,s] is predecessor of node i * in shortest path to node i from source s * must be intiizlied to all INVALID * queue_next ([workspace]) - array of length num_nodes for queue * must be initilized to all INVALID */ __global__ void pape_kernel(long Va[],long Ea[], double Wa[], long num_nodes, long num_edges, long num_start_nodes, long start_nodes[], long first_thru_node, double dist[], long prev[], long queue_next[]) { long u,v; long i; long queue_first, queue_last; double uvdist, newcost; // each thread does one start node (not caring about block/grid) long idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < num_start_nodes) { long s = start_nodes[idx]; // assert (!(start_node >= num_nodes)); queue_first = INVALID; queue_last = INVALID; u = s; while (u != INVALID && u != WAS_IN_QUEUE) { if (u >= first_thru_node || u == s) { for (i = Va[u]; i < Va[u+1]; i++) /* all neighbours of u */ { v = Ea[i]; /* v is adjacent to u */ // assert(v >= 0 && v < num_nodes); uvdist = Wa[i]; /* with this cost on edge */ newcost = dist[u * num_start_nodes + s] + uvdist; if (newcost < dist[v * num_start_nodes + s]) { dist[v * num_start_nodes + s] = newcost; prev[v * num_start_nodes + s] = u; if (queue_next[v * num_start_nodes + s] == WAS_IN_QUEUE) { queue_next[v * num_start_nodes + s] = queue_first; queue_first = v; if (queue_last == INVALID) { queue_last = v; } } else if (queue_next[v * num_start_nodes + s] == INVALID && v != queue_last) { if (queue_last != INVALID) { queue_next[queue_last * num_start_nodes + s] = v; queue_last = v; } else { queue_first = v; queue_last = v; queue_next[queue_last * num_start_nodes + s] = INVALID; } } } } } u = queue_first; if (u == INVALID || u == WAS_IN_QUEUE) break; // assert(u >=0 && u < num_nodes); queue_first = queue_next[u * num_start_nodes + s]; queue_next[u * num_start_nodes + s] = WAS_IN_QUEUE; if (queue_last == u) queue_last = INVALID; } } }
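Va/Ea/Wa above form a packed (CSR-style) adjacency list: entries Va[u] .. Va[u+1]-1 index the outgoing edges of node u within Ea (target node) and Wa (edge weight). A tiny host-only illustration of that layout and of the neighbour loop the kernel uses; the example graph and weights are invented:

#include <cstdio>

int main() {
    // 4-node directed graph:
    // 0 -> 1 (w 2.0), 0 -> 2 (w 5.0), 1 -> 2 (w 1.0), 2 -> 3 (w 3.0)
    long   Va[] = {0, 2, 3, 4, 4};          // length num_nodes + 1
    long   Ea[] = {1, 2, 2, 3};             // neighbour of each packed edge
    double Wa[] = {2.0, 5.0, 1.0, 3.0};     // weight of each packed edge
    long num_nodes = 4;

    for (long u = 0; u < num_nodes; u++) {
        std::printf("node %ld:", u);
        // Same loop shape as pape_kernel(): all neighbours of u.
        for (long i = Va[u]; i < Va[u + 1]; i++)
            std::printf("  -> %ld (w=%.1f)", Ea[i], Wa[i]);
        std::printf("\n");
    }
    return 0;
}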
7b263f8c8d9d2a9eec5cebcd0d975d76d03c6ad4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void Timestep_gpu( const float *maxEdgeEigenvalues0, const float *maxEdgeEigenvalues1, const float *maxEdgeEigenvalues2, const float *EdgeVolumes0, const float *EdgeVolumes1, const float *EdgeVolumes2, const float *cellVolumes, float *minTimeStep ) { float local = 0.0f; local += *maxEdgeEigenvalues0 * *(EdgeVolumes0); local += *maxEdgeEigenvalues1 * *(EdgeVolumes1); local += *maxEdgeEigenvalues2 * *(EdgeVolumes2); *minTimeStep = MIN(*minTimeStep, 2.0f * *cellVolumes / local); } // CUDA kernel function __global__ void op_cuda_Timestep( const float *__restrict ind_arg0, const float *__restrict ind_arg1, const int *__restrict opDat0Map, const float *__restrict arg6, float *arg7, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg7_l[1]; for ( int d=0; d<1; d++ ){ arg7_l[d]=arg7[d+blockIdx.x*1]; } __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<nelem; n+=blockDim.x ){ int map0idx; int map1idx; int map2idx; map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; map2idx = opDat0Map[n + offset_b + set_size * 2]; //user-supplied kernel call Timestep_gpu(ind_arg0+map0idx*1, ind_arg0+map1idx*1, ind_arg0+map2idx*1, ind_arg1+map0idx*1, ind_arg1+map1idx*1, ind_arg1+map2idx*1, arg6+(n+offset_b)*1, arg7_l); } //global reductions for ( int d=0; d<1; d++ ){ op_reduction<OP_MIN>(&arg7[d+blockIdx.x*1],arg7_l[d]); } } //host stub function void op_par_loop_Timestep(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7){ float*arg7h = (float *)arg7.data; int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(25); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[25].name = name; OP_kernels[25].count += 1; int ninds = 2; int inds[8] = {0,0,0,1,1,1,-1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: Timestep\n"); } //get plan #ifdef OP_PART_SIZE_25 int part_size = OP_PART_SIZE_25; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); //transfer global reduction data to GPU int maxblocks = 0; for ( int col=0; col<Plan->ncolors; col++ ){ maxblocks = MAX(maxblocks,Plan->ncolblk[col]); } int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg7.data = OP_reduct_h + reduct_bytes; arg7.data_d = OP_reduct_d + reduct_bytes; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ ((float *)arg7.data)[d+b*1] = arg7h[d]; } } reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); mvReductArraysToDevice(reduct_bytes); //execute plan int block_offset = 0; 
for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_grouped(nargs, args, 2); } #ifdef OP_BLOCK_SIZE_25 int nthread = OP_BLOCK_SIZE_25; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = MAX(Plan->nshared,reduct_size*nthread); hipLaunchKernelGGL(( op_cuda_Timestep), dim3(nblocks),dim3(nthread),nshared, 0, (float *)arg0.data_d, (float *)arg3.data_d, arg0.map_data_d, (float*)arg6.data_d, (float*)arg7.data_d, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); //transfer global reduction data back to CPU if (col == Plan->ncolors_owned-1) { mvReductArraysToHost(reduct_bytes); } } block_offset += Plan->ncolblk[col]; } OP_kernels[25].transfer += Plan->transfer; OP_kernels[25].transfer2 += Plan->transfer2; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg7h[d] = MIN(arg7h[d],((float *)arg7.data)[d+b*1]); } } arg7.data = (char *)arg7h; op_mpi_reduce(&arg7,arg7h); } op_mpi_set_dirtybit_cuda(nargs, args); if (OP_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[25].time += wall_t2 - wall_t1; }
7b263f8c8d9d2a9eec5cebcd0d975d76d03c6ad4.cu
// // auto-generated by op2.py // //user function __device__ void Timestep_gpu( const float *maxEdgeEigenvalues0, const float *maxEdgeEigenvalues1, const float *maxEdgeEigenvalues2, const float *EdgeVolumes0, const float *EdgeVolumes1, const float *EdgeVolumes2, const float *cellVolumes, float *minTimeStep ) { float local = 0.0f; local += *maxEdgeEigenvalues0 * *(EdgeVolumes0); local += *maxEdgeEigenvalues1 * *(EdgeVolumes1); local += *maxEdgeEigenvalues2 * *(EdgeVolumes2); *minTimeStep = MIN(*minTimeStep, 2.0f * *cellVolumes / local); } // CUDA kernel function __global__ void op_cuda_Timestep( const float *__restrict ind_arg0, const float *__restrict ind_arg1, const int *__restrict opDat0Map, const float *__restrict arg6, float *arg7, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg7_l[1]; for ( int d=0; d<1; d++ ){ arg7_l[d]=arg7[d+blockIdx.x*1]; } __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<nelem; n+=blockDim.x ){ int map0idx; int map1idx; int map2idx; map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; map2idx = opDat0Map[n + offset_b + set_size * 2]; //user-supplied kernel call Timestep_gpu(ind_arg0+map0idx*1, ind_arg0+map1idx*1, ind_arg0+map2idx*1, ind_arg1+map0idx*1, ind_arg1+map1idx*1, ind_arg1+map2idx*1, arg6+(n+offset_b)*1, arg7_l); } //global reductions for ( int d=0; d<1; d++ ){ op_reduction<OP_MIN>(&arg7[d+blockIdx.x*1],arg7_l[d]); } } //host stub function void op_par_loop_Timestep(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7){ float*arg7h = (float *)arg7.data; int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(25); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[25].name = name; OP_kernels[25].count += 1; int ninds = 2; int inds[8] = {0,0,0,1,1,1,-1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: Timestep\n"); } //get plan #ifdef OP_PART_SIZE_25 int part_size = OP_PART_SIZE_25; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); //transfer global reduction data to GPU int maxblocks = 0; for ( int col=0; col<Plan->ncolors; col++ ){ maxblocks = MAX(maxblocks,Plan->ncolblk[col]); } int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg7.data = OP_reduct_h + reduct_bytes; arg7.data_d = OP_reduct_d + reduct_bytes; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ ((float *)arg7.data)[d+b*1] = arg7h[d]; } } reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); mvReductArraysToDevice(reduct_bytes); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { 
op_mpi_wait_all_grouped(nargs, args, 2); } #ifdef OP_BLOCK_SIZE_25 int nthread = OP_BLOCK_SIZE_25; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = MAX(Plan->nshared,reduct_size*nthread); op_cuda_Timestep<<<nblocks,nthread,nshared>>>( (float *)arg0.data_d, (float *)arg3.data_d, arg0.map_data_d, (float*)arg6.data_d, (float*)arg7.data_d, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); //transfer global reduction data back to CPU if (col == Plan->ncolors_owned-1) { mvReductArraysToHost(reduct_bytes); } } block_offset += Plan->ncolblk[col]; } OP_kernels[25].transfer += Plan->transfer; OP_kernels[25].transfer2 += Plan->transfer2; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg7h[d] = MIN(arg7h[d],((float *)arg7.data)[d+b*1]); } } arg7.data = (char *)arg7h; op_mpi_reduce(&arg7,arg7h); } op_mpi_set_dirtybit_cuda(nargs, args); if (OP_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[25].time += wall_t2 - wall_t1; }
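op_reduction<OP_MIN> in the stub above comes from the OP2 runtime headers and is not defined in this file. As a rough standalone analogue, a generic shared-memory block-level min reduction looks like this (all names here are illustrative, not OP2 internals):

#include <cuda_runtime.h>
#include <cfloat>
#include <cstdio>

// Each block folds its elements into one minimum in shared memory and
// writes it to blockResults; the host finishes the reduction.
__global__ void blockMin(const float *in, float *blockResults, int n) {
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    sdata[tid] = (i < n) ? in[i] : FLT_MAX;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s && sdata[tid + s] < sdata[tid]) sdata[tid] = sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) blockResults[blockIdx.x] = sdata[0];
}

int main() {
    const int n = 1000, threads = 256;
    const int blocks = (n + threads - 1) / threads;
    float *in, *out;
    cudaMallocManaged(&in, n * sizeof(float));
    cudaMallocManaged(&out, blocks * sizeof(float));
    for (int i = 0; i < n; ++i) in[i] = 1000.0f - i;   // minimum is 1.0
    blockMin<<<blocks, threads, threads * sizeof(float)>>>(in, out, n);
    cudaDeviceSynchronize();
    float m = FLT_MAX;
    for (int b = 0; b < blocks; ++b) if (out[b] < m) m = out[b];
    printf("min = %f\n", m);   // expected 1.000000
    cudaFree(in); cudaFree(out);
    return 0;
}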
508e8b142664acde410199340ec6a815d558e0db.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <iostream>
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void test(int *z)
{
    // Initialize the operands so the comparison (and *z) is well defined.
    char x = 0, y = 0;
    if (x == y) {
        *z = 0;
    }
    printf("%d\n", *z);
}

int main(void)
{
    int z;
    int *dev_z;
    hipMalloc((void**)&dev_z, sizeof(int));
    hipLaunchKernelGGL(test, dim3(1), dim3(1), 0, 0, dev_z);
    // Copy the kernel's result back to the host.
    hipMemcpy(&z, dev_z, sizeof(int), hipMemcpyDeviceToHost);
    printf("%d\n", z);
    hipFree(dev_z);
    return 0;
}
508e8b142664acde410199340ec6a815d558e0db.cu
#include <stdlib.h>
#include <iostream>
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
#include <stdio.h>

__global__ void test(int *z)
{
    // Initialize the operands so the comparison (and *z) is well defined.
    char x = 0, y = 0;
    if (x == y) {
        *z = 0;
    }
    printf("%d\n", *z);
}

int main(void)
{
    int z;
    int *dev_z;
    cudaMalloc((void**)&dev_z, sizeof(int));
    test<<<1, 1>>>(dev_z);
    // Copy the kernel's result back to the host.
    cudaMemcpy(&z, dev_z, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", z);
    cudaFree(dev_z);
    return 0;
}
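None of the runtime calls in the pair above check their return codes. A common pattern is a small wrapper macro such as the (hypothetical) CHECK_CUDA below:

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Minimal error-checking wrapper; real projects usually ship an equivalent.
#define CHECK_CUDA(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                      \
                    cudaGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

int main() {
    int *d = nullptr;
    CHECK_CUDA(cudaMalloc(&d, sizeof(int)));
    CHECK_CUDA(cudaMemset(d, 0, sizeof(int)));
    CHECK_CUDA(cudaFree(d));
    printf("ok\n");
    return 0;
}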
c879dda20ef9898d354595540c9bf9b39ddc3eaa.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <helper_functions.h> // helper for shared functions common to CUDA Samples #include <helper_cuda.h> // helper functions for CUDA error checking and initialization // Some kernels assume square blocks #define BDIMX 16 #define BDIMY 16 __global__ void transposeNaiveRow(float *out, float *in, const int nrows, const int ncols) { int iy = blockIdx.y * blockDim.y + threadIdx.y; int ix = blockIdx.x * blockDim.x + threadIdx.x; if (iy < nrows && ix < ncols) { out[ix*nrows + iy] = in[iy*ncols + ix]; } } __global__ void transposeNaiveCol(float *out, float *in, const int nrows, const int ncols) { int iy = blockIdx.y * blockDim.y + threadIdx.y; int ix = blockIdx.x * blockDim.x + threadIdx.x; if (iy < nrows && ix < ncols) { out[iy*ncols + ix] = in[ix*nrows + iy]; } } #define INDEX(ROW, COL, INNER) ((ROW) * (INNER) + (COL)) void initialData(float *in, const int size) { for (int i = 0; i < size; i++) { in[i] = (float)(rand() & 0xFF) / 10.0f; } return; } void printData(float *in, const int size) { for (int i = 0; i < size; i++) { printf("%3.0f ", in[i]); } printf("\n"); return; } void checkResult(float *hostRef, float *gpuRef, int rows, int cols) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { int index = INDEX(i, j, cols); if (abs(hostRef[index] - gpuRef[index]) > epsilon) { match = 0; printf("different on (%d, %d) (offset=%d) element in " "transposed matrix: host %f gpu %f\n", i, j, index, hostRef[index], gpuRef[index]); break; } } if (!match) break; } if (match) printf("PASS\n\n"); else printf("FAIL\n\n"); } void transposeHost(float *out, float *in, const int nrows, const int ncols) { for (int iy = 0; iy < nrows; ++iy) { for (int ix = 0; ix < ncols; ++ix) { out[INDEX(ix, iy, nrows)] = in[INDEX(iy, ix, ncols)]; } } } int main(int argc, char **argv) { bool iprint = 0; int nrows = 1 << 10; int ncols = 1 << 10; printf(" with matrix nrows %d ncols %d\n", nrows, ncols); size_t ncells = nrows * ncols; size_t nBytes = ncells * sizeof(float); // allocate host memory float *h_A = (float *)malloc(nBytes); float *hostRef = (float *)malloc(nBytes); float *gpuRef = (float *)malloc(nBytes); // initialize host array initialData(h_A, nrows * ncols); // transpose at host side transposeHost(hostRef, h_A, nrows, ncols); // allocate device memory float *d_A, *d_C; checkCudaErrors(hipMalloc((float**)&d_A, nBytes)); checkCudaErrors(hipMalloc((float**)&d_C, nBytes)); // copy data from host to device checkCudaErrors(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice)); checkCudaErrors(hipMemset(d_C, 0, nBytes)); memset(gpuRef, 0, nBytes); dim3 block(BDIMX, BDIMY); dim3 grid((ncols + block.x - 1) / block.x, (nrows + block.y - 1) / block.y); printf("Transpose: row-wise read, column-wise write\n"); hipLaunchKernelGGL(( transposeNaiveRow) , dim3(grid), dim3(block), 0, 0, d_C, d_A, nrows, ncols); checkCudaErrors(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); if(iprint) printData(gpuRef, ncells); checkResult(hostRef, gpuRef, ncols, nrows); printf("Transpose: column-wise read, row-wise write\n"); transposeNaiveCol << <grid, block >> >(d_C, d_A, nrows, ncols); checkCudaErrors(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); if (iprint) printData(gpuRef, ncells); checkResult(hostRef, gpuRef, ncols, nrows); // free host and device memory checkCudaErrors(hipFree(d_A)); checkCudaErrors(hipFree(d_C)); free(h_A); free(hostRef); free(gpuRef); return 
EXIT_SUCCESS; }
c879dda20ef9898d354595540c9bf9b39ddc3eaa.cu
#include <cuda_runtime.h> #include <stdio.h> #include <helper_functions.h> // helper for shared functions common to CUDA Samples #include <helper_cuda.h> // helper functions for CUDA error checking and initialization // Some kernels assume square blocks #define BDIMX 16 #define BDIMY 16 __global__ void transposeNaiveRow(float *out, float *in, const int nrows, const int ncols) { int iy = blockIdx.y * blockDim.y + threadIdx.y; int ix = blockIdx.x * blockDim.x + threadIdx.x; if (iy < nrows && ix < ncols) { out[ix*nrows + iy] = in[iy*ncols + ix]; } } __global__ void transposeNaiveCol(float *out, float *in, const int nrows, const int ncols) { int iy = blockIdx.y * blockDim.y + threadIdx.y; int ix = blockIdx.x * blockDim.x + threadIdx.x; if (iy < nrows && ix < ncols) { out[iy*ncols + ix] = in[ix*nrows + iy]; } } #define INDEX(ROW, COL, INNER) ((ROW) * (INNER) + (COL)) void initialData(float *in, const int size) { for (int i = 0; i < size; i++) { in[i] = (float)(rand() & 0xFF) / 10.0f; } return; } void printData(float *in, const int size) { for (int i = 0; i < size; i++) { printf("%3.0f ", in[i]); } printf("\n"); return; } void checkResult(float *hostRef, float *gpuRef, int rows, int cols) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { int index = INDEX(i, j, cols); if (abs(hostRef[index] - gpuRef[index]) > epsilon) { match = 0; printf("different on (%d, %d) (offset=%d) element in " "transposed matrix: host %f gpu %f\n", i, j, index, hostRef[index], gpuRef[index]); break; } } if (!match) break; } if (match) printf("PASS\n\n"); else printf("FAIL\n\n"); } void transposeHost(float *out, float *in, const int nrows, const int ncols) { for (int iy = 0; iy < nrows; ++iy) { for (int ix = 0; ix < ncols; ++ix) { out[INDEX(ix, iy, nrows)] = in[INDEX(iy, ix, ncols)]; } } } int main(int argc, char **argv) { bool iprint = 0; int nrows = 1 << 10; int ncols = 1 << 10; printf(" with matrix nrows %d ncols %d\n", nrows, ncols); size_t ncells = nrows * ncols; size_t nBytes = ncells * sizeof(float); // allocate host memory float *h_A = (float *)malloc(nBytes); float *hostRef = (float *)malloc(nBytes); float *gpuRef = (float *)malloc(nBytes); // initialize host array initialData(h_A, nrows * ncols); // transpose at host side transposeHost(hostRef, h_A, nrows, ncols); // allocate device memory float *d_A, *d_C; checkCudaErrors(cudaMalloc((float**)&d_A, nBytes)); checkCudaErrors(cudaMalloc((float**)&d_C, nBytes)); // copy data from host to device checkCudaErrors(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemset(d_C, 0, nBytes)); memset(gpuRef, 0, nBytes); dim3 block(BDIMX, BDIMY); dim3 grid((ncols + block.x - 1) / block.x, (nrows + block.y - 1) / block.y); printf("Transpose: row-wise read, column-wise write\n"); transposeNaiveRow <<<grid, block>>>(d_C, d_A, nrows, ncols); checkCudaErrors(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); if(iprint) printData(gpuRef, ncells); checkResult(hostRef, gpuRef, ncols, nrows); printf("Transpose: column-wise read, row-wise write\n"); transposeNaiveCol << <grid, block >> >(d_C, d_A, nrows, ncols); checkCudaErrors(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); if (iprint) printData(gpuRef, ncells); checkResult(hostRef, gpuRef, ncols, nrows); // free host and device memory checkCudaErrors(cudaFree(d_A)); checkCudaErrors(cudaFree(d_C)); free(h_A); free(hostRef); free(gpuRef); return EXIT_SUCCESS; }
538fd66059b30c7154acab7b8610dc8477fdc8ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "caffe2/core/context_gpu.h" #include "threshold_sigmoid_cross_entropy_loss_op.h" namespace caffe2 { namespace { __global__ void ElementwiseMaxKernel(const int n, float* data, const float a) { CUDA_1D_KERNEL_LOOP(index, n) { data[index] = (data[index] > a) ? data[index] : a; } } __global__ void ThresholdSigmoidCrossEntropyLossKernel( const int n, const float logit_threshold, const float* logits, const int* targets, float* losses, float* counts) { CUDA_1D_KERNEL_LOOP(index, n) { if (targets[index] == -1 || logits[index] < logit_threshold) { losses[index] = 0.; counts[index] = 0.; } else { losses[index] = -1. * logits[index] * (targets[index] - (logits[index] >= 0)) + logf( 1 + expf(logits[index] - 2 * logits[index] * (logits[index] >= 0))); counts[index] = 1.; } } } __global__ void ThresholdSigmoidCrossEntropyLossGradientKernel( const int n, const float logit_threshold, const float* logits, const int* targets, float* d_logits, float* counts) { CUDA_1D_KERNEL_LOOP(index, n) { if (targets[index] == -1 || logits[index] < logit_threshold) { d_logits[index] = 0.; counts[index] = 0.; } else { d_logits[index] = 1. / (1. + expf(-logits[index])) - targets[index]; counts[index] = 1.; } } } } // namespace template <> bool ThresholdSigmoidCrossEntropyLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& T = Input(1); auto* avg_loss = Output(0); const float logit_threshold = logf(threshold_ / (1. - threshold_)); CAFFE_ENFORCE( X.size() == T.size(), "Logit and target must have the same size", "(", X.size(), " vs. 
", T.size(), ")"); avg_loss->Resize(vector<TIndex>()); counts_.ResizeLike(X); losses_.ResizeLike(X); normalizer_.Resize(vector<TIndex>()); hipLaunchKernelGGL(( ThresholdSigmoidCrossEntropyLossKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), logit_threshold, X.data<float>(), T.data<int>(), losses_.mutable_data<float>(), counts_.mutable_data<float>()); float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); if (normalize_) { float* normalizer_data = normalizer_.mutable_data<float>(); math::Sum<float, CUDAContext>( counts_.size(), counts_.data<float>(), normalizer_data, &context_); // Prevent division by zero is all counts are zero hipLaunchKernelGGL(( ElementwiseMaxKernel), dim3(CAFFE_GET_BLOCKS(normalizer_.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), normalizer_.size(), normalizer_data, 1e-5); math::Div<float, CUDAContext>( 1, avg_loss_data, normalizer_data, avg_loss_data, &context_); } math::Scale<float, CUDAContext>( 1, scale_, avg_loss_data, avg_loss_data, &context_); return true; } template <> bool ThresholdSigmoidCrossEntropyLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& T = Input(1); auto& d_avg_loss = Input(2); auto* dX = Output(0); const float logit_threshold = logf(threshold_ / (1. - threshold_)); dX->ResizeLike(X); counts_.ResizeLike(X); normalizer_.Resize(vector<TIndex>()); hipLaunchKernelGGL(( ThresholdSigmoidCrossEntropyLossGradientKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), logit_threshold, X.data<float>(), T.data<int>(), dX->mutable_data<float>(), counts_.mutable_data<float>()); if (normalize_) { float* normalizer_data = normalizer_.mutable_data<float>(); math::Sum<float, CUDAContext>( counts_.size(), counts_.data<float>(), normalizer_data, &context_); // Prevent division by zero is all counts are zero hipLaunchKernelGGL(( ElementwiseMaxKernel), dim3(CAFFE_GET_BLOCKS(normalizer_.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), normalizer_.size(), normalizer_data, 1e-5); math::Div<float, CUDAContext>( 1, d_avg_loss.data<float>(), normalizer_data, normalizer_data, &context_); math::Scale<float, CUDAContext>( 1, scale_, normalizer_data, normalizer_data, &context_); math::Scale<float, CUDAContext>( dX->size(), normalizer_data, dX->data<float>(), dX->mutable_data<float>(), &context_); } else { math::Scale<float, CUDAContext>( dX->size(), scale_, dX->data<float>(), dX->mutable_data<float>(), &context_); math::Scale<float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } return true; } REGISTER_CUDA_OPERATOR( ThresholdSigmoidCrossEntropyLoss, ThresholdSigmoidCrossEntropyLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( ThresholdSigmoidCrossEntropyLossGradient, ThresholdSigmoidCrossEntropyLossGradientOp<float, CUDAContext>); } // namespace caffe2
538fd66059b30c7154acab7b8610dc8477fdc8ee.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "caffe2/core/context_gpu.h" #include "threshold_sigmoid_cross_entropy_loss_op.h" namespace caffe2 { namespace { __global__ void ElementwiseMaxKernel(const int n, float* data, const float a) { CUDA_1D_KERNEL_LOOP(index, n) { data[index] = (data[index] > a) ? data[index] : a; } } __global__ void ThresholdSigmoidCrossEntropyLossKernel( const int n, const float logit_threshold, const float* logits, const int* targets, float* losses, float* counts) { CUDA_1D_KERNEL_LOOP(index, n) { if (targets[index] == -1 || logits[index] < logit_threshold) { losses[index] = 0.; counts[index] = 0.; } else { losses[index] = -1. * logits[index] * (targets[index] - (logits[index] >= 0)) + logf( 1 + expf(logits[index] - 2 * logits[index] * (logits[index] >= 0))); counts[index] = 1.; } } } __global__ void ThresholdSigmoidCrossEntropyLossGradientKernel( const int n, const float logit_threshold, const float* logits, const int* targets, float* d_logits, float* counts) { CUDA_1D_KERNEL_LOOP(index, n) { if (targets[index] == -1 || logits[index] < logit_threshold) { d_logits[index] = 0.; counts[index] = 0.; } else { d_logits[index] = 1. / (1. + expf(-logits[index])) - targets[index]; counts[index] = 1.; } } } } // namespace template <> bool ThresholdSigmoidCrossEntropyLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& T = Input(1); auto* avg_loss = Output(0); const float logit_threshold = logf(threshold_ / (1. - threshold_)); CAFFE_ENFORCE( X.size() == T.size(), "Logit and target must have the same size", "(", X.size(), " vs. ", T.size(), ")"); avg_loss->Resize(vector<TIndex>()); counts_.ResizeLike(X); losses_.ResizeLike(X); normalizer_.Resize(vector<TIndex>()); ThresholdSigmoidCrossEntropyLossKernel<<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), logit_threshold, X.data<float>(), T.data<int>(), losses_.mutable_data<float>(), counts_.mutable_data<float>()); float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); if (normalize_) { float* normalizer_data = normalizer_.mutable_data<float>(); math::Sum<float, CUDAContext>( counts_.size(), counts_.data<float>(), normalizer_data, &context_); // Prevent division by zero is all counts are zero ElementwiseMaxKernel<<< CAFFE_GET_BLOCKS(normalizer_.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(normalizer_.size(), normalizer_data, 1e-5); math::Div<float, CUDAContext>( 1, avg_loss_data, normalizer_data, avg_loss_data, &context_); } math::Scale<float, CUDAContext>( 1, scale_, avg_loss_data, avg_loss_data, &context_); return true; } template <> bool ThresholdSigmoidCrossEntropyLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& T = Input(1); auto& d_avg_loss = Input(2); auto* dX = Output(0); const float logit_threshold = logf(threshold_ / (1. 
- threshold_)); dX->ResizeLike(X); counts_.ResizeLike(X); normalizer_.Resize(vector<TIndex>()); ThresholdSigmoidCrossEntropyLossGradientKernel<<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), logit_threshold, X.data<float>(), T.data<int>(), dX->mutable_data<float>(), counts_.mutable_data<float>()); if (normalize_) { float* normalizer_data = normalizer_.mutable_data<float>(); math::Sum<float, CUDAContext>( counts_.size(), counts_.data<float>(), normalizer_data, &context_); // Prevent division by zero is all counts are zero ElementwiseMaxKernel<<< CAFFE_GET_BLOCKS(normalizer_.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(normalizer_.size(), normalizer_data, 1e-5); math::Div<float, CUDAContext>( 1, d_avg_loss.data<float>(), normalizer_data, normalizer_data, &context_); math::Scale<float, CUDAContext>( 1, scale_, normalizer_data, normalizer_data, &context_); math::Scale<float, CUDAContext>( dX->size(), normalizer_data, dX->data<float>(), dX->mutable_data<float>(), &context_); } else { math::Scale<float, CUDAContext>( dX->size(), scale_, dX->data<float>(), dX->mutable_data<float>(), &context_); math::Scale<float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } return true; } REGISTER_CUDA_OPERATOR( ThresholdSigmoidCrossEntropyLoss, ThresholdSigmoidCrossEntropyLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( ThresholdSigmoidCrossEntropyLossGradient, ThresholdSigmoidCrossEntropyLossGradientOp<float, CUDAContext>); } // namespace caffe2
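// The .cu file above differs from its hipified counterpart earlier in this pair only in launch
// syntax and runtime prefixes. A minimal illustration of that mapping (the kernel and argument
// names below are placeholders, not symbols from this file):
//
//   CUDA:  myKernel<<<grid, block, shmemBytes, stream>>>(arg0, arg1);
//   HIP :  hipLaunchKernelGGL(myKernel, dim3(grid), dim3(block), shmemBytes, stream, arg0, arg1);
//
// Likewise cudaGetLastError/cudaDeviceSynchronize become hipGetLastError/hipDeviceSynchronize,
// while framework-level calls such as context_.cuda_stream() are left untouched by the port.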
7c607931a55ca84cc60ea92ec45fda03fd3f332e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_2_left; int xdim0_update_halo_kernel5_plus_2_left_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_2_left; int ydim0_update_halo_kernel5_plus_2_left_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_2_left; int xdim1_update_halo_kernel5_plus_2_left_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_2_left; int ydim1_update_halo_kernel5_plus_2_left_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel5_plus_2_left * (y) + \ xdim0_update_halo_kernel5_plus_2_left * \ ydim0_update_halo_kernel5_plus_2_left * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel5_plus_2_left * (y) + \ xdim1_update_halo_kernel5_plus_2_left * \ ydim1_update_halo_kernel5_plus_2_left * (z)) // user function __device__ inline void update_halo_kernel5_plus_2_left_gpu(double *vol_flux_z, double *mass_flux_z, const int *fields) { if (fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(2, 0, 0)]); if (fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(2, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_2_left(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_2_left + idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_2_left * ydim0_update_halo_kernel5_plus_2_left; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_2_left + idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_2_left * ydim1_update_halo_kernel5_plus_2_left; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_2_left_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel5_plus_2_left_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 89)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(89, "update_halo_kernel5_plus_2_left"); OPS_kernels[89].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - 
sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel5_plus_2_left_h || ydim0 != ydim0_update_halo_kernel5_plus_2_left_h || xdim1 != xdim1_update_halo_kernel5_plus_2_left_h || ydim1 != ydim1_update_halo_kernel5_plus_2_left_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel5_plus_2_left, &xdim0, sizeof(int)); xdim0_update_halo_kernel5_plus_2_left_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel5_plus_2_left, &ydim0, sizeof(int)); ydim0_update_halo_kernel5_plus_2_left_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel5_plus_2_left, &xdim1, sizeof(int)); xdim1_update_halo_kernel5_plus_2_left_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel5_plus_2_left, &ydim1, sizeof(int)); ydim1_update_halo_kernel5_plus_2_left_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[89].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_2_left), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[89].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[89].mpi_time += t2 - t1; OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, 
ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 89; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 89; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel5_plus_2_left_execute; if (OPS_diags > 1) { ops_timing_realloc(89, "update_halo_kernel5_plus_2_left"); } ops_enqueue_kernel(desc); } #endif
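// The OPS_ACC0/OPS_ACC1 macros above and the pointer arithmetic in
// ops_update_halo_kernel5_plus_2_left both implement the same row-major 3-D flattening.
// A minimal standalone sketch of that indexing (xdim/ydim stand in for the
// xdim0_.../ydim0_... constants; the function name is illustrative only):
__host__ __device__ inline int flatten3d(int x, int y, int z, int xdim, int ydim) {
  // element (x, y, z) of a dataset stored x-fastest, then y, then z
  return x + xdim * y + xdim * ydim * z;
}
// e.g. vol_flux_z[flatten3d(2, 0, 0, xdim, ydim)] is the interior cell copied into the halo
// by OPS_ACC0(2, 0, 0) in update_halo_kernel5_plus_2_left_gpu.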
7c607931a55ca84cc60ea92ec45fda03fd3f332e.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_2_left; int xdim0_update_halo_kernel5_plus_2_left_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_2_left; int ydim0_update_halo_kernel5_plus_2_left_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_2_left; int xdim1_update_halo_kernel5_plus_2_left_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_2_left; int ydim1_update_halo_kernel5_plus_2_left_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel5_plus_2_left * (y) + \ xdim0_update_halo_kernel5_plus_2_left * \ ydim0_update_halo_kernel5_plus_2_left * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel5_plus_2_left * (y) + \ xdim1_update_halo_kernel5_plus_2_left * \ ydim1_update_halo_kernel5_plus_2_left * (z)) // user function __device__ inline void update_halo_kernel5_plus_2_left_gpu(double *vol_flux_z, double *mass_flux_z, const int *fields) { if (fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(2, 0, 0)]); if (fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(2, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_2_left(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_2_left + idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_2_left * ydim0_update_halo_kernel5_plus_2_left; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_2_left + idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_2_left * ydim1_update_halo_kernel5_plus_2_left; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_2_left_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel5_plus_2_left_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 89)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(89, "update_halo_kernel5_plus_2_left"); OPS_kernels[89].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n 
+ 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel5_plus_2_left_h || ydim0 != ydim0_update_halo_kernel5_plus_2_left_h || xdim1 != xdim1_update_halo_kernel5_plus_2_left_h || ydim1 != ydim1_update_halo_kernel5_plus_2_left_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel5_plus_2_left, &xdim0, sizeof(int)); xdim0_update_halo_kernel5_plus_2_left_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel5_plus_2_left, &ydim0, sizeof(int)); ydim0_update_halo_kernel5_plus_2_left_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel5_plus_2_left, &xdim1, sizeof(int)); xdim1_update_halo_kernel5_plus_2_left_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel5_plus_2_left, &ydim1, sizeof(int)); ydim1_update_halo_kernel5_plus_2_left_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[89].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel5_plus_2_left<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[89].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[89].mpi_time += t2 - t1; OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor 
*)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 89; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 89; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel5_plus_2_left_execute; if (OPS_diags > 1) { ops_timing_realloc(89, "update_halo_kernel5_plus_2_left"); } ops_enqueue_kernel(desc); } #endif
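// Both versions of the host stub size the launch grid with the usual ceiling division,
// dim3 grid((x_size - 1)/OPS_block_size_x + 1, (y_size - 1)/OPS_block_size_y + 1, z_size).
// A minimal sketch of that rounding rule (the helper name is illustrative only):
inline int blocks_for(int n, int block_size) {
  // smallest block count b such that b * block_size >= n, assuming n >= 1
  return (n - 1) / block_size + 1;
}
// e.g. blocks_for(1000, 256) == 4; the per-thread guard `idx_x < size0 && idx_y < size1 && ...`
// inside the kernel then masks off the surplus threads of the final, partially filled block.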
d7a6b04a6e195b0e15a3ade0436df6babd130899.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <complex> #include "multiplier.h" #include "multiplier_three_cavity.h" #include "multiplier_multimode.h" #include "cu_mult.h" //#include "print.h" #define PAR ParamsM #ifndef dm_Pi __device__ const double dm_Pi = 3.141592653589793; #endif int Nz = 0; double Lsolver = 0; double MultiplierGroupSpeedCoefficient = 0; __device__ double grSpeedCoeff; int *d_Nz; double *d_Lsolver; //hipPitchedPtr d2_rJ3, d2_iJ3, d2_int_rJ3, d2_int_iJ3, d2_W; //hipPitchedPtr d1_rJ3, d1_iJ3, d1_int_rJ3, d1_int_iJ3, d1_W; __device__ void biReduce(double *A, double *B, int p0, int datasize, int logsize) { int stride = datasize; for (int q = 1; q <= logsize; q++) { stride = stride >> 1; if (p0 < stride) { A[p0] += A[p0 + stride]; } else { if (p0 < 2 * stride) { B[p0 - stride] += B[p0]; } } __syncthreads(); } } __device__ double dh( double delta) { return delta*delta*(grSpeedCoeff); } __device__ void funcA (double z, double *rA, double *iA, double2 *Amps, int Na) { *rA = 0; *iA = 0; double rF, iF; for(int i = 0; i < Na; i++) { sincos(z*double(i - Na/2), &iF, &rF); *rA += Amps[i].x*rF - Amps[i].y*iF; *iA += Amps[i].x*iF + Amps[i].y*rF; } } __global__ void __launch_bounds__ (512, 2) MotionEquationMultiplier(PAR *par, double Lstop, int Nharm, double A, double2 B)//Fixed Structure { unsigned int p0 = threadIdx.x; unsigned int Np = blockDim.x; unsigned int q0 = threadIdx.y; unsigned int Nq = blockDim.y; unsigned int s0 = threadIdx.z; unsigned int Ns = blockDim.z; unsigned int q_init = Nq*blockIdx.x + q0; unsigned int Nq_max = Nq*gridDim.x; unsigned int s_init = Ns*blockIdx.y + s0; unsigned int Ns_max = Ns*gridDim.y; unsigned int v_init = blockIdx.z; unsigned int Nv_max = gridDim.z; int warpsize = Np*Nq*Ns; int log2warpsize = round(log2((double)warpsize)); int X = blockIdx.x + gridDim.x*blockIdx.y + gridDim.y*gridDim.x*blockIdx.z; double la, lb, ld, h, k1, voltage, g1, g3; __shared__ double avEN, int_rJ3, int_iJ3; int N; double dz; N = par->Nz; la = par->la; lb = par->lb; ld = par->ld; h = par->h; k1 = par->k1; g1 = par->g1; g3 = par->g3; voltage = par->voltage; double *rJ3 = par->rJ3; double *iJ3 = par->iJ3; dz = par->L/(double)N; double z; int ifinal = floor(Lstop/dz); double Q, Qk1, Qk2, Qk3, Qk4; double W, Wk1, Wk2, Wk3, Wk4; double fA, fB, rA, r; double Wmax, Wmin; double ifnotdestroyed = 1; double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle, angle_spread_factor; double wall = par->wall; R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max); initial_angle = 0;//(0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center); R_cyclotron = (0.568*initial_angle + 0*0.035156*((double)v_init)/double(Nv_max)); kappa_cyclotron = 1.758; phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max; /*R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max); initial_angle = 0.087*((double)v_init - 0.5*Nv_max)/double(Nv_max); R_cyclotron = 0.744*initial_angle;// f = 86.6 . 0,568; f = 95.5: 0.744 kappa_cyclotron = 1.344; // f = 86.6 . 1.758; f = 95.5: 1.344 phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max;*/ double en0 = 1. + voltage/511.; angle_spread_factor = 1./sqrt(1. + initial_angle*initial_angle); // wall. 
- initial_angle Q = 2.*dm_Pi/double(Np)*double(p0); W = 0; __shared__ double sh_sinQ[NS*NQ*NP]; __shared__ double sh_cosQ[NS*NQ*NP]; /* __shared__ double shQ[NS][NQ][NP]; __shared__ double shW[NS][NQ][NP]; __shared__ double d2_rJ3[NQ][NP]; __shared__ double d2_iJ3[NQ][NP]; __shared__ double d1_rJ3[NP]; __shared__ double d1_iJ3[NP];*/ double PH, EN, cosPH, sinPH, cosPS, sinPS, rB, iB; double H = h;//+dh(delta); if(p0+q_init+s_init + v_init == 0) { rJ3[0] = 0; iJ3[0] = 0; } if(p0+q0+s0== 0) { par->int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; par->int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; int_rJ3 = 0; int_iJ3 = 0; avEN = 0; } // if(s0+p0+q0 == 0) printf("la = %g, ld = %g, lb = %g \n", la, ld, lb); int i = 0; for(i = 1; i < N; i++) { ///////////////// z = (double)i*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); ifnotdestroyed *= (r > -wall)? 1. : 0.; ///!!!!!! // ifnotdestroyed = 1; PH = Q; EN = W + en0; // if((s0+p0+q0 == 0)) printf("%g\t%g\n", z, fB); fA = ((z<la)?sin(dm_Pi/la*z)* exp(-g1*r):0); fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0); rA = A*fA; rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk1 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.)); Wk1 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// z = ((double)i+0.5)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); // ifnotdestroyed *= (r > -wall)? 1. : 0.; // ifnotdestroyed = 1; PH = Q + 0.5*Qk1; EN = W + 0.5*Wk1 + en0; fA = ((z<la)?sin(dm_Pi/la*z) * exp(-g1*r):0); fB = ((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0; rA = A*fA; rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk2 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.)); Wk2 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// PH = Q + 0.5*Qk2; EN = W + 0.5*Wk2 + en0; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk3 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.)); Wk3 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// z = ((double)i+1.)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); // ifnotdestroyed *= (r > -wall)? 1. : 0.; // ifnotdestroyed = 1; PH = Q + Qk3; EN = W + Wk3 + en0; fA = ((z<la)? sin(dm_Pi/la*z)* exp(-g1*r):0); fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0); rA = A*fA; rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk4= dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.)); Wk4= -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; /////////////// Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4); W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4); /* shQ[s0][q0][p0] = Q; shW[s0][q0][p0] = W;*/ __syncthreads(); sincos(double(Nharm)*Q, &sinPH, &cosPH); if(Nharm == 1) fB = ((z<la)?sin(dm_Pi/la*z):0)*exp(-g1*r); //fB !! 
fB q0, s0 else fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r); fB *= ifnotdestroyed; int xi = p0 + Np*q0 + Np*Nq*s0; sh_sinQ[xi] = sinPH*fB; sh_cosQ[xi] = cosPH*fB; unsigned int stride = warpsize; __syncthreads(); for(int q = 1; q <= log2warpsize; q++) { stride = stride >> 1;//roundf(powf(2., q)); if(xi < stride) { sh_sinQ[xi] += sh_sinQ[xi + stride]; } else { if(xi < 2*stride) { sh_cosQ[xi - stride] += sh_cosQ[xi]; } } __syncthreads(); } // if((i == 1300)) printf("%g\n", Q); if(xi == 0) { rJ3[X*N+i] = sh_cosQ[0]; iJ3[X*N+i] = -sh_sinQ[0]; // if((i == 1300)) printf("\n%g\n", sh_cosQ[0]); int_rJ3 += sh_cosQ[0]; int_iJ3 += -sh_sinQ[0]; } /* //////// Nharm if(s0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0, tmpPhCycl = 0; for(int ii = 0; ii < Ns; ii++) { int ii_init = Ns*blockIdx.y + ii; tmpPhCycl = 2.*dm_Pi*(double)ii_init/(double)Ns_max; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + tmpPhCycl)); if(Nharm == 1) fB = ((z<la)?sin(dm_Pi/la*z):0)*exp(-g1*r); //fB !! fB q0, s0 else fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r); fB *= ifnotdestroyed; PH = shQ[ii][q0][p0]; sincos((double)Nharm*PH, &sinPS, &cosPS); tmp_rJ3 += cosPS*fB; tmp_iJ3 -= sinPS*fB; } d2_rJ3[q0][p0] = tmp_rJ3; d2_iJ3[q0][p0] = tmp_iJ3; } __threadfence(); __syncthreads(); if(s0 + q0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0; for(int ii = 0; ii < Nq; ii++) { tmp_rJ3 += d2_rJ3[ii][p0]; tmp_iJ3 += d2_iJ3[ii][p0]; } d1_rJ3[p0] = tmp_rJ3; d1_iJ3[p0] = tmp_iJ3; } __threadfence(); __syncthreads(); if(p0 + q0 +s0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0; for(int ii = 0; ii < Np; ii++) { tmp_rJ3 += d1_rJ3[ii]; tmp_iJ3 += d1_iJ3[ii]; } rJ3[i] = tmp_rJ3; iJ3[i] = tmp_iJ3; int_rJ3 += tmp_rJ3; int_iJ3 += tmp_iJ3; // if(i == ifinal) printf("<< %g %g\n", A*int_rJ3, A*tmp_rJ3); } __threadfence(); __syncthreads(); //////////////////// Nharm */ if(i == ifinal) { sh_sinQ[xi] = W; __syncthreads(); stride = warpsize; for(int q = 1; q <= log2warpsize; q++) { stride = stride >> 1;//warpsize/roundf(powf(2., q)); if(xi < stride) { sh_sinQ[xi] += sh_sinQ[xi + stride]; } __syncthreads(); } if(xi == 0) { avEN = sh_sinQ[0]; } __syncthreads(); sh_sinQ[xi] = W; sh_cosQ[xi] = W; stride = warpsize; for(int q = 1; q <= log2warpsize; q++) { stride = stride >> 1;// stride = warpsize/roundf(powf(2., q)); if(xi < stride) { sh_sinQ[xi] = (sh_sinQ[xi] > sh_sinQ[xi + stride]) ? sh_sinQ[xi] : sh_sinQ[xi + stride]; } else { if(xi < 2*stride) { sh_cosQ[xi - stride] = (sh_cosQ[xi - stride] < sh_cosQ[xi]) ? 
sh_cosQ[xi - stride] : sh_cosQ[xi]; } } __syncthreads(); } if(xi == 0) { Wmax = sh_sinQ[0]; Wmin = sh_cosQ[0]; } /* if(s0 == 0) { double tmp_W = 0; for(int ii = 0; ii < Ns; ii++) { EN = shW[ii][q0][p0]; tmp_W += EN; } d2_rJ3[q0][p0] = tmp_W; // if((p0 == 0)) printf(" %g >>, \n", d2_rJ3[q0][p0]); } __threadfence(); __syncthreads(); if(s0 + q0 == 0) { double tmp_rJ3 = 0; for(int ii = 0; ii < Nq; ii++) tmp_rJ3 += d2_rJ3[ii][p0]; d1_rJ3[p0] = tmp_rJ3; } __threadfence(); __syncthreads(); if(p0 + q0 +s0 == 0) { double tmp_rJ3 = 0; for(int ii = 0; ii < Np; ii++) tmp_rJ3 += d1_rJ3[ii]; (avEN) += tmp_rJ3; } */ __syncthreads(); } __threadfence(); __syncthreads(); if(i > ifinal) break; } // printf("END\t"); // if(p0 + s0 == 0) printf("(%i, %i, %i)...<%g, %g> =?= <%g>...\n", blockIdx.x, blockIdx.y, blockIdx.z,A*int_rJ3*dz, A*int_iJ3*dz, avEN); __syncthreads(); // if(p0+q_init+s_init + v_init == 0) if(p0+q0+s0 == 0) { /* printf("%i, %i, %i\t (%g, %g)\n", blockIdx.x, blockIdx.y, blockIdx.z, int_rJ3/double(Np*Nq_max*Ns_max*N)*(par->L), int_iJ3/double(Np*Nq_max*Ns_max*N)*(par->L)) ;*/ par->avEN[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = avEN; par->int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3; par->int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3; par->Wmax[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = Wmax; par->Wmin[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = Wmin; } } __global__ void __launch_bounds__ (512, 2) MotionEquationMultiplierDoubleScheme(PAR par, double Lstop, int Nharm, double A, double2 A2, double2 B)//Fixed Structure { unsigned int p0 = threadIdx.x; unsigned int q0 = threadIdx.y; unsigned int s0 = threadIdx.z; unsigned int Np = blockDim.x; unsigned int Nq = blockDim.y; unsigned int Ns = blockDim.z; unsigned int q_init = Nq*blockIdx.x + q0; unsigned int s_init = Ns*blockIdx.y + s0; unsigned int v_init = blockIdx.z; unsigned int Nq_max = Nq*gridDim.x; unsigned int Ns_max = Ns*gridDim.y; unsigned int Nv_max = gridDim.z; // printf("Thread %i/%i, %i/%i started; ", q0, Nq, p0, Np); double la1, la2, lb, ld1, ld2, h, k1, voltage, g1, g3; double fA2, rA2, iA2; __shared__ double avEN, int_rJ3, int_iJ3; // printf("Step0; "); int N; double dz; N = par.Nz; la1 = par.la1; la2 = par.la2; lb = par.lb; ld1 = par.ld1; ld2 = par.ld2; h = par.h; k1 = par.k1; g1 = par.g1; g3 = par.g3; voltage = par.voltage; double la_tot = la1 + la2 + ld1; double ifnotdestroyed = 1; double *rJ3 = par.rJ3; double *iJ3 = par.iJ3; // double *int_rJ3 = par.int_rJ3; // double *int_iJ3 = par.int_iJ3; dz = par.L/(double)N; double z; int ifinal = floor(Lstop/dz); double Q, Qk1, Qk2, Qk3, Qk4; double W, Wk1, Wk2, Wk3, Wk4; double fB; double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle; double wall = par.wall, r; R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max); initial_angle = (0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center); R_cyclotron = 0.568*initial_angle + 0.035156*((double)v_init)/double(Nv_max); kappa_cyclotron = 1.758; phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max; double en0 = 1. 
+ voltage/511.; en0 -= 0.5*initial_angle*initial_angle*(en0*en0 - 1)*en0; Q = 2.*dm_Pi/double(Np)*double(p0);// + 1./(double)Nq*((double)q0 + (double)s0/(double)Ns)); W = 0; __shared__ double shQ[NS][NQ][NP]; __shared__ double shW[NS][NQ][NP]; __shared__ double d2_rJ3[NQ][NP]; __shared__ double d2_iJ3[NQ][NP]; __shared__ double d1_rJ3[NP]; __shared__ double d1_iJ3[NP]; double PH, EN, cosPH, sinPH, cosPS, sinPS, rA, rB, iB; // printf("Step3; "); double H = h;//+dh(delta); if(p0+q_init+s_init + v_init == 0) { rJ3[0] = 0; iJ3[0] = 0; // printf("init:\t%i\t%i\t%i\t%i...........\n",p0, q_init,s_init,v_init); } if(p0+q0+s0== 0) { par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; int_rJ3 = 0; int_iJ3 = 0; avEN = 0; } int i = 0; for(i = 1; i < N; i++) { ///////////////// z = (double)i*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); ifnotdestroyed *= (r > -wall)? 1. : 0.; PH = Q; EN = W + en0; rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0); fA2= ( ((la1+ld1<z)&&(z<la_tot))? sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0); rA2 = A2.x*fA2; iA2 = A2.y*fA2; fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0); rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk1 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk1 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// z = ((double)i+0.5)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); ifnotdestroyed *= (r > -wall)? 1. : 0.; PH = Q + 0.5*Qk1; EN = W + 0.5*Wk1 + en0; rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0); fA2= ( ((la1+ld1<z)&&(z<la_tot))? sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0); rA2 = A2.x*fA2; iA2 = A2.y*fA2; fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0); rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk2 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk2 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// PH = Q + 0.5*Qk2; EN = W + 0.5*Wk2 + en0; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk3 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk3 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// z = ((double)i+1)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); ifnotdestroyed *= (r > -wall)? 1. : 0.; PH = Q + Qk3; EN = W + Wk3 + en0; rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0); fA2= ( ((la1+ld1<z)&&(z<la_tot))? 
sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0); rA2 = A2.x*fA2; iA2 = A2.y*fA2; fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0); rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk4= dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk4= -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; /////////////// Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4); W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4); // printf("#< %i -> (%i, %i, %i)>\t", i, s0, q0, p0); // printf("#<%i>", Np_Q*Nq*(Ns*i+s0) + Np_Q*q0 + p0); // if(q0+p0+s0 == 0) printf("%i", i); shQ[s0][q0][p0] = Q; shW[s0][q0][p0] = W; __threadfence(); __syncthreads(); //////// - if(s0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0; for(int ii = 0; ii < Ns; ii++) { PH = shQ[ii][q0][p0]; sincos((double)Nharm*PH, &sinPS, &cosPS); tmp_rJ3 += cosPS; tmp_iJ3 -= sinPS; } d2_rJ3[q0][p0] = tmp_rJ3; d2_iJ3[q0][p0] = tmp_iJ3; } __threadfence(); __syncthreads(); if(s0 + q0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0; for(int ii = 0; ii < Nq; ii++) { tmp_rJ3 += d2_rJ3[ii][p0]; tmp_iJ3 += d2_iJ3[ii][p0]; } d1_rJ3[p0] = tmp_rJ3; d1_iJ3[p0] = tmp_iJ3; } __threadfence(); __syncthreads(); if(p0 + q0 +s0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0; for(int ii = 0; ii < Np; ii++) { tmp_rJ3 += d1_rJ3[ii]; tmp_iJ3 += d1_iJ3[ii]; } rJ3[i] = tmp_rJ3; iJ3[i] = tmp_iJ3; int_rJ3 += tmp_rJ3*((Nharm == 3)?fB:fA2); int_iJ3 += tmp_iJ3*((Nharm == 3)?fB:fA2); } __threadfence(); __syncthreads(); //////////////////// - // if((q0+p0 == 0)&&(s0 == 0)) printf("%i\t%g\t%g\n", q0, PH, EN); // if(q0+p0+s0 == 0) printf("....%i\t", i); /////////////////////// if(i == ifinal) { if(s0 == 0) { double tmp_W = 0; for(int ii = 0; ii < Ns; ii++) { EN = shW[ii][q0][p0]; tmp_W += EN; } d2_rJ3[q0][p0] = W; } __threadfence(); __syncthreads(); if(s0 + q0 == 0) { double tmp_rJ3 = 0; for(int ii = 0; ii < Nq; ii++) tmp_rJ3 += d2_rJ3[ii][p0]; d1_rJ3[p0] = tmp_rJ3; } __threadfence(); __syncthreads(); if(p0 + q0 +s0 == 0) { double tmp_rJ3 = 0; for(int ii = 0; ii < Np; ii++) tmp_rJ3 += d1_rJ3[ii]; (avEN) += tmp_rJ3; } } ///////////////// __threadfence(); __syncthreads(); if(i > ifinal) break; } __syncthreads(); if(p0+q0+s0 == 0) { *par.avEN = avEN; par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3; par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3; } } __global__ void __launch_bounds__ (512, 2) MotionEquationMultiplierMultiModes(PAR par, double Lstop, int Nharm, int Na, double2 B)//Fixed Structure { unsigned int p0 = threadIdx.x; unsigned int q0 = threadIdx.y; unsigned int s0 = threadIdx.z; unsigned int Np = blockDim.x; unsigned int Nq = blockDim.y; unsigned int Ns = blockDim.z; unsigned int q_init = Nq*blockIdx.x + q0; unsigned int s_init = Ns*blockIdx.y + s0; unsigned int v_init = blockIdx.z; unsigned int Nq_max = Nq*gridDim.x; unsigned int Ns_max = Ns*gridDim.y; // unsigned int Nv_max = gridDim.z; int warpsize = Np*Nq*Ns; int log2warpsize = round(log2((double)warpsize)); double la, lb, ld, h, k1, voltage, g1, g3; double rA1, iA1; __shared__ double avEN, int_rJ3, int_iJ3, int_rJ3_1, int_iJ3_1; int N; double dz; N = par.Nz; la = par.la1; lb = par.lb; ld = par.ld; h = par.h; k1 = par.k1; g1 = par.g1; g3 = par.g3; voltage = par.voltage; double ifnotdestroyed = 1; double *rJ3 = par.rJ3; double *iJ3 = par.iJ3; double2 *Amps = (double2 *)par.Amps; // double *int_rJ3 = par.int_rJ3; // double *int_iJ3 = par.int_iJ3; dz = par.L/(double)N; double z; int ifinal = floor(Lstop/dz); double Q, Qk1, 
Qk2, Qk3, Qk4; double W, Wk1, Wk2, Wk3, Wk4; double fB; double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle; double wall = par.wall, r; R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max); initial_angle = 0;//(0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center); R_cyclotron = 0;//0.568*initial_angle + 0.035156*((double)v_init)/double(Nv_max); kappa_cyclotron = 1.758; phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max; double en0 = 1. + voltage/511.; en0 -= 0.5*initial_angle*initial_angle*(en0*en0 - 1)*en0; double beta0 = sqrt(en0*en0 - 1)/en0; // double Delta = k1*dm_Pi/(la*beta0) ;// \delta f / f = (k_0 \pi /L)/beta_ph Q = 2.*dm_Pi/double(Np)*double(p0);// + 1./(double)Nq*((double)q0 + (double)s0/(double)Ns)); W = 0; __shared__ double2 shAmps[NP]; __shared__ double sh_sinQ[NS*NQ*NP]; __shared__ double sh_cosQ[NS*NQ*NP]; double PH, EN, cosPH, sinPH, cosPS, sinPS, rB, iB; double H = h;//+dh(delta); if(p0+q_init+s_init + v_init == 0) { rJ3[0] = 0; iJ3[0] = 0; } if(p0+q0+s0== 0) { par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; par.int_rJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; par.int_iJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; int_rJ3 = 0; int_iJ3 = 0; int_rJ3_1 = 0; int_iJ3_1 = 0; avEN = 0; } if((q0 + s0 == 0)&&(p0 < Na)) { shAmps[p0] = Amps[p0]; } __syncthreads(); int i = 0; for(i = 1; i < N; i++) { ///////////////// z = (double)i*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); ifnotdestroyed *= 1;//(r > -wall)? 1. : 0.; PH = Q; EN = W + en0; funcA(dm_Pi/la*z, &rA1, &iA1, shAmps, Na); if(z > la) {rA1 =0; iA1 = 0;} rA1 *= exp(-g1*r); iA1 *= exp(-g1*r); fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0); rB = 0;//B.x*fB; iB = 0;//B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk1 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk1 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; // if(s0 + p0 + q0 == 0 && (i == 1)) printf("%g,%g,%g,%g\n", r, g1, Qk1, Wk1); ///////////////// z = ((double)i+0.5)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); // ifnotdestroyed *= (r > -wall)? 1. : 0.; PH = Q + 0.5*Qk1; EN = W + 0.5*Wk1 + en0; if(z > la) {rA1 =0; iA1 = 0;} fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0); rB = 0;//B.x*fB; iB = 0;//B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk2 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk2 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// PH = Q + 0.5*Qk2; EN = W + 0.5*Wk2 + en0; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk3 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk3 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// z = ((double)i+1)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); // ifnotdestroyed *= (r > -wall)? 1. 
: 0.; PH = Q + Qk3; EN = W + Wk3 + en0; if(z > la) {rA1 =0; iA1 = 0;} fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0); rB = 0;//B.x*fB; iB = 0;//B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk4= dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk4= -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; /////////////// Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4); W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4); __syncthreads(); sincos(double(Nharm)*Q, &sinPH, &cosPH); if(Nharm == 1) fB = exp(-g1*r); //fB !! fB q0, s0 else fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r); fB *= ifnotdestroyed; int xi = p0 + Np*q0 + Np*Nq*s0; int X = blockIdx.x + gridDim.x*blockIdx.y + gridDim.y*gridDim.x*blockIdx.z; sh_sinQ[xi] = sinPH*fB; sh_cosQ[xi] = cosPH*fB; __syncthreads(); biReduce(sh_sinQ, sh_cosQ, xi, warpsize, log2warpsize); if(xi == 0) { rJ3[X*N+i] = sh_cosQ[0]; iJ3[X*N+i] = -sh_sinQ[0]; int_rJ3 += sh_cosQ[0]; int_iJ3 += -sh_sinQ[0]; } /////////////////////// if(i == ifinal) { sh_sinQ[xi] = W; __syncthreads(); biReduce(sh_sinQ, sh_cosQ, xi, warpsize, log2warpsize); if(xi == 0) { avEN = sh_sinQ[0]; } __syncthreads(); } ///////////////// __threadfence(); __syncthreads(); if(i > ifinal) break; } __syncthreads(); if(p0+q0+s0 == 0) { *par.avEN = avEN; par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3; par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3; par.int_rJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3_1; par.int_iJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3_1; } } std::complex<double> Multiplier::retriveBCurr() { int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv; double t_deltaEn[512]; double t_deltaEn2[512]; double reJ = 0, imJ = 0; // printf("memcpy: %i\t", hipMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), hipMemcpyDeviceToHost)); // printf("memcpy: %i\n", hipMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), hipMemcpyDeviceToHost)); hipMemcpy((void *) t_deltaEn, d_int_rJ3, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost); hipMemcpy((void *) t_deltaEn2, d_int_iJ3, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost); for(int i = 0; i < GQ*GS*GV; i++){ reJ += t_deltaEn[i]; imJ += t_deltaEn2[i]; } double coeff = Lsolver/double(Nz*Np*Nq*Ns*Nv); // printf("re = %g, im = %g\n", reJ*coeff, imJ*coeff); std::complex<double> res = std::complex<double> (reJ*coeff, imJ*coeff); return res; } void Multiplier::retriveBCurr(std::complex<double> *J1, std::complex<double> *J2) { int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv; double t_Jre[512]; double t_Jim[512]; double t_J2re[512]; double t_J2im[512]; double reJ = 0, imJ = 0; double re2J = 0, im2J = 0; // printf("memcpy: %i\t", hipMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), hipMemcpyDeviceToHost)); // printf("memcpy: %i\n", hipMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), hipMemcpyDeviceToHost)); hipMemcpy((void *) t_Jre, d_int_rJ3, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost); hipMemcpy((void *) t_Jim, d_int_iJ3, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost); hipMemcpy((void *) t_J2re, d_int_rJ3_1, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost); hipMemcpy((void *) t_J2im, d_int_iJ3_1, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost); for(int i = 0; i < GQ*GS*GV; i++){ reJ += t_Jre[i]; imJ += t_Jim[i]; re2J += t_J2re[i]; im2J += t_J2im[i]; } double coeff = Lsolver/double(Nz*Np*Nq*Ns*Nv); // printf("re = %g, im = %g\n", reJ*coeff, imJ*coeff); 
std::complex<double> res1 = std::complex<double> (reJ*coeff, imJ*coeff); std::complex<double> res2 = std::complex<double> (re2J*coeff, im2J*coeff); *J1 = res1; *J2 = res2; // printf("J1 = %g, %g\tJ2 = %g, %g\n", *J1, *J2); } double Multiplier::retriveDeltaEnergy() { int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv; double t_deltaEn[512]; double t_wmax[512]; double t_wmin[512]; double averagedEn = 0, wmax = -99999, wmin = 99999; hipMemcpy( t_deltaEn, d_avEN, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost); hipMemcpy( t_wmax, d_Wmax, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost); hipMemcpy( t_wmin, d_Wmin, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost); for(int i = 0; i < GQ*GS*GV; i++) { wmax =(wmax > t_wmax[i]) ? wmax : t_wmax[i]; wmin =(wmin < t_wmin[i]) ? wmin : t_wmin[i]; averagedEn += t_deltaEn[i]; // printf("%g\n", t_deltaEn[i]/double(NP*NQ*NS)); } double coeff = 1./double(Np*Nq*Ns*Nv); // printf("deltaW + = %g \t deltaW - = %g\n", wmax*511000., wmin*511000.); return averagedEn*coeff; } bool Device::initSolver(int nz, double lsolver, double groupSpeedCoeff, char *_solverName) { Nz = nz; Lsolver = lsolver; Lmax = lsolver; solverName = _solverName; Nmax = nz; MultiplierGroupSpeedCoefficient = groupSpeedCoeff; printf("The %s solver is intialized\n", solverName); int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv; // printf(" Nq %i, Ns %i, Nv %i \t GQ %i, GS %i, GV %i \n",Nq, Ns, Nv, GQ, GS, GV); printf("Nz, Lsolver, grSpeed, %i, %g, %g\n", Nz, Lsolver,MultiplierGroupSpeedCoefficient); gpuErrChk(hipMalloc((void**)&d_rJ3, Nz*GQ*GS*GV*sizeof(double))); gpuErrChk(hipMalloc((void**)&d_iJ3, Nz*GQ*GS*GV*sizeof(double))); gpuErrChk(hipMalloc((void**)&d_Nz, sizeof(int))); gpuErrChk(hipMalloc((void**)&d_Lsolver, sizeof(double))); gpuErrChk(hipMalloc((void**)&d_avEN, sizeof(double)*GQ*GS*GV)); gpuErrChk(hipMalloc((void**)&d_int_rJ3_1, sizeof(double)*GQ*GS*GV)); gpuErrChk(hipMalloc((void**)&d_int_iJ3_1, sizeof(double)*GQ*GS*GV)); gpuErrChk(hipMalloc((void**)&d_Amps, sizeof(cplx) * 30)); if(strcmp(solverName,"multiplier_spcharge_2d") != 0) { gpuErrChk(hipMalloc((void**)&d_int_rJ3, sizeof(double)*GQ*GS*GV)); gpuErrChk(hipMalloc((void**)&d_int_iJ3, sizeof(double)*GQ*GS*GV)); } gpuErrChk(hipMalloc((void**)&d_Wmax, sizeof(double)*GQ*GS*GV)); gpuErrChk(hipMalloc((void**)&d_Wmin, sizeof(double)*GQ*GS*GV)); gpuErrChk(hipMalloc((void**)&d_par, sizeof(PAR))); gpuErrChk(hipMalloc((void**)&grSpeedCoeff, sizeof(double))); gpuErrChk(hipMemcpy((void*)d_Nz, &Nz, sizeof(int), hipMemcpyHostToDevice)); gpuErrChk(hipMemcpy((void*)&grSpeedCoeff, &MultiplierGroupSpeedCoefficient, sizeof(double), hipMemcpyHostToDevice)); // TODO Here is a bug gpuErrChk(hipMemcpy((void*)d_Lsolver, (void*)&Lsolver, sizeof(double), hipMemcpyHostToDevice)); return 1; } void Device::releaseDeviceMemory() { hipFree((void*)d_Nz); hipFree((void*)d_Lsolver); hipFree((void*)d_avEN); hipFree((void*)d_int_rJ3); hipFree((void*)d_int_iJ3); if(fieldLoaded) { hipFree((void*) d_tAr); hipFree((void*) d_tAi); } } double Multiplier::DeltaEnergy(double A) { PAR par; double d = period; double h = 2.*Pi/d; double La = period*double(Nperiods); par.la = La; par.lb = Lb; par.ld = Ld; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.Wmax = d_Wmax; par.Wmin = d_Wmin; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; double2 zero = {0,0}; // hipMemcpy( d_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy( d_iJ3, &dzero, sizeof(double), 
hipMemcpyHostToDevice); // hipMemcpy(d_int_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy(d_int_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy( d_avEN, &dzero, sizeof(double), hipMemcpyHostToDevice); dim3 threadsPerBlock(NP, NQ, NS); hipMemcpy(d_par, &par, sizeof(PAR), hipMemcpyHostToDevice); hipLaunchKernelGGL(( MotionEquationMultiplier), dim3(dim3((size_t) Nq/NQ,(size_t) Ns/NS,(size_t) Nv)), dim3(threadsPerBlock), 0, 0, d_par, La, 1, A, zero); /* double *debRe = new double [Nz]; double *debIm = new double [Nz]; hipError_t copy1 = hipMemcpy((void*) debRe, (void *)dm_rJq, sizeof(double)*Nz, hipMemcpyDeviceToHost); printf("copy1 = %i \n", copy1); hipError_t copy2 = hipMemcpy((void*) debIm, (void *)dm_iJq, sizeof(double)*Nz, hipMemcpyDeviceToHost); printf("copy2 = %i \n", copy2); */ //printf("memcpy: %i \n", hipMemcpy((void*) &t_deltaEn, d_avEN, sizeof(double), hipMemcpyDeviceToHost)); //printf("Energy delta = %g \n", t_deltaEn/double(NP*NQ*NS)); double res = retriveDeltaEnergy(); // printf("Retrieve returned: %g \n", res); return res; // delete[] debRe; delete[] debIm; } std::complex<double> Multiplier::CurrentB(double reB, double imB, double A) { PAR par; double d = period; double h = 2.*Pi/d; double La = period*double(Nperiods); par.la = La; par.lb = Lb; par.ld = Ld; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.Wmax = d_Wmax; par.Wmin = d_Wmin; // printf("CurrentB: %g, %g, %g \n", La, Ld, Lb); hipMemset(d_rJ3, 0, sizeof(double)*Nz); hipMemset(d_iJ3, 0, sizeof(double)*Nz); par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; double2 B; B.x = reB; B.y = imB; // printf("\n B loop: %g\n", La+Ld+Lb ); // printf("\n Threads: %i, %i, %i\n", threadsPerBlock.x, threadsPerBlock.y, threadsPerBlock.z ); dim3 numblocks(Nq/NQ, Ns/NS, Nv); dim3 threadsPerBlock(NP, NQ, NS); // hipMemcpy( d_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy( d_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy(d_int_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy(d_int_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy( d_avEN, &dzero, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_par, &par, sizeof(PAR), hipMemcpyHostToDevice); MotionEquationMultiplier << <numblocks, threadsPerBlock >> >(d_par, La + Ld + Lb, 3, A, B); double *jr = new double [Nz]; double *ji = new double [Nz]; hipMemcpy(jr, d_rJ3, sizeof(double)*Nz, hipMemcpyDeviceToHost); hipMemcpy(ji, d_iJ3, sizeof(double)*Nz, hipMemcpyDeviceToHost); FILE *resamp_ar = fopen("F:\\Piotr\\bwo_Data\\mdebug_jr.csv", "w"); FILE *resamp_ai = fopen("F:\\Piotr\\bwo_Data\\mdebug_ji.csv", "w"); for(int j = 0; j < Nz; j++) { fprintf(resamp_ar, "%i,%g\n", j, jr[j]); fprintf(resamp_ai, "%i,%g\n", j, ji[j]); } fclose(resamp_ar); fclose(resamp_ai); delete []jr; delete []ji; return retriveBCurr(); } std::complex<double> MultiplierThreeCavity::CurrentB2(double reB, double imB, double A, cplx A2) { PAR par; double d = period; double h = 2.*Pi/d; double La1 = period*(double)Nperiods; // printf("CurrentB2: %g, %g, %g, %g, %g \n", La1, Ld1, La2, Ld2, Lb); par.la1 = La1; par.lb = Lb; par.ld1 = Ld1; par.k1 = k1; par.h = h; par.voltage = voltage; par.la2 = La2; par.ld2 = Ld2; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.Wmax = d_Wmax; par.Wmin = d_Wmin; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; 
par.int_iJ3 = d_int_iJ3; double2 B; B.x = reB; B.y = imB; double2 Astat2 ={A2.real(), A2.imag()}; dim3 numblocks(Nq/NQ, Ns/NS, Nv); dim3 threadsPerBlock(NP, NQ, NS); MotionEquationMultiplierDoubleScheme << <numblocks, threadsPerBlock >> >(par, La1 + Ld1 + La2 + Ld2 + Lb, 3, A, Astat2, B); return retriveBCurr(); } std::complex<double> Multiplier::CurrentA(double reA, double imA) { PAR par; double d = period; double h = 2.*Pi/d; double La = period*double(Nperiods); par.la = La; par.lb = 1.; par.ld = 1.; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.Wmax = d_Wmax; par.Wmin = d_Wmin; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; double2 zero = {0,0}; dim3 threadsPerBlock(NP, NQ, NS); dim3 numblocks(Nq / NQ, Ns / NS, Nv); double A; A = sqrt(reA*reA + imA*imA); // printf("\n B loop: %g\n", La+Ld+Lb ); // printf("\n Threads: %i, %i, %i\n", threadsPerBlock.x, threadsPerBlock.y, threadsPerBlock.z ); // hipMemcpy( d_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy( d_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy(d_int_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy(d_int_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice); // hipMemcpy( d_avEN, &dzero, sizeof(double), hipMemcpyHostToDevice); gpuErrChk(hipMemcpy(d_par, &par, sizeof(PAR), hipMemcpyHostToDevice)); MotionEquationMultiplier << <numblocks, threadsPerBlock >> >(d_par, La, 1, A, zero); return retriveBCurr()*exp(I*arg(reA + I*imA)); } std::complex<double> MultiplierThreeCavity::CurrentA2(double A1, double reA, double imA) { PAR par; double d = period; double h = 2.*Pi/d; double La1 = period*double(Nperiods); par.la1 = La1; par.la2 = La2; par.ld1 = Ld1; par.lb = 1.; par.ld = 1.; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.Wmax = d_Wmax; par.Wmin = d_Wmin; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; double2 zero = {0,0}; double2 A = {reA, imA}; dim3 threadsPerBlock(NP, NQ, NS); dim3 numblocks(Nq/NQ, Ns/NS, Nv); hipLaunchKernelGGL(( MotionEquationMultiplierDoubleScheme) , dim3(numblocks), dim3(threadsPerBlock) , 0, 0, par, La1 + La2 + Ld1, 1, A1, A, zero); return retriveBCurr(); } void MultiplierMultiModes::CurrentAMultiModes(std::complex<double> *Amps, std::complex<double> * currs, double *buffRe, double *buffIm, int Na, cplx *J1, cplx *J2) { PAR par; double d = period; double h = 2.*Pi/d; double La = period*double(Nperiods); int Nstop = La/dz; par.la1 = La; par.ld = Ld; par.lb = 1.; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; par.int_rJ3_1 = d_int_rJ3_1; par.int_iJ3_1 = d_int_iJ3_1; double2 zero = {0,0}; dim3 threadsPerBlock(NP, NQ, NS); dim3 numblocks(Nq/NQ, Ns/NS, Nv); par.Amps = (double2*) d_Amps; int ierr = hipMemcpy(d_Amps, (void*) Amps, sizeof(double2)*Na, hipMemcpyHostToDevice); hipLaunchKernelGGL(( MotionEquationMultiplierMultiModes) , dim3(numblocks), dim3(threadsPerBlock) , 0, 0, par, La, 1, Na, zero); gpuErrChk(hipPeekAtLastError()); retriveACurrComplex((std::complex<double>*)Amps, currs, buffRe, buffIm, Namm, Nstop); } void MultiplierMultiModes::retriveACurrComplex(std::complex<double> *Amps, std::complex<double> *currs, double 
*currsBuffRe, double *currsBuffIm, int Na, int Nstop) { int GQ = Nq / NQ; int GS = Ns / NS; int GV = Nv; double reJ = 0, imJ = 0; double rF, iF, z; double La = period*double(Nperiods); std::complex<double> J; // printf("memcpy: %i\t", hipMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), hipMemcpyDeviceToHost)); // printf("memcpy: %i\n", hipMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), hipMemcpyDeviceToHost)); gpuErrChk(hipMemcpy((void *)currsBuffRe, d_rJ3, sizeof(double)*GQ*GS*GV*Nmax, hipMemcpyDeviceToHost)) gpuErrChk(hipMemcpy((void *)currsBuffIm, d_iJ3, sizeof(double)*GQ*GS*GV*Nmax, hipMemcpyDeviceToHost)) for (int a = 0; a < Na; a++) { currs[a] = 0; } // FILE* debugfile = fopen("F:\\Piotr\\CalcData\\mm_orotron_Data\\debug.txt", "w"); for (int j = 0; j < Nstop; j++) { reJ = 0; imJ = 0; for (int i = 0; i < GQ*GS*GV; i++) { reJ += currsBuffRe[i*Nmax + j]; imJ += currsBuffIm[i*Nmax + j]; } for (int a = 0; a < Na; a++) { z = (double)j * dz; sincos(Pi / La*z*double(a - Na / 2), &iF, &rF); J = cplx(reJ, imJ)*cplx(rF, -iF); currs[a] += (J); // if(a == 1) fprintf(debugfile, "%g,%g,%g,%g,%g\n",z, real(J)/double(Np*Nq*Ns*Nv), imag(J)/double(Np*Nq*Ns*Nv), abs(J)/double(Np*Nq*Ns*Nv), arg(J) ); } } double coeff = Lsolver / double(Nz*Np*Nq*Ns*Nv); for (int a = 0; a < Na; a++) currs[a] *= coeff; // fclose(debugfile); } ////////////////////////////////// ParamsM Device::setPar() { ParamsM par; int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv; int gridsize = GQ*GS*GV; double La = Nperiods*period; double h = 2.*Pi/period; par.la = La; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nmax; par.L = Lmax; par.wall = wall; par.g1 = g1; par.Ngrid = gridsize; par.ar0 = d_ar0; par.ai0 = d_ai0; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.delta = 0; par.Q0 = d_Q0; par.W0 = d_W0; par.rAk = d_rAk; par.iAk = d_iAk; par.rAq1k = d_rAq1k; par.iAq1k = d_iAq1k; par.Qk = d_Qk; par.Wk = d_Wk; par.ar0_t = d_ar0_t; par.ai0_t = d_ai0_t; par.int_rQ1 = d_int_rQ1; par.int_iQ1 = d_int_iQ1; par.ifnotdestroyed = d_ifnotdestroyed; par.g3 = g3; par.rAq1 =d_rAq1; par.iAq1 =d_iAq1; par.radii = d_radii; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; par.int_rJ3_1 = d_int_rJ3_1; par.int_iJ3_1 = d_int_iJ3_1; par.avEN = d_avEN; int *mass = new int [Np*Nq*Ns*Nv]; for(int a = 0; a < Np*Nq*Ns*Nv; a++) mass[a] = 1; gpuErrChk(hipMemcpy(d_ifnotdestroyed, mass, sizeof(int)*Np*Nq*Ns*Nv, hipMemcpyHostToDevice)); delete [] mass; return par; }
d7a6b04a6e195b0e15a3ade0436df6babd130899.cu
#include <stdio.h> #include <complex> #include "multiplier.h" #include "multiplier_three_cavity.h" #include "multiplier_multimode.h" #include "cu_mult.h" //#include "print.h" #define PAR ParamsM #ifndef dm_Pi __device__ const double dm_Pi = 3.141592653589793; #endif int Nz = 0; double Lsolver = 0; double MultiplierGroupSpeedCoefficient = 0; __device__ double grSpeedCoeff; int *d_Nz; double *d_Lsolver; //cudaPitchedPtr d2_rJ3, d2_iJ3, d2_int_rJ3, d2_int_iJ3, d2_W; //cudaPitchedPtr d1_rJ3, d1_iJ3, d1_int_rJ3, d1_int_iJ3, d1_W; __device__ void biReduce(double *A, double *B, int p0, int datasize, int logsize) { int stride = datasize; for (int q = 1; q <= logsize; q++) { stride = stride >> 1; if (p0 < stride) { A[p0] += A[p0 + stride]; } else { if (p0 < 2 * stride) { B[p0 - stride] += B[p0]; } } __syncthreads(); } } __device__ double dh( double delta) { return delta*delta*(grSpeedCoeff); } __device__ void funcA (double z, double *rA, double *iA, double2 *Amps, int Na) { *rA = 0; *iA = 0; double rF, iF; for(int i = 0; i < Na; i++) { sincos(z*double(i - Na/2), &iF, &rF); *rA += Amps[i].x*rF - Amps[i].y*iF; *iA += Amps[i].x*iF + Amps[i].y*rF; } } __global__ void __launch_bounds__ (512, 2) MotionEquationMultiplier(PAR *par, double Lstop, int Nharm, double A, double2 B)//Fixed Structure { unsigned int p0 = threadIdx.x; unsigned int Np = blockDim.x; unsigned int q0 = threadIdx.y; unsigned int Nq = blockDim.y; unsigned int s0 = threadIdx.z; unsigned int Ns = blockDim.z; unsigned int q_init = Nq*blockIdx.x + q0; unsigned int Nq_max = Nq*gridDim.x; unsigned int s_init = Ns*blockIdx.y + s0; unsigned int Ns_max = Ns*gridDim.y; unsigned int v_init = blockIdx.z; unsigned int Nv_max = gridDim.z; int warpsize = Np*Nq*Ns; int log2warpsize = round(log2((double)warpsize)); int X = blockIdx.x + gridDim.x*blockIdx.y + gridDim.y*gridDim.x*blockIdx.z; double la, lb, ld, h, k1, voltage, g1, g3; __shared__ double avEN, int_rJ3, int_iJ3; int N; double dz; N = par->Nz; la = par->la; lb = par->lb; ld = par->ld; h = par->h; k1 = par->k1; g1 = par->g1; g3 = par->g3; voltage = par->voltage; double *rJ3 = par->rJ3; double *iJ3 = par->iJ3; dz = par->L/(double)N; double z; int ifinal = floor(Lstop/dz); double Q, Qk1, Qk2, Qk3, Qk4; double W, Wk1, Wk2, Wk3, Wk4; double fA, fB, rA, r; double Wmax, Wmin; double ifnotdestroyed = 1; double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle, angle_spread_factor; double wall = par->wall; R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max); initial_angle = 0;//(0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center); R_cyclotron = (0.568*initial_angle + 0*0.035156*((double)v_init)/double(Nv_max)); kappa_cyclotron = 1.758; phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max; /*R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max); initial_angle = 0.087*((double)v_init - 0.5*Nv_max)/double(Nv_max); R_cyclotron = 0.744*initial_angle;//для f = 86.6 коэфф. 0,568; для f = 95.5: 0.744 kappa_cyclotron = 1.344; //для f = 86.6 коэфф. 1.758; для f = 95.5: 1.344 phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max;*/ double en0 = 1. + voltage/511.; angle_spread_factor = 1./sqrt(1. + initial_angle*initial_angle); // Вызывает недопустимую операцию при достаточно больших wall. 
Наверное из-за резкости параболы в initial_angle Q = 2.*dm_Pi/double(Np)*double(p0); W = 0; __shared__ double sh_sinQ[NS*NQ*NP]; __shared__ double sh_cosQ[NS*NQ*NP]; /* __shared__ double shQ[NS][NQ][NP]; __shared__ double shW[NS][NQ][NP]; __shared__ double d2_rJ3[NQ][NP]; __shared__ double d2_iJ3[NQ][NP]; __shared__ double d1_rJ3[NP]; __shared__ double d1_iJ3[NP];*/ double PH, EN, cosPH, sinPH, cosPS, sinPS, rB, iB; double H = h;//+dh(delta); if(p0+q_init+s_init + v_init == 0) { rJ3[0] = 0; iJ3[0] = 0; } if(p0+q0+s0== 0) { par->int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; par->int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; int_rJ3 = 0; int_iJ3 = 0; avEN = 0; } // if(s0+p0+q0 == 0) printf("la = %g, ld = %g, lb = %g \n", la, ld, lb); int i = 0; for(i = 1; i < N; i++) { ///////////////// z = (double)i*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); ifnotdestroyed *= (r > -wall)? 1. : 0.; ///!!!!!! // ifnotdestroyed = 1; PH = Q; EN = W + en0; // if((s0+p0+q0 == 0)) printf("%g\t%g\n", z, fB); fA = ((z<la)?sin(dm_Pi/la*z)* exp(-g1*r):0); fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0); rA = A*fA; rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk1 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.)); Wk1 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// z = ((double)i+0.5)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); // ifnotdestroyed *= (r > -wall)? 1. : 0.; // ifnotdestroyed = 1; PH = Q + 0.5*Qk1; EN = W + 0.5*Wk1 + en0; fA = ((z<la)?sin(dm_Pi/la*z) * exp(-g1*r):0); fB = ((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0; rA = A*fA; rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk2 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.)); Wk2 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// PH = Q + 0.5*Qk2; EN = W + 0.5*Wk2 + en0; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk3 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.)); Wk3 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// z = ((double)i+1.)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); // ifnotdestroyed *= (r > -wall)? 1. : 0.; // ifnotdestroyed = 1; PH = Q + Qk3; EN = W + Wk3 + en0; fA = ((z<la)? sin(dm_Pi/la*z)* exp(-g1*r):0); fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0); rA = A*fA; rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk4= dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.)); Wk4= -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; /////////////// Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4); W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4); /* shQ[s0][q0][p0] = Q; shW[s0][q0][p0] = W;*/ __syncthreads(); sincos(double(Nharm)*Q, &sinPH, &cosPH); if(Nharm == 1) fB = ((z<la)?sin(dm_Pi/la*z):0)*exp(-g1*r); //fB используется как множитель при интегрировании тока вдоль продольной координаты ВНИМАНИЕ!! 
fB зависит от q0, s0 else fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r); fB *= ifnotdestroyed; int xi = p0 + Np*q0 + Np*Nq*s0; sh_sinQ[xi] = sinPH*fB; sh_cosQ[xi] = cosPH*fB; unsigned int stride = warpsize; __syncthreads(); for(int q = 1; q <= log2warpsize; q++) { stride = stride >> 1;//roundf(powf(2., q)); if(xi < stride) { sh_sinQ[xi] += sh_sinQ[xi + stride]; } else { if(xi < 2*stride) { sh_cosQ[xi - stride] += sh_cosQ[xi]; } } __syncthreads(); } // if((i == 1300)) printf("%g\n", Q); if(xi == 0) { rJ3[X*N+i] = sh_cosQ[0]; iJ3[X*N+i] = -sh_sinQ[0]; // if((i == 1300)) printf("\n%g\n", sh_cosQ[0]); int_rJ3 += sh_cosQ[0]; int_iJ3 += -sh_sinQ[0]; } /* //////// усреднение Nharm гармоники if(s0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0, tmpPhCycl = 0; for(int ii = 0; ii < Ns; ii++) { int ii_init = Ns*blockIdx.y + ii; tmpPhCycl = 2.*dm_Pi*(double)ii_init/(double)Ns_max; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + tmpPhCycl)); if(Nharm == 1) fB = ((z<la)?sin(dm_Pi/la*z):0)*exp(-g1*r); //fB используется как множитель при интегрировании тока вдоль продольной координаты ВНИМАНИЕ!! fB зависит от q0, s0 else fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r); fB *= ifnotdestroyed; PH = shQ[ii][q0][p0]; sincos((double)Nharm*PH, &sinPS, &cosPS); tmp_rJ3 += cosPS*fB; tmp_iJ3 -= sinPS*fB; } d2_rJ3[q0][p0] = tmp_rJ3; d2_iJ3[q0][p0] = tmp_iJ3; } __threadfence(); __syncthreads(); if(s0 + q0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0; for(int ii = 0; ii < Nq; ii++) { tmp_rJ3 += d2_rJ3[ii][p0]; tmp_iJ3 += d2_iJ3[ii][p0]; } d1_rJ3[p0] = tmp_rJ3; d1_iJ3[p0] = tmp_iJ3; } __threadfence(); __syncthreads(); if(p0 + q0 +s0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0; for(int ii = 0; ii < Np; ii++) { tmp_rJ3 += d1_rJ3[ii]; tmp_iJ3 += d1_iJ3[ii]; } rJ3[i] = tmp_rJ3; iJ3[i] = tmp_iJ3; int_rJ3 += tmp_rJ3; int_iJ3 += tmp_iJ3; // if(i == ifinal) printf("<< %g %g\n", A*int_rJ3, A*tmp_rJ3); } __threadfence(); __syncthreads(); //////////////////// конец усреднения Nharm гармоники */ if(i == ifinal) { sh_sinQ[xi] = W; __syncthreads(); stride = warpsize; for(int q = 1; q <= log2warpsize; q++) { stride = stride >> 1;//warpsize/roundf(powf(2., q)); if(xi < stride) { sh_sinQ[xi] += sh_sinQ[xi + stride]; } __syncthreads(); } if(xi == 0) { avEN = sh_sinQ[0]; } __syncthreads(); sh_sinQ[xi] = W; sh_cosQ[xi] = W; stride = warpsize; for(int q = 1; q <= log2warpsize; q++) { stride = stride >> 1;// stride = warpsize/roundf(powf(2., q)); if(xi < stride) { sh_sinQ[xi] = (sh_sinQ[xi] > sh_sinQ[xi + stride]) ? sh_sinQ[xi] : sh_sinQ[xi + stride]; } else { if(xi < 2*stride) { sh_cosQ[xi - stride] = (sh_cosQ[xi - stride] < sh_cosQ[xi]) ? 
sh_cosQ[xi - stride] : sh_cosQ[xi]; } } __syncthreads(); } if(xi == 0) { Wmax = sh_sinQ[0]; Wmin = sh_cosQ[0]; } /* if(s0 == 0) { double tmp_W = 0; for(int ii = 0; ii < Ns; ii++) { EN = shW[ii][q0][p0]; tmp_W += EN; } d2_rJ3[q0][p0] = tmp_W; // if((p0 == 0)) printf(" %g >>, \n", d2_rJ3[q0][p0]); } __threadfence(); __syncthreads(); if(s0 + q0 == 0) { double tmp_rJ3 = 0; for(int ii = 0; ii < Nq; ii++) tmp_rJ3 += d2_rJ3[ii][p0]; d1_rJ3[p0] = tmp_rJ3; } __threadfence(); __syncthreads(); if(p0 + q0 +s0 == 0) { double tmp_rJ3 = 0; for(int ii = 0; ii < Np; ii++) tmp_rJ3 += d1_rJ3[ii]; (avEN) += tmp_rJ3; } */ __syncthreads(); } __threadfence(); __syncthreads(); if(i > ifinal) break; } // printf("END\t"); // if(p0 + s0 == 0) printf("(%i, %i, %i)...<%g, %g> =?= <%g>...\n", blockIdx.x, blockIdx.y, blockIdx.z,A*int_rJ3*dz, A*int_iJ3*dz, avEN); __syncthreads(); // if(p0+q_init+s_init + v_init == 0) if(p0+q0+s0 == 0) { /* printf("%i, %i, %i\t (%g, %g)\n", blockIdx.x, blockIdx.y, blockIdx.z, int_rJ3/double(Np*Nq_max*Ns_max*N)*(par->L), int_iJ3/double(Np*Nq_max*Ns_max*N)*(par->L)) ;*/ par->avEN[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = avEN; par->int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3; par->int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3; par->Wmax[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = Wmax; par->Wmin[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = Wmin; } } __global__ void __launch_bounds__ (512, 2) MotionEquationMultiplierDoubleScheme(PAR par, double Lstop, int Nharm, double A, double2 A2, double2 B)//Fixed Structure { unsigned int p0 = threadIdx.x; unsigned int q0 = threadIdx.y; unsigned int s0 = threadIdx.z; unsigned int Np = blockDim.x; unsigned int Nq = blockDim.y; unsigned int Ns = blockDim.z; unsigned int q_init = Nq*blockIdx.x + q0; unsigned int s_init = Ns*blockIdx.y + s0; unsigned int v_init = blockIdx.z; unsigned int Nq_max = Nq*gridDim.x; unsigned int Ns_max = Ns*gridDim.y; unsigned int Nv_max = gridDim.z; // printf("Thread %i/%i, %i/%i started; ", q0, Nq, p0, Np); double la1, la2, lb, ld1, ld2, h, k1, voltage, g1, g3; double fA2, rA2, iA2; __shared__ double avEN, int_rJ3, int_iJ3; // printf("Step0; "); int N; double dz; N = par.Nz; la1 = par.la1; la2 = par.la2; lb = par.lb; ld1 = par.ld1; ld2 = par.ld2; h = par.h; k1 = par.k1; g1 = par.g1; g3 = par.g3; voltage = par.voltage; double la_tot = la1 + la2 + ld1; double ifnotdestroyed = 1; double *rJ3 = par.rJ3; double *iJ3 = par.iJ3; // double *int_rJ3 = par.int_rJ3; // double *int_iJ3 = par.int_iJ3; dz = par.L/(double)N; double z; int ifinal = floor(Lstop/dz); double Q, Qk1, Qk2, Qk3, Qk4; double W, Wk1, Wk2, Wk3, Wk4; double fB; double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle; double wall = par.wall, r; R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max); initial_angle = (0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center); R_cyclotron = 0.568*initial_angle + 0.035156*((double)v_init)/double(Nv_max); kappa_cyclotron = 1.758; phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max; double en0 = 1. 
+ voltage/511.; en0 -= 0.5*initial_angle*initial_angle*(en0*en0 - 1)*en0; Q = 2.*dm_Pi/double(Np)*double(p0);// + 1./(double)Nq*((double)q0 + (double)s0/(double)Ns)); W = 0; __shared__ double shQ[NS][NQ][NP]; __shared__ double shW[NS][NQ][NP]; __shared__ double d2_rJ3[NQ][NP]; __shared__ double d2_iJ3[NQ][NP]; __shared__ double d1_rJ3[NP]; __shared__ double d1_iJ3[NP]; double PH, EN, cosPH, sinPH, cosPS, sinPS, rA, rB, iB; // printf("Step3; "); double H = h;//+dh(delta); if(p0+q_init+s_init + v_init == 0) { rJ3[0] = 0; iJ3[0] = 0; // printf("init:\t%i\t%i\t%i\t%i...........\n",p0, q_init,s_init,v_init); } if(p0+q0+s0== 0) { par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; int_rJ3 = 0; int_iJ3 = 0; avEN = 0; } int i = 0; for(i = 1; i < N; i++) { ///////////////// z = (double)i*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); ifnotdestroyed *= (r > -wall)? 1. : 0.; PH = Q; EN = W + en0; rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0); fA2= ( ((la1+ld1<z)&&(z<la_tot))? sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0); rA2 = A2.x*fA2; iA2 = A2.y*fA2; fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0); rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk1 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk1 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// z = ((double)i+0.5)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); ifnotdestroyed *= (r > -wall)? 1. : 0.; PH = Q + 0.5*Qk1; EN = W + 0.5*Wk1 + en0; rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0); fA2= ( ((la1+ld1<z)&&(z<la_tot))? sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0); rA2 = A2.x*fA2; iA2 = A2.y*fA2; fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0); rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk2 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk2 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// PH = Q + 0.5*Qk2; EN = W + 0.5*Wk2 + en0; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk3 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk3 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// z = ((double)i+1)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); ifnotdestroyed *= (r > -wall)? 1. : 0.; PH = Q + Qk3; EN = W + Wk3 + en0; rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0); fA2= ( ((la1+ld1<z)&&(z<la_tot))? 
sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0); rA2 = A2.x*fA2; iA2 = A2.y*fA2; fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0); rB = B.x*fB; iB = B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk4= dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk4= -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; /////////////// Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4); W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4); // printf("#< %i -> (%i, %i, %i)>\t", i, s0, q0, p0); // printf("#<%i>", Np_Q*Nq*(Ns*i+s0) + Np_Q*q0 + p0); // if(q0+p0+s0 == 0) printf("%i", i); shQ[s0][q0][p0] = Q; shW[s0][q0][p0] = W; __threadfence(); __syncthreads(); //////// усреднение какой-то гармоники if(s0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0; for(int ii = 0; ii < Ns; ii++) { PH = shQ[ii][q0][p0]; sincos((double)Nharm*PH, &sinPS, &cosPS); tmp_rJ3 += cosPS; tmp_iJ3 -= sinPS; } d2_rJ3[q0][p0] = tmp_rJ3; d2_iJ3[q0][p0] = tmp_iJ3; } __threadfence(); __syncthreads(); if(s0 + q0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0; for(int ii = 0; ii < Nq; ii++) { tmp_rJ3 += d2_rJ3[ii][p0]; tmp_iJ3 += d2_iJ3[ii][p0]; } d1_rJ3[p0] = tmp_rJ3; d1_iJ3[p0] = tmp_iJ3; } __threadfence(); __syncthreads(); if(p0 + q0 +s0 == 0) { double tmp_rJ3 = 0, tmp_iJ3 = 0; for(int ii = 0; ii < Np; ii++) { tmp_rJ3 += d1_rJ3[ii]; tmp_iJ3 += d1_iJ3[ii]; } rJ3[i] = tmp_rJ3; iJ3[i] = tmp_iJ3; int_rJ3 += tmp_rJ3*((Nharm == 3)?fB:fA2); int_iJ3 += tmp_iJ3*((Nharm == 3)?fB:fA2); } __threadfence(); __syncthreads(); //////////////////// конец усреднения какой-то гармоники // if((q0+p0 == 0)&&(s0 == 0)) printf("%i\t%g\t%g\n", q0, PH, EN); // if(q0+p0+s0 == 0) printf("....%i\t", i); /////////////////////// усреднение энергии if(i == ifinal) { if(s0 == 0) { double tmp_W = 0; for(int ii = 0; ii < Ns; ii++) { EN = shW[ii][q0][p0]; tmp_W += EN; } d2_rJ3[q0][p0] = W; } __threadfence(); __syncthreads(); if(s0 + q0 == 0) { double tmp_rJ3 = 0; for(int ii = 0; ii < Nq; ii++) tmp_rJ3 += d2_rJ3[ii][p0]; d1_rJ3[p0] = tmp_rJ3; } __threadfence(); __syncthreads(); if(p0 + q0 +s0 == 0) { double tmp_rJ3 = 0; for(int ii = 0; ii < Np; ii++) tmp_rJ3 += d1_rJ3[ii]; (avEN) += tmp_rJ3; } } ///////////////// конец усреднения энергии __threadfence(); __syncthreads(); if(i > ifinal) break; } __syncthreads(); if(p0+q0+s0 == 0) { *par.avEN = avEN; par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3; par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3; } } __global__ void __launch_bounds__ (512, 2) MotionEquationMultiplierMultiModes(PAR par, double Lstop, int Nharm, int Na, double2 B)//Fixed Structure { unsigned int p0 = threadIdx.x; unsigned int q0 = threadIdx.y; unsigned int s0 = threadIdx.z; unsigned int Np = blockDim.x; unsigned int Nq = blockDim.y; unsigned int Ns = blockDim.z; unsigned int q_init = Nq*blockIdx.x + q0; unsigned int s_init = Ns*blockIdx.y + s0; unsigned int v_init = blockIdx.z; unsigned int Nq_max = Nq*gridDim.x; unsigned int Ns_max = Ns*gridDim.y; // unsigned int Nv_max = gridDim.z; int warpsize = Np*Nq*Ns; int log2warpsize = round(log2((double)warpsize)); double la, lb, ld, h, k1, voltage, g1, g3; double rA1, iA1; __shared__ double avEN, int_rJ3, int_iJ3, int_rJ3_1, int_iJ3_1; int N; double dz; N = par.Nz; la = par.la1; lb = par.lb; ld = par.ld; h = par.h; k1 = par.k1; g1 = par.g1; g3 = par.g3; voltage = par.voltage; double ifnotdestroyed = 1; double *rJ3 = par.rJ3; double *iJ3 = par.iJ3; double2 *Amps = (double2 *)par.Amps; // double *int_rJ3 = par.int_rJ3; // 
double *int_iJ3 = par.int_iJ3; dz = par.L/(double)N; double z; int ifinal = floor(Lstop/dz); double Q, Qk1, Qk2, Qk3, Qk4; double W, Wk1, Wk2, Wk3, Wk4; double fB; double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle; double wall = par.wall, r; R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max); initial_angle = 0;//(0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center); R_cyclotron = 0;//0.568*initial_angle + 0.035156*((double)v_init)/double(Nv_max); kappa_cyclotron = 1.758; phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max; double en0 = 1. + voltage/511.; en0 -= 0.5*initial_angle*initial_angle*(en0*en0 - 1)*en0; double beta0 = sqrt(en0*en0 - 1)/en0; // double Delta = k1*dm_Pi/(la*beta0) ;// \delta f / f = (k_0 \pi /L)/beta_ph Q = 2.*dm_Pi/double(Np)*double(p0);// + 1./(double)Nq*((double)q0 + (double)s0/(double)Ns)); W = 0; __shared__ double2 shAmps[NP]; __shared__ double sh_sinQ[NS*NQ*NP]; __shared__ double sh_cosQ[NS*NQ*NP]; double PH, EN, cosPH, sinPH, cosPS, sinPS, rB, iB; double H = h;//+dh(delta); if(p0+q_init+s_init + v_init == 0) { rJ3[0] = 0; iJ3[0] = 0; } if(p0+q0+s0== 0) { par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; par.int_rJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; par.int_iJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0; int_rJ3 = 0; int_iJ3 = 0; int_rJ3_1 = 0; int_iJ3_1 = 0; avEN = 0; } if((q0 + s0 == 0)&&(p0 < Na)) { shAmps[p0] = Amps[p0]; } __syncthreads(); int i = 0; for(i = 1; i < N; i++) { ///////////////// z = (double)i*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); ifnotdestroyed *= 1;//(r > -wall)? 1. : 0.; PH = Q; EN = W + en0; funcA(dm_Pi/la*z, &rA1, &iA1, shAmps, Na); if(z > la) {rA1 =0; iA1 = 0;} rA1 *= exp(-g1*r); iA1 *= exp(-g1*r); fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0); rB = 0;//B.x*fB; iB = 0;//B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk1 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk1 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; // if(s0 + p0 + q0 == 0 && (i == 1)) printf("%g,%g,%g,%g\n", r, g1, Qk1, Wk1); ///////////////// z = ((double)i+0.5)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); // ifnotdestroyed *= (r > -wall)? 1. : 0.; PH = Q + 0.5*Qk1; EN = W + 0.5*Wk1 + en0; if(z > la) {rA1 =0; iA1 = 0;} fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0); rB = 0;//B.x*fB; iB = 0;//B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk2 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk2 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// PH = Q + 0.5*Qk2; EN = W + 0.5*Wk2 + en0; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk3 = dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk3 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; ///////////////// z = ((double)i+1)*dz; r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron)); // ifnotdestroyed *= (r > -wall)? 1. 
: 0.; PH = Q + Qk3; EN = W + Wk3 + en0; if(z > la) {rA1 =0; iA1 = 0;} fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0); rB = 0;//B.x*fB; iB = 0;//B.y*fB; sincos(PH, &sinPH, &cosPH); sincos(3.*PH, &sinPS, &cosPS); Qk4= dz*(H - k1*EN/sqrt(EN*EN-1.)); Wk4= -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed; /////////////// Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4); W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4); __syncthreads(); sincos(double(Nharm)*Q, &sinPH, &cosPH); if(Nharm == 1) fB = exp(-g1*r); //fB используется как множитель при интегрировании тока вдоль продольной координаты ВНИМАНИЕ!! fB зависит от q0, s0 else fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r); fB *= ifnotdestroyed; int xi = p0 + Np*q0 + Np*Nq*s0; int X = blockIdx.x + gridDim.x*blockIdx.y + gridDim.y*gridDim.x*blockIdx.z; sh_sinQ[xi] = sinPH*fB; sh_cosQ[xi] = cosPH*fB; __syncthreads(); biReduce(sh_sinQ, sh_cosQ, xi, warpsize, log2warpsize); if(xi == 0) { rJ3[X*N+i] = sh_cosQ[0]; iJ3[X*N+i] = -sh_sinQ[0]; int_rJ3 += sh_cosQ[0]; int_iJ3 += -sh_sinQ[0]; } /////////////////////// усреднение энергии if(i == ifinal) { sh_sinQ[xi] = W; __syncthreads(); biReduce(sh_sinQ, sh_cosQ, xi, warpsize, log2warpsize); if(xi == 0) { avEN = sh_sinQ[0]; } __syncthreads(); } ///////////////// конец усреднения энергии __threadfence(); __syncthreads(); if(i > ifinal) break; } __syncthreads(); if(p0+q0+s0 == 0) { *par.avEN = avEN; par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3; par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3; par.int_rJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3_1; par.int_iJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3_1; } } std::complex<double> Multiplier::retriveBCurr() { int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv; double t_deltaEn[512]; double t_deltaEn2[512]; double reJ = 0, imJ = 0; // printf("memcpy: %i\t", cudaMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), cudaMemcpyDeviceToHost)); // printf("memcpy: %i\n", cudaMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), cudaMemcpyDeviceToHost)); cudaMemcpy((void *) t_deltaEn, d_int_rJ3, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost); cudaMemcpy((void *) t_deltaEn2, d_int_iJ3, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost); for(int i = 0; i < GQ*GS*GV; i++){ reJ += t_deltaEn[i]; imJ += t_deltaEn2[i]; } double coeff = Lsolver/double(Nz*Np*Nq*Ns*Nv); // printf("re = %g, im = %g\n", reJ*coeff, imJ*coeff); std::complex<double> res = std::complex<double> (reJ*coeff, imJ*coeff); return res; } void Multiplier::retriveBCurr(std::complex<double> *J1, std::complex<double> *J2) { int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv; double t_Jre[512]; double t_Jim[512]; double t_J2re[512]; double t_J2im[512]; double reJ = 0, imJ = 0; double re2J = 0, im2J = 0; // printf("memcpy: %i\t", cudaMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), cudaMemcpyDeviceToHost)); // printf("memcpy: %i\n", cudaMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), cudaMemcpyDeviceToHost)); cudaMemcpy((void *) t_Jre, d_int_rJ3, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost); cudaMemcpy((void *) t_Jim, d_int_iJ3, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost); cudaMemcpy((void *) t_J2re, d_int_rJ3_1, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost); cudaMemcpy((void *) t_J2im, d_int_iJ3_1, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost); for(int i = 0; i < GQ*GS*GV; i++){ reJ += t_Jre[i]; imJ += 
t_Jim[i]; re2J += t_J2re[i]; im2J += t_J2im[i]; } double coeff = Lsolver/double(Nz*Np*Nq*Ns*Nv); // printf("re = %g, im = %g\n", reJ*coeff, imJ*coeff); std::complex<double> res1 = std::complex<double> (reJ*coeff, imJ*coeff); std::complex<double> res2 = std::complex<double> (re2J*coeff, im2J*coeff); *J1 = res1; *J2 = res2; // printf("J1 = %g, %g\tJ2 = %g, %g\n", *J1, *J2); } double Multiplier::retriveDeltaEnergy() { int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv; double t_deltaEn[512]; double t_wmax[512]; double t_wmin[512]; double averagedEn = 0, wmax = -99999, wmin = 99999; cudaMemcpy( t_deltaEn, d_avEN, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost); cudaMemcpy( t_wmax, d_Wmax, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost); cudaMemcpy( t_wmin, d_Wmin, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost); for(int i = 0; i < GQ*GS*GV; i++) { wmax =(wmax > t_wmax[i]) ? wmax : t_wmax[i]; wmin =(wmin < t_wmin[i]) ? wmin : t_wmin[i]; averagedEn += t_deltaEn[i]; // printf("%g\n", t_deltaEn[i]/double(NP*NQ*NS)); } double coeff = 1./double(Np*Nq*Ns*Nv); // printf("deltaW + = %g \t deltaW - = %g\n", wmax*511000., wmin*511000.); return averagedEn*coeff; } bool Device::initSolver(int nz, double lsolver, double groupSpeedCoeff, char *_solverName) { Nz = nz; Lsolver = lsolver; Lmax = lsolver; solverName = _solverName; Nmax = nz; MultiplierGroupSpeedCoefficient = groupSpeedCoeff; printf("The %s solver is intialized\n", solverName); int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv; // printf(" Nq %i, Ns %i, Nv %i \t GQ %i, GS %i, GV %i \n",Nq, Ns, Nv, GQ, GS, GV); printf("Nz, Lsolver, grSpeed, %i, %g, %g\n", Nz, Lsolver,MultiplierGroupSpeedCoefficient); gpuErrChk(cudaMalloc((void**)&d_rJ3, Nz*GQ*GS*GV*sizeof(double))); gpuErrChk(cudaMalloc((void**)&d_iJ3, Nz*GQ*GS*GV*sizeof(double))); gpuErrChk(cudaMalloc((void**)&d_Nz, sizeof(int))); gpuErrChk(cudaMalloc((void**)&d_Lsolver, sizeof(double))); gpuErrChk(cudaMalloc((void**)&d_avEN, sizeof(double)*GQ*GS*GV)); gpuErrChk(cudaMalloc((void**)&d_int_rJ3_1, sizeof(double)*GQ*GS*GV)); gpuErrChk(cudaMalloc((void**)&d_int_iJ3_1, sizeof(double)*GQ*GS*GV)); gpuErrChk(cudaMalloc((void**)&d_Amps, sizeof(cplx) * 30)); if(strcmp(solverName,"multiplier_spcharge_2d") != 0) { gpuErrChk(cudaMalloc((void**)&d_int_rJ3, sizeof(double)*GQ*GS*GV)); gpuErrChk(cudaMalloc((void**)&d_int_iJ3, sizeof(double)*GQ*GS*GV)); } gpuErrChk(cudaMalloc((void**)&d_Wmax, sizeof(double)*GQ*GS*GV)); gpuErrChk(cudaMalloc((void**)&d_Wmin, sizeof(double)*GQ*GS*GV)); gpuErrChk(cudaMalloc((void**)&d_par, sizeof(PAR))); gpuErrChk(cudaMalloc((void**)&grSpeedCoeff, sizeof(double))); gpuErrChk(cudaMemcpy((void*)d_Nz, &Nz, sizeof(int), cudaMemcpyHostToDevice)); gpuErrChk(cudaMemcpy((void*)&grSpeedCoeff, &MultiplierGroupSpeedCoefficient, sizeof(double), cudaMemcpyHostToDevice)); // TODO Here is a bug gpuErrChk(cudaMemcpy((void*)d_Lsolver, (void*)&Lsolver, sizeof(double), cudaMemcpyHostToDevice)); return 1; } void Device::releaseDeviceMemory() { cudaFree((void*)d_Nz); cudaFree((void*)d_Lsolver); cudaFree((void*)d_avEN); cudaFree((void*)d_int_rJ3); cudaFree((void*)d_int_iJ3); if(fieldLoaded) { cudaFree((void*) d_tAr); cudaFree((void*) d_tAi); } } double Multiplier::DeltaEnergy(double A) { PAR par; double d = period; double h = 2.*Pi/d; double La = period*double(Nperiods); par.la = La; par.lb = Lb; par.ld = Ld; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.Wmax = d_Wmax; par.Wmin = d_Wmin; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; 
par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; double2 zero = {0,0}; // cudaMemcpy( d_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy( d_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy(d_int_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy(d_int_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy( d_avEN, &dzero, sizeof(double), cudaMemcpyHostToDevice); dim3 threadsPerBlock(NP, NQ, NS); cudaMemcpy(d_par, &par, sizeof(PAR), cudaMemcpyHostToDevice); MotionEquationMultiplier<<<dim3((size_t) Nq/NQ,(size_t) Ns/NS,(size_t) Nv), threadsPerBlock>>>(d_par, La, 1, A, zero); /* double *debRe = new double [Nz]; double *debIm = new double [Nz]; cudaError copy1 = cudaMemcpy((void*) debRe, (void *)dm_rJq, sizeof(double)*Nz, cudaMemcpyDeviceToHost); printf("copy1 = %i \n", copy1); cudaError_t copy2 = cudaMemcpy((void*) debIm, (void *)dm_iJq, sizeof(double)*Nz, cudaMemcpyDeviceToHost); printf("copy2 = %i \n", copy2); */ //printf("memcpy: %i \n", cudaMemcpy((void*) &t_deltaEn, d_avEN, sizeof(double), cudaMemcpyDeviceToHost)); //printf("Energy delta = %g \n", t_deltaEn/double(NP*NQ*NS)); double res = retriveDeltaEnergy(); // printf("Retrieve returned: %g \n", res); return res; // delete[] debRe; delete[] debIm; } std::complex<double> Multiplier::CurrentB(double reB, double imB, double A) { PAR par; double d = period; double h = 2.*Pi/d; double La = period*double(Nperiods); par.la = La; par.lb = Lb; par.ld = Ld; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.Wmax = d_Wmax; par.Wmin = d_Wmin; // printf("CurrentB: %g, %g, %g \n", La, Ld, Lb); cudaMemset(d_rJ3, 0, sizeof(double)*Nz); cudaMemset(d_iJ3, 0, sizeof(double)*Nz); par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; double2 B; B.x = reB; B.y = imB; // printf("\n B loop: %g\n", La+Ld+Lb ); // printf("\n Threads: %i, %i, %i\n", threadsPerBlock.x, threadsPerBlock.y, threadsPerBlock.z ); dim3 numblocks(Nq/NQ, Ns/NS, Nv); dim3 threadsPerBlock(NP, NQ, NS); // cudaMemcpy( d_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy( d_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy(d_int_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy(d_int_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy( d_avEN, &dzero, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_par, &par, sizeof(PAR), cudaMemcpyHostToDevice); MotionEquationMultiplier << <numblocks, threadsPerBlock >> >(d_par, La + Ld + Lb, 3, A, B); double *jr = new double [Nz]; double *ji = new double [Nz]; cudaMemcpy(jr, d_rJ3, sizeof(double)*Nz, cudaMemcpyDeviceToHost); cudaMemcpy(ji, d_iJ3, sizeof(double)*Nz, cudaMemcpyDeviceToHost); FILE *resamp_ar = fopen("F:\\Piotr\\bwo_Data\\mdebug_jr.csv", "w"); FILE *resamp_ai = fopen("F:\\Piotr\\bwo_Data\\mdebug_ji.csv", "w"); for(int j = 0; j < Nz; j++) { fprintf(resamp_ar, "%i,%g\n", j, jr[j]); fprintf(resamp_ai, "%i,%g\n", j, ji[j]); } fclose(resamp_ar); fclose(resamp_ai); delete []jr; delete []ji; return retriveBCurr(); } std::complex<double> MultiplierThreeCavity::CurrentB2(double reB, double imB, double A, cplx A2) { PAR par; double d = period; double h = 2.*Pi/d; double La1 = period*(double)Nperiods; // printf("CurrentB2: %g, %g, %g, %g, %g \n", La1, Ld1, La2, Ld2, Lb); par.la1 = La1; par.lb = Lb; par.ld1 = Ld1; par.k1 = k1; par.h = h; par.voltage = voltage; par.la2 = La2; par.ld2 = Ld2; 
par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.Wmax = d_Wmax; par.Wmin = d_Wmin; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; double2 B; B.x = reB; B.y = imB; double2 Astat2 ={A2.real(), A2.imag()}; dim3 numblocks(Nq/NQ, Ns/NS, Nv); dim3 threadsPerBlock(NP, NQ, NS); MotionEquationMultiplierDoubleScheme << <numblocks, threadsPerBlock >> >(par, La1 + Ld1 + La2 + Ld2 + Lb, 3, A, Astat2, B); return retriveBCurr(); } std::complex<double> Multiplier::CurrentA(double reA, double imA) { PAR par; double d = period; double h = 2.*Pi/d; double La = period*double(Nperiods); par.la = La; par.lb = 1.; par.ld = 1.; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.Wmax = d_Wmax; par.Wmin = d_Wmin; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; double2 zero = {0,0}; dim3 threadsPerBlock(NP, NQ, NS); dim3 numblocks(Nq / NQ, Ns / NS, Nv); double A; A = sqrt(reA*reA + imA*imA); // printf("\n B loop: %g\n", La+Ld+Lb ); // printf("\n Threads: %i, %i, %i\n", threadsPerBlock.x, threadsPerBlock.y, threadsPerBlock.z ); // cudaMemcpy( d_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy( d_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy(d_int_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy(d_int_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice); // cudaMemcpy( d_avEN, &dzero, sizeof(double), cudaMemcpyHostToDevice); gpuErrChk(cudaMemcpy(d_par, &par, sizeof(PAR), cudaMemcpyHostToDevice)); MotionEquationMultiplier << <numblocks, threadsPerBlock >> >(d_par, La, 1, A, zero); return retriveBCurr()*exp(I*arg(reA + I*imA)); } std::complex<double> MultiplierThreeCavity::CurrentA2(double A1, double reA, double imA) { PAR par; double d = period; double h = 2.*Pi/d; double La1 = period*double(Nperiods); par.la1 = La1; par.la2 = La2; par.ld1 = Ld1; par.lb = 1.; par.ld = 1.; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.Wmax = d_Wmax; par.Wmin = d_Wmin; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; double2 zero = {0,0}; double2 A = {reA, imA}; dim3 threadsPerBlock(NP, NQ, NS); dim3 numblocks(Nq/NQ, Ns/NS, Nv); MotionEquationMultiplierDoubleScheme <<< numblocks, threadsPerBlock >>>(par, La1 + La2 + Ld1, 1, A1, A, zero); return retriveBCurr(); } void MultiplierMultiModes::CurrentAMultiModes(std::complex<double> *Amps, std::complex<double> * currs, double *buffRe, double *buffIm, int Na, cplx *J1, cplx *J2) { PAR par; double d = period; double h = 2.*Pi/d; double La = period*double(Nperiods); int Nstop = La/dz; par.la1 = La; par.ld = Ld; par.lb = 1.; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nz; par.L = Lsolver; par.wall = wall; par.g1 = g1; par.g3 = g3; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.avEN = d_avEN; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; par.int_rJ3_1 = d_int_rJ3_1; par.int_iJ3_1 = d_int_iJ3_1; double2 zero = {0,0}; dim3 threadsPerBlock(NP, NQ, NS); dim3 numblocks(Nq/NQ, Ns/NS, Nv); par.Amps = (double2*) d_Amps; int ierr = cudaMemcpy(d_Amps, (void*) Amps, sizeof(double2)*Na, cudaMemcpyHostToDevice); MotionEquationMultiplierMultiModes <<< numblocks, threadsPerBlock >>>(par, La, 1, Na, zero); gpuErrChk(cudaPeekAtLastError()); retriveACurrComplex((std::complex<double>*)Amps, currs, buffRe, buffIm, 
Namm, Nstop); } void MultiplierMultiModes::retriveACurrComplex(std::complex<double> *Amps, std::complex<double> *currs, double *currsBuffRe, double *currsBuffIm, int Na, int Nstop) { int GQ = Nq / NQ; int GS = Ns / NS; int GV = Nv; double reJ = 0, imJ = 0; double rF, iF, z; double La = period*double(Nperiods); std::complex<double> J; // printf("memcpy: %i\t", cudaMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), cudaMemcpyDeviceToHost)); // printf("memcpy: %i\n", cudaMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), cudaMemcpyDeviceToHost)); gpuErrChk(cudaMemcpy((void *)currsBuffRe, d_rJ3, sizeof(double)*GQ*GS*GV*Nmax, cudaMemcpyDeviceToHost)) gpuErrChk(cudaMemcpy((void *)currsBuffIm, d_iJ3, sizeof(double)*GQ*GS*GV*Nmax, cudaMemcpyDeviceToHost)) for (int a = 0; a < Na; a++) { currs[a] = 0; } // FILE* debugfile = fopen("F:\\Piotr\\CalcData\\mm_orotron_Data\\debug.txt", "w"); for (int j = 0; j < Nstop; j++) { reJ = 0; imJ = 0; for (int i = 0; i < GQ*GS*GV; i++) { reJ += currsBuffRe[i*Nmax + j]; imJ += currsBuffIm[i*Nmax + j]; } for (int a = 0; a < Na; a++) { z = (double)j * dz; sincos(Pi / La*z*double(a - Na / 2), &iF, &rF); J = cplx(reJ, imJ)*cplx(rF, -iF); currs[a] += (J); // if(a == 1) fprintf(debugfile, "%g,%g,%g,%g,%g\n",z, real(J)/double(Np*Nq*Ns*Nv), imag(J)/double(Np*Nq*Ns*Nv), abs(J)/double(Np*Nq*Ns*Nv), arg(J) ); } } double coeff = Lsolver / double(Nz*Np*Nq*Ns*Nv); for (int a = 0; a < Na; a++) currs[a] *= coeff; // fclose(debugfile); } ////////////////////////////////// ParamsM Device::setPar() { ParamsM par; int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv; int gridsize = GQ*GS*GV; double La = Nperiods*period; double h = 2.*Pi/period; par.la = La; par.k1 = k1; par.h = h; par.voltage = voltage; par.Nz = Nmax; par.L = Lmax; par.wall = wall; par.g1 = g1; par.Ngrid = gridsize; par.ar0 = d_ar0; par.ai0 = d_ai0; par.rJ3 = d_rJ3; par.iJ3 = d_iJ3; par.delta = 0; par.Q0 = d_Q0; par.W0 = d_W0; par.rAk = d_rAk; par.iAk = d_iAk; par.rAq1k = d_rAq1k; par.iAq1k = d_iAq1k; par.Qk = d_Qk; par.Wk = d_Wk; par.ar0_t = d_ar0_t; par.ai0_t = d_ai0_t; par.int_rQ1 = d_int_rQ1; par.int_iQ1 = d_int_iQ1; par.ifnotdestroyed = d_ifnotdestroyed; par.g3 = g3; par.rAq1 =d_rAq1; par.iAq1 =d_iAq1; par.radii = d_radii; par.int_rJ3 = d_int_rJ3; par.int_iJ3 = d_int_iJ3; par.int_rJ3_1 = d_int_rJ3_1; par.int_iJ3_1 = d_int_iJ3_1; par.avEN = d_avEN; int *mass = new int [Np*Nq*Ns*Nv]; for(int a = 0; a < Np*Nq*Ns*Nv; a++) mass[a] = 1; gpuErrChk(cudaMemcpy(d_ifnotdestroyed, mass, sizeof(int)*Np*Nq*Ns*Nv, cudaMemcpyHostToDevice)); delete [] mass; return par; }
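The MotionEquationMultiplier kernels in the pair above accumulate the beam current by folding sine and cosine partial sums in shared memory, either inline or through biReduce(), which reduces two arrays in the same loop: the lower half of the threads folds the first array while the upper half folds the second. Below is a standalone CUDA sketch of that dual reduction, not taken from the dataset; the kernel name, the fixed block size of 256 and the host driver are illustrative assumptions, and the block size must be a power of two, as NP*NQ*NS is in the original.

// Sketch of the dual shared-memory reduction used by biReduce(): one loop
// reduces sSin with the lower half of the threads and sCos with the upper
// half, mirroring how the kernels above fold sh_sinQ and sh_cosQ.
// Assumes a power-of-two block size (illustrative value 256).
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

#define BLOCK 256   // illustrative; the original uses NP*NQ*NS

__global__ void dualReduceDemo(const double *x, double *sumSin, double *sumCos)
{
    __shared__ double sSin[BLOCK];
    __shared__ double sCos[BLOCK];
    int tid = threadIdx.x;

    sincos(x[tid], &sSin[tid], &sCos[tid]);   // per-thread sin/cos, as in the kernels
    __syncthreads();

    for (int stride = BLOCK >> 1; stride > 0; stride >>= 1) {
        if (tid < stride)                          // lower half folds the sine sums
            sSin[tid] += sSin[tid + stride];
        else if (tid < 2 * stride)                 // upper half folds the cosine sums
            sCos[tid - stride] += sCos[tid];
        __syncthreads();
    }
    if (tid == 0) { *sumSin = sSin[0]; *sumCos = sCos[0]; }
}

int main()
{
    double hx[BLOCK];
    double *dx, *dsin, *dcos;
    double hsin = 0, hcos = 0;
    for (int i = 0; i < BLOCK; ++i) hx[i] = 0.01 * i;
    cudaMalloc((void **)&dx,   BLOCK * sizeof(double));
    cudaMalloc((void **)&dsin, sizeof(double));
    cudaMalloc((void **)&dcos, sizeof(double));
    cudaMemcpy(dx, hx, BLOCK * sizeof(double), cudaMemcpyHostToDevice);
    dualReduceDemo<<<1, BLOCK>>>(dx, dsin, dcos);
    cudaMemcpy(&hsin, dsin, sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(&hcos, dcos, sizeof(double), cudaMemcpyDeviceToHost);
    printf("sum sin = %g, sum cos = %g\n", hsin, hcos);
    cudaFree(dx); cudaFree(dsin); cudaFree(dcos);
    return 0;
}

The same folding order explains why both reductions finish in one pass: at every stride the two halves of the thread block work on disjoint shared arrays, so a single __syncthreads() per step is enough.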
ee4142101b3070b440d371185e8d96f9d608a600.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "search_sorted_impl.cuh" #include <hip/hip_runtime.h> #include <cmath> #include<iostream> template <typename S, typename T> __global__ void SearchSortedKernelUpper(const S *sequence, const S *values, T *output, size_t search_repeat, size_t search_len, size_t size) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < size/search_len * search_repeat; x += blockDim.x * gridDim.x) { T start = x / search_repeat * search_len; T start1 = start; T end = start + search_len; T index; S key = values[x]; while (start < end) { index = start + ((end - start) >> 1); if (!(key < sequence[index])) { start = index + 1; } else { end = index; } } output[x] = start -start1; } } template <typename S, typename T> __global__ void SearchSortedKernelLower(const S *sequence, const S *values, T *output, size_t search_repeat, size_t search_len, size_t size) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < size/search_len * search_repeat; x += blockDim.x * gridDim.x) { T start = x / search_repeat * search_len; T start1 = start; T end = start + search_len; size_t index; S key = values[x]; while (start < end) { index = start + ((end - start) >> 1); if (!(key <= sequence[index])) { start = index + 1; } else { end = index; } } output[x] = start -start1; } } template <typename S, typename T> void CalSearchSorted(const size_t size, const S *sequence, const S *values, T *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count) { size_t mn = size / search_len * search_repeat; if (right) { hipLaunchKernelGGL(( SearchSortedKernelUpper), dim3(CUDA_BLOCKS(device_id, mn)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, sequence, values, output, search_repeat, search_len, size); } else { hipLaunchKernelGGL(( SearchSortedKernelLower), dim3(CUDA_BLOCKS(device_id, mn)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, sequence, values, output, search_repeat, search_len, size); } return; } template CUDA_LIB_EXPORT void CalSearchSorted<double, int32_t>(const size_t size, const double *sequence, const double *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<float, int32_t>(const size_t size, const float *sequence, const float *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int64_t, int32_t>(const size_t size, const int64_t *sequence, const int64_t *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int32_t, int32_t>(const size_t size, const 
int32_t *sequence, const int32_t *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int16_t, int32_t>(const size_t size, const int16_t *sequence, const int16_t *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int8_t, int32_t>(const size_t size, const int8_t *sequence, const int8_t *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<double, int64_t>(const size_t size, const double *sequence, const double *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<float, int64_t>(const size_t size, const float *sequence, const float *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int64_t, int64_t>(const size_t size, const int64_t *sequence, const int64_t *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int32_t, int64_t>(const size_t size, const int32_t *sequence, const int32_t *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int16_t, int64_t>(const size_t size, const int16_t *sequence, const int16_t *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int8_t, int64_t>(const size_t size, const int8_t *sequence, const int8_t *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, hipStream_t cuda_stream, int *count);
ee4142101b3070b440d371185e8d96f9d608a600.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "search_sorted_impl.cuh" #include <cuda_runtime.h> #include <cmath> #include<iostream> template <typename S, typename T> __global__ void SearchSortedKernelUpper(const S *sequence, const S *values, T *output, size_t search_repeat, size_t search_len, size_t size) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < size/search_len * search_repeat; x += blockDim.x * gridDim.x) { T start = x / search_repeat * search_len; T start1 = start; T end = start + search_len; T index; S key = values[x]; while (start < end) { index = start + ((end - start) >> 1); if (!(key < sequence[index])) { start = index + 1; } else { end = index; } } output[x] = start -start1; } } template <typename S, typename T> __global__ void SearchSortedKernelLower(const S *sequence, const S *values, T *output, size_t search_repeat, size_t search_len, size_t size) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < size/search_len * search_repeat; x += blockDim.x * gridDim.x) { T start = x / search_repeat * search_len; T start1 = start; T end = start + search_len; size_t index; S key = values[x]; while (start < end) { index = start + ((end - start) >> 1); if (!(key <= sequence[index])) { start = index + 1; } else { end = index; } } output[x] = start -start1; } } template <typename S, typename T> void CalSearchSorted(const size_t size, const S *sequence, const S *values, T *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count) { size_t mn = size / search_len * search_repeat; if (right) { SearchSortedKernelUpper<<<CUDA_BLOCKS(device_id, mn), CUDA_THREADS(device_id), 0, cuda_stream>>>(sequence, values, output, search_repeat, search_len, size); } else { SearchSortedKernelLower<<<CUDA_BLOCKS(device_id, mn), CUDA_THREADS(device_id), 0, cuda_stream>>>(sequence, values, output, search_repeat, search_len, size); } return; } template CUDA_LIB_EXPORT void CalSearchSorted<double, int32_t>(const size_t size, const double *sequence, const double *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<float, int32_t>(const size_t size, const float *sequence, const float *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int64_t, int32_t>(const size_t size, const int64_t *sequence, const int64_t *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int32_t, int32_t>(const size_t size, const int32_t *sequence, const int32_t *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool 
right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int16_t, int32_t>(const size_t size, const int16_t *sequence, const int16_t *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int8_t, int32_t>(const size_t size, const int8_t *sequence, const int8_t *values, int32_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<double, int64_t>(const size_t size, const double *sequence, const double *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<float, int64_t>(const size_t size, const float *sequence, const float *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int64_t, int64_t>(const size_t size, const int64_t *sequence, const int64_t *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int32_t, int64_t>(const size_t size, const int32_t *sequence, const int32_t *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int16_t, int64_t>(const size_t size, const int16_t *sequence, const int16_t *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count); template CUDA_LIB_EXPORT void CalSearchSorted<int8_t, int64_t>(const size_t size, const int8_t *sequence, const int8_t *values, int64_t *output, int *seq_dim, size_t search_repeat, size_t search_len, bool right, const uint32_t &device_id, cudaStream_t cuda_stream, int *count);
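The two kernels in the searchsorted pair above implement per-row binary searches: SearchSortedKernelLower returns the first position whose element is greater than or equal to the key (a lower bound) and SearchSortedKernelUpper the first position whose element is strictly greater (an upper bound), both relative to the start of the row, with the "right" flag in CalSearchSorted selecting between them. The following host-side sketch, compilable with nvcc but not taken from the dataset, checks the Upper variant's loop against std::upper_bound; the row contents and the helper name are illustrative.

// Host-side sketch: the loop body of SearchSortedKernelUpper, run on one
// sorted row, returns the same offset as std::upper_bound.
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

static int upperIndex(const std::vector<double> &row, double key)
{
    int start = 0, end = static_cast<int>(row.size());
    while (start < end) {
        int mid = start + ((end - start) >> 1);
        if (!(key < row[mid])) start = mid + 1;   // same condition as the kernel
        else                   end = mid;
    }
    return start;   // first position with row[pos] > key
}

int main()
{
    std::vector<double> row = {1.0, 3.0, 3.0, 7.0};   // one sorted "sequence" row
    for (double key : {0.5, 3.0, 7.0, 9.0}) {
        int ref = static_cast<int>(std::upper_bound(row.begin(), row.end(), key) - row.begin());
        assert(upperIndex(row, key) == ref);          // kernel logic matches std::upper_bound
        printf("key %.1f -> index %d\n", key, ref);
    }
    return 0;
}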
62b62a52ce17a32f15c35d97d05e4bf208a4d75a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <limits.h> // Include para las utilidades de computacin paralela #include "cputils.h" /** * Estructura antena */ typedef struct { int y; int x; } Antena; /** * Macro para acceder a las posiciones del mapa */ #define m(y,x) mapa[ (y * cols) + x ] #define currentGPU 0 __global__ void gpu_init(int *mapad, int max, int size){ /*Identificaciones necesarios*/ int IDX_Thread = threadIdx.x; /*Identificacion del hilo en la dimension*/ int IDY_Thread = threadIdx.y; /*Identificacion del hilo en la dimension y*/ int IDX_block = blockIdx.x; /*Identificacion del bloque en la dimension x*/ int IDY_block = blockIdx.y; /*Identificacion del bloque en la dimension y */ int shapeGrid_X = gridDim.x; /*Numeros del bloques en la dimension */ int threads_per_block = blockDim.x * blockDim.y; /* Numero de hilos por bloque (1 dimension) */ /*Formula para calcular la posicion*/ //Posicion del vector dependiendo del hilo y del bloque int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread); //inicializamos if(position<size) mapad[position] = max; } void print_mapa(int * mapa, int rows, int cols, Antena * a){ if(rows > 50 || cols > 30){ printf("Mapa muy grande para imprimir\n"); return; }; printf("Mapa [%d,%d]\n",rows,cols); for(int i=0; i<rows; i++){ for(int j=0; j<cols; j++){ int val = m(i,j); printf(" %6d ",val); } printf("\n"); } printf("\n"); } /** * Funcin principal */ int main(int nargs, char ** vargs){ // // 1. LEER DATOS DE ENTRADA // // Comprobar nmero de argumentos if(nargs < 7){ fprintf(stderr,"Uso: %s rows cols distMax nAntenas x0 y0 [x1 y1, ...]\n",vargs[0]); return -1; } // Leer los argumentos de entrada int rows = atoi(vargs[1]); int cols = atoi(vargs[2]); int distMax = atoi(vargs[3]); int nAntenas = atoi(vargs[4]); if(nAntenas<1 || nargs != (nAntenas*2+5)){ fprintf(stderr,"Error en la lista de antenas\n"); return -1; } // Mensaje printf("Calculando el nmero de antenas necesarias para cubrir un mapa de" " (%d x %d)\ncon una distancia mxima no superior a %d " "y con %d antenas iniciales\n\n",rows,cols,distMax,nAntenas); // Reservar memoria para las antenas // Leer antenas // // 2. INICIACIN // // Medir el tiempo double tiempo = cp_Wtime(); // Crear el mapa int * mapa = (int *) malloc((size_t) (rows*cols) * sizeof(int) ); //Crear y reservar la memoria DEVICE int *mapad; hipMalloc( (void**) &mapad, sizeof(int) * (int) (rows*cols)); // Iniciar el mapa con el valor MAX INT int size = rows*cols; int tam = (int) ceil( ((float)(rows * cols)) /size); dim3 bloqdimfunc1(128,1); dim3 griddimfunc1(tam,1); /* Enviamos la matriz al dispositivo */ hipMemcpy(mapad, mapa, sizeof(int) * (rows*cols),hipMemcpyHostToDevice); /* Llamamos a la funcion gpu_init */ hipLaunchKernelGGL(( gpu_init), dim3(griddimfunc1), dim3(bloqdimfunc1), 0, 0, mapad,INT_MAX,size); /* Sincronizamos para estabilizar los datos */ hipDeviceSynchronize(); /* Recibimos la matriz de Device */ hipMemcpy(mapa, mapad, sizeof(int) * (rows*cols),hipMemcpyDeviceToHost); print_mapa(mapa,rows,cols,NULL); // // 4. 
MOSTRAR RESULTADOS // // tiempo tiempo = cp_Wtime() - tiempo; // Salida printf("Time: %f\n",tiempo); /* Comprobamos si se ha realizado bien la funcion */ int error=0,z; for(z=0;z<rows*cols;z++){ if(mapa[z]!=INT_MAX) error=1; } if(error==1) printf("Algo salio mal\n"); else printf ("Todo correcto\n"); /* Liberamos memoria */ hipFree(mapad); /* Liberamos el dispositivo */ hipDeviceReset(); return 0; }
62b62a52ce17a32f15c35d97d05e4bf208a4d75a.cu
#include <stdlib.h> #include <stdio.h> #include <cuda.h> #include <limits.h> // Include para las utilidades de computación paralela #include "cputils.h" /** * Estructura antena */ typedef struct { int y; int x; } Antena; /** * Macro para acceder a las posiciones del mapa */ #define m(y,x) mapa[ (y * cols) + x ] #define currentGPU 0 __global__ void gpu_init(int *mapad, int max, int size){ /*Identificaciones necesarios*/ int IDX_Thread = threadIdx.x; /*Identificacion del hilo en la dimension*/ int IDY_Thread = threadIdx.y; /*Identificacion del hilo en la dimension y*/ int IDX_block = blockIdx.x; /*Identificacion del bloque en la dimension x*/ int IDY_block = blockIdx.y; /*Identificacion del bloque en la dimension y */ int shapeGrid_X = gridDim.x; /*Numeros del bloques en la dimension */ int threads_per_block = blockDim.x * blockDim.y; /* Numero de hilos por bloque (1 dimension) */ /*Formula para calcular la posicion*/ //Posicion del vector dependiendo del hilo y del bloque int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread); //inicializamos if(position<size) mapad[position] = max; } void print_mapa(int * mapa, int rows, int cols, Antena * a){ if(rows > 50 || cols > 30){ printf("Mapa muy grande para imprimir\n"); return; }; printf("Mapa [%d,%d]\n",rows,cols); for(int i=0; i<rows; i++){ for(int j=0; j<cols; j++){ int val = m(i,j); printf(" %6d ",val); } printf("\n"); } printf("\n"); } /** * Función principal */ int main(int nargs, char ** vargs){ // // 1. LEER DATOS DE ENTRADA // // Comprobar número de argumentos if(nargs < 7){ fprintf(stderr,"Uso: %s rows cols distMax nAntenas x0 y0 [x1 y1, ...]\n",vargs[0]); return -1; } // Leer los argumentos de entrada int rows = atoi(vargs[1]); int cols = atoi(vargs[2]); int distMax = atoi(vargs[3]); int nAntenas = atoi(vargs[4]); if(nAntenas<1 || nargs != (nAntenas*2+5)){ fprintf(stderr,"Error en la lista de antenas\n"); return -1; } // Mensaje printf("Calculando el número de antenas necesarias para cubrir un mapa de" " (%d x %d)\ncon una distancia máxima no superior a %d " "y con %d antenas iniciales\n\n",rows,cols,distMax,nAntenas); // Reservar memoria para las antenas // Leer antenas // // 2. INICIACIÓN // // Medir el tiempo double tiempo = cp_Wtime(); // Crear el mapa int * mapa = (int *) malloc((size_t) (rows*cols) * sizeof(int) ); //Crear y reservar la memoria DEVICE int *mapad; cudaMalloc( (void**) &mapad, sizeof(int) * (int) (rows*cols)); // Iniciar el mapa con el valor MAX INT int size = rows*cols; int tam = (int) ceil( ((float)(rows * cols)) /size); dim3 bloqdimfunc1(128,1); dim3 griddimfunc1(tam,1); /* Enviamos la matriz al dispositivo */ cudaMemcpy(mapad, mapa, sizeof(int) * (rows*cols),cudaMemcpyHostToDevice); /* Llamamos a la funcion gpu_init */ gpu_init<<<griddimfunc1, bloqdimfunc1>>>(mapad,INT_MAX,size); /* Sincronizamos para estabilizar los datos */ cudaDeviceSynchronize(); /* Recibimos la matriz de Device */ cudaMemcpy(mapa, mapad, sizeof(int) * (rows*cols),cudaMemcpyDeviceToHost); print_mapa(mapa,rows,cols,NULL); // // 4. MOSTRAR RESULTADOS // // tiempo tiempo = cp_Wtime() - tiempo; // Salida printf("Time: %f\n",tiempo); /* Comprobamos si se ha realizado bien la funcion */ int error=0,z; for(z=0;z<rows*cols;z++){ if(mapa[z]!=INT_MAX) error=1; } if(error==1) printf("Algo salio mal\n"); else printf ("Todo correcto\n"); /* Liberamos memoria */ cudaFree(mapad); /* Liberamos el dispositivo */ cudaDeviceReset(); return 0; }
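One detail worth flagging in the antenna pair above: because size is set to rows*cols, tam = ceil((float)(rows*cols)/size) evaluates to 1, so gpu_init is launched as a single 128-thread block and only the first 128 cells of the map are written; for maps larger than 128 cells the final INT_MAX check will generally report "Algo salio mal". A minimal sketch of the usual ceil-division sizing follows; initMap and THREADS_PER_BLOCK are illustrative names, not part of the files above.

// Sketch only: size the grid by ceil division so every cell of the
// flattened map receives a thread.
#include <climits>
#include <cstdio>
#include <cuda_runtime.h>

#define THREADS_PER_BLOCK 128

__global__ void initMap(int *mapad, int value, int size)
{
    int position = blockIdx.x * blockDim.x + threadIdx.x;   // flat 1D index
    if (position < size) mapad[position] = value;
}

int main()
{
    const int rows = 200, cols = 200;
    const int size = rows * cols;
    int *mapad = NULL;
    cudaMalloc((void **)&mapad, size * sizeof(int));

    int blocks = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // ceil(size / threads)
    initMap<<<blocks, THREADS_PER_BLOCK>>>(mapad, INT_MAX, size);
    cudaDeviceSynchronize();

    // Spot-check the last cell on the host.
    int last = 0;
    cudaMemcpy(&last, mapad + size - 1, sizeof(int), cudaMemcpyDeviceToHost);
    printf("last cell = %d (expect %d)\n", last, INT_MAX);
    cudaFree(mapad);
    return 0;
}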
d9175270edad42fb9c3a23844c8f6c8eaa6d5177.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/data_layers.hpp" namespace caffe { template <typename Dtype, typename Mtype> void BasePrefetchingDataLayer<Dtype,Mtype>::Forward_gpu( const vector<Blob<Dtype,Mtype>*>& bottom, const vector<Blob<Dtype,Mtype>*>& top) { Batch<Dtype,Mtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy<Dtype,Mtype>(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy<Dtype,Mtype>(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); INSTANTIATE_LAYER_GPU_FORWARD_FF(BasePrefetchingDataLayer); } // namespace caffe
d9175270edad42fb9c3a23844c8f6c8eaa6d5177.cu
#include <vector> #include "caffe/data_layers.hpp" namespace caffe { template <typename Dtype, typename Mtype> void BasePrefetchingDataLayer<Dtype,Mtype>::Forward_gpu( const vector<Blob<Dtype,Mtype>*>& bottom, const vector<Blob<Dtype,Mtype>*>& top) { Batch<Dtype,Mtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy<Dtype,Mtype>(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy<Dtype,Mtype>(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); INSTANTIATE_LAYER_GPU_FORWARD_FF(BasePrefetchingDataLayer); } // namespace caffe
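The only functional change hipify makes in this pair is the stream handle in the final fence: `cudaStreamSynchronize(cudaStreamDefault)` becomes `hipStreamSynchronize(hipStreamDefault)`. The sketch below shows why that fence is there: without it, the host could recycle the prefetch buffer while the asynchronous copy on the default stream is still in flight. The `CHECK_CUDA` macro and buffer names are hypothetical stand-ins, not Caffe's `CUDA_CHECK` or its Batch type.

```cuda
// Minimal sketch: copy on the default stream, then fence before reusing the
// source buffer. CHECK_CUDA and the buffer names are hypothetical.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            return 1;                                                 \
        }                                                             \
    } while (0)

int main() {
    std::vector<float> batch(1024, 1.0f);   // stands in for batch->data_
    float *d_top = nullptr;                 // stands in for top[0]'s GPU data
    CHECK_CUDA(cudaMalloc(&d_top, batch.size() * sizeof(float)));

    CHECK_CUDA(cudaMemcpyAsync(d_top, batch.data(),
                               batch.size() * sizeof(float),
                               cudaMemcpyHostToDevice, cudaStreamDefault));

    // Without this fence the host could start refilling `batch` while the
    // copy is still in flight; this is the call hipify rewrites to
    // hipStreamSynchronize(hipStreamDefault).
    CHECK_CUDA(cudaStreamSynchronize(cudaStreamDefault));

    CHECK_CUDA(cudaFree(d_top));
    return 0;
}
```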
dece19d864f2dbb3f9f602c5f810674cd8f8e66d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. Indicesou may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/norm_op.h" #include "paddle/fluid/platform/bfloat16.h" namespace paddle { namespace operators { __device__ __forceinline__ platform::float16 square_root(platform::float16 x) { return static_cast<platform::float16>(sqrtf(static_cast<float>(x))); } __device__ __forceinline__ float square_root(float x) { return sqrtf(x); } __device__ __forceinline__ double square_root(double x) { return sqrt(x); } template <typename T, int BlockDim> __global__ void Normalize(const T* x, const int pre, const int axis_n, // dim in axis const int post, const T eps, T* y, T* out_norm) { using MT = typename details::MPTypeTrait<T>::Type; typedef hipcub::BlockReduce<MT, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int num = pre * post; for (int i = blockIdx.x; i < num; i += gridDim.x) { int base = (i / post) * post * axis_n + (i % post); MT sum = 0.0; __shared__ MT norm; for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { const MT x_ij = static_cast<MT>(x[base + j * post]); sum += x_ij * x_ij; } MT reduce_result = BlockReduce(temp_storage).Sum(sum); if (threadIdx.x == 0) { norm = square_root(reduce_result + static_cast<MT>(eps)); out_norm[i] = static_cast<T>(norm); } __syncthreads(); for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { const int index = base + j * post; y[index] = static_cast<T>((static_cast<MT>(x[index]) / norm)); } } } template <typename DeviceContext, typename T> class NormCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in_x = ctx.Input<framework::Tensor>("X"); auto* out_y = ctx.Output<framework::Tensor>("Out"); auto xdim = in_x->dims(); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis = xdim.size() + axis; T eps = static_cast<T>(ctx.Attr<float>("epsilon")); bool is_test = ctx.Attr<bool>("is_test"); framework::Tensor* out_norm; framework::Tensor out_norm_tmp; if (is_test) { auto out_dim = in_x->dims(); out_dim[axis] = 1; out_norm = &out_norm_tmp; out_norm->Resize(out_dim); } else { out_norm = ctx.Output<framework::Tensor>("Norm"); } const T* x = in_x->data<T>(); T* y = out_y->mutable_data<T>(ctx.GetPlace()); T* norm = out_norm->mutable_data<T>(ctx.GetPlace()); int pre, n, post; GetDims(xdim, axis, &pre, &n, &post); auto& dev_ctx = ctx.cuda_device_context(); #ifdef __HIPCC__ const int block = 256; #else const int block = 512; #endif int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = ::max(max_threads / block, 1); int grid = ::min(max_blocks, pre * post); hipLaunchKernelGGL(( Normalize<T, block>), dim3(grid), dim3(block), 0, 
dev_ctx.stream(), x, pre, n, post, eps, y, norm); } }; template <typename T, int BlockDim> __global__ void NormalizeGradient(const T* x, const T* x_norm, const T* y_grad, const int pre, const int axis_n, const int post, T* x_grad) { using MT = typename details::MPTypeTrait<T>::Type; typedef hipcub::BlockReduce<MT, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage_sum; int num = pre * post; for (int i = blockIdx.x; i < num; i += gridDim.x) { MT sum = 0.0; __shared__ MT row_sum; __shared__ MT row_sqrt_norm; __shared__ MT row_norm; auto base = (i / post) * post * axis_n + (i % post); for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { int index = base + j * post; sum += static_cast<MT>(x[index]) * static_cast<MT>(y_grad[index]); } MT reduce_result = BlockReduce(temp_storage_sum).Sum(sum); if (threadIdx.x == 0) { row_sum = reduce_result; row_sqrt_norm = static_cast<MT>(x_norm[i]); row_norm = row_sqrt_norm * row_sqrt_norm; } __syncthreads(); for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { int index = base + j * post; const MT x_ij = static_cast<MT>(x[index]); const MT dy_ij = static_cast<MT>(y_grad[index]); x_grad[index] = static_cast<T>((dy_ij - x_ij * row_sum / row_norm) / row_sqrt_norm); } } } template <typename DeviceContext, typename T, typename AttrType = T> class NormGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in_x = ctx.Input<framework::Tensor>("X"); auto* in_norm = ctx.Input<framework::Tensor>("Norm"); auto* in_dy = ctx.Input<framework::Tensor>(framework::GradVarName("Out")); auto* out_dx = ctx.Output<framework::Tensor>(framework::GradVarName("X")); T* dx = out_dx->mutable_data<T>(ctx.GetPlace()); const T* x = in_x->data<T>(); const T* x_norm = in_norm->data<T>(); const T* dy = in_dy->data<T>(); auto xdim = in_x->dims(); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis = xdim.size() + axis; int pre, n, post; GetDims(xdim, axis, &pre, &n, &post); auto& dev_ctx = ctx.cuda_device_context(); #ifdef __HIPCC__ const int block = 256; #else const int block = 512; #endif int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = ::max(max_threads / block, 1); int grid = ::min(max_blocks, pre * post); hipLaunchKernelGGL(( NormalizeGradient<T, block>), dim3(grid), dim3(block), 0, dev_ctx.stream(), x, x_norm, dy, pre, n, post, dx); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using CUDA = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(norm, ops::NormCUDAKernel<CUDA, paddle::platform::float16>, ops::NormCUDAKernel<CUDA, float>, ops::NormCUDAKernel<CUDA, double>); REGISTER_OP_CUDA_KERNEL( norm_grad, ops::NormGradCUDAKernel<CUDA, paddle::platform::float16>, ops::NormGradCUDAKernel<CUDA, float>, ops::NormGradCUDAKernel<CUDA, double>);
dece19d864f2dbb3f9f602c5f810674cd8f8e66d.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. Indicesou may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/norm_op.h" #include "paddle/fluid/platform/bfloat16.h" namespace paddle { namespace operators { __device__ __forceinline__ platform::float16 square_root(platform::float16 x) { return static_cast<platform::float16>(sqrtf(static_cast<float>(x))); } __device__ __forceinline__ float square_root(float x) { return sqrtf(x); } __device__ __forceinline__ double square_root(double x) { return sqrt(x); } template <typename T, int BlockDim> __global__ void Normalize(const T* x, const int pre, const int axis_n, // dim in axis const int post, const T eps, T* y, T* out_norm) { using MT = typename details::MPTypeTrait<T>::Type; typedef cub::BlockReduce<MT, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int num = pre * post; for (int i = blockIdx.x; i < num; i += gridDim.x) { int base = (i / post) * post * axis_n + (i % post); MT sum = 0.0; __shared__ MT norm; for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { const MT x_ij = static_cast<MT>(x[base + j * post]); sum += x_ij * x_ij; } MT reduce_result = BlockReduce(temp_storage).Sum(sum); if (threadIdx.x == 0) { norm = square_root(reduce_result + static_cast<MT>(eps)); out_norm[i] = static_cast<T>(norm); } __syncthreads(); for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { const int index = base + j * post; y[index] = static_cast<T>((static_cast<MT>(x[index]) / norm)); } } } template <typename DeviceContext, typename T> class NormCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in_x = ctx.Input<framework::Tensor>("X"); auto* out_y = ctx.Output<framework::Tensor>("Out"); auto xdim = in_x->dims(); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis = xdim.size() + axis; T eps = static_cast<T>(ctx.Attr<float>("epsilon")); bool is_test = ctx.Attr<bool>("is_test"); framework::Tensor* out_norm; framework::Tensor out_norm_tmp; if (is_test) { auto out_dim = in_x->dims(); out_dim[axis] = 1; out_norm = &out_norm_tmp; out_norm->Resize(out_dim); } else { out_norm = ctx.Output<framework::Tensor>("Norm"); } const T* x = in_x->data<T>(); T* y = out_y->mutable_data<T>(ctx.GetPlace()); T* norm = out_norm->mutable_data<T>(ctx.GetPlace()); int pre, n, post; GetDims(xdim, axis, &pre, &n, &post); auto& dev_ctx = ctx.cuda_device_context(); #ifdef __HIPCC__ const int block = 256; #else const int block = 512; #endif int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); int grid = std::min(max_blocks, pre * post); Normalize<T, block><<<grid, block, 0, dev_ctx.stream()>>>(x, pre, n, post, eps, y, norm); } }; template <typename T, int BlockDim> __global__ void NormalizeGradient(const T* x, 
const T* x_norm, const T* y_grad, const int pre, const int axis_n, const int post, T* x_grad) { using MT = typename details::MPTypeTrait<T>::Type; typedef cub::BlockReduce<MT, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage_sum; int num = pre * post; for (int i = blockIdx.x; i < num; i += gridDim.x) { MT sum = 0.0; __shared__ MT row_sum; __shared__ MT row_sqrt_norm; __shared__ MT row_norm; auto base = (i / post) * post * axis_n + (i % post); for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { int index = base + j * post; sum += static_cast<MT>(x[index]) * static_cast<MT>(y_grad[index]); } MT reduce_result = BlockReduce(temp_storage_sum).Sum(sum); if (threadIdx.x == 0) { row_sum = reduce_result; row_sqrt_norm = static_cast<MT>(x_norm[i]); row_norm = row_sqrt_norm * row_sqrt_norm; } __syncthreads(); for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { int index = base + j * post; const MT x_ij = static_cast<MT>(x[index]); const MT dy_ij = static_cast<MT>(y_grad[index]); x_grad[index] = static_cast<T>((dy_ij - x_ij * row_sum / row_norm) / row_sqrt_norm); } } } template <typename DeviceContext, typename T, typename AttrType = T> class NormGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in_x = ctx.Input<framework::Tensor>("X"); auto* in_norm = ctx.Input<framework::Tensor>("Norm"); auto* in_dy = ctx.Input<framework::Tensor>(framework::GradVarName("Out")); auto* out_dx = ctx.Output<framework::Tensor>(framework::GradVarName("X")); T* dx = out_dx->mutable_data<T>(ctx.GetPlace()); const T* x = in_x->data<T>(); const T* x_norm = in_norm->data<T>(); const T* dy = in_dy->data<T>(); auto xdim = in_x->dims(); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis = xdim.size() + axis; int pre, n, post; GetDims(xdim, axis, &pre, &n, &post); auto& dev_ctx = ctx.cuda_device_context(); #ifdef __HIPCC__ const int block = 256; #else const int block = 512; #endif int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); int grid = std::min(max_blocks, pre * post); NormalizeGradient<T, block><<<grid, block, 0, dev_ctx.stream()>>>( x, x_norm, dy, pre, n, post, dx); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using CUDA = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(norm, ops::NormCUDAKernel<CUDA, paddle::platform::float16>, ops::NormCUDAKernel<CUDA, float>, ops::NormCUDAKernel<CUDA, double>); REGISTER_OP_CUDA_KERNEL( norm_grad, ops::NormGradCUDAKernel<CUDA, paddle::platform::float16>, ops::NormGradCUDAKernel<CUDA, float>, ops::NormGradCUDAKernel<CUDA, double>);
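The most visible rewrite in this pair is the kernel-launch syntax: the CUDA `Normalize<T, block><<<grid, block, 0, dev_ctx.stream()>>>(...)` becomes `hipLaunchKernelGGL(( Normalize<T, block>), dim3(grid), dim3(block), 0, dev_ctx.stream(), ...)`, with an extra set of parentheses so the comma inside the template arguments does not split the macro arguments; the CUB header also switches from `cub/cub.cuh` to `hipcub/hipcub.hpp` behind a `namespace cub = hipcub` alias, and `std::max`/`std::min` become `::max`/`::min`. The standalone sketch below reproduces the same launch pattern on a toy block-wide sum; `BlockSum` and the buffer names are illustrative, and the HIP form is shown only in a comment.

```cuda
// Standalone sketch, assuming CUB is available (it ships with recent CUDA
// toolkits); BlockSum and the buffer names are illustrative.
#include <cstdio>
#include <cuda_runtime.h>
#include <cub/cub.cuh>

template <typename T, int BlockDim>
__global__ void BlockSum(const T *in, T *out, int n) {
    typedef cub::BlockReduce<T, BlockDim> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    T val = (threadIdx.x < n) ? in[threadIdx.x] : T(0);
    T sum = BlockReduce(temp_storage).Sum(val);   // block-wide reduction
    if (threadIdx.x == 0) *out = sum;
}

int main() {
    const int n = 256;
    float h_in[n];
    for (int i = 0; i < n; ++i) h_in[i] = 1.0f;

    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // CUDA form, as in the .cu half of the pair:
    BlockSum<float, 256><<<1, 256, 0, stream>>>(d_in, d_out, n);
    // HIP form emitted by hipify, as in the .hip half:
    //   hipLaunchKernelGGL((BlockSum<float, 256>), dim3(1), dim3(256), 0,
    //                      stream, d_in, d_out, n);

    float h_out = 0.0f;
    cudaMemcpyAsync(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);
    printf("sum = %.1f (expected 256.0)\n", h_out);

    cudaStreamDestroy(stream);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
```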
4f577a64eb44e6d832cd769d00bf29f7030838c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //scan.cu //#include "kernel.hip" #include "comm.h" #include "wtime.h" #include "iostream" #define max_thd 256 #define max_block 256 #define thread_limit 512 //#define block_limit 1024 #define GPU_COWORKER 1 graph * mygraph; __global__ void warp_merge_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/32; int i = threadIdx.x%32; int p = threadIdx.x/32; long int mycount=0; __shared__ index_t local[max_thd]; __shared__ vertex_t A_diag[33*8]; __shared__ vertex_t B_diag[33*8]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); if(i==0){ A_diag[p*33+32]=m; B_diag[p*33+32]=n; } index_t index = (m+n)/32*i; vertex_t A_top, A_bottom, B_top, Ai, Bi; if(index>m){ A_top = m; B_top = index-m; } else if(index<=m){ A_top = index; B_top = 0; } if(index>n){ A_bottom = index-n; } else if(index<=n){ A_bottom = 0; } while(1){ int offset=(A_top-A_bottom)/2; if(A_top==A_bottom){ A_diag[p*33+i]=A_top; B_diag[p*33+i]=B_top; break; } Ai = A_top - offset; Bi = B_top + offset; if(offset<1){ if(a[Ai-1]<b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_diag[p*33+i]=Ai-1; B_diag[p*33+i]=Bi+1; break; } else if(a[Ai-1]==b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi+1; break; } } if(a[Ai]>b[Bi-1]){ if(a[Ai-1]<b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_top = Ai-1; B_top = Bi+1; } else if(a[Ai-1]==b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi+1; break; } } else if(a[Ai]<b[Bi-1]){ A_bottom = Ai+1; } else if(a[Ai]==b[Bi-1]){ A_diag[p*33+i]=Ai+1; B_diag[p*33+i]=Bi; break; } } // __syncthreads(); vertex_t lowA = A_diag[p*33+i]; vertex_t lowB = B_diag[p*33+i]; vertex_t highA = A_diag[p*33+i+1]; vertex_t highB = B_diag[p*33+i+1]; vertex_t x,y; while(lowA<highA && lowB<highB){ x=a[lowA]; y=b[lowB]; if(x<y){ lowA++; } else if(x>y){ lowB++; } else if(x==y){ lowA++; lowB++; mycount++; } } tid += blockDim.x * gridDim.x/32; // __syncthreads(); } //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } __global__ void block_merge_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/256; int i = threadIdx.x; index_t mycount=0; __shared__ index_t local[max_thd]; __shared__ vertex_t A_diag[257]; __shared__ vertex_t B_diag[257]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); if(i==0){ A_diag[256]=m; B_diag[256]=n; } index_t index = (m+n)/256*i; vertex_t A_top, A_bottom, B_top, Ai, Bi; if(index>m){ A_top = m; B_top = index-m; } else if(index<=m){ A_top = index; B_top = 0; } if(index>n){ A_bottom = index-n; } else if(index<=n){ A_bottom = 0; } while(1){ int offset=(A_top-A_bottom)/2; if(A_top==A_bottom){ A_diag[i]=A_top; B_diag[i]=B_top; break; } Ai = A_top - offset; Bi = B_top + offset; if(offset<1){ if(a[Ai-1]<b[Bi]){ 
A_diag[i]=Ai; B_diag[i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_diag[i]=Ai-1; B_diag[i]=Bi+1; break; } else if(a[Ai-1]==b[Bi]){ A_diag[i]=Ai; B_diag[i]=Bi+1; break; } } if(a[Ai]>b[Bi-1]){ if(a[Ai-1]<b[Bi]){ A_diag[i]=Ai; B_diag[i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_top = Ai-1; B_top = Bi+1; } else if(a[Ai-1]==b[Bi]){ A_diag[i]=Ai; B_diag[i]=Bi+1; break; } } else if(a[Ai]<b[Bi-1]){ A_bottom = Ai+1; } else if(a[Ai]==b[Bi-1]){ A_diag[i]=Ai+1; B_diag[i]=Bi; break; } } __syncthreads(); vertex_t lowA = A_diag[i]; vertex_t lowB = B_diag[i]; vertex_t highA = A_diag[i+1]; vertex_t highB = B_diag[i+1]; vertex_t x,y; while(lowA<highA && lowB<highB){ x=a[lowA]; y=b[lowB]; if(x<y){ lowA++; } else if(x>y){ lowB++; } else if(x==y){ lowA++; lowB++; mycount++; } } tid += blockDim.x * gridDim.x/256; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]+=val; // count[blockIdx.x]=val; } } __global__ void block_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/max_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ mycount++; bot = top + max_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/max_thd; top = top*m/max_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += GPU_COWORKER * gridDim.x*blockDim.x/ max_thd; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]+=val; // count[blockIdx.x]=val; } } __global__ void warp_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } 
if(X>=Y){ bot = r+1; } } j += 32; } tid += GPU_COWORKER* blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } //---------------------------------------------------------------------------------------- __global__ void classify_kernel //step 1: classify the edge list into different arrays ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, //inputs index_t* small_num, index_t* mid_num, index_t* large_num //outputs: small/large head, adjacent, and number by thread ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t small_offset=0; index_t mid_offset=0; index_t large_offset=0; //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ small_offset++; } else { //could be more then 2 catigories // else{ mid_offset++; } } } small_num[tid] = small_offset; mid_num[tid] = mid_offset; large_num[tid] = large_offset; } __global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256 ( index_t* data, index_t* block_offset ) { //step 1: each block do prefix sum inside int tid = threadIdx.x +blockIdx.x*blockDim.x; __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = data[tid]; __syncthreads(); index_t val=0; for(int i=0; i<=threadIdx.x; i++){ val += temp_in[i]; } __syncthreads(); if(threadIdx.x==255){ block_offset[blockIdx.x] = val; } data[tid] = val; __syncthreads(); } __global__ void prefix_kernel_2 ( index_t* block_offset ) { //step 2: collect each block's offset and do prefix for this set __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = block_offset[threadIdx.x]; __syncthreads(); index_t val=0; for(int i=0; i<threadIdx.x; i++){ val += temp_in[i]; } // val = temp_in[threadIdx.x]; block_offset[threadIdx.x] = val; __syncthreads(); } __global__ void prefix_kernel_3 ( index_t* data, index_t* block_offset ) { //step 3: update by adding block offset int tid = threadIdx.x + blockIdx.x*blockDim.x; index_t val = data[tid]; index_t offset = block_offset[blockIdx.x]; val += offset; data[tid] = val; __syncthreads(); } __global__ void collect_kernel ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, index_t* small_num, index_t* mid_num, index_t* large_num, index_t N1, index_t N2, vertex_t* dest_head, vertex_t* dest_adj ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t thd_base_small = 0; index_t thd_base_mid = N1; // index_t thd_base_large = N1+N2; if(tid!=0){ thd_base_small = small_num[tid-1]; thd_base_mid = N1 + mid_num[tid-1]; // thd_base_large = N1 + N2 + large_num[tid-1]; } //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; index_t small_offset = thd_base_small; index_t mid_offset = thd_base_mid; // index_t large_offset = thd_base_large; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = 
begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ dest_head[small_offset] = head; dest_adj [small_offset] = adj; small_offset++; } // else if(n>0){ //could be more then 2 catigories else{ dest_head[mid_offset] = head; dest_adj [mid_offset] = adj; mid_offset++; } } } } __global__ void reduce_kernel2(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void* part_scan(void * data){ index_t thd_count=0; int GPU_id = *(int*)data; int i = GPU_id; // cout<<"GPU id = "<<GPU_id<<"\n"; // hipSetDevice(GPU_id); hipSetDevice(1); H_ERR(hipDeviceSynchronize() ); vertex_t* dev_adj; // vertex_t* dev_head; index_t* dev_begin; index_t* dev_count; index_t partEdgeCount = mygraph->partEdgeCount[i]; vertex_t vert_count = mygraph->vert_count; vertex_t* partAdj = mygraph->partAdj[i]; // vertex_t* partHead= mygraph->partHead[i]; // index_t* partDegree = mygraph->partDegree[i]; index_t* partBegin = mygraph->partBegin[i]; index_t* count = mygraph->count; H_ERR(hipMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) ); // H_ERR(hipMalloc(&dev_head, partEdgeCount*sizeof(vertex_t)) ); // H_ERR(hipMalloc(&dev_degree, vert_count*sizeof(index_t)) ); H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) ); index_t* block_offset; H_ERR(hipMalloc(&block_offset, max_block*sizeof(index_t)) ); H_ERR(hipMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); // H_ERR(hipMemcpy(dev_head, partHead, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); // H_ERR(hipMemcpy(dev_degree, partDegree, vert_count*sizeof(index_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) ); double time2=wtime(); for(int j=0; j<PART_NUM; j++){ index_t totalEdgeCount = mygraph->partEdgeCount[j]; vertex_t* head = mygraph->partHead[j]; vertex_t* adj = mygraph->partAdj[j]; // index_t* degree = mygraph->partDegree[j]; vertex_t* classified_head; vertex_t* classified_adj; index_t* small_num; index_t* mid_num; index_t* large_num; vertex_t* src_head; vertex_t* src_adj; // index_t* src_degree; H_ERR(hipMalloc(&small_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&mid_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&large_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) ); // H_ERR(hipMalloc(&src_degree, vert_count*sizeof(index_t)) ); H_ERR(hipMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); // H_ERR(hipMemcpy(src_degree, degree, vert_count*sizeof(index_t), hipMemcpyHostToDevice) ); H_ERR(hipMalloc(&classified_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&classified_adj, totalEdgeCount*sizeof(vertex_t)) ); // double time1=wtime(); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( classify_kernel) , dim3(max_block),dim3(max_thd), 0, 0, src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num ); H_ERR(hipDeviceSynchronize() ); //test for prefix sum hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, small_num, block_offset); H_ERR(hipDeviceSynchronize() ); 
hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, small_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, mid_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, mid_num, block_offset); H_ERR(hipDeviceSynchronize() ); /* hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, large_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, large_num, block_offset); H_ERR(hipDeviceSynchronize() ); */ index_t N1,N2,N3; H_ERR(hipMemcpy(&N1 , &small_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipMemcpy(&N2 , &mid_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); // H_ERR(hipMemcpy(&N3 , &large_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipDeviceSynchronize() ); // cout<<"N1 = "<<N1<<"\n"; // cout<<"N2 = "<<N2<<"\n"; // cout<<"N3 = "<<N3<<"\n"; hipLaunchKernelGGL(( collect_kernel) , dim3(max_block),dim3(max_thd), 0, 0, src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num, N1, N2, classified_head, classified_adj ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( warp_merge_kernel), dim3(max_block),dim3(max_thd), 0, 0, classified_head, classified_adj, dev_adj, // dev_degree, dev_begin, 0, N1, dev_count ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( block_merge_kernel), dim3(max_block),dim3(max_thd), 0, 0, classified_head, classified_adj, //dev_head, //dev_adj, dev_adj, // dev_degree, dev_begin, N1, N1+N2, // 0 + GPU_id*256, // totalEdgeCount, dev_count ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_kernel2) , dim3(1),dim3(1), 0, 0, dev_count); H_ERR(hipDeviceSynchronize() ); H_ERR(hipMemcpy(&count[i], dev_count, sizeof(index_t), hipMemcpyDeviceToHost)); thd_count += count[i]; H_ERR(hipFree(small_num) ); H_ERR(hipFree(large_num) ); H_ERR(hipFree(classified_head) ); H_ERR(hipFree(classified_adj) ); H_ERR(hipFree(src_head) ); H_ERR(hipFree(src_adj) ); // H_ERR(hipFree(src_begin) ); // cout<<"GPU "<<i<<" part "<<j<<"\n"; } double time4 = wtime(); count[i] = thd_count; // cout<<"gpu "<<i<<" binary count="<<count[i]<<"\n"; // cout<<"time = "<<time4-time2<<" seconds"<<endl; H_ERR(hipFree(dev_adj) ); // H_ERR(hipFree(dev_head) ); // H_ERR(hipFree(dev_degree) ); H_ERR(hipFree(dev_begin) ); H_ERR(hipFree(block_offset) ); H_ERR(hipFree(dev_count) ); return NULL; }
4f577a64eb44e6d832cd769d00bf29f7030838c9.cu
//scan.cu //#include "kernel.cu" #include "comm.h" #include "wtime.h" #include "iostream" #define max_thd 256 #define max_block 256 #define thread_limit 512 //#define block_limit 1024 #define GPU_COWORKER 1 graph * mygraph; __global__ void warp_merge_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/32; int i = threadIdx.x%32; int p = threadIdx.x/32; long int mycount=0; __shared__ index_t local[max_thd]; __shared__ vertex_t A_diag[33*8]; __shared__ vertex_t B_diag[33*8]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); if(i==0){ A_diag[p*33+32]=m; B_diag[p*33+32]=n; } index_t index = (m+n)/32*i; vertex_t A_top, A_bottom, B_top, Ai, Bi; if(index>m){ A_top = m; B_top = index-m; } else if(index<=m){ A_top = index; B_top = 0; } if(index>n){ A_bottom = index-n; } else if(index<=n){ A_bottom = 0; } while(1){ int offset=(A_top-A_bottom)/2; if(A_top==A_bottom){ A_diag[p*33+i]=A_top; B_diag[p*33+i]=B_top; break; } Ai = A_top - offset; Bi = B_top + offset; if(offset<1){ if(a[Ai-1]<b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_diag[p*33+i]=Ai-1; B_diag[p*33+i]=Bi+1; break; } else if(a[Ai-1]==b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi+1; break; } } if(a[Ai]>b[Bi-1]){ if(a[Ai-1]<b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_top = Ai-1; B_top = Bi+1; } else if(a[Ai-1]==b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi+1; break; } } else if(a[Ai]<b[Bi-1]){ A_bottom = Ai+1; } else if(a[Ai]==b[Bi-1]){ A_diag[p*33+i]=Ai+1; B_diag[p*33+i]=Bi; break; } } // __syncthreads(); vertex_t lowA = A_diag[p*33+i]; vertex_t lowB = B_diag[p*33+i]; vertex_t highA = A_diag[p*33+i+1]; vertex_t highB = B_diag[p*33+i+1]; vertex_t x,y; while(lowA<highA && lowB<highB){ x=a[lowA]; y=b[lowB]; if(x<y){ lowA++; } else if(x>y){ lowB++; } else if(x==y){ lowA++; lowB++; mycount++; } } tid += blockDim.x * gridDim.x/32; // __syncthreads(); } //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } __global__ void block_merge_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/256; int i = threadIdx.x; index_t mycount=0; __shared__ index_t local[max_thd]; __shared__ vertex_t A_diag[257]; __shared__ vertex_t B_diag[257]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); if(i==0){ A_diag[256]=m; B_diag[256]=n; } index_t index = (m+n)/256*i; vertex_t A_top, A_bottom, B_top, Ai, Bi; if(index>m){ A_top = m; B_top = index-m; } else if(index<=m){ A_top = index; B_top = 0; } if(index>n){ A_bottom = index-n; } else if(index<=n){ A_bottom = 0; } while(1){ int offset=(A_top-A_bottom)/2; if(A_top==A_bottom){ A_diag[i]=A_top; B_diag[i]=B_top; break; } Ai = A_top - offset; Bi = B_top + offset; if(offset<1){ if(a[Ai-1]<b[Bi]){ A_diag[i]=Ai; B_diag[i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_diag[i]=Ai-1; B_diag[i]=Bi+1; 
break; } else if(a[Ai-1]==b[Bi]){ A_diag[i]=Ai; B_diag[i]=Bi+1; break; } } if(a[Ai]>b[Bi-1]){ if(a[Ai-1]<b[Bi]){ A_diag[i]=Ai; B_diag[i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_top = Ai-1; B_top = Bi+1; } else if(a[Ai-1]==b[Bi]){ A_diag[i]=Ai; B_diag[i]=Bi+1; break; } } else if(a[Ai]<b[Bi-1]){ A_bottom = Ai+1; } else if(a[Ai]==b[Bi-1]){ A_diag[i]=Ai+1; B_diag[i]=Bi; break; } } __syncthreads(); vertex_t lowA = A_diag[i]; vertex_t lowB = B_diag[i]; vertex_t highA = A_diag[i+1]; vertex_t highB = B_diag[i+1]; vertex_t x,y; while(lowA<highA && lowB<highB){ x=a[lowA]; y=b[lowB]; if(x<y){ lowA++; } else if(x>y){ lowB++; } else if(x==y){ lowA++; lowB++; mycount++; } } tid += blockDim.x * gridDim.x/256; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]+=val; // count[blockIdx.x]=val; } } __global__ void block_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/max_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ mycount++; bot = top + max_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/max_thd; top = top*m/max_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += GPU_COWORKER * gridDim.x*blockDim.x/ max_thd; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]+=val; // count[blockIdx.x]=val; } } __global__ void warp_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } tid += GPU_COWORKER* blockDim.x*gridDim.x/32; __syncthreads(); 
} __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } //---------------------------------------------------------------------------------------- __global__ void classify_kernel //step 1: classify the edge list into different arrays ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, //inputs index_t* small_num, index_t* mid_num, index_t* large_num //outputs: small/large head, adjacent, and number by thread ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t small_offset=0; index_t mid_offset=0; index_t large_offset=0; //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ small_offset++; } else { //could be more then 2 catigories // else{ mid_offset++; } } } small_num[tid] = small_offset; mid_num[tid] = mid_offset; large_num[tid] = large_offset; } __global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256 ( index_t* data, index_t* block_offset ) { //step 1: each block do prefix sum inside int tid = threadIdx.x +blockIdx.x*blockDim.x; __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = data[tid]; __syncthreads(); index_t val=0; for(int i=0; i<=threadIdx.x; i++){ val += temp_in[i]; } __syncthreads(); if(threadIdx.x==255){ block_offset[blockIdx.x] = val; } data[tid] = val; __syncthreads(); } __global__ void prefix_kernel_2 ( index_t* block_offset ) { //step 2: collect each block's offset and do prefix for this set __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = block_offset[threadIdx.x]; __syncthreads(); index_t val=0; for(int i=0; i<threadIdx.x; i++){ val += temp_in[i]; } // val = temp_in[threadIdx.x]; block_offset[threadIdx.x] = val; __syncthreads(); } __global__ void prefix_kernel_3 ( index_t* data, index_t* block_offset ) { //step 3: update by adding block offset int tid = threadIdx.x + blockIdx.x*blockDim.x; index_t val = data[tid]; index_t offset = block_offset[blockIdx.x]; val += offset; data[tid] = val; __syncthreads(); } __global__ void collect_kernel ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, index_t* small_num, index_t* mid_num, index_t* large_num, index_t N1, index_t N2, vertex_t* dest_head, vertex_t* dest_adj ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t thd_base_small = 0; index_t thd_base_mid = N1; // index_t thd_base_large = N1+N2; if(tid!=0){ thd_base_small = small_num[tid-1]; thd_base_mid = N1 + mid_num[tid-1]; // thd_base_large = N1 + N2 + large_num[tid-1]; } //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; index_t small_offset = thd_base_small; index_t mid_offset = thd_base_mid; // index_t large_offset = thd_base_large; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ dest_head[small_offset] = head; dest_adj [small_offset] = 
adj; small_offset++; } // else if(n>0){ //could be more then 2 catigories else{ dest_head[mid_offset] = head; dest_adj [mid_offset] = adj; mid_offset++; } } } } __global__ void reduce_kernel2(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void* part_scan(void * data){ index_t thd_count=0; int GPU_id = *(int*)data; int i = GPU_id; // cout<<"GPU id = "<<GPU_id<<"\n"; // cudaSetDevice(GPU_id); cudaSetDevice(1); H_ERR(cudaDeviceSynchronize() ); vertex_t* dev_adj; // vertex_t* dev_head; index_t* dev_begin; index_t* dev_count; index_t partEdgeCount = mygraph->partEdgeCount[i]; vertex_t vert_count = mygraph->vert_count; vertex_t* partAdj = mygraph->partAdj[i]; // vertex_t* partHead= mygraph->partHead[i]; // index_t* partDegree = mygraph->partDegree[i]; index_t* partBegin = mygraph->partBegin[i]; index_t* count = mygraph->count; H_ERR(cudaMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) ); // H_ERR(cudaMalloc(&dev_head, partEdgeCount*sizeof(vertex_t)) ); // H_ERR(cudaMalloc(&dev_degree, vert_count*sizeof(index_t)) ); H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) ); index_t* block_offset; H_ERR(cudaMalloc(&block_offset, max_block*sizeof(index_t)) ); H_ERR(cudaMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); // H_ERR(cudaMemcpy(dev_head, partHead, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); // H_ERR(cudaMemcpy(dev_degree, partDegree, vert_count*sizeof(index_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) ); double time2=wtime(); for(int j=0; j<PART_NUM; j++){ index_t totalEdgeCount = mygraph->partEdgeCount[j]; vertex_t* head = mygraph->partHead[j]; vertex_t* adj = mygraph->partAdj[j]; // index_t* degree = mygraph->partDegree[j]; vertex_t* classified_head; vertex_t* classified_adj; index_t* small_num; index_t* mid_num; index_t* large_num; vertex_t* src_head; vertex_t* src_adj; // index_t* src_degree; H_ERR(cudaMalloc(&small_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&mid_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&large_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) ); // H_ERR(cudaMalloc(&src_degree, vert_count*sizeof(index_t)) ); H_ERR(cudaMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); // H_ERR(cudaMemcpy(src_degree, degree, vert_count*sizeof(index_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMalloc(&classified_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&classified_adj, totalEdgeCount*sizeof(vertex_t)) ); // double time1=wtime(); H_ERR(cudaDeviceSynchronize() ); classify_kernel <<<max_block,max_thd>>>( src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num ); H_ERR(cudaDeviceSynchronize() ); //test for prefix sum prefix_kernel_1 <<<max_block,max_thd>>>(small_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(small_num, block_offset); H_ERR(cudaDeviceSynchronize() ); 
prefix_kernel_1 <<<max_block,max_thd>>>(mid_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(mid_num, block_offset); H_ERR(cudaDeviceSynchronize() ); /* prefix_kernel_1 <<<max_block,max_thd>>>(large_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(large_num, block_offset); H_ERR(cudaDeviceSynchronize() ); */ index_t N1,N2,N3; H_ERR(cudaMemcpy(&N1 , &small_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaMemcpy(&N2 , &mid_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); // H_ERR(cudaMemcpy(&N3 , &large_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaDeviceSynchronize() ); // cout<<"N1 = "<<N1<<"\n"; // cout<<"N2 = "<<N2<<"\n"; // cout<<"N3 = "<<N3<<"\n"; collect_kernel <<<max_block,max_thd>>>( src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num, N1, N2, classified_head, classified_adj ); H_ERR(cudaDeviceSynchronize() ); warp_merge_kernel<<<max_block,max_thd>>> ( classified_head, classified_adj, dev_adj, // dev_degree, dev_begin, 0, N1, dev_count ); H_ERR(cudaDeviceSynchronize() ); block_merge_kernel<<<max_block,max_thd>>> ( classified_head, classified_adj, //dev_head, //dev_adj, dev_adj, // dev_degree, dev_begin, N1, N1+N2, // 0 + GPU_id*256, // totalEdgeCount, dev_count ); H_ERR(cudaDeviceSynchronize() ); reduce_kernel2 <<<1,1>>>(dev_count); H_ERR(cudaDeviceSynchronize() ); H_ERR(cudaMemcpy(&count[i], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost)); thd_count += count[i]; H_ERR(cudaFree(small_num) ); H_ERR(cudaFree(large_num) ); H_ERR(cudaFree(classified_head) ); H_ERR(cudaFree(classified_adj) ); H_ERR(cudaFree(src_head) ); H_ERR(cudaFree(src_adj) ); // H_ERR(cudaFree(src_begin) ); // cout<<"GPU "<<i<<" part "<<j<<"\n"; } double time4 = wtime(); count[i] = thd_count; // cout<<"gpu "<<i<<" binary count="<<count[i]<<"\n"; // cout<<"time = "<<time4-time2<<" seconds"<<endl; H_ERR(cudaFree(dev_adj) ); // H_ERR(cudaFree(dev_head) ); // H_ERR(cudaFree(dev_degree) ); H_ERR(cudaFree(dev_begin) ); H_ERR(cudaFree(block_offset) ); H_ERR(cudaFree(dev_count) ); return NULL; }
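Both halves of this pair count triangles with warp- and block-level set intersection, and every counting kernel ends with the same reduction idiom: each thread keeps a private `mycount`, the counts are staged in a shared `local[]` array, thread 0 of each block writes a per-block partial into `count[blockIdx.x]`, and `reduce_kernel2` later sums the partials with a single thread. The sketch below isolates just that idiom on a trivial counting problem; the kernel and variable names (`partial_count`, `final_reduce`) are illustrative, not taken from the file.

```cuda
// Minimal sketch of the per-block partial count + single-thread final reduce
// pattern used above; names are illustrative.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

#define THREADS 256
#define BLOCKS  64

__global__ void partial_count(const int *data, int n, unsigned long long *count) {
    __shared__ unsigned long long local[THREADS];
    unsigned long long mycount = 0;
    // grid-stride loop: each thread counts the elements equal to 1 that it owns
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        if (data[i] == 1) mycount++;
    local[threadIdx.x] = mycount;
    __syncthreads();
    if (threadIdx.x == 0) {                 // thread 0 folds the block's counts
        unsigned long long val = 0;
        for (int t = 0; t < blockDim.x; ++t) val += local[t];
        count[blockIdx.x] = val;
    }
}

__global__ void final_reduce(unsigned long long *count) {
    unsigned long long val = 0;             // same role as reduce_kernel2 above
    for (int b = 0; b < BLOCKS; ++b) val += count[b];
    count[0] = val;
}

int main() {
    const int n = 1 << 20;
    std::vector<int> h_data(n, 1);          // all ones -> expected count == n
    int *d_data;
    unsigned long long *d_count;
    cudaMalloc(&d_data, n * sizeof(int));
    cudaMalloc(&d_count, BLOCKS * sizeof(unsigned long long));
    cudaMemcpy(d_data, h_data.data(), n * sizeof(int), cudaMemcpyHostToDevice);

    partial_count<<<BLOCKS, THREADS>>>(d_data, n, d_count);
    final_reduce<<<1, 1>>>(d_count);

    unsigned long long result = 0;
    cudaMemcpy(&result, d_count, sizeof(result), cudaMemcpyDeviceToHost);
    printf("count = %llu (expected %d)\n", result, n);

    cudaFree(d_data);
    cudaFree(d_count);
    return 0;
}
```

A block-wide reduction (for example cub::BlockReduce, as in the norm_op pair above) or atomics would avoid the serial loops, but the structure here deliberately mirrors the listing.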
d667e503ef73cebaf10da0a018fef24be1941a1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kmeansKernel.h" /////////////////////////////// // Atomic function overloads // /////////////////////////////// /* device helper function provides a double precision implementation of atomicMax using atomicCAS */ __device__ void atomicMax(double *const address, const double value) { if (*address >= value) return; unsigned long long int * const address_as_i = (unsigned long long int *)address; unsigned long long int old = * address_as_i, assumed; do { assumed = old; if(__longlong_as_double(assumed) >= value) break; old = atomicCAS(address_as_i, assumed, __double_as_longlong(value)); }while(assumed != old); } /* device helper function provides a single precision implementation of atomicMax using atomicCAS */ __device__ void atomicMax(float *const address, const float value) { if(*address >= value) return; int * const address_as_i = (int *)address; int old = * address_as_i, assumed; do { assumed = old; if(__int_as_float(assumed) >= value) break; old = atomicCAS(address_as_i, assumed, __float_as_int(value)); } while(assumed != old); } ////////////////////////////// // Point Assignment Kernels // ////////////////////////////// /* Global kernel that assigns one thread to one point Given points are each assigned a centroid and upper and lower bounds */ __global__ void initRunKernel(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; unsigned int centIndex; DTYPE currDistance; pointInfo[tid].uprBound = INFINITY; for(centIndex = 0; centIndex < numCent; centIndex++) { // calculate euclidean distance between point and centroid currDistance = calcDis(&pointData[tid * numDim], &centData[centIndex * numDim], numDim); if(currDistance < pointInfo[tid].uprBound) { // make the former current min the new // lower bound for it's group if(pointInfo[tid].uprBound != INFINITY) pointLwrs[(tid * numGrp) + centInfo[pointInfo[tid].centroidIndex].groupNum] = pointInfo[tid].uprBound; // update assignment and upper bound pointInfo[tid].centroidIndex = centIndex; pointInfo[tid].uprBound = currDistance; } else if(currDistance < pointLwrs[(tid * numGrp) + centInfo[centIndex].groupNum]) { pointLwrs[(tid * numGrp) + centInfo[centIndex].groupNum] = currDistance; } } } __global__ void initRunKernelLloyd(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *centData, const int numPnt, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; unsigned int centIndex; DTYPE currDistance; pointInfo[tid].uprBound = INFINITY; for(centIndex = 0; centIndex < numCent; centIndex++) { // calculate euclidean distance between point and centroid currDistance = calcDis(&pointData[tid * numDim], &centData[centIndex * numDim], numDim); if(currDistance < pointInfo[tid].uprBound) { // update assignment and upper bound pointInfo[tid].centroidIndex = centIndex; pointInfo[tid].uprBound = currDistance; } } } // Lloyds point assignment step __global__ void assignPointsLloyd(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *centData, const int numPnt, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; DTYPE currMin = INFINITY; DTYPE currDis; unsigned int index; // reassign point's former centroid before 
finding new centroid pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; for(index = 0; index < numCent; index++) { currDis = calcDis(&pointData[tid * numDim], &centData[index * numDim], numDim); if(currDis < currMin) { pointInfo[tid].centroidIndex = index; currMin = currDis; } } } /* Full Yinyang algorithm point assignment step Includes global, group, and local filters */ __global__ void assignPointsFull(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDriftArr, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; DTYPE tmpGlobLwr = INFINITY; int btid = threadIdx.x; unsigned int index; extern __shared__ unsigned int groupLclArr[]; // reassign point's former centroid before finding new centroid pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; // update points upper bound ub = ub + drift(b(x)) pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; // update group lower bounds // for all in lower bound array for(index = 0; index < numGrp; index++) { // subtract lowerbound by group's drift pointLwrs[(tid * numGrp) + index] -= maxDriftArr[index]; // if the lowerbound is less than the temp global lower, if(pointLwrs[(tid * numGrp) + index] < tmpGlobLwr) { // lower bound is new temp global lower tmpGlobLwr = pointLwrs[(tid * numGrp) + index]; } } // if the global lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // tighten upper bound ub = d(x, b(x)) pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim], numDim); // if the lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // loop through groups for(index = 0; index < numGrp; index++) { // if the lower bound is less than the upper bound // mark the group to go through the group filter if(pointLwrs[(tid * numGrp) + index] < pointInfo[tid].uprBound) groupLclArr[index + (btid * numGrp)] = 1; else groupLclArr[index + (btid * numGrp)] = 0; } // execute point calcs given the groups pointCalcsFull(&pointInfo[tid], centInfo, &pointData[tid * numDim], &pointLwrs[tid * numGrp], centData, maxDriftArr, &groupLclArr[btid * numGrp], numPnt, numCent, numGrp, numDim); } } } /* Simplified Yinyang algorithm point assignment step Includes global and group filters */ __global__ void assignPointsSimple(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDriftArr, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; DTYPE tmpGlobLwr = INFINITY; unsigned int btid = threadIdx.x; unsigned int index; pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; extern __shared__ unsigned int groupLclArr[]; // update points upper bound pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; // update group lower bounds // for all in lower bound array for(index = 0; index < numGrp; index++) { // subtract lowerbound by group's drift pointLwrs[(tid * numGrp) + index] -= maxDriftArr[index]; // if the lowerbound is less than the temp global lower, if(pointLwrs[(tid * numGrp) + index] < tmpGlobLwr) { // lower bound is new temp global lower tmpGlobLwr = pointLwrs[(tid * numGrp) + index]; } } // if the global lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // tighten 
upper bound ub = d(x, b(x)) pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim], numDim); // if the lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // loop through groups for(index = 0; index < numGrp; index++) { // if the lower bound is less than the upper bound // mark the group to go through the group filter if(pointLwrs[(tid * numGrp) + index] < pointInfo[tid].uprBound) groupLclArr[index + (btid * numGrp)] = 1; else groupLclArr[index + (btid * numGrp)] = 0; } // execute point calcs given the groups pointCalcsSimple(&pointInfo[tid],centInfo,&pointData[tid * numDim], &pointLwrs[tid * numGrp], centData, maxDriftArr, &groupLclArr[btid * numGrp], numPnt, numCent, numGrp, numDim); } } } /* Super Simplified Yinyang algorithm point assignment step Includes only the global filter */ __global__ void assignPointsSuper(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDrift, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; // point calc variables int centIndex; DTYPE compDistance; // set centroid's old centroid to be current assignment pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; // update bounds pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; pointLwrs[tid * numGrp] -= *maxDrift; if(pointLwrs[tid * numGrp] < pointInfo[tid].uprBound) { // tighten upper bound pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim],numDim); if(pointLwrs[(tid * numGrp)] < pointInfo[tid].uprBound) { // to get a new lower bound pointLwrs[tid * numGrp] = INFINITY; for(centIndex = 0; centIndex < numCent; centIndex++) { // do not calculate for the already assigned cluster if(centIndex == pointInfo[tid].oldCentroid) continue; compDistance = calcDis(&pointData[tid * numDim], &centData[centIndex * numDim], numDim); if(compDistance < pointInfo[tid].uprBound) { pointLwrs[tid * numGrp] = pointInfo[tid].uprBound; pointInfo[tid].centroidIndex = centIndex; pointInfo[tid].uprBound = compDistance; } else if(compDistance < pointLwrs[tid * numGrp]) { pointLwrs[tid * numGrp] = compDistance; } } } } } //////////////////////////////////////// // Point Calculation Device Functions // //////////////////////////////////////// __device__ void pointCalcsFull(PointInfo *pointInfoPtr, CentInfo *centInfo, DTYPE *pointDataPtr, DTYPE *pointLwrPtr, DTYPE *centData, DTYPE *maxDriftArr, unsigned int *groupArr, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int grpIndex, centIndex; DTYPE compDistance; DTYPE oldLwr = INFINITY; DTYPE oldCentUpr = pointInfoPtr->uprBound; DTYPE oldCentLwr = pointLwrPtr[centInfo[pointInfoPtr->oldCentroid].groupNum]; // loop through all the groups for(grpIndex = 0; grpIndex < numGrp; grpIndex++) { // if the group is marked as going through the group filter if(groupArr[grpIndex]) { // save the former lower bound pre-update if(grpIndex == centInfo[pointInfoPtr->oldCentroid].groupNum) oldLwr = oldCentLwr + maxDriftArr[grpIndex]; else oldLwr = pointLwrPtr[grpIndex] + maxDriftArr[grpIndex]; // reset the group's lower bound in order to find the new lower bound pointLwrPtr[grpIndex] = INFINITY; if(grpIndex == centInfo[pointInfoPtr->oldCentroid].groupNum && pointInfoPtr->oldCentroid != pointInfoPtr->centroidIndex) 
pointLwrPtr[centInfo[pointInfoPtr->oldCentroid].groupNum] = oldCentUpr; // loop through all the group's centroids for(centIndex = 0; centIndex < numCent; centIndex++) { // if the cluster is the cluster already assigned // at the start of this iteration if(centIndex == pointInfoPtr->oldCentroid) continue; // if the cluster is a part of the group being checked now if(grpIndex == centInfo[centIndex].groupNum) { // local filtering condition if(pointLwrPtr[grpIndex] < oldLwr - centInfo[centIndex].drift) continue; // perform distance calculation compDistance = calcDis(pointDataPtr, &centData[centIndex * numDim], numDim); if(compDistance < pointInfoPtr->uprBound) { pointLwrPtr[centInfo[pointInfoPtr->centroidIndex].groupNum] = pointInfoPtr->uprBound; pointInfoPtr->centroidIndex = centIndex; pointInfoPtr->uprBound = compDistance; } else if(compDistance < pointLwrPtr[grpIndex]) { pointLwrPtr[grpIndex] = compDistance; } } } } } } __device__ void pointCalcsSimple(PointInfo *pointInfoPtr, CentInfo *centInfo, DTYPE *pointDataPtr, DTYPE *pointLwrPtr, DTYPE *centData, DTYPE *maxDriftArr, unsigned int *groupArr, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int index; DTYPE compDistance; for(index = 0; index < numGrp; index++) { if(groupArr[index]) { pointLwrPtr[index] = INFINITY; } } for(index = 0; index < numCent; index++) { if(groupArr[centInfo[index].groupNum]) { if(index == pointInfoPtr->oldCentroid) continue; compDistance = calcDis(pointDataPtr, &centData[index * numDim], numDim); if(compDistance < pointInfoPtr->uprBound) { pointLwrPtr[centInfo[pointInfoPtr->centroidIndex].groupNum] = pointInfoPtr->uprBound; pointInfoPtr->centroidIndex = index; pointInfoPtr->uprBound = compDistance; } else if(compDistance < pointLwrPtr[centInfo[index].groupNum]) { pointLwrPtr[centInfo[index].groupNum] = compDistance; } } } } ////////////////////////////////// // Centroid Calculation kernels // ////////////////////////////////// __global__ void calcCentData(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *oldSums, DTYPE *newSums, unsigned int *oldCounts, unsigned int *newCounts, const int numPnt, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; unsigned int dimIndex; // atomicAdd 1 to old and new counts corresponding if(pointInfo[tid].oldCentroid >= 0) atomicAdd(&oldCounts[pointInfo[tid].oldCentroid], 1); atomicAdd(&newCounts[pointInfo[tid].centroidIndex], 1); // if old assignment and new assignment are not equal if(pointInfo[tid].oldCentroid != pointInfo[tid].centroidIndex) { // for all values in the vector for(dimIndex = 0; dimIndex < numDim; dimIndex++) { // atomic add the point's vector to the sum count if(pointInfo[tid].oldCentroid >= 0) { atomicAdd(&oldSums[(pointInfo[tid].oldCentroid * numDim) + dimIndex], pointData[(tid * numDim) + dimIndex]); } atomicAdd(&newSums[(pointInfo[tid].centroidIndex * numDim) + dimIndex], pointData[(tid * numDim) + dimIndex]); } } } __global__ void calcNewCentroids(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *centData, DTYPE *oldCentData, DTYPE *oldSums, DTYPE *newSums, DTYPE *maxDriftArr, unsigned int *oldCounts, unsigned int *newCounts, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numCent) return; DTYPE oldFeature, oldSumFeat, newSumFeat, compDrift; unsigned int dimIndex; // create the new centroid vector for(dimIndex = 0; dimIndex < numDim; dimIndex++) { if(newCounts[tid] > 0) { oldCentData[(tid * numDim) + 
dimIndex] = centData[(tid * numDim) + dimIndex]; oldFeature = centData[(tid * numDim) + dimIndex]; oldSumFeat = oldSums[(tid * numDim) + dimIndex]; newSumFeat = newSums[(tid * numDim) + dimIndex]; centData[(tid * numDim) + dimIndex] = (oldFeature * oldCounts[tid] - oldSumFeat + newSumFeat)/newCounts[tid]; } else { // no change to centroid oldCentData[(tid * numDim) + dimIndex] = centData[(tid * numDim) + dimIndex]; } newSums[(tid * numDim) + dimIndex] = 0.0; oldSums[(tid * numDim) + dimIndex] = 0.0; } // calculate the centroid's drift compDrift = calcDis(&oldCentData[tid * numDim], &centData[tid * numDim], numDim); atomicMax(&maxDriftArr[centInfo[tid].groupNum], compDrift); // set the centroid's vector to the new vector centInfo[tid].drift = compDrift; centInfo[tid].count = newCounts[tid]; // clear the count and the sum arrays oldCounts[tid] = 0; newCounts[tid] = 0; } __global__ void calcCentDataLloyd(PointInfo *pointInfo, DTYPE *pointData, DTYPE *newSums, unsigned int *newCounts, const int numPnt, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; unsigned int dimIndex; // atomicAdd 1 to new counts corresponding atomicAdd(&newCounts[pointInfo[tid].centroidIndex], 1); // for all values in the vector for(dimIndex = 0; dimIndex < numDim; dimIndex++) { atomicAdd(&newSums[(pointInfo[tid].centroidIndex * numDim) + dimIndex], pointData[(tid * numDim) + dimIndex]); } } __global__ void calcNewCentroidsLloyd(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *centData, DTYPE *newSums, unsigned int *newCounts, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numCent) return; unsigned int dimIndex; for(dimIndex = 0; dimIndex < numDim; dimIndex++) { if(newCounts[tid] > 0) { centData[(tid * numDim) + dimIndex] = newSums[(tid * numDim) + dimIndex] / newCounts[tid]; } // otherwise, no change newSums[(tid * numDim) + dimIndex] = 0.0; } centInfo[tid].count = newCounts[tid]; newCounts[tid] = 0; } /* this kernel is used to test performance differences between the yinyang centroid update and the standard centroid update */ __global__ void calcNewCentroidsAve(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *centData, DTYPE *newSums, DTYPE *maxDriftArr, unsigned int *newCounts, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); unsigned int btid = threadIdx.x; if(tid >= numCent) return; unsigned int dimIndex; DTYPE compDrift; extern __shared__ DTYPE oldCentPos[]; for(dimIndex = 0; dimIndex < numDim; dimIndex++) { if(newCounts[tid] > 0) { oldCentPos[(btid * numDim) + dimIndex] = centData[(tid * numDim) + dimIndex]; centData[(tid * numDim) + dimIndex] = newSums[(tid * numDim) + dimIndex] / newCounts[tid]; newSums[(tid * numDim) + dimIndex] = 0.0; } else { oldCentPos[(btid * numDim) + dimIndex] = centData[(tid * numDim) + dimIndex]; newSums[(tid * numDim) + dimIndex] = 0.0; } } // compute drift compDrift = calcDis(&oldCentPos[btid * numDim], &centData[tid * numDim], numDim); centInfo[tid].drift = compDrift; atomicMax(&maxDriftArr[centInfo[tid].groupNum], compDrift); newCounts[tid] = 0; } //////////////////// // Helper Kernels // //////////////////// // warms up gpu for time trialing __global__ void warmup(unsigned int * tmp) { if(threadIdx.x == 0) { *tmp = 555; } return; } // checks convergence of data on GPU __global__ void checkConverge(PointInfo *pointInfo, unsigned int *conFlag, const int numPnt) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) 
return; if(pointInfo[tid].oldCentroid != pointInfo[tid].centroidIndex) atomicCAS(conFlag, 0, 1); } /* simple helper kernel that clears the drift array of size T on the GPU. Called once each iteration for a total of MAXITER times */ __global__ void clearDriftArr(DTYPE *maxDriftArr, const int numGrp) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numGrp) return; maxDriftArr[tid] = 0.0; } __global__ void clearCentCalcData(DTYPE *newCentSum, DTYPE *oldCentSum, unsigned int *newCentCount, unsigned int *oldCentCount, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numCent) return; unsigned int dimIndex; for(dimIndex = 0; dimIndex < numDim; dimIndex++) { newCentSum[(tid * numDim) + dimIndex] = 0.0; oldCentSum[(tid * numDim) + dimIndex] = 0.0; } newCentCount[tid] = 0; oldCentCount[tid] = 0; } __global__ void clearCentCalcDataLloyd(DTYPE *newCentSum, unsigned int *newCentCount, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numCent) return; unsigned int dimIndex; for(dimIndex = 0; dimIndex < numDim; dimIndex++) { newCentSum[(tid * numDim) + dimIndex] = 0.0; } newCentCount[tid] = 0; } ///////////////////////////// // device Helper Functions // ///////////////////////////// /* Simple device helper function that takes in two vectors and returns the euclidean distance between them at DTYPE precision */ __device__ DTYPE calcDis(DTYPE *vec1, DTYPE *vec2, const int numDim) { int index; DTYPE total = 0; DTYPE square; for(index = 0; index < numDim; index++) { square = (vec1[index] - vec2[index]); total += square * square; } return sqrt(total); } ///////////////////////////////////////////////////////////////////////// // Overloaded kernels and functions for counting distance calculations // ///////////////////////////////////////////////////////////////////////// /* Global kernel that assigns one thread to one point Given points are each assigned a centroid and upper and lower bounds */ __global__ void initRunKernel(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; unsigned int centIndex; DTYPE currDistance; pointInfo[tid].uprBound = INFINITY; for(centIndex = 0; centIndex < numCent; centIndex++) { // calculate euclidean distance between point and centroid currDistance = calcDis(&pointData[tid * numDim], &centData[centIndex * numDim], numDim); atomicAdd(calcCount, 1); if(currDistance < pointInfo[tid].uprBound) { // make the former current min the new // lower bound for it's group if(pointInfo[tid].uprBound != INFINITY) pointLwrs[(tid * numGrp) + centInfo[pointInfo[tid].centroidIndex].groupNum] = pointInfo[tid].uprBound; // update assignment and upper bound pointInfo[tid].centroidIndex = centIndex; pointInfo[tid].uprBound = currDistance; } else if(currDistance < pointLwrs[(tid * numGrp) + centInfo[centIndex].groupNum]) { pointLwrs[(tid * numGrp) + centInfo[centIndex].groupNum] = currDistance; } } } /* Full Yinyang algorithm point assignment step Includes global, group, and local filters */ __global__ void assignPointsFull(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDriftArr, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int 
tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; DTYPE tmpGlobLwr = INFINITY; int btid = threadIdx.x; unsigned int index; extern __shared__ unsigned int groupLclArr[]; // reassign point's former centroid before finding new centroid pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; // update points upper bound ub = ub + drift(b(x)) pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; // update group lower bounds // for all in lower bound array for(index = 0; index < numGrp; index++) { // subtract lowerbound by group's drift pointLwrs[(tid * numGrp) + index] -= maxDriftArr[index]; // if the lowerbound is less than the temp global lower, if(pointLwrs[(tid * numGrp) + index] < tmpGlobLwr) { // lower bound is new temp global lower tmpGlobLwr = pointLwrs[(tid * numGrp) + index]; } } // if the global lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // tighten upper bound ub = d(x, b(x)) pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim], numDim); atomicAdd(calcCount, 1); // if the lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // loop through groups for(index = 0; index < numGrp; index++) { // if the lower bound is less than the upper bound // mark the group to go through the group filter if(pointLwrs[(tid * numGrp) + index] < pointInfo[tid].uprBound) groupLclArr[index + (btid * numGrp)] = 1; else groupLclArr[index + (btid * numGrp)] = 0; } // execute point calcs given the groups pointCalcsFull(&pointInfo[tid], centInfo, &pointData[tid * numDim], &pointLwrs[tid * numGrp], centData, maxDriftArr, &groupLclArr[btid * numGrp], numPnt, numCent, numGrp, numDim, calcCount); } } } /* Simplified Yinyang algorithm point assignment step Includes global and group filters */ __global__ void assignPointsSimple(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDriftArr, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; DTYPE tmpGlobLwr = INFINITY; unsigned int btid = threadIdx.x; unsigned int index; pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; extern __shared__ unsigned int groupLclArr[]; // update points upper bound pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; // update group lower bounds // for all in lower bound array for(index = 0; index < numGrp; index++) { // subtract lowerbound by group's drift pointLwrs[(tid * numGrp) + index] -= maxDriftArr[index]; // if the lowerbound is less than the temp global lower, if(pointLwrs[(tid * numGrp) + index] < tmpGlobLwr) { // lower bound is new temp global lower tmpGlobLwr = pointLwrs[(tid * numGrp) + index]; } } // if the global lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // tighten upper bound ub = d(x, b(x)) pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim], numDim); atomicAdd(calcCount, 1); // if the lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // loop through groups for(index = 0; index < numGrp; index++) { // if the lower bound is less than the upper bound // mark the group to go through the group filter if(pointLwrs[(tid * numGrp) + index] < pointInfo[tid].uprBound) groupLclArr[index + (btid * numGrp)] = 1; else 
groupLclArr[index + (btid * numGrp)] = 0; } // execute point calcs given the groups pointCalcsSimple(&pointInfo[tid],centInfo,&pointData[tid * numDim], &pointLwrs[tid * numGrp], centData, maxDriftArr, &groupLclArr[btid * numGrp], numPnt, numCent, numGrp, numDim, calcCount); } } } /* Super Simplified Yinyang algorithm point assignment step Includes only the global filter */ __global__ void assignPointsSuper(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDrift, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; // point calc variables int centIndex; DTYPE compDistance; // set centroid's old centroid to be current assignment pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; // update bounds pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; pointLwrs[tid * numGrp] -= *maxDrift; if(pointLwrs[tid * numGrp] < pointInfo[tid].uprBound) { // tighten upper bound pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim],numDim); atomicAdd(calcCount, 1); if(pointLwrs[(tid * numGrp)] < pointInfo[tid].uprBound) { // to get a new lower bound pointLwrs[tid * numGrp] = INFINITY; for(centIndex = 0; centIndex < numCent; centIndex++) { // do not calculate for the already assigned cluster if(centIndex == pointInfo[tid].oldCentroid) continue; compDistance = calcDis(&pointData[tid * numDim], &centData[centIndex * numDim], numDim); atomicAdd(calcCount, 1); if(compDistance < pointInfo[tid].uprBound) { pointLwrs[tid * numGrp] = pointInfo[tid].uprBound; pointInfo[tid].centroidIndex = centIndex; pointInfo[tid].uprBound = compDistance; } else if(compDistance < pointLwrs[tid * numGrp]) { pointLwrs[tid * numGrp] = compDistance; } } } } } //////////////////////////////////////// // Point Calculation Device Functions // //////////////////////////////////////// __device__ void pointCalcsFull(PointInfo *pointInfoPtr, CentInfo *centInfo, DTYPE *pointDataPtr, DTYPE *pointLwrPtr, DTYPE *centData, DTYPE *maxDriftArr, unsigned int *groupArr, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int grpIndex, centIndex; DTYPE compDistance; DTYPE oldLwr = INFINITY; DTYPE oldCentUpr = pointInfoPtr->uprBound; DTYPE oldCentLwr = pointLwrPtr[centInfo[pointInfoPtr->oldCentroid].groupNum]; // loop through all the groups for(grpIndex = 0; grpIndex < numGrp; grpIndex++) { // if the group is marked as going through the group filter if(groupArr[grpIndex]) { // save the former lower bound pre-update if(grpIndex == centInfo[pointInfoPtr->oldCentroid].groupNum) oldLwr = oldCentLwr + maxDriftArr[grpIndex]; else oldLwr = pointLwrPtr[grpIndex] + maxDriftArr[grpIndex]; // reset the group's lower bound in order to find the new lower bound pointLwrPtr[grpIndex] = INFINITY; if(grpIndex == centInfo[pointInfoPtr->oldCentroid].groupNum && pointInfoPtr->oldCentroid != pointInfoPtr->centroidIndex) pointLwrPtr[centInfo[pointInfoPtr->oldCentroid].groupNum] = oldCentUpr; // loop through all the group's centroids for(centIndex = 0; centIndex < numCent; centIndex++) { // if the cluster is the cluster already assigned // at the start of this iteration if(centIndex == pointInfoPtr->oldCentroid) continue; // if the cluster is a part of the group being checked now if(grpIndex == centInfo[centIndex].groupNum) { // local 
filtering condition if(pointLwrPtr[grpIndex] < oldLwr - centInfo[centIndex].drift) continue; // perform distance calculation compDistance = calcDis(pointDataPtr, &centData[centIndex * numDim], numDim); atomicAdd(calcCount, 1); if(compDistance < pointInfoPtr->uprBound) { pointLwrPtr[centInfo[pointInfoPtr->centroidIndex].groupNum] = pointInfoPtr->uprBound; pointInfoPtr->centroidIndex = centIndex; pointInfoPtr->uprBound = compDistance; } else if(compDistance < pointLwrPtr[grpIndex]) { pointLwrPtr[grpIndex] = compDistance; } } } } } } __device__ void pointCalcsSimple(PointInfo *pointInfoPtr, CentInfo *centInfo, DTYPE *pointDataPtr, DTYPE *pointLwrPtr, DTYPE *centData, DTYPE *maxDriftArr, unsigned int *groupArr, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int index; DTYPE compDistance; for(index = 0; index < numGrp; index++) { if(groupArr[index]) { pointLwrPtr[index] = INFINITY; } } for(index = 0; index < numCent; index++) { if(groupArr[centInfo[index].groupNum]) { if(index == pointInfoPtr->oldCentroid) continue; compDistance = calcDis(pointDataPtr, &centData[index * numDim], numDim); atomicAdd(calcCount, 1); if(compDistance < pointInfoPtr->uprBound) { pointLwrPtr[centInfo[pointInfoPtr->centroidIndex].groupNum] = pointInfoPtr->uprBound; pointInfoPtr->centroidIndex = index; pointInfoPtr->uprBound = compDistance; } else if(compDistance < pointLwrPtr[centInfo[index].groupNum]) { pointLwrPtr[centInfo[index].groupNum] = compDistance; } } } }
d667e503ef73cebaf10da0a018fef24be1941a1c.cu
#include "kmeansKernel.h" /////////////////////////////// // Atomic function overloads // /////////////////////////////// /* device helper function provides a double precision implementation of atomicMax using atomicCAS */ __device__ void atomicMax(double *const address, const double value) { if (*address >= value) return; unsigned long long int * const address_as_i = (unsigned long long int *)address; unsigned long long int old = * address_as_i, assumed; do { assumed = old; if(__longlong_as_double(assumed) >= value) break; old = atomicCAS(address_as_i, assumed, __double_as_longlong(value)); }while(assumed != old); } /* device helper function provides a single precision implementation of atomicMax using atomicCAS */ __device__ void atomicMax(float *const address, const float value) { if(*address >= value) return; int * const address_as_i = (int *)address; int old = * address_as_i, assumed; do { assumed = old; if(__int_as_float(assumed) >= value) break; old = atomicCAS(address_as_i, assumed, __float_as_int(value)); } while(assumed != old); } ////////////////////////////// // Point Assignment Kernels // ////////////////////////////// /* Global kernel that assigns one thread to one point Given points are each assigned a centroid and upper and lower bounds */ __global__ void initRunKernel(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; unsigned int centIndex; DTYPE currDistance; pointInfo[tid].uprBound = INFINITY; for(centIndex = 0; centIndex < numCent; centIndex++) { // calculate euclidean distance between point and centroid currDistance = calcDis(&pointData[tid * numDim], &centData[centIndex * numDim], numDim); if(currDistance < pointInfo[tid].uprBound) { // make the former current min the new // lower bound for it's group if(pointInfo[tid].uprBound != INFINITY) pointLwrs[(tid * numGrp) + centInfo[pointInfo[tid].centroidIndex].groupNum] = pointInfo[tid].uprBound; // update assignment and upper bound pointInfo[tid].centroidIndex = centIndex; pointInfo[tid].uprBound = currDistance; } else if(currDistance < pointLwrs[(tid * numGrp) + centInfo[centIndex].groupNum]) { pointLwrs[(tid * numGrp) + centInfo[centIndex].groupNum] = currDistance; } } } __global__ void initRunKernelLloyd(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *centData, const int numPnt, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; unsigned int centIndex; DTYPE currDistance; pointInfo[tid].uprBound = INFINITY; for(centIndex = 0; centIndex < numCent; centIndex++) { // calculate euclidean distance between point and centroid currDistance = calcDis(&pointData[tid * numDim], &centData[centIndex * numDim], numDim); if(currDistance < pointInfo[tid].uprBound) { // update assignment and upper bound pointInfo[tid].centroidIndex = centIndex; pointInfo[tid].uprBound = currDistance; } } } // Lloyds point assignment step __global__ void assignPointsLloyd(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *centData, const int numPnt, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; DTYPE currMin = INFINITY; DTYPE currDis; unsigned int index; // reassign point's former centroid before finding new centroid pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; for(index = 
0; index < numCent; index++) { currDis = calcDis(&pointData[tid * numDim], &centData[index * numDim], numDim); if(currDis < currMin) { pointInfo[tid].centroidIndex = index; currMin = currDis; } } } /* Full Yinyang algorithm point assignment step Includes global, group, and local filters */ __global__ void assignPointsFull(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDriftArr, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; DTYPE tmpGlobLwr = INFINITY; int btid = threadIdx.x; unsigned int index; extern __shared__ unsigned int groupLclArr[]; // reassign point's former centroid before finding new centroid pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; // update points upper bound ub = ub + drift(b(x)) pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; // update group lower bounds // for all in lower bound array for(index = 0; index < numGrp; index++) { // subtract lowerbound by group's drift pointLwrs[(tid * numGrp) + index] -= maxDriftArr[index]; // if the lowerbound is less than the temp global lower, if(pointLwrs[(tid * numGrp) + index] < tmpGlobLwr) { // lower bound is new temp global lower tmpGlobLwr = pointLwrs[(tid * numGrp) + index]; } } // if the global lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // tighten upper bound ub = d(x, b(x)) pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim], numDim); // if the lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // loop through groups for(index = 0; index < numGrp; index++) { // if the lower bound is less than the upper bound // mark the group to go through the group filter if(pointLwrs[(tid * numGrp) + index] < pointInfo[tid].uprBound) groupLclArr[index + (btid * numGrp)] = 1; else groupLclArr[index + (btid * numGrp)] = 0; } // execute point calcs given the groups pointCalcsFull(&pointInfo[tid], centInfo, &pointData[tid * numDim], &pointLwrs[tid * numGrp], centData, maxDriftArr, &groupLclArr[btid * numGrp], numPnt, numCent, numGrp, numDim); } } } /* Simplified Yinyang algorithm point assignment step Includes global and group filters */ __global__ void assignPointsSimple(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDriftArr, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; DTYPE tmpGlobLwr = INFINITY; unsigned int btid = threadIdx.x; unsigned int index; pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; extern __shared__ unsigned int groupLclArr[]; // update points upper bound pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; // update group lower bounds // for all in lower bound array for(index = 0; index < numGrp; index++) { // subtract lowerbound by group's drift pointLwrs[(tid * numGrp) + index] -= maxDriftArr[index]; // if the lowerbound is less than the temp global lower, if(pointLwrs[(tid * numGrp) + index] < tmpGlobLwr) { // lower bound is new temp global lower tmpGlobLwr = pointLwrs[(tid * numGrp) + index]; } } // if the global lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // tighten upper bound ub = d(x, b(x)) pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], 
&centData[pointInfo[tid].centroidIndex * numDim], numDim); // if the lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // loop through groups for(index = 0; index < numGrp; index++) { // if the lower bound is less than the upper bound // mark the group to go through the group filter if(pointLwrs[(tid * numGrp) + index] < pointInfo[tid].uprBound) groupLclArr[index + (btid * numGrp)] = 1; else groupLclArr[index + (btid * numGrp)] = 0; } // execute point calcs given the groups pointCalcsSimple(&pointInfo[tid],centInfo,&pointData[tid * numDim], &pointLwrs[tid * numGrp], centData, maxDriftArr, &groupLclArr[btid * numGrp], numPnt, numCent, numGrp, numDim); } } } /* Super Simplified Yinyang algorithm point assignment step Includes only the global filter */ __global__ void assignPointsSuper(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDrift, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; // point calc variables int centIndex; DTYPE compDistance; // set centroid's old centroid to be current assignment pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; // update bounds pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; pointLwrs[tid * numGrp] -= *maxDrift; if(pointLwrs[tid * numGrp] < pointInfo[tid].uprBound) { // tighten upper bound pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim],numDim); if(pointLwrs[(tid * numGrp)] < pointInfo[tid].uprBound) { // to get a new lower bound pointLwrs[tid * numGrp] = INFINITY; for(centIndex = 0; centIndex < numCent; centIndex++) { // do not calculate for the already assigned cluster if(centIndex == pointInfo[tid].oldCentroid) continue; compDistance = calcDis(&pointData[tid * numDim], &centData[centIndex * numDim], numDim); if(compDistance < pointInfo[tid].uprBound) { pointLwrs[tid * numGrp] = pointInfo[tid].uprBound; pointInfo[tid].centroidIndex = centIndex; pointInfo[tid].uprBound = compDistance; } else if(compDistance < pointLwrs[tid * numGrp]) { pointLwrs[tid * numGrp] = compDistance; } } } } } //////////////////////////////////////// // Point Calculation Device Functions // //////////////////////////////////////// __device__ void pointCalcsFull(PointInfo *pointInfoPtr, CentInfo *centInfo, DTYPE *pointDataPtr, DTYPE *pointLwrPtr, DTYPE *centData, DTYPE *maxDriftArr, unsigned int *groupArr, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int grpIndex, centIndex; DTYPE compDistance; DTYPE oldLwr = INFINITY; DTYPE oldCentUpr = pointInfoPtr->uprBound; DTYPE oldCentLwr = pointLwrPtr[centInfo[pointInfoPtr->oldCentroid].groupNum]; // loop through all the groups for(grpIndex = 0; grpIndex < numGrp; grpIndex++) { // if the group is marked as going through the group filter if(groupArr[grpIndex]) { // save the former lower bound pre-update if(grpIndex == centInfo[pointInfoPtr->oldCentroid].groupNum) oldLwr = oldCentLwr + maxDriftArr[grpIndex]; else oldLwr = pointLwrPtr[grpIndex] + maxDriftArr[grpIndex]; // reset the group's lower bound in order to find the new lower bound pointLwrPtr[grpIndex] = INFINITY; if(grpIndex == centInfo[pointInfoPtr->oldCentroid].groupNum && pointInfoPtr->oldCentroid != pointInfoPtr->centroidIndex) pointLwrPtr[centInfo[pointInfoPtr->oldCentroid].groupNum] = oldCentUpr; // loop through all the group's centroids for(centIndex = 
0; centIndex < numCent; centIndex++) { // if the cluster is the cluster already assigned // at the start of this iteration if(centIndex == pointInfoPtr->oldCentroid) continue; // if the cluster is a part of the group being checked now if(grpIndex == centInfo[centIndex].groupNum) { // local filtering condition if(pointLwrPtr[grpIndex] < oldLwr - centInfo[centIndex].drift) continue; // perform distance calculation compDistance = calcDis(pointDataPtr, &centData[centIndex * numDim], numDim); if(compDistance < pointInfoPtr->uprBound) { pointLwrPtr[centInfo[pointInfoPtr->centroidIndex].groupNum] = pointInfoPtr->uprBound; pointInfoPtr->centroidIndex = centIndex; pointInfoPtr->uprBound = compDistance; } else if(compDistance < pointLwrPtr[grpIndex]) { pointLwrPtr[grpIndex] = compDistance; } } } } } } __device__ void pointCalcsSimple(PointInfo *pointInfoPtr, CentInfo *centInfo, DTYPE *pointDataPtr, DTYPE *pointLwrPtr, DTYPE *centData, DTYPE *maxDriftArr, unsigned int *groupArr, const int numPnt, const int numCent, const int numGrp, const int numDim) { unsigned int index; DTYPE compDistance; for(index = 0; index < numGrp; index++) { if(groupArr[index]) { pointLwrPtr[index] = INFINITY; } } for(index = 0; index < numCent; index++) { if(groupArr[centInfo[index].groupNum]) { if(index == pointInfoPtr->oldCentroid) continue; compDistance = calcDis(pointDataPtr, &centData[index * numDim], numDim); if(compDistance < pointInfoPtr->uprBound) { pointLwrPtr[centInfo[pointInfoPtr->centroidIndex].groupNum] = pointInfoPtr->uprBound; pointInfoPtr->centroidIndex = index; pointInfoPtr->uprBound = compDistance; } else if(compDistance < pointLwrPtr[centInfo[index].groupNum]) { pointLwrPtr[centInfo[index].groupNum] = compDistance; } } } } ////////////////////////////////// // Centroid Calculation kernels // ////////////////////////////////// __global__ void calcCentData(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *oldSums, DTYPE *newSums, unsigned int *oldCounts, unsigned int *newCounts, const int numPnt, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; unsigned int dimIndex; // atomicAdd 1 to old and new counts corresponding if(pointInfo[tid].oldCentroid >= 0) atomicAdd(&oldCounts[pointInfo[tid].oldCentroid], 1); atomicAdd(&newCounts[pointInfo[tid].centroidIndex], 1); // if old assignment and new assignment are not equal if(pointInfo[tid].oldCentroid != pointInfo[tid].centroidIndex) { // for all values in the vector for(dimIndex = 0; dimIndex < numDim; dimIndex++) { // atomic add the point's vector to the sum count if(pointInfo[tid].oldCentroid >= 0) { atomicAdd(&oldSums[(pointInfo[tid].oldCentroid * numDim) + dimIndex], pointData[(tid * numDim) + dimIndex]); } atomicAdd(&newSums[(pointInfo[tid].centroidIndex * numDim) + dimIndex], pointData[(tid * numDim) + dimIndex]); } } } __global__ void calcNewCentroids(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *centData, DTYPE *oldCentData, DTYPE *oldSums, DTYPE *newSums, DTYPE *maxDriftArr, unsigned int *oldCounts, unsigned int *newCounts, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numCent) return; DTYPE oldFeature, oldSumFeat, newSumFeat, compDrift; unsigned int dimIndex; // create the new centroid vector for(dimIndex = 0; dimIndex < numDim; dimIndex++) { if(newCounts[tid] > 0) { oldCentData[(tid * numDim) + dimIndex] = centData[(tid * numDim) + dimIndex]; oldFeature = centData[(tid * numDim) + dimIndex]; oldSumFeat = oldSums[(tid * numDim) + 
dimIndex]; newSumFeat = newSums[(tid * numDim) + dimIndex]; centData[(tid * numDim) + dimIndex] = (oldFeature * oldCounts[tid] - oldSumFeat + newSumFeat)/newCounts[tid]; } else { // no change to centroid oldCentData[(tid * numDim) + dimIndex] = centData[(tid * numDim) + dimIndex]; } newSums[(tid * numDim) + dimIndex] = 0.0; oldSums[(tid * numDim) + dimIndex] = 0.0; } // calculate the centroid's drift compDrift = calcDis(&oldCentData[tid * numDim], &centData[tid * numDim], numDim); atomicMax(&maxDriftArr[centInfo[tid].groupNum], compDrift); // set the centroid's vector to the new vector centInfo[tid].drift = compDrift; centInfo[tid].count = newCounts[tid]; // clear the count and the sum arrays oldCounts[tid] = 0; newCounts[tid] = 0; } __global__ void calcCentDataLloyd(PointInfo *pointInfo, DTYPE *pointData, DTYPE *newSums, unsigned int *newCounts, const int numPnt, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; unsigned int dimIndex; // atomicAdd 1 to new counts corresponding atomicAdd(&newCounts[pointInfo[tid].centroidIndex], 1); // for all values in the vector for(dimIndex = 0; dimIndex < numDim; dimIndex++) { atomicAdd(&newSums[(pointInfo[tid].centroidIndex * numDim) + dimIndex], pointData[(tid * numDim) + dimIndex]); } } __global__ void calcNewCentroidsLloyd(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *centData, DTYPE *newSums, unsigned int *newCounts, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numCent) return; unsigned int dimIndex; for(dimIndex = 0; dimIndex < numDim; dimIndex++) { if(newCounts[tid] > 0) { centData[(tid * numDim) + dimIndex] = newSums[(tid * numDim) + dimIndex] / newCounts[tid]; } // otherwise, no change newSums[(tid * numDim) + dimIndex] = 0.0; } centInfo[tid].count = newCounts[tid]; newCounts[tid] = 0; } /* this kernel is used to test performance differences between the yinyang centroid update and the standard centroid update */ __global__ void calcNewCentroidsAve(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *centData, DTYPE *newSums, DTYPE *maxDriftArr, unsigned int *newCounts, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); unsigned int btid = threadIdx.x; if(tid >= numCent) return; unsigned int dimIndex; DTYPE compDrift; extern __shared__ DTYPE oldCentPos[]; for(dimIndex = 0; dimIndex < numDim; dimIndex++) { if(newCounts[tid] > 0) { oldCentPos[(btid * numDim) + dimIndex] = centData[(tid * numDim) + dimIndex]; centData[(tid * numDim) + dimIndex] = newSums[(tid * numDim) + dimIndex] / newCounts[tid]; newSums[(tid * numDim) + dimIndex] = 0.0; } else { oldCentPos[(btid * numDim) + dimIndex] = centData[(tid * numDim) + dimIndex]; newSums[(tid * numDim) + dimIndex] = 0.0; } } // compute drift compDrift = calcDis(&oldCentPos[btid * numDim], &centData[tid * numDim], numDim); centInfo[tid].drift = compDrift; atomicMax(&maxDriftArr[centInfo[tid].groupNum], compDrift); newCounts[tid] = 0; } //////////////////// // Helper Kernels // //////////////////// // warms up gpu for time trialing __global__ void warmup(unsigned int * tmp) { if(threadIdx.x == 0) { *tmp = 555; } return; } // checks convergence of data on GPU __global__ void checkConverge(PointInfo *pointInfo, unsigned int *conFlag, const int numPnt) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; if(pointInfo[tid].oldCentroid != pointInfo[tid].centroidIndex) atomicCAS(conFlag, 0, 1); } /* simple helper kernel that clears the 
drift array of size T on the GPU. Called once each iteration for a total of MAXITER times */ __global__ void clearDriftArr(DTYPE *maxDriftArr, const int numGrp) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numGrp) return; maxDriftArr[tid] = 0.0; } __global__ void clearCentCalcData(DTYPE *newCentSum, DTYPE *oldCentSum, unsigned int *newCentCount, unsigned int *oldCentCount, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numCent) return; unsigned int dimIndex; for(dimIndex = 0; dimIndex < numDim; dimIndex++) { newCentSum[(tid * numDim) + dimIndex] = 0.0; oldCentSum[(tid * numDim) + dimIndex] = 0.0; } newCentCount[tid] = 0; oldCentCount[tid] = 0; } __global__ void clearCentCalcDataLloyd(DTYPE *newCentSum, unsigned int *newCentCount, const int numCent, const int numDim) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numCent) return; unsigned int dimIndex; for(dimIndex = 0; dimIndex < numDim; dimIndex++) { newCentSum[(tid * numDim) + dimIndex] = 0.0; } newCentCount[tid] = 0; } ///////////////////////////// // device Helper Functions // ///////////////////////////// /* Simple device helper function that takes in two vectors and returns the euclidean distance between them at DTYPE precision */ __device__ DTYPE calcDis(DTYPE *vec1, DTYPE *vec2, const int numDim) { int index; DTYPE total = 0; DTYPE square; for(index = 0; index < numDim; index++) { square = (vec1[index] - vec2[index]); total += square * square; } return sqrt(total); } ///////////////////////////////////////////////////////////////////////// // Overloaded kernels and functions for counting distance calculations // ///////////////////////////////////////////////////////////////////////// /* Global kernel that assigns one thread to one point Given points are each assigned a centroid and upper and lower bounds */ __global__ void initRunKernel(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; unsigned int centIndex; DTYPE currDistance; pointInfo[tid].uprBound = INFINITY; for(centIndex = 0; centIndex < numCent; centIndex++) { // calculate euclidean distance between point and centroid currDistance = calcDis(&pointData[tid * numDim], &centData[centIndex * numDim], numDim); atomicAdd(calcCount, 1); if(currDistance < pointInfo[tid].uprBound) { // make the former current min the new // lower bound for it's group if(pointInfo[tid].uprBound != INFINITY) pointLwrs[(tid * numGrp) + centInfo[pointInfo[tid].centroidIndex].groupNum] = pointInfo[tid].uprBound; // update assignment and upper bound pointInfo[tid].centroidIndex = centIndex; pointInfo[tid].uprBound = currDistance; } else if(currDistance < pointLwrs[(tid * numGrp) + centInfo[centIndex].groupNum]) { pointLwrs[(tid * numGrp) + centInfo[centIndex].groupNum] = currDistance; } } } /* Full Yinyang algorithm point assignment step Includes global, group, and local filters */ __global__ void assignPointsFull(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDriftArr, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; DTYPE tmpGlobLwr = INFINITY; int btid = threadIdx.x; unsigned int index; 
extern __shared__ unsigned int groupLclArr[]; // reassign point's former centroid before finding new centroid pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; // update points upper bound ub = ub + drift(b(x)) pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; // update group lower bounds // for all in lower bound array for(index = 0; index < numGrp; index++) { // subtract lowerbound by group's drift pointLwrs[(tid * numGrp) + index] -= maxDriftArr[index]; // if the lowerbound is less than the temp global lower, if(pointLwrs[(tid * numGrp) + index] < tmpGlobLwr) { // lower bound is new temp global lower tmpGlobLwr = pointLwrs[(tid * numGrp) + index]; } } // if the global lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // tighten upper bound ub = d(x, b(x)) pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim], numDim); atomicAdd(calcCount, 1); // if the lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // loop through groups for(index = 0; index < numGrp; index++) { // if the lower bound is less than the upper bound // mark the group to go through the group filter if(pointLwrs[(tid * numGrp) + index] < pointInfo[tid].uprBound) groupLclArr[index + (btid * numGrp)] = 1; else groupLclArr[index + (btid * numGrp)] = 0; } // execute point calcs given the groups pointCalcsFull(&pointInfo[tid], centInfo, &pointData[tid * numDim], &pointLwrs[tid * numGrp], centData, maxDriftArr, &groupLclArr[btid * numGrp], numPnt, numCent, numGrp, numDim, calcCount); } } } /* Simplified Yinyang algorithm point assignment step Includes global and group filters */ __global__ void assignPointsSimple(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDriftArr, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; DTYPE tmpGlobLwr = INFINITY; unsigned int btid = threadIdx.x; unsigned int index; pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; extern __shared__ unsigned int groupLclArr[]; // update points upper bound pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; // update group lower bounds // for all in lower bound array for(index = 0; index < numGrp; index++) { // subtract lowerbound by group's drift pointLwrs[(tid * numGrp) + index] -= maxDriftArr[index]; // if the lowerbound is less than the temp global lower, if(pointLwrs[(tid * numGrp) + index] < tmpGlobLwr) { // lower bound is new temp global lower tmpGlobLwr = pointLwrs[(tid * numGrp) + index]; } } // if the global lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // tighten upper bound ub = d(x, b(x)) pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim], numDim); atomicAdd(calcCount, 1); // if the lower bound is less than the upper bound if(tmpGlobLwr < pointInfo[tid].uprBound) { // loop through groups for(index = 0; index < numGrp; index++) { // if the lower bound is less than the upper bound // mark the group to go through the group filter if(pointLwrs[(tid * numGrp) + index] < pointInfo[tid].uprBound) groupLclArr[index + (btid * numGrp)] = 1; else groupLclArr[index + (btid * numGrp)] = 0; } // execute point calcs given the groups pointCalcsSimple(&pointInfo[tid],centInfo,&pointData[tid * 
numDim], &pointLwrs[tid * numGrp], centData, maxDriftArr, &groupLclArr[btid * numGrp], numPnt, numCent, numGrp, numDim, calcCount); } } } /* Super Simplified Yinyang algorithm point assignment step Includes only the global filter */ __global__ void assignPointsSuper(PointInfo *pointInfo, CentInfo *centInfo, DTYPE *pointData, DTYPE *pointLwrs, DTYPE *centData, DTYPE *maxDrift, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int tid=threadIdx.x+(blockIdx.x*BLOCKSIZE); if(tid >= numPnt) return; // point calc variables int centIndex; DTYPE compDistance; // set centroid's old centroid to be current assignment pointInfo[tid].oldCentroid = pointInfo[tid].centroidIndex; // update bounds pointInfo[tid].uprBound += centInfo[pointInfo[tid].centroidIndex].drift; pointLwrs[tid * numGrp] -= *maxDrift; if(pointLwrs[tid * numGrp] < pointInfo[tid].uprBound) { // tighten upper bound pointInfo[tid].uprBound = calcDis(&pointData[tid * numDim], &centData[pointInfo[tid].centroidIndex * numDim],numDim); atomicAdd(calcCount, 1); if(pointLwrs[(tid * numGrp)] < pointInfo[tid].uprBound) { // to get a new lower bound pointLwrs[tid * numGrp] = INFINITY; for(centIndex = 0; centIndex < numCent; centIndex++) { // do not calculate for the already assigned cluster if(centIndex == pointInfo[tid].oldCentroid) continue; compDistance = calcDis(&pointData[tid * numDim], &centData[centIndex * numDim], numDim); atomicAdd(calcCount, 1); if(compDistance < pointInfo[tid].uprBound) { pointLwrs[tid * numGrp] = pointInfo[tid].uprBound; pointInfo[tid].centroidIndex = centIndex; pointInfo[tid].uprBound = compDistance; } else if(compDistance < pointLwrs[tid * numGrp]) { pointLwrs[tid * numGrp] = compDistance; } } } } } //////////////////////////////////////// // Point Calculation Device Functions // //////////////////////////////////////// __device__ void pointCalcsFull(PointInfo *pointInfoPtr, CentInfo *centInfo, DTYPE *pointDataPtr, DTYPE *pointLwrPtr, DTYPE *centData, DTYPE *maxDriftArr, unsigned int *groupArr, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int grpIndex, centIndex; DTYPE compDistance; DTYPE oldLwr = INFINITY; DTYPE oldCentUpr = pointInfoPtr->uprBound; DTYPE oldCentLwr = pointLwrPtr[centInfo[pointInfoPtr->oldCentroid].groupNum]; // loop through all the groups for(grpIndex = 0; grpIndex < numGrp; grpIndex++) { // if the group is marked as going through the group filter if(groupArr[grpIndex]) { // save the former lower bound pre-update if(grpIndex == centInfo[pointInfoPtr->oldCentroid].groupNum) oldLwr = oldCentLwr + maxDriftArr[grpIndex]; else oldLwr = pointLwrPtr[grpIndex] + maxDriftArr[grpIndex]; // reset the group's lower bound in order to find the new lower bound pointLwrPtr[grpIndex] = INFINITY; if(grpIndex == centInfo[pointInfoPtr->oldCentroid].groupNum && pointInfoPtr->oldCentroid != pointInfoPtr->centroidIndex) pointLwrPtr[centInfo[pointInfoPtr->oldCentroid].groupNum] = oldCentUpr; // loop through all the group's centroids for(centIndex = 0; centIndex < numCent; centIndex++) { // if the cluster is the cluster already assigned // at the start of this iteration if(centIndex == pointInfoPtr->oldCentroid) continue; // if the cluster is a part of the group being checked now if(grpIndex == centInfo[centIndex].groupNum) { // local filtering condition if(pointLwrPtr[grpIndex] < oldLwr - centInfo[centIndex].drift) continue; // perform distance calculation compDistance = 
calcDis(pointDataPtr, &centData[centIndex * numDim], numDim); atomicAdd(calcCount, 1); if(compDistance < pointInfoPtr->uprBound) { pointLwrPtr[centInfo[pointInfoPtr->centroidIndex].groupNum] = pointInfoPtr->uprBound; pointInfoPtr->centroidIndex = centIndex; pointInfoPtr->uprBound = compDistance; } else if(compDistance < pointLwrPtr[grpIndex]) { pointLwrPtr[grpIndex] = compDistance; } } } } } } __device__ void pointCalcsSimple(PointInfo *pointInfoPtr, CentInfo *centInfo, DTYPE *pointDataPtr, DTYPE *pointLwrPtr, DTYPE *centData, DTYPE *maxDriftArr, unsigned int *groupArr, const int numPnt, const int numCent, const int numGrp, const int numDim, unsigned long long int *calcCount) { unsigned int index; DTYPE compDistance; for(index = 0; index < numGrp; index++) { if(groupArr[index]) { pointLwrPtr[index] = INFINITY; } } for(index = 0; index < numCent; index++) { if(groupArr[centInfo[index].groupNum]) { if(index == pointInfoPtr->oldCentroid) continue; compDistance = calcDis(pointDataPtr, &centData[index * numDim], numDim); atomicAdd(calcCount, 1); if(compDistance < pointInfoPtr->uprBound) { pointLwrPtr[centInfo[pointInfoPtr->centroidIndex].groupNum] = pointInfoPtr->uprBound; pointInfoPtr->centroidIndex = index; pointInfoPtr->uprBound = compDistance; } else if(compDistance < pointLwrPtr[centInfo[index].groupNum]) { pointLwrPtr[centInfo[index].groupNum] = compDistance; } } } }
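The kernels above cover only the device side; the host driver that sequences them lives in a separate source file that is not part of this pair. As a rough orientation, one iteration of the simplified Yinyang loop might be wired up as in the sketch below. Everything here is an assumption except the kernel names and parameter lists, which come from the file above: the d_-prefixed pointers, grid shapes, and the iter/MAXITER loop control are illustrative, and clearCentCalcData(...) is assumed to have been run once before the loop.

// Illustrative host driver for one iteration of the simplified Yinyang loop.
// All d_-prefixed pointers, grid shapes, and iter/MAXITER handling are
// assumptions; only the kernel names and argument lists come from the file above.
unsigned int hostConFlag = 1;
unsigned int iter = 0;
dim3 pntGrid((numPnt + BLOCKSIZE - 1) / BLOCKSIZE);
dim3 centGrid((numCent + BLOCKSIZE - 1) / BLOCKSIZE);
dim3 grpGrid((numGrp + BLOCKSIZE - 1) / BLOCKSIZE);
size_t shMem = BLOCKSIZE * numGrp * sizeof(unsigned int);  // per-block groupLclArr

while (hostConFlag && iter < MAXITER)
{
  hostConFlag = 0;
  cudaMemcpy(d_conFlag, &hostConFlag, sizeof(unsigned int), cudaMemcpyHostToDevice);

  // reset per-group drifts, assign points, then update centroids and drifts
  clearDriftArr<<<grpGrid, BLOCKSIZE>>>(d_maxDriftArr, numGrp);
  assignPointsSimple<<<pntGrid, BLOCKSIZE, shMem>>>(d_pointInfo, d_centInfo,
      d_pointData, d_pointLwrs, d_centData, d_maxDriftArr,
      numPnt, numCent, numGrp, numDim);
  calcCentData<<<pntGrid, BLOCKSIZE>>>(d_pointInfo, d_centInfo, d_pointData,
      d_oldSums, d_newSums, d_oldCounts, d_newCounts, numPnt, numDim);
  calcNewCentroids<<<centGrid, BLOCKSIZE>>>(d_pointInfo, d_centInfo, d_centData,
      d_oldCentData, d_oldSums, d_newSums, d_maxDriftArr,
      d_oldCounts, d_newCounts, numCent, numDim);

  // stop once no point changed its assignment
  checkConverge<<<pntGrid, BLOCKSIZE>>>(d_pointInfo, d_conFlag, numPnt);
  cudaMemcpy(&hostConFlag, d_conFlag, sizeof(unsigned int), cudaMemcpyDeviceToHost);
  iter++;
}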
3026cb252db920a9b1c19203b83778e6592b8265.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define MAX_SECRET 8000000
#define KEY_SIZE 8
#define BUFFER 512

// Each thread tests one candidate 8-digit ASCII key, built from its block and
// thread indices, against the XOR-encrypted secret. A candidate is rejected as
// soon as the trial decryption produces one of the blacklisted characters; a
// surviving thread writes its key to global memory.
__global__ void exor(const int size, const char *secret, char *key)
{
    char temp[KEY_SIZE];
    temp[0] = blockIdx.x/10 + 48;
    temp[1] = blockIdx.x%10 + 48;
    temp[2] = blockIdx.y/10 + 48;
    temp[3] = blockIdx.y%10 + 48;
    temp[4] = blockIdx.z + 48;
    temp[5] = threadIdx.x + 48;
    temp[6] = threadIdx.y + 48;
    temp[7] = threadIdx.z + 48;

    for(int i = 0; i < size; i++)
    {
        switch(secret[i] ^ temp[i % KEY_SIZE])
        {
            case '|': case '~': case '^': case '*': case '+':
            case '_': case '{': case '}': case '\\': case '#':
                return;
        }
    }

    for(int i = 0; i < KEY_SIZE; i++)
        key[i] = temp[i];
}

int main()
{
    char secret[MAX_SECRET], key[KEY_SIZE+1];
    char *p = secret;

    // read the encrypted input from stdin in BUFFER-sized chunks
    while (int n = fread((void *) p, 1,
                         (p - secret + BUFFER < MAX_SECRET) ? BUFFER : secret + MAX_SECRET - p,
                         stdin))
        p += n;
    int size = p - secret;

    char *d_secret, *d_key;
    hipMalloc((void **) &d_secret, size);
    hipMalloc((void **) &d_key, KEY_SIZE);
    hipMemcpy(d_secret, secret, size, hipMemcpyHostToDevice);

    // 100*100*10 blocks of 10*10*10 threads enumerate all 10^8 digit keys
    dim3 blocksPerGrid(100, 100, 10);
    dim3 threadsPerBlock(10, 10, 10);
    hipLaunchKernelGGL(( exor), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, size, d_secret, d_key);

    hipMemcpy(key, d_key, KEY_SIZE, hipMemcpyDeviceToHost);
    hipFree(d_secret);
    hipFree(d_key);
    hipDeviceReset();

    hipError_t error = hipGetLastError();
    if(error != hipSuccess)
    {
        fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(error));
        return -1;
    }

    // decrypt in place with the recovered key and print the result
    for(int i = 0; i < size; i++)
        secret[i] ^= key[i % KEY_SIZE];
    secret[size] = '\0';
    key[KEY_SIZE] = '\0';
    printf("%s\nKey: %s\n", secret, key);
    fprintf(stderr, "Done\n");

    return 0;
}
3026cb252db920a9b1c19203b83778e6592b8265.cu
#include <stdio.h>

#define MAX_SECRET 8000000
#define KEY_SIZE 8
#define BUFFER 512

// Each thread tests one candidate 8-digit ASCII key, built from its block and
// thread indices, against the XOR-encrypted secret. A candidate is rejected as
// soon as the trial decryption produces one of the blacklisted characters; a
// surviving thread writes its key to global memory.
__global__ void exor(const int size, const char *secret, char *key)
{
    char temp[KEY_SIZE];
    temp[0] = blockIdx.x/10 + 48;
    temp[1] = blockIdx.x%10 + 48;
    temp[2] = blockIdx.y/10 + 48;
    temp[3] = blockIdx.y%10 + 48;
    temp[4] = blockIdx.z + 48;
    temp[5] = threadIdx.x + 48;
    temp[6] = threadIdx.y + 48;
    temp[7] = threadIdx.z + 48;

    for(int i = 0; i < size; i++)
    {
        switch(secret[i] ^ temp[i % KEY_SIZE])
        {
            case '|': case '~': case '^': case '*': case '+':
            case '_': case '{': case '}': case '\\': case '#':
                return;
        }
    }

    for(int i = 0; i < KEY_SIZE; i++)
        key[i] = temp[i];
}

int main()
{
    char secret[MAX_SECRET], key[KEY_SIZE+1];
    char *p = secret;

    // read the encrypted input from stdin in BUFFER-sized chunks
    while (int n = fread((void *) p, 1,
                         (p - secret + BUFFER < MAX_SECRET) ? BUFFER : secret + MAX_SECRET - p,
                         stdin))
        p += n;
    int size = p - secret;

    char *d_secret, *d_key;
    cudaMalloc((void **) &d_secret, size);
    cudaMalloc((void **) &d_key, KEY_SIZE);
    cudaMemcpy(d_secret, secret, size, cudaMemcpyHostToDevice);

    // 100*100*10 blocks of 10*10*10 threads enumerate all 10^8 digit keys
    dim3 blocksPerGrid(100, 100, 10);
    dim3 threadsPerBlock(10, 10, 10);
    exor<<<blocksPerGrid, threadsPerBlock>>>(size, d_secret, d_key);

    cudaMemcpy(key, d_key, KEY_SIZE, cudaMemcpyDeviceToHost);
    cudaFree(d_secret);
    cudaFree(d_key);
    cudaDeviceReset();

    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(error));
        return -1;
    }

    // decrypt in place with the recovered key and print the result
    for(int i = 0; i < size; i++)
        secret[i] ^= key[i % KEY_SIZE];
    secret[size] = '\0';
    key[KEY_SIZE] = '\0';
    printf("%s\nKey: %s\n", secret, key);
    fprintf(stderr, "Done\n");

    return 0;
}
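The launch geometry fixes the searched key space: 100 x 100 x 10 blocks of 10 x 10 x 10 threads give 10^8 threads, one per possible 8-digit decimal ASCII key, and only a thread whose trial decryption never hits the blacklisted characters writes its key to d_key. Nothing in the program initializes d_key, so if no candidate survives, the host prints whatever bytes happen to be in that buffer. A small hardening sketch, not part of the original program and using a zero-key sentinel that is purely an assumption, could look like this:

// Hedged hardening sketch: clear d_key before the launch so the host can
// detect the case where no thread's candidate key survives the filter.
cudaMemset(d_key, 0, KEY_SIZE);
exor<<<blocksPerGrid, threadsPerBlock>>>(size, d_secret, d_key);
cudaMemcpy(key, d_key, KEY_SIZE, cudaMemcpyDeviceToHost);
if (key[0] == '\0')
    fprintf(stderr, "No candidate key passed the filter\n");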
a5a818f7ddcbc631ded70d5e21af7dd0ab2f4914.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include <helper_cuda.h>
#include <omp.h>
#include <stdio.h>
#include "AstroAccelerate/params.h"

#include "device_binning_kernel.cu"

//extern "C" void bin_gpu(float *bin_buffer, float *input_buffer, int nchans, int nsamp);

void bin_gpu(unsigned short *d_input, float *d_output, int nchans, int nsamp)
{
    int divisions_in_t = BINDIVINT;
    int divisions_in_f = BINDIVINF;
    int num_blocks_t = (int) ( ( nsamp + 1 ) / ( 2 * divisions_in_t ) );
    int num_blocks_f = nchans / divisions_in_f;

    dim3 threads_per_block(divisions_in_t, divisions_in_f);
    dim3 num_blocks(num_blocks_t, num_blocks_f);

    //printf("\nDIVISOR:\t%f", (float)(nsamp)/(2*divisions_in_t));
    //printf("\ndivisions_in_t:%d\tdivisions_in_f:%d",divisions_in_t, divisions_in_f);
    //printf("\nnum_blocks_t:%d\tnum_blocks_f:%d\n",num_blocks_t,num_blocks_f);

    //hipFuncSetCacheConfig(bin, hipFuncCachePreferL1);

    double start_t, end_t;
    start_t = omp_get_wtime();

    hipLaunchKernelGGL(( bin), dim3(num_blocks), dim3(threads_per_block), 0, 0, d_input, d_output, nsamp);
    // getLastCudaError("Kernel execution failed");

    int swap_divisions_in_t = CT;
    int swap_divisions_in_f = CF;
    int swap_num_blocks_t = nsamp / swap_divisions_in_t;
    int swap_num_blocks_f = nchans / swap_divisions_in_f;

    dim3 swap_threads_per_block(swap_divisions_in_t, swap_divisions_in_f);
    dim3 swap_num_blocks(swap_num_blocks_t, swap_num_blocks_f);

    hipDeviceSynchronize();
    hipLaunchKernelGGL(( swap), dim3(swap_num_blocks), dim3(swap_threads_per_block), 0, 0, d_input, d_output, nchans, nsamp);
    hipDeviceSynchronize();

    end_t = omp_get_wtime();
    float time = (float) ( end_t - start_t );
    //printf("\nPerformed Bin: %f (GPU estimate)", time);
    //printf("\nGops based on %.2f ops per channel per tsamp: %f",14.0,((15.0*(divisions_in_t*divisions_in_f*num_blocks_t*num_blocks_f))/(time))/1000000000.0);
    //printf("\nBN Device memory bandwidth in GB/s: %f", (2*(sizeof(float)+sizeof(unsigned short))*(divisions_in_t*divisions_in_f*num_blocks_t*num_blocks_f))/(time)/1000000000.0);

    hipMemset(d_output, 0, nchans * nsamp * sizeof(float));
    //hipMemcpy(input_buffer, bin_buffer, sizeof(float)*nchans*(nsamp/2), hipMemcpyDeviceToDevice);
    // getLastCudaError("Kernel execution failed");
}
a5a818f7ddcbc631ded70d5e21af7dd0ab2f4914.cu
//#include <helper_cuda.h>
#include <omp.h>
#include <stdio.h>
#include "AstroAccelerate/params.h"

#include "device_binning_kernel.cu"

//extern "C" void bin_gpu(float *bin_buffer, float *input_buffer, int nchans, int nsamp);

void bin_gpu(unsigned short *d_input, float *d_output, int nchans, int nsamp)
{
    int divisions_in_t = BINDIVINT;
    int divisions_in_f = BINDIVINF;
    int num_blocks_t = (int) ( ( nsamp + 1 ) / ( 2 * divisions_in_t ) );
    int num_blocks_f = nchans / divisions_in_f;

    dim3 threads_per_block(divisions_in_t, divisions_in_f);
    dim3 num_blocks(num_blocks_t, num_blocks_f);

    //printf("\nDIVISOR:\t%f", (float)(nsamp)/(2*divisions_in_t));
    //printf("\ndivisions_in_t:%d\tdivisions_in_f:%d",divisions_in_t, divisions_in_f);
    //printf("\nnum_blocks_t:%d\tnum_blocks_f:%d\n",num_blocks_t,num_blocks_f);

    //cudaFuncSetCacheConfig(bin, cudaFuncCachePreferL1);

    double start_t, end_t;
    start_t = omp_get_wtime();

    bin<<<num_blocks, threads_per_block>>>(d_input, d_output, nsamp);
    // getLastCudaError("Kernel execution failed");

    int swap_divisions_in_t = CT;
    int swap_divisions_in_f = CF;
    int swap_num_blocks_t = nsamp / swap_divisions_in_t;
    int swap_num_blocks_f = nchans / swap_divisions_in_f;

    dim3 swap_threads_per_block(swap_divisions_in_t, swap_divisions_in_f);
    dim3 swap_num_blocks(swap_num_blocks_t, swap_num_blocks_f);

    cudaDeviceSynchronize();
    swap<<<swap_num_blocks, swap_threads_per_block>>>(d_input, d_output, nchans, nsamp);
    cudaDeviceSynchronize();

    end_t = omp_get_wtime();
    float time = (float) ( end_t - start_t );
    //printf("\nPerformed Bin: %f (GPU estimate)", time);
    //printf("\nGops based on %.2f ops per channel per tsamp: %f",14.0,((15.0*(divisions_in_t*divisions_in_f*num_blocks_t*num_blocks_f))/(time))/1000000000.0);
    //printf("\nBN Device memory bandwidth in GB/s: %f", (2*(sizeof(float)+sizeof(unsigned short))*(divisions_in_t*divisions_in_f*num_blocks_t*num_blocks_f))/(time)/1000000000.0);

    cudaMemset(d_output, 0, nchans * nsamp * sizeof(float));
    //cudaMemcpy(input_buffer, bin_buffer, sizeof(float)*nchans*(nsamp/2), cudaMemcpyDeviceToDevice);
    // getLastCudaError("Kernel execution failed");
}
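Both versions time the two launches with omp_get_wtime() on the host and rely on the cudaDeviceSynchronize() calls so that the wall-clock interval actually covers the kernels. An alternative, shown here only as a sketch with illustrative variable names rather than code from the project, is to time on the device with CUDA events; kernels issued to the same default stream are already serialized, so the intermediate synchronize is needed only for host-side timing, not for ordering.

// Event-based timing sketch for the same two launches (illustrative names).
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart);
bin<<<num_blocks, threads_per_block>>>(d_input, d_output, nsamp);
swap<<<swap_num_blocks, swap_threads_per_block>>>(d_input, d_output, nchans, nsamp);
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);
float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, evStart, evStop);  // device time in milliseconds
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);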
f2d733d68a2f2e61e796bfebf0f4150fc57b7001.hip
// !!! This is a file automatically generated by hipify!!! // libs #include <camera.cuh> #include <cbuffer.cuh> #include <color.hpp> #include <debug.hpp> #include <external.hpp> #include <makeworld.cuh> #include <ray.cuh> #include <trace.cuh> #include <vec3.cuh> // scene objects #include <kernelio.cuh> #include <record.cuh> #include <sceneobj.cuh> #include <scenetype.cuh> void get_device_props() { int nDevices; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); std::cerr << "Device Number: " << i << std::endl; std::cerr << "Device name: " << prop.name << std::endl; std::cerr << "Memory Clock Rate (KHz): " << prop.memoryClockRate << std::endl; std::cerr << "Memory Bus Width (bits): " << prop.memoryBusWidth << std::endl; std::cerr << " Peak Memory Bandwidth (GB/s): " << 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6 << std::endl; } } Camera makeCam(int WIDTH, int HEIGHT) { Vec3 lookfrom(278, 278, -800); Vec3 lookat(278, 278, 0); Vec3 wup(0, 1, 0); float vfov = 40.0f; float aspect_r = float(WIDTH) / float(HEIGHT); float dist_to_focus = (lookfrom - lookat).length(); float aperture = 0.0; float t0 = 0.0f, t1 = 1.0f; Camera cam(lookfrom, lookat, wup, vfov, aspect_r, aperture, dist_to_focus, t0, t1); return cam; } void make_image(thrust::device_ptr<unsigned char> &imdata, thrust::device_ptr<int> &imwidths, thrust::device_ptr<int> &imhs, thrust::device_ptr<int> &imch) { std::vector<const char *> impaths = {"media/earthmap.png", "media/lsjimg.png"}; std::vector<int> ws, hes, nbChannels, indices; std::vector<unsigned char> imdata_h; imread(impaths, ws, hes, nbChannels, imdata_h, indices); ////// thrust::device_ptr<unsigned char> imda = ////// thrust::device_malloc<unsigned char>(imd.size); unsigned char *h_ptr = imdata_h.data(); // --------------------- image ------------------------ upload_thrust<unsigned char>(imdata, h_ptr, (int)imdata_h.size()); int *ws_ptr = ws.data(); upload_thrust<int>(imwidths, ws_ptr, (int)ws.size()); int *hs_ptr = hes.data(); upload_thrust<int>(imhs, hs_ptr, (int)hes.size()); int *nb_ptr = nbChannels.data(); upload_thrust<int>(imch, nb_ptr, (int)nbChannels.size()); } int main() { float aspect_ratio = 16.0f / 9.0f; int WIDTH = 320; int HEIGHT = static_cast<int>(WIDTH / aspect_ratio); int BLOCK_WIDTH = 32; int BLOCK_HEIGHT = 4; int SAMPLE_NB = 20; int BOUNCE_NB = 20; bool gpu_io = false; get_device_props(); std::cerr << "Resim boyutumuz " << WIDTH << "x" << HEIGHT << std::endl; std::cerr << BLOCK_WIDTH << "x" << BLOCK_HEIGHT << " bloklar halinde" << std::endl; // declare world SceneObjects sobjs = make_cornell_box2(); // declare camera Camera cam = makeCam(WIDTH, HEIGHT); // if (gpu_io) { gpu_main(WIDTH, HEIGHT, BLOCK_WIDTH, BLOCK_HEIGHT, SAMPLE_NB, BOUNCE_NB, aspect_ratio, sobjs, cam); } else { cpu_main(WIDTH, HEIGHT, SAMPLE_NB, BOUNCE_NB, cam, sobjs); } }
f2d733d68a2f2e61e796bfebf0f4150fc57b7001.cu
// libs #include <camera.cuh> #include <cbuffer.cuh> #include <color.hpp> #include <debug.hpp> #include <external.hpp> #include <makeworld.cuh> #include <ray.cuh> #include <trace.cuh> #include <vec3.cuh> // scene objects #include <kernelio.cuh> #include <record.cuh> #include <sceneobj.cuh> #include <scenetype.cuh> void get_device_props() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); std::cerr << "Device Number: " << i << std::endl; std::cerr << "Device name: " << prop.name << std::endl; std::cerr << "Memory Clock Rate (KHz): " << prop.memoryClockRate << std::endl; std::cerr << "Memory Bus Width (bits): " << prop.memoryBusWidth << std::endl; std::cerr << " Peak Memory Bandwidth (GB/s): " << 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6 << std::endl; } } Camera makeCam(int WIDTH, int HEIGHT) { Vec3 lookfrom(278, 278, -800); Vec3 lookat(278, 278, 0); Vec3 wup(0, 1, 0); float vfov = 40.0f; float aspect_r = float(WIDTH) / float(HEIGHT); float dist_to_focus = (lookfrom - lookat).length(); float aperture = 0.0; float t0 = 0.0f, t1 = 1.0f; Camera cam(lookfrom, lookat, wup, vfov, aspect_r, aperture, dist_to_focus, t0, t1); return cam; } void make_image(thrust::device_ptr<unsigned char> &imdata, thrust::device_ptr<int> &imwidths, thrust::device_ptr<int> &imhs, thrust::device_ptr<int> &imch) { std::vector<const char *> impaths = {"media/earthmap.png", "media/lsjimg.png"}; std::vector<int> ws, hes, nbChannels, indices; std::vector<unsigned char> imdata_h; imread(impaths, ws, hes, nbChannels, imdata_h, indices); ////// thrust::device_ptr<unsigned char> imda = ////// thrust::device_malloc<unsigned char>(imd.size); unsigned char *h_ptr = imdata_h.data(); // --------------------- image ------------------------ upload_thrust<unsigned char>(imdata, h_ptr, (int)imdata_h.size()); int *ws_ptr = ws.data(); upload_thrust<int>(imwidths, ws_ptr, (int)ws.size()); int *hs_ptr = hes.data(); upload_thrust<int>(imhs, hs_ptr, (int)hes.size()); int *nb_ptr = nbChannels.data(); upload_thrust<int>(imch, nb_ptr, (int)nbChannels.size()); } int main() { float aspect_ratio = 16.0f / 9.0f; int WIDTH = 320; int HEIGHT = static_cast<int>(WIDTH / aspect_ratio); int BLOCK_WIDTH = 32; int BLOCK_HEIGHT = 4; int SAMPLE_NB = 20; int BOUNCE_NB = 20; bool gpu_io = false; get_device_props(); std::cerr << "Resim boyutumuz " << WIDTH << "x" << HEIGHT << std::endl; std::cerr << BLOCK_WIDTH << "x" << BLOCK_HEIGHT << " bloklar halinde" << std::endl; // declare world SceneObjects sobjs = make_cornell_box2(); // declare camera Camera cam = makeCam(WIDTH, HEIGHT); // if (gpu_io) { gpu_main(WIDTH, HEIGHT, BLOCK_WIDTH, BLOCK_HEIGHT, SAMPLE_NB, BOUNCE_NB, aspect_ratio, sobjs, cam); } else { cpu_main(WIDTH, HEIGHT, SAMPLE_NB, BOUNCE_NB, cam, sobjs); } }
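// Illustrative sketch of the peak-bandwidth estimate printed by get_device_props()
// above: cudaDeviceProp reports memoryClockRate in kHz and memoryBusWidth in bits,
// and the factor of 2 reflects double data rate, so the result comes out in GB/s.
// Standalone example with no scene/camera dependencies; device 0 is assumed to exist.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) return 1;
    double gbps = 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6;
    // e.g. a 7,000,000 kHz clock on a 256-bit bus gives 2 * 7e6 * 32 / 1e6 = 448 GB/s
    printf("%s: %.1f GB/s peak\n", prop.name, gbps);
    return 0;
}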
4ed657d0a728a3792442488749a740e4eb2fb002.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Note this file isn't configured to automatically compile. // Here's how: // If you want to look at the ptx first: // nvcc -arch sm_50 -m 32 -ptx sgemm.cu // Manually compile your kernel to a cubin. // You should only have to do this once, unless you change params or shared size or globals: // nvcc -arch sm_50 -m 32 -cubin sgemm.cu // If tweaking a kernel or writing a new one based on this shell code you would then do this: // maxas.pl -e kernel.cubin kernel.sass // I've already included a modified kernel (sgemm.sass) so the next step is.. // Splice the manually assembled code back into the cubin: // maxas.pl -i sgemm.sass sgemm.cubin #include <hip/device_functions.h> #include <device_launch_parameters.h> #include <hip/hip_texture_types.h> #include <texture_fetch_functions.h> typedef texture<float4, hipTextureType1D, hipReadModeElementType> floatTex; floatTex texA(0, hipFilterModePoint, hipAddressModeBorder); floatTex texB(0, hipFilterModePoint, hipAddressModeBorder); /* // Use extern C so C++ doesn't mangle our kernel name extern "C" // This kernel requires 256x1x1 threads per block __global__ void __launch_bounds__(256) sgemm_kernel_128( float *C, const int m, const int n, const int k, const int lda, const int ldb, const int ldc, float alpha, int *D) { // Declare any shared memory your kernel requires // Or you could just pass the amount in as a param to hipModuleLaunchKernel __shared__ float4 share[1024]; int tid = threadIdx.x; // If you use indirect texture references, they will be passed as params at the end of the param list // So set that up here to make sure they're available in your kernel floatTex tex = tid > 127 ? texB : texA; // Make use of shared and your textures so it doesn't get optimized away share[tid] = tex1Dfetch(tex, tid); __syncthreads(); // output something so your setup isn't optimized away. C[tid] = share[255-tid].x; }*/ extern "C" __global__ void __launch_bounds__(64) sgemm_kernel_64( float *C, const int m, const int n, const int k, const int lda, const int ldb, const int ldc, float alpha, int *D) { __shared__ float4 share[512]; int tid = threadIdx.x; floatTex tex = tid > 127 ? texB : texA; share[tid] = tex1Dfetch(tex, tid); __syncthreads(); C[tid] = share[255-tid].x; } // A note about using the Cuda Runtime. // If that's your preference over the driver API then here's what you'd do: // In your project properties in the Cuda C/C++ panel: // -Set the "Keep Processed Files" (-keep) option // -Add a -v manually to the command line // If compiling on command line just add -keep -v options to nvcc. // Rebuild your solution and look in the log for these lines that follow the ptxas step: // #$ fatbinary --create="Release/kernel.fatbin" -32 --key="a7bce87544c2a492" --ident="C:/Users/Scott/Documents/sgemm6/sgemm6/kernel.cu" --cmdline="-v --opt-level 4 --generate-line-info " "--image=profile=sm_50,file=Release/kernel.sm_50.cubin" "--image=profile=compute_50,file=Release/kernel.ptx" --embedded-fatbin="Release/kernel.fatbin.c" --cuda // #$ cl.exe @Release/kernel.cu.cpp.ii.res > "Release/kernel.cu.cpp.ii" // #$ cl.exe @Release/kernel.cu.obj.res -Fo"Release/kernel.cu.obj" // You just need to manually run these 3 commands (or add them to a build script) // after you've modified the cubin generated from the preceeding ptxas command. 
// That will give you a new .cu.obj file which will automatically be linked in for you next time you // build your project (or you could manually run the linker step as well). // Having done that you can call your kernel normally using the <<< >>> syntax. // Debugging will have to be with the sass syntax but that's what you'll want to see anyway. // With fatbin you can also keep non-maxwell optimized versions of your code. // I just discovered this also works as a shortcut to the above: // nvcc -lib -arch sm_52 -m 32 -use-cubin code=sm_52,cubin=sgemm.cubin -o sgemm.lib sgemm.cu // The cu kernel definitions above need to have empty bodies. // And, the cu file must be compiled to a lib separately before linking.
4ed657d0a728a3792442488749a740e4eb2fb002.cu
// Note this file isn't configured to automatically compile. // Here's how: // If you want to look at the ptx first: // nvcc -arch sm_50 -m 32 -ptx sgemm.cu // Manually compile your kernel to a cubin. // You should only have to do this once, unless you change params or shared size or globals: // nvcc -arch sm_50 -m 32 -cubin sgemm.cu // If tweaking a kernel or writing a new one based on this shell code you would then do this: // maxas.pl -e kernel.cubin kernel.sass // I've already included a modified kernel (sgemm.sass) so the next step is.. // Splice the manually assembled code back into the cubin: // maxas.pl -i sgemm.sass sgemm.cubin #include <device_functions.h> #include <device_launch_parameters.h> #include <cuda_texture_types.h> #include <texture_fetch_functions.h> typedef texture<float4, cudaTextureType1D, cudaReadModeElementType> floatTex; floatTex texA(0, cudaFilterModePoint, cudaAddressModeBorder); floatTex texB(0, cudaFilterModePoint, cudaAddressModeBorder); /* // Use extern C so C++ doesn't mangle our kernel name extern "C" // This kernel requires 256x1x1 threads per block __global__ void __launch_bounds__(256) sgemm_kernel_128( float *C, const int m, const int n, const int k, const int lda, const int ldb, const int ldc, float alpha, int *D) { // Declare any shared memory your kernel requires // Or you could just pass the amount in as a param to cuLaunchKernel __shared__ float4 share[1024]; int tid = threadIdx.x; // If you use indirect texture references, they will be passed as params at the end of the param list // So set that up here to make sure they're available in your kernel floatTex tex = tid > 127 ? texB : texA; // Make use of shared and your textures so it doesn't get optimized away share[tid] = tex1Dfetch(tex, tid); __syncthreads(); // output something so your setup isn't optimized away. C[tid] = share[255-tid].x; }*/ extern "C" __global__ void __launch_bounds__(64) sgemm_kernel_64( float *C, const int m, const int n, const int k, const int lda, const int ldb, const int ldc, float alpha, int *D) { __shared__ float4 share[512]; int tid = threadIdx.x; floatTex tex = tid > 127 ? texB : texA; share[tid] = tex1Dfetch(tex, tid); __syncthreads(); C[tid] = share[255-tid].x; } // A note about using the Cuda Runtime. // If that's your preference over the driver API then here's what you'd do: // In your project properties in the Cuda C/C++ panel: // -Set the "Keep Processed Files" (-keep) option // -Add a -v manually to the command line // If compiling on command line just add -keep -v options to nvcc. // Rebuild your solution and look in the log for these lines that follow the ptxas step: // #$ fatbinary --create="Release/kernel.fatbin" -32 --key="a7bce87544c2a492" --ident="C:/Users/Scott/Documents/sgemm6/sgemm6/kernel.cu" --cmdline="-v --opt-level 4 --generate-line-info " "--image=profile=sm_50,file=Release/kernel.sm_50.cubin" "--image=profile=compute_50,file=Release/kernel.ptx" --embedded-fatbin="Release/kernel.fatbin.c" --cuda // #$ cl.exe @Release/kernel.cu.cpp.ii.res > "Release/kernel.cu.cpp.ii" // #$ cl.exe @Release/kernel.cu.obj.res -Fo"Release/kernel.cu.obj" // You just need to manually run these 3 commands (or add them to a build script) // after you've modified the cubin generated from the preceeding ptxas command. // That will give you a new .cu.obj file which will automatically be linked in for you next time you // build your project (or you could manually run the linker step as well). // Having done that you can call your kernel normally using the <<< >>> syntax. 
// Debugging will have to be with the sass syntax but that's what you'll want to see anyway. // With fatbin you can also keep non-maxwell optimized versions of your code. // I just discovered this also works as a shortcut to the above: // nvcc -lib -arch sm_52 -m 32 -use-cubin code=sm_52,cubin=sgemm.cubin -o sgemm.lib sgemm.cu // The cu kernel definitions above need to have empty bodies. // And, the cu file must be compiled to a lib separately before linking.
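// Illustrative driver-API sketch of the workflow the comments above describe: load the
// hand-patched sgemm.cubin, look up the extern "C" shell kernel, and launch it with
// cuLaunchKernel. Grid/block sizes, buffer sizes and matrix dimensions are placeholders,
// and binding of the texA/texB references (cuModuleGetTexRef/cuTexRefSetAddress) is
// omitted for brevity; this is a sketch, not the original project's host code.
#include <cuda.h>

int main() {
    CUdevice dev; CUcontext ctx; CUmodule mod; CUfunction fn;
    cuInit(0);
    cuDeviceGet(&dev, 0);
    cuCtxCreate(&ctx, 0, dev);
    cuModuleLoad(&mod, "sgemm.cubin");                 // the cubin maxas.pl spliced the sass into
    cuModuleGetFunction(&fn, mod, "sgemm_kernel_64");  // extern "C", so the name is unmangled
    CUdeviceptr dC, dD;
    cuMemAlloc(&dC, 256 * sizeof(float));
    cuMemAlloc(&dD, 256 * sizeof(int));
    int m = 128, n = 128, k = 128, lda = 128, ldb = 128, ldc = 128;
    float alpha = 1.0f;
    // kernelParams: one pointer per kernel argument, in declaration order
    void *args[] = { &dC, &m, &n, &k, &lda, &ldb, &ldc, &alpha, &dD };
    cuLaunchKernel(fn, 1, 1, 1, 64, 1, 1, 0, 0, args, 0);  // 64 threads/block, per __launch_bounds__(64)
    cuCtxSynchronize();
    cuMemFree(dC); cuMemFree(dD);
    cuCtxDestroy(ctx);
    return 0;
}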
a8cbd23f7b8fa1d133dfe02c56c4c85e6506dfe5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // __constant__ int opDat0_adt_calc_stride_OP2CONSTANT; int opDat0_adt_calc_stride_OP2HOST=-1; __constant__ int direct_adt_calc_stride_OP2CONSTANT; int direct_adt_calc_stride_OP2HOST=-1; //user function __device__ void adt_calc_gpu( const double *x1, const double *x2, const double *x3, const double *x4, const double *q, double *adt) { double dx, dy, ri, u, v, c; ri = 1.0f / q[(0)*direct_adt_calc_stride_OP2CONSTANT]; u = ri * q[(1)*direct_adt_calc_stride_OP2CONSTANT]; v = ri * q[(2)*direct_adt_calc_stride_OP2CONSTANT]; c = sqrt(gam_cuda * gm1_cuda * (ri * q[(3)*direct_adt_calc_stride_OP2CONSTANT] - 0.5f * (u * u + v * v))); dx = x2[(0)*opDat0_adt_calc_stride_OP2CONSTANT] - x1[(0)*opDat0_adt_calc_stride_OP2CONSTANT]; dy = x2[(1)*opDat0_adt_calc_stride_OP2CONSTANT] - x1[(1)*opDat0_adt_calc_stride_OP2CONSTANT]; *adt = fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy); dx = x3[(0)*opDat0_adt_calc_stride_OP2CONSTANT] - x2[(0)*opDat0_adt_calc_stride_OP2CONSTANT]; dy = x3[(1)*opDat0_adt_calc_stride_OP2CONSTANT] - x2[(1)*opDat0_adt_calc_stride_OP2CONSTANT]; *adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy); dx = x4[(0)*opDat0_adt_calc_stride_OP2CONSTANT] - x3[(0)*opDat0_adt_calc_stride_OP2CONSTANT]; dy = x4[(1)*opDat0_adt_calc_stride_OP2CONSTANT] - x3[(1)*opDat0_adt_calc_stride_OP2CONSTANT]; *adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy); dx = x1[(0)*opDat0_adt_calc_stride_OP2CONSTANT] - x4[(0)*opDat0_adt_calc_stride_OP2CONSTANT]; dy = x1[(1)*opDat0_adt_calc_stride_OP2CONSTANT] - x4[(1)*opDat0_adt_calc_stride_OP2CONSTANT]; *adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy); *adt = (*adt) / cfl_cuda; } // CUDA kernel function __global__ void op_cuda_adt_calc( const double *__restrict ind_arg0, const int *__restrict opDat0Map, const double *__restrict arg4, double *arg5, int start, int end, int set_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid + start < end) { int n = tid + start; //initialise local variables int map0idx; int map1idx; int map2idx; int map3idx; map0idx = opDat0Map[n + set_size * 0]; map1idx = opDat0Map[n + set_size * 1]; map2idx = opDat0Map[n + set_size * 2]; map3idx = opDat0Map[n + set_size * 3]; //user-supplied kernel call adt_calc_gpu(ind_arg0+map0idx, ind_arg0+map1idx, ind_arg0+map2idx, ind_arg0+map3idx, arg4+n, arg5+n*1); } } //host stub function void op_par_loop_adt_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5){ int nargs = 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(1); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[1].name = name; OP_kernels[1].count += 1; int ninds = 1; int inds[6] = {0,0,0,0,-1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: adt_calc\n"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { if ((OP_kernels[1].count==1) || (opDat0_adt_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg0))) { opDat0_adt_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg0); hipMemcpyToSymbol(opDat0_adt_calc_stride_OP2CONSTANT, &opDat0_adt_calc_stride_OP2HOST,sizeof(int)); } if ((OP_kernels[1].count==1) || (direct_adt_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg4))) { direct_adt_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg4); 
hipMemcpyToSymbol(direct_adt_calc_stride_OP2CONSTANT,&direct_adt_calc_stride_OP2HOST,sizeof(int)); } //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif for ( int round=0; round<2; round++ ){ if (round==1) { op_mpi_wait_all_grouped(nargs, args, 2); } int start = round==0 ? 0 : set->core_size; int end = round==0 ? set->core_size : set->size + set->exec_size; if (end-start>0) { int nblocks = (end-start-1)/nthread+1; hipLaunchKernelGGL(( op_cuda_adt_calc), dim3(nblocks),dim3(nthread), 0, 0, (double *)arg0.data_d, arg0.map_data_d, (double*)arg4.data_d, (double*)arg5.data_d, start,end,set->size+set->exec_size); } } } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; }
a8cbd23f7b8fa1d133dfe02c56c4c85e6506dfe5.cu
// // auto-generated by op2.py // __constant__ int opDat0_adt_calc_stride_OP2CONSTANT; int opDat0_adt_calc_stride_OP2HOST=-1; __constant__ int direct_adt_calc_stride_OP2CONSTANT; int direct_adt_calc_stride_OP2HOST=-1; //user function __device__ void adt_calc_gpu( const double *x1, const double *x2, const double *x3, const double *x4, const double *q, double *adt) { double dx, dy, ri, u, v, c; ri = 1.0f / q[(0)*direct_adt_calc_stride_OP2CONSTANT]; u = ri * q[(1)*direct_adt_calc_stride_OP2CONSTANT]; v = ri * q[(2)*direct_adt_calc_stride_OP2CONSTANT]; c = sqrt(gam_cuda * gm1_cuda * (ri * q[(3)*direct_adt_calc_stride_OP2CONSTANT] - 0.5f * (u * u + v * v))); dx = x2[(0)*opDat0_adt_calc_stride_OP2CONSTANT] - x1[(0)*opDat0_adt_calc_stride_OP2CONSTANT]; dy = x2[(1)*opDat0_adt_calc_stride_OP2CONSTANT] - x1[(1)*opDat0_adt_calc_stride_OP2CONSTANT]; *adt = fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy); dx = x3[(0)*opDat0_adt_calc_stride_OP2CONSTANT] - x2[(0)*opDat0_adt_calc_stride_OP2CONSTANT]; dy = x3[(1)*opDat0_adt_calc_stride_OP2CONSTANT] - x2[(1)*opDat0_adt_calc_stride_OP2CONSTANT]; *adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy); dx = x4[(0)*opDat0_adt_calc_stride_OP2CONSTANT] - x3[(0)*opDat0_adt_calc_stride_OP2CONSTANT]; dy = x4[(1)*opDat0_adt_calc_stride_OP2CONSTANT] - x3[(1)*opDat0_adt_calc_stride_OP2CONSTANT]; *adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy); dx = x1[(0)*opDat0_adt_calc_stride_OP2CONSTANT] - x4[(0)*opDat0_adt_calc_stride_OP2CONSTANT]; dy = x1[(1)*opDat0_adt_calc_stride_OP2CONSTANT] - x4[(1)*opDat0_adt_calc_stride_OP2CONSTANT]; *adt += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy); *adt = (*adt) / cfl_cuda; } // CUDA kernel function __global__ void op_cuda_adt_calc( const double *__restrict ind_arg0, const int *__restrict opDat0Map, const double *__restrict arg4, double *arg5, int start, int end, int set_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid + start < end) { int n = tid + start; //initialise local variables int map0idx; int map1idx; int map2idx; int map3idx; map0idx = opDat0Map[n + set_size * 0]; map1idx = opDat0Map[n + set_size * 1]; map2idx = opDat0Map[n + set_size * 2]; map3idx = opDat0Map[n + set_size * 3]; //user-supplied kernel call adt_calc_gpu(ind_arg0+map0idx, ind_arg0+map1idx, ind_arg0+map2idx, ind_arg0+map3idx, arg4+n, arg5+n*1); } } //host stub function void op_par_loop_adt_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5){ int nargs = 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(1); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[1].name = name; OP_kernels[1].count += 1; int ninds = 1; int inds[6] = {0,0,0,0,-1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: adt_calc\n"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { if ((OP_kernels[1].count==1) || (opDat0_adt_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg0))) { opDat0_adt_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg0); cudaMemcpyToSymbol(opDat0_adt_calc_stride_OP2CONSTANT, &opDat0_adt_calc_stride_OP2HOST,sizeof(int)); } if ((OP_kernels[1].count==1) || (direct_adt_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg4))) { direct_adt_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg4); cudaMemcpyToSymbol(direct_adt_calc_stride_OP2CONSTANT,&direct_adt_calc_stride_OP2HOST,sizeof(int)); } //set 
CUDA execution parameters #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif for ( int round=0; round<2; round++ ){ if (round==1) { op_mpi_wait_all_grouped(nargs, args, 2); } int start = round==0 ? 0 : set->core_size; int end = round==0 ? set->core_size : set->size + set->exec_size; if (end-start>0) { int nblocks = (end-start-1)/nthread+1; op_cuda_adt_calc<<<nblocks,nthread>>>( (double *)arg0.data_d, arg0.map_data_d, (double*)arg4.data_d, (double*)arg5.data_d, start,end,set->size+set->exec_size); } } } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; }
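// Illustrative sketch of the strided SoA access pattern used by op_cuda_adt_calc above:
// component c of element e lives at data[c * stride + e], and the stride is published to
// the device once through a __constant__ int via cudaMemcpyToSymbol, exactly as the host
// stub does for opDat0_adt_calc_stride_OP2CONSTANT. Names and sizes here are placeholders,
// not OP2 internals.
#include <cuda_runtime.h>

__constant__ int d_stride;   // stands in for the set-size stride computed by the OP2 runtime

__global__ void read_soa(const double *__restrict__ q, double *out, int n) {
    int e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e < n)
        out[e] = q[0 * d_stride + e] + q[1 * d_stride + e];  // components 0 and 1 of element e
}

int main() {
    const int n = 1024, ncomp = 2;
    int h_stride = n;                       // one contiguous column per component
    double *d_q, *d_out;
    cudaMalloc(&d_q, ncomp * n * sizeof(double));
    cudaMalloc(&d_out, n * sizeof(double));
    cudaMemset(d_q, 0, ncomp * n * sizeof(double));
    cudaMemcpyToSymbol(d_stride, &h_stride, sizeof(int));
    read_soa<<<(n + 127) / 128, 128>>>(d_q, d_out, n);
    cudaDeviceSynchronize();
    cudaFree(d_q); cudaFree(d_out);
    return 0;
}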
a5f6ea877cd96e3215b8d85f59bb7c8f75cbf9c3.hip
// !!! This is a file automatically generated by hipify!!! // Sim_3m_LIBOROptions_GPU_v2.cu // // Function to simulate the 3 Factor Hull-White model // dll to be called from Visual Basic in Excel // This model prices 3m LIBOR Options (European style ED Options) // Input number of option expirations and maximum number of strikes // for each expiration // Input the futures rate, number of strikes, expiration, and term // Must calculate A and B coefficients at expiration dates // // Created January 2019, by Louis Scott // #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "stdafx.h" #include "DynamicMemoryAllocation.h" #include "SimulationFunctions_MRG32k3a.h" #include "ODE_Solver.h" #include "Sim_3m_LIBOROptions_kernel.cuh" #define MAX_NUM_THREADS 4 #define NEQ 1 #define nFactors 3 using namespace std; /* prototype */ unsigned __stdcall RunThread(void *param); void SimulateOISRates(int jThr); void derivs(double t, double y[], double dydx[]); void derivs2(double t, double y[], double dydx[]); /* Global Variables */ const unsigned int I31_max = 2147483647; const double DaysPerYear = 365.245; int nMat, nSteps, nSim, maxStrikes, jMat; int *nMat1, *nMat2, *nStrikes; unsigned int **OptType; double dSeeds_Threads[MAX_NUM_THREADS + 1][6]; double dt, sqdt, r0, y10, y20; double kappa[4], theta[4], sigma[4], lambda[4]; double *A, *FutRate0, *FwdSpread, **Strikes, ***LIBOROption, ***StdErr; double *Discount, *AConst, *B0, *B1, *B2; unsigned int **An1, **An2; FILE *fout; /* create thread argument struct for Run_Thread() */ typedef struct _thread_data_t { int tid; // double stuff; } thread_data_t; int main() { const unsigned int im1 = 4294967087; const unsigned int im2 = 4294944443; short ifail = 0; int i, j, k, nrerror; double *A; int nMaturities, nThreadsToUse, nSimIn, nStepsPerDay, maxStrikesIn; double r0_in, y10_in, y20_in; double tem, dSeed_Init[6]; int neq, nok, nbad; double eps, hstart, t1, t2, h1, hmin; double C1, C2, C01, C02, *ystart; double Atem, B0tem, B1tem, B2tem; int nSimsPerPath, nThreads; unsigned int **Bn1, **Bn2, iseed[6]; unsigned long long lp1, lp2, seed1[3], seed2[3], sseed1[3], sseed2[3]; double time0, time1; struct _timeb timebuffer; HANDLE threads[MAX_NUM_THREADS]; errno_t errcheck; unsigned threadID[MAX_NUM_THREADS]; // create a thread_data_t argument array thread_data_t thr_data[MAX_NUM_THREADS]; errcheck = fopen_s(&fout, "Check_3m_LIBOR_Options.txt", "w"); FILE *fin; errcheck = fopen_s(&fin, "LIBOROption_Parameters.txt", "r"); if (errcheck) printf(" File LIBOROption_Parameters.txt not opened \n"); fscanf_s(fin, " %i %i %i %i %i ", &nSimIn, &nThreadsToUse, &nMaturities, &maxStrikesIn, &nStepsPerDay); printf(" Enter the number of threads and the number of simulations per thread \n"); printf(" Enter negative numbers to use defaults \n"); cin >> i; cin >> j; if (i > 0) nThreadsToUse = i; if (j > 0) nSimIn = j; nMat = nMaturities; nThreads = nThreadsToUse; if (nThreads > MAX_NUM_THREADS) nThreads = MAX_NUM_THREADS; nSim = nSimIn; nSteps = nStepsPerDay; maxStrikes = maxStrikesIn; nrerror = 0; neq = NEQ; A = (double *)malloc(25 * sizeof(double)); LIBOROption = d3tensor(1, nMat, 1, maxStrikes, 0, nThreads); StdErr = d3tensor(1, nMat, 1, maxStrikes, 0, nThreads); Strikes = matrix_fp64(nMat + 1, maxStrikes + 1); OptType = uint_matrix(1, nMat, 1, maxStrikes); FutRate0 = (double *)malloc((nMat + 1) * sizeof(double)); nStrikes = (int *)malloc((nMat + 1) * sizeof(int)); FwdSpread = (double *)malloc((nMat + 1) * sizeof(double)); nMat1 = (int *)malloc((nMat + 1) * 
sizeof(int)); nMat2 = (int *)malloc((nMat + 1) * sizeof(int)); An1 = uint_matrix(0, 2, 0, 2); An2 = uint_matrix(0, 2, 0, 2); Bn1 = uint_matrix(0, 2, 0, 2); Bn2 = uint_matrix(0, 2, 0, 2); printf(" Parameters \n"); fprintf(fout, " Parameters \n"); for (i = 1; i <= 10; i++) { fscanf_s(fin, " %lf ", &A[i]); printf(" %f ", A[i]); fprintf(fout, " %f ", A[i]); } printf(" \n"); fprintf(fout, " \n"); fscanf_s(fin, " %lf %lf %lf ", &r0_in, &y10_in, &y20_in); r0 = r0_in; y10 = y10_in; y20 = y20_in; printf(" Initial values for r, y1, y2: %f %f %f \n", r0, y10, y20); if (nMat > nThreads) nMat = nThreads; printf(" %i %i %i \n", nSim, nStepsPerDay, nThreads); fprintf(fout, " %i %i %i \n", nSim, nStepsPerDay, nThreads); printf(" The input maturity days, nMat = %i \n", nMat); fprintf(fout, " The input maturity nMat, nMat = %i \n", nMat); printf(" The initial seeds for MRG32k3a \n"); fprintf(fout, " The initial seeds for MRG32k3a \n"); for (i = 1; i <= 6; i++) { fscanf_s(fin, " %i ", &j); iseed[i - 1] = j; printf(" %i ", iseed[i - 1]); fprintf(fout, " %i ", iseed[i - 1]); } printf(" \n"); fprintf(fout, " \n"); for (i = 1; i <= nMat; i++) { fscanf_s(fin, " %lf %i %i %i ", &FutRate0[i], &nMat1[i], &nMat2[i], &nStrikes[i]); for (j = 1; j <= nStrikes[i]; j++) { fscanf_s(fin, " %lf %i ", &Strikes[i][j], &OptType[i][j]); } } /* Parameter inputs A[1] = kappa[0] A[2] = sigma[0] A[3] = lambda[0] A[4] = kappa[1] A[5] = theta[1] A[6] = sigma[1] A[7] = lambda[1] A[8] = kappa[2] A[9] = sigma[2] A[10] = lambda[2] */ kappa[0] = A[1]; sigma[0] = A[2]; lambda[0] = A[3]; kappa[1] = A[4]; theta[1] = A[5]; sigma[1] = A[6]; lambda[1] = A[7]; kappa[2] = A[8]; sigma[2] = A[9]; lambda[2] = A[10]; r0 = r0_in; y10 = y10_in; y20 = y20_in; // End of input section _ftime64_s(&timebuffer); time0 = timebuffer.time + timebuffer.millitm / 1000.0; dt = 1.0 / (DaysPerYear*nSteps); sqdt = sqrt(dt); // Calculate initial discount function for payment dates // and annuity factors for swaption expirations AConst = (double *)malloc((nMat + 1) * sizeof(double)); B0 = (double *)malloc((nMat + 1) * sizeof(double)); B1 = (double *)malloc((nMat + 1) * sizeof(double)); B2 = (double *)malloc((nMat + 1) * sizeof(double)); ystart = (double *)malloc((NEQ + 1) * sizeof(double)); Discount = (double *)malloc((nMat + 1) * sizeof(double)); // Use position i = 0 for initial discount function // Not certain that we need initial discount function hstart = 0.0; eps = 0.00000001; hmin = 0.0; // Compute exp. 
affine coefficients for 3m, using nMat2, at each maturity/expiration // NMat2 days past 2 days forward for (i = 1; i <= nMat; i++) { ystart[1] = 0.0; t1 = 0.0; t2 = 2.0 / DaysPerYear; h1 = 0.5*(t2 - t1); odeint(ystart, neq, t1, t2, eps, h1, hmin, &nok, &nbad, &nrerror, derivs, rkqs); if (nrerror < 0) { ifail = -10 + nrerror; return ifail; } Atem = ystart[1]; // Need to calculate B coefficients for 2 day rate tem = kappa[0] * t2; if (fabs(tem) < 1.0e-06) B0tem = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else B0tem = (1.0 - exp(-tem)) / kappa[0]; tem = kappa[1] * t2; if (fabs(tem) < 1.0e-06) C1 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C1 = (1.0 - exp(-tem)) / kappa[1]; tem = kappa[2] * t2; if (fabs(tem) < 1.0e-06) C2 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C2 = (1.0 - exp(-tem)) / kappa[2]; tem = (kappa[0] - kappa[1]) * t2; if (fabs(tem) < 1.0e-06) C01 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C01 = (1.0 - exp(-tem)) / (kappa[0] - kappa[1]); tem = (kappa[0] - kappa[2]) * t2; if (fabs(tem) < 1.0e-06) C02 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C02 = (1.0 - exp(-tem)) / (kappa[0] - kappa[2]); B1tem = C1 - exp(-kappa[1] * t2)*C01; B2tem = C2 - exp(-kappa[2] * t2)*C02; // Discount[i] = exp(-(AConst[0][i] + B0[0][i] * r0 + B1[0][i] * y10 + B2[0][i] * y20)); t1 = t2; t2 = (2 + nMat2[i]) / DaysPerYear; h1 = 0.5*(t2 - t1); odeint(ystart, neq, t1, t2, eps, h1, hmin, &nok, &nbad, &nrerror, derivs, rkqs); if (nrerror < 0) { ifail = -10 + nrerror; return ifail; } AConst[i] = ystart[1] - Atem; // Need to calculate B coefficients tem = kappa[0] * t2; if (fabs(tem) < 1.0e-06) B0[i] = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else B0[i] = (1.0 - exp(-tem)) / kappa[0]; tem = kappa[1] * t2; if (fabs(tem) < 1.0e-06) C1 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C1 = (1.0 - exp(-tem)) / kappa[1]; tem = kappa[2] * t2; if (fabs(tem) < 1.0e-06) C2 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C2 = (1.0 - exp(-tem)) / kappa[2]; tem = (kappa[0] - kappa[1]) * t2; if (fabs(tem) < 1.0e-06) C01 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C01 = (1.0 - exp(-tem)) / (kappa[0] - kappa[1]); tem = (kappa[0] - kappa[2]) * t2; if (fabs(tem) < 1.0e-06) C02 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C02 = (1.0 - exp(-tem)) / (kappa[0] - kappa[2]); B1[i] = C1 - exp(-kappa[1] * t2)*C01; B2[i] = C2 - exp(-kappa[2] * t2)*C02; B0[i] = B0[i] - B0tem; B1[i] = B1[i] - B1tem; B2[i] = B2[i] - B2tem; // Discount[i] = exp(-(AConst[i] + B0[i] * r0 + B1[i] * y10 + B2[i] * y20)); } // Use previous coefficients and calculate MGF at each expiration date, tehn calculate spread between // 3m LIBOR futures and 3m OIS futures (hypothetical) -> FwdSpread[i], calculated for annualized simple interest rate for (i = 1; i <= nMat; i++) { jMat = i; ystart[1] = 0.0; t1 = 0.0; t2 = nMat1[i] / DaysPerYear; h1 = 0.5*(t2 - t1); odeint(ystart, neq, t1, t2, eps, h1, hmin, &nok, &nbad, &nrerror, derivs2, rkqs); if (nrerror < 0) { ifail = -10 + nrerror; return ifail; } Atem = ystart[1]; // Need to calculate B coefficients for 2 day rate B0tem = exp(-kappa[0] * t2)*B0[i]; tem = (kappa[0] - kappa[1]) * t2; if (fabs(tem) < 1.0e-06) C01 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C01 = (1.0 - exp(-tem)) / (kappa[0] - kappa[1]); tem = (kappa[0] - kappa[2]) * t2; if (fabs(tem) < 1.0e-06) 
C02 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C02 = (1.0 - exp(-tem)) / (kappa[0] - kappa[2]); C1 = exp(-kappa[1] * t2); C2 = exp(-kappa[2] * t2); B1tem = C1 * (B1[i] + kappa[0] * B0[i] * C01); B2tem = C2 * (B2[i] + kappa[0] * B0[i] * C02); // Now calcuate spread between 3m LIBOR futures and 3m OIS futures // place the 3m OIS futures into tem tem = AConst[i] + Atem + B0tem * r0 + B1tem * y10 + B2tem * y20; tem = (exp(tem) - 1.0)*360.0 / nMat2[i]; FwdSpread[i] = FutRate0[i] - tem; fprintf(fout, " 3m LIBOR - 3m OIS Futures Rate Spread %f 3m OIS Futures %f \n", FwdSpread[i], tem); fprintf(fout, " OIS Futures Coeff %f %f %f %f %f \n", AConst[i], Atem, B0tem, B1tem, B2tem); } // Check and initialize seeds for each thread for (i = 0; i < 6; i++) dSeed_Init[i] = iseed[i]; // roll seeds 3 times for initiaization for (i = 1; i <= 3; i++) roll_seedCPU(dSeed_Init); for (i = 0; i < 3; i++) { seed1[i] = dSeed_Init[i]; seed2[i] = dSeed_Init[i + 3]; } for (i = 0; i < 6; i++) dSeeds_Threads[1][i] = dSeed_Init[i]; printf(" The starting seeds for MRG32k3a \n"); fprintf(fout, " The starting seeds for MRG32k3a \n"); for (i = 0; i < 6; i++) { printf(" %f ", dSeed_Init[i]); fprintf(fout, " %f ", dSeed_Init[i]); } printf(" \n"); fprintf(fout, " \n"); nSimsPerPath = nMat1[nMat] * (nSteps * 3); SkipAhead_MRG32k3a(nSimsPerPath, An1, An2); SkipAhead2_MRG32k3a(nSim, An1, An2, Bn1, Bn2); if (nThreads > 1) { for (k = 1; k < nThreads; k++) { for (i = 0; i < 3; i++) { seed1[i] = dSeeds_Threads[k][i]; seed2[i] = dSeeds_Threads[k][i + 3]; } for (i = 0; i < 3; i++) { sseed1[i] = 0.0; sseed2[i] = 0.0; for (j = 0; j < 3; j++) { sseed1[i] += (Bn1[i][j] * seed1[j]) % im1; sseed2[i] += (Bn2[i][j] * seed2[j]) % im2; } lp1 = sseed1[i]; lp1 = lp1 % im1; if (lp1 < 0) lp1 += im1; sseed1[i] = lp1; lp2 = sseed2[i]; lp2 = lp2 % im2; if (lp2 < 0) lp2 += im2; sseed2[i] = lp2; } for (i = 0; i < 3; i++) { dSeeds_Threads[k + 1][i] = sseed1[i]; dSeeds_Threads[k + 1][i + 3] = sseed2[i]; } } } // end of if nThreads > 1 // Set up multi-threading here if (nThreads == 1) SimulateOISRates(0); else { for (i = 0; i < nThreads; i++) { thr_data[i].tid = i; threads[i] = (HANDLE)_beginthreadex(NULL, 0, RunThread, &thr_data[i], 0, &threadID[i]); } WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE); for (i = 0; i < nThreads; i++) CloseHandle(threads[i]); } // Average across the threads fprintf(fout, " Completed simulations. 
Now averaging across threads \n"); for (k = 1; k <= nMat; k++) { for (i = 1; i <= nStrikes[k]; i++) { LIBOROption[k][i][0] = 0.0; StdErr[k][i][0] = 0.0; for (j = 1; j <= nThreads; j++) { LIBOROption[k][i][0] = LIBOROption[k][i][0] + LIBOROption[k][i][j]; StdErr[k][i][0] = StdErr[k][i][0] + StdErr[k][i][j]; } LIBOROption[k][i][0] = LIBOROption[k][i][0] / (nThreads*nSim); StdErr[k][i][0] = StdErr[k][i][0] / (nThreads*nSim) - LIBOROption[k][i][0] * LIBOROption[k][i][0]; StdErr[k][i][0] = sqrt(StdErr[k][i][0] / (nThreads*nSim)); } } _ftime64_s(&timebuffer); time1 = timebuffer.time + timebuffer.millitm / 1000.0; time1 = time1 - time0; // Print results printf(" European Options Prices (in percent) from Monte Carlo Simulation using %i simulations across %i threads \n", nSim*nThreads, nThreads); for (i = 1; i <= nMat; i++) { for (j = 1; j <= nStrikes[i]; j++) { printf(" %i %10.8f %10.8f %10.8f \n", OptType[i][j], Strikes[i][j] * 100, LIBOROption[i][j][0] * 100, StdErr[i][j][0] * 100); } } printf(" Compute time %f \n", time1); fprintf(fout, " Compute time %f \n", time1); fclose(fout); // free the work arrays free(A); free(FutRate0); free(nStrikes); free(nMat1); free(nMat2); free(FwdSpread); free(ystart); free(Discount); free_matrix_fp64(Strikes); free_uint_matrix(OptType, 1, nMat, 1, maxStrikes); free(AConst); free(B0); free(B1); free(B2); free_d3tensor(LIBOROption, 1, nMat, 1, maxStrikes, 0, nThreads); free_d3tensor(StdErr, 1, nMat, 1, maxStrikes, 0, nThreads); free_uint_matrix(An1, 0, 2, 0, 2); free_uint_matrix(An2, 0, 2, 0, 2); free_uint_matrix(Bn1, 0, 2, 0, 2); free_uint_matrix(Bn2, 0, 2, 0, 2); return 0; } // End of Sim_3m_LIBOROptions_GPU_v2 unsigned __stdcall RunThread(void *param) { int iThread; thread_data_t *data = (thread_data_t *)param; iThread = data->tid; SimulateOISRates(iThread); return 1; } void SimulateOISRates(int iThread) { const unsigned int im1 = 4294967087; const unsigned int im2 = 4294944443; int i, j, k, kk, jk, jThr, jstart, jend, cudaNumber; double dseed[6], y1rho, tem, tem1, temopt; double **sum, **sumStdErr; unsigned long long lp1, lp2, seed1[3], seed2[3], sseed1[3], sseed2[3]; unsigned int *h_seeds; int *h_nMat1, *h_nMat2; float *h_FwdSpread, *h_AConst, *h_B0, *h_B1, *h_B2; float h_r0, h_y10, h_y20; float y1const, *h_sigz, *h_lamsig, *h_temexp; double temkappa[3], tem2kappa[3], sigz[3], lamsig[3], temexp[3]; double *h_SimDiscount, *h_SimLIBOR; int *d_nMat1, *d_nMat2; float *d_FwdSpread, *d_AConst, *d_B0, *d_B1, *d_B2; unsigned int *d_seeds; float *d_sigz, *d_lamsig, *d_temexp; double *d_SimDiscount, *d_SimLIBOR; hipError_t cudaStatus; cudaNumber = 0; jThr = iThread + 1; printf(" Now running simulation for thread %i \n", jThr); // fprintf(fout, " Now running simulation for thread %i \n", jThr); cudaStatus = hipSetDevice(cudaNumber); sum = matrix_fp64(nMat + 1, maxStrikes + 1); sumStdErr = matrix_fp64(nMat + 1, maxStrikes + 1); for (k = 1; k <= nMat; k++) { for (j = 1; j <= nStrikes[k]; j++) { sum[k][j] = 0.0; sumStdErr[k][j] = 0.0; } } h_seeds = (unsigned int *)malloc(6 * nSim * sizeof(unsigned int)); h_r0 = r0; h_y10 = y10; h_y20 = y20; h_nMat1 = (int *)malloc(nMat * sizeof(int)); h_nMat2 = (int *)malloc(nMat * sizeof(int)); h_FwdSpread = (float *)malloc(nMat * sizeof(float)); h_AConst = (float *)malloc(nMat * sizeof(float)); h_B0 = (float *)malloc(nMat * sizeof(float)); h_B1 = (float *)malloc(nMat * sizeof(float)); h_B2 = (float *)malloc(nMat * sizeof(float)); h_sigz = (float *)malloc(3 * sizeof(float)); h_lamsig = (float *)malloc(3 * sizeof(float)); h_temexp = (float 
*)malloc(3 * sizeof(float)); h_SimDiscount = (double *)malloc(nMat*nSim*sizeof(double)); h_SimLIBOR = (double *)malloc(nMat*nSim * sizeof(double)); hipMalloc((void **)&d_seeds, 6 * nSim * sizeof(unsigned int)); hipMalloc((void **)&d_nMat1, nMat * sizeof(int)); hipMalloc((void **)&d_nMat2, nMat * sizeof(int)); hipMalloc((void **)&d_FwdSpread, nMat * sizeof(float)); hipMalloc((void **)&d_AConst, nMat * sizeof(float)); hipMalloc((void **)&d_B0, nMat * sizeof(float)); hipMalloc((void **)&d_B1, nMat * sizeof(float)); hipMalloc((void **)&d_B2, nMat * sizeof(float)); hipMalloc((void **)&d_sigz, 3 * sizeof(float)); hipMalloc((void **)&d_lamsig, 3 * sizeof(float)); hipMalloc((void **)&d_temexp, 3 * sizeof(float)); hipMalloc((void **)&d_SimDiscount, nMat*nSim*sizeof(double)); hipMalloc((void **)&d_SimLIBOR, nMat*nSim * sizeof(double)); // set seeds for the start of each path; this requires the most time for (i = 0; i < 6; i++) h_seeds[i] = dSeeds_Threads[jThr][i]; for (i = 0; i < 3; i++) { seed1[i] = dSeeds_Threads[jThr][i]; seed2[i] = dSeeds_Threads[jThr][i + 3]; } for (k = 1; k < nSim; k++) { for (i = 0; i < 3; i++) { sseed1[i] = 0.0; sseed2[i] = 0.0; for (j = 0; j < 3; j++) { sseed1[i] += (An1[i][j] * seed1[j]) % im1; sseed2[i] += (An2[i][j] * seed2[j]) % im2; } lp1 = sseed1[i]; lp1 = lp1 % im1; if (lp1 < 0) lp1 += im1; sseed1[i] = lp1; lp2 = sseed2[i]; lp2 = lp2 % im2; if (lp2 < 0) lp2 += im2; sseed2[i] = lp2; } for (i = 0; i < 3; i++) { h_seeds[i + k * 6] = sseed1[i]; h_seeds[i + 3 + k * 6] = sseed2[i]; } for (i = 0; i < 3; i++) { seed1[i] = sseed1[i]; seed2[i] = sseed2[i]; } } // Move values to GPU device arrays hipMemcpy(d_seeds, h_seeds, 6 * nSim * sizeof(unsigned int), hipMemcpyHostToDevice); for (j = 0; j < 3; j++) { temexp[j] = exp(-kappa[j] * dt); tem = kappa[j] * dt; if (fabs(tem) < 1.0e-06) temkappa[j] = 1.0 - 0.5*tem + tem*tem / 6.0 - tem*tem*tem / 24.0 + tem*tem*tem*tem / 120.0; else temkappa[j] = (1.0 - exp(-kappa[j] * dt)) / tem; tem = 2.0*kappa[j]*dt; if (fabs(tem) < 1.0e-06) tem2kappa[j] = 1.0 - 0.5*tem + tem*tem / 6.0 - tem*tem*tem / 24.0 + tem*tem*tem*tem / 120.0; else tem2kappa[j] = (1.0 - temexp[j]*temexp[j]) / tem; lamsig[j] = lambda[j] * sigma[j] * sigma[j] * dt*temkappa[j]; sigz[j] = sigma[j] * sqrt(dt*tem2kappa[j]); } for (i = 0;i < 3;i++) { h_sigz[i] = sigz[i]; h_lamsig[i] = lamsig[i]; h_temexp[i] = temexp[i]; } // Pass y1const y1const = theta[1] * (1.0 - temexp[1]); for (i = 0;i < nMat;i++) { h_nMat1[i] = nMat1[i + 1]; h_nMat2[i] = nMat2[i + 1]; } for (i = 0;i < nMat;i++) { h_FwdSpread[i] = FwdSpread[i+1]; } for (j = 0;j < nMat;j++) { h_AConst[j] = AConst[j + 1]; h_B0[j] = B0[j + 1]; h_B1[j] = B1[j + 1]; h_B2[j] = B2[j + 1]; } hipMemcpy(d_nMat1, h_nMat1, nMat * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_nMat2, h_nMat2, nMat * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_FwdSpread, h_FwdSpread, nMat * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_AConst, h_AConst, nMat * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_B0, h_B0, nMat * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_B1, h_B1, nMat * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_B2, h_B2, nMat * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_sigz, h_sigz, 3 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_lamsig, h_lamsig, 3 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_temexp, h_temexp, 3 * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( SimulatePathGPU) , dim3((1 + nSim / 128)), dim3(128) , 0, 0, nSim, nMat, nSteps, dt, y1const, h_r0, h_y10, h_y20, 
d_nMat1, d_nMat2, d_seeds, d_AConst, d_B0, d_B1, d_B2, d_FwdSpread, d_temexp, d_lamsig, d_sigz,d_SimDiscount, d_SimLIBOR); hipGetLastError(); hipDeviceSynchronize(); //Read GPU simulations back to host CPU hipMemcpy(h_SimDiscount, d_SimDiscount, nMat*nSim*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(h_SimLIBOR, d_SimLIBOR, nMat*nSim * sizeof(double), hipMemcpyDeviceToHost); // Compute option payoffs fromsimulated discounts and simulated LIBOR's for (i = 0; i < nSim; i++) { for (j = 0; j < nMat; j++) { tem1 = h_SimDiscount[i*nMat + j] ; tem = h_SimLIBOR[i*nMat + j] ; // Calculate discounted option payoffs for (k = 1; k <= nStrikes[j+1]; k++) { if (OptType[j+1][k] == 1) temopt = max(0.0, tem - Strikes[j+1][k]); else temopt = max(0.0, Strikes[j+1][k] - tem); sum[j+1][k] += tem1 * temopt; sumStdErr[j+1][k] += tem1 * tem1*temopt*temopt; } } // End of loop on j for nMat maturities } // end of loop on i for independent simulations for (k = 1; k <= nMat; k++) { for (j = 1; j <= nStrikes[k]; j++) { LIBOROption[k][j][jThr] = sum[k][j]; StdErr[k][j][jThr] = sumStdErr[k][j]; } } // Release memory allocation free_matrix_fp64(sum); free_matrix_fp64(sumStdErr); free(h_seeds); free(h_nMat1); free(h_nMat2); free(h_FwdSpread); free(h_AConst); free(h_B0); free(h_B1); free(h_B2); free(h_sigz); free(h_lamsig); free(h_temexp); free(h_SimDiscount); free(h_SimLIBOR); hipFree(d_seeds); hipFree(d_nMat1); hipFree(d_nMat2); hipFree(d_FwdSpread); hipFree(d_AConst); hipFree(d_B0); hipFree(d_B1); hipFree(d_B2); hipFree(d_sigz); hipFree(d_lamsig); hipFree(d_temexp); hipFree(d_SimDiscount); hipFree(d_SimLIBOR); hipDeviceReset(); printf(" Finished with simulation function and GPU \n"); } /* The function for computing the derivatives */ void derivs(double t, double y[], double dydx[]) { double B0tem, B1tem, B2tem, tem, C1, C2, C01, C02; // No Need to calculate B1 tem = kappa[0] * t; if (fabs(tem) < 1.0e-06) B0tem = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else B0tem = (1.0 - exp(-tem)) / kappa[0]; tem = kappa[1] * t; if (fabs(tem) < 1.0e-06) C1 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C1 = (1.0 - exp(-tem)) / kappa[1]; tem = kappa[2] * t; if (fabs(tem) < 1.0e-06) C2 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C2 = (1.0 - exp(-tem)) / kappa[2]; tem = (kappa[0] - kappa[1]) * t; if (fabs(tem) < 1.0e-06) C01 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C01 = (1.0 - exp(-tem)) / (kappa[0] - kappa[1]); tem = (kappa[0] - kappa[2]) * t; if (fabs(tem) < 1.0e-06) C02 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C02 = (1.0 - exp(-tem)) / (kappa[0] - kappa[2]); B1tem = C1 - exp(-kappa[1] * t)*C01; B2tem = C2 - exp(-kappa[2] * t)*C02; dydx[1] = kappa[1] * theta[1] * B1tem - 0.5*(sigma[0] * sigma[0] * B0tem*B0tem + sigma[1] * sigma[1] * B1tem*B1tem + sigma[2] * sigma[2] * B2tem*B2tem) - (lambda[0] * sigma[0] * sigma[0] * B0tem + lambda[1] * sigma[1] * sigma[1] * B1tem + lambda[2] * sigma[2] * sigma[2] * B2tem); } void derivs2(double t, double y[], double dydx[]) { double B0tem, B1tem, B2tem, tem, C1, C2, C01, C02; B0tem = exp(-kappa[0] * t)*B0[jMat]; tem = (kappa[0] - kappa[1]) * t; if (fabs(tem) < 1.0e-06) C01 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C01 = (1.0 - exp(-tem)) / (kappa[0] - kappa[1]); tem = (kappa[0] - kappa[2]) * t; if (fabs(tem) < 1.0e-06) C02 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C02 = (1.0 - exp(-tem)) / (kappa[0] - kappa[2]); C1 = 
exp(-kappa[1] * t); C2 = exp(-kappa[2] * t); B1tem = C1 * (B1[jMat] + kappa[0] * B0[jMat] * C01); B2tem = C2 * (B2[jMat] + kappa[0] * B0[jMat] * C02); dydx[1] = kappa[1] * theta[1] * B1tem - 0.5*(sigma[0] * sigma[0] * B0tem*B0tem + sigma[1] * sigma[1] * B1tem*B1tem + sigma[2] * sigma[2] * B2tem*B2tem) - (lambda[0] * sigma[0] * sigma[0] * B0tem + lambda[1] * sigma[1] * sigma[1] * B1tem + lambda[2] * sigma[2] * sigma[2] * B2tem); } /* (C) Copr. 1986-92 Numerical Recipes Software G2v#X):K. */ #undef MAXSTP #undef TINY #undef SAFETY #undef PGROW #undef PSHRNK #undef ERRCON #undef NR_END #undef FREE_ARG #undef MAX_NUM_THREADS #undef NEQ #undef nFactors
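// Illustrative host-only sketch of the MRG32k3a skip-ahead step used in the simulator
// above when assigning seeds: the two 3-element seed vectors are multiplied by
// precomputed 3x3 jump matrices modulo im1 and im2 (An1/An2 per path, Bn1/Bn2 per CPU
// thread). The identity matrices and seed values below are placeholders; the real jump
// matrices come from SkipAhead_MRG32k3a / SkipAhead2_MRG32k3a.
#include <cstdio>

static const unsigned long long IM1 = 4294967087ULL;
static const unsigned long long IM2 = 4294944443ULL;

// Advance seed1/seed2 in place by one precomputed jump: s <- A * s (mod m).
void skip_ahead(unsigned long long seed1[3], unsigned long long seed2[3],
                const unsigned long long A1[3][3], const unsigned long long A2[3][3]) {
    unsigned long long s1[3], s2[3];
    for (int i = 0; i < 3; i++) {
        s1[i] = 0; s2[i] = 0;
        for (int j = 0; j < 3; j++) {
            s1[i] = (s1[i] + (A1[i][j] * seed1[j]) % IM1) % IM1;
            s2[i] = (s2[i] + (A2[i][j] * seed2[j]) % IM2) % IM2;
        }
    }
    for (int i = 0; i < 3; i++) { seed1[i] = s1[i]; seed2[i] = s2[i]; }
}

int main() {
    unsigned long long A1[3][3] = {{1,0,0},{0,1,0},{0,0,1}};   // placeholder jump matrices
    unsigned long long A2[3][3] = {{1,0,0},{0,1,0},{0,0,1}};
    unsigned long long seed1[3] = {12345, 12345, 12345};
    unsigned long long seed2[3] = {12345, 12345, 12345};
    skip_ahead(seed1, seed2, A1, A2);
    printf("%llu %llu\n", seed1[0], seed2[0]);
    return 0;
}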
a5f6ea877cd96e3215b8d85f59bb7c8f75cbf9c3.cu
// Sim_3m_LIBOROptions_GPU_v2.cu // // Function to simulate the 3 Factor Hull-White model // dll to be called from Visual Basic in Excel // This model prices 3m LIBOR Options (European style ED Options) // Input number of option expirations and maximum number of strikes // for each expiration // Input the futures rate, number of strikes, expiration, and term // Must calculate A and B coefficients at expiration dates // // Created January 2019, by Louis Scott // #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "stdafx.h" #include "DynamicMemoryAllocation.h" #include "SimulationFunctions_MRG32k3a.h" #include "ODE_Solver.h" #include "Sim_3m_LIBOROptions_kernel.cuh" #define MAX_NUM_THREADS 4 #define NEQ 1 #define nFactors 3 using namespace std; /* prototype */ unsigned __stdcall RunThread(void *param); void SimulateOISRates(int jThr); void derivs(double t, double y[], double dydx[]); void derivs2(double t, double y[], double dydx[]); /* Global Variables */ const unsigned int I31_max = 2147483647; const double DaysPerYear = 365.245; int nMat, nSteps, nSim, maxStrikes, jMat; int *nMat1, *nMat2, *nStrikes; unsigned int **OptType; double dSeeds_Threads[MAX_NUM_THREADS + 1][6]; double dt, sqdt, r0, y10, y20; double kappa[4], theta[4], sigma[4], lambda[4]; double *A, *FutRate0, *FwdSpread, **Strikes, ***LIBOROption, ***StdErr; double *Discount, *AConst, *B0, *B1, *B2; unsigned int **An1, **An2; FILE *fout; /* create thread argument struct for Run_Thread() */ typedef struct _thread_data_t { int tid; // double stuff; } thread_data_t; int main() { const unsigned int im1 = 4294967087; const unsigned int im2 = 4294944443; short ifail = 0; int i, j, k, nrerror; double *A; int nMaturities, nThreadsToUse, nSimIn, nStepsPerDay, maxStrikesIn; double r0_in, y10_in, y20_in; double tem, dSeed_Init[6]; int neq, nok, nbad; double eps, hstart, t1, t2, h1, hmin; double C1, C2, C01, C02, *ystart; double Atem, B0tem, B1tem, B2tem; int nSimsPerPath, nThreads; unsigned int **Bn1, **Bn2, iseed[6]; unsigned long long lp1, lp2, seed1[3], seed2[3], sseed1[3], sseed2[3]; double time0, time1; struct _timeb timebuffer; HANDLE threads[MAX_NUM_THREADS]; errno_t errcheck; unsigned threadID[MAX_NUM_THREADS]; // create a thread_data_t argument array thread_data_t thr_data[MAX_NUM_THREADS]; errcheck = fopen_s(&fout, "Check_3m_LIBOR_Options.txt", "w"); FILE *fin; errcheck = fopen_s(&fin, "LIBOROption_Parameters.txt", "r"); if (errcheck) printf(" File LIBOROption_Parameters.txt not opened \n"); fscanf_s(fin, " %i %i %i %i %i ", &nSimIn, &nThreadsToUse, &nMaturities, &maxStrikesIn, &nStepsPerDay); printf(" Enter the number of threads and the number of simulations per thread \n"); printf(" Enter negative numbers to use defaults \n"); cin >> i; cin >> j; if (i > 0) nThreadsToUse = i; if (j > 0) nSimIn = j; nMat = nMaturities; nThreads = nThreadsToUse; if (nThreads > MAX_NUM_THREADS) nThreads = MAX_NUM_THREADS; nSim = nSimIn; nSteps = nStepsPerDay; maxStrikes = maxStrikesIn; nrerror = 0; neq = NEQ; A = (double *)malloc(25 * sizeof(double)); LIBOROption = d3tensor(1, nMat, 1, maxStrikes, 0, nThreads); StdErr = d3tensor(1, nMat, 1, maxStrikes, 0, nThreads); Strikes = matrix_fp64(nMat + 1, maxStrikes + 1); OptType = uint_matrix(1, nMat, 1, maxStrikes); FutRate0 = (double *)malloc((nMat + 1) * sizeof(double)); nStrikes = (int *)malloc((nMat + 1) * sizeof(int)); FwdSpread = (double *)malloc((nMat + 1) * sizeof(double)); nMat1 = (int *)malloc((nMat + 1) * sizeof(int)); nMat2 = (int *)malloc((nMat + 1) * sizeof(int)); An1 
= uint_matrix(0, 2, 0, 2); An2 = uint_matrix(0, 2, 0, 2); Bn1 = uint_matrix(0, 2, 0, 2); Bn2 = uint_matrix(0, 2, 0, 2); printf(" Parameters \n"); fprintf(fout, " Parameters \n"); for (i = 1; i <= 10; i++) { fscanf_s(fin, " %lf ", &A[i]); printf(" %f ", A[i]); fprintf(fout, " %f ", A[i]); } printf(" \n"); fprintf(fout, " \n"); fscanf_s(fin, " %lf %lf %lf ", &r0_in, &y10_in, &y20_in); r0 = r0_in; y10 = y10_in; y20 = y20_in; printf(" Initial values for r, y1, y2: %f %f %f \n", r0, y10, y20); if (nMat > nThreads) nMat = nThreads; printf(" %i %i %i \n", nSim, nStepsPerDay, nThreads); fprintf(fout, " %i %i %i \n", nSim, nStepsPerDay, nThreads); printf(" The input maturity days, nMat = %i \n", nMat); fprintf(fout, " The input maturity nMat, nMat = %i \n", nMat); printf(" The initial seeds for MRG32k3a \n"); fprintf(fout, " The initial seeds for MRG32k3a \n"); for (i = 1; i <= 6; i++) { fscanf_s(fin, " %i ", &j); iseed[i - 1] = j; printf(" %i ", iseed[i - 1]); fprintf(fout, " %i ", iseed[i - 1]); } printf(" \n"); fprintf(fout, " \n"); for (i = 1; i <= nMat; i++) { fscanf_s(fin, " %lf %i %i %i ", &FutRate0[i], &nMat1[i], &nMat2[i], &nStrikes[i]); for (j = 1; j <= nStrikes[i]; j++) { fscanf_s(fin, " %lf %i ", &Strikes[i][j], &OptType[i][j]); } } /* Parameter inputs A[1] = kappa[0] A[2] = sigma[0] A[3] = lambda[0] A[4] = kappa[1] A[5] = theta[1] A[6] = sigma[1] A[7] = lambda[1] A[8] = kappa[2] A[9] = sigma[2] A[10] = lambda[2] */ kappa[0] = A[1]; sigma[0] = A[2]; lambda[0] = A[3]; kappa[1] = A[4]; theta[1] = A[5]; sigma[1] = A[6]; lambda[1] = A[7]; kappa[2] = A[8]; sigma[2] = A[9]; lambda[2] = A[10]; r0 = r0_in; y10 = y10_in; y20 = y20_in; // End of input section _ftime64_s(&timebuffer); time0 = timebuffer.time + timebuffer.millitm / 1000.0; dt = 1.0 / (DaysPerYear*nSteps); sqdt = sqrt(dt); // Calculate initial discount function for payment dates // and annuity factors for swaption expirations AConst = (double *)malloc((nMat + 1) * sizeof(double)); B0 = (double *)malloc((nMat + 1) * sizeof(double)); B1 = (double *)malloc((nMat + 1) * sizeof(double)); B2 = (double *)malloc((nMat + 1) * sizeof(double)); ystart = (double *)malloc((NEQ + 1) * sizeof(double)); Discount = (double *)malloc((nMat + 1) * sizeof(double)); // Use position i = 0 for initial discount function // Not certain that we need initial discount function hstart = 0.0; eps = 0.00000001; hmin = 0.0; // Compute exp. 
affine coefficients for 3m, using nMat2, at each maturity/expiration // NMat2 days past 2 days forward for (i = 1; i <= nMat; i++) { ystart[1] = 0.0; t1 = 0.0; t2 = 2.0 / DaysPerYear; h1 = 0.5*(t2 - t1); odeint(ystart, neq, t1, t2, eps, h1, hmin, &nok, &nbad, &nrerror, derivs, rkqs); if (nrerror < 0) { ifail = -10 + nrerror; return ifail; } Atem = ystart[1]; // Need to calculate B coefficients for 2 day rate tem = kappa[0] * t2; if (fabs(tem) < 1.0e-06) B0tem = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else B0tem = (1.0 - exp(-tem)) / kappa[0]; tem = kappa[1] * t2; if (fabs(tem) < 1.0e-06) C1 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C1 = (1.0 - exp(-tem)) / kappa[1]; tem = kappa[2] * t2; if (fabs(tem) < 1.0e-06) C2 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C2 = (1.0 - exp(-tem)) / kappa[2]; tem = (kappa[0] - kappa[1]) * t2; if (fabs(tem) < 1.0e-06) C01 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C01 = (1.0 - exp(-tem)) / (kappa[0] - kappa[1]); tem = (kappa[0] - kappa[2]) * t2; if (fabs(tem) < 1.0e-06) C02 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C02 = (1.0 - exp(-tem)) / (kappa[0] - kappa[2]); B1tem = C1 - exp(-kappa[1] * t2)*C01; B2tem = C2 - exp(-kappa[2] * t2)*C02; // Discount[i] = exp(-(AConst[0][i] + B0[0][i] * r0 + B1[0][i] * y10 + B2[0][i] * y20)); t1 = t2; t2 = (2 + nMat2[i]) / DaysPerYear; h1 = 0.5*(t2 - t1); odeint(ystart, neq, t1, t2, eps, h1, hmin, &nok, &nbad, &nrerror, derivs, rkqs); if (nrerror < 0) { ifail = -10 + nrerror; return ifail; } AConst[i] = ystart[1] - Atem; // Need to calculate B coefficients tem = kappa[0] * t2; if (fabs(tem) < 1.0e-06) B0[i] = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else B0[i] = (1.0 - exp(-tem)) / kappa[0]; tem = kappa[1] * t2; if (fabs(tem) < 1.0e-06) C1 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C1 = (1.0 - exp(-tem)) / kappa[1]; tem = kappa[2] * t2; if (fabs(tem) < 1.0e-06) C2 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C2 = (1.0 - exp(-tem)) / kappa[2]; tem = (kappa[0] - kappa[1]) * t2; if (fabs(tem) < 1.0e-06) C01 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C01 = (1.0 - exp(-tem)) / (kappa[0] - kappa[1]); tem = (kappa[0] - kappa[2]) * t2; if (fabs(tem) < 1.0e-06) C02 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C02 = (1.0 - exp(-tem)) / (kappa[0] - kappa[2]); B1[i] = C1 - exp(-kappa[1] * t2)*C01; B2[i] = C2 - exp(-kappa[2] * t2)*C02; B0[i] = B0[i] - B0tem; B1[i] = B1[i] - B1tem; B2[i] = B2[i] - B2tem; // Discount[i] = exp(-(AConst[i] + B0[i] * r0 + B1[i] * y10 + B2[i] * y20)); } // Use previous coefficients and calculate MGF at each expiration date, tehn calculate spread between // 3m LIBOR futures and 3m OIS futures (hypothetical) -> FwdSpread[i], calculated for annualized simple interest rate for (i = 1; i <= nMat; i++) { jMat = i; ystart[1] = 0.0; t1 = 0.0; t2 = nMat1[i] / DaysPerYear; h1 = 0.5*(t2 - t1); odeint(ystart, neq, t1, t2, eps, h1, hmin, &nok, &nbad, &nrerror, derivs2, rkqs); if (nrerror < 0) { ifail = -10 + nrerror; return ifail; } Atem = ystart[1]; // Need to calculate B coefficients for 2 day rate B0tem = exp(-kappa[0] * t2)*B0[i]; tem = (kappa[0] - kappa[1]) * t2; if (fabs(tem) < 1.0e-06) C01 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C01 = (1.0 - exp(-tem)) / (kappa[0] - kappa[1]); tem = (kappa[0] - kappa[2]) * t2; if (fabs(tem) < 1.0e-06) 
C02 = t2 * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C02 = (1.0 - exp(-tem)) / (kappa[0] - kappa[2]); C1 = exp(-kappa[1] * t2); C2 = exp(-kappa[2] * t2); B1tem = C1 * (B1[i] + kappa[0] * B0[i] * C01); B2tem = C2 * (B2[i] + kappa[0] * B0[i] * C02); // Now calcuate spread between 3m LIBOR futures and 3m OIS futures // place the 3m OIS futures into tem tem = AConst[i] + Atem + B0tem * r0 + B1tem * y10 + B2tem * y20; tem = (exp(tem) - 1.0)*360.0 / nMat2[i]; FwdSpread[i] = FutRate0[i] - tem; fprintf(fout, " 3m LIBOR - 3m OIS Futures Rate Spread %f 3m OIS Futures %f \n", FwdSpread[i], tem); fprintf(fout, " OIS Futures Coeff %f %f %f %f %f \n", AConst[i], Atem, B0tem, B1tem, B2tem); } // Check and initialize seeds for each thread for (i = 0; i < 6; i++) dSeed_Init[i] = iseed[i]; // roll seeds 3 times for initiaization for (i = 1; i <= 3; i++) roll_seedCPU(dSeed_Init); for (i = 0; i < 3; i++) { seed1[i] = dSeed_Init[i]; seed2[i] = dSeed_Init[i + 3]; } for (i = 0; i < 6; i++) dSeeds_Threads[1][i] = dSeed_Init[i]; printf(" The starting seeds for MRG32k3a \n"); fprintf(fout, " The starting seeds for MRG32k3a \n"); for (i = 0; i < 6; i++) { printf(" %f ", dSeed_Init[i]); fprintf(fout, " %f ", dSeed_Init[i]); } printf(" \n"); fprintf(fout, " \n"); nSimsPerPath = nMat1[nMat] * (nSteps * 3); SkipAhead_MRG32k3a(nSimsPerPath, An1, An2); SkipAhead2_MRG32k3a(nSim, An1, An2, Bn1, Bn2); if (nThreads > 1) { for (k = 1; k < nThreads; k++) { for (i = 0; i < 3; i++) { seed1[i] = dSeeds_Threads[k][i]; seed2[i] = dSeeds_Threads[k][i + 3]; } for (i = 0; i < 3; i++) { sseed1[i] = 0.0; sseed2[i] = 0.0; for (j = 0; j < 3; j++) { sseed1[i] += (Bn1[i][j] * seed1[j]) % im1; sseed2[i] += (Bn2[i][j] * seed2[j]) % im2; } lp1 = sseed1[i]; lp1 = lp1 % im1; if (lp1 < 0) lp1 += im1; sseed1[i] = lp1; lp2 = sseed2[i]; lp2 = lp2 % im2; if (lp2 < 0) lp2 += im2; sseed2[i] = lp2; } for (i = 0; i < 3; i++) { dSeeds_Threads[k + 1][i] = sseed1[i]; dSeeds_Threads[k + 1][i + 3] = sseed2[i]; } } } // end of if nThreads > 1 // Set up multi-threading here if (nThreads == 1) SimulateOISRates(0); else { for (i = 0; i < nThreads; i++) { thr_data[i].tid = i; threads[i] = (HANDLE)_beginthreadex(NULL, 0, RunThread, &thr_data[i], 0, &threadID[i]); } WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE); for (i = 0; i < nThreads; i++) CloseHandle(threads[i]); } // Average across the threads fprintf(fout, " Completed simulations. 
Now averaging across threads \n"); for (k = 1; k <= nMat; k++) { for (i = 1; i <= nStrikes[k]; i++) { LIBOROption[k][i][0] = 0.0; StdErr[k][i][0] = 0.0; for (j = 1; j <= nThreads; j++) { LIBOROption[k][i][0] = LIBOROption[k][i][0] + LIBOROption[k][i][j]; StdErr[k][i][0] = StdErr[k][i][0] + StdErr[k][i][j]; } LIBOROption[k][i][0] = LIBOROption[k][i][0] / (nThreads*nSim); StdErr[k][i][0] = StdErr[k][i][0] / (nThreads*nSim) - LIBOROption[k][i][0] * LIBOROption[k][i][0]; StdErr[k][i][0] = sqrt(StdErr[k][i][0] / (nThreads*nSim)); } } _ftime64_s(&timebuffer); time1 = timebuffer.time + timebuffer.millitm / 1000.0; time1 = time1 - time0; // Print results printf(" European Options Prices (in percent) from Monte Carlo Simulation using %i simulations across %i threads \n", nSim*nThreads, nThreads); for (i = 1; i <= nMat; i++) { for (j = 1; j <= nStrikes[i]; j++) { printf(" %i %10.8f %10.8f %10.8f \n", OptType[i][j], Strikes[i][j] * 100, LIBOROption[i][j][0] * 100, StdErr[i][j][0] * 100); } } printf(" Compute time %f \n", time1); fprintf(fout, " Compute time %f \n", time1); fclose(fout); // free the work arrays free(A); free(FutRate0); free(nStrikes); free(nMat1); free(nMat2); free(FwdSpread); free(ystart); free(Discount); free_matrix_fp64(Strikes); free_uint_matrix(OptType, 1, nMat, 1, maxStrikes); free(AConst); free(B0); free(B1); free(B2); free_d3tensor(LIBOROption, 1, nMat, 1, maxStrikes, 0, nThreads); free_d3tensor(StdErr, 1, nMat, 1, maxStrikes, 0, nThreads); free_uint_matrix(An1, 0, 2, 0, 2); free_uint_matrix(An2, 0, 2, 0, 2); free_uint_matrix(Bn1, 0, 2, 0, 2); free_uint_matrix(Bn2, 0, 2, 0, 2); return 0; } // End of Sim_3m_LIBOROptions_GPU_v2 unsigned __stdcall RunThread(void *param) { int iThread; thread_data_t *data = (thread_data_t *)param; iThread = data->tid; SimulateOISRates(iThread); return 1; } void SimulateOISRates(int iThread) { const unsigned int im1 = 4294967087; const unsigned int im2 = 4294944443; int i, j, k, kk, jk, jThr, jstart, jend, cudaNumber; double dseed[6], y1rho, tem, tem1, temopt; double **sum, **sumStdErr; unsigned long long lp1, lp2, seed1[3], seed2[3], sseed1[3], sseed2[3]; unsigned int *h_seeds; int *h_nMat1, *h_nMat2; float *h_FwdSpread, *h_AConst, *h_B0, *h_B1, *h_B2; float h_r0, h_y10, h_y20; float y1const, *h_sigz, *h_lamsig, *h_temexp; double temkappa[3], tem2kappa[3], sigz[3], lamsig[3], temexp[3]; double *h_SimDiscount, *h_SimLIBOR; int *d_nMat1, *d_nMat2; float *d_FwdSpread, *d_AConst, *d_B0, *d_B1, *d_B2; unsigned int *d_seeds; float *d_sigz, *d_lamsig, *d_temexp; double *d_SimDiscount, *d_SimLIBOR; cudaError_t cudaStatus; cudaNumber = 0; jThr = iThread + 1; printf(" Now running simulation for thread %i \n", jThr); // fprintf(fout, " Now running simulation for thread %i \n", jThr); cudaStatus = cudaSetDevice(cudaNumber); sum = matrix_fp64(nMat + 1, maxStrikes + 1); sumStdErr = matrix_fp64(nMat + 1, maxStrikes + 1); for (k = 1; k <= nMat; k++) { for (j = 1; j <= nStrikes[k]; j++) { sum[k][j] = 0.0; sumStdErr[k][j] = 0.0; } } h_seeds = (unsigned int *)malloc(6 * nSim * sizeof(unsigned int)); h_r0 = r0; h_y10 = y10; h_y20 = y20; h_nMat1 = (int *)malloc(nMat * sizeof(int)); h_nMat2 = (int *)malloc(nMat * sizeof(int)); h_FwdSpread = (float *)malloc(nMat * sizeof(float)); h_AConst = (float *)malloc(nMat * sizeof(float)); h_B0 = (float *)malloc(nMat * sizeof(float)); h_B1 = (float *)malloc(nMat * sizeof(float)); h_B2 = (float *)malloc(nMat * sizeof(float)); h_sigz = (float *)malloc(3 * sizeof(float)); h_lamsig = (float *)malloc(3 * sizeof(float)); h_temexp = 
(float *)malloc(3 * sizeof(float)); h_SimDiscount = (double *)malloc(nMat*nSim*sizeof(double)); h_SimLIBOR = (double *)malloc(nMat*nSim * sizeof(double)); cudaMalloc((void **)&d_seeds, 6 * nSim * sizeof(unsigned int)); cudaMalloc((void **)&d_nMat1, nMat * sizeof(int)); cudaMalloc((void **)&d_nMat2, nMat * sizeof(int)); cudaMalloc((void **)&d_FwdSpread, nMat * sizeof(float)); cudaMalloc((void **)&d_AConst, nMat * sizeof(float)); cudaMalloc((void **)&d_B0, nMat * sizeof(float)); cudaMalloc((void **)&d_B1, nMat * sizeof(float)); cudaMalloc((void **)&d_B2, nMat * sizeof(float)); cudaMalloc((void **)&d_sigz, 3 * sizeof(float)); cudaMalloc((void **)&d_lamsig, 3 * sizeof(float)); cudaMalloc((void **)&d_temexp, 3 * sizeof(float)); cudaMalloc((void **)&d_SimDiscount, nMat*nSim*sizeof(double)); cudaMalloc((void **)&d_SimLIBOR, nMat*nSim * sizeof(double)); // set seeds for the start of each path; this requires the most time for (i = 0; i < 6; i++) h_seeds[i] = dSeeds_Threads[jThr][i]; for (i = 0; i < 3; i++) { seed1[i] = dSeeds_Threads[jThr][i]; seed2[i] = dSeeds_Threads[jThr][i + 3]; } for (k = 1; k < nSim; k++) { for (i = 0; i < 3; i++) { sseed1[i] = 0.0; sseed2[i] = 0.0; for (j = 0; j < 3; j++) { sseed1[i] += (An1[i][j] * seed1[j]) % im1; sseed2[i] += (An2[i][j] * seed2[j]) % im2; } lp1 = sseed1[i]; lp1 = lp1 % im1; if (lp1 < 0) lp1 += im1; sseed1[i] = lp1; lp2 = sseed2[i]; lp2 = lp2 % im2; if (lp2 < 0) lp2 += im2; sseed2[i] = lp2; } for (i = 0; i < 3; i++) { h_seeds[i + k * 6] = sseed1[i]; h_seeds[i + 3 + k * 6] = sseed2[i]; } for (i = 0; i < 3; i++) { seed1[i] = sseed1[i]; seed2[i] = sseed2[i]; } } // Move values to GPU device arrays cudaMemcpy(d_seeds, h_seeds, 6 * nSim * sizeof(unsigned int), cudaMemcpyHostToDevice); for (j = 0; j < 3; j++) { temexp[j] = exp(-kappa[j] * dt); tem = kappa[j] * dt; if (fabs(tem) < 1.0e-06) temkappa[j] = 1.0 - 0.5*tem + tem*tem / 6.0 - tem*tem*tem / 24.0 + tem*tem*tem*tem / 120.0; else temkappa[j] = (1.0 - exp(-kappa[j] * dt)) / tem; tem = 2.0*kappa[j]*dt; if (fabs(tem) < 1.0e-06) tem2kappa[j] = 1.0 - 0.5*tem + tem*tem / 6.0 - tem*tem*tem / 24.0 + tem*tem*tem*tem / 120.0; else tem2kappa[j] = (1.0 - temexp[j]*temexp[j]) / tem; lamsig[j] = lambda[j] * sigma[j] * sigma[j] * dt*temkappa[j]; sigz[j] = sigma[j] * sqrt(dt*tem2kappa[j]); } for (i = 0;i < 3;i++) { h_sigz[i] = sigz[i]; h_lamsig[i] = lamsig[i]; h_temexp[i] = temexp[i]; } // Pass y1const y1const = theta[1] * (1.0 - temexp[1]); for (i = 0;i < nMat;i++) { h_nMat1[i] = nMat1[i + 1]; h_nMat2[i] = nMat2[i + 1]; } for (i = 0;i < nMat;i++) { h_FwdSpread[i] = FwdSpread[i+1]; } for (j = 0;j < nMat;j++) { h_AConst[j] = AConst[j + 1]; h_B0[j] = B0[j + 1]; h_B1[j] = B1[j + 1]; h_B2[j] = B2[j + 1]; } cudaMemcpy(d_nMat1, h_nMat1, nMat * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_nMat2, h_nMat2, nMat * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_FwdSpread, h_FwdSpread, nMat * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_AConst, h_AConst, nMat * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_B0, h_B0, nMat * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_B1, h_B1, nMat * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_B2, h_B2, nMat * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_sigz, h_sigz, 3 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_lamsig, h_lamsig, 3 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_temexp, h_temexp, 3 * sizeof(float), cudaMemcpyHostToDevice); SimulatePathGPU <<<(1 + nSim / 128), 128 >>> ( nSim, nMat, nSteps, dt, y1const, h_r0, h_y10, 
h_y20, d_nMat1, d_nMat2, d_seeds, d_AConst, d_B0, d_B1, d_B2, d_FwdSpread, d_temexp, d_lamsig, d_sigz,d_SimDiscount, d_SimLIBOR); cudaGetLastError(); cudaDeviceSynchronize(); //Read GPU simulations back to host CPU cudaMemcpy(h_SimDiscount, d_SimDiscount, nMat*nSim*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(h_SimLIBOR, d_SimLIBOR, nMat*nSim * sizeof(double), cudaMemcpyDeviceToHost); // Compute option payoffs fromsimulated discounts and simulated LIBOR's for (i = 0; i < nSim; i++) { for (j = 0; j < nMat; j++) { tem1 = h_SimDiscount[i*nMat + j] ; tem = h_SimLIBOR[i*nMat + j] ; // Calculate discounted option payoffs for (k = 1; k <= nStrikes[j+1]; k++) { if (OptType[j+1][k] == 1) temopt = max(0.0, tem - Strikes[j+1][k]); else temopt = max(0.0, Strikes[j+1][k] - tem); sum[j+1][k] += tem1 * temopt; sumStdErr[j+1][k] += tem1 * tem1*temopt*temopt; } } // End of loop on j for nMat maturities } // end of loop on i for independent simulations for (k = 1; k <= nMat; k++) { for (j = 1; j <= nStrikes[k]; j++) { LIBOROption[k][j][jThr] = sum[k][j]; StdErr[k][j][jThr] = sumStdErr[k][j]; } } // Release memory allocation free_matrix_fp64(sum); free_matrix_fp64(sumStdErr); free(h_seeds); free(h_nMat1); free(h_nMat2); free(h_FwdSpread); free(h_AConst); free(h_B0); free(h_B1); free(h_B2); free(h_sigz); free(h_lamsig); free(h_temexp); free(h_SimDiscount); free(h_SimLIBOR); cudaFree(d_seeds); cudaFree(d_nMat1); cudaFree(d_nMat2); cudaFree(d_FwdSpread); cudaFree(d_AConst); cudaFree(d_B0); cudaFree(d_B1); cudaFree(d_B2); cudaFree(d_sigz); cudaFree(d_lamsig); cudaFree(d_temexp); cudaFree(d_SimDiscount); cudaFree(d_SimLIBOR); cudaDeviceReset(); printf(" Finished with simulation function and GPU \n"); } /* The function for computing the derivatives */ void derivs(double t, double y[], double dydx[]) { double B0tem, B1tem, B2tem, tem, C1, C2, C01, C02; // No Need to calculate B1 tem = kappa[0] * t; if (fabs(tem) < 1.0e-06) B0tem = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else B0tem = (1.0 - exp(-tem)) / kappa[0]; tem = kappa[1] * t; if (fabs(tem) < 1.0e-06) C1 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C1 = (1.0 - exp(-tem)) / kappa[1]; tem = kappa[2] * t; if (fabs(tem) < 1.0e-06) C2 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C2 = (1.0 - exp(-tem)) / kappa[2]; tem = (kappa[0] - kappa[1]) * t; if (fabs(tem) < 1.0e-06) C01 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C01 = (1.0 - exp(-tem)) / (kappa[0] - kappa[1]); tem = (kappa[0] - kappa[2]) * t; if (fabs(tem) < 1.0e-06) C02 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C02 = (1.0 - exp(-tem)) / (kappa[0] - kappa[2]); B1tem = C1 - exp(-kappa[1] * t)*C01; B2tem = C2 - exp(-kappa[2] * t)*C02; dydx[1] = kappa[1] * theta[1] * B1tem - 0.5*(sigma[0] * sigma[0] * B0tem*B0tem + sigma[1] * sigma[1] * B1tem*B1tem + sigma[2] * sigma[2] * B2tem*B2tem) - (lambda[0] * sigma[0] * sigma[0] * B0tem + lambda[1] * sigma[1] * sigma[1] * B1tem + lambda[2] * sigma[2] * sigma[2] * B2tem); } void derivs2(double t, double y[], double dydx[]) { double B0tem, B1tem, B2tem, tem, C1, C2, C01, C02; B0tem = exp(-kappa[0] * t)*B0[jMat]; tem = (kappa[0] - kappa[1]) * t; if (fabs(tem) < 1.0e-06) C01 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C01 = (1.0 - exp(-tem)) / (kappa[0] - kappa[1]); tem = (kappa[0] - kappa[2]) * t; if (fabs(tem) < 1.0e-06) C02 = t * (1.0 - 0.5*tem + tem * tem / 6.0 - tem * tem*tem / 24.0); else C02 = (1.0 - exp(-tem)) / 
(kappa[0] - kappa[2]); C1 = exp(-kappa[1] * t); C2 = exp(-kappa[2] * t); B1tem = C1 * (B1[jMat] + kappa[0] * B0[jMat] * C01); B2tem = C2 * (B2[jMat] + kappa[0] * B0[jMat] * C02); dydx[1] = kappa[1] * theta[1] * B1tem - 0.5*(sigma[0] * sigma[0] * B0tem*B0tem + sigma[1] * sigma[1] * B1tem*B1tem + sigma[2] * sigma[2] * B2tem*B2tem) - (lambda[0] * sigma[0] * sigma[0] * B0tem + lambda[1] * sigma[1] * sigma[1] * B1tem + lambda[2] * sigma[2] * sigma[2] * B2tem); } /* (C) Copr. 1986-92 Numerical Recipes Software G2v#X):K. */ #undef MAXSTP #undef TINY #undef SAFETY #undef PGROW #undef PSHRNK #undef ERRCON #undef NR_END #undef FREE_ARG #undef MAX_NUM_THREADS #undef NEQ #undef nFactors
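// Sketch (not from the source above): the B-coefficient blocks and derivs()/derivs2() repeatedly
// evaluate B(t) = (1 - exp(-k*t)) / k, guarding the small-argument case |k*t| < 1e-6 with the same
// four-term Taylor series to avoid cancellation. A single helper with that guard could replace the
// copy-pasted branches; the name one_minus_exp_over is illustrative only (requires <cmath>).
static double one_minus_exp_over(double k, double t)
{
    double x = k * t;
    if (fabs(x) < 1.0e-06)
        return t * (1.0 - 0.5*x + x*x/6.0 - x*x*x/24.0);   // same series as used above
    return (1.0 - exp(-x)) / k;
}
// Example use, matching the code above:
//   B0tem = one_minus_exp_over(kappa[0], t2);
//   C01   = one_minus_exp_over(kappa[0] - kappa[1], t2);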
f29434230df963a472a0dfef854cd0bce7d94804.hip
// !!! This is a file automatically generated by hipify!!! /** * David Dao, Johannes Rausch, Michal Szymczak * TU Munich * Sep 2015 */ #include <stdio.h> #include "mex.h" #include <iostream> #include <vector> #include <cmath> #include <hip/hip_runtime.h> #include <rocblas.h> #include <cula.h> #include <cula_lapack.h> using namespace std; void checkStatus(culaStatus status) { char buf[80]; if(!status) return; culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf)); printf("%s\n %d", buf, status); culaShutdown(); //exit(EXIT_FAILURE); } /* Input arguments */ #define IN_A prhs[0] #define IN_B prhs[1] /* Gateway routine */ void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) { float *matrix_A, *matrix_B; //float *d_matrix_A, *d_matrix_B; int N, D; /* Get the sizes of each input argument */ N = mxGetM(IN_A); D = mxGetN(IN_B); /* Assign pointers to the input arguments */ matrix_A = (float*) mxGetPr(IN_A); matrix_B = (float*) mxGetPr(IN_B); int matrix_pivot[N]; culaStatus status; status = culaInitialize(); checkStatus(status); status = culaSgesv(N, D, matrix_A, N, matrix_pivot, matrix_B, N); checkStatus(status); culaShutdown(); return; }
f29434230df963a472a0dfef854cd0bce7d94804.cu
/** * David Dao, Johannes Rausch, Michal Szymczak * TU Munich * Sep 2015 */ #include <stdio.h> #include "mex.h" #include <iostream> #include <vector> #include <cmath> #include <cuda_runtime.h> #include <cublas_v2.h> #include <cula.h> #include <cula_lapack.h> using namespace std; void checkStatus(culaStatus status) { char buf[80]; if(!status) return; culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf)); printf("%s\n %d", buf, status); culaShutdown(); //exit(EXIT_FAILURE); } /* Input arguments */ #define IN_A prhs[0] #define IN_B prhs[1] /* Gateway routine */ void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) { float *matrix_A, *matrix_B; //float *d_matrix_A, *d_matrix_B; int N, D; /* Get the sizes of each input argument */ N = mxGetM(IN_A); D = mxGetN(IN_B); /* Assign pointers to the input arguments */ matrix_A = (float*) mxGetPr(IN_A); matrix_B = (float*) mxGetPr(IN_B); int matrix_pivot[N]; culaStatus status; status = culaInitialize(); checkStatus(status); status = culaSgesv(N, D, matrix_A, N, matrix_pivot, matrix_B, N); checkStatus(status); culaShutdown(); return; }
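/* Sketch (not part of the MEX file above): a standalone call pattern for culaSgesv on a tiny
   2x2 system, assuming the same CULA Dense API used above. Data are column-major and B is
   overwritten in place with the solution X, which is why the gateway returns matrix_B. */
#include <cula.h>
#include <cula_lapack.h>
#include <stdio.h>

int cula_solve_demo(void)
{
    float A[4]  = { 4.0f, 1.0f,     /* column 0 of A */
                    2.0f, 3.0f };   /* column 1 of A */
    float B[2]  = { 10.0f, 7.0f };  /* right-hand side, overwritten with X */
    int ipiv[2];

    if (culaInitialize() != culaNoError) return -1;
    culaStatus s = culaSgesv(2, 1, A, 2, ipiv, B, 2);   /* n, nrhs, A, lda, ipiv, B, ldb */
    culaShutdown();

    if (s != culaNoError) return -1;
    printf("x = (%f, %f)\n", B[0], B[1]);               /* expect roughly (1.6, 1.8) */
    return 0;
}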
a5f8b89b86a57116c9f093ee5ab7e5beb29afb6d.hip
// !!! This is a file automatically generated by hipify!!! //Inner product (dot product) of two vectors in a parallel fashion #include <stdio.h> #include <iostream> #include <hip/hip_runtime.h> #define N 1024 #define THREADS_PER_BLOCK 512 #define NUMBER_OF_BLOCKS (N/THREADS_PER_BLOCK) __global__ void innerProd(float *aa, float *bb, float *cc) { __shared__ float temp[THREADS_PER_BLOCK]; int index = threadIdx.x + blockIdx.x* blockDim.x; temp[threadIdx.x] = aa[index]*bb[index]; *cc = 0; // Initialized to avoid memory problems. See comments // below, next to the free and hipFree commands. // No thread goes beyond this point until all of them // have reached it. Threads are only synchronized within // a block. __syncthreads(); // Thread 0 sums the pairwise products if (threadIdx.x == 0) { float sum = 0; for (int i = 0; i < THREADS_PER_BLOCK; i++){ sum += temp[i]; } // Use atomicAdd to avoid different blocks accessing cc at the // same time (race condition). The atomic opperation enables // read-modify-write to be performed by a block without interruption. //*cc += sum; atomicAdd(cc, sum); } } int main(void) { float *a, *b, *c;// host copies of a, b, c float *d_a, *d_b, *d_c;// device copies of a, b, c float size = N * sizeof(float); //int sizeInGPU; a = (float *)malloc(size); b = (float *)malloc(size); c = (float *)malloc(sizeof(float)); // Define QoS: p0 // supervisor(float *lambda_GPU) // sizeInGPU = lambda_GPU*N; for (int i = 0; i < N; i++) { a[i] = 2; b[i] = 0.5; } hipMalloc((void**)&d_a, size); hipMalloc((void**)&d_b, size); hipMalloc((void**)&d_c, sizeof(float)); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); // Call kernel hipLaunchKernelGGL(( innerProd), dim3(NUMBER_OF_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c); // innerProd<<<1, N>>>(d_a, d_b, d_c); hipMemcpy(c, d_c, sizeof(float), hipMemcpyDeviceToHost); std::cout << "c = " << *c << "\n"; // Remember: free and hipFree DO NOT ERASE MEMORY! They only // return memory to a pool to be re-allocated. That is why the shared // variable 'cc' is initialized inside the kernel. See this: // http://stackoverflow.com/questions/13100615/cudafree-is-not-freeing-memory free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
a5f8b89b86a57116c9f093ee5ab7e5beb29afb6d.cu
//Inner product (dot product) of two vectors in a parallel fashion #include <stdio.h> #include <iostream> #include <cuda.h> #define N 1024 #define THREADS_PER_BLOCK 512 #define NUMBER_OF_BLOCKS (N/THREADS_PER_BLOCK) __global__ void innerProd(float *aa, float *bb, float *cc) { __shared__ float temp[THREADS_PER_BLOCK]; int index = threadIdx.x + blockIdx.x* blockDim.x; temp[threadIdx.x] = aa[index]*bb[index]; *cc = 0; // Initialized to avoid memory problems. See comments // below, next to the free and cudaFree commands. // No thread goes beyond this point until all of them // have reached it. Threads are only synchronized within // a block. __syncthreads(); // Thread 0 sums the pairwise products if (threadIdx.x == 0) { float sum = 0; for (int i = 0; i < THREADS_PER_BLOCK; i++){ sum += temp[i]; } // Use atomicAdd to avoid different blocks accessing cc at the // same time (race condition). The atomic opperation enables // read-modify-write to be performed by a block without interruption. //*cc += sum; atomicAdd(cc, sum); } } int main(void) { float *a, *b, *c;// host copies of a, b, c float *d_a, *d_b, *d_c;// device copies of a, b, c float size = N * sizeof(float); //int sizeInGPU; a = (float *)malloc(size); b = (float *)malloc(size); c = (float *)malloc(sizeof(float)); // Define QoS: p0 // supervisor(float *lambda_GPU) // sizeInGPU = lambda_GPU*N; for (int i = 0; i < N; i++) { a[i] = 2; b[i] = 0.5; } cudaMalloc((void**)&d_a, size); cudaMalloc((void**)&d_b, size); cudaMalloc((void**)&d_c, sizeof(float)); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); // Call kernel innerProd<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>(d_a, d_b, d_c); // innerProd<<<1, N>>>(d_a, d_b, d_c); cudaMemcpy(c, d_c, sizeof(float), cudaMemcpyDeviceToHost); std::cout << "c = " << *c << "\n"; // Remember: free and cudaFree DO NOT ERASE MEMORY! They only // return memory to a pool to be re-allocated. That is why the shared // variable 'cc' is initialized inside the kernel. See this: // http://stackoverflow.com/questions/13100615/cudafree-is-not-freeing-memory free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
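// Sketch (not part of the file above): the in-kernel "*cc = 0" store is executed by every thread
// and races with atomicAdd() from blocks that finish earlier. A common alternative clears the
// accumulator once from the host, assuming the "*cc = 0;" line is removed from innerProd; the
// wrapper below reuses the kernel and macros defined above.
void innerProdHostSafe(float *d_a, float *d_b, float *d_c, float *result)
{
    cudaMemset(d_c, 0, sizeof(float));  // zero the device accumulator exactly once, before launch
    innerProd<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
    cudaMemcpy(result, d_c, sizeof(float), cudaMemcpyDeviceToHost);  // copy also synchronizes
}
// With a[i] = 2 and b[i] = 0.5 for all N = 1024 elements, the expected result is 1024.0.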
fb8543014de99564b193bd4c1c9de4429254851c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <algorithm> #include <string> #include "poisson2d.hpp" #include "timer.hpp" #define BLOCK_SIZE 256 #define GRID_SIZE 256 __global__ void dot_product(double *x, double *y, double *dot, unsigned int n) { unsigned int index = threadIdx.x + blockDim.x*blockIdx.x; unsigned int stride = blockDim.x*gridDim.x; __shared__ double cache[256]; double temp = 0.0; while(index < n){ temp += x[index]*y[index]; index += stride; } cache[threadIdx.x] = temp; __syncthreads(); for(int i = blockDim.x/2; i>0; i/=2) { __syncthreads(); if(threadIdx.x < i) cache[threadIdx.x] += cache[threadIdx.x + i]; } if(threadIdx.x == 0){ atomicAdd(dot, cache[0]); } } // Naming is motivated by BLAS/LAPACK naming scheme...though bit simplified. __global__ void xADDay(const size_t N, double *x, double *y, double *z, const double alpha) { const size_t stride = blockDim.x * gridDim.x; for(size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) z[i] = x[i] + alpha * y[i]; } __global__ void xDOTy(const size_t N, double* x, double* y, double* z) { size_t tid = threadIdx.x + blockDim.x* blockIdx.x; size_t stride = blockDim.x* gridDim.x; __shared__ double cache[BLOCK_SIZE]; double tid_sum = 0.0; for (; tid < N; tid += stride) { tid_sum += x[tid] * y[tid]; } tid = threadIdx.x; cache[tid] = tid_sum; __syncthreads(); for (size_t i = blockDim.x/2; i != 0; i /=2) { __syncthreads(); if (tid < i) //lower half does smth, rest idles cache[tid] += cache[tid + i]; //lower looks up by stride and sums up } if(tid == 0) // cache[0] now contains block_sum { atomicAdd(z, cache[0]); } } int main() { int N = 256; double xInit = 1.; double alpha = 2.; double yInit = 2.5; double *x = (double*)malloc(sizeof(double) * N); double *y = (double*)malloc(sizeof(double) * N); double *z = (double*)malloc(sizeof(double) * N); double *Dot = (double*)malloc(sizeof(double)); *Dot = -1.; std::fill(x, x + N, xInit); std::fill(y, y + N, yInit); std::fill(z, z + N, 0.0); double *px, *py, *pz, *pDot; hipMalloc(&px, N*sizeof(double)); hipMalloc(&py, N*sizeof(double)); hipMalloc(&pz, N*sizeof(double)); hipMalloc(&pDot, sizeof(double)); hipMemcpy(px, x, N*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(py, y, N*sizeof(double), hipMemcpyHostToDevice); //hipMemcpy(pz, z, N*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(pDot, Dot, sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( xADDay), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, px, py, pz, alpha); hipDeviceSynchronize(); hipLaunchKernelGGL(( xDOTy), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, px, py, pDot); //dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(px, py, pDot, N); hipDeviceSynchronize(); hipMemcpy(z, pz, N*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(Dot, pDot, sizeof(double), hipMemcpyDeviceToHost); hipDeviceSynchronize(); std::cout << "Checking xADDay..." << std::endl; int cnt = 0; for (int i = 0; i < N; ++i) if (z[i] != xInit + alpha*yInit) ++cnt; if (cnt) std::cout << "Something went wrong...let's see:" << std::endl; else std::cout << "Everything ok, see:" << std::endl; for (int i = 0; i < 5; ++i) std::cout << "z[" << i << "] = " << z[i] << std::endl; std::cout << "..." << std::endl; for (int i = N-1-5; i < N; ++i) std::cout << "z[" << i << "] = " << z[i] << std::endl; std::cout << "-----------------------------------" << std::endl; std::cout << "Checking xDOTy..." 
<< std::endl; if (*Dot != xInit*yInit*N) std::cout << "NOPE: " << *Dot << " != " << xInit*yInit*N << std::endl; else std::cout << "OK: " << *Dot << " == " << xInit*yInit*N << std::endl; free(x); free(y); free(z); free(Dot); hipFree(px); hipFree(py); hipFree(pz); hipFree(pDot); return EXIT_SUCCESS; }
fb8543014de99564b193bd4c1c9de4429254851c.cu
#include <stdio.h> #include <iostream> #include <algorithm> #include <string> #include "poisson2d.hpp" #include "timer.hpp" #define BLOCK_SIZE 256 #define GRID_SIZE 256 __global__ void dot_product(double *x, double *y, double *dot, unsigned int n) { unsigned int index = threadIdx.x + blockDim.x*blockIdx.x; unsigned int stride = blockDim.x*gridDim.x; __shared__ double cache[256]; double temp = 0.0; while(index < n){ temp += x[index]*y[index]; index += stride; } cache[threadIdx.x] = temp; __syncthreads(); for(int i = blockDim.x/2; i>0; i/=2) { __syncthreads(); if(threadIdx.x < i) cache[threadIdx.x] += cache[threadIdx.x + i]; } if(threadIdx.x == 0){ atomicAdd(dot, cache[0]); } } // Naming is motivated by BLAS/LAPACK naming scheme...though bit simplified. __global__ void xADDay(const size_t N, double *x, double *y, double *z, const double alpha) { const size_t stride = blockDim.x * gridDim.x; for(size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) z[i] = x[i] + alpha * y[i]; } __global__ void xDOTy(const size_t N, double* x, double* y, double* z) { size_t tid = threadIdx.x + blockDim.x* blockIdx.x; size_t stride = blockDim.x* gridDim.x; __shared__ double cache[BLOCK_SIZE]; double tid_sum = 0.0; for (; tid < N; tid += stride) { tid_sum += x[tid] * y[tid]; } tid = threadIdx.x; cache[tid] = tid_sum; __syncthreads(); for (size_t i = blockDim.x/2; i != 0; i /=2) { __syncthreads(); if (tid < i) //lower half does smth, rest idles cache[tid] += cache[tid + i]; //lower looks up by stride and sums up } if(tid == 0) // cache[0] now contains block_sum { atomicAdd(z, cache[0]); } } int main() { int N = 256; double xInit = 1.; double alpha = 2.; double yInit = 2.5; double *x = (double*)malloc(sizeof(double) * N); double *y = (double*)malloc(sizeof(double) * N); double *z = (double*)malloc(sizeof(double) * N); double *Dot = (double*)malloc(sizeof(double)); *Dot = -1.; std::fill(x, x + N, xInit); std::fill(y, y + N, yInit); std::fill(z, z + N, 0.0); double *px, *py, *pz, *pDot; cudaMalloc(&px, N*sizeof(double)); cudaMalloc(&py, N*sizeof(double)); cudaMalloc(&pz, N*sizeof(double)); cudaMalloc(&pDot, sizeof(double)); cudaMemcpy(px, x, N*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(py, y, N*sizeof(double), cudaMemcpyHostToDevice); //cudaMemcpy(pz, z, N*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(pDot, Dot, sizeof(double), cudaMemcpyHostToDevice); xADDay<<<GRID_SIZE, BLOCK_SIZE>>>(N, px, py, pz, alpha); cudaDeviceSynchronize(); xDOTy<<<GRID_SIZE, BLOCK_SIZE>>>(N, px, py, pDot); //dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(px, py, pDot, N); cudaDeviceSynchronize(); cudaMemcpy(z, pz, N*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(Dot, pDot, sizeof(double), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); std::cout << "Checking xADDay..." << std::endl; int cnt = 0; for (int i = 0; i < N; ++i) if (z[i] != xInit + alpha*yInit) ++cnt; if (cnt) std::cout << "Something went wrong...let's see:" << std::endl; else std::cout << "Everything ok, see:" << std::endl; for (int i = 0; i < 5; ++i) std::cout << "z[" << i << "] = " << z[i] << std::endl; std::cout << "..." << std::endl; for (int i = N-1-5; i < N; ++i) std::cout << "z[" << i << "] = " << z[i] << std::endl; std::cout << "-----------------------------------" << std::endl; std::cout << "Checking xDOTy..." 
<< std::endl; if (*Dot != xInit*yInit*N) std::cout << "NOPE: " << *Dot << " != " << xInit*yInit*N << std::endl; else std::cout << "OK: " << *Dot << " == " << xInit*yInit*N << std::endl; free(x); free(y); free(z); free(Dot); cudaFree(px); cudaFree(py); cudaFree(pz); cudaFree(pDot); return EXIT_SUCCESS; }
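// Sketch (not the file's own kernel): the halving loop in xDOTy assumes BLOCK_SIZE is a power of
// two. An equivalent formulation reduces within each warp via __shfl_down_sync (CUDA 9+, and
// atomicAdd on double needs compute capability >= 6.0, same as the kernel above); it assumes
// blockDim.x is a multiple of warpSize, which holds for BLOCK_SIZE = 256.
__global__ void xDOTy_shfl(const size_t N, const double *x, const double *y, double *z)
{
    double partial = 0.0;
    for (size_t i = threadIdx.x + blockDim.x * (size_t)blockIdx.x; i < N;
         i += blockDim.x * (size_t)gridDim.x)
        partial += x[i] * y[i];                       // grid-stride accumulation, as above

    for (int offset = warpSize / 2; offset > 0; offset /= 2)
        partial += __shfl_down_sync(0xffffffff, partial, offset);   // reduce within the warp

    if ((threadIdx.x & (warpSize - 1)) == 0)          // lane 0 of each warp holds the warp sum
        atomicAdd(z, partial);
}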
d794b9ad112e6aa96293b3de7040cbfcf63848cf.hip
// !!! This is a file automatically generated by hipify!!! // // Created by root on 2020/11/24. // #include "rocblas.h" #include "hip/hip_runtime.h" #include "stdio.h" int M = 1024; int N = 1024; void generate_random_vector(int n, float** outX) { int i = 0; double rMax = (double ) RAND_MAX; float* X = (float *) malloc(n * sizeof(float )); for (; i < n; i++) { X[i] = ((double ) rand()) / rMax * 100.0; } *outX = X; } void generate_random_dense_matrix(int m, int n, float** outX) { int i = 0, j = 0; double rMax = (double ) RAND_MAX; float *A = (float *) malloc(m * n * sizeof(float )); for (; j < n; j++) { for (; i < m; i++) { int r = rand(); float* curr = A + (j * m + i); if (r % 3 > 0) { *curr = 0.0f; } else { *curr = ((double ) r) / rMax * 100; } } } *outX = A; } // nvcc -lcublas -o CuBlasTest CuBlasTest.cu int main() { int i; float *A, *dA, *X, *dX, *Y, *dY; float alpha = 3.0f, beta=4.0f; hipblasHandle_t handle; // allocate memory A = (float *) malloc(sizeof(float ) * M * N); X = (float *) malloc(sizeof(float ) * N); Y = (float *) malloc(sizeof(float ) * M); hipMalloc(&dA, sizeof(float ) * M * N); hipMalloc(&dX, sizeof(float ) * N); hipMalloc(&dY, sizeof(float) * M); // generate input srand(3432); generate_random_dense_matrix(M, N, &A); generate_random_vector(N, &X); generate_random_vector(M, &Y); // create cublas handle hipblasCreate(&handle); // get cublas data from original input hipblasSetMatrix(M, N, sizeof(float), A, M, dA, N); hipblasSetVector(N, sizeof(float ), X, 1, dX, 1); hipblasSetVector(M, sizeof(float ), Y, 1, dY, 1); // perform matrix multiplication y = alpha * op(A) * x + beta * y hipblasSgemv(handle, HIPBLAS_OP_N, M, N, &alpha, dA, M, dX, 1, &beta, dY, 1); // get result from cublas and print demo data hipblasGetVector(M, sizeof(float ), dY, 1, Y, 1); for (i = 0; i < 10; i++) { printf("%.2f\t", Y[i]); } printf("\n"); // free memory free(A); free(X); free(Y); hipFree(dA); hipFree(dX); hipFree(dY); return 0; }
d794b9ad112e6aa96293b3de7040cbfcf63848cf.cu
// // Created by root on 2020/11/24. // #include "cublas_v2.h" #include "cuda_runtime.h" #include "stdio.h" int M = 1024; int N = 1024; void generate_random_vector(int n, float** outX) { int i = 0; double rMax = (double ) RAND_MAX; float* X = (float *) malloc(n * sizeof(float )); for (; i < n; i++) { X[i] = ((double ) rand()) / rMax * 100.0; } *outX = X; } void generate_random_dense_matrix(int m, int n, float** outX) { int i = 0, j = 0; double rMax = (double ) RAND_MAX; float *A = (float *) malloc(m * n * sizeof(float )); for (; j < n; j++) { for (; i < m; i++) { int r = rand(); float* curr = A + (j * m + i); if (r % 3 > 0) { *curr = 0.0f; } else { *curr = ((double ) r) / rMax * 100; } } } *outX = A; } // nvcc -lcublas -o CuBlasTest CuBlasTest.cu int main() { int i; float *A, *dA, *X, *dX, *Y, *dY; float alpha = 3.0f, beta=4.0f; cublasHandle_t handle; // allocate memory A = (float *) malloc(sizeof(float ) * M * N); X = (float *) malloc(sizeof(float ) * N); Y = (float *) malloc(sizeof(float ) * M); cudaMalloc(&dA, sizeof(float ) * M * N); cudaMalloc(&dX, sizeof(float ) * N); cudaMalloc(&dY, sizeof(float) * M); // generate input srand(3432); generate_random_dense_matrix(M, N, &A); generate_random_vector(N, &X); generate_random_vector(M, &Y); // create cublas handle cublasCreate(&handle); // get cublas data from original input cublasSetMatrix(M, N, sizeof(float), A, M, dA, N); cublasSetVector(N, sizeof(float ), X, 1, dX, 1); cublasSetVector(M, sizeof(float ), Y, 1, dY, 1); // perform matrix multiplication y = alpha * op(A) * x + beta * y cublasSgemv_v2(handle, CUBLAS_OP_N, M, N, &alpha, dA, M, dX, 1, &beta, dY, 1); // get result from cublas and print demo data cublasGetVector(M, sizeof(float ), dY, 1, Y, 1); for (i = 0; i < 10; i++) { printf("%.2f\t", Y[i]); } printf("\n"); // free memory free(A); free(X); free(Y); cudaFree(dA); cudaFree(dX); cudaFree(dY); return 0; }
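// Sketch (not part of the file above): in generate_random_dense_matrix the row index i is never
// reset, so after the first column the inner loop no longer executes and the rest of A keeps
// whatever malloc returned. A variant that fills every column, assuming that was the intent:
void generate_random_dense_matrix_fixed(int m, int n, float **outX)
{
    double rMax = (double)RAND_MAX;
    float *A = (float *)malloc(m * n * sizeof(float));
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < m; i++) {                 // i restarts for every column
            int r = rand();
            A[j * m + i] = (r % 3 > 0) ? 0.0f : (float)(((double)r) / rMax * 100.0);
        }
    }
    *outX = A;                                        // caller owns A; note the mallocs for A, X, Y
}                                                     // in main above are overwritten and leak
// Note: cublasSetMatrix(rows, cols, elemSize, A, lda, B, ldb) takes the leading dimension of the
// *device* matrix as its last argument; with dA stored as M x N that is M. The call above passes
// N, which only works here because M == N == 1024.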
5d435fbc45a4d2b85f7ccd403cf8d0669524d73e.hip
// !!! This is a file automatically generated by hipify!!! #include<tfhe++.hpp> #include<params.hpp> #include <random> #include <cassert> #include <iostream> #include <cmath> #include "externalproduct.cuh" using namespace FFHEE; using namespace SPCULIOS; using namespace std; int main( int argc, char** argv) { using namespace TFHEpp; random_device seed_gen; default_random_engine engine(seed_gen()); uniform_int_distribution<uint32_t> binary(0, 1); lweKey key; array<bool, DEF_N> p; for (bool &i : p) i = (binary(engine) > 0); Polynomiallvl1 pmu; for (int i = 0; i < DEF_N; i++) pmu[i] = p[i] ? DEF_μ : -DEF_μ; TRLWElvl1 c = trlweSymEncryptlvl1(pmu, DEF_αbk, key.lvl1); TRLWElvl1 res; TRGSWFFTlvl1 trgswfft = trgswfftSymEncryptlvl1(1, DEF_αbk, key.lvl1); FFTinit(); FFHEE::trgswfftExternalProductlvl1(res,c,trgswfft); hipDeviceSynchronize(); array<bool, DEF_N> p2 = trlweSymDecryptlvl1(res, key.lvl1); for(int i = 0;i<DEF_N;i++) {cout<<i<<":"<<p2[i]<<":"<<p[i]<<endl;} for (int i = 0; i < DEF_N; i++) assert(p[i] == p2[i]); cout<<"PASS"<<endl; }
5d435fbc45a4d2b85f7ccd403cf8d0669524d73e.cu
#include<tfhe++.hpp> #include<params.hpp> #include <random> #include <cassert> #include <iostream> #include <cmath> #include "externalproduct.cuh" using namespace FFHEE; using namespace SPCULIOS; using namespace std; int main( int argc, char** argv) { using namespace TFHEpp; random_device seed_gen; default_random_engine engine(seed_gen()); uniform_int_distribution<uint32_t> binary(0, 1); lweKey key; array<bool, DEF_N> p; for (bool &i : p) i = (binary(engine) > 0); Polynomiallvl1 pmu; for (int i = 0; i < DEF_N; i++) pmu[i] = p[i] ? DEF_μ : -DEF_μ; TRLWElvl1 c = trlweSymEncryptlvl1(pmu, DEF_αbk, key.lvl1); TRLWElvl1 res; TRGSWFFTlvl1 trgswfft = trgswfftSymEncryptlvl1(1, DEF_αbk, key.lvl1); FFTinit(); FFHEE::trgswfftExternalProductlvl1(res,c,trgswfft); cudaDeviceSynchronize(); array<bool, DEF_N> p2 = trlweSymDecryptlvl1(res, key.lvl1); for(int i = 0;i<DEF_N;i++) {cout<<i<<":"<<p2[i]<<":"<<p[i]<<endl;} for (int i = 0; i < DEF_N; i++) assert(p[i] == p2[i]); cout<<"PASS"<<endl; }
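// Sketch (not part of the test above): the test only checks decryption of the external product.
// Wrapping the same call in CUDA events gives a quick latency measurement; this assumes the
// argument types and signature of FFHEE::trgswfftExternalProductlvl1 match the call above, and
// only standard cudaEvent_t APIs are used.
float time_external_product(TFHEpp::TRLWElvl1 &res, TFHEpp::TRLWElvl1 &c,
                            TFHEpp::TRGSWFFTlvl1 &trgswfft)
{
    cudaEvent_t start, stop;
    float ms = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    FFHEE::trgswfftExternalProductlvl1(res, c, trgswfft);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);              // wait for the launched work to finish
    cudaEventElapsedTime(&ms, start, stop);  // elapsed time in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}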
4a4c53f7776f3f66b0b963af38de53977f3dac55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from ztrtri_diag.cu normal z -> d, Fri Sep 11 18:29:22 2015 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu */ #include "common_magma.h" #include "dtrtri.cuh" /** Inverts the NB x NB diagonal blocks of a triangular matrix. This routine is used in dtrsm. Same as dtrtri_diag, but adds queue argument. @ingroup magma_dblas3 ********************************************************************/ /** Purpose ------- dtrtri_diag inverts the NB x NB diagonal blocks of A. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] n INTEGER. On entry, n specifies the order of the matrix A. N >= 0. @param[in] dA DOUBLE_PRECISION array of dimension ( ldda, n ) The triangular matrix A. \n If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular matrix, and the strictly lower triangular part of A is not referenced. \n If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular matrix, and the strictly upper triangular part of A is not referenced. \n If DIAG = 'U', the diagonal elements of A are also not referenced and are assumed to be 1. @param[in] ldda INTEGER. The leading dimension of the array A. LDDA >= max(1,N). @param[out] d_dinvA DOUBLE_PRECISION array of dimension (NB, ceil(n/NB)*NB), where NB = 128. On exit, contains inverses of the NB-by-NB diagonal blocks of A. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_dblas3 ********************************************************************/ extern "C" void magmablas_dtrtri_diag_q( magma_uplo_t uplo, magma_diag_t diag, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr d_dinvA, magma_queue_t queue) { magma_int_t info = 0; if (uplo != MagmaLower && uplo != MagmaUpper) info = -1; else if (diag != MagmaNonUnit && diag != MagmaUnit) info = -2; else if (n < 0) info = -3; else if (ldda < n) info = -5; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info } int nblocks = magma_ceildiv( n, IB ); hipMemset( d_dinvA, 0, magma_roundup( n, NB )*NB * sizeof(double) ); if ( uplo == MagmaLower ) { // invert diagonal IB x IB inner blocks hipLaunchKernelGGL(( dtrtri_diag_lower_kernel), dim3(nblocks), dim3(IB), 0, queue , diag, n, dA, ldda, d_dinvA ); // build up NB x NB blocks (assuming IB=16 here): // use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads; // then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads; // then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads; // then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads. 
for( int jb=IB; jb < NB; jb *= 2 ) { int kb = jb*2; int npages = magma_ceildiv( n, kb ); dim3 threads( (jb <= 32 ? jb/4 : 16), 4 ); dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x //printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages ); switch (jb) { case 16: hipLaunchKernelGGL(( triple_dgemm16_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); hipLaunchKernelGGL(( triple_dgemm16_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); break; case 32: hipLaunchKernelGGL(( triple_dgemm32_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); hipLaunchKernelGGL(( triple_dgemm32_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); break; case 64: hipLaunchKernelGGL(( triple_dgemm64_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); hipLaunchKernelGGL(( triple_dgemm64_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); break; default: hipLaunchKernelGGL(( triple_dgemm_above64_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); hipLaunchKernelGGL(( triple_dgemm_above64_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); hipLaunchKernelGGL(( triple_dgemm_above64_part3_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); break; } if ( kb >= n ) break; } } else { hipLaunchKernelGGL(( dtrtri_diag_upper_kernel), dim3(nblocks), dim3(IB), 0, queue , diag, n, dA, ldda, d_dinvA ); // update the inverse up to the size of IB for( int jb=IB; jb < NB; jb *= 2 ) { int kb = jb*2; int npages = magma_ceildiv( n, kb ); dim3 threads( (jb <= 32 ? 
jb/4 : 16), 4 ); dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x switch (jb) { case 16: hipLaunchKernelGGL(( triple_dgemm16_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); hipLaunchKernelGGL(( triple_dgemm16_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); break; case 32: hipLaunchKernelGGL(( triple_dgemm32_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); hipLaunchKernelGGL(( triple_dgemm32_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); break; case 64: hipLaunchKernelGGL(( triple_dgemm64_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); hipLaunchKernelGGL(( triple_dgemm64_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); break; default: hipLaunchKernelGGL(( triple_dgemm_above64_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); hipLaunchKernelGGL(( triple_dgemm_above64_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); hipLaunchKernelGGL(( triple_dgemm_above64_part3_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages ); break; } if ( kb >= n ) break; } } } /** @see magmablas_dtrtri_diag_q @ingroup magma_dblas3 ********************************************************************/ extern "C" void magmablas_dtrtri_diag( magma_uplo_t uplo, magma_diag_t diag, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr d_dinvA) { magmablas_dtrtri_diag_q( uplo, diag, n, dA, ldda, d_dinvA, magma_stream ); }
4a4c53f7776f3f66b0b963af38de53977f3dac55.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from ztrtri_diag.cu normal z -> d, Fri Sep 11 18:29:22 2015 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu */ #include "common_magma.h" #include "dtrtri.cuh" /** Inverts the NB x NB diagonal blocks of a triangular matrix. This routine is used in dtrsm. Same as dtrtri_diag, but adds queue argument. @ingroup magma_dblas3 ********************************************************************/ /** Purpose ------- dtrtri_diag inverts the NB x NB diagonal blocks of A. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] n INTEGER. On entry, n specifies the order of the matrix A. N >= 0. @param[in] dA DOUBLE_PRECISION array of dimension ( ldda, n ) The triangular matrix A. \n If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular matrix, and the strictly lower triangular part of A is not referenced. \n If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular matrix, and the strictly upper triangular part of A is not referenced. \n If DIAG = 'U', the diagonal elements of A are also not referenced and are assumed to be 1. @param[in] ldda INTEGER. The leading dimension of the array A. LDDA >= max(1,N). @param[out] d_dinvA DOUBLE_PRECISION array of dimension (NB, ceil(n/NB)*NB), where NB = 128. On exit, contains inverses of the NB-by-NB diagonal blocks of A. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_dblas3 ********************************************************************/ extern "C" void magmablas_dtrtri_diag_q( magma_uplo_t uplo, magma_diag_t diag, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr d_dinvA, magma_queue_t queue) { magma_int_t info = 0; if (uplo != MagmaLower && uplo != MagmaUpper) info = -1; else if (diag != MagmaNonUnit && diag != MagmaUnit) info = -2; else if (n < 0) info = -3; else if (ldda < n) info = -5; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info } int nblocks = magma_ceildiv( n, IB ); cudaMemset( d_dinvA, 0, magma_roundup( n, NB )*NB * sizeof(double) ); if ( uplo == MagmaLower ) { // invert diagonal IB x IB inner blocks dtrtri_diag_lower_kernel<<< nblocks, IB, 0, queue >>>( diag, n, dA, ldda, d_dinvA ); // build up NB x NB blocks (assuming IB=16 here): // use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads; // then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads; // then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads; // then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads. for( int jb=IB; jb < NB; jb *= 2 ) { int kb = jb*2; int npages = magma_ceildiv( n, kb ); dim3 threads( (jb <= 32 ? 
jb/4 : 16), 4 ); dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x //printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages ); switch (jb) { case 16: triple_dgemm16_part1_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); triple_dgemm16_part2_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); break; case 32: triple_dgemm32_part1_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); triple_dgemm32_part2_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); break; case 64: triple_dgemm64_part1_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); triple_dgemm64_part2_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); break; default: triple_dgemm_above64_part1_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); triple_dgemm_above64_part2_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); triple_dgemm_above64_part3_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); break; } if ( kb >= n ) break; } } else { dtrtri_diag_upper_kernel<<< nblocks, IB, 0, queue >>>( diag, n, dA, ldda, d_dinvA ); // update the inverse up to the size of IB for( int jb=IB; jb < NB; jb *= 2 ) { int kb = jb*2; int npages = magma_ceildiv( n, kb ); dim3 threads( (jb <= 32 ? jb/4 : 16), 4 ); dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x switch (jb) { case 16: triple_dgemm16_part1_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); triple_dgemm16_part2_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); break; case 32: triple_dgemm32_part1_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); triple_dgemm32_part2_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); break; case 64: triple_dgemm64_part1_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); triple_dgemm64_part2_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); break; default: triple_dgemm_above64_part1_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); triple_dgemm_above64_part2_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); triple_dgemm_above64_part3_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages ); break; } if ( kb >= n ) break; } } } /** @see magmablas_dtrtri_diag_q @ingroup magma_dblas3 ********************************************************************/ extern "C" void magmablas_dtrtri_diag( magma_uplo_t uplo, magma_diag_t diag, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr d_dinvA) { magmablas_dtrtri_diag_q( uplo, diag, n, dA, ldda, d_dinvA, magma_stream ); }
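// Sketch (not part of the MAGMA file above): the jb-doubling loop is easiest to follow by printing
// the launch geometry it produces. This uses the same formulas as above, with NB = 128 as
// documented and IB = 16 as assumed in the comments above; both really come from dtrtri.cuh.
#include <cstdio>
static int ceil_div(int a, int b) { return (a + b - 1) / b; }
void print_trtri_schedule(int n)
{
    const int IB_ = 16, NB_ = 128;           // local stand-ins for IB and NB
    for (int jb = IB_; jb < NB_; jb *= 2) {
        int kb = jb * 2;
        int npages = ceil_div(n, kb);
        int tx = (jb <= 32 ? jb / 4 : 16), ty = 4;
        int gx = jb / (tx * ty), gy = npages * (jb / 16);
        printf("jb=%3d kb=%3d npages=%d threads=(%d,%d) grid=(%d,%d)\n",
               jb, kb, npages, tx, ty, gx, gy);
        if (kb >= n) break;
    }
}
// For n = 500 this prints rows for jb = 16, 32, 64: the 16x16 inverses are combined into
// 32x32, then 64x64, then 128x128 diagonal blocks, exactly the build-up described above.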
830b3905da3a1f7b8389e5bec4a9e42c38e7a7e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * globalRead.cu * * Microbenchmark for read bandwidth from global memory. * * Build with: nvcc -I ../chLib <options> globalRead.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include <chError.h> #include <chCommandLine.h> template<class T> __device__ __host__ T plus( const T& a, const T& b ) { T ret = a; ret += b; return ret; } struct myInt2 { int2 i2; __host__ __device__ myInt2() { } __host__ __device__ myInt2( int i ) { i2.x = i2.y = i; } }; template<> __device__ __host__ myInt2 plus( const myInt2& a, const myInt2& b ) { myInt2 ret; ret.i2.x = a.i2.x + b.i2.x; ret.i2.y = a.i2.y + b.i2.y; return ret; } struct myInt4 { int4 i4; __host__ __device__ myInt4() { } __host__ __device__ myInt4( int i ) { i4.x = i4.y = i4.z = i4.w = i; } }; template<> __device__ __host__ myInt4 plus( const myInt4& a, const myInt4& b ) { myInt4 ret; ret.i4.x = a.i4.x + b.i4.x; ret.i4.y = a.i4.y + b.i4.y; ret.i4.z = a.i4.z + b.i4.z; ret.i4.w = a.i4.w + b.i4.w; return ret; } template<class T, const int n> __global__ void GlobalReads( T *out, const T *in, size_t N, bool bWriteResults ) { T sums[n]; size_t i; for ( int j = 0; j < n; j++ ) { sums[j] = T(0); } for ( i = n*blockIdx.x*blockDim.x+threadIdx.x; i < N-n*blockDim.x*gridDim.x; i += n*blockDim.x*gridDim.x ) { for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; sums[j] = plus( sums[j], in[index] ); } } // to avoid the (index<N) conditional in the inner loop, // we left off some work at the end for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; if ( index<N ) sums[j] = plus( sums[j], in[index] ); } if ( bWriteResults ) { T sum = T(0); for ( int j = 0; j < n; j++ ) { sum = plus( sum, sums[j] ); } out[blockIdx.x*blockDim.x+threadIdx.x] = sum; } } template<class T, const int n, bool bOffset> double BandwidthReads( size_t N, int cBlocks, int cThreads ) { T *in = 0; T *out = 0; T *hostIn = 0; T *hostOut = 0; double ret = 0.0; double elapsedTime; float ms; int cIterations; hipError_t status; T sumCPU; hipEvent_t evStart = 0; hipEvent_t evStop = 0; cuda(Malloc( &in, N*sizeof(T) ) ); cuda(Malloc( &out, 
cBlocks*cThreads*sizeof(T) ) ); hostIn = new T[N]; if ( ! hostIn ) goto Error; hostOut = new T[cBlocks*cThreads]; if ( ! hostOut ) goto Error; sumCPU = T(0); // populate input array with random numbers for ( size_t i = bOffset; i < N; i++ ) { T nextrand = T(rand()); sumCPU = plus( sumCPU, nextrand ); hostIn[i] = nextrand; } cuda(Memcpy( in, hostIn, N*sizeof(T), hipMemcpyHostToDevice ) ); cuda(EventCreate( &evStart ) ); cuda(EventCreate( &evStop ) ); { // confirm that kernel launch with this configuration writes correct result hipLaunchKernelGGL(( GlobalReads<T,n>), dim3(cBlocks),dim3(cThreads), 0, 0, out, in+bOffset, N-bOffset, true ); cuda(Memcpy( hostOut, out, cBlocks*cThreads*sizeof(T), hipMemcpyDeviceToHost ) ); cuda(GetLastError() ); T sumGPU = T(0); for ( size_t i = 0; i < cBlocks*cThreads; i++ ) { sumGPU = plus( sumGPU, hostOut[i] ); } if ( memcmp( &sumCPU, &sumGPU, sizeof(T) ) ) { printf( "Incorrect sum computed!\n" ); goto Error; } } cIterations = 10; hipEventRecord( evStart ); for ( int i = 0; i < cIterations; i++ ) { hipLaunchKernelGGL(( GlobalReads<T,n>), dim3(cBlocks),dim3(cThreads), 0, 0, out, in+bOffset, N-bOffset, false ); } hipEventRecord( evStop ); cuda(ThreadSynchronize() ); // make configurations that cannot launch error-out with 0 bandwidth cuda(GetLastError() ); cuda(EventElapsedTime( &ms, evStart, evStop ) ); elapsedTime = ms/1000.0f; // bytes per second ret = ((double)N*cIterations*sizeof(T)) / elapsedTime; // gigabytes per second ret /= 1024.0*1048576.0; Error: if ( hostIn ) delete[] hostIn; if ( hostOut ) delete[] hostOut; hipEventDestroy( evStart ); hipEventDestroy( evStop ); hipFree( in ); hipFree( out ); return ret; } template<class T, const int n, bool bOffset> double ReportRow( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { int maxThreads = 0; double maxBW = 0.0; printf( "%d\t", n ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { double bw = BandwidthReads<T,n,bOffset>( N, cBlocks, cThreads ); if ( bw > maxBW ) { maxBW = bw; maxThreads = cThreads; } printf( "%.2f\t", bw ); } printf( "%.2f\t%d\n", maxBW, maxThreads ); return maxBW; } template<class T, bool bCoalesced> void Shmoo( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { printf( "Operand size: %d byte%c\n", (int) sizeof(T), sizeof(T)==1 ? '\0' : 's' ); printf( "Input size: %dM operands\n", (int) (N>>20) ); printf( "Unroll\t" ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { printf( "%d\t", cThreads ); } printf( "maxBW\tmaxThreads\n" ); ReportRow<T, 1, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 2, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 3, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 4, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 5, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 6, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 7, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 8, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 9, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,10, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,11, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,12, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,13, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,14, ! 
bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,15, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,16, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); } int main( int argc, char *argv[] ) { hipError_t status; int device = 0; int size = 16; hipDeviceProp_t prop; if ( chCommandLineGet( &device, "device", argc, argv ) ) { printf( "Using device %d...\n", device ); } cuda(SetDevice(device) ); cuda(GetDeviceProperties( &prop, device ) ); printf( "Running globalRead.cu microbenchmark on %s\n", prop.name ); if ( chCommandLineGet( &size, "size", argc, argv ) ) { printf( "Using %dM operands ...\n", size ); } if ( chCommandLineGetBool( "uncoalesced", argc, argv ) ) { printf( "Using uncoalesced memory transactions\n" ); Shmoo<char,false>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<short,false>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<int,false>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<myInt2,false>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<myInt4,false>( (size_t) size*1048576, 32, 512, 1500 ); } else { printf( "Using coalesced memory transactions\n" ); Shmoo<char,true>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<short,true>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<int,true>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<myInt2,true>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<myInt4,true>( (size_t) size*1048576, 32, 512, 1500 ); } return 0; Error: return 1; }
830b3905da3a1f7b8389e5bec4a9e42c38e7a7e1.cu
/* * * globalRead.cu * * Microbenchmark for read bandwidth from global memory. * * Build with: nvcc -I ../chLib <options> globalRead.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include <chError.h> #include <chCommandLine.h> template<class T> __device__ __host__ T plus( const T& a, const T& b ) { T ret = a; ret += b; return ret; } struct myInt2 { int2 i2; __host__ __device__ myInt2() { } __host__ __device__ myInt2( int i ) { i2.x = i2.y = i; } }; template<> __device__ __host__ myInt2 plus( const myInt2& a, const myInt2& b ) { myInt2 ret; ret.i2.x = a.i2.x + b.i2.x; ret.i2.y = a.i2.y + b.i2.y; return ret; } struct myInt4 { int4 i4; __host__ __device__ myInt4() { } __host__ __device__ myInt4( int i ) { i4.x = i4.y = i4.z = i4.w = i; } }; template<> __device__ __host__ myInt4 plus( const myInt4& a, const myInt4& b ) { myInt4 ret; ret.i4.x = a.i4.x + b.i4.x; ret.i4.y = a.i4.y + b.i4.y; ret.i4.z = a.i4.z + b.i4.z; ret.i4.w = a.i4.w + b.i4.w; return ret; } template<class T, const int n> __global__ void GlobalReads( T *out, const T *in, size_t N, bool bWriteResults ) { T sums[n]; size_t i; for ( int j = 0; j < n; j++ ) { sums[j] = T(0); } for ( i = n*blockIdx.x*blockDim.x+threadIdx.x; i < N-n*blockDim.x*gridDim.x; i += n*blockDim.x*gridDim.x ) { for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; sums[j] = plus( sums[j], in[index] ); } } // to avoid the (index<N) conditional in the inner loop, // we left off some work at the end for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; if ( index<N ) sums[j] = plus( sums[j], in[index] ); } if ( bWriteResults ) { T sum = T(0); for ( int j = 0; j < n; j++ ) { sum = plus( sum, sums[j] ); } out[blockIdx.x*blockDim.x+threadIdx.x] = sum; } } template<class T, const int n, bool bOffset> double BandwidthReads( size_t N, int cBlocks, int cThreads ) { T *in = 0; T *out = 0; T *hostIn = 0; T *hostOut = 0; double ret = 0.0; double elapsedTime; float ms; int cIterations; cudaError_t status; T sumCPU; cudaEvent_t evStart = 0; cudaEvent_t evStop = 0; cuda(Malloc( &in, N*sizeof(T) ) ); cuda(Malloc( &out, cBlocks*cThreads*sizeof(T) ) ); hostIn = new T[N]; if ( ! 
hostIn ) goto Error; hostOut = new T[cBlocks*cThreads]; if ( ! hostOut ) goto Error; sumCPU = T(0); // populate input array with random numbers for ( size_t i = bOffset; i < N; i++ ) { T nextrand = T(rand()); sumCPU = plus( sumCPU, nextrand ); hostIn[i] = nextrand; } cuda(Memcpy( in, hostIn, N*sizeof(T), cudaMemcpyHostToDevice ) ); cuda(EventCreate( &evStart ) ); cuda(EventCreate( &evStop ) ); { // confirm that kernel launch with this configuration writes correct result GlobalReads<T,n><<<cBlocks,cThreads>>>( out, in+bOffset, N-bOffset, true ); cuda(Memcpy( hostOut, out, cBlocks*cThreads*sizeof(T), cudaMemcpyDeviceToHost ) ); cuda(GetLastError() ); T sumGPU = T(0); for ( size_t i = 0; i < cBlocks*cThreads; i++ ) { sumGPU = plus( sumGPU, hostOut[i] ); } if ( memcmp( &sumCPU, &sumGPU, sizeof(T) ) ) { printf( "Incorrect sum computed!\n" ); goto Error; } } cIterations = 10; cudaEventRecord( evStart ); for ( int i = 0; i < cIterations; i++ ) { GlobalReads<T,n><<<cBlocks,cThreads>>>( out, in+bOffset, N-bOffset, false ); } cudaEventRecord( evStop ); cuda(ThreadSynchronize() ); // make configurations that cannot launch error-out with 0 bandwidth cuda(GetLastError() ); cuda(EventElapsedTime( &ms, evStart, evStop ) ); elapsedTime = ms/1000.0f; // bytes per second ret = ((double)N*cIterations*sizeof(T)) / elapsedTime; // gigabytes per second ret /= 1024.0*1048576.0; Error: if ( hostIn ) delete[] hostIn; if ( hostOut ) delete[] hostOut; cudaEventDestroy( evStart ); cudaEventDestroy( evStop ); cudaFree( in ); cudaFree( out ); return ret; } template<class T, const int n, bool bOffset> double ReportRow( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { int maxThreads = 0; double maxBW = 0.0; printf( "%d\t", n ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { double bw = BandwidthReads<T,n,bOffset>( N, cBlocks, cThreads ); if ( bw > maxBW ) { maxBW = bw; maxThreads = cThreads; } printf( "%.2f\t", bw ); } printf( "%.2f\t%d\n", maxBW, maxThreads ); return maxBW; } template<class T, bool bCoalesced> void Shmoo( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { printf( "Operand size: %d byte%c\n", (int) sizeof(T), sizeof(T)==1 ? '\0' : 's' ); printf( "Input size: %dM operands\n", (int) (N>>20) ); printf( "Unroll\t" ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { printf( "%d\t", cThreads ); } printf( "maxBW\tmaxThreads\n" ); ReportRow<T, 1, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 2, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 3, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 4, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 5, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 6, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 7, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 8, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T, 9, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,10, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,11, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,12, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,13, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,14, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,15, ! bCoalesced>( N, threadStart, threadStop, cBlocks ); ReportRow<T,16, ! 
bCoalesced>( N, threadStart, threadStop, cBlocks ); } int main( int argc, char *argv[] ) { cudaError_t status; int device = 0; int size = 16; cudaDeviceProp prop; if ( chCommandLineGet( &device, "device", argc, argv ) ) { printf( "Using device %d...\n", device ); } cuda(SetDevice(device) ); cuda(GetDeviceProperties( &prop, device ) ); printf( "Running globalRead.cu microbenchmark on %s\n", prop.name ); if ( chCommandLineGet( &size, "size", argc, argv ) ) { printf( "Using %dM operands ...\n", size ); } if ( chCommandLineGetBool( "uncoalesced", argc, argv ) ) { printf( "Using uncoalesced memory transactions\n" ); Shmoo<char,false>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<short,false>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<int,false>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<myInt2,false>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<myInt4,false>( (size_t) size*1048576, 32, 512, 1500 ); } else { printf( "Using coalesced memory transactions\n" ); Shmoo<char,true>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<short,true>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<int,true>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<myInt2,true>( (size_t) size*1048576, 32, 512, 1500 ); Shmoo<myInt4,true>( (size_t) size*1048576, 32, 512, 1500 ); } return 0; Error: return 1; }
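The GiB/s figures printed by the shmoo come out of BandwidthReads as bytes moved divided by the event-measured seconds, scaled by 2^30. A small standalone sketch of that conversion (the helper name and the sample numbers are illustrative, not taken from the benchmark):

#include <stdio.h>

// Illustrative helper mirroring the reduction from (elements, iterations, ms) to GiB/s
// performed at the end of BandwidthReads.
static double readBandwidthGiBps( size_t numElements, size_t elemSize,
                                  int iterations, float elapsedMs )
{
    double seconds = elapsedMs / 1000.0;                    // event time is reported in ms
    double bytes   = (double) numElements * elemSize * iterations;
    return bytes / seconds / (1024.0 * 1048576.0);          // 2^30 bytes per GiB
}

int main()
{
    // e.g. 16M int operands read 10 times in 35 ms -> roughly 17.9 GiB/s
    printf( "%.2f GiB/s\n", readBandwidthGiBps( (size_t)16 << 20, sizeof(int), 10, 35.0f ) );
    return 0;
}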
a696487f68613346b6be4cabfbbd67aacb4207d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <time.h> #include <assert.h> #include <stdio.h> #include <cudnn.h> #define CUDA_CALL(x) do { \ hipError_t ____rc = (x); \ assert(____rc == hipSuccess); \ } while (0) /* Image channels, height, width. */ #define CHANNELS 1 #define HEIGHT 3 #define WIDTH 3 /* Tile size. */ #define TILE_WIDTH 1 #define TILE_HEIGHT 1 #define POOL_WIDTH 3 #define POOL_HEIGHT 3 #define DIV_RUP(x, y) (((x)+(y)-1)/(y)) __global__ void max_pool_kernel(int channels, int image_height, int image_width, int pool_height, int pool_width, double *global_pointer, double *output_pointer) { __shared__ double shared_pointer[2704]; int pad_width = pool_width/2; int pad_height = pool_height/2; int block_x_index = blockDim.x*blockIdx.x; int block_y_index = blockDim.y*blockIdx.y; int global_offset = blockIdx.z*image_width*image_height; int global_x_index; int global_y_index; int i = -1*pad_height; int j = -1*pad_width; int shared_mem_index = 0; // while(i < blockDim.y + 2*pad_height){ // while(j < blockDim.x + 2*pad_width){ // shared_mem_index = (i+pad_height)*(2*pad_width+blockDim.x) + (j+pad_width) // global_x_index = block_x_index + j; // global_y_index = block_y_index + i; // if(global_x_index < 0 || global_x_index >= image_width || global_y_index < 0 || global_y_index >= image_height){ // shared_pointer[shared_mem_index] = 0; // }else{ // shared_pointer[shared_mem_index] = global_pointer[global_offset+ (global_y_index*WIDTH) + global_x_index]; // } // } // } for(int i = threadIdx.y; i < blockDim.y + 2*pad_height; i = i + blockDim.y){ for(int j = threadIdx.x; j < blockDim.x + 2*pad_width; j = j + blockDim.x){ int shared_mem_index = i*(blockDim.x+ 2*pad_width) + j; global_y_index = block_y_index - pad_height; global_x_index = block_x_index - pad_width; if(global_x_index < 0 || global_x_index >= image_width || global_y_index < 0 || global_y_index >= image_height){ shared_pointer[shared_mem_index] = 0; }else{ shared_pointer[shared_mem_index] = global_pointer[global_offset+ (global_y_index*WIDTH) + global_x_index]; } } } __syncthreads(); double max_value = 0.0; for(int i = 0; i < pool_height; i++){ for(int j = 0; j < pool_width; j++){ int loc_index = (i+threadIdx.y)*(blockDim.x + 2*pad_width) + (j+threadIdx.x); if(shared_pointer[loc_index] > max_value){ max_value = shared_pointer[loc_index]; } } } global_y_index = block_y_index + threadIdx.y; global_x_index = block_x_index + threadIdx.x; output_pointer[global_offset+ (global_y_index*WIDTH) + global_x_index] = max_value; } int get_shared_memory_size(int pooling_height, int pooling_width){ int total_height = TILE_HEIGHT + pooling_height/2 * 2; int total_width = TILE_WIDTH + pooling_width/2 * 2; return total_width*total_height; } /////////////////////////////////////////////////////////////////////////////// // Create Image in CPU memory //////////////////////////////////////////////////////////////////////////////// void fill_image(int channels, int height, int width, double *image_pointer) { int image_memory_size = channels*height*width*sizeof(double); memset(image_pointer, 0, image_memory_size); for(int k = 0; k < channels; k++){ for(int i = 0; i < height; i++){ for(int j = 0; j < width; j++){ int index = i*WIDTH + j + k*WIDTH*HEIGHT; image_pointer[index] = (i+j); } } } } void validate_image_data(int channels, int height, int width, double *image_pointer){ double sum = 0.0; for(int k = 0; k < channels; k++) for(int i = 0; i < height; i++){ for(int j = 0; j < width; j++){ { int index = 
i*WIDTH + j + k*WIDTH*HEIGHT; sum = sum + image_pointer[index]; } } } printf("Check sum value is %lf \n",sum); if(sum == 3218079744.0){ printf("Check sum of image validated \n"); } else{ printf("Check sum is wrong.\n",sum); printf("Exiting program \n"); // exit(0); } } void print_max_pool_checksum(int channels, int height, int width, double *output_pointer){ double sum = 0.0; for(int k = 0; k < channels; k++) for(int i = 0; i < height; i++){ for(int j = 0; j < width; j++){ { int index = i*WIDTH + j + k*WIDTH*HEIGHT; sum = sum + output_pointer[index]; } } } printf("The checksum after the max_pool is %lf \n",sum); } void check_on_cpu(double *image_pointer, double *output_pointer){ int pooling_height = POOL_HEIGHT; int pooling_width = POOL_WIDTH; int index = 0; int pad_height = pooling_height/2; int pad_width = pooling_width/2; int output_index = 0; for(int c = 0; c < CHANNELS; c++){ int offset = c*HEIGHT*WIDTH; for(int i = 0; i< HEIGHT; i++){ for(int j = 0; j< WIDTH; j++){ int start_i = i; int start_j = j; int max_val = 0; for(int k = start_i - pad_height; k <= start_i+pad_height; k++){ for(int l = start_j - pad_width; l <= start_j+pad_width; l++){ if(k >= 0 && k < HEIGHT && l >= 0 && l < WIDTH){ index = offset + k*WIDTH + l; if(image_pointer[index] > max_val){ max_val = image_pointer[index]; } } } } output_index = offset + i*WIDTH + j; output_pointer[output_index] = max_val; } } } } void print_image(double *image_pointer){ for(int c = 0; c < CHANNELS; c++){ int offset = c*HEIGHT*WIDTH; for(int i = 0; i< HEIGHT; i++){ for(int j = 0; j< WIDTH; j++){ int index = offset + i*WIDTH + j; int cpu_value = image_pointer[index]; printf(" %d ",cpu_value); } printf("\n"); } printf("\n\n"); } } int main(int ac, char *av[]){ int image_size = CHANNELS*HEIGHT*WIDTH*sizeof(double); int pooling_height = POOL_HEIGHT; int pooling_width = POOL_WIDTH; double *gpu_image_pointer, *gpu_output_pointer; double *image_pointer, *output_pointer, *cpu_output_pointer; image_pointer = (double *) malloc(image_size); output_pointer = (double *) malloc(image_size); cpu_output_pointer = (double *) malloc(image_size); memset(output_pointer, 0, image_size); memset(cpu_output_pointer, 0, image_size); fill_image(CHANNELS, HEIGHT, WIDTH, image_pointer); validate_image_data(CHANNELS, HEIGHT, WIDTH, image_pointer); CUDA_CALL(hipMalloc(&gpu_image_pointer, image_size)); CUDA_CALL(hipMalloc(&gpu_output_pointer, image_size)); CUDA_CALL(hipMemcpy(gpu_image_pointer, image_pointer, image_size, hipMemcpyHostToDevice)); hipDeviceSynchronize(); dim3 image_block_vector(TILE_WIDTH, TILE_HEIGHT); dim3 image_grid_vector(DIV_RUP(WIDTH, TILE_WIDTH), DIV_RUP(HEIGHT, TILE_HEIGHT), CHANNELS); int shared_memory_size = get_shared_memory_size(pooling_height, pooling_width); shared_memory_size = shared_memory_size*sizeof(double); printf(" Shared memory size = %d\n", shared_memory_size); hipLaunchKernelGGL(( max_pool_kernel), dim3(image_grid_vector), dim3(image_block_vector), 0, 0, CHANNELS, HEIGHT, WIDTH, pooling_height, pooling_width, gpu_image_pointer, gpu_output_pointer); hipDeviceSynchronize(); hipMemcpy(output_pointer, gpu_output_pointer, image_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); print_max_pool_checksum(CHANNELS, HEIGHT, WIDTH, output_pointer); check_on_cpu(image_pointer, cpu_output_pointer); print_max_pool_checksum(CHANNELS, HEIGHT, WIDTH, cpu_output_pointer); }
a696487f68613346b6be4cabfbbd67aacb4207d1.cu
#include <time.h> #include <assert.h> #include <stdio.h> #include <cudnn.h> #define CUDA_CALL(x) do { \ cudaError_t ____rc = (x); \ assert(____rc == cudaSuccess); \ } while (0) /* Image channels, height, width. */ #define CHANNELS 1 #define HEIGHT 3 #define WIDTH 3 /* Tile size. */ #define TILE_WIDTH 1 #define TILE_HEIGHT 1 #define POOL_WIDTH 3 #define POOL_HEIGHT 3 #define DIV_RUP(x, y) (((x)+(y)-1)/(y)) __global__ void max_pool_kernel(int channels, int image_height, int image_width, int pool_height, int pool_width, double *global_pointer, double *output_pointer) { __shared__ double shared_pointer[2704]; int pad_width = pool_width/2; int pad_height = pool_height/2; int block_x_index = blockDim.x*blockIdx.x; int block_y_index = blockDim.y*blockIdx.y; int global_offset = blockIdx.z*image_width*image_height; int global_x_index; int global_y_index; int i = -1*pad_height; int j = -1*pad_width; int shared_mem_index = 0; // while(i < blockDim.y + 2*pad_height){ // while(j < blockDim.x + 2*pad_width){ // shared_mem_index = (i+pad_height)*(2*pad_width+blockDim.x) + (j+pad_width) // global_x_index = block_x_index + j; // global_y_index = block_y_index + i; // if(global_x_index < 0 || global_x_index >= image_width || global_y_index < 0 || global_y_index >= image_height){ // shared_pointer[shared_mem_index] = 0; // }else{ // shared_pointer[shared_mem_index] = global_pointer[global_offset+ (global_y_index*WIDTH) + global_x_index]; // } // } // } for(int i = threadIdx.y; i < blockDim.y + 2*pad_height; i = i + blockDim.y){ for(int j = threadIdx.x; j < blockDim.x + 2*pad_width; j = j + blockDim.x){ int shared_mem_index = i*(blockDim.x+ 2*pad_width) + j; global_y_index = block_y_index - pad_height; global_x_index = block_x_index - pad_width; if(global_x_index < 0 || global_x_index >= image_width || global_y_index < 0 || global_y_index >= image_height){ shared_pointer[shared_mem_index] = 0; }else{ shared_pointer[shared_mem_index] = global_pointer[global_offset+ (global_y_index*WIDTH) + global_x_index]; } } } __syncthreads(); double max_value = 0.0; for(int i = 0; i < pool_height; i++){ for(int j = 0; j < pool_width; j++){ int loc_index = (i+threadIdx.y)*(blockDim.x + 2*pad_width) + (j+threadIdx.x); if(shared_pointer[loc_index] > max_value){ max_value = shared_pointer[loc_index]; } } } global_y_index = block_y_index + threadIdx.y; global_x_index = block_x_index + threadIdx.x; output_pointer[global_offset+ (global_y_index*WIDTH) + global_x_index] = max_value; } int get_shared_memory_size(int pooling_height, int pooling_width){ int total_height = TILE_HEIGHT + pooling_height/2 * 2; int total_width = TILE_WIDTH + pooling_width/2 * 2; return total_width*total_height; } /////////////////////////////////////////////////////////////////////////////// // Create Image in CPU memory //////////////////////////////////////////////////////////////////////////////// void fill_image(int channels, int height, int width, double *image_pointer) { int image_memory_size = channels*height*width*sizeof(double); memset(image_pointer, 0, image_memory_size); for(int k = 0; k < channels; k++){ for(int i = 0; i < height; i++){ for(int j = 0; j < width; j++){ int index = i*WIDTH + j + k*WIDTH*HEIGHT; image_pointer[index] = (i+j); } } } } void validate_image_data(int channels, int height, int width, double *image_pointer){ double sum = 0.0; for(int k = 0; k < channels; k++) for(int i = 0; i < height; i++){ for(int j = 0; j < width; j++){ { int index = i*WIDTH + j + k*WIDTH*HEIGHT; sum = sum + image_pointer[index]; } } } printf("Check 
sum value is %lf \n",sum); if(sum == 3218079744.0){ printf("Check sum of image validated \n"); } else{ printf("Check sum is wrong.\n",sum); printf("Exiting program \n"); // exit(0); } } void print_max_pool_checksum(int channels, int height, int width, double *output_pointer){ double sum = 0.0; for(int k = 0; k < channels; k++) for(int i = 0; i < height; i++){ for(int j = 0; j < width; j++){ { int index = i*WIDTH + j + k*WIDTH*HEIGHT; sum = sum + output_pointer[index]; } } } printf("The checksum after the max_pool is %lf \n",sum); } void check_on_cpu(double *image_pointer, double *output_pointer){ int pooling_height = POOL_HEIGHT; int pooling_width = POOL_WIDTH; int index = 0; int pad_height = pooling_height/2; int pad_width = pooling_width/2; int output_index = 0; for(int c = 0; c < CHANNELS; c++){ int offset = c*HEIGHT*WIDTH; for(int i = 0; i< HEIGHT; i++){ for(int j = 0; j< WIDTH; j++){ int start_i = i; int start_j = j; int max_val = 0; for(int k = start_i - pad_height; k <= start_i+pad_height; k++){ for(int l = start_j - pad_width; l <= start_j+pad_width; l++){ if(k >= 0 && k < HEIGHT && l >= 0 && l < WIDTH){ index = offset + k*WIDTH + l; if(image_pointer[index] > max_val){ max_val = image_pointer[index]; } } } } output_index = offset + i*WIDTH + j; output_pointer[output_index] = max_val; } } } } void print_image(double *image_pointer){ for(int c = 0; c < CHANNELS; c++){ int offset = c*HEIGHT*WIDTH; for(int i = 0; i< HEIGHT; i++){ for(int j = 0; j< WIDTH; j++){ int index = offset + i*WIDTH + j; int cpu_value = image_pointer[index]; printf(" %d ",cpu_value); } printf("\n"); } printf("\n\n"); } } int main(int ac, char *av[]){ int image_size = CHANNELS*HEIGHT*WIDTH*sizeof(double); int pooling_height = POOL_HEIGHT; int pooling_width = POOL_WIDTH; double *gpu_image_pointer, *gpu_output_pointer; double *image_pointer, *output_pointer, *cpu_output_pointer; image_pointer = (double *) malloc(image_size); output_pointer = (double *) malloc(image_size); cpu_output_pointer = (double *) malloc(image_size); memset(output_pointer, 0, image_size); memset(cpu_output_pointer, 0, image_size); fill_image(CHANNELS, HEIGHT, WIDTH, image_pointer); validate_image_data(CHANNELS, HEIGHT, WIDTH, image_pointer); CUDA_CALL(cudaMalloc(&gpu_image_pointer, image_size)); CUDA_CALL(cudaMalloc(&gpu_output_pointer, image_size)); CUDA_CALL(cudaMemcpy(gpu_image_pointer, image_pointer, image_size, cudaMemcpyHostToDevice)); cudaDeviceSynchronize(); dim3 image_block_vector(TILE_WIDTH, TILE_HEIGHT); dim3 image_grid_vector(DIV_RUP(WIDTH, TILE_WIDTH), DIV_RUP(HEIGHT, TILE_HEIGHT), CHANNELS); int shared_memory_size = get_shared_memory_size(pooling_height, pooling_width); shared_memory_size = shared_memory_size*sizeof(double); printf(" Shared memory size = %d\n", shared_memory_size); max_pool_kernel<<<image_grid_vector, image_block_vector>>>(CHANNELS, HEIGHT, WIDTH, pooling_height, pooling_width, gpu_image_pointer, gpu_output_pointer); cudaDeviceSynchronize(); cudaMemcpy(output_pointer, gpu_output_pointer, image_size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); print_max_pool_checksum(CHANNELS, HEIGHT, WIDTH, output_pointer); check_on_cpu(image_pointer, cpu_output_pointer); print_max_pool_checksum(CHANNELS, HEIGHT, WIDTH, cpu_output_pointer); }
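In the halo-loading loop of max_pool_kernel, global_y_index and global_x_index are derived from the block origin only, so every (i, j) iteration tests and loads the same source element. The commented-out draft of the loop suggests the indices were meant to be offset by the loop counters as well; the kernel below is a hedged sketch of that apparent intent, using the image_width parameter for the row stride where the original used the WIDTH macro (the two coincide in this test setup). It is an illustration, not a tested drop-in replacement.

__global__ void max_pool_kernel_fixed(int channels, int image_height, int image_width,
                                      int pool_height, int pool_width,
                                      const double *global_pointer, double *output_pointer)
{
    __shared__ double shared_pointer[2704];

    int pad_width  = pool_width / 2;
    int pad_height = pool_height / 2;
    int block_x_index = blockDim.x * blockIdx.x;
    int block_y_index = blockDim.y * blockIdx.y;
    int global_offset = blockIdx.z * image_width * image_height;

    // Cooperatively load the tile plus its halo, zero-padding outside the image.
    for (int i = threadIdx.y; i < blockDim.y + 2 * pad_height; i += blockDim.y) {
        for (int j = threadIdx.x; j < blockDim.x + 2 * pad_width; j += blockDim.x) {
            int shared_mem_index = i * (blockDim.x + 2 * pad_width) + j;
            int gy = block_y_index + i - pad_height;   // row of this halo element
            int gx = block_x_index + j - pad_width;    // column of this halo element
            shared_pointer[shared_mem_index] =
                (gx < 0 || gx >= image_width || gy < 0 || gy >= image_height)
                    ? 0.0
                    : global_pointer[global_offset + gy * image_width + gx];
        }
    }
    __syncthreads();

    // Maximum over the pooling window centred on this thread's output element.
    double max_value = 0.0;
    for (int i = 0; i < pool_height; i++)
        for (int j = 0; j < pool_width; j++) {
            int loc_index = (i + threadIdx.y) * (blockDim.x + 2 * pad_width) + (j + threadIdx.x);
            if (shared_pointer[loc_index] > max_value) max_value = shared_pointer[loc_index];
        }

    int gy = block_y_index + threadIdx.y;
    int gx = block_x_index + threadIdx.x;
    if (gy < image_height && gx < image_width)
        output_pointer[global_offset + gy * image_width + gx] = max_value;
}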
9d34e893bf97e7eed8a42b26d06457b976966e29.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define TILE_WIDTH 16

__global__ void matrixMul(float *A, float *B, float *C, int width)
{
    int column = ( blockDim.x * blockIdx.x ) + threadIdx.x;
    int row = ( blockDim.y * blockIdx.y ) + threadIdx.y;

    if (row < width && column < width) {
        float sum = 0;
        for (int k = 0; k < width; k++)
            sum += A[row * width + k] * B[k * width + column];   // multiply-accumulate
        C[row * width + column] = sum;
    }
}

__global__ void matrixMulTiled(float *A, float *B, float *C, int width)
{
    int column = ( blockDim.x * blockIdx.x ) + threadIdx.x;
    int row = ( blockDim.y * blockIdx.y ) + threadIdx.y;
    float sum = 0;

    // Loop over the A and B tiles required to compute the submatrix
    for (int t = 0; t < width / TILE_WIDTH; t++) {
        __shared__ float sub_A[TILE_WIDTH][TILE_WIDTH];
        __shared__ float sub_B[TILE_WIDTH][TILE_WIDTH];

        // Collaborative loading of A and B tiles into shared memory
        sub_A[threadIdx.y][threadIdx.x] = A[row * width + (t * TILE_WIDTH + threadIdx.x)];
        sub_B[threadIdx.y][threadIdx.x] = B[column + (t * TILE_WIDTH + threadIdx.y) * width];
        __syncthreads();

        // Loop within shared memory
        for (int k = 0; k < TILE_WIDTH; k++)
            sum += sub_A[threadIdx.y][k] * sub_B[k][threadIdx.x];
        __syncthreads();
    }
    C[row * width + column] = sum;
}

void MatrixMultiplicationHost(float *A, float *B, float *C, int width)
{
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++) {
            float sum = 0;
            for (int k = 0; k < width; k++)
                sum += A[i * width + k] * B[k * width + j];
            C[i * width + j] = sum;
        }
}

int main(int argc, char* argv[])
{
    int matrixSize = 512;                      // matrix dimension; a multiple of TILE_WIDTH, as matrixMulTiled assumes
    int numElements = matrixSize * matrixSize; // total number of matrix elements

    // Allocate host memory
    float *h_A = (float *)malloc(numElements * sizeof(float));
    float *h_B = (float *)malloc(numElements * sizeof(float));
    float *h_C = (float *)malloc(numElements * sizeof(float));
    float *h_C_CPUres = (float *)malloc(numElements * sizeof(float));

    // Initialize the host input matrices
    for (int i = 0; i < numElements; ++i) {
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }

    // Allocate the device input and output matrices
    float *d_A, *d_B, *d_C;
    hipMalloc(&d_A, numElements * sizeof(float));
    hipMalloc(&d_B, numElements * sizeof(float));
    hipMalloc(&d_C, numElements * sizeof(float));

    // Copy the host input matrices A and B in host memory to the device input matrices
    hipMemcpy(d_A, h_A, numElements * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, numElements * sizeof(float), hipMemcpyHostToDevice);

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    float milliseconds = 0;

    int threadsPerBlockDim = 16;
    int gridDimSize = (matrixSize + threadsPerBlockDim - 1) / threadsPerBlockDim;
    dim3 blockSize(threadsPerBlockDim, threadsPerBlockDim);
    dim3 gridSize(gridDimSize, gridDimSize);
    printf("Kernel launch with %dx%d blocks of %dx%d threads\n",
           gridDimSize, gridDimSize, threadsPerBlockDim, threadsPerBlockDim);

    hipEventRecord(start);
    hipLaunchKernelGGL(matrixMul, gridSize, blockSize, 0, 0, d_A, d_B, d_C, matrixSize);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milliseconds, start, stop);
    printf("GPU time to multiply matrixes %f ms\n", milliseconds);

    hipEventRecord(start);
    hipLaunchKernelGGL(matrixMulTiled, gridSize, blockSize, 0, 0, d_A, d_B, d_C, matrixSize);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milliseconds, start, stop);
    printf("GPU time to multiply matrixes tiled %f ms\n", milliseconds);

    // Copy the device result matrix in device memory to the host result matrix
    hipMemcpy(h_C, d_C, numElements * sizeof(float), hipMemcpyDeviceToHost);

    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "hipGetLastError() returned %d: %s\n", err, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Compute CPU time
    hipEventRecord(start);
    MatrixMultiplicationHost(h_A, h_B, h_C_CPUres, matrixSize);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milliseconds, start, stop);
    printf("CPU time to multiply the matrixes %f ms\n", milliseconds);

    // Verify that the result matrix is correct
    for (int i = 0; i < numElements; i++)
        if (fabs(h_C[i] - h_C_CPUres[i]) > 1e-3) {
            fprintf(stderr, "Result verification failed at element %d, %f vs %f!\n", i, h_C[i], h_C_CPUres[i]);
            exit(EXIT_FAILURE);
        }
    printf("Multiplication of the matrixes was OK\n");

    // Free device global memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C_CPUres);

    return 0;
}
9d34e893bf97e7eed8a42b26d06457b976966e29.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define TILE_WIDTH 16

__global__ void matrixMul(float *A, float *B, float *C, int width)
{
    int column = ( blockDim.x * blockIdx.x ) + threadIdx.x;
    int row = ( blockDim.y * blockIdx.y ) + threadIdx.y;

    if (row < width && column < width) {
        float sum = 0;
        for (int k = 0; k < width; k++)
            sum += A[row * width + k] * B[k * width + column];   // multiply-accumulate
        C[row * width + column] = sum;
    }
}

__global__ void matrixMulTiled(float *A, float *B, float *C, int width)
{
    int column = ( blockDim.x * blockIdx.x ) + threadIdx.x;
    int row = ( blockDim.y * blockIdx.y ) + threadIdx.y;
    float sum = 0;

    // Loop over the A and B tiles required to compute the submatrix
    for (int t = 0; t < width / TILE_WIDTH; t++) {
        __shared__ float sub_A[TILE_WIDTH][TILE_WIDTH];
        __shared__ float sub_B[TILE_WIDTH][TILE_WIDTH];

        // Collaborative loading of A and B tiles into shared memory
        sub_A[threadIdx.y][threadIdx.x] = A[row * width + (t * TILE_WIDTH + threadIdx.x)];
        sub_B[threadIdx.y][threadIdx.x] = B[column + (t * TILE_WIDTH + threadIdx.y) * width];
        __syncthreads();

        // Loop within shared memory
        for (int k = 0; k < TILE_WIDTH; k++)
            sum += sub_A[threadIdx.y][k] * sub_B[k][threadIdx.x];
        __syncthreads();
    }
    C[row * width + column] = sum;
}

void MatrixMultiplicationHost(float *A, float *B, float *C, int width)
{
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++) {
            float sum = 0;
            for (int k = 0; k < width; k++)
                sum += A[i * width + k] * B[k * width + j];
            C[i * width + j] = sum;
        }
}

int main(int argc, char* argv[])
{
    int matrixSize = 512;                      // matrix dimension; a multiple of TILE_WIDTH, as matrixMulTiled assumes
    int numElements = matrixSize * matrixSize; // total number of matrix elements

    // Allocate host memory
    float *h_A = (float *)malloc(numElements * sizeof(float));
    float *h_B = (float *)malloc(numElements * sizeof(float));
    float *h_C = (float *)malloc(numElements * sizeof(float));
    float *h_C_CPUres = (float *)malloc(numElements * sizeof(float));

    // Initialize the host input matrices
    for (int i = 0; i < numElements; ++i) {
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }

    // Allocate the device input and output matrices
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, numElements * sizeof(float));
    cudaMalloc(&d_B, numElements * sizeof(float));
    cudaMalloc(&d_C, numElements * sizeof(float));

    // Copy the host input matrices A and B in host memory to the device input matrices
    cudaMemcpy(d_A, h_A, numElements * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, numElements * sizeof(float), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;

    int threadsPerBlockDim = 16;
    int gridDimSize = (matrixSize + threadsPerBlockDim - 1) / threadsPerBlockDim;
    dim3 blockSize(threadsPerBlockDim, threadsPerBlockDim);
    dim3 gridSize(gridDimSize, gridDimSize);
    printf("CUDA kernel launch with %dx%d blocks of %dx%d threads\n",
           gridDimSize, gridDimSize, threadsPerBlockDim, threadsPerBlockDim);

    cudaEventRecord(start);
    matrixMul<<<gridSize, blockSize>>>(d_A, d_B, d_C, matrixSize);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("GPU time to multiply matrixes %f ms\n", milliseconds);

    cudaEventRecord(start);
    matrixMulTiled<<<gridSize, blockSize>>>(d_A, d_B, d_C, matrixSize);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("GPU time to multiply matrixes tiled %f ms\n", milliseconds);

    // Copy the device result matrix in device memory to the host result matrix
    cudaMemcpy(h_C, d_C, numElements * sizeof(float), cudaMemcpyDeviceToHost);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetLastError() returned %d: %s\n", err, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Compute CPU time
    cudaEventRecord(start);
    MatrixMultiplicationHost(h_A, h_B, h_C_CPUres, matrixSize);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("CPU time to multiply the matrixes %f ms\n", milliseconds);

    // Verify that the result matrix is correct
    for (int i = 0; i < numElements; i++)
        if (fabs(h_C[i] - h_C_CPUres[i]) > 1e-3) {
            fprintf(stderr, "Result verification failed at element %d, %f vs %f!\n", i, h_C[i], h_C_CPUres[i]);
            exit(EXIT_FAILURE);
        }
    printf("Multiplication of the matrixes was OK\n");

    // Free device global memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C_CPUres);

    return 0;
}
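matrixMulTiled above assumes width is an exact multiple of TILE_WIDTH and that every (row, column) pair falls inside the matrix; for other sizes, a bounds-guarded variant is the usual fix. The kernel below is an illustrative sketch (reusing the TILE_WIDTH definition above), not part of the original file:

__global__ void matrixMulTiledGuarded(const float *A, const float *B, float *C, int width)
{
    __shared__ float sub_A[TILE_WIDTH][TILE_WIDTH];
    __shared__ float sub_B[TILE_WIDTH][TILE_WIDTH];

    int column = blockDim.x * blockIdx.x + threadIdx.x;
    int row    = blockDim.y * blockIdx.y + threadIdx.y;
    float sum = 0.0f;

    // ceil(width / TILE_WIDTH) tiles, loading out-of-range elements as 0
    for (int t = 0; t < (width + TILE_WIDTH - 1) / TILE_WIDTH; t++) {
        int aCol = t * TILE_WIDTH + threadIdx.x;
        int bRow = t * TILE_WIDTH + threadIdx.y;
        sub_A[threadIdx.y][threadIdx.x] = (row < width && aCol < width) ? A[row * width + aCol] : 0.0f;
        sub_B[threadIdx.y][threadIdx.x] = (bRow < width && column < width) ? B[bRow * width + column] : 0.0f;
        __syncthreads();

        for (int k = 0; k < TILE_WIDTH; k++)
            sum += sub_A[threadIdx.y][k] * sub_B[k][threadIdx.x];
        __syncthreads();
    }

    if (row < width && column < width)
        C[row * width + column] = sum;
}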
c495245fd9d4a42c009a8c1555c7cf935c707c4d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <sparse/csr.cuh> #include "csr.h" #include <common/cudart_utils.h> #include <random/rng.cuh> #include "test_utils.h" #include <iostream> #include <limits> namespace MLCommon { namespace Sparse { template <typename T> class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> { protected: void SetUp() override {} void TearDown() override {} protected: CSRInputs<T> params; }; const std::vector<CSRInputs<float>> inputsf = {{5, 10, 5, 1234ULL}}; typedef CSRTest<float> CSRToCOO; TEST_P(CSRToCOO, Result) { hipStream_t stream; hipStreamCreate(&stream); int *ex_scan; int *result, *verify; int *ex_scan_h = new int[4]{0, 4, 8, 9}; int *verify_h = new int[10]{0, 0, 0, 0, 1, 1, 1, 1, 2, 3}; allocate(verify, 10); allocate(ex_scan, 4); allocate(result, 10, true); updateDevice(ex_scan, ex_scan_h, 4, stream); updateDevice(verify, verify_h, 10, stream); csr_to_coo<32>(ex_scan, 4, result, 10, stream); ASSERT_TRUE(devArrMatch<int>(verify, result, 10, Compare<float>(), stream)); delete[] ex_scan_h; delete[] verify_h; CUDA_CHECK(hipFree(ex_scan)); CUDA_CHECK(hipFree(verify)); CUDA_CHECK(hipFree(result)); hipStreamDestroy(stream); } typedef CSRTest<float> CSRRowNormalizeMax; TEST_P(CSRRowNormalizeMax, Result) { hipStream_t stream; hipStreamCreate(&stream); int *ex_scan; float *in_vals, *result, *verify; int ex_scan_h[4] = {0, 4, 8, 9}; float in_vals_h[10] = {5.0, 1.0, 0.0, 0.0, 10.0, 1.0, 0.0, 0.0, 1.0, 0.0}; float verify_h[10] = {1.0, 0.2, 0.0, 0.0, 1.0, 0.1, 0.0, 0.0, 1, 0.0}; allocate(in_vals, 10); allocate(verify, 10); allocate(ex_scan, 4); allocate(result, 10, true); updateDevice(ex_scan, *&ex_scan_h, 4, stream); updateDevice(in_vals, *&in_vals_h, 10, stream); updateDevice(verify, *&verify_h, 10, stream); csr_row_normalize_max<32, float>(ex_scan, in_vals, 10, 4, result, stream); ASSERT_TRUE(devArrMatch<float>(verify, result, 10, Compare<float>())); hipStreamDestroy(stream); CUDA_CHECK(hipFree(ex_scan)); CUDA_CHECK(hipFree(in_vals)); CUDA_CHECK(hipFree(verify)); CUDA_CHECK(hipFree(result)); } typedef CSRTest<float> CSRRowNormalizeL1; TEST_P(CSRRowNormalizeL1, Result) { int *ex_scan; float *in_vals, *result, *verify; int ex_scan_h[4] = {0, 4, 8, 9}; float in_vals_h[10] = {1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0}; float verify_h[10] = {0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 1, 0.0}; allocate(in_vals, 10); allocate(verify, 10); allocate(ex_scan, 4); allocate(result, 10, true); updateDevice(ex_scan, *&ex_scan_h, 4, 0); updateDevice(in_vals, *&in_vals_h, 10, 0); updateDevice(verify, *&verify_h, 10, 0); csr_row_normalize_l1<32, float>(ex_scan, in_vals, 10, 4, result, 0); hipDeviceSynchronize(); ASSERT_TRUE(devArrMatch<float>(verify, result, 10, Compare<float>())); CUDA_CHECK(hipFree(ex_scan)); CUDA_CHECK(hipFree(in_vals)); CUDA_CHECK(hipFree(verify)); CUDA_CHECK(hipFree(result)); } typedef CSRTest<float> CSRSum; 
TEST_P(CSRSum, Result) { hipStream_t stream; hipStreamCreate(&stream); std::shared_ptr<deviceAllocator> alloc(new defaultDeviceAllocator); int *ex_scan, *ind_ptr_a, *ind_ptr_b, *verify_indptr; float *in_vals_a, *in_vals_b, *verify; int ex_scan_h[4] = {0, 4, 8, 9}; int indptr_a_h[10] = {1, 2, 3, 4, 1, 2, 3, 5, 0, 1}; int indptr_b_h[10] = {1, 2, 5, 4, 0, 2, 3, 5, 1, 0}; float in_vals_h[10] = {1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0}; float verify_h[14] = {2.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; int verify_indptr_h[14] = {1, 2, 3, 4, 5, 1, 2, 3, 5, 0, 0, 1, 1, 0}; allocate(in_vals_a, 10); allocate(in_vals_b, 10); allocate(verify, 14); allocate(ex_scan, 4); allocate(verify_indptr, 14); allocate(ind_ptr_a, 10); allocate(ind_ptr_b, 10); updateDevice(ex_scan, *&ex_scan_h, 4, stream); updateDevice(in_vals_a, *&in_vals_h, 10, stream); updateDevice(in_vals_b, *&in_vals_h, 10, stream); updateDevice(verify, *&verify_h, 14, stream); updateDevice(verify_indptr, *&verify_indptr_h, 14, stream); updateDevice(ind_ptr_a, *&indptr_a_h, 10, stream); updateDevice(ind_ptr_b, *&indptr_b_h, 10, stream); int *result_ind; allocate(result_ind, 4); int nnz = csr_add_calc_inds<float, 32>(ex_scan, ind_ptr_a, in_vals_a, 10, ex_scan, ind_ptr_b, in_vals_b, 10, 4, result_ind, alloc, stream); int *result_indptr; float *result_val; allocate(result_indptr, nnz); allocate(result_val, nnz); csr_add_finalize<float, 32>(ex_scan, ind_ptr_a, in_vals_a, 10, ex_scan, ind_ptr_b, in_vals_b, 10, 4, result_ind, result_indptr, result_val, stream); ASSERT_TRUE(nnz == 14); ASSERT_TRUE(devArrMatch<float>(verify, result_val, nnz, Compare<float>())); ASSERT_TRUE( devArrMatch<int>(verify_indptr, result_indptr, nnz, Compare<int>())); hipStreamDestroy(stream); CUDA_CHECK(hipFree(ex_scan)); CUDA_CHECK(hipFree(in_vals_a)); CUDA_CHECK(hipFree(in_vals_b)); CUDA_CHECK(hipFree(ind_ptr_a)); CUDA_CHECK(hipFree(ind_ptr_b)); CUDA_CHECK(hipFree(verify)); CUDA_CHECK(hipFree(result_indptr)); CUDA_CHECK(hipFree(result_val)); } typedef CSRTest<float> CSRRowOpTest; TEST_P(CSRRowOpTest, Result) { hipStream_t stream; hipStreamCreate(&stream); int *ex_scan; float *result, *verify; int ex_scan_h[4] = {0, 4, 8, 9}; float verify_h[10] = {0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 2.0, 3.0}; allocate(verify, 10); allocate(ex_scan, 4); allocate(result, 10, true); updateDevice(ex_scan, *&ex_scan_h, 4, stream); updateDevice(verify, *&verify_h, 10, stream); csr_row_op<int, 32>( ex_scan, 4, 10, [result] __device__(int row, int start_idx, int stop_idx) { for (int i = start_idx; i < stop_idx; i++) result[i] = row; }, stream); ASSERT_TRUE(devArrMatch<float>(verify, result, 10, Compare<float>())); hipStreamDestroy(stream); CUDA_CHECK(hipFree(ex_scan)); CUDA_CHECK(hipFree(verify)); CUDA_CHECK(hipFree(result)); } typedef CSRTest<float> AdjGraphTest; TEST_P(AdjGraphTest, Result) { hipStream_t stream; hipStreamCreate(&stream); int *row_ind, *result, *verify; bool *adj; int row_ind_h[3] = {0, 3, 6}; bool adj_h[18] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}; int verify_h[9] = {0, 1, 2, 0, 1, 2, 0, 1, 2}; allocate(row_ind, 3); allocate(adj, 18); allocate(result, 9, true); allocate(verify, 9); updateDevice(row_ind, *&row_ind_h, 3, stream); updateDevice(adj, *&adj_h, 18, stream); updateDevice(verify, *&verify_h, 9, stream); csr_adj_graph_batched<int, 32>(row_ind, 6, 9, 3, adj, result, stream); ASSERT_TRUE(devArrMatch<int>(verify, result, 9, Compare<int>())); hipStreamDestroy(stream); CUDA_CHECK(hipFree(row_ind)); 
CUDA_CHECK(hipFree(adj)); CUDA_CHECK(hipFree(verify)); CUDA_CHECK(hipFree(result)); } typedef CSRTest<float> WeakCCTest; TEST_P(WeakCCTest, Result) { hipStream_t stream; hipStreamCreate(&stream); std::shared_ptr<deviceAllocator> alloc(new defaultDeviceAllocator); int *row_ind, *row_ind_ptr, *result, *verify; int row_ind_h1[3] = {0, 3, 6}; int row_ind_ptr_h1[9] = {0, 1, 2, 0, 1, 2, 0, 1, 2}; int verify_h1[6] = {1, 1, 1, 2147483647, 2147483647, 2147483647}; int row_ind_h2[3] = {0, 2, 4}; int row_ind_ptr_h2[5] = {3, 4, 3, 4, 5}; int verify_h2[6] = {1, 1, 1, 5, 5, 5}; allocate(row_ind, 3); allocate(row_ind_ptr, 9); allocate(result, 9, true); allocate(verify, 9); device_buffer<bool> xa(alloc, stream, 6); device_buffer<bool> fa(alloc, stream, 6); device_buffer<bool> m(alloc, stream, 1); WeakCCState state(xa.data(), fa.data(), m.data()); /** * Run batch #1 */ updateDevice(row_ind, *&row_ind_h1, 3, stream); updateDevice(row_ind_ptr, *&row_ind_ptr_h1, 9, stream); updateDevice(verify, *&verify_h1, 6, stream); weak_cc_batched<int, 32>(result, row_ind, row_ind_ptr, 9, 6, 0, 3, &state, stream); hipStreamSynchronize(stream); ASSERT_TRUE(devArrMatch<int>(verify, result, 6, Compare<int>())); /** * Run batch #2 */ updateDevice(row_ind, *&row_ind_h2, 3, stream); updateDevice(row_ind_ptr, *&row_ind_ptr_h2, 5, stream); updateDevice(verify, *&verify_h2, 6, stream); weak_cc_batched<int, 32>(result, row_ind, row_ind_ptr, 5, 6, 4, 3, &state, stream); ASSERT_TRUE(devArrMatch<int>(verify, result, 6, Compare<int>())); hipStreamSynchronize(stream); hipStreamDestroy(stream); CUDA_CHECK(hipFree(row_ind)); CUDA_CHECK(hipFree(row_ind_ptr)); CUDA_CHECK(hipFree(verify)); CUDA_CHECK(hipFree(result)); } INSTANTIATE_TEST_CASE_P(CSRTests, WeakCCTest, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, AdjGraphTest, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowOpTest, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, CSRToCOO, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeMax, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeL1, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, CSRSum, ::testing::ValuesIn(inputsf)); } // namespace Sparse } // namespace MLCommon
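The CSRRowNormalizeL1 test expects each row to be scaled by its L1 norm, with the all-zero last row left untouched. A host-side reference that reproduces the test's verify array (an illustrative sketch, not the MLCommon implementation):

#include <assert.h>
#include <math.h>

// Scale each CSR row by its L1 norm; rows with zero norm are left unchanged.
static void csrRowNormalizeL1Host(const int *row_start, int n_rows,
                                  const float *vals, int nnz, float *out)
{
    for (int r = 0; r < n_rows; r++) {
        int start = row_start[r];
        int end   = (r + 1 < n_rows) ? row_start[r + 1] : nnz;
        float norm = 0.0f;
        for (int i = start; i < end; i++) norm += fabsf(vals[i]);
        for (int i = start; i < end; i++) out[i] = (norm > 0.0f) ? vals[i] / norm : vals[i];
    }
}

int main()
{
    const int   row_start[4] = {0, 4, 8, 9};
    const float vals[10]     = {1, 1, 0, 0, 1, 1, 0, 0, 1, 0};
    const float expect[10]   = {0.5f, 0.5f, 0, 0, 0.5f, 0.5f, 0, 0, 1, 0};
    float out[10];
    csrRowNormalizeL1Host(row_start, 4, vals, 10, out);
    for (int i = 0; i < 10; i++) assert(fabsf(out[i] - expect[i]) < 1e-6f);
    return 0;
}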
c495245fd9d4a42c009a8c1555c7cf935c707c4d.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <sparse/csr.cuh> #include "csr.h" #include <common/cudart_utils.h> #include <random/rng.cuh> #include "test_utils.h" #include <iostream> #include <limits> namespace MLCommon { namespace Sparse { template <typename T> class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> { protected: void SetUp() override {} void TearDown() override {} protected: CSRInputs<T> params; }; const std::vector<CSRInputs<float>> inputsf = {{5, 10, 5, 1234ULL}}; typedef CSRTest<float> CSRToCOO; TEST_P(CSRToCOO, Result) { cudaStream_t stream; cudaStreamCreate(&stream); int *ex_scan; int *result, *verify; int *ex_scan_h = new int[4]{0, 4, 8, 9}; int *verify_h = new int[10]{0, 0, 0, 0, 1, 1, 1, 1, 2, 3}; allocate(verify, 10); allocate(ex_scan, 4); allocate(result, 10, true); updateDevice(ex_scan, ex_scan_h, 4, stream); updateDevice(verify, verify_h, 10, stream); csr_to_coo<32>(ex_scan, 4, result, 10, stream); ASSERT_TRUE(devArrMatch<int>(verify, result, 10, Compare<float>(), stream)); delete[] ex_scan_h; delete[] verify_h; CUDA_CHECK(cudaFree(ex_scan)); CUDA_CHECK(cudaFree(verify)); CUDA_CHECK(cudaFree(result)); cudaStreamDestroy(stream); } typedef CSRTest<float> CSRRowNormalizeMax; TEST_P(CSRRowNormalizeMax, Result) { cudaStream_t stream; cudaStreamCreate(&stream); int *ex_scan; float *in_vals, *result, *verify; int ex_scan_h[4] = {0, 4, 8, 9}; float in_vals_h[10] = {5.0, 1.0, 0.0, 0.0, 10.0, 1.0, 0.0, 0.0, 1.0, 0.0}; float verify_h[10] = {1.0, 0.2, 0.0, 0.0, 1.0, 0.1, 0.0, 0.0, 1, 0.0}; allocate(in_vals, 10); allocate(verify, 10); allocate(ex_scan, 4); allocate(result, 10, true); updateDevice(ex_scan, *&ex_scan_h, 4, stream); updateDevice(in_vals, *&in_vals_h, 10, stream); updateDevice(verify, *&verify_h, 10, stream); csr_row_normalize_max<32, float>(ex_scan, in_vals, 10, 4, result, stream); ASSERT_TRUE(devArrMatch<float>(verify, result, 10, Compare<float>())); cudaStreamDestroy(stream); CUDA_CHECK(cudaFree(ex_scan)); CUDA_CHECK(cudaFree(in_vals)); CUDA_CHECK(cudaFree(verify)); CUDA_CHECK(cudaFree(result)); } typedef CSRTest<float> CSRRowNormalizeL1; TEST_P(CSRRowNormalizeL1, Result) { int *ex_scan; float *in_vals, *result, *verify; int ex_scan_h[4] = {0, 4, 8, 9}; float in_vals_h[10] = {1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0}; float verify_h[10] = {0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 1, 0.0}; allocate(in_vals, 10); allocate(verify, 10); allocate(ex_scan, 4); allocate(result, 10, true); updateDevice(ex_scan, *&ex_scan_h, 4, 0); updateDevice(in_vals, *&in_vals_h, 10, 0); updateDevice(verify, *&verify_h, 10, 0); csr_row_normalize_l1<32, float>(ex_scan, in_vals, 10, 4, result, 0); cudaDeviceSynchronize(); ASSERT_TRUE(devArrMatch<float>(verify, result, 10, Compare<float>())); CUDA_CHECK(cudaFree(ex_scan)); CUDA_CHECK(cudaFree(in_vals)); CUDA_CHECK(cudaFree(verify)); CUDA_CHECK(cudaFree(result)); } typedef CSRTest<float> CSRSum; TEST_P(CSRSum, Result) { cudaStream_t stream; 
cudaStreamCreate(&stream); std::shared_ptr<deviceAllocator> alloc(new defaultDeviceAllocator); int *ex_scan, *ind_ptr_a, *ind_ptr_b, *verify_indptr; float *in_vals_a, *in_vals_b, *verify; int ex_scan_h[4] = {0, 4, 8, 9}; int indptr_a_h[10] = {1, 2, 3, 4, 1, 2, 3, 5, 0, 1}; int indptr_b_h[10] = {1, 2, 5, 4, 0, 2, 3, 5, 1, 0}; float in_vals_h[10] = {1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0}; float verify_h[14] = {2.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; int verify_indptr_h[14] = {1, 2, 3, 4, 5, 1, 2, 3, 5, 0, 0, 1, 1, 0}; allocate(in_vals_a, 10); allocate(in_vals_b, 10); allocate(verify, 14); allocate(ex_scan, 4); allocate(verify_indptr, 14); allocate(ind_ptr_a, 10); allocate(ind_ptr_b, 10); updateDevice(ex_scan, *&ex_scan_h, 4, stream); updateDevice(in_vals_a, *&in_vals_h, 10, stream); updateDevice(in_vals_b, *&in_vals_h, 10, stream); updateDevice(verify, *&verify_h, 14, stream); updateDevice(verify_indptr, *&verify_indptr_h, 14, stream); updateDevice(ind_ptr_a, *&indptr_a_h, 10, stream); updateDevice(ind_ptr_b, *&indptr_b_h, 10, stream); int *result_ind; allocate(result_ind, 4); int nnz = csr_add_calc_inds<float, 32>(ex_scan, ind_ptr_a, in_vals_a, 10, ex_scan, ind_ptr_b, in_vals_b, 10, 4, result_ind, alloc, stream); int *result_indptr; float *result_val; allocate(result_indptr, nnz); allocate(result_val, nnz); csr_add_finalize<float, 32>(ex_scan, ind_ptr_a, in_vals_a, 10, ex_scan, ind_ptr_b, in_vals_b, 10, 4, result_ind, result_indptr, result_val, stream); ASSERT_TRUE(nnz == 14); ASSERT_TRUE(devArrMatch<float>(verify, result_val, nnz, Compare<float>())); ASSERT_TRUE( devArrMatch<int>(verify_indptr, result_indptr, nnz, Compare<int>())); cudaStreamDestroy(stream); CUDA_CHECK(cudaFree(ex_scan)); CUDA_CHECK(cudaFree(in_vals_a)); CUDA_CHECK(cudaFree(in_vals_b)); CUDA_CHECK(cudaFree(ind_ptr_a)); CUDA_CHECK(cudaFree(ind_ptr_b)); CUDA_CHECK(cudaFree(verify)); CUDA_CHECK(cudaFree(result_indptr)); CUDA_CHECK(cudaFree(result_val)); } typedef CSRTest<float> CSRRowOpTest; TEST_P(CSRRowOpTest, Result) { cudaStream_t stream; cudaStreamCreate(&stream); int *ex_scan; float *result, *verify; int ex_scan_h[4] = {0, 4, 8, 9}; float verify_h[10] = {0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 2.0, 3.0}; allocate(verify, 10); allocate(ex_scan, 4); allocate(result, 10, true); updateDevice(ex_scan, *&ex_scan_h, 4, stream); updateDevice(verify, *&verify_h, 10, stream); csr_row_op<int, 32>( ex_scan, 4, 10, [result] __device__(int row, int start_idx, int stop_idx) { for (int i = start_idx; i < stop_idx; i++) result[i] = row; }, stream); ASSERT_TRUE(devArrMatch<float>(verify, result, 10, Compare<float>())); cudaStreamDestroy(stream); CUDA_CHECK(cudaFree(ex_scan)); CUDA_CHECK(cudaFree(verify)); CUDA_CHECK(cudaFree(result)); } typedef CSRTest<float> AdjGraphTest; TEST_P(AdjGraphTest, Result) { cudaStream_t stream; cudaStreamCreate(&stream); int *row_ind, *result, *verify; bool *adj; int row_ind_h[3] = {0, 3, 6}; bool adj_h[18] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}; int verify_h[9] = {0, 1, 2, 0, 1, 2, 0, 1, 2}; allocate(row_ind, 3); allocate(adj, 18); allocate(result, 9, true); allocate(verify, 9); updateDevice(row_ind, *&row_ind_h, 3, stream); updateDevice(adj, *&adj_h, 18, stream); updateDevice(verify, *&verify_h, 9, stream); csr_adj_graph_batched<int, 32>(row_ind, 6, 9, 3, adj, result, stream); ASSERT_TRUE(devArrMatch<int>(verify, result, 9, Compare<int>())); cudaStreamDestroy(stream); CUDA_CHECK(cudaFree(row_ind)); CUDA_CHECK(cudaFree(adj)); 
CUDA_CHECK(cudaFree(verify)); CUDA_CHECK(cudaFree(result)); } typedef CSRTest<float> WeakCCTest; TEST_P(WeakCCTest, Result) { cudaStream_t stream; cudaStreamCreate(&stream); std::shared_ptr<deviceAllocator> alloc(new defaultDeviceAllocator); int *row_ind, *row_ind_ptr, *result, *verify; int row_ind_h1[3] = {0, 3, 6}; int row_ind_ptr_h1[9] = {0, 1, 2, 0, 1, 2, 0, 1, 2}; int verify_h1[6] = {1, 1, 1, 2147483647, 2147483647, 2147483647}; int row_ind_h2[3] = {0, 2, 4}; int row_ind_ptr_h2[5] = {3, 4, 3, 4, 5}; int verify_h2[6] = {1, 1, 1, 5, 5, 5}; allocate(row_ind, 3); allocate(row_ind_ptr, 9); allocate(result, 9, true); allocate(verify, 9); device_buffer<bool> xa(alloc, stream, 6); device_buffer<bool> fa(alloc, stream, 6); device_buffer<bool> m(alloc, stream, 1); WeakCCState state(xa.data(), fa.data(), m.data()); /** * Run batch #1 */ updateDevice(row_ind, *&row_ind_h1, 3, stream); updateDevice(row_ind_ptr, *&row_ind_ptr_h1, 9, stream); updateDevice(verify, *&verify_h1, 6, stream); weak_cc_batched<int, 32>(result, row_ind, row_ind_ptr, 9, 6, 0, 3, &state, stream); cudaStreamSynchronize(stream); ASSERT_TRUE(devArrMatch<int>(verify, result, 6, Compare<int>())); /** * Run batch #2 */ updateDevice(row_ind, *&row_ind_h2, 3, stream); updateDevice(row_ind_ptr, *&row_ind_ptr_h2, 5, stream); updateDevice(verify, *&verify_h2, 6, stream); weak_cc_batched<int, 32>(result, row_ind, row_ind_ptr, 5, 6, 4, 3, &state, stream); ASSERT_TRUE(devArrMatch<int>(verify, result, 6, Compare<int>())); cudaStreamSynchronize(stream); cudaStreamDestroy(stream); CUDA_CHECK(cudaFree(row_ind)); CUDA_CHECK(cudaFree(row_ind_ptr)); CUDA_CHECK(cudaFree(verify)); CUDA_CHECK(cudaFree(result)); } INSTANTIATE_TEST_CASE_P(CSRTests, WeakCCTest, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, AdjGraphTest, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowOpTest, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, CSRToCOO, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeMax, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeL1, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CSRTests, CSRSum, ::testing::ValuesIn(inputsf)); } // namespace Sparse } // namespace MLCommon
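The CSRToCOO test expects the row-start array {0, 4, 8, 9} over 4 rows and 10 non-zeros to expand into the row indices {0, 0, 0, 0, 1, 1, 1, 1, 2, 3}. A host-side reference for that expansion (an illustrative sketch, not the MLCommon csr_to_coo kernel):

#include <assert.h>

// Expand a CSR row-start array (n_rows entries, no trailing nnz) into one row
// index per non-zero; the last row runs from its start offset to nnz.
static void csrToCooRowsHost(const int *row_start, int n_rows, int *rows, int nnz)
{
    for (int r = 0; r < n_rows; r++) {
        int start = row_start[r];
        int end   = (r + 1 < n_rows) ? row_start[r + 1] : nnz;
        for (int i = start; i < end; i++) rows[i] = r;
    }
}

int main()
{
    const int row_start[4] = {0, 4, 8, 9};
    const int expect[10]   = {0, 0, 0, 0, 1, 1, 1, 1, 2, 3};
    int rows[10];
    csrToCooRowsHost(row_start, 4, rows, 10);
    for (int i = 0; i < 10; i++) assert(rows[i] == expect[i]);
    return 0;
}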
06278c8c85e5d4072cf37f571b0145b53acaefae.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "assemble_boundary_potential_on_device.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_potential_dot_dot_acoustic = NULL; hipMalloc(&d_potential_dot_dot_acoustic, XSIZE*YSIZE); const float *d_send_potential_dot_dot_buffer = NULL; hipMalloc(&d_send_potential_dot_dot_buffer, XSIZE*YSIZE); const int num_interfaces = 1; const int max_nibool_interfaces = 1; const int *d_nibool_interfaces = NULL; hipMalloc(&d_nibool_interfaces, XSIZE*YSIZE); const int *d_ibool_interfaces = NULL; hipMalloc(&d_ibool_interfaces, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( assemble_boundary_potential_on_device), dim3(gridBlock),dim3(threadBlock), 0, 0, d_potential_dot_dot_acoustic,d_send_potential_dot_dot_buffer,num_interfaces,max_nibool_interfaces,d_nibool_interfaces,d_ibool_interfaces); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( assemble_boundary_potential_on_device), dim3(gridBlock),dim3(threadBlock), 0, 0, d_potential_dot_dot_acoustic,d_send_potential_dot_dot_buffer,num_interfaces,max_nibool_interfaces,d_nibool_interfaces,d_ibool_interfaces); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( assemble_boundary_potential_on_device), dim3(gridBlock),dim3(threadBlock), 0, 0, d_potential_dot_dot_acoustic,d_send_potential_dot_dot_buffer,num_interfaces,max_nibool_interfaces,d_nibool_interfaces,d_ibool_interfaces); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
06278c8c85e5d4072cf37f571b0145b53acaefae.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "assemble_boundary_potential_on_device.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_potential_dot_dot_acoustic = NULL; cudaMalloc(&d_potential_dot_dot_acoustic, XSIZE*YSIZE); const float *d_send_potential_dot_dot_buffer = NULL; cudaMalloc(&d_send_potential_dot_dot_buffer, XSIZE*YSIZE); const int num_interfaces = 1; const int max_nibool_interfaces = 1; const int *d_nibool_interfaces = NULL; cudaMalloc(&d_nibool_interfaces, XSIZE*YSIZE); const int *d_ibool_interfaces = NULL; cudaMalloc(&d_ibool_interfaces, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); assemble_boundary_potential_on_device<<<gridBlock,threadBlock>>>(d_potential_dot_dot_acoustic,d_send_potential_dot_dot_buffer,num_interfaces,max_nibool_interfaces,d_nibool_interfaces,d_ibool_interfaces); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { assemble_boundary_potential_on_device<<<gridBlock,threadBlock>>>(d_potential_dot_dot_acoustic,d_send_potential_dot_dot_buffer,num_interfaces,max_nibool_interfaces,d_nibool_interfaces,d_ibool_interfaces); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { assemble_boundary_potential_on_device<<<gridBlock,threadBlock>>>(d_potential_dot_dot_acoustic,d_send_potential_dot_dot_buffer,num_interfaces,max_nibool_interfaces,d_nibool_interfaces,d_ibool_interfaces); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
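The harness above prints the total microseconds for 1000 back-to-back launches; dividing by the launch count gives the mean per-launch time that such numbers are usually compared on. A trivial sketch of that reduction (the sample figure is made up):

#include <chrono>
#include <iostream>
#include <ratio>

// Illustrative helper: mean time of one launch given the duration measured
// around the 1000-iteration loop in the harness above.
static float meanLaunchMicroseconds(std::chrono::duration<float, std::micro> total, int launches)
{
    return total.count() / launches;
}

int main()
{
    // e.g. 42000 us measured for 1000 launches -> 42 us per launch on average
    std::chrono::duration<float, std::micro> total(42000.0f);
    std::cout << meanLaunchMicroseconds(total, 1000) << " us per launch" << std::endl;
    return 0;
}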
e648352ae9772e1b42a710c93d6ee2f890594ed4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__device__ float relu (float x)
{
    return fmaxf(x, 0.0);
}

extern "C"
__global__ void reluKernel (int length, float *source, float *destination)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;

    if(index < length) {
        destination[index] = relu(source[index]);
    }
}
e648352ae9772e1b42a710c93d6ee2f890594ed4.cu
__device__ float relu (float x)
{
    return fmaxf(x, 0.0);
}

extern "C"
__global__ void reluKernel (int length, float *source, float *destination)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;

    if(index < length) {
        destination[index] = relu(source[index]);
    }
}
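reluKernel is declared extern "C", presumably so a host runtime can look it up by its unmangled name. A minimal runtime-API harness that exercises it, assuming it is compiled in the same translation unit as the kernel above (buffer names and sizes here are illustrative):

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    const int length = 1 << 20;
    float *src, *dst;                                    // unified memory keeps the sketch short
    cudaMallocManaged(&src, length * sizeof(float));
    cudaMallocManaged(&dst, length * sizeof(float));
    for (int i = 0; i < length; i++) src[i] = (i % 2 ? 1.0f : -1.0f) * i;

    int threads = 256;
    int blocks  = (length + threads - 1) / threads;      // enough blocks to cover every element
    reluKernel<<<blocks, threads>>>(length, src, dst);
    cudaDeviceSynchronize();

    printf("dst[1]=%f dst[2]=%f\n", dst[1], dst[2]);     // expect 1.0 and 0.0
    cudaFree(src);
    cudaFree(dst);
    return 0;
}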
cb1d9e89baa94204ae6a8cf6acd1a5d7186bd015.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"global.h" #include"header.h" using namespace std; # just a new comment int main() { pad=60; O=10; //Define Dimensions of model dimMod[0]=122;dimMod[1]=384; NX=dimMod[1]+2*pad; NZ=dimMod[0]+2*pad; NZ1=dimMod[0]+2*pad; //Parameters for the modelling float t_n=3; float f=15; dt=0.001; dx=10; dz=dx; int nt=(int)t_n/dt; //Define File Names // std::string veloFn="marmousi.dat"; // std::string veloFn="marmsmooth.dat"; // std::string veloFn="marmhard.dat"; std::string veloFn="homogenous.velo"; // std::string sourceFn="wavelet2.dat"; float velo1[dimMod[0]*dimMod[1]], field1[NX*NZ],//[dimMod[0]*dimMod[1]], field2[NX*NZ],//[dimMod[0]*dimMod[1]], velocity[NZ*NX]; consVec(field1, NX*NZ,/*dimMod[0]*dimMod[1]*/ 0); consVec(field2, NX*NZ,/*dimMod[0]*dimMod[1]*/ 0); ofstream myFile ("wavefield1.bin", ios::out | ios::binary); dimW[0]=nt;dimW[1]=1; float wavelet[dimW[0]*dimW[1]]; //Read files for velocity model, wavelet etc. readASCIIFile((char*)veloFn.c_str(),dimMod, velo1); matrixPadding(velocity,velo1, dimMod[0], dimMod[1],pad); // VectoFileWrite(velocity, NX*NZ, "outVelocity.dat"); // exit(1); dx=max(velo1,dimMod)/f/6; //readASCIIFile((char*)sourceFn.c_str(),dimW, wavelet); // source_x=(dimMod[1]/2)*dx;//dimMod[1]/2*dx; // source_z=(dimMod[0]/2)*dx;//(30)*dx, source_x=(dimMod[1]/2+pad)*dx; source_z=(10+pad)*dx, bc=1; // float C[11]={0.565794,-6.261905,31.544643,-95.523810,193.361111,-275.080000,281.291667,-207.650794,109.303571,-38.579365,7.029087}; //float C[11]={7.029087,-38.579365,109.303571,-207.650794,281.291667,-275.080000,193.361111,-95.523810,31.544643,-6.261905,0.565794}; float C[11]={0.000317,-0.004960,0.039683,-0.238095,1.666667,-2.927222,1.666667,-0.238095,0.039683,-0.004960,0.000317}; // float CC[11]; clock_t t1,t2,t3,t4; printf("Position of Source:\nX:%f\nZ:%f\n",source_x,source_z); double stability=(double)(dt/dx)*max(velo1,dimMod); printf("Stability=%f\n",(float)stability); if(stability>1/sqrt(2)) { printf("Grid not stable! 
Check the parameters and run again!\n"); exit(1); } //VectoFileWrite(field2, dimMod[0]*dimMod[1], "field2.dat"); t1=clock(); //Initializing Cuda Device int numDevs= 0; hipGetDeviceCount(&numDevs); //For now using only one device printf("Number of Device:%d\n",numDevs); int deviceID=0; hipSetDevice(deviceID ); int cDeviceID=0; hipGetDevice(&cDeviceID ); printf("Current Active Device ID:%d\n",cDeviceID); dim3 blockDim(32,32); //dim3 gridDim((int)ceil(dimMod[1]/32)+1,(int)ceil(dimMod[0]/32)+1); dim3 gridDim(20,20); printf("GridDim:(%d,%d)\nBlockDim(%d,%d)\n",gridDim.x,gridDim.y,blockDim.x,blockDim.y); t2=clock(); //Creating Device variables in cuda devices float *d_field1, *d_field2, *d_wavelet, *source_grid, *d_velocity, *d_wave_propagate_t, *d_laplace_temp, *d_laplace, *d_C, *d_correctField, *d_temp1, *d_G, *d_cerjanMatrix; hipError_t cudaStatus; cudaStatus=hipMalloc((void**)&d_field1,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_field2,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_temp1,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_wavelet,dimW[0]*dimW[1]*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&source_grid,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_correctField,dimMod[0]*dimMod[1]*sizeof(float)); cudaCheck(cudaStatus); // cudaStatus=hipMalloc((void**)&d_velocity,dimMod[0]*dimMod[1]*sizeof(float)); // cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_velocity,NX*NZ*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_wave_propagate_t,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_laplace_temp,(NZ/*dimMod[0]*/+2*O)*(NX/*dimMod[1]*/+2*O)*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_laplace,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_cerjanMatrix,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_C,(O+1)*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=hipMalloc((void**)&d_G,(O+pad)*sizeof(float)); cudaCheck(cudaStatus); //Generating Wavelet rickerWavelet<<<gridDim,blockDim>>>( d_wavelet, f, dimW[0]*dimW[1], dt); cudaStatus = hipMemcpy(wavelet, d_wavelet , dimW[0]*dimW[1]*sizeof(float), hipMemcpyDeviceToHost); cudaCheck(cudaStatus); VectoFileWrite(wavelet, dimW[0]*dimW[1], "outWavelet.dat"); //Constructing the initial fields by placing the source in correct position construct_source(field1,field2,wavelet[0]); //Copying Data to Device cudaStatus = hipMemcpy(d_field1, field1 , NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float), hipMemcpyHostToDevice); cudaCheck(cudaStatus); cudaStatus = hipMemcpy(d_field2, field2 , NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float), hipMemcpyHostToDevice); cudaCheck(cudaStatus); cudaStatus = hipMemcpy(d_wavelet, wavelet , dimW[0]*dimW[1]*sizeof(float), hipMemcpyHostToDevice); cudaCheck(cudaStatus); cudaStatus = hipMemcpy(source_grid, field2 , NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float), hipMemcpyHostToDevice); cudaCheck(cudaStatus); cudaStatus = hipMemcpy(d_velocity, velocity , NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float), hipMemcpyHostToDevice); cudaCheck(cudaStatus); cudaStatus = hipMemcpy(d_C, C , (O+1)*sizeof(float), hipMemcpyHostToDevice); cudaCheck(cudaStatus); /*cudaStatus = hipMemcpy(CC, d_C , O*sizeof(float), 
hipMemcpyDeviceToHost); cudaCheck(cudaStatus); VectoFileWrite(CC, O, "CC.dat"); */ // Calculating Cerjan Boundary Condition Coefficients hipLaunchKernelGGL(( calculateCerjanCoeff), dim3(gridDim),dim3(blockDim), 0, 0, pad, d_G, 60); hipLaunchKernelGGL(( cerjanMatrix), dim3(gridDim),dim3(blockDim), 0, 0, d_cerjanMatrix, d_G, NZ,NX, pad); float CM[NX*NZ]; cudaStatus = hipMemcpy(CM, d_cerjanMatrix , (NZ*NX)*sizeof(float), hipMemcpyDeviceToHost); cudaCheck(cudaStatus); VectoFileWrite(CM, NX*NZ, "outCerjanMatrix.dat"); float G[pad+O]; cudaStatus = hipMemcpy(G, d_G , (pad)*sizeof(float), hipMemcpyDeviceToHost); cudaCheck(cudaStatus); VectoFileWrite(G, pad, "outCerjan.dat"); // exit(1); int size=dimMod[0]*dimMod[1]; //(dimMod[0])*(dimMod[1]); float check[size]; // int size1=(dimMod[0]+2*O)*(dimMod[1]+2*O); // float check1[size1]; // cudaStatus = hipMemcpy(check, d_velocity , size*sizeof(float), hipMemcpyDeviceToHost); // if(cudaStatus!=hipSuccess) // { // printf("Cuda couldn't allocated! Error no.:%d\n",(int)cudaStatus); // } // VectoFileWrite(check, size, "check.dat"); t3=clock(); printf("Running Loop!\n"); //char buffer[32]; for(int step=1; step<=nt; step++) { //Wave Propagation // fdm_acoustic(d_velocity,d_field1,d_field2,bc,gridDim,blockDim); //Calculate Laplacian // myCudaMemset ( d_laplace_temp,0.0, (dimMod[0]+2*O),(dimMod[1]+2*O) ); hipLaunchKernelGGL(( myCudaMemset), dim3(gridDim),dim3(blockDim), 0, 0, d_laplace_temp, 0.0, (NZ/*dimMod[0]*/+2*O), (NX/*dimMod[1]*/+2*O)); //myCudaMemset<<<gridDim,blockDim>>>( d_field2,1.0, (dimMod[0]),(dimMod[1])); // cudaStatus = hipMemcpy(check, d_field2 , size*sizeof(float), hipMemcpyDeviceToHost); // cudaCheck(cudaStatus); // VectoFileWrite(check, size, "field2.dat"); //wavefield Transfer hipLaunchKernelGGL(( wavefieldTransfer), dim3(gridDim),dim3(blockDim), 0, 0, d_laplace_temp,d_field2,NZ/*dimMod[0]*/,NX/*dimMod[1]*/,O); // cudaStatus = hipMemcpy(check1, d_laplace_temp , size1*sizeof(float), hipMemcpyDeviceToHost); // cudaCheck(cudaStatus); // VectoFileWrite(check1, size1, "check1.dat"); // exit(1); //Propagating waves hipLaunchKernelGGL(( calculateLaplace), dim3(gridDim),dim3(blockDim), 0, 0, d_laplace, d_wave_propagate_t, d_laplace_temp, d_velocity, d_field1,d_field2,dt,dx, NZ/*dimMod[0]*/,NX/*dimMod[1]*/, bc, O, d_C); // cudaStatus = hipMemcpy(check, d_laplace , size*sizeof(float), hipMemcpyDeviceToHost); // cudaCheck(cudaStatus); // VectoFileWrite(check, size, "check.dat"); //exit(1); //ABC hipLaunchKernelGGL(( ABC_inner), dim3(gridDim),dim3(blockDim), 0, 0, d_velocity,d_field1,d_field2,d_wave_propagate_t,dx,dt,bc,NZ/*dimMod[0]*/,NX/*dimMod[1]*/); //ABC_outer<<<gridDim,blockDim>>>(d_velocity,d_field1,d_field2,d_wave_propagate_t,dx,dt,bc,dimMod[0],dimMod[1]); // BadBoundaryCondition makes everything in the extra region zero // myCudaMemset<<<gridDim,blockDim>>>(d_temp1, 0.0, NZ/*dimMod[0]*/, NX); // badBoundaryCondition<<<gridDim,blockDim>>>(d_wave_propagate_t,d_temp1,dimMod[0],dimMod[1],pad); // cudaStatus=hipMemcpy(d_wave_propagate_t,d_temp1,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float),hipMemcpyDeviceToDevice); // cudaCheck(cudaStatus); // myCudaMemset<<<gridDim,blockDim>>>(d_temp1, 0.0, NZ/*dimMod[0]*/, NX); // badBoundaryCondition<<<gridDim,blockDim>>>(d_field1,d_temp1,dimMod[0],dimMod[1],pad); // cudaStatus=hipMemcpy(d_field1,d_temp1,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float),hipMemcpyDeviceToDevice); // cudaCheck(cudaStatus); // myCudaMemset<<<gridDim,blockDim>>>(d_temp1, 0.0, NZ/*dimMod[0]*/, NX); // 
badBoundaryCondition<<<gridDim,blockDim>>>(d_field2,d_temp1,dimMod[0],dimMod[1],pad); // cudaStatus=hipMemcpy(d_field2,d_temp1,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float),hipMemcpyDeviceToDevice); // cudaCheck(cudaStatus); // cerjanBoundaryCondition<<<gridDim,blockDim>>>( d_wave_propagate_t, d_field1, d_field2, d_cerjanMatrix, NZ, NX); //field1=field2; cudaStatus=hipMemcpy(d_field1,d_field2,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float),hipMemcpyDeviceToDevice); cudaCheck(cudaStatus); if(step+1<dimW[0]) { add_source<<<gridDim,blockDim>>>(d_wave_propagate_t,d_field2,source_grid,wavelet[step+1],NZ/*dimMod[0]*/,NX/*dimMod[1]*/); } else { cudaStatus=hipMemcpy(d_field2,d_wave_propagate_t,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float),hipMemcpyDeviceToDevice); cudaCheck(cudaStatus); } if(remainderf((float)step,1)==0) { extractCorrectRegion<<<gridDim,blockDim>>>(d_correctField,d_field2,dimMod[0],dimMod[1],pad); cudaStatus=hipMemcpy(check,d_correctField,dimMod[0]*dimMod[1]*sizeof(float),hipMemcpyDeviceToHost); cudaCheck(cudaStatus); // snprintf(buffer, sizeof(char) * 32, "file%d.txt", step); // VectoFileWrite(check, size, buffer); myFile.write ((char*)check, size*sizeof(float)); } } myFile.close(); t4=clock(); printf("Total Execution Time:%f\n",((float)t4-(float)t1)/CLOCKS_PER_SEC); printf("Cuda Device Query Execution Time:%f\n",((float)t2-(float)t1)/CLOCKS_PER_SEC); printf("Cuda Malloc Execution Time:%f\n",((float)t3-(float)t2)/CLOCKS_PER_SEC); printf("Cuda Loop Execution Time:%f\n",((float)t4-(float)t3)/CLOCKS_PER_SEC); hipFree(d_field1); hipFree(d_field2); hipFree(d_wavelet); hipFree(source_grid); hipFree(d_velocity); hipFree(d_wave_propagate_t); hipFree(d_laplace_temp); hipFree(d_laplace); return 0; }
cb1d9e89baa94204ae6a8cf6acd1a5d7186bd015.cu
#include"global.h" #include"header.h" using namespace std; # just a new comment int main() { pad=60; O=10; //Define Dimensions of model dimMod[0]=122;dimMod[1]=384; NX=dimMod[1]+2*pad; NZ=dimMod[0]+2*pad; NZ1=dimMod[0]+2*pad; //Parameters for the modelling float t_n=3; float f=15; dt=0.001; dx=10; dz=dx; int nt=(int)t_n/dt; //Define File Names // std::string veloFn="marmousi.dat"; // std::string veloFn="marmsmooth.dat"; // std::string veloFn="marmhard.dat"; std::string veloFn="homogenous.velo"; // std::string sourceFn="wavelet2.dat"; float velo1[dimMod[0]*dimMod[1]], field1[NX*NZ],//[dimMod[0]*dimMod[1]], field2[NX*NZ],//[dimMod[0]*dimMod[1]], velocity[NZ*NX]; consVec(field1, NX*NZ,/*dimMod[0]*dimMod[1]*/ 0); consVec(field2, NX*NZ,/*dimMod[0]*dimMod[1]*/ 0); ofstream myFile ("wavefield1.bin", ios::out | ios::binary); dimW[0]=nt;dimW[1]=1; float wavelet[dimW[0]*dimW[1]]; //Read files for velocity model, wavelet etc. readASCIIFile((char*)veloFn.c_str(),dimMod, velo1); matrixPadding(velocity,velo1, dimMod[0], dimMod[1],pad); // VectoFileWrite(velocity, NX*NZ, "outVelocity.dat"); // exit(1); dx=max(velo1,dimMod)/f/6; //readASCIIFile((char*)sourceFn.c_str(),dimW, wavelet); // source_x=(dimMod[1]/2)*dx;//dimMod[1]/2*dx; // source_z=(dimMod[0]/2)*dx;//(30)*dx, source_x=(dimMod[1]/2+pad)*dx; source_z=(10+pad)*dx, bc=1; // float C[11]={0.565794,-6.261905,31.544643,-95.523810,193.361111,-275.080000,281.291667,-207.650794,109.303571,-38.579365,7.029087}; //float C[11]={7.029087,-38.579365,109.303571,-207.650794,281.291667,-275.080000,193.361111,-95.523810,31.544643,-6.261905,0.565794}; float C[11]={0.000317,-0.004960,0.039683,-0.238095,1.666667,-2.927222,1.666667,-0.238095,0.039683,-0.004960,0.000317}; // float CC[11]; clock_t t1,t2,t3,t4; printf("Position of Source:\nX:%f\nZ:%f\n",source_x,source_z); double stability=(double)(dt/dx)*max(velo1,dimMod); printf("Stability=%f\n",(float)stability); if(stability>1/sqrt(2)) { printf("Grid not stable! 
Check the parameters and run again!\n"); exit(1); } //VectoFileWrite(field2, dimMod[0]*dimMod[1], "field2.dat"); t1=clock(); //Initializing Cuda Device int numDevs= 0; cudaGetDeviceCount(&numDevs); //For now using only one device printf("Number of Device:%d\n",numDevs); int deviceID=0; cudaSetDevice(deviceID ); int cDeviceID=0; cudaGetDevice(&cDeviceID ); printf("Current Active Device ID:%d\n",cDeviceID); dim3 blockDim(32,32); //dim3 gridDim((int)ceil(dimMod[1]/32)+1,(int)ceil(dimMod[0]/32)+1); dim3 gridDim(20,20); printf("GridDim:(%d,%d)\nBlockDim(%d,%d)\n",gridDim.x,gridDim.y,blockDim.x,blockDim.y); t2=clock(); //Creating Device variables in cuda devices float *d_field1, *d_field2, *d_wavelet, *source_grid, *d_velocity, *d_wave_propagate_t, *d_laplace_temp, *d_laplace, *d_C, *d_correctField, *d_temp1, *d_G, *d_cerjanMatrix; cudaError_t cudaStatus; cudaStatus=cudaMalloc((void**)&d_field1,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_field2,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_temp1,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_wavelet,dimW[0]*dimW[1]*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&source_grid,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_correctField,dimMod[0]*dimMod[1]*sizeof(float)); cudaCheck(cudaStatus); // cudaStatus=cudaMalloc((void**)&d_velocity,dimMod[0]*dimMod[1]*sizeof(float)); // cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_velocity,NX*NZ*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_wave_propagate_t,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_laplace_temp,(NZ/*dimMod[0]*/+2*O)*(NX/*dimMod[1]*/+2*O)*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_laplace,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_cerjanMatrix,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_C,(O+1)*sizeof(float)); cudaCheck(cudaStatus); cudaStatus=cudaMalloc((void**)&d_G,(O+pad)*sizeof(float)); cudaCheck(cudaStatus); //Generating Wavelet rickerWavelet<<<gridDim,blockDim>>>( d_wavelet, f, dimW[0]*dimW[1], dt); cudaStatus = cudaMemcpy(wavelet, d_wavelet , dimW[0]*dimW[1]*sizeof(float), cudaMemcpyDeviceToHost); cudaCheck(cudaStatus); VectoFileWrite(wavelet, dimW[0]*dimW[1], "outWavelet.dat"); //Constructing the initial fields by placing the source in correct position construct_source(field1,field2,wavelet[0]); //Copying Data to Device cudaStatus = cudaMemcpy(d_field1, field1 , NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float), cudaMemcpyHostToDevice); cudaCheck(cudaStatus); cudaStatus = cudaMemcpy(d_field2, field2 , NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float), cudaMemcpyHostToDevice); cudaCheck(cudaStatus); cudaStatus = cudaMemcpy(d_wavelet, wavelet , dimW[0]*dimW[1]*sizeof(float), cudaMemcpyHostToDevice); cudaCheck(cudaStatus); cudaStatus = cudaMemcpy(source_grid, field2 , NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float), cudaMemcpyHostToDevice); cudaCheck(cudaStatus); cudaStatus = cudaMemcpy(d_velocity, velocity , NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float), cudaMemcpyHostToDevice); cudaCheck(cudaStatus); cudaStatus = cudaMemcpy(d_C, C , (O+1)*sizeof(float), cudaMemcpyHostToDevice); cudaCheck(cudaStatus); /*cudaStatus = cudaMemcpy(CC, d_C , 
O*sizeof(float), cudaMemcpyDeviceToHost); cudaCheck(cudaStatus); VectoFileWrite(CC, O, "CC.dat"); */ // Calculating Cerjan Boundary Condition Coefficients calculateCerjanCoeff<<<gridDim,blockDim>>>(pad, d_G, 60); cerjanMatrix<<<gridDim,blockDim>>>(d_cerjanMatrix, d_G, NZ,NX, pad); float CM[NX*NZ]; cudaStatus = cudaMemcpy(CM, d_cerjanMatrix , (NZ*NX)*sizeof(float), cudaMemcpyDeviceToHost); cudaCheck(cudaStatus); VectoFileWrite(CM, NX*NZ, "outCerjanMatrix.dat"); float G[pad+O]; cudaStatus = cudaMemcpy(G, d_G , (pad)*sizeof(float), cudaMemcpyDeviceToHost); cudaCheck(cudaStatus); VectoFileWrite(G, pad, "outCerjan.dat"); // exit(1); int size=dimMod[0]*dimMod[1]; //(dimMod[0])*(dimMod[1]); float check[size]; // int size1=(dimMod[0]+2*O)*(dimMod[1]+2*O); // float check1[size1]; // cudaStatus = cudaMemcpy(check, d_velocity , size*sizeof(float), cudaMemcpyDeviceToHost); // if(cudaStatus!=cudaSuccess) // { // printf("Cuda couldn't allocated! Error no.:%d\n",(int)cudaStatus); // } // VectoFileWrite(check, size, "check.dat"); t3=clock(); printf("Running Loop!\n"); //char buffer[32]; for(int step=1; step<=nt; step++) { //Wave Propagation // fdm_acoustic(d_velocity,d_field1,d_field2,bc,gridDim,blockDim); //Calculate Laplacian // myCudaMemset ( d_laplace_temp,0.0, (dimMod[0]+2*O),(dimMod[1]+2*O) ); myCudaMemset<<<gridDim,blockDim>>>(d_laplace_temp, 0.0, (NZ/*dimMod[0]*/+2*O), (NX/*dimMod[1]*/+2*O)); //myCudaMemset<<<gridDim,blockDim>>>( d_field2,1.0, (dimMod[0]),(dimMod[1])); // cudaStatus = cudaMemcpy(check, d_field2 , size*sizeof(float), cudaMemcpyDeviceToHost); // cudaCheck(cudaStatus); // VectoFileWrite(check, size, "field2.dat"); //wavefield Transfer wavefieldTransfer<<<gridDim,blockDim>>>(d_laplace_temp,d_field2,NZ/*dimMod[0]*/,NX/*dimMod[1]*/,O); // cudaStatus = cudaMemcpy(check1, d_laplace_temp , size1*sizeof(float), cudaMemcpyDeviceToHost); // cudaCheck(cudaStatus); // VectoFileWrite(check1, size1, "check1.dat"); // exit(1); //Propagating waves calculateLaplace<<<gridDim,blockDim>>>(d_laplace, d_wave_propagate_t, d_laplace_temp, d_velocity, d_field1,d_field2,dt,dx, NZ/*dimMod[0]*/,NX/*dimMod[1]*/, bc, O, d_C); // cudaStatus = cudaMemcpy(check, d_laplace , size*sizeof(float), cudaMemcpyDeviceToHost); // cudaCheck(cudaStatus); // VectoFileWrite(check, size, "check.dat"); //exit(1); //ABC ABC_inner<<<gridDim,blockDim>>>(d_velocity,d_field1,d_field2,d_wave_propagate_t,dx,dt,bc,NZ/*dimMod[0]*/,NX/*dimMod[1]*/); //ABC_outer<<<gridDim,blockDim>>>(d_velocity,d_field1,d_field2,d_wave_propagate_t,dx,dt,bc,dimMod[0],dimMod[1]); // BadBoundaryCondition makes everything in the extra region zero // myCudaMemset<<<gridDim,blockDim>>>(d_temp1, 0.0, NZ/*dimMod[0]*/, NX); // badBoundaryCondition<<<gridDim,blockDim>>>(d_wave_propagate_t,d_temp1,dimMod[0],dimMod[1],pad); // cudaStatus=cudaMemcpy(d_wave_propagate_t,d_temp1,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float),cudaMemcpyDeviceToDevice); // cudaCheck(cudaStatus); // myCudaMemset<<<gridDim,blockDim>>>(d_temp1, 0.0, NZ/*dimMod[0]*/, NX); // badBoundaryCondition<<<gridDim,blockDim>>>(d_field1,d_temp1,dimMod[0],dimMod[1],pad); // cudaStatus=cudaMemcpy(d_field1,d_temp1,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float),cudaMemcpyDeviceToDevice); // cudaCheck(cudaStatus); // myCudaMemset<<<gridDim,blockDim>>>(d_temp1, 0.0, NZ/*dimMod[0]*/, NX); // badBoundaryCondition<<<gridDim,blockDim>>>(d_field2,d_temp1,dimMod[0],dimMod[1],pad); // cudaStatus=cudaMemcpy(d_field2,d_temp1,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float),cudaMemcpyDeviceToDevice); // cudaCheck(cudaStatus); // 
cerjanBoundaryCondition<<<gridDim,blockDim>>>( d_wave_propagate_t, d_field1, d_field2, d_cerjanMatrix, NZ, NX); //field1=field2; cudaStatus=cudaMemcpy(d_field1,d_field2,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float),cudaMemcpyDeviceToDevice); cudaCheck(cudaStatus); if(step+1<dimW[0]) { add_source<<<gridDim,blockDim>>>(d_wave_propagate_t,d_field2,source_grid,wavelet[step+1],NZ/*dimMod[0]*/,NX/*dimMod[1]*/); } else { cudaStatus=cudaMemcpy(d_field2,d_wave_propagate_t,NX*NZ/*dimMod[0]*dimMod[1]*/*sizeof(float),cudaMemcpyDeviceToDevice); cudaCheck(cudaStatus); } if(remainderf((float)step,1)==0) { extractCorrectRegion<<<gridDim,blockDim>>>(d_correctField,d_field2,dimMod[0],dimMod[1],pad); cudaStatus=cudaMemcpy(check,d_correctField,dimMod[0]*dimMod[1]*sizeof(float),cudaMemcpyDeviceToHost); cudaCheck(cudaStatus); // snprintf(buffer, sizeof(char) * 32, "file%d.txt", step); // VectoFileWrite(check, size, buffer); myFile.write ((char*)check, size*sizeof(float)); } } myFile.close(); t4=clock(); printf("Total Execution Time:%f\n",((float)t4-(float)t1)/CLOCKS_PER_SEC); printf("Cuda Device Query Execution Time:%f\n",((float)t2-(float)t1)/CLOCKS_PER_SEC); printf("Cuda Malloc Execution Time:%f\n",((float)t3-(float)t2)/CLOCKS_PER_SEC); printf("Cuda Loop Execution Time:%f\n",((float)t4-(float)t3)/CLOCKS_PER_SEC); cudaFree(d_field1); cudaFree(d_field2); cudaFree(d_wavelet); cudaFree(source_grid); cudaFree(d_velocity); cudaFree(d_wave_propagate_t); cudaFree(d_laplace_temp); cudaFree(d_laplace); return 0; }
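Both copies of the modelling driver above abort when the Courant number (dt/dx)*v_max exceeds 1/sqrt(2), the usual limit for a 2-D second-order acoustic finite-difference scheme. The following is only a minimal host-side sketch of that check, with a hypothetical helper name, in case the test is useful on its own:

#include <cmath>

// Returns true when the 2-D acoustic CFL condition holds:
// (dt / dx) * v_max <= 1 / sqrt(2), matching the stability test in the driver above.
static bool cfl_stable(double dt, double dx, double v_max) {
    double courant = dt / dx * v_max;
    return courant <= 1.0 / std::sqrt(2.0);
}

// Example: dt = 0.001 s, dx = 10 m, v_max = 5000 m/s gives a Courant number of 0.5,
// below 1/sqrt(2) ~ 0.707, so the run proceeds.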
510e243854f7358c1c454e9fda4a3705587bbf1e.hip
// !!! This is a file automatically generated by hipify!!! #ifndef CPU_ONLY #include <algorithm> #include <vector> #include "caffe/common.hpp" #if defined(USE_OPENCL) && defined(USE_FFT) #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/fft.hpp" #include "caffe/layers/conv_fft_layer.hpp" #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_im2col.hpp" #include "caffe/greentea/greentea_math_functions.hpp" // #define COMPLEX_MULT_CONJ_1D // #define COMPLEX_NULT_CONJ_RESHAPE #define COMPLEX_MULT_CONJ_2D // Best speed for CaffeNet conv1,2,3 // #define COMPLEX_MULT_CONJ_2D_SLM // #define COMPLEX_MULT_CONJ_3D // Accuracy issue // #define COMPLEX_MULT_CONJ_3D_SLM // Accuracy issue // #define FFT_BACKWARD #ifdef FFT_BACKWARD #define COMPLEX_MULT_1D // Fast for small size data of unit test // #define COMPLEX_MULT_2D_SLM // Segmentation fault on TestGradientGroup // #define COMPLEX_MULT_3D // Accuracy issue #endif namespace caffe { template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::fft_gpu_setup() { if (fft_gpu_initialized_) { return; } viennacl::ocl::context &ctx = viennacl::ocl::current_context(); // Evaluate memory needed for buffers int num_weights = this->num_output_ * (this->channels_ / this->group_); int tmpMax = ::max(this->num_output_, this->channels_); size_t fft_gpu_map_in_real_bytes = fft_map_real_size_ * sizeof(Dtype); size_t fft_gpu_map_in_complex_bytes = fft_map_complex_size_ * sizeof(DtypeComplex<Dtype>); size_t fft_gpu_map_out_complex_bytes = tmpMax * fft_gpu_map_in_complex_bytes; size_t fft_gpu_map_out_real_bytes = tmpMax * fft_gpu_map_in_real_bytes; size_t fft_gpu_weights_complex_bytes = num_weights * fft_gpu_map_in_complex_bytes; int layerMemoryBytes = fft_gpu_weights_complex_bytes + fft_gpu_map_in_real_bytes * this->channels_ + fft_gpu_map_in_real_bytes * this->num_output_ + fft_gpu_map_in_complex_bytes * this->channels_ + fft_gpu_map_in_complex_bytes * this->num_output_ + fft_gpu_map_out_complex_bytes + fft_gpu_map_out_real_bytes; LOG(INFO) << "FFT buffers - memory needed = " << ((Dtype)layerMemoryBytes / (1024.f * 1024.f)) << " MB"; cl_int cl_err; fft_gpu_weights_complex_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_weights_complex_bytes, NULL, &cl_err); #ifdef COMPLEX_NULT_CONJ_RESHAPE fft_gpu_weights_complex_reshape_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_weights_complex_bytes, NULL, &cl_err); #endif fft_gpu_map_in_real_all_channels_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_in_real_bytes * this->channels_, NULL, &cl_err); fft_gpu_map_in_complex_all_channels_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_in_complex_bytes * this->channels_, NULL, &cl_err); fft_gpu_map_in_real_all_num_output_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_in_real_bytes * this->num_output_, NULL, &cl_err); fft_gpu_map_in_complex_all_num_output_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_in_complex_bytes * this->num_output_, NULL, &cl_err); fft_gpu_map_out_complex_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_out_complex_bytes, NULL, &cl_err); fft_gpu_map_out_real_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_out_real_bytes, NULL, &cl_err); ClFFTState& fft_state = Caffe::cl_fft_state(); // FFT plan for weights fft_gpu_many_weights_handle_ = fft_state.getForwardInPlaceFFTManyPlanHandle( fft_height_, fft_width_, num_weights); // 
FFT plan fft_gpu_forward_many_handle_ = fft_state.getForwardOutOfPlaceFFTManyPlanHandle(fft_height_, fft_width_, this->channels_); // Inverse FFT plan ifft_gpu_forward_many_handle_ = fft_state.getForwardOutOfPlaceIFFTManyPlanHandle(fft_height_, fft_width_, this->num_output_); #ifdef FFT_BACKWARD // FFT plan fft_gpu_backward_many_handle_ = fft_state.getBackwardOutOfPlaceFFTManyPlanHandle(fft_height_, fft_width_, this->num_output_); // Inverse FFT plan ifft_gpu_backward_many_handle_ = fft_state.getBackwardOutOfPlaceIFFTManyPlanHandle(fft_height_, fft_width_, this->channels_); #endif fft_gpu_initialized_ = true; } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::fft_gpu_clean() { if (fft_gpu_initialized_) { clReleaseMemObject((cl_mem)fft_gpu_weights_complex_); #ifdef COMPLEX_NULT_CONJ_RESHAPE clReleaseMemObject(fft_gpu_weights_complex_reshape_); #endif clReleaseMemObject((cl_mem)fft_gpu_map_in_real_all_channels_); clReleaseMemObject((cl_mem)fft_gpu_map_in_complex_all_channels_); clReleaseMemObject((cl_mem)fft_gpu_map_in_real_all_num_output_); clReleaseMemObject((cl_mem)fft_gpu_map_in_complex_all_num_output_); clReleaseMemObject((cl_mem)fft_gpu_map_out_complex_); clReleaseMemObject((cl_mem)fft_gpu_map_out_real_); } fft_gpu_initialized_ = false; } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::fft_gpu_compute_weights() { int num_weights = this->num_output_ * (this->channels_ / this->group_); int size = num_weights * fft_map_complex_size_ * sizeof(DtypeComplex<Dtype>); // Clear buffer clear_gpu_fft_buffer(fft_gpu_weights_complex_, size); // Cyclic-shift 0-padding of weights const Dtype* weight = this->blobs_[0]->gpu_data(); fft_gpu_copy2buffer(reinterpret_cast<Dtype*>(fft_gpu_weights_complex_), weight, this->num_output_, this->group_, this->channels_, this->kernel_h_, this->kernel_w_, kernel_center_h_, kernel_center_w_, fft_height_, fft_width_); // Batched in-place FFT of weights caffe_gpu_fft_execute_r2c_inplace(fft_gpu_many_weights_handle_, reinterpret_cast<Dtype*>(fft_gpu_weights_complex_)); // Reshape #ifdef COMPLEX_NULT_CONJ_RESHAPE reshape_weights(reinterpret_cast< DtypeComplex<Dtype>* >( fft_gpu_weights_complex_reshape_), reinterpret_cast< DtypeComplex<Dtype>* >(fft_gpu_weights_complex_), fft_map_complex_size_, this->num_output_, (this->channels_/this->group_)); #endif } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::Forward_gpu_fft_task(const Dtype* bottom_data, int bottom_data_offset, Dtype* top_data, int top_data_offset, int n, int ch_gr, int out_gr) { // Clear buffer clear_gpu_fft_buffer(fft_gpu_map_out_complex_, this->num_output_ * fft_map_complex_size_ * sizeof(DtypeComplex<Dtype>)); clear_gpu_fft_buffer(fft_gpu_map_in_real_all_channels_, this->channels_ * fft_map_real_size_ * sizeof(Dtype)); // Left-top 0-padding of bottom data fft_gpu_copy2buffer_in_2D( reinterpret_cast<Dtype*>(fft_gpu_map_in_real_all_channels_), bottom_data, bottom_data_offset, this->channels_, fft_height_, fft_width_, this->height_, this->width_, 1, 1, this->pad_h_, this->pad_w_); // Batched FFT for all channels of padded bottom data caffe_gpu_fft_execute_r2c(fft_gpu_forward_many_handle_, reinterpret_cast<Dtype*>(fft_gpu_map_in_real_all_channels_), reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_)); // Multiplication of FFT bottom data and FFT weights #ifdef COMPLEX_MULT_CONJ_1D for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int 
out_first = g * out_gr; int out_last = out_first + out_gr; for (int out = out_first; out < out_last; ++out) { caffe_gpu_elementMulConj_1D( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + out * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_) + (out * ch_gr) * fft_map_complex_size_, fft_map_complex_size_, ch_gr); } } #elif defined(COMPLEX_NULT_CONJ_RESHAPE) for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; caffe_gpu_elementMulConj_Reshape( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + out_first * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_weights_complex_reshape_) + (out_first * ch_gr) * fft_map_complex_size_, out_gr, fft_map_complex_size_, ch_gr); } #elif defined(COMPLEX_MULT_CONJ_2D) for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; caffe_gpu_elementMulConj_2D( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_), out_first * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_), c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_), (out_first * ch_gr) * fft_map_complex_size_, out_gr, fft_map_complex_size_, ch_gr); } #elif defined(COMPLEX_MULT_CONJ_2D_SLM) for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; caffe_gpu_elementMulConj_2D_SLM( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + out_first * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_) + (out_first * ch_gr) * fft_map_complex_size_, out_gr, fft_map_complex_size_, ch_gr); } #elif defined(COMPLEX_MULT_CONJ_3D) for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; caffe_gpu_elementMulConj_3D( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + out_first * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_) + (out_first * ch_gr) * fft_map_complex_size_, out_gr, fft_map_complex_size_, ch_gr); } #elif defined(COMPLEX_MULT_CONJ_3D_SLM) for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; caffe_gpu_elementMulConj_3D_SLM( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + out_first * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_) + (out_first * ch_gr) * fft_map_complex_size_, out_gr, fft_map_complex_size_, ch_gr); } #endif // Batched IFFT for num output of result caffe_gpu_fft_execute_c2r(ifft_gpu_forward_many_handle_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_), reinterpret_cast<Dtype*>(fft_gpu_map_out_real_)); // Mapping from IFFT result to top data fft_gpu_copy2buffer_out_forward_2D( top_data, top_data_offset, reinterpret_cast<Dtype*>(fft_gpu_map_out_real_), this->num_output_, this->height_out_, 
this->width_out_, fft_height_, fft_width_, kernel_center_h_, kernel_center_w_, this->stride_h_, this->stride_w_ , 0, 0); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); this->forward_gpu_bias(top_data, top_data_offset, bias); } } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::Forward_gpu_fft( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { fft_gpu_compute_weights(); int ch_gr = this->channels_ / this->group_; int out_gr = this->num_output_ / this->group_; // Calculate tile count based on fft complex data size for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { Forward_gpu_fft_task(bottom_data, n * this->bottom_dim_, top_data, n * this->top_dim_, n, ch_gr, out_gr); } } } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::Forward_gpu(const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { Forward_gpu_fft(bottom, top); } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::Backward_gpu_fft_task( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top, const Dtype* weight, int i, int n, int ch_gr, int out_gr) { const Dtype* top_diff = top[i]->gpu_diff(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); // Clear buffers clear_gpu_fft_buffer(fft_gpu_map_in_real_all_num_output_, fft_map_real_size_ * this->num_output_ * sizeof(Dtype)); clear_gpu_fft_buffer(fft_gpu_map_out_complex_, this->channels_ * fft_map_complex_size_ * sizeof(DtypeComplex<Dtype>)); // Left-top 0-padding of top data fft_gpu_copy2buffer_in_2D( reinterpret_cast<Dtype*>(fft_gpu_map_in_real_all_num_output_), top_diff, n * this->top_dim_, this->num_output_, fft_height_, fft_width_, this->height_out_, this->width_out_, this->stride_h_, this->stride_w_, 0, 0); // Batched FFT for all num output of padded top data caffe_gpu_fft_execute_r2c(fft_gpu_backward_many_handle_, reinterpret_cast<Dtype*>(fft_gpu_map_in_real_all_num_output_), reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_num_output_)); // Multiplication of FFT top data and FFT weights #ifdef COMPLEX_MULT_1D for (int out = 0; out < this->num_output_; out++) { int g = out / out_gr; int c_first = g * ch_gr; int c_last = (g + 1) * ch_gr; for (int c = c_first; c < c_last; c+=ch_gr) { caffe_gpu_elementMul_1D( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_num_output_) + out * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_) + (out * ch_gr) * fft_map_complex_size_, fft_map_complex_size_, ch_gr); } } #elif defined(COMPLEX_MULT_2D_SLM) caffe_gpu_elementMul_2D_SLM( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_), reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_num_output_), reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_), fft_map_complex_size_, ch_gr, this->num_output_); #elif defined(COMPLEX_MULT_3D) // TEST in: WIP: Unit test accuracy issue caffe_gpu_elementMul_3D( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_), reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_num_output_), reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_), fft_map_complex_size_, ch_gr, out_gr, this->num_output_); #endif 
// Batched IFFT for all channels of result caffe_gpu_fft_execute_c2r(ifft_gpu_backward_many_handle_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_), reinterpret_cast<Dtype*>(fft_gpu_map_out_real_)); // Mapping from IFFT result to bottom diff // TEST out /* for (int c = 0; c < this->channels_; c++) { fft_gpu_copy2buffer_out_backward( bottom_diff + n * this->bottom_dim_ + c * map_size_, reinterpret_cast<Dtype*>(fft_gpu_map_out_real_) + c * fft_map_real_size_, this->height_, this->width_, fft_height_, fft_width_, kernel_center_h_, kernel_center_w_, 1, 1, this->pad_h_, this->pad_w_); } */ fft_gpu_copy2buffer_out_backward_2D( bottom_diff, n * this->bottom_dim_, reinterpret_cast<Dtype*>(fft_gpu_map_out_real_), this->channels_, this->height_, this->width_, fft_height_, fft_width_, kernel_center_h_, kernel_center_w_, 1, 1, this->pad_h_, this->pad_w_); } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::Backward_gpu( const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<MItype>*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); if (this->param_propagate_down_[0]) { greentea_gpu_set(this->device_->id(), this->blobs_[0]->count(), Dtype(0), (cl_mem)weight_diff, Dtype(0)); } if (this->bias_term_ && this->param_propagate_down_[1]) { greentea_gpu_set(this->device_->id(), this->blobs_[1]->count(), Dtype(0), (cl_mem)this->blobs_[1]->mutable_gpu_diff(), Dtype(0)); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff, n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); #ifdef FFT_BACKWARD int ch_gr = this->channels_ / this->group_; int out_gr = this->num_output_ / this->group_; if (this->param_propagate_down_[0]) { for (int n = 0; n < this->num_; ++n) { this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_, top_diff + n * this->top_dim_, weight_diff); } } if (propagate_down[i]) { for (int n = 0; n < this->num_; ++n) { Backward_gpu_fft_task(bottom, top, weight, i, n, ch_gr, out_gr); } } #else // Default GEMM approach Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { if (this->param_propagate_down_[0]) { this->weight_gpu_gemm(bottom_data, n * this->bottom_dim_, top_diff, n * this->top_dim_, weight_diff); } if (propagate_down[i]) { this->backward_gpu_gemm(top_diff, n * this->top_dim_, weight, bottom_diff, n * this->bottom_dim_); } } #endif } } } // float instantiation template void ConvolutionLayerFFT<float>::fft_gpu_setup(); template void ConvolutionLayerFFT<float>::fft_gpu_clean(); template void ConvolutionLayerFFT<float>::Forward_gpu_fft( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template void ConvolutionLayerFFT<float>::Forward_gpu_fft_task( const float *bottom_data, int bottom_data_offset, float* top_data, int top_data_offset, int n, int ch_gr, int out_gr); template void ConvolutionLayerFFT<float>::fft_gpu_compute_weights(); template void ConvolutionLayerFFT<float>::Backward_gpu_fft_task( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top, const float* weight, int i, int n, int ch_gr, int out_gr); // double instantiation template<> void 
ConvolutionLayerFFT<double>::fft_gpu_setup() { NOT_IMPLEMENTED; } template<> void ConvolutionLayerFFT<double>::fft_gpu_clean() { NOT_IMPLEMENTED; } template<> void ConvolutionLayerFFT<double>::Forward_gpu_fft( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top) { NOT_IMPLEMENTED; } template<> void ConvolutionLayerFFT<double>::Forward_gpu_fft_task( const double *bottom_data, int bottom_data_offset, double* top_data, int top_data_offset, int n, int ch_gr, int out_gr) { NOT_IMPLEMENTED; } template<> void ConvolutionLayerFFT<double>::fft_gpu_compute_weights() { NOT_IMPLEMENTED; } template<> void ConvolutionLayerFFT<double>::Backward_gpu_fft_task( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top, const double* weight, int i, int n, int ch_gr, int out_gr) { NOT_IMPLEMENTED; } template <> void ConvolutionLayerFFT<double>::Forward_gpu( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top) { NOT_IMPLEMENTED; } template <> void ConvolutionLayerFFT<double>::Backward_gpu( const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom) { NOT_IMPLEMENTED; } INSTANTIATE_CLASST_FUNC_3T_GUARDED(ConvolutionLayerFFT); } // namespace caffe #endif // USE_OPENCL && USE_FFT #endif // !CPU_ONLY
510e243854f7358c1c454e9fda4a3705587bbf1e.cu
#ifndef CPU_ONLY #include <algorithm> #include <vector> #include "caffe/common.hpp" #if defined(USE_OPENCL) && defined(USE_FFT) #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/fft.hpp" #include "caffe/layers/conv_fft_layer.hpp" #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_im2col.hpp" #include "caffe/greentea/greentea_math_functions.hpp" // #define COMPLEX_MULT_CONJ_1D // #define COMPLEX_NULT_CONJ_RESHAPE #define COMPLEX_MULT_CONJ_2D // Best speed for CaffeNet conv1,2,3 // #define COMPLEX_MULT_CONJ_2D_SLM // #define COMPLEX_MULT_CONJ_3D // Accuracy issue // #define COMPLEX_MULT_CONJ_3D_SLM // Accuracy issue // #define FFT_BACKWARD #ifdef FFT_BACKWARD #define COMPLEX_MULT_1D // Fast for small size data of unit test // #define COMPLEX_MULT_2D_SLM // Segmentation fault on TestGradientGroup // #define COMPLEX_MULT_3D // Accuracy issue #endif namespace caffe { template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::fft_gpu_setup() { if (fft_gpu_initialized_) { return; } viennacl::ocl::context &ctx = viennacl::ocl::current_context(); // Evaluate memory needed for buffers int num_weights = this->num_output_ * (this->channels_ / this->group_); int tmpMax = std::max(this->num_output_, this->channels_); size_t fft_gpu_map_in_real_bytes = fft_map_real_size_ * sizeof(Dtype); size_t fft_gpu_map_in_complex_bytes = fft_map_complex_size_ * sizeof(DtypeComplex<Dtype>); size_t fft_gpu_map_out_complex_bytes = tmpMax * fft_gpu_map_in_complex_bytes; size_t fft_gpu_map_out_real_bytes = tmpMax * fft_gpu_map_in_real_bytes; size_t fft_gpu_weights_complex_bytes = num_weights * fft_gpu_map_in_complex_bytes; int layerMemoryBytes = fft_gpu_weights_complex_bytes + fft_gpu_map_in_real_bytes * this->channels_ + fft_gpu_map_in_real_bytes * this->num_output_ + fft_gpu_map_in_complex_bytes * this->channels_ + fft_gpu_map_in_complex_bytes * this->num_output_ + fft_gpu_map_out_complex_bytes + fft_gpu_map_out_real_bytes; LOG(INFO) << "FFT buffers - memory needed = " << ((Dtype)layerMemoryBytes / (1024.f * 1024.f)) << " MB"; cl_int cl_err; fft_gpu_weights_complex_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_weights_complex_bytes, NULL, &cl_err); #ifdef COMPLEX_NULT_CONJ_RESHAPE fft_gpu_weights_complex_reshape_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_weights_complex_bytes, NULL, &cl_err); #endif fft_gpu_map_in_real_all_channels_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_in_real_bytes * this->channels_, NULL, &cl_err); fft_gpu_map_in_complex_all_channels_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_in_complex_bytes * this->channels_, NULL, &cl_err); fft_gpu_map_in_real_all_num_output_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_in_real_bytes * this->num_output_, NULL, &cl_err); fft_gpu_map_in_complex_all_num_output_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_in_complex_bytes * this->num_output_, NULL, &cl_err); fft_gpu_map_out_complex_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_out_complex_bytes, NULL, &cl_err); fft_gpu_map_out_real_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, fft_gpu_map_out_real_bytes, NULL, &cl_err); ClFFTState& fft_state = Caffe::cl_fft_state(); // FFT plan for weights fft_gpu_many_weights_handle_ = fft_state.getForwardInPlaceFFTManyPlanHandle( fft_height_, fft_width_, num_weights); // FFT plan fft_gpu_forward_many_handle_ = 
fft_state.getForwardOutOfPlaceFFTManyPlanHandle(fft_height_, fft_width_, this->channels_); // Inverse FFT plan ifft_gpu_forward_many_handle_ = fft_state.getForwardOutOfPlaceIFFTManyPlanHandle(fft_height_, fft_width_, this->num_output_); #ifdef FFT_BACKWARD // FFT plan fft_gpu_backward_many_handle_ = fft_state.getBackwardOutOfPlaceFFTManyPlanHandle(fft_height_, fft_width_, this->num_output_); // Inverse FFT plan ifft_gpu_backward_many_handle_ = fft_state.getBackwardOutOfPlaceIFFTManyPlanHandle(fft_height_, fft_width_, this->channels_); #endif fft_gpu_initialized_ = true; } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::fft_gpu_clean() { if (fft_gpu_initialized_) { clReleaseMemObject((cl_mem)fft_gpu_weights_complex_); #ifdef COMPLEX_NULT_CONJ_RESHAPE clReleaseMemObject(fft_gpu_weights_complex_reshape_); #endif clReleaseMemObject((cl_mem)fft_gpu_map_in_real_all_channels_); clReleaseMemObject((cl_mem)fft_gpu_map_in_complex_all_channels_); clReleaseMemObject((cl_mem)fft_gpu_map_in_real_all_num_output_); clReleaseMemObject((cl_mem)fft_gpu_map_in_complex_all_num_output_); clReleaseMemObject((cl_mem)fft_gpu_map_out_complex_); clReleaseMemObject((cl_mem)fft_gpu_map_out_real_); } fft_gpu_initialized_ = false; } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::fft_gpu_compute_weights() { int num_weights = this->num_output_ * (this->channels_ / this->group_); int size = num_weights * fft_map_complex_size_ * sizeof(DtypeComplex<Dtype>); // Clear buffer clear_gpu_fft_buffer(fft_gpu_weights_complex_, size); // Cyclic-shift 0-padding of weights const Dtype* weight = this->blobs_[0]->gpu_data(); fft_gpu_copy2buffer(reinterpret_cast<Dtype*>(fft_gpu_weights_complex_), weight, this->num_output_, this->group_, this->channels_, this->kernel_h_, this->kernel_w_, kernel_center_h_, kernel_center_w_, fft_height_, fft_width_); // Batched in-place FFT of weights caffe_gpu_fft_execute_r2c_inplace(fft_gpu_many_weights_handle_, reinterpret_cast<Dtype*>(fft_gpu_weights_complex_)); // Reshape #ifdef COMPLEX_NULT_CONJ_RESHAPE reshape_weights(reinterpret_cast< DtypeComplex<Dtype>* >( fft_gpu_weights_complex_reshape_), reinterpret_cast< DtypeComplex<Dtype>* >(fft_gpu_weights_complex_), fft_map_complex_size_, this->num_output_, (this->channels_/this->group_)); #endif } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::Forward_gpu_fft_task(const Dtype* bottom_data, int bottom_data_offset, Dtype* top_data, int top_data_offset, int n, int ch_gr, int out_gr) { // Clear buffer clear_gpu_fft_buffer(fft_gpu_map_out_complex_, this->num_output_ * fft_map_complex_size_ * sizeof(DtypeComplex<Dtype>)); clear_gpu_fft_buffer(fft_gpu_map_in_real_all_channels_, this->channels_ * fft_map_real_size_ * sizeof(Dtype)); // Left-top 0-padding of bottom data fft_gpu_copy2buffer_in_2D( reinterpret_cast<Dtype*>(fft_gpu_map_in_real_all_channels_), bottom_data, bottom_data_offset, this->channels_, fft_height_, fft_width_, this->height_, this->width_, 1, 1, this->pad_h_, this->pad_w_); // Batched FFT for all channels of padded bottom data caffe_gpu_fft_execute_r2c(fft_gpu_forward_many_handle_, reinterpret_cast<Dtype*>(fft_gpu_map_in_real_all_channels_), reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_)); // Multiplication of FFT bottom data and FFT weights #ifdef COMPLEX_MULT_CONJ_1D for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; int out_last = 
out_first + out_gr; for (int out = out_first; out < out_last; ++out) { caffe_gpu_elementMulConj_1D( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + out * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_) + (out * ch_gr) * fft_map_complex_size_, fft_map_complex_size_, ch_gr); } } #elif defined(COMPLEX_NULT_CONJ_RESHAPE) for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; caffe_gpu_elementMulConj_Reshape( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + out_first * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_weights_complex_reshape_) + (out_first * ch_gr) * fft_map_complex_size_, out_gr, fft_map_complex_size_, ch_gr); } #elif defined(COMPLEX_MULT_CONJ_2D) for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; caffe_gpu_elementMulConj_2D( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_), out_first * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_), c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_), (out_first * ch_gr) * fft_map_complex_size_, out_gr, fft_map_complex_size_, ch_gr); } #elif defined(COMPLEX_MULT_CONJ_2D_SLM) for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; caffe_gpu_elementMulConj_2D_SLM( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + out_first * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_) + (out_first * ch_gr) * fft_map_complex_size_, out_gr, fft_map_complex_size_, ch_gr); } #elif defined(COMPLEX_MULT_CONJ_3D) for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; caffe_gpu_elementMulConj_3D( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + out_first * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_) + (out_first * ch_gr) * fft_map_complex_size_, out_gr, fft_map_complex_size_, ch_gr); } #elif defined(COMPLEX_MULT_CONJ_3D_SLM) for (int c = 0; c < this->channels_; c+=ch_gr) { int g = c / ch_gr; int out_first = g * out_gr; caffe_gpu_elementMulConj_3D_SLM( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + out_first * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_channels_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_) + (out_first * ch_gr) * fft_map_complex_size_, out_gr, fft_map_complex_size_, ch_gr); } #endif // Batched IFFT for num output of result caffe_gpu_fft_execute_c2r(ifft_gpu_forward_many_handle_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_), reinterpret_cast<Dtype*>(fft_gpu_map_out_real_)); // Mapping from IFFT result to top data fft_gpu_copy2buffer_out_forward_2D( top_data, top_data_offset, reinterpret_cast<Dtype*>(fft_gpu_map_out_real_), this->num_output_, this->height_out_, this->width_out_, fft_height_, fft_width_, 
kernel_center_h_, kernel_center_w_, this->stride_h_, this->stride_w_ , 0, 0); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); this->forward_gpu_bias(top_data, top_data_offset, bias); } } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::Forward_gpu_fft( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { fft_gpu_compute_weights(); int ch_gr = this->channels_ / this->group_; int out_gr = this->num_output_ / this->group_; // Calculate tile count based on fft complex data size for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { Forward_gpu_fft_task(bottom_data, n * this->bottom_dim_, top_data, n * this->top_dim_, n, ch_gr, out_gr); } } } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::Forward_gpu(const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { Forward_gpu_fft(bottom, top); } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::Backward_gpu_fft_task( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top, const Dtype* weight, int i, int n, int ch_gr, int out_gr) { const Dtype* top_diff = top[i]->gpu_diff(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); // Clear buffers clear_gpu_fft_buffer(fft_gpu_map_in_real_all_num_output_, fft_map_real_size_ * this->num_output_ * sizeof(Dtype)); clear_gpu_fft_buffer(fft_gpu_map_out_complex_, this->channels_ * fft_map_complex_size_ * sizeof(DtypeComplex<Dtype>)); // Left-top 0-padding of top data fft_gpu_copy2buffer_in_2D( reinterpret_cast<Dtype*>(fft_gpu_map_in_real_all_num_output_), top_diff, n * this->top_dim_, this->num_output_, fft_height_, fft_width_, this->height_out_, this->width_out_, this->stride_h_, this->stride_w_, 0, 0); // Batched FFT for all num output of padded top data caffe_gpu_fft_execute_r2c(fft_gpu_backward_many_handle_, reinterpret_cast<Dtype*>(fft_gpu_map_in_real_all_num_output_), reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_num_output_)); // Multiplication of FFT top data and FFT weights #ifdef COMPLEX_MULT_1D for (int out = 0; out < this->num_output_; out++) { int g = out / out_gr; int c_first = g * ch_gr; int c_last = (g + 1) * ch_gr; for (int c = c_first; c < c_last; c+=ch_gr) { caffe_gpu_elementMul_1D( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_) + c * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_num_output_) + out * fft_map_complex_size_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_) + (out * ch_gr) * fft_map_complex_size_, fft_map_complex_size_, ch_gr); } } #elif defined(COMPLEX_MULT_2D_SLM) caffe_gpu_elementMul_2D_SLM( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_), reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_num_output_), reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_), fft_map_complex_size_, ch_gr, this->num_output_); #elif defined(COMPLEX_MULT_3D) // TEST in: WIP: Unit test accuracy issue caffe_gpu_elementMul_3D( reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_), reinterpret_cast<DtypeComplex<Dtype>*>( fft_gpu_map_in_complex_all_num_output_), reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_weights_complex_), fft_map_complex_size_, ch_gr, out_gr, this->num_output_); #endif // Batched IFFT for all channels of result 
caffe_gpu_fft_execute_c2r(ifft_gpu_backward_many_handle_, reinterpret_cast<DtypeComplex<Dtype>*>(fft_gpu_map_out_complex_), reinterpret_cast<Dtype*>(fft_gpu_map_out_real_)); // Mapping from IFFT result to bottom diff // TEST out /* for (int c = 0; c < this->channels_; c++) { fft_gpu_copy2buffer_out_backward( bottom_diff + n * this->bottom_dim_ + c * map_size_, reinterpret_cast<Dtype*>(fft_gpu_map_out_real_) + c * fft_map_real_size_, this->height_, this->width_, fft_height_, fft_width_, kernel_center_h_, kernel_center_w_, 1, 1, this->pad_h_, this->pad_w_); } */ fft_gpu_copy2buffer_out_backward_2D( bottom_diff, n * this->bottom_dim_, reinterpret_cast<Dtype*>(fft_gpu_map_out_real_), this->channels_, this->height_, this->width_, fft_height_, fft_width_, kernel_center_h_, kernel_center_w_, 1, 1, this->pad_h_, this->pad_w_); } template<typename Dtype, typename MItype, typename MOtype> void ConvolutionLayerFFT<Dtype>::Backward_gpu( const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<MItype>*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); if (this->param_propagate_down_[0]) { greentea_gpu_set(this->device_->id(), this->blobs_[0]->count(), Dtype(0), (cl_mem)weight_diff, Dtype(0)); } if (this->bias_term_ && this->param_propagate_down_[1]) { greentea_gpu_set(this->device_->id(), this->blobs_[1]->count(), Dtype(0), (cl_mem)this->blobs_[1]->mutable_gpu_diff(), Dtype(0)); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff, n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); #ifdef FFT_BACKWARD int ch_gr = this->channels_ / this->group_; int out_gr = this->num_output_ / this->group_; if (this->param_propagate_down_[0]) { for (int n = 0; n < this->num_; ++n) { this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_, top_diff + n * this->top_dim_, weight_diff); } } if (propagate_down[i]) { for (int n = 0; n < this->num_; ++n) { Backward_gpu_fft_task(bottom, top, weight, i, n, ch_gr, out_gr); } } #else // Default GEMM approach Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { if (this->param_propagate_down_[0]) { this->weight_gpu_gemm(bottom_data, n * this->bottom_dim_, top_diff, n * this->top_dim_, weight_diff); } if (propagate_down[i]) { this->backward_gpu_gemm(top_diff, n * this->top_dim_, weight, bottom_diff, n * this->bottom_dim_); } } #endif } } } // float instantiation template void ConvolutionLayerFFT<float>::fft_gpu_setup(); template void ConvolutionLayerFFT<float>::fft_gpu_clean(); template void ConvolutionLayerFFT<float>::Forward_gpu_fft( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template void ConvolutionLayerFFT<float>::Forward_gpu_fft_task( const float *bottom_data, int bottom_data_offset, float* top_data, int top_data_offset, int n, int ch_gr, int out_gr); template void ConvolutionLayerFFT<float>::fft_gpu_compute_weights(); template void ConvolutionLayerFFT<float>::Backward_gpu_fft_task( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top, const float* weight, int i, int n, int ch_gr, int out_gr); // double instantiation template<> void ConvolutionLayerFFT<double>::fft_gpu_setup() { 
NOT_IMPLEMENTED; } template<> void ConvolutionLayerFFT<double>::fft_gpu_clean() { NOT_IMPLEMENTED; } template<> void ConvolutionLayerFFT<double>::Forward_gpu_fft( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top) { NOT_IMPLEMENTED; } template<> void ConvolutionLayerFFT<double>::Forward_gpu_fft_task( const double *bottom_data, int bottom_data_offset, double* top_data, int top_data_offset, int n, int ch_gr, int out_gr) { NOT_IMPLEMENTED; } template<> void ConvolutionLayerFFT<double>::fft_gpu_compute_weights() { NOT_IMPLEMENTED; } template<> void ConvolutionLayerFFT<double>::Backward_gpu_fft_task( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top, const double* weight, int i, int n, int ch_gr, int out_gr) { NOT_IMPLEMENTED; } template <> void ConvolutionLayerFFT<double>::Forward_gpu( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top) { NOT_IMPLEMENTED; } template <> void ConvolutionLayerFFT<double>::Backward_gpu( const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom) { NOT_IMPLEMENTED; } INSTANTIATE_CLASST_FUNC_3T_GUARDED(ConvolutionLayerFFT); } // namespace caffe #endif // USE_OPENCL && USE_FFT #endif // !CPU_ONLY
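The forward pass above multiplies the transformed input maps by the conjugated transformed weights before the inverse FFT. The bodies of the caffe_gpu_elementMulConj_* kernels are not shown in this file, so the following is only an assumed-semantics CPU sketch of that accumulation for a single output map within one group:

#include <complex>
#include <vector>

using cfloat = std::complex<float>;

// Assumed semantics: out[k] accumulates, over the ch_gr input channels of the group,
// the product of the transformed input with the conjugated transformed weight.
void element_mul_conj_sketch(std::vector<cfloat>& out,                    // one output map, fft_map_complex_size entries
                             const std::vector<std::vector<cfloat>>& in,  // ch_gr transformed input channels
                             const std::vector<std::vector<cfloat>>& w) { // ch_gr transformed weight planes
    for (size_t c = 0; c < in.size(); ++c)
        for (size_t k = 0; k < out.size(); ++k)
            out[k] += in[c][k] * std::conj(w[c][k]);
}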
32cc64af2a2979f376952b58a36c685792be19fa.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_square_matrix_mult.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *d_a = NULL; hipMalloc(&d_a, XSIZE*YSIZE); int *d_b = NULL; hipMalloc(&d_b, XSIZE*YSIZE); int *d_result = NULL; hipMalloc(&d_result, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_square_matrix_mult), dim3(gridBlock),dim3(threadBlock), 0, 0, d_a,d_b,d_result,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_square_matrix_mult), dim3(gridBlock),dim3(threadBlock), 0, 0, d_a,d_b,d_result,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_square_matrix_mult), dim3(gridBlock),dim3(threadBlock), 0, 0, d_a,d_b,d_result,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
32cc64af2a2979f376952b58a36c685792be19fa.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_square_matrix_mult.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *d_a = NULL; cudaMalloc(&d_a, XSIZE*YSIZE); int *d_b = NULL; cudaMalloc(&d_b, XSIZE*YSIZE); int *d_result = NULL; cudaMalloc(&d_result, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_square_matrix_mult<<<gridBlock,threadBlock>>>(d_a,d_b,d_result,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_square_matrix_mult<<<gridBlock,threadBlock>>>(d_a,d_b,d_result,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_square_matrix_mult<<<gridBlock,threadBlock>>>(d_a,d_b,d_result,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
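The benchmark above times 1000 asynchronous launches with std::chrono and does not synchronize before reading the end timestamp. A common alternative is to time on the device with CUDA events, which wait for the recorded work to complete; this is only a sketch assuming the same kernel and launch configuration as in the file:

#include <cuda_runtime.h>

// Sketch only: assumes gpu_square_matrix_mult, d_a, d_b, d_result, n,
// gridBlock and threadBlock are prepared exactly as in the benchmark above.
float time_launches_ms(int *d_a, int *d_b, int *d_result, int n,
                       dim3 gridBlock, dim3 threadBlock, int iters) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i)
        gpu_square_matrix_mult<<<gridBlock, threadBlock>>>(d_a, d_b, d_result, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);          // blocks until all iters launches have finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;                           // device time for the whole loop, in milliseconds
}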
36a52cce67c347dbeb95768eccc470d3f86fc17a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zgeqr2x_gpu-v4.cu normal z -> c, Sat Nov 15 19:53:59 2014 */ #include "common_magma.h" #include "commonblas_c.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /** Purpose ------- CGEQR2 computes a QR factorization of a complex m by n matrix A: A = Q * R. This expert routine requires two more arguments than the standard cgeqr2, namely, dT and ddA, explained below. The storage for A is also not as in the LAPACK's cgeqr2 routine (see below). The first is used to output the triangular n x n factor T of the block reflector used in the factorization. The second holds the diagonal nxn blocks of A, i.e., the diagonal submatrices of R. This routine implements the left looking QR. This version adds internal blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA COMPLEX array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the unitary matrix Q as a product of elementary reflectors (see Further Details). \n the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the unitary matrix Q as a product of elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] dtau COMPLEX array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] dT COMPLEX array, dimension N x N. Stores the triangular N x N factor T of the block reflector used in the factorization. The lower triangular part is 0. @param[out] ddA COMPLEX array, dimension N x N. Stores the elements of the upper N x N diagonal block of A. LAPACK stores this array in A. There are 0s below the diagonal. @param dwork (workspace) DOUBLE_PRECISION array, dimension (3 N) @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v**H where tau is a complex scalar, and v is a complex vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_cgeqrf_comp ********************************************************************/ extern "C" magma_int_t magma_cgeqr2x4_gpu( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dtau, magmaFloatComplex_ptr dT, magmaFloatComplex_ptr ddA, magmaFloat_ptr dwork, magma_queue_t queue, magma_int_t *info) { #define dA(i_,j_) (dA + (j_)*(ldda) + (i_)) #define dT(i_,j_) (dT + (j_)*(k) + (i_)) #define BS 32 magma_int_t i, k; float *dnorm = (float *)dwork; magmaFloatComplex *work = (magmaFloatComplex *)(dwork+2*n); magma_queue_t cstream; magmablasGetKernelStream(&cstream); magmablasSetKernelStream(queue); *info = 0; if (m < 0) { *info = -1; } else if (n < 0) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } /* Compute the norms of the trailing columns */ k = min(m,n); magmablas_scnrm2_cols(m, k, dA(0,0), ldda, dnorm); for (magma_int_t b=0; b < k; b += BS) { for (i = b; i < min(k, b+BS); ++i) { /* Apply H**H to A(:,i) from the left */ if (i-b > 0) { /* Compute the (i-1)th column of T */ if ( i-1 > 0 ) { hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(i-1), dim3(BLOCK_SIZE), 0, magma_stream , m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(i-1), dim3(i-1), 0, magma_stream , dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* dwork = V**H c */ hipLaunchKernelGGL(( magma_cgemv_kernel1), dim3(i-b), dim3(BLOCK_SIZE), 0, magma_stream , m-b, dA(b, b), ldda, dA(b,i), work); /* dwork = T**H work */ hipLaunchKernelGGL(( magma_ctrmv_tkernel), dim3(i-b), dim3(i-b), 0, magma_stream , dT(b,b), k, work, work+i-b); /* c = c - V work */ if ( m-b > 0 ) { dim3 blocks3( (m-b + BLOCK_SIZE-1) / BLOCK_SIZE ); dim3 threads3( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_cgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , m-b, i-b, dA(b,b), ldda, work+i-b, dA(b, i)); } } /* Adjust the dnorm[i] to hold the norm of A(i:m,i) */ if ( i > 0 ) { hipLaunchKernelGGL(( magma_scnrm2_adjust_kernel), dim3(1), dim3(i), 0, magma_stream , dnorm+i, dA(0, i)); } /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) 1. 1 is not yet put on the diagonal of A 2. Elements above the diagonal are copied in ddA and the ones in A are set to zero 3. update T */ magma_clarfgx_gpu(m-i, dA(i, i), dA(min(i+1,m),i), dtau+i, dnorm+i, ddA + i + i*n, i); if (i == 0) { magmaFloatComplex tt = MAGMA_C_ONE; magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, dT(0,0), 1); magma_csetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, magma_stream); } } if ( i-1 > 0 ) { hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(i-1), dim3(BLOCK_SIZE), 0, magma_stream , m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(i-1), dim3(i-1), 0, magma_stream , dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* Apply the transformations to the trailing matrix. */ //magma_clarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise, magma_clarfb2_gpu( m-b, k-i, BS, dA(b, b), ldda, dT+b+b*k, k, dA(b, i), ldda, work, k-i); } magmablasSetKernelStream(cstream); return *info; } /* magma_cgeqr2 */
36a52cce67c347dbeb95768eccc470d3f86fc17a.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zgeqr2x_gpu-v4.cu normal z -> c, Sat Nov 15 19:53:59 2014 */ #include "common_magma.h" #include "commonblas_c.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /** Purpose ------- CGEQR2 computes a QR factorization of a complex m by n matrix A: A = Q * R. This expert routine requires two more arguments than the standard cgeqr2, namely, dT and ddA, explained below. The storage for A is also not as in the LAPACK's cgeqr2 routine (see below). The first is used to output the triangular n x n factor T of the block reflector used in the factorization. The second holds the diagonal nxn blocks of A, i.e., the diagonal submatrices of R. This routine implements the left looking QR. This version adds internal blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA COMPLEX array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the unitary matrix Q as a product of elementary reflectors (see Further Details). \n the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the unitary matrix Q as a product of elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] dtau COMPLEX array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] dT COMPLEX array, dimension N x N. Stores the triangular N x N factor T of the block reflector used in the factorization. The lower triangular part is 0. @param[out] ddA COMPLEX array, dimension N x N. Stores the elements of the upper N x N diagonal block of A. LAPACK stores this array in A. There are 0s below the diagonal. @param dwork (workspace) DOUBLE_PRECISION array, dimension (3 N) @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v**H where tau is a complex scalar, and v is a complex vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_cgeqrf_comp ********************************************************************/ extern "C" magma_int_t magma_cgeqr2x4_gpu( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dtau, magmaFloatComplex_ptr dT, magmaFloatComplex_ptr ddA, magmaFloat_ptr dwork, magma_queue_t queue, magma_int_t *info) { #define dA(i_,j_) (dA + (j_)*(ldda) + (i_)) #define dT(i_,j_) (dT + (j_)*(k) + (i_)) #define BS 32 magma_int_t i, k; float *dnorm = (float *)dwork; magmaFloatComplex *work = (magmaFloatComplex *)(dwork+2*n); magma_queue_t cstream; magmablasGetKernelStream(&cstream); magmablasSetKernelStream(queue); *info = 0; if (m < 0) { *info = -1; } else if (n < 0) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } /* Compute the norms of the trailing columns */ k = min(m,n); magmablas_scnrm2_cols(m, k, dA(0,0), ldda, dnorm); for (magma_int_t b=0; b < k; b += BS) { for (i = b; i < min(k, b+BS); ++i) { /* Apply H**H to A(:,i) from the left */ if (i-b > 0) { /* Compute the (i-1)th column of T */ if ( i-1 > 0 ) { magma_cgemv_kernel3<<< i-1, BLOCK_SIZE, 0, magma_stream >>> ( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); magma_ctrmv_kernel2<<< i-1, i-1, 0, magma_stream >>> ( dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* dwork = V**H c */ magma_cgemv_kernel1<<< i-b, BLOCK_SIZE, 0, magma_stream >>> (m-b, dA(b, b), ldda, dA(b,i), work); /* dwork = T**H work */ magma_ctrmv_tkernel<<< i-b, i-b, 0, magma_stream >>> (dT(b,b), k, work, work+i-b); /* c = c - V work */ if ( m-b > 0 ) { dim3 blocks3( (m-b + BLOCK_SIZE-1) / BLOCK_SIZE ); dim3 threads3( BLOCK_SIZE ); magma_cgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>> (m-b, i-b, dA(b,b), ldda, work+i-b, dA(b, i)); } } /* Adjust the dnorm[i] to hold the norm of A(i:m,i) */ if ( i > 0 ) { magma_scnrm2_adjust_kernel<<< 1, i, 0, magma_stream >>>(dnorm+i, dA(0, i)); } /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) 1. 1 is not yet put on the diagonal of A 2. Elements above the diagonal are copied in ddA and the ones in A are set to zero 3. update T */ magma_clarfgx_gpu(m-i, dA(i, i), dA(min(i+1,m),i), dtau+i, dnorm+i, ddA + i + i*n, i); if (i == 0) { magmaFloatComplex tt = MAGMA_C_ONE; magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, dT(0,0), 1); magma_csetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, magma_stream); } } if ( i-1 > 0 ) { magma_cgemv_kernel3<<< i-1, BLOCK_SIZE, 0, magma_stream >>> ( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); magma_ctrmv_kernel2<<< i-1, i-1, 0, magma_stream >>> ( dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* Apply the transformations to the trailing matrix. */ //magma_clarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise, magma_clarfb2_gpu( m-b, k-i, BS, dA(b, b), ldda, dT+b+b*k, k, dA(b, i), ldda, work, k-i); } magmablasSetKernelStream(cstream); return *info; } /* magma_cgeqr2 */
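In the MAGMA pair above every stream-qualified launch `kernel<<<grid, threads, shmem, magma_stream>>>(args)` maps one-to-one onto `hipLaunchKernelGGL(kernel, grid, threads, shmem, magma_stream, args)`; the surrounding queue bookkeeping (`magmablasGetKernelStream`/`magmablasSetKernelStream`) is unchanged. A minimal sketch of that mapping, with the explicit post-launch error check that MAGMA's macros normally hide, is below; the `scale_kernel` name and the stream handling are illustrative, not MAGMA's.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale_kernel(float *x, float alpha, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= alpha;
}

void scale_on_stream(float *d_x, float alpha, int n, cudaStream_t stream) {
    const int threads = 256;                      // BLOCK_SIZE-style constant
    const int blocks  = (n + threads - 1) / threads;

    scale_kernel<<<blocks, threads, 0, stream>>>(d_x, alpha, n);
    // HIP form emitted by hipify:
    // hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(threads), 0, stream, d_x, alpha, n);

    cudaError_t err = cudaGetLastError();         // catches bad launch configurations
    if (err != cudaSuccess)
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
}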
b8824753faa812ca3d136258534a532802fff489.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _2DCONVOLUTION_KERNEL_H_ #define _2DCONVOLUTION_KERNEL_H_ #include <stdio.h> #include "2Dconvolution.h" // #define THREAD_BLOCK_SIZE 32 // #define KERNEL_SIZE 5 // #define MATRIX_SIZE 1024 __global__ void ConvolutionKernel(float *M, float *N, float *P) { int row=blockIdx.y*blockDim.y+threadIdx.y; int col=blockIdx.x*blockDim.x+threadIdx.x; double sum = 0; // check the start and end values of m and n to prevent overrunning the // matrix edges unsigned int M_start = (row < 2)? 2 - row : 0; unsigned int M_stop = (row > (MATRIX_SIZE - 3))? MATRIX_SIZE - row + 2 : 5; unsigned int N_start = (col < 2)? 2 - col : 0; unsigned int N_stop = (col > (MATRIX_SIZE - 3))? (MATRIX_SIZE-col) + 2 : 5; // overlay M over N centered at element (row,col). For each // overlapping element, multiply the two and accumulate for(unsigned int i = M_start; i < M_stop; ++i) { for(unsigned int j = N_start; j < N_stop; j++) { sum += M[i * 5 + j] * N[MATRIX_SIZE*(row + i - 2) + (col+j - 2)]; } } // store the result P[row*MATRIX_SIZE + col] = (float)sum; } #endif // #ifndef _2DCONVOLUTION_KERNEL_H_
b8824753faa812ca3d136258534a532802fff489.cu
#ifndef _2DCONVOLUTION_KERNEL_H_ #define _2DCONVOLUTION_KERNEL_H_ #include <stdio.h> #include "2Dconvolution.h" // #define THREAD_BLOCK_SIZE 32 // #define KERNEL_SIZE 5 // #define MATRIX_SIZE 1024 __global__ void ConvolutionKernel(float *M, float *N, float *P) { int row=blockIdx.y*blockDim.y+threadIdx.y; int col=blockIdx.x*blockDim.x+threadIdx.x; double sum = 0; // check the start and end values of m and n to prevent overrunning the // matrix edges unsigned int M_start = (row < 2)? 2 - row : 0; unsigned int M_stop = (row > (MATRIX_SIZE - 3))? MATRIX_SIZE - row + 2 : 5; unsigned int N_start = (col < 2)? 2 - col : 0; unsigned int N_stop = (col > (MATRIX_SIZE - 3))? (MATRIX_SIZE-col) + 2 : 5; // overlay M over N centered at element (row,col). For each // overlapping element, multiply the two and accumulate for(unsigned int i = M_start; i < M_stop; ++i) { for(unsigned int j = N_start; j < N_stop; j++) { sum += M[i * 5 + j] * N[MATRIX_SIZE*(row + i - 2) + (col+j - 2)]; } } // store the result P[row*MATRIX_SIZE + col] = (float)sum; } #endif // #ifndef _2DCONVOLUTION_KERNEL_H_
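The convolution kernel above assumes one thread per output element of a MATRIX_SIZE x MATRIX_SIZE image and clamps the 5x5 stencil at the borders via M_start/M_stop and N_start/N_stop. The header that defines MATRIX_SIZE is not shown; the host-side sketch below shows how such a kernel is typically driven, using the 1024/32 values hinted at by the commented-out defines. The buffer names and the helper function are placeholders.

#include <cuda_runtime.h>

#define MATRIX_SIZE 1024            // mirrors the commented define; assumed value
#define THREAD_BLOCK_SIZE 32

__global__ void ConvolutionKernel(float *M, float *N, float *P);   // defined in the file above

void run_convolution(const float *h_mask, const float *h_image, float *h_out) {
    float *d_M, *d_N, *d_P;
    size_t img_bytes = (size_t)MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    cudaMalloc(&d_M, 5 * 5 * sizeof(float));      // 5x5 convolution mask
    cudaMalloc(&d_N, img_bytes);
    cudaMalloc(&d_P, img_bytes);

    cudaMemcpy(d_M, h_mask, 5 * 5 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_N, h_image, img_bytes, cudaMemcpyHostToDevice);

    dim3 block(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
    dim3 grid(MATRIX_SIZE / THREAD_BLOCK_SIZE, MATRIX_SIZE / THREAD_BLOCK_SIZE);  // 1024 divides evenly by 32
    ConvolutionKernel<<<grid, block>>>(d_M, d_N, d_P);

    cudaMemcpy(h_out, d_P, img_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_M); cudaFree(d_N); cudaFree(d_P);
}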
1a849526ba7cacd2898a4cf3f21384e6e8931ba6.hip
// !!! This is a file automatically generated by hipify!!! #include "errors.hpp" #include "lagrangian.hpp" #include "metropolizer.hpp" #include "nDHarmonicOscillator.hpp" #include "numtest.hpp" #include "opEvaluator.hpp" #include <cmath> #include <iostream> void harmosci_test() { double time = 100.0; int N = 2048; double a = time / N; int dim = 1; double omega = 1; double mass = 1; nDHarmonicOscillator<double> osc(a, N, dim, omega, mass); metropolizer<double, nDHarmonicOscillator<double>> metro(osc, 2 * sqrt(a), 10); opEvaluator<double, nDHarmonicOscillator<double>, &nDHarmonicOscillator<double>::opEvalPosition<0, 2>> x2(osc); opEvaluator<double, nDHarmonicOscillator<double>, &nDHarmonicOscillator<double>::opEvalHamiltonian> ham(osc); double *path; hipMallocManaged(&path, dim * N * sizeof(double)); metro.getRandomPath(path, 1 * sqrt(a), 0); // for (int i = 0; i < N; i++) { // path[i] = 0; // } for (int j = 0; j < 10000; j++) { metro.makeMetroStep(path); } for (int i = 0; i < 10000; i++) { for (int j = 0; j < 50; j++) { metro.makeMetroStep(path); } ham.evalPath(path); x2.evalPath(path); // double E = 0; // for (int i = 0; i < N; i++) { // E += path[i] * path[i]; // } // E = E / N; // std::cout << E << std::endl; std::cout << "Energy is: " << omega * omega * x2.getMean() << " Hamiltonian is: " << ham.getMean() << std::endl; } // for(int i = 0; i<N; i++){ // std::cout << path[i] << std::endl; // } hipFree(path); }
1a849526ba7cacd2898a4cf3f21384e6e8931ba6.cu
#include "errors.hpp" #include "lagrangian.hpp" #include "metropolizer.hpp" #include "nDHarmonicOscillator.hpp" #include "numtest.hpp" #include "opEvaluator.hpp" #include <cmath> #include <iostream> void harmosci_test() { double time = 100.0; int N = 2048; double a = time / N; int dim = 1; double omega = 1; double mass = 1; nDHarmonicOscillator<double> osc(a, N, dim, omega, mass); metropolizer<double, nDHarmonicOscillator<double>> metro(osc, 2 * sqrt(a), 10); opEvaluator<double, nDHarmonicOscillator<double>, &nDHarmonicOscillator<double>::opEvalPosition<0, 2>> x2(osc); opEvaluator<double, nDHarmonicOscillator<double>, &nDHarmonicOscillator<double>::opEvalHamiltonian> ham(osc); double *path; cudaMallocManaged(&path, dim * N * sizeof(double)); metro.getRandomPath(path, 1 * sqrt(a), 0); // for (int i = 0; i < N; i++) { // path[i] = 0; // } for (int j = 0; j < 10000; j++) { metro.makeMetroStep(path); } for (int i = 0; i < 10000; i++) { for (int j = 0; j < 50; j++) { metro.makeMetroStep(path); } ham.evalPath(path); x2.evalPath(path); // double E = 0; // for (int i = 0; i < N; i++) { // E += path[i] * path[i]; // } // E = E / N; // std::cout << E << std::endl; std::cout << "Energy is: " << omega * omega * x2.getMean() << " Hamiltonian is: " << ham.getMean() << std::endl; } // for(int i = 0; i<N; i++){ // std::cout << path[i] << std::endl; // } cudaFree(path); }
9b9efda0ad252d6fc1940831a9372f5b672ac0b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void gpu_simple_kernel(float* a, float* b, float* c, int N) { //int thread_idx = threadIdx.x; int idx = blockIdx.x*blockDim.x + threadIdx.x; if ( idx >= N) return; #define PRINT_IDS #if !defined( __CUDA_ARCH__) || (__CUDA_ARCH__ >= 200 ) && defined(PRINT_IDS) // Check nvcc compiler gencode // at least -gencode=arch=compute_20,code=\"sm_20,compute_20\" should be set printf("thread: %3d - block: %3d - threadIdx: %3d, warp: %3d\n", idx, blockIdx.x, threadIdx.x, threadIdx.x/warpSize ); #endif c[idx] = a[idx] * b[idx]; }
9b9efda0ad252d6fc1940831a9372f5b672ac0b6.cu
#include "includes.h" __global__ void gpu_simple_kernel(float* a, float* b, float* c, int N) { //int thread_idx = threadIdx.x; int idx = blockIdx.x*blockDim.x + threadIdx.x; if ( idx > N) return; #define PRINT_IDS #if !defined( __CUDA_ARCH__) || (__CUDA_ARCH__ >= 200 ) && defined(PRINT_IDS) // Check nvcc compiler gencode // at least -gencode=arch=compute_20,code=\"sm_20,compute_20\" should be set printf("thread: %3d - block: %3d - threadIdx: %3d, warp: %3d\n", idx, blockIdx.x, threadIdx.x, threadIdx.x/warpSize ); #endif c[idx] = a[idx] * b[idx]; }
e9f93bb2581f75f0560742a26bf87b0b30a7e8fa.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <random> #include <hip/hip_runtime.h> // #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/sort.h> #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include "../utils.h" #include "utilsGPU.cuh" #include "cudaErrorUtils.cu" #include "parallelKernels.cuh" #include "hip/hip_runtime_api.h" floating_t *x, *w, *ancestorX, *prefixSum; int* cumulativeOffspring; floating_t *planeXDev, *planeObsDev, *mapApproxDev; hiprandState_t* randStates; bool initializedRandStates = false; const bool DEBUG_MSE = false; double MSE; double MSEAvg = 0; int counter = 0; using namespace std; floating_t *sortW; void calcMSE() { cudaSafeCall(hipMemcpy(sortW, w, NUM_PARTICLES * sizeof(floating_t), hipMemcpyDefault)); double weightSum = thrust::reduce(thrust::device, sortW, sortW + NUM_PARTICLES); if(RESAMPLING_STRATEGY == SYSTEMATIC) { for(int i = 0; i < NUM_PARTICLES; i++) { double expectedOffspring = (w[i] * NUM_PARTICLES) / weightSum; int start = i == 0 ? 0 : cumulativeOffspring[i - 1]; int numCurrentOffspring = cumulativeOffspring[i] - start; MSE += pow(numCurrentOffspring - expectedOffspring, 2); } } else if(RESAMPLING_STRATEGY == REJECTION) { /*for(int i = 0; i < NUM_PARTICLES; i++) { double expectedOffspring = (w[i] * NUM_PARTICLES) / weightSum; }*/ } } void runSMC(floating_t* planeX, floating_t* planeObs) { default_random_engine generator; generator.seed(time(NULL)); uniform_real_distribution<floating_t> uniDist(0.0, 1.0); MSE = 0; // Initialize int numThreadsPerBlock = 64; int numBlocks = (NUM_PARTICLES + numThreadsPerBlock - 1) / numThreadsPerBlock; if(! initializedRandStates) { printf("Initializing rand states...\n"); hipLaunchKernelGGL(( initRandStates), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, randStates); // Apparently slow! 
hipDeviceSynchronize(); printf("Initialized!\n"); initializedRandStates = true; } // hipProfilerStart(); hipLaunchKernelGGL(( initX), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, randStates, x); cudaCheckError(); //printArray(x, NUM_PARTICLES); // First weights update hipLaunchKernelGGL(( weigh), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, randStates, x, w, planeObs[0], mapApproxDev); cudaCheckError(); hipDeviceSynchronize(); if(LOG_WEIGHTS) { if(RESAMPLING_STRATEGY != REJECTION) { floating_t m = *(thrust::max_element(thrust::device, w, w + NUM_PARTICLES)); // should be on device // floating_t m = log(maxPDFObs()); // not sure if this is okay // floating_t m = maxValue(w, NUM_PARTICLES); // just for test hipLaunchKernelGGL(( scaleWeightsAndExp), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, w, m); cudaCheckError(); } } if(RESAMPLING_STRATEGY == MULTINOMIAL) { floating_t weightSum = thrust::reduce(w, w + NUM_PARTICLES); hipLaunchKernelGGL(( normalizeWeights), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, w, weightSum); cudaCheckError(); } for(int t = 0; t < TIME_STEPS; t++) { // Resample if(RESAMPLING_STRATEGY == MULTINOMIAL) hipLaunchKernelGGL(( sampleAncestorCategorical), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, randStates, x, w, ancestorX); // rip performance else if(RESAMPLING_STRATEGY == SYSTEMATIC) { thrust::inclusive_scan(thrust::device, w, w + NUM_PARTICLES, prefixSum); // prefix sum floating_t u = uniDist(generator); hipLaunchKernelGGL(( systematicCumulativeOffspring), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, prefixSum, cumulativeOffspring, u); // 0.00909617 seconds for N=2^18 cudaCheckError(); hipLaunchKernelGGL(( cumulativeOffspringToAncestor), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, cumulativeOffspring, ancestorX, x); } else if(RESAMPLING_STRATEGY == REJECTION) { // empirical // floating_t m = *(thrust::max_element(thrust::device, w, w + NUM_PARTICLES)); // done twice now, if log weights? can we compare log weights instead? // alternative: approximated if(LOG_WEIGHTS) { floating_t m = log(maxPDFObs()); hipLaunchKernelGGL(( rejectionAncestorsLog), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, randStates, x, w, ancestorX, m); } else { floating_t m = maxPDFObs(); hipLaunchKernelGGL(( rejectionAncestors), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, randStates, x, w, ancestorX, m); } } cudaCheckError(); if(DEBUG_MSE) calcMSE(); // assignAncestor<<<numBlocks, numThreadsPerBlock>>>(x, ancestorX); // cudaCheckError(); // Propagate & Update weights if(t < TIME_STEPS-1) { hipLaunchKernelGGL(( assignPropagateWeigh), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, randStates, x, w, planeObs[t+1], mapApproxDev, ancestorX); cudaCheckError(); if(LOG_WEIGHTS) { if(RESAMPLING_STRATEGY != REJECTION) { floating_t m = *(thrust::max_element(thrust::device, w, w + NUM_PARTICLES)); // should be on device // floating_t m = maxValue(w, NUM_PARTICLES); // just for test // floating_t m = log(maxPDFObs()); // not sure if this is okay hipLaunchKernelGGL(( scaleWeightsAndExp), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, w, m); cudaCheckError(); } } } else hipLaunchKernelGGL(( assignPropagate), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, randStates, x, ancestorX); cudaCheckError(); hipDeviceSynchronize(); // necessary? 
if(RESAMPLING_STRATEGY == MULTINOMIAL) { floating_t weightSum = thrust::reduce(w, w + NUM_PARTICLES); hipLaunchKernelGGL(( normalizeWeights), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, w, weightSum); cudaCheckError(); } // CURAND_CALL(hiprandDestroyDistribution(catDist)); // printArray(x, NUM_PARTICLES); // printStatus(x, w, planeX, t); } //hipProfilerStop(); if(DEBUG_MSE) { MSE /= TIME_STEPS; if(MSE/NUM_PARTICLES > 20) { printf("MSE: %f, MSE/N: %f\n", MSE, MSE/NUM_PARTICLES); printf("AvgMSE/N: %f\n", MSEAvg / counter); } MSEAvg += MSE / NUM_PARTICLES; counter++; if(counter >= 20) { printf("AvgMSE/N: %f\n", MSEAvg / counter); } } //printStatus(x, w, planeX, TIME_STEPS-1); } void smc(floating_t* planeX, floating_t* planeObs) { // hiprandDiscreteDistribution_t catDist; // cudaSafeCall(hipDeviceSetCacheConfig(hipFuncCachePreferL1)); // Transfer map to device hipMalloc(&mapApproxDev, MAP_SIZE * sizeof(floating_t)); cudaSafeCall(hipMemcpy(mapApproxDev, getMapApproxArr(), MAP_SIZE * sizeof(floating_t), hipMemcpyHostToDevice)); // Allocate GPU compatible memory cudaSafeCall(hipMallocManaged(&x, NUM_PARTICLES * sizeof(floating_t))); cudaSafeCall(hipMallocManaged(&w, NUM_PARTICLES * sizeof(floating_t))); cudaSafeCall(hipMallocManaged(&ancestorX, NUM_PARTICLES * sizeof(floating_t))); cudaSafeCall(hipMallocManaged(&sortW, NUM_PARTICLES * sizeof(floating_t))); if(RESAMPLING_STRATEGY == SYSTEMATIC) { cudaSafeCall(hipMallocManaged(&prefixSum, NUM_PARTICLES * sizeof(floating_t))); cudaSafeCall(hipMallocManaged(&cumulativeOffspring, NUM_PARTICLES * sizeof(int))); } cudaSafeCall(hipMallocManaged(&planeXDev, TIME_STEPS * sizeof(floating_t))); cudaSafeCall(hipMallocManaged(&planeObsDev, TIME_STEPS * sizeof(floating_t))); if(! initializedRandStates) cudaSafeCall(hipMallocManaged(&randStates, NUM_PARTICLES * sizeof(hiprandState_t))); // Transfer to Cuda managed memory cudaSafeCall(hipMemcpy(planeXDev, planeX, TIME_STEPS * sizeof(floating_t), hipMemcpyDefault)); cudaSafeCall(hipMemcpy(planeObsDev, planeObs, TIME_STEPS * sizeof(floating_t), hipMemcpyDefault)); runSMC(planeX, planeObs); // Print result to file? hipFree(x); hipFree(w); hipFree(ancestorX); if(RESAMPLING_STRATEGY == SYSTEMATIC) { hipFree(prefixSum); hipFree(cumulativeOffspring); } hipFree(planeXDev); hipFree(planeObsDev); hipFree(mapApproxDev); hipFree(sortW); } void freeRandStates() { hipFree(randStates); } /* (min exec time over 10.000 trials) N=2^10 singe_precision REJECTION: 0.000804418 seconds SYSTEMATIC: 0.00122542 seconds double_precision REJECTION: 0.000796794 seconds SYSTEMATIC: 0.00128205 seconds N=2^15 (min exec time over 1000 trials) singe_precision REJECTION: 0.00513635 seconds SYSTEMATIC: 0.00403192 seconds double_precision REJECTION: 0.00510502 seconds SYSTEMATIC: 0.00442799 seconds N=2^15, with log-weights in systematic, without approx max in systematic singe_precision REJECTION: 0.00517758 seconds SYSTEMATIC: 0.00525198 seconds double_precision REJECTION: 0.00511814 seconds SYSTEMATIC: 0.0059867 seconds N=2^20 (min exec time over 50 trials) singe_precision REJECTION: 0.119812 seconds SYSTEMATIC: 0.0690664 seconds double_precision REJECTION: 0.118409 seconds SYSTEMATIC: 0.083449 seconds N=2^20, with log-weights in systematic, without approx max in systematic single_precision REJECTION: 0.119864 seconds double_precision SYSTEMATIC: 0.0909 seconds */
e9f93bb2581f75f0560742a26bf87b0b30a7e8fa.cu
#include <stdio.h> #include <random> #include <cuda.h> // #include <curand.h> #include <curand_kernel.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/sort.h> #include "device_launch_parameters.h" #include "cuda_runtime.h" #include "../utils.h" #include "utilsGPU.cuh" #include "cudaErrorUtils.cu" #include "parallelKernels.cuh" #include "cuda_profiler_api.h" floating_t *x, *w, *ancestorX, *prefixSum; int* cumulativeOffspring; floating_t *planeXDev, *planeObsDev, *mapApproxDev; curandState* randStates; bool initializedRandStates = false; const bool DEBUG_MSE = false; double MSE; double MSEAvg = 0; int counter = 0; using namespace std; floating_t *sortW; void calcMSE() { cudaSafeCall(cudaMemcpy(sortW, w, NUM_PARTICLES * sizeof(floating_t), cudaMemcpyDefault)); double weightSum = thrust::reduce(thrust::device, sortW, sortW + NUM_PARTICLES); if(RESAMPLING_STRATEGY == SYSTEMATIC) { for(int i = 0; i < NUM_PARTICLES; i++) { double expectedOffspring = (w[i] * NUM_PARTICLES) / weightSum; int start = i == 0 ? 0 : cumulativeOffspring[i - 1]; int numCurrentOffspring = cumulativeOffspring[i] - start; MSE += pow(numCurrentOffspring - expectedOffspring, 2); } } else if(RESAMPLING_STRATEGY == REJECTION) { /*for(int i = 0; i < NUM_PARTICLES; i++) { double expectedOffspring = (w[i] * NUM_PARTICLES) / weightSum; }*/ } } void runSMC(floating_t* planeX, floating_t* planeObs) { default_random_engine generator; generator.seed(time(NULL)); uniform_real_distribution<floating_t> uniDist(0.0, 1.0); MSE = 0; // Initialize int numThreadsPerBlock = 64; int numBlocks = (NUM_PARTICLES + numThreadsPerBlock - 1) / numThreadsPerBlock; if(! initializedRandStates) { printf("Initializing rand states...\n"); initRandStates<<<numBlocks, numThreadsPerBlock>>>(randStates); // Apparently slow! 
cudaDeviceSynchronize(); printf("Initialized!\n"); initializedRandStates = true; } // cudaProfilerStart(); initX<<<numBlocks, numThreadsPerBlock>>>(randStates, x); cudaCheckError(); //printArray(x, NUM_PARTICLES); // First weights update weigh<<<numBlocks, numThreadsPerBlock>>>(randStates, x, w, planeObs[0], mapApproxDev); cudaCheckError(); cudaDeviceSynchronize(); if(LOG_WEIGHTS) { if(RESAMPLING_STRATEGY != REJECTION) { floating_t m = *(thrust::max_element(thrust::device, w, w + NUM_PARTICLES)); // should be on device // floating_t m = log(maxPDFObs()); // not sure if this is okay // floating_t m = maxValue(w, NUM_PARTICLES); // just for test scaleWeightsAndExp<<<numBlocks, numThreadsPerBlock>>>(w, m); cudaCheckError(); } } if(RESAMPLING_STRATEGY == MULTINOMIAL) { floating_t weightSum = thrust::reduce(w, w + NUM_PARTICLES); normalizeWeights<<<numBlocks, numThreadsPerBlock>>>(w, weightSum); cudaCheckError(); } for(int t = 0; t < TIME_STEPS; t++) { // Resample if(RESAMPLING_STRATEGY == MULTINOMIAL) sampleAncestorCategorical<<<numBlocks, numThreadsPerBlock>>>(randStates, x, w, ancestorX); // rip performance else if(RESAMPLING_STRATEGY == SYSTEMATIC) { thrust::inclusive_scan(thrust::device, w, w + NUM_PARTICLES, prefixSum); // prefix sum floating_t u = uniDist(generator); systematicCumulativeOffspring<<<numBlocks, numThreadsPerBlock>>>(prefixSum, cumulativeOffspring, u); // 0.00909617 seconds for N=2^18 cudaCheckError(); cumulativeOffspringToAncestor<<<numBlocks, numThreadsPerBlock>>>(cumulativeOffspring, ancestorX, x); } else if(RESAMPLING_STRATEGY == REJECTION) { // empirical // floating_t m = *(thrust::max_element(thrust::device, w, w + NUM_PARTICLES)); // done twice now, if log weights? can we compare log weights instead? // alternative: approximated if(LOG_WEIGHTS) { floating_t m = log(maxPDFObs()); rejectionAncestorsLog<<<numBlocks, numThreadsPerBlock>>>(randStates, x, w, ancestorX, m); } else { floating_t m = maxPDFObs(); rejectionAncestors<<<numBlocks, numThreadsPerBlock>>>(randStates, x, w, ancestorX, m); } } cudaCheckError(); if(DEBUG_MSE) calcMSE(); // assignAncestor<<<numBlocks, numThreadsPerBlock>>>(x, ancestorX); // cudaCheckError(); // Propagate & Update weights if(t < TIME_STEPS-1) { assignPropagateWeigh<<<numBlocks, numThreadsPerBlock>>>(randStates, x, w, planeObs[t+1], mapApproxDev, ancestorX); cudaCheckError(); if(LOG_WEIGHTS) { if(RESAMPLING_STRATEGY != REJECTION) { floating_t m = *(thrust::max_element(thrust::device, w, w + NUM_PARTICLES)); // should be on device // floating_t m = maxValue(w, NUM_PARTICLES); // just for test // floating_t m = log(maxPDFObs()); // not sure if this is okay scaleWeightsAndExp<<<numBlocks, numThreadsPerBlock>>>(w, m); cudaCheckError(); } } } else assignPropagate<<<numBlocks, numThreadsPerBlock>>>(randStates, x, ancestorX); cudaCheckError(); cudaDeviceSynchronize(); // necessary? 
if(RESAMPLING_STRATEGY == MULTINOMIAL) { floating_t weightSum = thrust::reduce(w, w + NUM_PARTICLES); normalizeWeights<<<numBlocks, numThreadsPerBlock>>>(w, weightSum); cudaCheckError(); } // CURAND_CALL(curandDestroyDistribution(catDist)); // printArray(x, NUM_PARTICLES); // printStatus(x, w, planeX, t); } //cudaProfilerStop(); if(DEBUG_MSE) { MSE /= TIME_STEPS; if(MSE/NUM_PARTICLES > 20) { printf("MSE: %f, MSE/N: %f\n", MSE, MSE/NUM_PARTICLES); printf("AvgMSE/N: %f\n", MSEAvg / counter); } MSEAvg += MSE / NUM_PARTICLES; counter++; if(counter >= 20) { printf("AvgMSE/N: %f\n", MSEAvg / counter); } } //printStatus(x, w, planeX, TIME_STEPS-1); } void smc(floating_t* planeX, floating_t* planeObs) { // curandDiscreteDistribution_t catDist; // cudaSafeCall(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); // Transfer map to device cudaMalloc(&mapApproxDev, MAP_SIZE * sizeof(floating_t)); cudaSafeCall(cudaMemcpy(mapApproxDev, getMapApproxArr(), MAP_SIZE * sizeof(floating_t), cudaMemcpyHostToDevice)); // Allocate GPU compatible memory cudaSafeCall(cudaMallocManaged(&x, NUM_PARTICLES * sizeof(floating_t))); cudaSafeCall(cudaMallocManaged(&w, NUM_PARTICLES * sizeof(floating_t))); cudaSafeCall(cudaMallocManaged(&ancestorX, NUM_PARTICLES * sizeof(floating_t))); cudaSafeCall(cudaMallocManaged(&sortW, NUM_PARTICLES * sizeof(floating_t))); if(RESAMPLING_STRATEGY == SYSTEMATIC) { cudaSafeCall(cudaMallocManaged(&prefixSum, NUM_PARTICLES * sizeof(floating_t))); cudaSafeCall(cudaMallocManaged(&cumulativeOffspring, NUM_PARTICLES * sizeof(int))); } cudaSafeCall(cudaMallocManaged(&planeXDev, TIME_STEPS * sizeof(floating_t))); cudaSafeCall(cudaMallocManaged(&planeObsDev, TIME_STEPS * sizeof(floating_t))); if(! initializedRandStates) cudaSafeCall(cudaMallocManaged(&randStates, NUM_PARTICLES * sizeof(curandState))); // Transfer to Cuda managed memory cudaSafeCall(cudaMemcpy(planeXDev, planeX, TIME_STEPS * sizeof(floating_t), cudaMemcpyDefault)); cudaSafeCall(cudaMemcpy(planeObsDev, planeObs, TIME_STEPS * sizeof(floating_t), cudaMemcpyDefault)); runSMC(planeX, planeObs); // Print result to file? cudaFree(x); cudaFree(w); cudaFree(ancestorX); if(RESAMPLING_STRATEGY == SYSTEMATIC) { cudaFree(prefixSum); cudaFree(cumulativeOffspring); } cudaFree(planeXDev); cudaFree(planeObsDev); cudaFree(mapApproxDev); cudaFree(sortW); } void freeRandStates() { cudaFree(randStates); } /* (min exec time over 10.000 trials) N=2^10 singe_precision REJECTION: 0.000804418 seconds SYSTEMATIC: 0.00122542 seconds double_precision REJECTION: 0.000796794 seconds SYSTEMATIC: 0.00128205 seconds N=2^15 (min exec time over 1000 trials) singe_precision REJECTION: 0.00513635 seconds SYSTEMATIC: 0.00403192 seconds double_precision REJECTION: 0.00510502 seconds SYSTEMATIC: 0.00442799 seconds N=2^15, with log-weights in systematic, without approx max in systematic singe_precision REJECTION: 0.00517758 seconds SYSTEMATIC: 0.00525198 seconds double_precision REJECTION: 0.00511814 seconds SYSTEMATIC: 0.0059867 seconds N=2^20 (min exec time over 50 trials) singe_precision REJECTION: 0.119812 seconds SYSTEMATIC: 0.0690664 seconds double_precision REJECTION: 0.118409 seconds SYSTEMATIC: 0.083449 seconds N=2^20, with log-weights in systematic, without approx max in systematic single_precision REJECTION: 0.119864 seconds double_precision SYSTEMATIC: 0.0909 seconds */
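The SMC code above repeatedly launches kernels that draw from a per-particle `curandState` array (HIP: `hiprandState_t`) initialised once by `initRandStates`, which lives in parallelKernels.cuh and is not shown here. Purely as an assumption about its shape, a typical initialisation of this kind looks like the sketch below; the seed handling and the `draw_uniform` consumer are illustrative, not the project's actual kernels.

#include <curand_kernel.h>

// Hypothetical shape of initRandStates: one curandState per particle, seeded with a
// common seed and a per-thread sequence number (HIP build: hiprandState_t / hiprand_init).
__global__ void init_rand_states(curandState *states, unsigned long long seed, int numParticles) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < numParticles)
        curand_init(seed, /*sequence=*/i, /*offset=*/0, &states[i]);
}

__global__ void draw_uniform(curandState *states, float *u, int numParticles) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < numParticles)
        u[i] = curand_uniform(&states[i]);        // consumes and advances states[i]
}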
556aea4bd0480ac7085dd8381f7d1324cc2c9c93.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Technique 1 //Count array act as a multiple bucket set //frequent-items-using-CUDA #include<iostream> #include<cuda.h> #include<cuda_runtime.h> #include<stdio.h> #include <stdlib.h> #include<time.h> #include<fstream> using namespace std; __global__ void addKernel(int *a,int *count_d,int *nOfItemSet_d) { int i=blockIdx.x*blockDim.x+threadIdx.x; int val=a[blockIdx.x*blockDim.x+threadIdx.x]; atomicAdd(&count_d[1000*(int(i/5000))+val],1); //right //count_d[threadIdx.x]=a[threadIdx.x]; if(i<1000){ for(int j=1;j< *nOfItemSet_d;i++){ count_d[i]=count_d[i]+count_d[j*1000+i]; } } } int main(){ int n; cout<<"enter number of transaction"; cin>>n; ifstream in("out.txt"); int *a_d,*a_h,*count_d,*count_h,*nOfItemSet_d,*nOfItemSet_h; int size=n*sizeof(int); int size1=1000*sizeof(int); a_h=(int*)malloc(n*sizeof(int)); count_h=(int*)malloc(1000*sizeof(int)); nOfItemSet_h=(int*)malloc(sizeof(int)); for(int i=0;i<n;i++) { in>>a_h[i]; } for(int i=0;i<1000;i++) count_h[i]=0; *nOfItemSet_h=((n-1)/5000)+1; hipMalloc((void**)&a_d,size); hipMalloc((void**)&count_d,size1); hipMalloc((void**)&nOfItemSet_d,sizeof(int)); hipMemcpy(a_d,a_h,size,hipMemcpyHostToDevice); hipMemcpy(count_d,count_h,size1,hipMemcpyHostToDevice); hipMemcpy(nOfItemSet_d,nOfItemSet_h,sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( addKernel), dim3(((n-1)/256)+1),dim3(256), 0, 0, a_d,count_d,nOfItemSet_d); hipMemcpy(count_h,count_d,size1,hipMemcpyDeviceToHost); hipFree(a_d); hipFree(count_d); for(int i=0;i<1000;i++) cout<<i<<" "<<count_h[i]<<endl; //cout<<"time taken: "<<(double) (end-start) / CLOCKS_PER_SEC * 1000.0; return 0; }
556aea4bd0480ac7085dd8381f7d1324cc2c9c93.cu
//Technique 1 //Count array act as a multiple bucket set //frequent-items-using-CUDA #include<iostream> #include<cuda.h> #include<cuda_runtime.h> #include<stdio.h> #include <stdlib.h> #include<time.h> #include<fstream> using namespace std; __global__ void addKernel(int *a,int *count_d,int *nOfItemSet_d) { int i=blockIdx.x*blockDim.x+threadIdx.x; int val=a[blockIdx.x*blockDim.x+threadIdx.x]; atomicAdd(&count_d[1000*(int(i/5000))+val],1); //right //count_d[threadIdx.x]=a[threadIdx.x]; if(i<1000){ for(int j=1;j< *nOfItemSet_d;i++){ count_d[i]=count_d[i]+count_d[j*1000+i]; } } } int main(){ int n; cout<<"enter number of transaction"; cin>>n; ifstream in("out.txt"); int *a_d,*a_h,*count_d,*count_h,*nOfItemSet_d,*nOfItemSet_h; int size=n*sizeof(int); int size1=1000*sizeof(int); a_h=(int*)malloc(n*sizeof(int)); count_h=(int*)malloc(1000*sizeof(int)); nOfItemSet_h=(int*)malloc(sizeof(int)); for(int i=0;i<n;i++) { in>>a_h[i]; } for(int i=0;i<1000;i++) count_h[i]=0; *nOfItemSet_h=((n-1)/5000)+1; cudaMalloc((void**)&a_d,size); cudaMalloc((void**)&count_d,size1); cudaMalloc((void**)&nOfItemSet_d,sizeof(int)); cudaMemcpy(a_d,a_h,size,cudaMemcpyHostToDevice); cudaMemcpy(count_d,count_h,size1,cudaMemcpyHostToDevice); cudaMemcpy(nOfItemSet_d,nOfItemSet_h,sizeof(int),cudaMemcpyHostToDevice); addKernel<<<((n-1)/256)+1,256>>>(a_d,count_d,nOfItemSet_d); cudaMemcpy(count_h,count_d,size1,cudaMemcpyDeviceToHost); cudaFree(a_d); cudaFree(count_d); for(int i=0;i<1000;i++) cout<<i<<" "<<count_h[i]<<endl; //cout<<"time taken: "<<(double) (end-start) / CLOCKS_PER_SEC * 1000.0; return 0; }
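Two issues in the pair above are worth flagging: the reduction loop `for(int j=1;j< *nOfItemSet_d;i++)` increments `i` instead of `j`, and `count_d` is allocated as a single 1000-entry bucket (`size1`) even though the kernel writes `nOfItemSet` bucket sets of 1000 counters each. The in-kernel reduction also assumes all blocks have finished counting, which a single launch does not guarantee. The sketch below keeps the same multi-bucket counting idea but sizes the buffer per bucket set and splits counting and merging into two launches; the names and the 1000/5000 constants follow the original, while the two-kernel split is my assumption about the intended fix.

#include <cuda_runtime.h>

// Phase 1: each chunk of 5000 transactions gets its own 1000-entry bucket set.
__global__ void count_items(const int *items, int *buckets, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        atomicAdd(&buckets[1000 * (i / 5000) + items[i]], 1);
}

// Phase 2 (separate launch, so all counting is complete): fold the extra bucket sets into the first.
__global__ void merge_buckets(int *buckets, int nBucketSets) {
    int item = blockIdx.x * blockDim.x + threadIdx.x;
    if (item < 1000)
        for (int j = 1; j < nBucketSets; ++j)     // j++, not i++ as in the original
            buckets[item] += buckets[j * 1000 + item];
}

void count_frequent_items(const int *h_items, int *h_counts, int n) {
    int nBucketSets = (n - 1) / 5000 + 1;
    int *d_items, *d_buckets;
    cudaMalloc(&d_items, n * sizeof(int));
    cudaMalloc(&d_buckets, nBucketSets * 1000 * sizeof(int));   // one bucket set per chunk
    cudaMemcpy(d_items, h_items, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_buckets, 0, nBucketSets * 1000 * sizeof(int));

    count_items<<<(n + 255) / 256, 256>>>(d_items, d_buckets, n);
    merge_buckets<<<4, 256>>>(d_buckets, nBucketSets);          // 1024 threads cover items 0..999

    cudaMemcpy(h_counts, d_buckets, 1000 * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_items); cudaFree(d_buckets);
}

Splitting the work into two launches is the simplest way to order the phases: a kernel launch boundary acts as a grid-wide synchronisation point, which `__syncthreads()` inside one kernel cannot provide across blocks.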
e751562bdc4015103fa941ec34b97c001681a423.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/native/TensorIterator.h> #include <ATen/TensorUtils.h> #include <ATen/TensorOperators.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/Resize.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/binary_cross_entropy_backward_native.h> #include <ATen/ops/binary_cross_entropy_native.h> #include <ATen/ops/empty_like.h> #include <ATen/ops/exp.h> #include <ATen/ops/nll_loss_backward_native.h> #include <ATen/ops/nll_loss_forward_native.h> #include <ATen/ops/squeeze.h> #endif constexpr float EPSILON = 1e-12; namespace { using namespace at; void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) { at::TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad) .add_input(input) .add_input(target) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() { at::native::gpu_kernel(iter, [] GPU_LAMBDA ( scalar_t grad_val, scalar_t input_val, scalar_t target_val ) -> scalar_t { const scalar_t one = 1; const scalar_t epsilon = EPSILON; scalar_t grad_input_denominator = max( (one - input_val) * input_val, epsilon ); return grad_val * (input_val - target_val) / grad_input_denominator; } ); }); } } // namespace namespace at { namespace native { Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor loss = at::empty_like(input); return at::native::binary_cross_entropy_out_cuda( input, target, weight, reduction, loss); } Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor loss_squeezed = at::squeeze(loss); TensorIterator iter = TensorIteratorConfig() .add_output(loss_squeezed) .add_owned_input(at::squeeze(input)) .add_owned_input(at::squeeze(target)) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t { const scalar_t zero = 0; const scalar_t one = 1; const scalar_t neg_100 = -100; CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one); scalar_t log_input_val = ::log(input_val); scalar_t log_1_minus_input_val = ::log(one - input_val); log_input_val = ::max(log_input_val, neg_100); log_1_minus_input_val = ::max(log_1_minus_input_val, neg_100); return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val); } ); }); if (weight.defined()) { loss.mul_(weight); } if (reduction != at::Reduction::None) { Tensor loss_reduced; if (reduction == 
at::Reduction::Mean) { loss_reduced = loss.mean(); } else if (reduction == at::Reduction::Sum) { loss_reduced = loss.sum(); } loss.resize_as_(loss_reduced).copy_(loss_reduced); } return loss; } Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor grad_input = at::empty_like(input); return at::native::binary_cross_entropy_backward_out_cuda( grad, input, target, weight, reduction, grad_input); } Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor grad_expand = grad.expand_as(input); binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target); if (weight.defined()) { grad_input.mul_(weight); } if (reduction == at::Reduction::Mean) { grad_input.div_(input.numel()); } return grad_input; } // ----------------------------------- // nll_loss // ----------------------------------- namespace { constexpr int NLL_LOSS_THREADS = 32; #define AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, NAME, ...) \ AT_DISPATCH_SWITCH(TYPE, NAME, \ AT_PRIVATE_CASE_TYPE_USING_HINT(at::ScalarType::Byte, index_t, __VA_ARGS__) \ AT_PRIVATE_CASE_TYPE_USING_HINT(at::ScalarType::Long, index_t, __VA_ARGS__)) template <typename scalar_t, typename index_t> __global__ void nll_loss_forward_no_reduce_cuda_kernel( int64_t batch_size, PackedTensorAccessor64<scalar_t, 2> input, index_t* target, scalar_t* output, scalar_t* weights, int n_classes, int ignore_index) { CUDA_KERNEL_LOOP(index, batch_size) { int cur_target = target[index]; if (cur_target == ignore_index) { output[index] = static_cast<scalar_t>(0); continue; } CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes); auto cur_weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1); output[index] = -cur_weight * input[index][cur_target]; } } template <typename scalar_t, typename index_t> __global__ void nll_loss_forward_reduce_cuda_kernel_1d( scalar_t* output, scalar_t* total_weight, scalar_t* input, index_t* target, scalar_t* weights, bool size_average, int n_classes, int64_t ignore_index) { CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); int t = static_cast<int>(*target); if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); const auto cur_weight = weights != nullptr ? weights[t] : scalar_t{1}; *total_weight = cur_weight; if (size_average) { // If we try to normalize a zero then we return a NaN if (cur_weight == 0) { *output = std::numeric_limits<scalar_t>::quiet_NaN(); } else { *output = -input[t]; } } else { *output = -cur_weight * input[t]; } } else { // If the only element was omited, we get 0. 
See the discussion in // https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162 *output = scalar_t{0}; } } template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void nll_loss_forward_reduce_cuda_kernel_2d( scalar_t* output, scalar_t* total_weight, scalar_t* input, index_t* target, scalar_t* weights, bool size_average, int nframe, int ndim, int n_classes, int64_t ignore_index) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) __shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS], acc_weight[NLL_LOSS_THREADS]; sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0); acc_weight[threadIdx.x] = static_cast<accscalar_t>(0); for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) { int t = target[i]; if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); scalar_t cur_weight = weights != nullptr ? weights[t] : static_cast<scalar_t>(1); sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight; acc_weight[threadIdx.x] += cur_weight; } } __syncthreads(); if (threadIdx.x == 0) { accscalar_t output_acc = 0; accscalar_t total_weight_acc = 0; for (int i = 0; i < NLL_LOSS_THREADS; ++i) { output_acc += sh_inputs[i]; total_weight_acc += acc_weight[i]; } *total_weight = static_cast<scalar_t>(total_weight_acc); if (size_average) { *output = static_cast<scalar_t>(output_acc / total_weight_acc); } else { *output = static_cast<scalar_t>(output_acc); } } } void nll_loss_forward_out_cuda_template( const Tensor& output, const Tensor& total_weight, const Tensor& input_, const Tensor& target_, const Tensor& weight, int64_t reduction, int64_t ignore_index) { auto input = *input_.expect_contiguous(); auto target = *target_.expect_contiguous(); int64_t n_classes = input.size(-1); int64_t n_dims = input.dim(); int64_t batch_size = n_dims == 1 ? 1 : input.size(0); auto weight_ = weight.defined() ? weight.contiguous() : weight; if (reduction == Reduction::None && n_dims == 2) { at::native::resize_output(output, {batch_size}); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with // 0 blocks. return; } AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_no_reduce_cuda_kernel", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_no_reduce_cuda_kernel_index", [&] { hipLaunchKernelGGL(( nll_loss_forward_no_reduce_cuda_kernel<scalar_t, index_t>) , dim3(at::cuda::detail::GET_BLOCKS(batch_size)), dim3(at::cuda::detail::CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), batch_size, input.packed_accessor64<scalar_t, 2>(), target.data_ptr<index_t>(), output.data_ptr<scalar_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return; } // produce scalar outputs for the reduction case at::native::resize_output(output, {}); total_weight.resize_({}); if (target.numel() == 0) { // Here target (and input) have zero elements // Mean reduction on empty tensors produces NaN. 
See the discussion in // https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162 if (reduction == Reduction::Mean) { output.fill_(std::numeric_limits<double>::quiet_NaN()); } else { output.zero_(); } total_weight.zero_(); return; } if (n_dims == 1) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_1d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_1d_index", [&] { hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_1d<scalar_t, index_t>) , dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, reduction == at::Reduction::Mean, n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); } else if (n_dims == 2) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_2d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_2d_index", [&] { using accscalar_t = at::acc_type<scalar_t, /*is_cuda*/true>; hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, accscalar_t, index_t>) , dim3(1), dim3(NLL_LOSS_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, reduction == at::Reduction::Mean, input.size(0), input.size(1), n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); } } template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_no_reduce_cuda_kernel( int batch_size, index_t *target, PackedTensorAccessor64<scalar_t, 1> grad_output, PackedTensorAccessor64<scalar_t, 2> grad_input, scalar_t *weights, int n_classes, int ignore_index) { CUDA_KERNEL_LOOP(index, batch_size) { int cur_target = target[index]; if (cur_target == ignore_index) { continue; } CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes); scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1); grad_input[index][cur_target] = -weight * grad_output[index]; } }; template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_reduce_cuda_kernel_1d( scalar_t *grad_input, scalar_t *grad_output, scalar_t *weights, index_t *target, scalar_t *total_weight, bool size_average, int n_classes, int64_t ignore_index ) { int t = static_cast<int>(*target); if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); const auto grad = -(size_average ? *grad_output / *total_weight : *grad_output); grad_input[t] = weights != nullptr ? weights[t] * grad : grad; } } template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_reduce_cuda_kernel_2d( scalar_t* grad_input, scalar_t* grad_output, index_t* target, scalar_t* weights, scalar_t* total_weight, bool size_average, int nframe, int ndim, int n_classes, int64_t ignore_index) { const auto grad = -(size_average ? *grad_output / *total_weight : *grad_output); for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) { int t = target[i]; if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); grad_input[i * ndim + t] = weights != nullptr ? 
weights[t] * grad : grad; } } } void nll_loss_backward_out_cuda_template( const Tensor& grad_input_, const Tensor& grad_output_, const Tensor& input_, const Tensor& target_, const Tensor& total_weight, const Tensor& weight, int64_t reduction, int64_t ignore_index) { auto target = *target_.expect_contiguous(); auto input = *input_.expect_contiguous(); auto grad_input = *grad_input_.expect_contiguous(); auto grad_output = *grad_output_.expect_contiguous(); int64_t n_dims = input.dim(); int64_t n_classes = input.size(-1); int64_t batch_size = n_dims == 1 ? 1 : input.size(0); auto weight_ = weight.defined() ? weight.contiguous() : weight; if (reduction == at::Reduction::None && n_dims == 2) { if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_no_reduce_cuda_kernel", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_no_reduce_cuda_kernel_index", [&] { hipLaunchKernelGGL(( nll_loss_backward_no_reduce_cuda_kernel<scalar_t, index_t>) , dim3(at::cuda::detail::GET_BLOCKS(batch_size)), dim3(at::cuda::detail::CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), batch_size, target.data_ptr<index_t>(), grad_output.packed_accessor64<scalar_t, 1>(), grad_input.packed_accessor64<scalar_t, 2>(), weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr, n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return; } if (n_dims == 1) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_1d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_1d_index", [&] { hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_1d<scalar_t, index_t>) , dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr, target.data_ptr<index_t>(), total_weight.data_ptr<scalar_t>(), reduction == at::Reduction::Mean, n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_2d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_2d_index", [&] { hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_2d<scalar_t, index_t>) , dim3(1), dim3(NLL_LOSS_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight.defined() ? 
weight_.data_ptr<scalar_t>() : nullptr, total_weight.data_ptr<scalar_t>(), reduction == at::Reduction::Mean, input.size(0), input.size(1), n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); } } #undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES } // namespace TORCH_IMPL_FUNC(nll_loss_forward_out_cuda) (const Tensor& self, const Tensor& target, const OptionalTensorRef weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& output, const Tensor& total_weight) { const Tensor& weight = weight_opt.getTensorRef(); nll_loss_forward_out_cuda_template( output, total_weight, self, target, weight, reduction, ignore_index); } TORCH_IMPL_FUNC(nll_loss_backward_out_cuda) (const Tensor& grad_output, const Tensor& self, const Tensor& target, OptionalTensorRef weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight, const Tensor& grad_input) { const Tensor& weight = weight_opt.getTensorRef(); grad_input.zero_(); nll_loss_backward_out_cuda_template( grad_input, grad_output, self, target, total_weight, weight, reduction, ignore_index); } }} // namespace at::native
e751562bdc4015103fa941ec34b97c001681a423.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/native/TensorIterator.h> #include <ATen/TensorUtils.h> #include <ATen/TensorOperators.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/Resize.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/binary_cross_entropy_backward_native.h> #include <ATen/ops/binary_cross_entropy_native.h> #include <ATen/ops/empty_like.h> #include <ATen/ops/exp.h> #include <ATen/ops/nll_loss_backward_native.h> #include <ATen/ops/nll_loss_forward_native.h> #include <ATen/ops/squeeze.h> #endif constexpr float EPSILON = 1e-12; namespace { using namespace at; void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) { at::TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad) .add_input(input) .add_input(target) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() { at::native::gpu_kernel(iter, [] GPU_LAMBDA ( scalar_t grad_val, scalar_t input_val, scalar_t target_val ) -> scalar_t { const scalar_t one = 1; const scalar_t epsilon = EPSILON; scalar_t grad_input_denominator = max( (one - input_val) * input_val, epsilon ); return grad_val * (input_val - target_val) / grad_input_denominator; } ); }); } } // namespace namespace at { namespace native { Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor loss = at::empty_like(input); return at::native::binary_cross_entropy_out_cuda( input, target, weight, reduction, loss); } Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor loss_squeezed = at::squeeze(loss); TensorIterator iter = TensorIteratorConfig() .add_output(loss_squeezed) .add_owned_input(at::squeeze(input)) .add_owned_input(at::squeeze(target)) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t { const scalar_t zero = 0; const scalar_t one = 1; const scalar_t neg_100 = -100; CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one); scalar_t log_input_val = std::log(input_val); scalar_t log_1_minus_input_val = std::log(one - input_val); log_input_val = std::max(log_input_val, neg_100); log_1_minus_input_val = std::max(log_1_minus_input_val, neg_100); return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val); } ); }); if (weight.defined()) { loss.mul_(weight); } if (reduction != at::Reduction::None) { Tensor loss_reduced; if (reduction == at::Reduction::Mean) { loss_reduced = loss.mean(); } else if (reduction == 
at::Reduction::Sum) { loss_reduced = loss.sum(); } loss.resize_as_(loss_reduced).copy_(loss_reduced); } return loss; } Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor grad_input = at::empty_like(input); return at::native::binary_cross_entropy_backward_out_cuda( grad, input, target, weight, reduction, grad_input); } Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor grad_expand = grad.expand_as(input); binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target); if (weight.defined()) { grad_input.mul_(weight); } if (reduction == at::Reduction::Mean) { grad_input.div_(input.numel()); } return grad_input; } // ----------------------------------- // nll_loss // ----------------------------------- namespace { constexpr int NLL_LOSS_THREADS = 32; #define AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, NAME, ...) \ AT_DISPATCH_SWITCH(TYPE, NAME, \ AT_PRIVATE_CASE_TYPE_USING_HINT(at::ScalarType::Byte, index_t, __VA_ARGS__) \ AT_PRIVATE_CASE_TYPE_USING_HINT(at::ScalarType::Long, index_t, __VA_ARGS__)) template <typename scalar_t, typename index_t> __global__ void nll_loss_forward_no_reduce_cuda_kernel( int64_t batch_size, PackedTensorAccessor64<scalar_t, 2> input, index_t* target, scalar_t* output, scalar_t* weights, int n_classes, int ignore_index) { CUDA_KERNEL_LOOP(index, batch_size) { int cur_target = target[index]; if (cur_target == ignore_index) { output[index] = static_cast<scalar_t>(0); continue; } CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes); auto cur_weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1); output[index] = -cur_weight * input[index][cur_target]; } } template <typename scalar_t, typename index_t> __global__ void nll_loss_forward_reduce_cuda_kernel_1d( scalar_t* output, scalar_t* total_weight, scalar_t* input, index_t* target, scalar_t* weights, bool size_average, int n_classes, int64_t ignore_index) { CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); int t = static_cast<int>(*target); if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); const auto cur_weight = weights != nullptr ? weights[t] : scalar_t{1}; *total_weight = cur_weight; if (size_average) { // If we try to normalize a zero then we return a NaN if (cur_weight == 0) { *output = std::numeric_limits<scalar_t>::quiet_NaN(); } else { *output = -input[t]; } } else { *output = -cur_weight * input[t]; } } else { // If the only element was omited, we get 0. 
See the discussion in // https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162 *output = scalar_t{0}; } } template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void nll_loss_forward_reduce_cuda_kernel_2d( scalar_t* output, scalar_t* total_weight, scalar_t* input, index_t* target, scalar_t* weights, bool size_average, int nframe, int ndim, int n_classes, int64_t ignore_index) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) __shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS], acc_weight[NLL_LOSS_THREADS]; sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0); acc_weight[threadIdx.x] = static_cast<accscalar_t>(0); for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) { int t = target[i]; if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); scalar_t cur_weight = weights != nullptr ? weights[t] : static_cast<scalar_t>(1); sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight; acc_weight[threadIdx.x] += cur_weight; } } __syncthreads(); if (threadIdx.x == 0) { accscalar_t output_acc = 0; accscalar_t total_weight_acc = 0; for (int i = 0; i < NLL_LOSS_THREADS; ++i) { output_acc += sh_inputs[i]; total_weight_acc += acc_weight[i]; } *total_weight = static_cast<scalar_t>(total_weight_acc); if (size_average) { *output = static_cast<scalar_t>(output_acc / total_weight_acc); } else { *output = static_cast<scalar_t>(output_acc); } } } void nll_loss_forward_out_cuda_template( const Tensor& output, const Tensor& total_weight, const Tensor& input_, const Tensor& target_, const Tensor& weight, int64_t reduction, int64_t ignore_index) { auto input = *input_.expect_contiguous(); auto target = *target_.expect_contiguous(); int64_t n_classes = input.size(-1); int64_t n_dims = input.dim(); int64_t batch_size = n_dims == 1 ? 1 : input.size(0); auto weight_ = weight.defined() ? weight.contiguous() : weight; if (reduction == Reduction::None && n_dims == 2) { at::native::resize_output(output, {batch_size}); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with // 0 blocks. return; } AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_no_reduce_cuda_kernel", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_no_reduce_cuda_kernel_index", [&] { nll_loss_forward_no_reduce_cuda_kernel<scalar_t, index_t> <<<at::cuda::detail::GET_BLOCKS(batch_size), at::cuda::detail::CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( batch_size, input.packed_accessor64<scalar_t, 2>(), target.data_ptr<index_t>(), output.data_ptr<scalar_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return; } // produce scalar outputs for the reduction case at::native::resize_output(output, {}); total_weight.resize_({}); if (target.numel() == 0) { // Here target (and input) have zero elements // Mean reduction on empty tensors produces NaN. 
See the discussion in // https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162 if (reduction == Reduction::Mean) { output.fill_(std::numeric_limits<double>::quiet_NaN()); } else { output.zero_(); } total_weight.zero_(); return; } if (n_dims == 1) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_1d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_1d_index", [&] { nll_loss_forward_reduce_cuda_kernel_1d<scalar_t, index_t> <<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, reduction == at::Reduction::Mean, n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); } else if (n_dims == 2) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_2d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_2d_index", [&] { using accscalar_t = at::acc_type<scalar_t, /*is_cuda*/true>; nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, accscalar_t, index_t> <<<1, NLL_LOSS_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, reduction == at::Reduction::Mean, input.size(0), input.size(1), n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); } } template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_no_reduce_cuda_kernel( int batch_size, index_t *target, PackedTensorAccessor64<scalar_t, 1> grad_output, PackedTensorAccessor64<scalar_t, 2> grad_input, scalar_t *weights, int n_classes, int ignore_index) { CUDA_KERNEL_LOOP(index, batch_size) { int cur_target = target[index]; if (cur_target == ignore_index) { continue; } CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes); scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1); grad_input[index][cur_target] = -weight * grad_output[index]; } }; template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_reduce_cuda_kernel_1d( scalar_t *grad_input, scalar_t *grad_output, scalar_t *weights, index_t *target, scalar_t *total_weight, bool size_average, int n_classes, int64_t ignore_index ) { int t = static_cast<int>(*target); if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); const auto grad = -(size_average ? *grad_output / *total_weight : *grad_output); grad_input[t] = weights != nullptr ? weights[t] * grad : grad; } } template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_reduce_cuda_kernel_2d( scalar_t* grad_input, scalar_t* grad_output, index_t* target, scalar_t* weights, scalar_t* total_weight, bool size_average, int nframe, int ndim, int n_classes, int64_t ignore_index) { const auto grad = -(size_average ? *grad_output / *total_weight : *grad_output); for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) { int t = target[i]; if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); grad_input[i * ndim + t] = weights != nullptr ? 
weights[t] * grad : grad; } } } void nll_loss_backward_out_cuda_template( const Tensor& grad_input_, const Tensor& grad_output_, const Tensor& input_, const Tensor& target_, const Tensor& total_weight, const Tensor& weight, int64_t reduction, int64_t ignore_index) { auto target = *target_.expect_contiguous(); auto input = *input_.expect_contiguous(); auto grad_input = *grad_input_.expect_contiguous(); auto grad_output = *grad_output_.expect_contiguous(); int64_t n_dims = input.dim(); int64_t n_classes = input.size(-1); int64_t batch_size = n_dims == 1 ? 1 : input.size(0); auto weight_ = weight.defined() ? weight.contiguous() : weight; if (reduction == at::Reduction::None && n_dims == 2) { if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_no_reduce_cuda_kernel", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_no_reduce_cuda_kernel_index", [&] { nll_loss_backward_no_reduce_cuda_kernel<scalar_t, index_t> <<<at::cuda::detail::GET_BLOCKS(batch_size), at::cuda::detail::CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( batch_size, target.data_ptr<index_t>(), grad_output.packed_accessor64<scalar_t, 1>(), grad_input.packed_accessor64<scalar_t, 2>(), weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr, n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return; } if (n_dims == 1) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_1d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_1d_index", [&] { nll_loss_backward_reduce_cuda_kernel_1d<scalar_t, index_t> <<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr, target.data_ptr<index_t>(), total_weight.data_ptr<scalar_t>(), reduction == at::Reduction::Mean, n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_2d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_2d_index", [&] { nll_loss_backward_reduce_cuda_kernel_2d<scalar_t, index_t> <<<1, NLL_LOSS_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight.defined() ? 
weight_.data_ptr<scalar_t>() : nullptr, total_weight.data_ptr<scalar_t>(), reduction == at::Reduction::Mean, input.size(0), input.size(1), n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); } } #undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES } // namespace TORCH_IMPL_FUNC(nll_loss_forward_out_cuda) (const Tensor& self, const Tensor& target, const OptionalTensorRef weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& output, const Tensor& total_weight) { const Tensor& weight = weight_opt.getTensorRef(); nll_loss_forward_out_cuda_template( output, total_weight, self, target, weight, reduction, ignore_index); } TORCH_IMPL_FUNC(nll_loss_backward_out_cuda) (const Tensor& grad_output, const Tensor& self, const Tensor& target, OptionalTensorRef weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight, const Tensor& grad_input) { const Tensor& weight = weight_opt.getTensorRef(); grad_input.zero_(); nll_loss_backward_out_cuda_template( grad_input, grad_output, self, target, total_weight, weight, reduction, ignore_index); } }} // namespace at::native
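The two ATen loss kernels above boil down to element-wise formulas: the forward pass evaluates (target - 1) * max(log(1 - input), -100) - target * max(log(input), -100), and the backward pass evaluates grad * (input - target) / max((1 - input) * input, EPSILON). The standalone CUDA sketch below illustrates those formulas outside of ATen; the kernel names, the float-only dtype, and the launch configuration are illustrative assumptions, not part of the library.

// bce_sketch.cu -- standalone illustration of the element-wise BCE math used above.
// Assumptions: float only; bce_forward/bce_backward are illustrative names, not ATen APIs.
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

__device__ __forceinline__ float clamped_log(float v) {
  // Mirror the clamp to -100 applied in binary_cross_entropy_out_cuda above.
  return fmaxf(logf(v), -100.0f);
}

__global__ void bce_forward(const float *input, const float *target, float *loss, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  float x = input[i], t = target[i];
  loss[i] = (t - 1.0f) * clamped_log(1.0f - x) - t * clamped_log(x);
}

__global__ void bce_backward(const float *grad, const float *input, const float *target,
                             float *grad_input, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  const float eps = 1e-12f; // EPSILON in the file above
  float x = input[i], t = target[i];
  grad_input[i] = grad[i] * (x - t) / fmaxf((1.0f - x) * x, eps);
}

int main() {
  const int n = 4;
  float hx[n] = {0.1f, 0.5f, 0.9f, 0.999f};
  float ht[n] = {0.0f, 1.0f, 1.0f, 0.0f};
  float *dx, *dt, *dl;
  cudaMalloc(&dx, n * sizeof(float));
  cudaMalloc(&dt, n * sizeof(float));
  cudaMalloc(&dl, n * sizeof(float));
  cudaMemcpy(dx, hx, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dt, ht, n * sizeof(float), cudaMemcpyHostToDevice);
  bce_forward<<<1, 32>>>(dx, dt, dl, n); // unreduced (Reduction::None) per-element losses
  float hl[n];
  cudaMemcpy(hl, dl, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("loss[%d] = %f\n", i, hl[i]);
  cudaFree(dx); cudaFree(dt); cudaFree(dl);
  return 0;
}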
caf5ee824bad502bfc2b59ff9f376cd69fd85ad1.hip
// !!! This is a file automatically generated by hipify!!! #include "stencil/stencil.hpp" #include "stencil/logging.hpp" #include "stencil/tx_colocated.cuh" #include <cstdlib> #include <vector> DistributedDomain::DistributedDomain(size_t x, size_t y, size_t z) : size_(x, y, z), placement_(nullptr), flags_(Method::Default), strategy_(PlacementStrategy::NodeAware) { #ifdef STENCIL_SETUP_STATS timeMpiTopo_ = 0; timeNodeGpus_ = 0; timePeerEn_ = 0; timePlacement_ = 0; timePlan_ = 0; timeRealize_ = 0; timeCreate_ = 0; #endif #ifdef STENCIL_EXCHANGE_STATS timeExchange_ = 0; timeSwap_ = 0; #endif MPI_Comm_rank(MPI_COMM_WORLD, &rank_); MPI_Comm_size(MPI_COMM_WORLD, &worldSize_); /* Try to set the planfile output prefix from environment */ if (const char *s = std::getenv("STENCIL_OUTPUT_PREFIX")) { outputPrefix_ = std::string(s); } #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); double start = MPI_Wtime(); #endif mpiTopology_ = std::move(MpiTopology(MPI_COMM_WORLD)); #ifdef STENCIL_SETUP_STATS double elapsed = MPI_Wtime() - start; double maxElapsed = -1; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeMpiTopo_ += maxElapsed; } #endif LOG_DEBUG("colocated with " << mpiTopology_.colocated_size() << " ranks"); int deviceCount; CUDA_RUNTIME(hipGetDeviceCount(&deviceCount)); std::cerr << "[" << rank_ << "] hipGetDeviceCount= " << deviceCount << "\n"; /* hipComputeModeDefault = 0 Default compute mode (Multiple threads can use hipSetDevice() with this device) hipComputeModeExclusive = 1 Compute-exclusive-thread mode (Only one thread in one process will be able to use hipSetDevice() with this device) hipComputeModeProhibited = 2 Compute-prohibited mode (No threads can use hipSetDevice() with this device) hipComputeModeExclusiveProcess = 3 Compute-exclusive-process mode (Many threads in one process will be able to use hipSetDevice() with this device) */ hipDeviceProp_t prop; for (int i = 0; i < deviceCount; ++i) { CUDA_RUNTIME(hipGetDeviceProperties(&prop, i)); std::cerr << "[" << rank_ << "] hipDeviceProp_t.computeMode=" << prop.computeMode << "\n"; } // Determine GPUs this DistributedDomain is reposible for if (gpus_.empty()) { // if fewer colocated ranks than GPUs, round-robin GPUs to ranks if (mpiTopology_.colocated_size() <= deviceCount) { for (int id = 0; id < deviceCount; ++id) { if (id % mpiTopology_.colocated_size() == mpiTopology_.colocated_rank()) { gpus_.push_back(id); } } } else { // if more ranks, share gpus among ranks gpus_.push_back(mpiTopology_.colocated_rank() % deviceCount); } } assert(!gpus_.empty()); // create a list of cuda device IDs in use by the ranks on this node // TODO: assumes all ranks use the same number of GPUs #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); start = MPI_Wtime(); #endif std::vector<int> nodeCudaIds(gpus_.size() * mpiTopology_.colocated_size()); MPI_Allgather(gpus_.data(), int(gpus_.size()), MPI_INT, nodeCudaIds.data(), int(gpus_.size()), MPI_INT, mpiTopology_.colocated_comm()); #ifdef STENCIL_SETUP_STATS elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeNodeGpus_ += maxElapsed; } #endif { { #if STENCIL_OUTPUT_LEVEL <= 2 std::stringstream ss; ss << "[" << rank_ << "] colocated with ranks using gpus"; for (auto &e : nodeCudaIds) { ss << " " << e; } LOG_INFO(ss.str()); #endif } } #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); start = MPI_Wtime(); #endif // Try to enable peer access between all GPUs 
roctxRangePush("peer_en"); for (const auto &srcGpu : gpus_) { for (const auto &dstGpu : nodeCudaIds) { gpu_topo::enable_peer(srcGpu, dstGpu); } } roctxRangePop(); #ifdef STENCIL_SETUP_STATS elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timePeerEn_ += maxElapsed; } #endif CUDA_RUNTIME(hipGetLastError()); } uint64_t DistributedDomain::exchange_bytes_for_method(const Method &method) const { uint64_t ret = 0; #ifdef STENCIL_SETUP_STATS if (method && Method::CudaMpi) { ret += numBytesCudaMpi_; } if (method && Method::ColoQuantityKernel) { ret += numBytesColoDirectAccess_; } if (method && Method::ColoPackMemcpyUnpack) { ret += numBytesColoPackMemcpyUnpack_; } if (method && Method::CudaMemcpyPeer) { ret += numBytesCudaMemcpyPeer_; } if (method && Method::CudaKernel) { ret += numBytesCudaKernel_; } #else (void)method; #endif return ret; } DistributedDomain::~DistributedDomain() { LOG_SPEW("~DD entry"); for (auto &m : remoteSenders_) { for (auto &kv : m) { delete kv.second; } } for (auto &m : remoteRecvers_) { for (auto &kv : m) { delete kv.second; } } for (auto &m : coloSenders_) { for (auto &kv : m) { delete kv.second; } } for (auto &m : coloRecvers_) { for (auto &kv : m) { delete kv.second; } } if (placement_) { delete placement_; placement_ = nullptr; } LOG_SPEW("~DD: exit"); } void DistributedDomain::set_methods(Method flags) noexcept { if ((flags && Method::ColoQuantityKernel) && (flags && Method::ColoPackMemcpyUnpack)) { LOG_FATAL("can't use Direct Access and Pack-Memcpy-Unpack for colocated ranks"); } flags_ = flags; } /* place domains on GPUs, and initialize topology */ void DistributedDomain::do_placement() { // TODO: make sure everyone has the same Placement Strategy // compute domain placement #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); double start = MPI_Wtime(); #endif roctxRangePush("placement"); switch (strategy_) { case PlacementStrategy::NodeAware: { assert(!placement_); placement_ = new NodeAware(size_, mpiTopology_, radius_, gpus_); break; } case PlacementStrategy::Trivial: { assert(!placement_); placement_ = new Trivial(size_, mpiTopology_, gpus_); break; } case PlacementStrategy::IntraNodeRandom: { assert(!placement_); placement_ = new IntraNodeRandom(size_, mpiTopology_, radius_, gpus_); break; } } assert(placement_); roctxRangePop(); // "placement" #ifdef STENCIL_SETUP_STATS double maxElapsed = -1; double elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timePlacement_ += maxElapsed; } #endif topology_ = Topology(placement_->dim(), Topology::Boundary::PERIODIC); } void DistributedDomain::realize() { do_placement(); #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); double start = MPI_Wtime(); #endif for (int64_t domId = 0; domId < int64_t(gpus_.size()); domId++) { const Dim3 idx = placement_->get_idx(rank_, domId); const Dim3 sdSize = placement_->subdomain_size(idx); const Dim3 sdOrigin = placement_->subdomain_origin(idx); // placement algorithm should agree with me what my GPU is const int cudaId = placement_->get_cuda(idx); assert(cudaId == gpus_[domId]); LOG_DEBUG("domain=" << domId << " cuda=" << cudaId << " idx=" << idx); LocalDomain sd(sdSize, sdOrigin, cudaId); sd.set_radius(radius_); for (size_t dataIdx = 0; dataIdx < dataElemSize_.size(); ++dataIdx) { sd.add_data(dataElemSize_[dataIdx]); } domains_.push_back(sd); } // realize local domains for (auto &d : domains_) { d.realize(); } #ifdef 
STENCIL_SETUP_STATS double maxElapsed = -1; double elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeRealize_ += maxElapsed; } #endif #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); start = MPI_Wtime(); #endif // outbox for same-GPU exchanges std::vector<Message> peerAccessOutbox; // outboxes for same-rank exchanges std::vector<std::vector<std::vector<Message>>> peerCopyOutboxes; // peerCopyOutboxes[di][dj] = peer copy from di to dj // outbox for co-located domains in different ranks // one outbox for each co-located domain std::vector<std::map<Dim3, std::vector<Message>>> coloOutboxes; std::vector<std::map<Dim3, std::vector<Message>>> coloInboxes; // coloOutboxes[di][dstRank] = messages // inbox for each remote domain my domains recv from std::vector<std::map<Dim3, std::vector<Message>>> remoteInboxes; // remoteOutboxes_[domain][srcIdx] = messages // outbox for each remote domain my domains send to std::vector<std::map<Dim3, std::vector<Message>>> remoteOutboxes; // remoteOutboxes[domain][dstIdx] = messages LOG_DEBUG("comm plan"); /* For each direction, look up where the destination device is and decide which communication method to use. We do not create a message where the message size would be zero */ roctxRangePush("DistributedDomain::realize() plan messages"); #ifdef STENCIL_SETUP_STATS // rank-rank communication amount matrix Mat2D<uint64_t> rankCommBytes(mpi::comm_size(MPI_COMM_WORLD), mpi::comm_size(MPI_COMM_WORLD), 0); #endif peerCopyOutboxes.resize(gpus_.size()); for (auto &v : peerCopyOutboxes) { v.resize(gpus_.size()); } coloOutboxes.resize(gpus_.size()); coloInboxes.resize(gpus_.size()); remoteOutboxes.resize(gpus_.size()); remoteInboxes.resize(gpus_.size()); for (size_t di = 0; di < domains_.size(); ++di) { const Dim3 myIdx = placement_->get_idx(rank_, di); const int myDev = domains_[di].gpu(); assert(myDev == placement_->get_cuda(myIdx)); for (int z = -1; z <= 1; ++z) { for (int y = -1; y <= 1; ++y) { for (int x = -1; x <= 1; ++x) { // send direction const Dim3 dir(x, y, z); if (Dim3(0, 0, 0) == dir) { continue; // no message } // Only do sends when the stencil radius in the opposite // direction is non-zero for example, if +x radius is 2, our -x // neighbor needs a halo region from us, so we need to plan to send // in that direction if (0 == radius_.dir(dir * -1)) { continue; // no sends or recvs for this dir } else { LOG_DEBUG(dir << " radius = " << radius_.dir(dir * -1)); } const Topology::OptionalNeighbor dstNbr = topology_.get_neighbor(myIdx, dir); if (!dstNbr.exists) { continue; } const Dim3 dstIdx = dstNbr.index; const int dstRank = placement_->get_rank(dstIdx); const int dstGPU = placement_->get_subdomain_id(dstIdx); const int dstDev = placement_->get_cuda(dstIdx); // size of our send is the size of the recieving neighbor's halo in -dir const Dim3 dstSize = placement_->subdomain_size(dstIdx); const Dim3 sExt = LocalDomain::halo_extent(dir * -1, dstSize, radius_); Message sMsg(dir, di, dstGPU, sExt); // TODO: move this out of the plan so that this time isn't accumulated into the statistics #ifdef STENCIL_SETUP_STATS for (int qi = 0; qi < domains_[di].num_data(); ++qi) { // send size matches size of halo that we're recving into const size_t bytes = domains_[di].halo_bytes(dir * -1, qi); // FIXME: directionality? 
rankCommBytes.at(rank_, dstRank) += bytes; } #endif // TODO: this method can be removed, in place of the peer access method if (any_methods(Method::CudaKernel)) { if (dstRank == rank_ && myDev == dstDev) { peerAccessOutbox.push_back(sMsg); goto send_planned; } } if (any_methods(Method::CudaMemcpyPeer)) { LOG_DEBUG("peer " << rank_ << " " << dstRank << " peer(" << myDev << "," << dstDev << ")=" << gpu_topo::peer(myDev, dstDev)); if (dstRank == rank_ && gpu_topo::peer(myDev, dstDev)) { peerCopyOutboxes[di][dstGPU].push_back(sMsg); goto send_planned; } } /* FIXME: for now, we require that all GPUs be visible to all colocated ranks. This is used to detect the GPU distance. Ultimately, we'd like to be able to figure this out even in the presence of CUDA_VISIBLE_DEVICES making each rank have a different CUDA device 0 Then, we could restrict CPU code to run on CPUs nearby to the GPU */ if (any_methods(Method::ColoPackMemcpyUnpack | Method::ColoQuantityKernel | Method::ColoRegionKernel | Method::ColoMemcpy3d | Method::ColoDomainKernel)) { if ((dstRank != rank_) && mpiTopology_.colocated(dstRank) && gpu_topo::peer(myDev, dstDev)) { assert(di < coloOutboxes.size()); coloOutboxes[di].emplace(dstIdx, std::vector<Message>()); coloOutboxes[di][dstIdx].push_back(sMsg); LOG_DEBUG("Plan send <colocated> for Mesage dir=" << sMsg.dir_); goto send_planned; } } if (any_methods(Method::CudaMpi)) { assert(di < remoteOutboxes.size()); remoteOutboxes[di][dstIdx].push_back(sMsg); LOG_DEBUG("Plan send <remote> " << myIdx << " (r" << rank_ << "d" << di << "g" << myDev << ")" << " -> " << dstIdx << " (r" << dstRank << "d" << dstGPU << "g" << dstDev << ")" << " (dir=" << dir << ", rad" << dir * -1 << "=" << radius_.dir(dir * -1) << ")"); goto send_planned; } LOG_FATAL("No method available to send required message " << sMsg.dir_ << "\n"); send_planned: // successfully found a way to send const Topology::OptionalNeighbor srcNbr = topology_.get_neighbor(myIdx, dir * -1); if (!srcNbr.exists) { continue; } const Dim3 srcIdx = srcNbr.index; const int srcRank = placement_->get_rank(srcIdx); const int srcGPU = placement_->get_subdomain_id(srcIdx); const int srcDev = placement_->get_cuda(srcIdx); // size of our recv is the size of our halo in -dir const Dim3 rExt = domains_[di].halo_extent(dir * -1); Message rMsg(dir, srcGPU, di, rExt); if (any_methods(Method::CudaKernel)) { if (srcRank == rank_ && srcDev == myDev) { // no recver needed goto recv_planned; } } if (any_methods(Method::CudaMemcpyPeer)) { if (srcRank == rank_ && gpu_topo::peer(srcDev, myDev)) { // no recver needed goto recv_planned; } } if (any_methods(Method::ColoPackMemcpyUnpack | Method::ColoQuantityKernel | Method::ColoRegionKernel | Method::ColoMemcpy3d | Method::ColoDomainKernel)) { if ((srcRank != rank_) && mpiTopology_.colocated(srcRank) && gpu_topo::peer(srcDev, myDev)) { assert(di < coloInboxes.size()); coloInboxes[di].emplace(srcIdx, std::vector<Message>()); coloInboxes[di][srcIdx].push_back(sMsg); LOG_SPEW("Plan recv <colo> " << srcIdx << "->" << myIdx << " (dir=" << dir << "): r" << dir * -1 << "=" << radius_.dir(dir * -1)); goto recv_planned; } } if (any_methods(Method::CudaMpi)) { assert(di < remoteInboxes.size()); remoteInboxes[di].emplace(srcIdx, std::vector<Message>()); remoteInboxes[di][srcIdx].push_back(sMsg); LOG_SPEW("Plan recv <remote> " << srcIdx << "->" << myIdx << " (dir=" << dir << "): r" << dir * -1 << "=" << radius_.dir(dir * -1)); goto recv_planned; } LOG_FATAL("No method available to recv required message"); recv_planned: // found a 
way to recv (void)0; } } } } roctxRangePop(); // plan #ifdef STENCIL_SETUP_STATS elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timePlan_ += maxElapsed; } #endif /* ------------------------- dump communication matrices ---------------------------- to be loaded with numpy.loadtxt */ #ifdef STENCIL_SETUP_STATS { if (0 == rank_) { MPI_Reduce(MPI_IN_PLACE, rankCommBytes.data(), rankCommBytes.size(), MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); } else { MPI_Reduce(rankCommBytes.data(), rankCommBytes.data(), rankCommBytes.size(), MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); } if (0 == rank_) { std::string matFileName = outputPrefix_ + "mat_npy_loadtxt" + ".txt"; std::ofstream matFile(matFileName, std::ofstream::out); for (unsigned r = 0; r < rankCommBytes.shape().y; ++r) { for (unsigned c = 0; c < rankCommBytes.shape().x; ++c) { matFile << rankCommBytes.at(r, c) << " "; } matFile << std::endl; } } } #endif /* ------------------------- summarize communication plan ---------------------------- Dump one file per rank describing who and how we communicate Also total up the number of bytes we are sending for an aggregate bandwidth estimation. ----------------------------*/ { #ifdef STENCIL_SETUP_STATS numBytesCudaMpi_ = 0; numBytesColoDirectAccess_ = 0; numBytesColoPackMemcpyUnpack_ = 0; numBytesCudaMemcpyPeer_ = 0; numBytesCudaKernel_ = 0; #endif std::string planFileName = outputPrefix_ + "plan_" + std::to_string(rank_) + ".txt"; std::ofstream planFile(planFileName, std::ofstream::out); planFile << "rank=" << rank_ << "\n\n"; planFile << "== quantities == \n"; planFile << "domains\n"; for (size_t di = 0; di < domains_.size(); ++di) { planFile << di << ":cuda" << domains_[di].gpu() << ":" << placement_->get_idx(rank_, di) << " sz=" << domains_[di].size() << "\n"; } planFile << "\n"; planFile << "== peerAccess ==\n"; for (auto &msg : peerAccessOutbox) { size_t peerBytes = 0; for (int qi = 0; qi < domains_[msg.srcGPU_].num_data(); ++qi) { // send size matches size of halo that we're recving into const size_t bytes = domains_[msg.srcGPU_].halo_bytes(msg.dir_ * -1, qi); peerBytes += bytes; #ifdef STENCIL_SETUP_STATS numBytesCudaKernel_ += bytes; #endif } planFile << msg.srcGPU_ << "->" << msg.dstGPU_ << " " << msg.dir_ << " " << peerBytes << "B\n"; } planFile << "\n"; planFile << "== peerCopy ==\n"; for (size_t srcGPU = 0; srcGPU < peerCopyOutboxes.size(); ++srcGPU) { for (size_t dstGPU = 0; dstGPU < peerCopyOutboxes[srcGPU].size(); ++dstGPU) { size_t peerBytes = 0; for (const auto &msg : peerCopyOutboxes[srcGPU][dstGPU]) { for (int64_t i = 0; i < domains_[srcGPU].num_data(); ++i) { // send size matches size of halo that we're recving into const int64_t bytes = domains_[srcGPU].halo_bytes(msg.dir_ * -1, i); peerBytes += bytes; #ifdef STENCIL_SETUP_STATS numBytesCudaMemcpyPeer_ += bytes; #endif } planFile << srcGPU << "->" << dstGPU << " " << msg.dir_ << " " << peerBytes << "B\n"; } } } planFile << "\n"; // std::vector<std::map<Dim3, std::vector<Message>>> coloOutboxes; planFile << "== colo ==\n"; for (size_t di = 0; di < coloOutboxes.size(); ++di) { std::map<Dim3, std::vector<Message>> &obxs = coloOutboxes[di]; for (auto &kv : obxs) { const Dim3 dstIdx = kv.first; auto &box = kv.second; planFile << "colo to dstIdx=" << dstIdx << "\n"; for (auto &msg : box) { planFile << "dir=" << msg.dir_ << " (" << msg.srcGPU_ << "->" << msg.dstGPU_ << ")\n"; #ifdef STENCIL_SETUP_STATS for (int64_t i = 0; i < domains_[di].num_data(); ++i) { // send 
size matches size of halo that we're recving into uint64_t numBytes = domains_[di].halo_bytes(msg.dir_ * -1, i); if (flags_ && Method::ColoQuantityKernel) { numBytesColoDirectAccess_ += numBytes; } else if (flags_ && Method::ColoPackMemcpyUnpack) { numBytesColoPackMemcpyUnpack_ += numBytes; } else { LOG_WARN("unpected method flag, statistics may be nonsense"); } } #endif } } } planFile << "\n"; planFile << "== remote ==\n"; for (size_t di = 0; di < remoteOutboxes.size(); ++di) { std::map<Dim3, std::vector<Message>> &obxs = remoteOutboxes[di]; for (auto &kv : obxs) { const Dim3 dstIdx = kv.first; auto &box = kv.second; planFile << "remote to dstIdx=" << dstIdx << "\n"; for (auto &msg : box) { planFile << "dir=" << msg.dir_ << " (" << msg.srcGPU_ << "->" << msg.dstGPU_ << ")\n"; #ifdef STENCIL_SETUP_STATS for (int64_t i = 0; i < domains_[di].num_data(); ++i) { // send size matches size of halo that we're recving into numBytesCudaMpi_ += domains_[di].halo_bytes(msg.dir_ * -1, i); } #endif } } } planFile.close(); // give every rank the total send volume #ifdef STENCIL_SETUP_STATS roctxRangePush("allreduce communication stats"); MPI_Allreduce(MPI_IN_PLACE, &numBytesCudaMpi_, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &numBytesColoDirectAccess_, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &numBytesColoPackMemcpyUnpack_, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &numBytesCudaMemcpyPeer_, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &numBytesCudaKernel_, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); roctxRangePop(); if (rank_ == 0) { LOG_INFO(numBytesCudaMpi_ << "B CudaMpi / exchange"); LOG_INFO(numBytesColoDirectAccess_ << "B ColoDirectAccess / exchange"); LOG_INFO(numBytesColoPackMemcpyUnpack_ << "B ColoPackMemcpyUnpack / exchange"); LOG_INFO(numBytesCudaMemcpyPeer_ << "B CudaMemcpyPeer / exchange"); LOG_INFO(numBytesCudaKernel_ << "B CudaKernel / exchange"); } #endif } #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); start = MPI_Wtime(); #endif // create remote sender/recvers LOG_DEBUG("create remote"); roctxRangePush("DistributedDomain::realize: create remote"); // per-domain senders and messages remoteSenders_.resize(gpus_.size()); remoteRecvers_.resize(gpus_.size()); // create all required remote senders/recvers for (size_t di = 0; di < domains_.size(); ++di) { for (auto &kv : remoteOutboxes[di]) { const Dim3 dstIdx = kv.first; const int dstRank = placement_->get_rank(dstIdx); const int dstGPU = placement_->get_subdomain_id(dstIdx); if (0 == remoteSenders_[di].count(dstIdx)) { StatefulSender *sender = nullptr; if (any_methods(Method::CudaMpi)) { #if STENCIL_USE_CUDA_AWARE_MPI == 1 sender = new CudaAwareMpiSender(rank_, di, dstRank, dstGPU, domains_[di]); #else sender = new RemoteSender(rank_, di, dstRank, dstGPU, domains_[di]); #endif } assert(sender); remoteSenders_[di].emplace(dstIdx, sender); } } for (auto &kv : remoteInboxes[di]) { const Dim3 srcIdx = kv.first; const int srcRank = placement_->get_rank(srcIdx); const int srcGPU = placement_->get_subdomain_id(srcIdx); if (0 == remoteRecvers_[di].count(srcIdx)) { StatefulRecver *recver = nullptr; if (any_methods(Method::CudaMpi)) { #if STENCIL_USE_CUDA_AWARE_MPI == 1 recver = new CudaAwareMpiRecver(srcRank, srcGPU, rank_, di, domains_[di]); #else recver = new RemoteRecver(srcRank, srcGPU, rank_, di, domains_[di]); #endif } assert(recver); remoteRecvers_[di].emplace(srcIdx, recver); } } } roctxRangePop(); // create remote 
LOG_DEBUG("create colocated"); // create colocated sender/recvers roctxRangePush("DistributedDomain::realize: create colocated"); // per-domain senders and messages coloSenders_.resize(gpus_.size()); coloRecvers_.resize(gpus_.size()); // create all required colocated senders/recvers for (size_t di = 0; di < domains_.size(); ++di) { for (auto &kv : coloOutboxes[di]) { StatefulSender *sender = nullptr; const Dim3 dstIdx = kv.first; const int dstRank = placement_->get_rank(dstIdx); const int dstGPU = placement_->get_subdomain_id(dstIdx); LOG_DEBUG("create ColoSender to " << dstIdx << " on " << dstRank << " (" << dstGPU << ")"); if (any_methods(Method::ColoPackMemcpyUnpack)) { sender = new ColocatedHaloSender(rank_, di, dstRank, dstGPU, domains_[di]); } else if (any_methods(Method::ColoQuantityKernel)) { sender = new ColoQuantityKernelSender(rank_, di, dstRank, dstGPU, domains_[di], placement_); } else if (any_methods(Method::ColoRegionKernel)) { sender = new ColoRegionKernelSender(rank_, di, dstRank, dstGPU, domains_[di], placement_); } else if (any_methods(Method::ColoMemcpy3d)) { sender = new ColoMemcpy3dHaloSender(rank_, di, dstRank, dstGPU, domains_[di], placement_); } else if (any_methods(Method::ColoDomainKernel)) { sender = new ColoDomainKernelSender(rank_, di, dstRank, dstGPU, domains_[di], placement_); } coloSenders_[di].emplace(dstIdx, sender); } for (auto &kv : coloInboxes[di]) { StatefulRecver *recver; const Dim3 srcIdx = kv.first; const int srcRank = placement_->get_rank(srcIdx); const int srcGPU = placement_->get_subdomain_id(srcIdx); LOG_DEBUG("create ColoRecver from " << srcIdx << " on " << srcRank << " (" << srcGPU << ")"); if (any_methods(Method::ColoPackMemcpyUnpack)) { recver = new ColocatedHaloRecver(srcRank, srcGPU, rank_, di, domains_[di]); } else if (any_methods(Method::ColoQuantityKernel)) { recver = new ColoHaloRecver(srcRank, srcGPU, rank_, di, domains_[di]); } else if (any_methods(Method::ColoRegionKernel)) { recver = new ColoHaloRecver(srcRank, srcGPU, rank_, di, domains_[di]); } else if (any_methods(Method::ColoMemcpy3d)) { recver = new ColoHaloRecver(srcRank, srcGPU, rank_, di, domains_[di]); } else if (any_methods(Method::ColoDomainKernel)) { recver = new ColoHaloRecver(srcRank, srcGPU, rank_, di, domains_[di]); } coloRecvers_[di].emplace(srcIdx, recver); } } roctxRangePop(); // create colocated LOG_DEBUG("create peer copy"); // create colocated sender/recvers roctxRangePush("DistributedDomain::realize: create PeerCopySender"); // per-domain senders and messages peerCopySenders_.resize(gpus_.size()); LOG_SPEW("Peer Copy Sender for " << peerCopySenders_.size() << " sources"); // create all required colocated senders/recvers for (size_t srcGPU = 0; srcGPU < peerCopyOutboxes.size(); ++srcGPU) { LOG_SPEW("srcGPU = " << srcGPU); for (size_t dstGPU = 0; dstGPU < peerCopyOutboxes[srcGPU].size(); ++dstGPU) { LOG_SPEW("dstGPU = " << dstGPU); if (!peerCopyOutboxes[srcGPU][dstGPU].empty()) { LOG_SPEW("create PeerCopySender(" << srcGPU << "," << dstGPU << "...)"); PeerCopySender pcs(srcGPU, dstGPU, domains_[srcGPU], domains_[dstGPU]); LOG_SPEW("finished create"); peerCopySenders_[srcGPU].emplace(dstGPU, pcs); } else { LOG_SPEW("no msg between srcGPU=" << srcGPU << " and dstGPU=" << dstGPU); } } } roctxRangePop(); // create peer copy // prepare senders and receivers LOG_DEBUG("DistributedDomain::realize: prepare PeerAccessSender"); roctxRangePush("DistributedDomain::realize: prep peerAccessSender"); peerAccessSender_.prepare(peerAccessOutbox, domains_); roctxRangePop(); 
std::cerr << "DistributedDomain::realize: prepare PeerCopySender\n"; roctxRangePush("DistributedDomain::realize: prep peerCopySender"); for (size_t srcGPU = 0; srcGPU < peerCopySenders_.size(); ++srcGPU) { for (auto &kv : peerCopySenders_[srcGPU]) { const int dstGPU = kv.first; auto &sender = kv.second; sender.prepare(peerCopyOutboxes[srcGPU][dstGPU]); } } roctxRangePop(); std::cerr << "DistributedDomain::realize: start_prepare " "ColocatedSender/ColocatedRecver\n"; roctxRangePush("DistributedDomain::realize: prep colocated"); assert(coloSenders_.size() == coloRecvers_.size()); for (size_t di = 0; di < coloSenders_.size(); ++di) { for (auto &kv : coloSenders_[di]) { const Dim3 dstIdx = kv.first; const int dstRank = placement_->get_rank(dstIdx); StatefulSender *sender = kv.second; LOG_DEBUG(" colo sender.start_prepare " << placement_->get_idx(rank_, di) << "->" << dstIdx << "(rank " << dstRank << ")"); sender->start_prepare(coloOutboxes[di][dstIdx]); } for (auto &kv : coloRecvers_[di]) { const Dim3 srcIdx = kv.first; StatefulRecver *recver = kv.second; LOG_DEBUG(" colo recver.start_prepare " << srcIdx << "->" << placement_->get_idx(rank_, di)); recver->start_prepare(coloInboxes[di][srcIdx]); } } LOG_DEBUG("DistributedDomain::realize: finish_prepare ColocatedSender/ColocatedRecver"); for (size_t di = 0; di < coloSenders_.size(); ++di) { for (auto &kv : coloSenders_[di]) { const Dim3 dstIdx = kv.first; StatefulSender *sender = kv.second; const int srcDev = domains_[di].gpu(); const int dstDev = placement_->get_cuda(dstIdx); LOG_DEBUG("colo sender.finish_prepare " << placement_->get_idx(rank_, di) << " -> " << dstIdx); sender->finish_prepare(); } for (auto &kv : coloRecvers_[di]) { StatefulRecver *recver = kv.second; LOG_DEBUG("colo recver.finish_prepare for colo from " << kv.first); recver->finish_prepare(); } } roctxRangePop(); // prep remote LOG_DEBUG("DistributedDomain::realize: prepare RemoteSender/RemoteRecver"); roctxRangePush("DistributedDomain::realize: prep remote"); assert(remoteSenders_.size() == remoteRecvers_.size()); for (size_t di = 0; di < remoteSenders_.size(); ++di) { for (auto &kv : remoteSenders_[di]) { const Dim3 dstIdx = kv.first; auto &sender = kv.second; sender->start_prepare(remoteOutboxes[di][dstIdx]); } for (auto &kv : remoteRecvers_[di]) { const Dim3 srcIdx = kv.first; auto &recver = kv.second; recver->start_prepare(remoteInboxes[di][srcIdx]); } } for (size_t di = 0; di < remoteSenders_.size(); ++di) { for (auto &kv : remoteSenders_[di]) { // const Dim3 dstIdx = kv.first; StatefulSender *sender = kv.second; sender->finish_prepare(); } for (auto &kv : remoteRecvers_[di]) { // const Dim3 srcIdx = kv.first; StatefulRecver *recver = kv.second; recver->finish_prepare(); } } roctxRangePop(); // prep remote #ifdef STENCIL_SETUP_STATS elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeCreate_ += maxElapsed; } #endif } void DistributedDomain::swap() { LOG_DEBUG("swap()"); #ifdef STENCIL_EXCHANGE_STATS MPI_Barrier(MPI_COMM_WORLD); double start = MPI_Wtime(); #endif for (auto &d : domains_) { d.swap(); } #ifdef STENCIL_EXCHANGE_STATS double elapsed = MPI_Wtime() - start; double maxElapsed = -1; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeSwap_ += maxElapsed; } #endif } /* start with the whole compute region, and check each direction of the stencil. 
move the correponding face/edge/corner inward enough to compensate an access in that direction */ std::vector<Rect3> DistributedDomain::get_interior() const { // one sparse domain for each LocalDomain std::vector<Rect3> ret(domains_.size()); // direction of our halo for (size_t di = 0; di < domains_.size(); ++di) { const LocalDomain &dom = domains_[di]; const Rect3 comReg = dom.get_compute_region(); Rect3 intReg = dom.get_compute_region(); for (int dz = -1; dz <= 1; ++dz) { for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { const Dim3 dir(dx, dy, dz); if (Dim3(0, 0, 0) == dir) { continue; } // if the radius is non-zero in a negative direction, // move the lower corner of that direction inward if (dir.x < 0) { intReg.lo.x = ::max(comReg.lo.x + int64_t(radius_.dir(dir)), intReg.lo.x); } else if (dir.x > 0) { intReg.hi.x = ::min(comReg.hi.x - int64_t(radius_.dir(dir)), intReg.hi.x); } if (dir.y < 0) { intReg.lo.y = ::max(comReg.lo.y + int64_t(radius_.dir(dir)), intReg.lo.y); } else if (dir.y > 0) { intReg.hi.y = ::min(comReg.hi.y - int64_t(radius_.dir(dir)), intReg.hi.y); } if (dir.z < 0) { intReg.lo.z = ::max(comReg.lo.z + int64_t(radius_.dir(dir)), intReg.lo.z); } else if (dir.z > 0) { intReg.hi.z = ::min(comReg.hi.z - int64_t(radius_.dir(dir)), intReg.hi.z); } } } } ret[di] = intReg; } return ret; } /* the exterior is everything that is not in the interior. build non-overlapping regions by sliding faces of the compute region in until they reach the interior */ std::vector<std::vector<Rect3>> DistributedDomain::get_exterior() const { // one sparse domain for each LocalDomain std::vector<std::vector<Rect3>> ret(domains_.size()); const std::vector<Rect3> intRegs = get_interior(); for (size_t di = 0; di < domains_.size(); ++di) { const LocalDomain &dom = domains_[di]; const Rect3 &intReg = intRegs[di]; Rect3 comReg = dom.get_compute_region(); // +x if (intReg.hi.x != comReg.hi.x) { Rect3 extReg(Dim3(intReg.hi.x, comReg.lo.y, comReg.lo.z), Dim3(comReg.hi.x, comReg.hi.y, comReg.hi.z)); comReg.hi.x = intReg.hi.x; // slide face in ret[di].push_back(extReg); } // +y if (intReg.hi.y != comReg.hi.y) { Rect3 extReg(Dim3(comReg.lo.x, intReg.hi.y, comReg.lo.z), Dim3(comReg.hi.x, comReg.hi.y, comReg.hi.z)); comReg.hi.y = intReg.hi.y; // slide face in ret[di].push_back(extReg); } // +z if (intReg.hi.z != comReg.hi.z) { Rect3 extReg(Dim3(comReg.lo.x, comReg.lo.y, intReg.hi.z), Dim3(comReg.hi.x, comReg.hi.y, comReg.hi.z)); comReg.hi.z = intReg.hi.z; // slide face in ret[di].push_back(extReg); } // -x if (intReg.lo.x != comReg.lo.x) { Rect3 extReg(Dim3(comReg.lo.x, comReg.lo.y, comReg.lo.z), Dim3(intReg.lo.x, comReg.hi.y, comReg.hi.z)); comReg.lo.x = intReg.lo.x; // slide face in ret[di].push_back(extReg); } // -y if (intReg.lo.y != comReg.lo.y) { Rect3 extReg(Dim3(comReg.lo.x, comReg.lo.y, comReg.lo.z), Dim3(comReg.hi.x, intReg.lo.y, comReg.hi.z)); comReg.lo.y = intReg.lo.y; // slide face in ret[di].push_back(extReg); } // -z if (intReg.lo.z != comReg.lo.z) { Rect3 extReg(Dim3(comReg.lo.x, comReg.lo.y, comReg.lo.z), Dim3(comReg.hi.x, comReg.hi.y, intReg.lo.z)); comReg.lo.z = intReg.lo.z; // slide face in ret[di].push_back(extReg); } } return ret; } const Rect3 DistributedDomain::get_compute_region() const noexcept { return Rect3(Dim3(0, 0, 0), size_); } bool DistributedDomain::poll_advance_sends() { roctxRangePush("DD::poll_advance_sends"); bool pending = false; // move senders from d2h to h2h for (auto &domSenders : remoteSenders_) { for (auto &kv : domSenders) { StatefulSender *sender = 
kv.second; if (sender->active()) { pending = true; if (sender->next_ready()) { sender->next(); } } } } roctxRangePop(); return pending; } void DistributedDomain::exchange() { roctxRangePush("DD::exchange()"); #ifdef STENCIL_EXCHANGE_STATS MPI_Barrier(MPI_COMM_WORLD); double start = MPI_Wtime(); #endif /*! Try to start sends in order from longest to shortest * we expect remote to be longest, followed by peer copy, followed by colo * colo is shorter than peer copy due to the node-aware data placement: * if we try to place bigger exchanges nearby, they will be faster */ // start remote send d2h LOG_DEBUG("remote send start"); roctxRangePush("DD::exchange: remote send d2h"); for (auto &domSenders : remoteSenders_) { for (auto &kv : domSenders) { StatefulSender *sender = kv.second; sender->send(); poll_advance_sends(); } } roctxRangePop(); // start colocated Senders LOG_DEBUG("start colo send"); roctxRangePush("DD::exchange: colo send"); for (auto &domSenders : coloSenders_) { for (auto &kv : domSenders) { StatefulSender *sender = kv.second; sender->send(); } } roctxRangePop(); // send same-rank messages LOG_DEBUG("send peer copy"); roctxRangePush("DD::exchange: peer copy send"); for (auto &src : peerCopySenders_) { for (auto &kv : src) { PeerCopySender &sender = kv.second; sender.send(); } } roctxRangePop(); // send self messages LOG_DEBUG("send peer access"); roctxRangePush("DD::exchange: peer access send"); peerAccessSender_.send(); roctxRangePop(); // start colocated recvers LOG_DEBUG("start colo recv"); roctxRangePush("DD::exchange: colo recv"); for (auto &domRecvers : coloRecvers_) { for (auto &kv : domRecvers) { StatefulRecver *recver = kv.second; recver->recv(); } } roctxRangePop(); // start remote recv h2h LOG_DEBUG("[" << rank_ << "] remote recv start"); roctxRangePush("DD::exchange: remote recv h2h"); for (auto &domRecvers : remoteRecvers_) { for (auto &kv : domRecvers) { StatefulRecver *recver = kv.second; recver->recv(); } } roctxRangePop(); // poll stateful senders and recvers to move onto next step until all are done LOG_DEBUG("[" << rank_ << "] start poll"); roctxRangePush("DD::exchange: poll"); bool pending = true; /* the intuition here is to prefer senders. 
as soon as we make progress on anything that's not a sender, jump back to the senders and try again */ while (pending) { pending = false; senders: pending |= poll_advance_sends(); // move recvers from h2h to h2d for (auto &domRecvers : remoteRecvers_) { for (auto &kv : domRecvers) { StatefulRecver *recver = kv.second; if (recver->active()) { pending = true; if (recver->next_ready()) { // const Dim3 srcIdx = kv.first; // std::cerr << "[" << rank_ << "] src=" << srcIdx << " // recv_h2d\n"; recver->next(); goto senders; // try to send as early as possible } } } } for (auto &domRecvers : coloRecvers_) { for (auto &kv : domRecvers) { StatefulRecver *recver = kv.second; if (recver->active()) { pending = true; if (recver->next_ready()) { recver->next(); goto senders; // try to send as early as possible } } } } // colosender: none of them are stateful, so we do not check them } roctxRangePop(); // DD::exchange: poll // wait for sends LOG_SPEW("wait for peer access senders"); roctxRangePush("peerAccessSender.wait()"); peerAccessSender_.wait(); roctxRangePop(); roctxRangePush("peerCopySender.wait()"); for (auto &src : peerCopySenders_) { for (auto &kv : src) { PeerCopySender &sender = kv.second; sender.wait(); } } roctxRangePop(); // peerCopySender.wait() // wait for colocated roctxRangePush("colocated.wait()"); for (auto &domSenders : coloSenders_) { for (auto &kv : domSenders) { LOG_SPEW("domain=" << kv.first << " wait colocated sender"); StatefulSender *sender = kv.second; sender->wait(); } } for (auto &domRecvers : coloRecvers_) { for (auto &kv : domRecvers) { LOG_SPEW("domain=" << kv.first << " wait colocated recver"); StatefulRecver *recver = kv.second; recver->wait(); } } roctxRangePop(); // colocated wait roctxRangePush("remote wait"); // wait for remote senders and recvers // printf("rank=%d wait for RemoteRecver/RemoteSender\n", rank_); for (auto &domRecvers : remoteRecvers_) { for (auto &kv : domRecvers) { LOG_SPEW("domain=" << kv.first << " wait remote recver"); StatefulRecver *recver = kv.second; assert(recver); recver->wait(); } } for (auto &domSenders : remoteSenders_) { for (auto &kv : domSenders) { LOG_SPEW("domain=" << kv.first << " wait remote sender"); StatefulSender *sender = kv.second; assert(sender); sender->wait(); } } roctxRangePop(); // remote wait #ifdef STENCIL_EXCHANGE_STATS double maxElapsed = -1; double elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeExchange_ += maxElapsed; } #endif roctxRangePop(); // "DD::excchange" // No barrier necessary: the CPU thread has already blocked until all recvs are done, so it is safe to proceed. 
} void DistributedDomain::write_paraview(const std::string &prefix, bool zeroNaNs) { const char delim[] = ","; roctxRangePush("write_paraview"); const int rank = mpi::world_rank(); const int size = mpi::world_size(); int64_t num = rank * domains_.size(); for (size_t di = 0; di < domains_.size(); ++di) { int64_t id = rank * domains_.size() + di; const std::string path = prefix + "_" + std::to_string(id) + ".txt"; LOG_INFO("write paraview file " << path); LocalDomain &domain = domains_[di]; LOG_DEBUG("copy interiors to host"); std::vector<std::vector<unsigned char>> quantities; for (int64_t qi = 0; qi < domain.num_data(); ++qi) { quantities.push_back(domain.interior_to_host(qi)); } LOG_DEBUG("open " << path); FILE *outf = fopen(path.c_str(), "w"); if (!outf) { LOG_ERROR("unable to open \"" << path << "\" for writing"); return; } // column headers fprintf(outf, "Z%sY%sX", delim, delim); for (int64_t qi = 0; qi < domain.num_data(); ++qi) { std::string colName = domain.dataName_[qi]; if (colName.empty()) { colName = "data" + std::to_string(qi); } fprintf(outf, "%s%s", delim, colName.c_str()); } fprintf(outf, "\n"); const Dim3 origin = domains_[di].origin(); // print rows for (int64_t lz = 0; lz < domain.sz_.z; ++lz) { for (int64_t ly = 0; ly < domain.sz_.y; ++ly) { for (int64_t lx = 0; lx < domain.sz_.x; ++lx) { Dim3 pos = origin + Dim3(lx, ly, lz); fprintf(outf, "%ld%s%ld%s%ld", pos.z, delim, pos.y, delim, pos.x); for (int64_t qi = 0; qi < domain.num_data(); ++qi) { if (8 == domain.elem_size(qi)) { double val = reinterpret_cast<double *>( quantities[qi].data())[lz * (domain.sz_.y * domain.sz_.x) + ly * domain.sz_.x + lx]; if (zeroNaNs && std::isnan(val)) { val = 0.0; } fprintf(outf, "%s%.17f", delim, val); } else if (4 == domain.elem_size(qi)) { float val = reinterpret_cast<float *>( quantities[qi].data())[lz * (domain.sz_.y * domain.sz_.x) + ly * domain.sz_.x + lx]; if (zeroNaNs && std::isnan(val)) { val = 0.0f; } fprintf(outf, "%s%.9f", delim, val); } } fprintf(outf, "\n"); } } } } roctxRangePop(); } void DistributedDomain::set_output_prefix(const std::string &prefix) { outputPrefix_ = prefix; }
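Both variants of the DistributedDomain constructor enable peer access between every GPU owned by this rank and every GPU in use on the node via gpu_topo::enable_peer (the HIP file above uses the hip* runtime; its CUDA pair below uses the cuda* calls). The sketch below shows the general all-pairs peer-enabling pattern in plain CUDA; it assumes enable_peer is roughly a checked wrapper over cudaDeviceCanAccessPeer / cudaDeviceEnablePeerAccess, and it is not the library's actual gpu_topo implementation.

// peer_enable_sketch.cu -- all-pairs peer-access enabling, as a standalone sketch.
// Assumption: gpu_topo::enable_peer behaves roughly like the runtime calls below.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
  int n = 0;
  cudaGetDeviceCount(&n);
  for (int src = 0; src < n; ++src) {
    cudaSetDevice(src); // peer access is enabled from the *current* device
    for (int dst = 0; dst < n; ++dst) {
      if (src == dst) continue;
      int can = 0;
      cudaDeviceCanAccessPeer(&can, src, dst);
      if (!can) {
        printf("peer %d -> %d: not supported\n", src, dst);
        continue;
      }
      cudaError_t err = cudaDeviceEnablePeerAccess(dst, 0 /*flags must be 0*/);
      if (err == cudaErrorPeerAccessAlreadyEnabled) {
        err = cudaSuccess;        // benign: access was already enabled earlier
        (void)cudaGetLastError(); // clear the recorded error state
      }
      printf("peer %d -> %d: %s\n", src, dst, cudaGetErrorString(err));
    }
  }
  return 0;
}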
caf5ee824bad502bfc2b59ff9f376cd69fd85ad1.cu
#include "stencil/stencil.hpp" #include "stencil/logging.hpp" #include "stencil/tx_colocated.cuh" #include <cstdlib> #include <vector> DistributedDomain::DistributedDomain(size_t x, size_t y, size_t z) : size_(x, y, z), placement_(nullptr), flags_(Method::Default), strategy_(PlacementStrategy::NodeAware) { #ifdef STENCIL_SETUP_STATS timeMpiTopo_ = 0; timeNodeGpus_ = 0; timePeerEn_ = 0; timePlacement_ = 0; timePlan_ = 0; timeRealize_ = 0; timeCreate_ = 0; #endif #ifdef STENCIL_EXCHANGE_STATS timeExchange_ = 0; timeSwap_ = 0; #endif MPI_Comm_rank(MPI_COMM_WORLD, &rank_); MPI_Comm_size(MPI_COMM_WORLD, &worldSize_); /* Try to set the planfile output prefix from environment */ if (const char *s = std::getenv("STENCIL_OUTPUT_PREFIX")) { outputPrefix_ = std::string(s); } #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); double start = MPI_Wtime(); #endif mpiTopology_ = std::move(MpiTopology(MPI_COMM_WORLD)); #ifdef STENCIL_SETUP_STATS double elapsed = MPI_Wtime() - start; double maxElapsed = -1; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeMpiTopo_ += maxElapsed; } #endif LOG_DEBUG("colocated with " << mpiTopology_.colocated_size() << " ranks"); int deviceCount; CUDA_RUNTIME(cudaGetDeviceCount(&deviceCount)); std::cerr << "[" << rank_ << "] cudaGetDeviceCount= " << deviceCount << "\n"; /* cudaComputeModeDefault = 0 Default compute mode (Multiple threads can use cudaSetDevice() with this device) cudaComputeModeExclusive = 1 Compute-exclusive-thread mode (Only one thread in one process will be able to use cudaSetDevice() with this device) cudaComputeModeProhibited = 2 Compute-prohibited mode (No threads can use cudaSetDevice() with this device) cudaComputeModeExclusiveProcess = 3 Compute-exclusive-process mode (Many threads in one process will be able to use cudaSetDevice() with this device) */ cudaDeviceProp prop; for (int i = 0; i < deviceCount; ++i) { CUDA_RUNTIME(cudaGetDeviceProperties(&prop, i)); std::cerr << "[" << rank_ << "] cudaDeviceProp.computeMode=" << prop.computeMode << "\n"; } // Determine GPUs this DistributedDomain is reposible for if (gpus_.empty()) { // if fewer colocated ranks than GPUs, round-robin GPUs to ranks if (mpiTopology_.colocated_size() <= deviceCount) { for (int id = 0; id < deviceCount; ++id) { if (id % mpiTopology_.colocated_size() == mpiTopology_.colocated_rank()) { gpus_.push_back(id); } } } else { // if more ranks, share gpus among ranks gpus_.push_back(mpiTopology_.colocated_rank() % deviceCount); } } assert(!gpus_.empty()); // create a list of cuda device IDs in use by the ranks on this node // TODO: assumes all ranks use the same number of GPUs #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); start = MPI_Wtime(); #endif std::vector<int> nodeCudaIds(gpus_.size() * mpiTopology_.colocated_size()); MPI_Allgather(gpus_.data(), int(gpus_.size()), MPI_INT, nodeCudaIds.data(), int(gpus_.size()), MPI_INT, mpiTopology_.colocated_comm()); #ifdef STENCIL_SETUP_STATS elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeNodeGpus_ += maxElapsed; } #endif { { #if STENCIL_OUTPUT_LEVEL <= 2 std::stringstream ss; ss << "[" << rank_ << "] colocated with ranks using gpus"; for (auto &e : nodeCudaIds) { ss << " " << e; } LOG_INFO(ss.str()); #endif } } #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); start = MPI_Wtime(); #endif // Try to enable peer access between all GPUs nvtxRangePush("peer_en"); for (const auto &srcGpu : gpus_) 
{ for (const auto &dstGpu : nodeCudaIds) { gpu_topo::enable_peer(srcGpu, dstGpu); } } nvtxRangePop(); #ifdef STENCIL_SETUP_STATS elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timePeerEn_ += maxElapsed; } #endif CUDA_RUNTIME(cudaGetLastError()); } uint64_t DistributedDomain::exchange_bytes_for_method(const Method &method) const { uint64_t ret = 0; #ifdef STENCIL_SETUP_STATS if (method && Method::CudaMpi) { ret += numBytesCudaMpi_; } if (method && Method::ColoQuantityKernel) { ret += numBytesColoDirectAccess_; } if (method && Method::ColoPackMemcpyUnpack) { ret += numBytesColoPackMemcpyUnpack_; } if (method && Method::CudaMemcpyPeer) { ret += numBytesCudaMemcpyPeer_; } if (method && Method::CudaKernel) { ret += numBytesCudaKernel_; } #else (void)method; #endif return ret; } DistributedDomain::~DistributedDomain() { LOG_SPEW("~DD entry"); for (auto &m : remoteSenders_) { for (auto &kv : m) { delete kv.second; } } for (auto &m : remoteRecvers_) { for (auto &kv : m) { delete kv.second; } } for (auto &m : coloSenders_) { for (auto &kv : m) { delete kv.second; } } for (auto &m : coloRecvers_) { for (auto &kv : m) { delete kv.second; } } if (placement_) { delete placement_; placement_ = nullptr; } LOG_SPEW("~DD: exit"); } void DistributedDomain::set_methods(Method flags) noexcept { if ((flags && Method::ColoQuantityKernel) && (flags && Method::ColoPackMemcpyUnpack)) { LOG_FATAL("can't use Direct Access and Pack-Memcpy-Unpack for colocated ranks"); } flags_ = flags; } /* place domains on GPUs, and initialize topology */ void DistributedDomain::do_placement() { // TODO: make sure everyone has the same Placement Strategy // compute domain placement #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); double start = MPI_Wtime(); #endif nvtxRangePush("placement"); switch (strategy_) { case PlacementStrategy::NodeAware: { assert(!placement_); placement_ = new NodeAware(size_, mpiTopology_, radius_, gpus_); break; } case PlacementStrategy::Trivial: { assert(!placement_); placement_ = new Trivial(size_, mpiTopology_, gpus_); break; } case PlacementStrategy::IntraNodeRandom: { assert(!placement_); placement_ = new IntraNodeRandom(size_, mpiTopology_, radius_, gpus_); break; } } assert(placement_); nvtxRangePop(); // "placement" #ifdef STENCIL_SETUP_STATS double maxElapsed = -1; double elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timePlacement_ += maxElapsed; } #endif topology_ = Topology(placement_->dim(), Topology::Boundary::PERIODIC); } void DistributedDomain::realize() { do_placement(); #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); double start = MPI_Wtime(); #endif for (int64_t domId = 0; domId < int64_t(gpus_.size()); domId++) { const Dim3 idx = placement_->get_idx(rank_, domId); const Dim3 sdSize = placement_->subdomain_size(idx); const Dim3 sdOrigin = placement_->subdomain_origin(idx); // placement algorithm should agree with me what my GPU is const int cudaId = placement_->get_cuda(idx); assert(cudaId == gpus_[domId]); LOG_DEBUG("domain=" << domId << " cuda=" << cudaId << " idx=" << idx); LocalDomain sd(sdSize, sdOrigin, cudaId); sd.set_radius(radius_); for (size_t dataIdx = 0; dataIdx < dataElemSize_.size(); ++dataIdx) { sd.add_data(dataElemSize_[dataIdx]); } domains_.push_back(sd); } // realize local domains for (auto &d : domains_) { d.realize(); } #ifdef STENCIL_SETUP_STATS double maxElapsed = -1; double elapsed = 
MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeRealize_ += maxElapsed; } #endif #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); start = MPI_Wtime(); #endif // outbox for same-GPU exchanges std::vector<Message> peerAccessOutbox; // outboxes for same-rank exchanges std::vector<std::vector<std::vector<Message>>> peerCopyOutboxes; // peerCopyOutboxes[di][dj] = peer copy from di to dj // outbox for co-located domains in different ranks // one outbox for each co-located domain std::vector<std::map<Dim3, std::vector<Message>>> coloOutboxes; std::vector<std::map<Dim3, std::vector<Message>>> coloInboxes; // coloOutboxes[di][dstRank] = messages // inbox for each remote domain my domains recv from std::vector<std::map<Dim3, std::vector<Message>>> remoteInboxes; // remoteOutboxes_[domain][srcIdx] = messages // outbox for each remote domain my domains send to std::vector<std::map<Dim3, std::vector<Message>>> remoteOutboxes; // remoteOutboxes[domain][dstIdx] = messages LOG_DEBUG("comm plan"); /* For each direction, look up where the destination device is and decide which communication method to use. We do not create a message where the message size would be zero */ nvtxRangePush("DistributedDomain::realize() plan messages"); #ifdef STENCIL_SETUP_STATS // rank-rank communication amount matrix Mat2D<uint64_t> rankCommBytes(mpi::comm_size(MPI_COMM_WORLD), mpi::comm_size(MPI_COMM_WORLD), 0); #endif peerCopyOutboxes.resize(gpus_.size()); for (auto &v : peerCopyOutboxes) { v.resize(gpus_.size()); } coloOutboxes.resize(gpus_.size()); coloInboxes.resize(gpus_.size()); remoteOutboxes.resize(gpus_.size()); remoteInboxes.resize(gpus_.size()); for (size_t di = 0; di < domains_.size(); ++di) { const Dim3 myIdx = placement_->get_idx(rank_, di); const int myDev = domains_[di].gpu(); assert(myDev == placement_->get_cuda(myIdx)); for (int z = -1; z <= 1; ++z) { for (int y = -1; y <= 1; ++y) { for (int x = -1; x <= 1; ++x) { // send direction const Dim3 dir(x, y, z); if (Dim3(0, 0, 0) == dir) { continue; // no message } // Only do sends when the stencil radius in the opposite // direction is non-zero for example, if +x radius is 2, our -x // neighbor needs a halo region from us, so we need to plan to send // in that direction if (0 == radius_.dir(dir * -1)) { continue; // no sends or recvs for this dir } else { LOG_DEBUG(dir << " radius = " << radius_.dir(dir * -1)); } const Topology::OptionalNeighbor dstNbr = topology_.get_neighbor(myIdx, dir); if (!dstNbr.exists) { continue; } const Dim3 dstIdx = dstNbr.index; const int dstRank = placement_->get_rank(dstIdx); const int dstGPU = placement_->get_subdomain_id(dstIdx); const int dstDev = placement_->get_cuda(dstIdx); // size of our send is the size of the recieving neighbor's halo in -dir const Dim3 dstSize = placement_->subdomain_size(dstIdx); const Dim3 sExt = LocalDomain::halo_extent(dir * -1, dstSize, radius_); Message sMsg(dir, di, dstGPU, sExt); // TODO: move this out of the plan so that this time isn't accumulated into the statistics #ifdef STENCIL_SETUP_STATS for (int qi = 0; qi < domains_[di].num_data(); ++qi) { // send size matches size of halo that we're recving into const size_t bytes = domains_[di].halo_bytes(dir * -1, qi); // FIXME: directionality? 
rankCommBytes.at(rank_, dstRank) += bytes; } #endif // TODO: this method can be removed, in place of the peer access method if (any_methods(Method::CudaKernel)) { if (dstRank == rank_ && myDev == dstDev) { peerAccessOutbox.push_back(sMsg); goto send_planned; } } if (any_methods(Method::CudaMemcpyPeer)) { LOG_DEBUG("peer " << rank_ << " " << dstRank << " peer(" << myDev << "," << dstDev << ")=" << gpu_topo::peer(myDev, dstDev)); if (dstRank == rank_ && gpu_topo::peer(myDev, dstDev)) { peerCopyOutboxes[di][dstGPU].push_back(sMsg); goto send_planned; } } /* FIXME: for now, we require that all GPUs be visible to all colocated ranks. This is used to detect the GPU distance. Ultimately, we'd like to be able to figure this out even in the presence of CUDA_VISIBLE_DEVICES making each rank have a different CUDA device 0 Then, we could restrict CPU code to run on CPUs nearby to the GPU */ if (any_methods(Method::ColoPackMemcpyUnpack | Method::ColoQuantityKernel | Method::ColoRegionKernel | Method::ColoMemcpy3d | Method::ColoDomainKernel)) { if ((dstRank != rank_) && mpiTopology_.colocated(dstRank) && gpu_topo::peer(myDev, dstDev)) { assert(di < coloOutboxes.size()); coloOutboxes[di].emplace(dstIdx, std::vector<Message>()); coloOutboxes[di][dstIdx].push_back(sMsg); LOG_DEBUG("Plan send <colocated> for Mesage dir=" << sMsg.dir_); goto send_planned; } } if (any_methods(Method::CudaMpi)) { assert(di < remoteOutboxes.size()); remoteOutboxes[di][dstIdx].push_back(sMsg); LOG_DEBUG("Plan send <remote> " << myIdx << " (r" << rank_ << "d" << di << "g" << myDev << ")" << " -> " << dstIdx << " (r" << dstRank << "d" << dstGPU << "g" << dstDev << ")" << " (dir=" << dir << ", rad" << dir * -1 << "=" << radius_.dir(dir * -1) << ")"); goto send_planned; } LOG_FATAL("No method available to send required message " << sMsg.dir_ << "\n"); send_planned: // successfully found a way to send const Topology::OptionalNeighbor srcNbr = topology_.get_neighbor(myIdx, dir * -1); if (!srcNbr.exists) { continue; } const Dim3 srcIdx = srcNbr.index; const int srcRank = placement_->get_rank(srcIdx); const int srcGPU = placement_->get_subdomain_id(srcIdx); const int srcDev = placement_->get_cuda(srcIdx); // size of our recv is the size of our halo in -dir const Dim3 rExt = domains_[di].halo_extent(dir * -1); Message rMsg(dir, srcGPU, di, rExt); if (any_methods(Method::CudaKernel)) { if (srcRank == rank_ && srcDev == myDev) { // no recver needed goto recv_planned; } } if (any_methods(Method::CudaMemcpyPeer)) { if (srcRank == rank_ && gpu_topo::peer(srcDev, myDev)) { // no recver needed goto recv_planned; } } if (any_methods(Method::ColoPackMemcpyUnpack | Method::ColoQuantityKernel | Method::ColoRegionKernel | Method::ColoMemcpy3d | Method::ColoDomainKernel)) { if ((srcRank != rank_) && mpiTopology_.colocated(srcRank) && gpu_topo::peer(srcDev, myDev)) { assert(di < coloInboxes.size()); coloInboxes[di].emplace(srcIdx, std::vector<Message>()); coloInboxes[di][srcIdx].push_back(sMsg); LOG_SPEW("Plan recv <colo> " << srcIdx << "->" << myIdx << " (dir=" << dir << "): r" << dir * -1 << "=" << radius_.dir(dir * -1)); goto recv_planned; } } if (any_methods(Method::CudaMpi)) { assert(di < remoteInboxes.size()); remoteInboxes[di].emplace(srcIdx, std::vector<Message>()); remoteInboxes[di][srcIdx].push_back(sMsg); LOG_SPEW("Plan recv <remote> " << srcIdx << "->" << myIdx << " (dir=" << dir << "): r" << dir * -1 << "=" << radius_.dir(dir * -1)); goto recv_planned; } LOG_FATAL("No method available to recv required message"); recv_planned: // found a 
way to recv (void)0; } } } } nvtxRangePop(); // plan #ifdef STENCIL_SETUP_STATS elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timePlan_ += maxElapsed; } #endif /* ------------------------- dump communication matrices ---------------------------- to be loaded with numpy.loadtxt */ #ifdef STENCIL_SETUP_STATS { if (0 == rank_) { MPI_Reduce(MPI_IN_PLACE, rankCommBytes.data(), rankCommBytes.size(), MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); } else { MPI_Reduce(rankCommBytes.data(), rankCommBytes.data(), rankCommBytes.size(), MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); } if (0 == rank_) { std::string matFileName = outputPrefix_ + "mat_npy_loadtxt" + ".txt"; std::ofstream matFile(matFileName, std::ofstream::out); for (unsigned r = 0; r < rankCommBytes.shape().y; ++r) { for (unsigned c = 0; c < rankCommBytes.shape().x; ++c) { matFile << rankCommBytes.at(r, c) << " "; } matFile << std::endl; } } } #endif /* ------------------------- summarize communication plan ---------------------------- Dump one file per rank describing who and how we communicate Also total up the number of bytes we are sending for an aggregate bandwidth estimation. ----------------------------*/ { #ifdef STENCIL_SETUP_STATS numBytesCudaMpi_ = 0; numBytesColoDirectAccess_ = 0; numBytesColoPackMemcpyUnpack_ = 0; numBytesCudaMemcpyPeer_ = 0; numBytesCudaKernel_ = 0; #endif std::string planFileName = outputPrefix_ + "plan_" + std::to_string(rank_) + ".txt"; std::ofstream planFile(planFileName, std::ofstream::out); planFile << "rank=" << rank_ << "\n\n"; planFile << "== quantities == \n"; planFile << "domains\n"; for (size_t di = 0; di < domains_.size(); ++di) { planFile << di << ":cuda" << domains_[di].gpu() << ":" << placement_->get_idx(rank_, di) << " sz=" << domains_[di].size() << "\n"; } planFile << "\n"; planFile << "== peerAccess ==\n"; for (auto &msg : peerAccessOutbox) { size_t peerBytes = 0; for (int qi = 0; qi < domains_[msg.srcGPU_].num_data(); ++qi) { // send size matches size of halo that we're recving into const size_t bytes = domains_[msg.srcGPU_].halo_bytes(msg.dir_ * -1, qi); peerBytes += bytes; #ifdef STENCIL_SETUP_STATS numBytesCudaKernel_ += bytes; #endif } planFile << msg.srcGPU_ << "->" << msg.dstGPU_ << " " << msg.dir_ << " " << peerBytes << "B\n"; } planFile << "\n"; planFile << "== peerCopy ==\n"; for (size_t srcGPU = 0; srcGPU < peerCopyOutboxes.size(); ++srcGPU) { for (size_t dstGPU = 0; dstGPU < peerCopyOutboxes[srcGPU].size(); ++dstGPU) { size_t peerBytes = 0; for (const auto &msg : peerCopyOutboxes[srcGPU][dstGPU]) { for (int64_t i = 0; i < domains_[srcGPU].num_data(); ++i) { // send size matches size of halo that we're recving into const int64_t bytes = domains_[srcGPU].halo_bytes(msg.dir_ * -1, i); peerBytes += bytes; #ifdef STENCIL_SETUP_STATS numBytesCudaMemcpyPeer_ += bytes; #endif } planFile << srcGPU << "->" << dstGPU << " " << msg.dir_ << " " << peerBytes << "B\n"; } } } planFile << "\n"; // std::vector<std::map<Dim3, std::vector<Message>>> coloOutboxes; planFile << "== colo ==\n"; for (size_t di = 0; di < coloOutboxes.size(); ++di) { std::map<Dim3, std::vector<Message>> &obxs = coloOutboxes[di]; for (auto &kv : obxs) { const Dim3 dstIdx = kv.first; auto &box = kv.second; planFile << "colo to dstIdx=" << dstIdx << "\n"; for (auto &msg : box) { planFile << "dir=" << msg.dir_ << " (" << msg.srcGPU_ << "->" << msg.dstGPU_ << ")\n"; #ifdef STENCIL_SETUP_STATS for (int64_t i = 0; i < domains_[di].num_data(); ++i) { // send 
size matches size of halo that we're recving into uint64_t numBytes = domains_[di].halo_bytes(msg.dir_ * -1, i); if (flags_ && Method::ColoQuantityKernel) { numBytesColoDirectAccess_ += numBytes; } else if (flags_ && Method::ColoPackMemcpyUnpack) { numBytesColoPackMemcpyUnpack_ += numBytes; } else { LOG_WARN("unpected method flag, statistics may be nonsense"); } } #endif } } } planFile << "\n"; planFile << "== remote ==\n"; for (size_t di = 0; di < remoteOutboxes.size(); ++di) { std::map<Dim3, std::vector<Message>> &obxs = remoteOutboxes[di]; for (auto &kv : obxs) { const Dim3 dstIdx = kv.first; auto &box = kv.second; planFile << "remote to dstIdx=" << dstIdx << "\n"; for (auto &msg : box) { planFile << "dir=" << msg.dir_ << " (" << msg.srcGPU_ << "->" << msg.dstGPU_ << ")\n"; #ifdef STENCIL_SETUP_STATS for (int64_t i = 0; i < domains_[di].num_data(); ++i) { // send size matches size of halo that we're recving into numBytesCudaMpi_ += domains_[di].halo_bytes(msg.dir_ * -1, i); } #endif } } } planFile.close(); // give every rank the total send volume #ifdef STENCIL_SETUP_STATS nvtxRangePush("allreduce communication stats"); MPI_Allreduce(MPI_IN_PLACE, &numBytesCudaMpi_, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &numBytesColoDirectAccess_, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &numBytesColoPackMemcpyUnpack_, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &numBytesCudaMemcpyPeer_, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &numBytesCudaKernel_, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); nvtxRangePop(); if (rank_ == 0) { LOG_INFO(numBytesCudaMpi_ << "B CudaMpi / exchange"); LOG_INFO(numBytesColoDirectAccess_ << "B ColoDirectAccess / exchange"); LOG_INFO(numBytesColoPackMemcpyUnpack_ << "B ColoPackMemcpyUnpack / exchange"); LOG_INFO(numBytesCudaMemcpyPeer_ << "B CudaMemcpyPeer / exchange"); LOG_INFO(numBytesCudaKernel_ << "B CudaKernel / exchange"); } #endif } #ifdef STENCIL_SETUP_STATS MPI_Barrier(MPI_COMM_WORLD); start = MPI_Wtime(); #endif // create remote sender/recvers LOG_DEBUG("create remote"); nvtxRangePush("DistributedDomain::realize: create remote"); // per-domain senders and messages remoteSenders_.resize(gpus_.size()); remoteRecvers_.resize(gpus_.size()); // create all required remote senders/recvers for (size_t di = 0; di < domains_.size(); ++di) { for (auto &kv : remoteOutboxes[di]) { const Dim3 dstIdx = kv.first; const int dstRank = placement_->get_rank(dstIdx); const int dstGPU = placement_->get_subdomain_id(dstIdx); if (0 == remoteSenders_[di].count(dstIdx)) { StatefulSender *sender = nullptr; if (any_methods(Method::CudaMpi)) { #if STENCIL_USE_CUDA_AWARE_MPI == 1 sender = new CudaAwareMpiSender(rank_, di, dstRank, dstGPU, domains_[di]); #else sender = new RemoteSender(rank_, di, dstRank, dstGPU, domains_[di]); #endif } assert(sender); remoteSenders_[di].emplace(dstIdx, sender); } } for (auto &kv : remoteInboxes[di]) { const Dim3 srcIdx = kv.first; const int srcRank = placement_->get_rank(srcIdx); const int srcGPU = placement_->get_subdomain_id(srcIdx); if (0 == remoteRecvers_[di].count(srcIdx)) { StatefulRecver *recver = nullptr; if (any_methods(Method::CudaMpi)) { #if STENCIL_USE_CUDA_AWARE_MPI == 1 recver = new CudaAwareMpiRecver(srcRank, srcGPU, rank_, di, domains_[di]); #else recver = new RemoteRecver(srcRank, srcGPU, rank_, di, domains_[di]); #endif } assert(recver); remoteRecvers_[di].emplace(srcIdx, recver); } } } nvtxRangePop(); // create remote 
LOG_DEBUG("create colocated"); // create colocated sender/recvers nvtxRangePush("DistributedDomain::realize: create colocated"); // per-domain senders and messages coloSenders_.resize(gpus_.size()); coloRecvers_.resize(gpus_.size()); // create all required colocated senders/recvers for (size_t di = 0; di < domains_.size(); ++di) { for (auto &kv : coloOutboxes[di]) { StatefulSender *sender = nullptr; const Dim3 dstIdx = kv.first; const int dstRank = placement_->get_rank(dstIdx); const int dstGPU = placement_->get_subdomain_id(dstIdx); LOG_DEBUG("create ColoSender to " << dstIdx << " on " << dstRank << " (" << dstGPU << ")"); if (any_methods(Method::ColoPackMemcpyUnpack)) { sender = new ColocatedHaloSender(rank_, di, dstRank, dstGPU, domains_[di]); } else if (any_methods(Method::ColoQuantityKernel)) { sender = new ColoQuantityKernelSender(rank_, di, dstRank, dstGPU, domains_[di], placement_); } else if (any_methods(Method::ColoRegionKernel)) { sender = new ColoRegionKernelSender(rank_, di, dstRank, dstGPU, domains_[di], placement_); } else if (any_methods(Method::ColoMemcpy3d)) { sender = new ColoMemcpy3dHaloSender(rank_, di, dstRank, dstGPU, domains_[di], placement_); } else if (any_methods(Method::ColoDomainKernel)) { sender = new ColoDomainKernelSender(rank_, di, dstRank, dstGPU, domains_[di], placement_); } coloSenders_[di].emplace(dstIdx, sender); } for (auto &kv : coloInboxes[di]) { StatefulRecver *recver; const Dim3 srcIdx = kv.first; const int srcRank = placement_->get_rank(srcIdx); const int srcGPU = placement_->get_subdomain_id(srcIdx); LOG_DEBUG("create ColoRecver from " << srcIdx << " on " << srcRank << " (" << srcGPU << ")"); if (any_methods(Method::ColoPackMemcpyUnpack)) { recver = new ColocatedHaloRecver(srcRank, srcGPU, rank_, di, domains_[di]); } else if (any_methods(Method::ColoQuantityKernel)) { recver = new ColoHaloRecver(srcRank, srcGPU, rank_, di, domains_[di]); } else if (any_methods(Method::ColoRegionKernel)) { recver = new ColoHaloRecver(srcRank, srcGPU, rank_, di, domains_[di]); } else if (any_methods(Method::ColoMemcpy3d)) { recver = new ColoHaloRecver(srcRank, srcGPU, rank_, di, domains_[di]); } else if (any_methods(Method::ColoDomainKernel)) { recver = new ColoHaloRecver(srcRank, srcGPU, rank_, di, domains_[di]); } coloRecvers_[di].emplace(srcIdx, recver); } } nvtxRangePop(); // create colocated LOG_DEBUG("create peer copy"); // create colocated sender/recvers nvtxRangePush("DistributedDomain::realize: create PeerCopySender"); // per-domain senders and messages peerCopySenders_.resize(gpus_.size()); LOG_SPEW("Peer Copy Sender for " << peerCopySenders_.size() << " sources"); // create all required colocated senders/recvers for (size_t srcGPU = 0; srcGPU < peerCopyOutboxes.size(); ++srcGPU) { LOG_SPEW("srcGPU = " << srcGPU); for (size_t dstGPU = 0; dstGPU < peerCopyOutboxes[srcGPU].size(); ++dstGPU) { LOG_SPEW("dstGPU = " << dstGPU); if (!peerCopyOutboxes[srcGPU][dstGPU].empty()) { LOG_SPEW("create PeerCopySender(" << srcGPU << "," << dstGPU << "...)"); PeerCopySender pcs(srcGPU, dstGPU, domains_[srcGPU], domains_[dstGPU]); LOG_SPEW("finished create"); peerCopySenders_[srcGPU].emplace(dstGPU, pcs); } else { LOG_SPEW("no msg between srcGPU=" << srcGPU << " and dstGPU=" << dstGPU); } } } nvtxRangePop(); // create peer copy // prepare senders and receivers LOG_DEBUG("DistributedDomain::realize: prepare PeerAccessSender"); nvtxRangePush("DistributedDomain::realize: prep peerAccessSender"); peerAccessSender_.prepare(peerAccessOutbox, domains_); nvtxRangePop(); std::cerr 
<< "DistributedDomain::realize: prepare PeerCopySender\n"; nvtxRangePush("DistributedDomain::realize: prep peerCopySender"); for (size_t srcGPU = 0; srcGPU < peerCopySenders_.size(); ++srcGPU) { for (auto &kv : peerCopySenders_[srcGPU]) { const int dstGPU = kv.first; auto &sender = kv.second; sender.prepare(peerCopyOutboxes[srcGPU][dstGPU]); } } nvtxRangePop(); std::cerr << "DistributedDomain::realize: start_prepare " "ColocatedSender/ColocatedRecver\n"; nvtxRangePush("DistributedDomain::realize: prep colocated"); assert(coloSenders_.size() == coloRecvers_.size()); for (size_t di = 0; di < coloSenders_.size(); ++di) { for (auto &kv : coloSenders_[di]) { const Dim3 dstIdx = kv.first; const int dstRank = placement_->get_rank(dstIdx); StatefulSender *sender = kv.second; LOG_DEBUG(" colo sender.start_prepare " << placement_->get_idx(rank_, di) << "->" << dstIdx << "(rank " << dstRank << ")"); sender->start_prepare(coloOutboxes[di][dstIdx]); } for (auto &kv : coloRecvers_[di]) { const Dim3 srcIdx = kv.first; StatefulRecver *recver = kv.second; LOG_DEBUG(" colo recver.start_prepare " << srcIdx << "->" << placement_->get_idx(rank_, di)); recver->start_prepare(coloInboxes[di][srcIdx]); } } LOG_DEBUG("DistributedDomain::realize: finish_prepare ColocatedSender/ColocatedRecver"); for (size_t di = 0; di < coloSenders_.size(); ++di) { for (auto &kv : coloSenders_[di]) { const Dim3 dstIdx = kv.first; StatefulSender *sender = kv.second; const int srcDev = domains_[di].gpu(); const int dstDev = placement_->get_cuda(dstIdx); LOG_DEBUG("colo sender.finish_prepare " << placement_->get_idx(rank_, di) << " -> " << dstIdx); sender->finish_prepare(); } for (auto &kv : coloRecvers_[di]) { StatefulRecver *recver = kv.second; LOG_DEBUG("colo recver.finish_prepare for colo from " << kv.first); recver->finish_prepare(); } } nvtxRangePop(); // prep remote LOG_DEBUG("DistributedDomain::realize: prepare RemoteSender/RemoteRecver"); nvtxRangePush("DistributedDomain::realize: prep remote"); assert(remoteSenders_.size() == remoteRecvers_.size()); for (size_t di = 0; di < remoteSenders_.size(); ++di) { for (auto &kv : remoteSenders_[di]) { const Dim3 dstIdx = kv.first; auto &sender = kv.second; sender->start_prepare(remoteOutboxes[di][dstIdx]); } for (auto &kv : remoteRecvers_[di]) { const Dim3 srcIdx = kv.first; auto &recver = kv.second; recver->start_prepare(remoteInboxes[di][srcIdx]); } } for (size_t di = 0; di < remoteSenders_.size(); ++di) { for (auto &kv : remoteSenders_[di]) { // const Dim3 dstIdx = kv.first; StatefulSender *sender = kv.second; sender->finish_prepare(); } for (auto &kv : remoteRecvers_[di]) { // const Dim3 srcIdx = kv.first; StatefulRecver *recver = kv.second; recver->finish_prepare(); } } nvtxRangePop(); // prep remote #ifdef STENCIL_SETUP_STATS elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeCreate_ += maxElapsed; } #endif } void DistributedDomain::swap() { LOG_DEBUG("swap()"); #ifdef STENCIL_EXCHANGE_STATS MPI_Barrier(MPI_COMM_WORLD); double start = MPI_Wtime(); #endif for (auto &d : domains_) { d.swap(); } #ifdef STENCIL_EXCHANGE_STATS double elapsed = MPI_Wtime() - start; double maxElapsed = -1; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeSwap_ += maxElapsed; } #endif } /* start with the whole compute region, and check each direction of the stencil. 
move the correponding face/edge/corner inward enough to compensate an access in that direction */ std::vector<Rect3> DistributedDomain::get_interior() const { // one sparse domain for each LocalDomain std::vector<Rect3> ret(domains_.size()); // direction of our halo for (size_t di = 0; di < domains_.size(); ++di) { const LocalDomain &dom = domains_[di]; const Rect3 comReg = dom.get_compute_region(); Rect3 intReg = dom.get_compute_region(); for (int dz = -1; dz <= 1; ++dz) { for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { const Dim3 dir(dx, dy, dz); if (Dim3(0, 0, 0) == dir) { continue; } // if the radius is non-zero in a negative direction, // move the lower corner of that direction inward if (dir.x < 0) { intReg.lo.x = std::max(comReg.lo.x + int64_t(radius_.dir(dir)), intReg.lo.x); } else if (dir.x > 0) { intReg.hi.x = std::min(comReg.hi.x - int64_t(radius_.dir(dir)), intReg.hi.x); } if (dir.y < 0) { intReg.lo.y = std::max(comReg.lo.y + int64_t(radius_.dir(dir)), intReg.lo.y); } else if (dir.y > 0) { intReg.hi.y = std::min(comReg.hi.y - int64_t(radius_.dir(dir)), intReg.hi.y); } if (dir.z < 0) { intReg.lo.z = std::max(comReg.lo.z + int64_t(radius_.dir(dir)), intReg.lo.z); } else if (dir.z > 0) { intReg.hi.z = std::min(comReg.hi.z - int64_t(radius_.dir(dir)), intReg.hi.z); } } } } ret[di] = intReg; } return ret; } /* the exterior is everything that is not in the interior. build non-overlapping regions by sliding faces of the compute region in until they reach the interior */ std::vector<std::vector<Rect3>> DistributedDomain::get_exterior() const { // one sparse domain for each LocalDomain std::vector<std::vector<Rect3>> ret(domains_.size()); const std::vector<Rect3> intRegs = get_interior(); for (size_t di = 0; di < domains_.size(); ++di) { const LocalDomain &dom = domains_[di]; const Rect3 &intReg = intRegs[di]; Rect3 comReg = dom.get_compute_region(); // +x if (intReg.hi.x != comReg.hi.x) { Rect3 extReg(Dim3(intReg.hi.x, comReg.lo.y, comReg.lo.z), Dim3(comReg.hi.x, comReg.hi.y, comReg.hi.z)); comReg.hi.x = intReg.hi.x; // slide face in ret[di].push_back(extReg); } // +y if (intReg.hi.y != comReg.hi.y) { Rect3 extReg(Dim3(comReg.lo.x, intReg.hi.y, comReg.lo.z), Dim3(comReg.hi.x, comReg.hi.y, comReg.hi.z)); comReg.hi.y = intReg.hi.y; // slide face in ret[di].push_back(extReg); } // +z if (intReg.hi.z != comReg.hi.z) { Rect3 extReg(Dim3(comReg.lo.x, comReg.lo.y, intReg.hi.z), Dim3(comReg.hi.x, comReg.hi.y, comReg.hi.z)); comReg.hi.z = intReg.hi.z; // slide face in ret[di].push_back(extReg); } // -x if (intReg.lo.x != comReg.lo.x) { Rect3 extReg(Dim3(comReg.lo.x, comReg.lo.y, comReg.lo.z), Dim3(intReg.lo.x, comReg.hi.y, comReg.hi.z)); comReg.lo.x = intReg.lo.x; // slide face in ret[di].push_back(extReg); } // -y if (intReg.lo.y != comReg.lo.y) { Rect3 extReg(Dim3(comReg.lo.x, comReg.lo.y, comReg.lo.z), Dim3(comReg.hi.x, intReg.lo.y, comReg.hi.z)); comReg.lo.y = intReg.lo.y; // slide face in ret[di].push_back(extReg); } // -z if (intReg.lo.z != comReg.lo.z) { Rect3 extReg(Dim3(comReg.lo.x, comReg.lo.y, comReg.lo.z), Dim3(comReg.hi.x, comReg.hi.y, intReg.lo.z)); comReg.lo.z = intReg.lo.z; // slide face in ret[di].push_back(extReg); } } return ret; } const Rect3 DistributedDomain::get_compute_region() const noexcept { return Rect3(Dim3(0, 0, 0), size_); } bool DistributedDomain::poll_advance_sends() { nvtxRangePush("DD::poll_advance_sends"); bool pending = false; // move senders from d2h to h2h for (auto &domSenders : remoteSenders_) { for (auto &kv : domSenders) { 
StatefulSender *sender = kv.second; if (sender->active()) { pending = true; if (sender->next_ready()) { sender->next(); } } } } nvtxRangePop(); return pending; } void DistributedDomain::exchange() { nvtxRangePush("DD::exchange()"); #ifdef STENCIL_EXCHANGE_STATS MPI_Barrier(MPI_COMM_WORLD); double start = MPI_Wtime(); #endif /*! Try to start sends in order from longest to shortest * we expect remote to be longest, followed by peer copy, followed by colo * colo is shorter than peer copy due to the node-aware data placement: * if we try to place bigger exchanges nearby, they will be faster */ // start remote send d2h LOG_DEBUG("remote send start"); nvtxRangePush("DD::exchange: remote send d2h"); for (auto &domSenders : remoteSenders_) { for (auto &kv : domSenders) { StatefulSender *sender = kv.second; sender->send(); poll_advance_sends(); } } nvtxRangePop(); // start colocated Senders LOG_DEBUG("start colo send"); nvtxRangePush("DD::exchange: colo send"); for (auto &domSenders : coloSenders_) { for (auto &kv : domSenders) { StatefulSender *sender = kv.second; sender->send(); } } nvtxRangePop(); // send same-rank messages LOG_DEBUG("send peer copy"); nvtxRangePush("DD::exchange: peer copy send"); for (auto &src : peerCopySenders_) { for (auto &kv : src) { PeerCopySender &sender = kv.second; sender.send(); } } nvtxRangePop(); // send self messages LOG_DEBUG("send peer access"); nvtxRangePush("DD::exchange: peer access send"); peerAccessSender_.send(); nvtxRangePop(); // start colocated recvers LOG_DEBUG("start colo recv"); nvtxRangePush("DD::exchange: colo recv"); for (auto &domRecvers : coloRecvers_) { for (auto &kv : domRecvers) { StatefulRecver *recver = kv.second; recver->recv(); } } nvtxRangePop(); // start remote recv h2h LOG_DEBUG("[" << rank_ << "] remote recv start"); nvtxRangePush("DD::exchange: remote recv h2h"); for (auto &domRecvers : remoteRecvers_) { for (auto &kv : domRecvers) { StatefulRecver *recver = kv.second; recver->recv(); } } nvtxRangePop(); // poll stateful senders and recvers to move onto next step until all are done LOG_DEBUG("[" << rank_ << "] start poll"); nvtxRangePush("DD::exchange: poll"); bool pending = true; /* the intuition here is to prefer senders. 
as soon as we make progress on anything that's not a sender, jump back to the senders and try again */ while (pending) { pending = false; senders: pending |= poll_advance_sends(); // move recvers from h2h to h2d for (auto &domRecvers : remoteRecvers_) { for (auto &kv : domRecvers) { StatefulRecver *recver = kv.second; if (recver->active()) { pending = true; if (recver->next_ready()) { // const Dim3 srcIdx = kv.first; // std::cerr << "[" << rank_ << "] src=" << srcIdx << " // recv_h2d\n"; recver->next(); goto senders; // try to send as early as possible } } } } for (auto &domRecvers : coloRecvers_) { for (auto &kv : domRecvers) { StatefulRecver *recver = kv.second; if (recver->active()) { pending = true; if (recver->next_ready()) { recver->next(); goto senders; // try to send as early as possible } } } } // colosender: none of them are stateful, so we do not check them } nvtxRangePop(); // DD::exchange: poll // wait for sends LOG_SPEW("wait for peer access senders"); nvtxRangePush("peerAccessSender.wait()"); peerAccessSender_.wait(); nvtxRangePop(); nvtxRangePush("peerCopySender.wait()"); for (auto &src : peerCopySenders_) { for (auto &kv : src) { PeerCopySender &sender = kv.second; sender.wait(); } } nvtxRangePop(); // peerCopySender.wait() // wait for colocated nvtxRangePush("colocated.wait()"); for (auto &domSenders : coloSenders_) { for (auto &kv : domSenders) { LOG_SPEW("domain=" << kv.first << " wait colocated sender"); StatefulSender *sender = kv.second; sender->wait(); } } for (auto &domRecvers : coloRecvers_) { for (auto &kv : domRecvers) { LOG_SPEW("domain=" << kv.first << " wait colocated recver"); StatefulRecver *recver = kv.second; recver->wait(); } } nvtxRangePop(); // colocated wait nvtxRangePush("remote wait"); // wait for remote senders and recvers // printf("rank=%d wait for RemoteRecver/RemoteSender\n", rank_); for (auto &domRecvers : remoteRecvers_) { for (auto &kv : domRecvers) { LOG_SPEW("domain=" << kv.first << " wait remote recver"); StatefulRecver *recver = kv.second; assert(recver); recver->wait(); } } for (auto &domSenders : remoteSenders_) { for (auto &kv : domSenders) { LOG_SPEW("domain=" << kv.first << " wait remote sender"); StatefulSender *sender = kv.second; assert(sender); sender->wait(); } } nvtxRangePop(); // remote wait #ifdef STENCIL_EXCHANGE_STATS double maxElapsed = -1; double elapsed = MPI_Wtime() - start; MPI_Reduce(&elapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (0 == rank_) { timeExchange_ += maxElapsed; } #endif nvtxRangePop(); // "DD::excchange" // No barrier necessary: the CPU thread has already blocked until all recvs are done, so it is safe to proceed. 
} void DistributedDomain::write_paraview(const std::string &prefix, bool zeroNaNs) { const char delim[] = ","; nvtxRangePush("write_paraview"); const int rank = mpi::world_rank(); const int size = mpi::world_size(); int64_t num = rank * domains_.size(); for (size_t di = 0; di < domains_.size(); ++di) { int64_t id = rank * domains_.size() + di; const std::string path = prefix + "_" + std::to_string(id) + ".txt"; LOG_INFO("write paraview file " << path); LocalDomain &domain = domains_[di]; LOG_DEBUG("copy interiors to host"); std::vector<std::vector<unsigned char>> quantities; for (int64_t qi = 0; qi < domain.num_data(); ++qi) { quantities.push_back(domain.interior_to_host(qi)); } LOG_DEBUG("open " << path); FILE *outf = fopen(path.c_str(), "w"); if (!outf) { LOG_ERROR("unable to open \"" << path << "\" for writing"); return; } // column headers fprintf(outf, "Z%sY%sX", delim, delim); for (int64_t qi = 0; qi < domain.num_data(); ++qi) { std::string colName = domain.dataName_[qi]; if (colName.empty()) { colName = "data" + std::to_string(qi); } fprintf(outf, "%s%s", delim, colName.c_str()); } fprintf(outf, "\n"); const Dim3 origin = domains_[di].origin(); // print rows for (int64_t lz = 0; lz < domain.sz_.z; ++lz) { for (int64_t ly = 0; ly < domain.sz_.y; ++ly) { for (int64_t lx = 0; lx < domain.sz_.x; ++lx) { Dim3 pos = origin + Dim3(lx, ly, lz); fprintf(outf, "%ld%s%ld%s%ld", pos.z, delim, pos.y, delim, pos.x); for (int64_t qi = 0; qi < domain.num_data(); ++qi) { if (8 == domain.elem_size(qi)) { double val = reinterpret_cast<double *>( quantities[qi].data())[lz * (domain.sz_.y * domain.sz_.x) + ly * domain.sz_.x + lx]; if (zeroNaNs && std::isnan(val)) { val = 0.0; } fprintf(outf, "%s%.17f", delim, val); } else if (4 == domain.elem_size(qi)) { float val = reinterpret_cast<float *>( quantities[qi].data())[lz * (domain.sz_.y * domain.sz_.x) + ly * domain.sz_.x + lx]; if (zeroNaNs && std::isnan(val)) { val = 0.0f; } fprintf(outf, "%s%.9f", delim, val); } } fprintf(outf, "\n"); } } } } nvtxRangePop(); } void DistributedDomain::set_output_prefix(const std::string &prefix) { outputPrefix_ = prefix; }
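/*
 * The DistributedDomain constructor above calls gpu_topo::enable_peer(srcGpu, dstGpu)
 * for every pair of node-local devices; that helper is defined elsewhere in the
 * library and is not shown in this file. The sketch below only illustrates what
 * such a helper typically does with the CUDA runtime API -- the function name
 * enable_peer_sketch and the logging are assumptions, not the actual gpu_topo code.
 */
#include <cstdio>
#include <cuda_runtime.h>

static void enable_peer_sketch(int src, int dst) {
  if (src == dst) return; // a device can always access its own memory
  int canAccess = 0;
  cudaDeviceCanAccessPeer(&canAccess, src, dst);
  if (!canAccess) {
    std::printf("peer access %d->%d not supported\n", src, dst);
    return;
  }
  int prev;
  cudaGetDevice(&prev);
  cudaSetDevice(src);
  // calling this twice returns cudaErrorPeerAccessAlreadyEnabled, which is benign
  cudaError_t err = cudaDeviceEnablePeerAccess(dst, 0 /* flags must be 0 */);
  if (err != cudaSuccess && err != cudaErrorPeerAccessAlreadyEnabled) {
    std::printf("enable peer %d->%d failed: %s\n", src, dst, cudaGetErrorString(err));
  }
  cudaSetDevice(prev);
}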
83f021dc498faf82fc03843fdbf82cad939b50d2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.

#include "codelets.h"

__global__ void FFT16_device( float2 *dst, float2 *src )
{
    int tid = threadIdx.x;

    int iblock = blockIdx.y * gridDim.x + blockIdx.x;
    int index = iblock * 1024 + tid;
    src += index;
    dst += index;

    float2 a[16];
    load<16>( a, src, 64 );
    FFT16( a );
    store<16>( a, dst, 64 );
}

extern "C" void FFT16( float2 *work, int batch )
{
    hipLaunchKernelGGL(( FFT16_device), dim3(grid2D(batch/64)), dim3(64) , 0, 0, work, work );
}
83f021dc498faf82fc03843fdbf82cad939b50d2.cu
// Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.

#include "codelets.h"

__global__ void FFT16_device( float2 *dst, float2 *src )
{
    int tid = threadIdx.x;

    int iblock = blockIdx.y * gridDim.x + blockIdx.x;
    int index = iblock * 1024 + tid;
    src += index;
    dst += index;

    float2 a[16];
    load<16>( a, src, 64 );
    FFT16( a );
    store<16>( a, dst, 64 );
}

extern "C" void FFT16( float2 *work, int batch )
{
    FFT16_device<<< grid2D(batch/64), 64 >>>( work, work );
}
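/*
 * The paired .hip and .cu versions of this codelet differ only in how the kernel
 * is launched: hipify rewrites the CUDA triple-chevron syntax into a
 * hipLaunchKernelGGL call. The snippet below is a minimal, self-contained
 * illustration of that correspondence; the kernel scale_sketch and its launcher
 * are made up for this example and are not part of the FFT16 codelet above.
 *
 *   CUDA:  scale_sketch<<<blocks, threads, shmemBytes, stream>>>(ptr, n);
 *   HIP:   hipLaunchKernelGGL(scale_sketch, blocks, threads, shmemBytes, stream, ptr, n);
 */
__global__ void scale_sketch(float2 *p, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) { p[i].x *= 2.0f; p[i].y *= 2.0f; }
}

extern "C" void scale_sketch_launch(float2 *p, int n)
{
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    // CUDA launch form; the equivalent HIP form is shown in the comment above
    scale_sketch<<<blocks, threads, 0, 0>>>(p, n);
}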
c6928b18aa7ca837c1c3f4cf9305f59c5f717a74.hip
// !!! This is a file automatically generated by hipify!!! /*Realizar un programa CUDA que dado un vector V de N nmeros enteros multiplique a cada nmero por una constante C, se deben realizar dos implementaciones: a.Tanto C como N deben ser pasados como parmetros al kernel. b.Tanto C como N deben estar almacenados en la memoria de constantes de la GPU*/ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> //M and N number of threads (grid and block) #define M 1 #define N 1 __global__ void multiply( int array[] , int dim, const int c, const int thread_number) { int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z+threadIdx.z* blockDim.y* blockDim.x+ threadIdx.y* blockDim.x+ threadIdx.x; if(index<dim){ if(dim<=thread_number){ //if more threads than array size printf("Thread %i; Modifying value of index %i for %i * %i because < dim %i\n", index, index, array[index], c, dim); array[index]*=c; } else{ //if less threads than array size if(index!=thread_number-1){//if not last thread deal with size_array/thread_nb array entries for(int i=index*(int)(dim/thread_number); i< index*(int)(dim/thread_number)+(int)(dim/thread_number); i++){ printf("Thread %i; Modifying value of index %i for %i * %i because < dim %i\n", index, i, array[i], c, dim); array[i]*=c; } } else{ //if last thread deal with all remaining array entries for(int i=index*(int)(dim/thread_number); i< dim; i++){ printf("Thread %i; Modifying value of index %i for %i * %i because < dim %i\n",index, i, array[i], c, dim); array[i]*=c; } } } } } int main(int argc, char *argv[]){ //Measure time clock_t time_begin; time_begin=clock(); // pointers to host & device arrays int *device_array = 0; int *host_array = 0; int size_array=10; // malloc a host array host_array = (int*)malloc( size_array * sizeof(int)); for(int i=0; i<size_array; i++){ host_array[i]=rand()%10; printf("%i\t", host_array[i]); } printf("\n"); // hipMalloc a device array hipMalloc(&device_array,size_array * sizeof(int)); // download and inspect the result on the host: hipMemcpy(device_array, host_array, sizeof(int)*size_array, hipMemcpyHostToDevice); dim3 bloque(N,N); //Bloque bidimensional de N*N hilos dim3 grid(M,M); //Grid bidimensional de M*M bloques int thread_number= N*N*M*M; hipLaunchKernelGGL(( multiply), dim3(grid), dim3(bloque), 0, 0, device_array, size_array , 2, thread_number); hipDeviceSynchronize(); // download and inspect the result on the host: hipMemcpy(host_array, device_array, sizeof(int)*size_array, hipMemcpyDeviceToHost); for(int i=0; i<size_array; i++) printf("%i\t", host_array[i]); // deallocate memory free(host_array); hipFree(device_array); printf("Time elapsed: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.215s }
c6928b18aa7ca837c1c3f4cf9305f59c5f717a74.cu
/*Realizar un programa CUDA que dado un vector V de N números enteros multiplique a cada número por una constante C, se deben realizar dos implementaciones: a.Tanto C como N deben ser pasados como parámetros al kernel. b.Tanto C como N deben estar almacenados en la memoria de constantes de la GPU*/ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> //M and N number of threads (grid and block) #define M 1 #define N 1 __global__ void multiply( int array[] , int dim, const int c, const int thread_number) { int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z+threadIdx.z* blockDim.y* blockDim.x+ threadIdx.y* blockDim.x+ threadIdx.x; if(index<dim){ if(dim<=thread_number){ //if more threads than array size printf("Thread %i; Modifying value of index %i for %i * %i because < dim %i\n", index, index, array[index], c, dim); array[index]*=c; } else{ //if less threads than array size if(index!=thread_number-1){//if not last thread deal with size_array/thread_nb array entries for(int i=index*(int)(dim/thread_number); i< index*(int)(dim/thread_number)+(int)(dim/thread_number); i++){ printf("Thread %i; Modifying value of index %i for %i * %i because < dim %i\n", index, i, array[i], c, dim); array[i]*=c; } } else{ //if last thread deal with all remaining array entries for(int i=index*(int)(dim/thread_number); i< dim; i++){ printf("Thread %i; Modifying value of index %i for %i * %i because < dim %i\n",index, i, array[i], c, dim); array[i]*=c; } } } } } int main(int argc, char *argv[]){ //Measure time clock_t time_begin; time_begin=clock(); // pointers to host & device arrays int *device_array = 0; int *host_array = 0; int size_array=10; // malloc a host array host_array = (int*)malloc( size_array * sizeof(int)); for(int i=0; i<size_array; i++){ host_array[i]=rand()%10; printf("%i\t", host_array[i]); } printf("\n"); // cudaMalloc a device array cudaMalloc(&device_array,size_array * sizeof(int)); // download and inspect the result on the host: cudaMemcpy(device_array, host_array, sizeof(int)*size_array, cudaMemcpyHostToDevice); dim3 bloque(N,N); //Bloque bidimensional de N*N hilos dim3 grid(M,M); //Grid bidimensional de M*M bloques int thread_number= N*N*M*M; multiply<<<grid, bloque>>>(device_array, size_array , 2, thread_number); cudaThreadSynchronize(); // download and inspect the result on the host: cudaMemcpy(host_array, device_array, sizeof(int)*size_array, cudaMemcpyDeviceToHost); for(int i=0; i<size_array; i++) printf("%i\t", host_array[i]); // deallocate memory free(host_array); cudaFree(device_array); printf("Time elapsed: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.215s }
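/*
 * The multiply kernel above hand-partitions the array across a fixed number of
 * threads, giving the last thread any leftover elements. A common alternative
 * that needs no explicit thread-count bookkeeping is a grid-stride loop; the
 * sketch below is an illustrative rewrite under that pattern, not the
 * exercise's required implementation.
 */
__global__ void multiply_grid_stride(int *array, int dim, int c)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < dim;
         i += gridDim.x * blockDim.x) {
        array[i] *= c; // each thread handles every (gridDim.x*blockDim.x)-th element
    }
}
// Launch example: multiply_grid_stride<<<M * M, N * N>>>(device_array, size_array, 2);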
8a3dbb02fc62501e3f7fe14f6e0918b760d3213c.hip
// !!! This is a file automatically generated by hipify!!! #pragma warning( disable : 4244 ) #include <iostream> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <helper_cuda.h> #include <helper_string.h> #include <utils/chronoCPU.hpp> #include <utils/chronoGPU.hpp> #include <hiprand/hiprand_kernel.h> #include <exo2/student.h> #include <exercise2/Exercise2.h> namespace { struct RandomUnsignedFunctor : thrust::unary_function<int,unsigned> { int m_seed; RandomUnsignedFunctor(int seed) : m_seed(seed) {} __device__ unsigned operator()(const int idx) { hiprandState_t s; hiprand_init(m_seed+idx, 0, 0, &s); return hiprand(&s); } }; //template<int gold> class CheckFunctor : public thrust::unary_function<thrust::tuple<const int,const int>,long long> { public: __device__ long long operator() (const thrust::tuple<const int, const int>& t) { const int a = thrust::get<0>(t); const int b = thrust::get<1>(t); return static_cast<long long>(a != b); } }; } void Exercise2::displayHelpIfNeeded(const int argc, const char**argv) { if( checkCmdLineFlag(argc, argv, "-h") || checkCmdLineFlag(argc, argv, "help") ) { std::cout << "Usage: " << argv[0] << " [-h] [--help] [-n=xxx]" << std::endl; std::cout << "\twhere options -h and --help display this help," << std::endl; std::cout << "\t and option -n=xxx sets the number of elements of arrays to xxx" << std::endl; exit(0); } } Exercise2& Exercise2::parseCommandLine(const int argc, const char**argv) { n = 1 << getNFromCmdLine(argc, argv, 4, 28); std::cout << "Do the exercise with N=" << n << std::endl; return *this; } void Exercise2::createReference(const bool verbose) { if( verbose ) std::cout << "Build a device vector occupying " << (n>>18) << "Mb" << std::endl; d_input.resize(n); auto seed = std::chrono::duration_cast<std::chrono::minutes>( std::chrono::system_clock::now().time_since_epoch() ).count(); thrust::transform( thrust::make_counting_iterator(0), thrust::make_counting_iterator(n), d_input.begin(), RandomUnsignedFunctor(seed) ); } long long Exercise2::checkResult( const bool verbose=true ) { thrust::device_vector<unsigned> d_sorted(d_input); ChronoGPU chr; chr.start(); thrust::sort(d_sorted.begin(), d_sorted.end()); chr.stop(); if( verbose ) std::cout << "\tReference calculated in " << chr.elapsedTime() << " ms" << std::endl; auto start_zipped = thrust::make_zip_iterator( thrust::make_tuple( d_student.begin(), d_sorted.begin() ) ); auto start = thrust::make_transform_iterator( start_zipped, CheckFunctor() ); auto stop = start + n; return thrust::reduce( start, stop, 0ll ); } void Exercise2::run(const bool verbose) { if( verbose ) std::cout << std::endl << "Radix Sort using base 2 ..." << std::endl; createReference(verbose); ChronoGPU chr; chr.start(); StudentWork2*work = reinterpret_cast<StudentWork2*>(student); d_student = work->radixSortBase2(d_input); chr.stop(); if( verbose ) std::cout << "\tDone in " << chr.elapsedTime() << " ms" << std::endl; } bool Exercise2::check() { const long long nbErrors = checkResult(); return ( nbErrors == 0 ); }
8a3dbb02fc62501e3f7fe14f6e0918b760d3213c.cu
#pragma warning( disable : 4244 ) #include <iostream> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <helper_cuda.h> #include <helper_string.h> #include <utils/chronoCPU.hpp> #include <utils/chronoGPU.hpp> #include <curand_kernel.h> #include <exo2/student.h> #include <exercise2/Exercise2.h> namespace { struct RandomUnsignedFunctor : thrust::unary_function<int,unsigned> { int m_seed; RandomUnsignedFunctor(int seed) : m_seed(seed) {} __device__ unsigned operator()(const int idx) { curandState s; curand_init(m_seed+idx, 0, 0, &s); return curand(&s); } }; //template<int gold> class CheckFunctor : public thrust::unary_function<thrust::tuple<const int,const int>,long long> { public: __device__ long long operator() (const thrust::tuple<const int, const int>& t) { const int a = thrust::get<0>(t); const int b = thrust::get<1>(t); return static_cast<long long>(a != b); } }; } void Exercise2::displayHelpIfNeeded(const int argc, const char**argv) { if( checkCmdLineFlag(argc, argv, "-h") || checkCmdLineFlag(argc, argv, "help") ) { std::cout << "Usage: " << argv[0] << " [-h] [--help] [-n=xxx]" << std::endl; std::cout << "\twhere options -h and --help display this help," << std::endl; std::cout << "\t and option -n=xxx sets the number of elements of arrays to xxx" << std::endl; exit(0); } } Exercise2& Exercise2::parseCommandLine(const int argc, const char**argv) { n = 1 << getNFromCmdLine(argc, argv, 4, 28); std::cout << "Do the exercise with N=" << n << std::endl; return *this; } void Exercise2::createReference(const bool verbose) { if( verbose ) std::cout << "Build a device vector occupying " << (n>>18) << "Mb" << std::endl; d_input.resize(n); auto seed = std::chrono::duration_cast<std::chrono::minutes>( std::chrono::system_clock::now().time_since_epoch() ).count(); thrust::transform( thrust::make_counting_iterator(0), thrust::make_counting_iterator(n), d_input.begin(), RandomUnsignedFunctor(seed) ); } long long Exercise2::checkResult( const bool verbose=true ) { thrust::device_vector<unsigned> d_sorted(d_input); ChronoGPU chr; chr.start(); thrust::sort(d_sorted.begin(), d_sorted.end()); chr.stop(); if( verbose ) std::cout << "\tReference calculated in " << chr.elapsedTime() << " ms" << std::endl; auto start_zipped = thrust::make_zip_iterator( thrust::make_tuple( d_student.begin(), d_sorted.begin() ) ); auto start = thrust::make_transform_iterator( start_zipped, CheckFunctor() ); auto stop = start + n; return thrust::reduce( start, stop, 0ll ); } void Exercise2::run(const bool verbose) { if( verbose ) std::cout << std::endl << "Radix Sort using base 2 ..." << std::endl; createReference(verbose); ChronoGPU chr; chr.start(); StudentWork2*work = reinterpret_cast<StudentWork2*>(student); d_student = work->radixSortBase2(d_input); chr.stop(); if( verbose ) std::cout << "\tDone in " << chr.elapsedTime() << " ms" << std::endl; } bool Exercise2::check() { const long long nbErrors = checkResult(); return ( nbErrors == 0 ); }
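/*
 * The exercise above leaves radixSortBase2 to the student (StudentWork2), so the
 * reference implementation is not in this file. One textbook way to express an
 * LSD base-2 radix sort with Thrust is a stable partition per bit (keys with the
 * bit clear first), which preserves the ordering established by the lower bits.
 * The sketch below illustrates that idea only; it is not the StudentWork2 code.
 */
#include <thrust/device_vector.h>
#include <thrust/partition.h>

struct BitIsZero {
    unsigned bit;
    explicit BitIsZero(unsigned b) : bit(b) {}
    __host__ __device__ bool operator()(unsigned v) const {
        return ((v >> bit) & 1u) == 0u;
    }
};

inline thrust::device_vector<unsigned>
radix_sort_base2_sketch(const thrust::device_vector<unsigned> &input)
{
    thrust::device_vector<unsigned> v(input);
    for (unsigned bit = 0; bit < 32u; ++bit) {
        // stable: keys with this bit clear come first, ties keep their relative order
        thrust::stable_partition(v.begin(), v.end(), BitIsZero(bit));
    }
    return v;
}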
1d2146baabbe34f99c94a6fcd213da8822703144.hip
// !!! This is a file automatically generated by hipify!!! #include "net.cuh" #include "opencv2/opencv.hpp" #include "common/cuMatrix.h" #include <hip/hip_runtime.h> #include "common/util.h" #include <time.h> #include "dataAugmentation/cuTrasformation.cuh" #include "common/Config.h" #include "common/cuMatrixVector.h" #include <helper_functions.h> #include <helper_cuda.h> #include "common/MemoryMonitor.h" #include "layers/Pooling.h" #include "common/cuBase.h" #include "layers/ConvCFM.h" #include "layers/FullConnect.h" #include "layers/SoftMax.h" #include "layers/LayerBase.h" #include "layers/LocalConnect.h" #include "layers/LRN.h" #include "layers/NIN.h" #include <queue> cuMatrixVector<double>* cu_distortion_vector; int cuCurCorrect; cuMatrix<int>*cuCorrect = NULL; cuMatrix<int>*cuVote = NULL; std::vector<ConfigBase*>que; /*batch size images*/ cuMatrixVector<double>batchImg[2]; void getBatchImageWithStreams(cuMatrixVector<double>&x, cuMatrixVector<double>&batchImg, int start, hipStream_t stream1); void outputMatrix(cuMatrix<double>* m); void cuSaveConvNet() { FILE *pOut = fopen("Result/checkPoint.txt", "w"); for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->save(pOut); } fclose(pOut); }; void cuFreeConvNet() { } void cuReadConvNet( int imgDim, char* path, int nclasses) { FILE *pIn = fopen(path, "r"); for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->initFromCheckpoint(pIn); } fclose(pIn); }; void cuInitCNNMemory( int batch, cuMatrixVector<double>& trainX, cuMatrixVector<double>& testX, int ImgSize, int nclasses) { /*Transformation*/ cu_distortion_vector = new cuMatrixVector<double>(); for(int i = 0; i < batch; i++){ cu_distortion_vector->push_back(new cuMatrix<double>(ImgSize, ImgSize, Config::instance()->getChannels())); } cu_distortion_vector->toGpu(); Layers::instance()->setInputs(cu_distortion_vector); /*BFS*/ std::queue<ConfigBase*>qqq; for(int i = 0; i < Config::instance()->getFirstLayers().size(); i++){ qqq.push(Config::instance()->getFirstLayers()[i]); } while(!qqq.empty()){ ConfigBase* top = qqq.front(); qqq.pop(); que.push_back(top); if(top->m_type == std::string("CONV")){ ConfigConv * conv = (ConfigConv*) top; new ConvCFM(conv->m_name); }else if(top->m_type == std::string("LOCAL")){ new LocalConnect(top->m_name); } else if(top->m_type == std::string("POOLING")){ new Pooling(top->m_name); }else if(top->m_type == std::string("FC")){ new FullConnect(top->m_name); }else if(top->m_type == std::string("SOFTMAX")){ new SoftMax(top->m_name); }else if(top->m_type == std::string("NIN")){ new NIN(top->m_name); } else if(std::string("LRN") == top->m_type){ new LRN(top->m_name); } for(int n = 0; n < top->m_next.size(); n++){ qqq.push(top->m_next[n]); } } /*correct and cuVote*/ if(cuCorrect == NULL) { cuCorrect = new cuMatrix<int>(1,1,1); cuVote = new cuMatrix<int>(testX.size(), Config::instance()->getClasses(), 1); } /*double buffer for batch images*/ int crop = Config::instance()->getCrop(); for(int i = 0; i < 2; i ++){ for(int j = 0; j < batch; j++){ batchImg[i].push_back(new cuMatrix<double>(ImgSize + crop, ImgSize + crop, Config::instance()->getChannels())); } batchImg[i].toGpu(); } } void cuFreeCNNMemory( int batch, cuMatrixVector<double>&trainX, cuMatrixVector<double>&testX) { delete cu_distortion_vector; } void outputPoints(cuMatrix<int>* p) { p->toCpu(); for(int c = 0; c < p->channels; c++){ for(int i = 0; i < p->rows; i++) { for(int j = 0; j < p->cols; j++) { printf("%d ", 
p->get(i,j, c)); }printf("\n"); } printf("\n"); } } void outputMatrix(cuMatrix<double>* m) { m->toCpu(); for(int c = 0; c < m->channels; c++){ for(int i = 0; i < m->rows; i++){ for(int j = 0; j < m->cols; j++){ printf("%.10lf ", m->get(i,j, c)); }printf("\n"); } printf("\n"); } } void updataWB( double lrate, double momentum, int batch) { /*updateWb*/ for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->updateWeight(); } hipDeviceSynchronize(); getLastCudaError("updateWB"); } void getNetworkCost(double** x, int* y, int batch, int ImgSize, int nclasses, hipblasHandle_t handle) { /*feedforward*/ SoftMax* sm = (SoftMax*)Layers::instance()->get("softmax1"); sm->setPredict(y); for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->feedforward(); } /*Cost*/ // for(int i = que.size() - 1; i >= 0; i--){ // LayerBase* layer = Layers::instance()->get(que[i]->m_name); // layer->getCost(cost, y); // } /*backpropagation*/ for(int i = que.size() - 1; i >=0; i--){ ConfigBase* top = que[i]; LayerBase* layer = Layers::instance()->get(top->m_name); layer->backpropagation(); layer->getGrad(); } } /* dim3(1),dim3(batch) */ __global__ void g_getCorrect(double* softMaxP, int cols, int start, int* vote) { int id = threadIdx.x; if(id < start)return; double* p = softMaxP + id * cols; int* votep= vote + id * cols; int r = 0; double maxele = log(p[0]); for(int i = 1; i < cols; i++) { double val = log(p[i]); if(maxele < val) { maxele = val; r = i; } } votep[r]++; } void resultProdict(double** testX, int*testY, int* vote, int batch, int ImgSize, int nclasses, int start, hipblasHandle_t handle) { /*feedforward*/ for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->feedforward(); } hipLaunchKernelGGL(( g_getCorrect), dim3(dim3(1)), dim3(batch), 0, 0, Layers::instance()->get("softmax1")->getOutputs()->getDev(), Layers::instance()->get("softmax1")->getOutputs()->cols, start, vote); hipDeviceSynchronize(); } void gradientChecking(double**x, int*y, int batch, int ImgSize, int nclasses, hipblasHandle_t handle) { /*for(int hl = 0; hl < hLayers.size(); hl++) { dropDelta(hLayers[hl].dropW, Config::instance()->getFC()[hl]->m_dropoutRate); } std::cout<<"test network !!!!"<<std::endl; double epsilon = 1e-4; for(int a = 0; a < convNCFM.size(); a++) { for(int b = 0; b < CLayers[a].layer.size(); b++) { printf("====%d %d\n",a, b); getNetworkCost(x, y, CLayers, hLayers, smr, batch, ImgSize, nclasses, handle); CLayers[a].layer[b].Wgrad->toCpu(); cuMatrix<double>* grad = new cuMatrix<double>(CLayers[a].layer[b].Wgrad->getHost(), CLayers[a].layer[b].Wgrad->rows, CLayers[a].layer[b].Wgrad->cols, CLayers[a].layer[b].Wgrad->channels); for(int c = 0; c < CLayers[a].layer[b].W->channels; c++){ for(int i = 0; i < CLayers[a].layer[b].W->rows; i++){ for(int j = 0; j < CLayers[a].layer[b].W->cols; j++){ double memo = CLayers[a].layer[b].W->get(i, j, c); CLayers[a].layer[b].W->set(i, j, c, memo + epsilon); CLayers[a].layer[b].W->toGpu(); getNetworkCost(x, y, CLayers, hLayers, smr, batch, ImgSize, nclasses, handle); smr.cost->toCpu(); double value1 = smr.cost->get(0, 0 , 0); CLayers[a].layer[b].W->set(i, j, c, memo - epsilon); CLayers[a].layer[b].W->toGpu(); getNetworkCost(x, y, CLayers, hLayers, smr, batch, ImgSize, nclasses, handle); smr.cost->toCpu(); double value2 = smr.cost->get(0, 0, 0); double tp = (value1 - value2) / (2 * epsilon); if(fabs(tp - grad->get(i, j, c)) > 0.00001) 
std::cout<<i<<","<<j<<","<<c<<","<<tp<<", "<<grad->get(i,j,c)<<", " <<tp - grad->get(i,j,c)<<std::endl; CLayers[a].layer[b].W->set(i, j, c, memo); CLayers[a].layer[b].W->toGpu(); } } } delete grad; } }*/ } /* */ void __global__ g_getVotingResult(int* voting, int* y, int* correct, int len, int nclasses) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int idx = i + blockDim.x * blockIdx.x + threadIdx.x; if(idx < len) { int* pvoting = voting + idx * nclasses; int _max = pvoting[0]; int rid = 0; for(int j = 1; j < nclasses; j++) { if(pvoting[j] > _max) { _max = pvoting[j]; rid = j; } } if(rid == y[idx]) { atomicAdd(correct, 1); } } } } void predictTestDate(cuMatrixVector<double>&x, cuMatrix<int>*y , cuMatrixVector<double>&testX, cuMatrix<int>* testY, int batch, int ImgSize, int nclasses, bool vote, hipblasHandle_t handle) { for(int i = 0; i < que.size(); i++){ if(que[i]->m_type == std::string("FC")){ FullConnect* layer = (FullConnect*)Layers::instance()->get(que[i]->m_name); layer->drop(0.0); } } cuVote->gpuClear(); int cropr[] = {Config::instance()->getCrop() / 2, 0, 0, Config::instance()->getCrop(), Config::instance()->getCrop()}; int cropc[] = {Config::instance()->getCrop() / 2, 0, Config::instance()->getCrop(), 0, Config::instance()->getCrop()}; double scalex[] = {0, -Config::instance()->getScale(), Config::instance()->getScale()}; double scaley[] = {0, -Config::instance()->getScale(), Config::instance()->getScale()}; double rotate[] = {0, -Config::instance()->getRotation(), Config::instance()->getRotation()}; // if(fabs(Config::instance()->getDistortion()) >= 0.1 || Config::instance()->getScale() >= 1 || Config::instance()->getRotation() >= 1) // cuApplyDistortion(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize); hipStream_t stream1; checkCudaErrors(hipStreamCreate(&stream1)); int hlen = Config::instance()->getHorizontal() == 1 ? 2 : 1; int clen = Config::instance()->getCrop() == 0 ? 1 : sizeof(cropc) / sizeof(int); int scaleLen = Config::instance()->getScale() == 0 ? 1 : sizeof(scalex) / sizeof(double); int rotateLen = Config::instance()->getRotation() == 0 ? 
1 : sizeof(rotate) / sizeof(double); if(!vote) hlen = clen = scaleLen = rotateLen = 1; for(int sidx = 0; sidx < scaleLen; sidx++){ for(int sidy = 0; sidy < scaleLen; sidy++){ for(int rid = 0; rid < rotateLen; rid++){ cuApplyScaleAndRotate(batch, ImgSize, scalex[sidx], scaley[sidy], rotate[rid]); for (int h = 0; h < hlen; h++) { for (int c = 0; c < clen; c++) { int batchImgId = 1; getBatchImageWithStreams(testX, batchImg[0], 0, stream1); for (int p = 0; p < (testX.size() + batch - 1) / batch; p++) { hipStreamSynchronize(stream1); printf("test %2d%%", 100 * p / ((testX.size() + batch - 1) / batch)); int tstart = p * batch; if(tstart + batch <= testX.size() - batch) getBatchImageWithStreams(testX, batchImg[batchImgId], tstart + batch, stream1); else { int start = testX.size() - batch; getBatchImageWithStreams(testX, batchImg[batchImgId], start, stream1); } if(tstart + batch > testX.size()){ tstart = testX.size() - batch; } //printf("start = %d\n", tstart); batchImgId = 1 - batchImgId; cuApplyCrop(batchImg[batchImgId].m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, cropr[c], cropc[c]); cuApplyDistortion(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize); if (h == 1) cuApplyHorizontal(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, HORIZONTAL); resultProdict(cu_distortion_vector->m_devPoint, testY->getDev() + tstart, cuVote->getDev() + tstart * nclasses, batch, ImgSize, nclasses, p * batch - tstart, handle); printf("\b\b\b\b\b\b\b\b\b"); } } } } } } checkCudaErrors(hipStreamDestroy(stream1)); cuCorrect->gpuClear(); hipLaunchKernelGGL(( g_getVotingResult), dim3(dim3((testX.size() + batch - 1) / batch)), dim3(dim3(batch)), 0, 0, cuVote->getDev(), testY->getDev(), cuCorrect->getDev(), testX.size(), nclasses); hipDeviceSynchronize(); getLastCudaError("g_getVotingResult"); cuCorrect->toCpu(); if (cuCorrect->get(0, 0, 0) > cuCurCorrect) { cuCurCorrect = cuCorrect->get(0, 0, 0); cuSaveConvNet(); } } int voteTestDate( cuMatrixVector<double>&testX, cuMatrix<int>* testY, cuMatrix<int>*& vote, int batch, int ImgSize, int nclasses, hipblasHandle_t handle) { for(int i = 0; i < que.size(); i++){ if(que[i]->m_type == std::string("FC")){ FullConnect* layer = (FullConnect*)Layers::instance()->get(que[i]->m_name); layer->drop(0.0); } } cuVote->gpuClear(); int cropr[] = {Config::instance()->getCrop() / 2, 0, 0, Config::instance()->getCrop(), Config::instance()->getCrop()}; int cropc[] = {Config::instance()->getCrop() / 2, 0, Config::instance()->getCrop(), 0, Config::instance()->getCrop()}; hipStream_t stream1; checkCudaErrors(hipStreamCreate(&stream1)); for (int h = 0; h < (Config::instance()->getHorizontal() == 1 ? 2 : 1); h++) { for (int c = 0; c < (Config::instance()->getCrop() == 0 ? 
1 : sizeof(cropc) / sizeof(int)); c++) { int batchImgId = 1; getBatchImageWithStreams(testX, batchImg[0], 0, stream1); for (int p = 0; p < (testX.size() + batch - 1) / batch; p++) { hipStreamSynchronize(stream1); printf("test %2d%%", 100 * p / ((testX.size() + batch - 1) / batch)); int tstart = p * batch; if(tstart + batch <= testX.size() - batch) getBatchImageWithStreams(testX, batchImg[batchImgId], tstart + batch, stream1); else { int start = testX.size() - batch; getBatchImageWithStreams(testX, batchImg[batchImgId], start, stream1); } if(tstart + batch > testX.size()){ tstart = testX.size() - batch; } batchImgId = 1 - batchImgId; cuApplyCrop(batchImg[batchImgId].m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, cropr[c], cropc[c]); if (h == 1) cuApplyHorizontal(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, HORIZONTAL); resultProdict(cu_distortion_vector->m_devPoint, testY->getDev() + tstart, cuVote->getDev() + tstart * nclasses, batch, ImgSize, nclasses, p * batch - tstart, handle); printf("\b\b\b\b\b\b\b\b\b"); } } } cuCorrect->gpuClear(); hipLaunchKernelGGL(( g_getVotingResult), dim3(dim3((testX.size() + batch - 1) / batch)), dim3(dim3(batch)), 0, 0, vote->getDev(), testY->getDev(), cuCorrect->getDev(), testX.size(), nclasses); hipDeviceSynchronize(); getLastCudaError("g_getVotingResult"); cuCorrect->toCpu(); return cuCorrect->get(0,0,0); } void getBatchImageWithStreams(cuMatrixVector<double>&x, cuMatrixVector<double>&batchImg, int start, hipStream_t stream1){ for(int i = 0; i < batchImg.size(); i++){ memcpy(batchImg[i]->getHost(), x[i + start]->getHost(), sizeof(double) * batchImg[i]->getLen()); batchImg[i]->toGpu(stream1); } } void cuTrainNetwork(cuMatrixVector<double>&x, cuMatrix<int>*y, cuMatrixVector<double>&testX, cuMatrix<int>* testY, int batch, int ImgSize, int nclasses, std::vector<double>&nlrate, std::vector<double>&nMomentum, std::vector<int>&epoCount, hipblasHandle_t handle) { if(nlrate.size() != nMomentum.size() || nMomentum.size() != epoCount.size() || nlrate.size() != epoCount.size()) { printf("nlrate, nMomentum, epoCount size not equal\n"); exit(0); } if(Config::instance()->getIsGradientChecking()) gradientChecking(x.m_devPoint, y->getDev(), batch, ImgSize, nclasses, handle); predictTestDate(x, y, testX, testY, batch, ImgSize, nclasses, 0, handle); printf("correct is %d\n", cuCorrect->get(0,0,0)); int epochs = 10000; double lrate = 0.05; double Momentum = 0.9; int id = 0; for (int epo = 0; epo < epochs; epo++) { if (id >= nlrate.size()) break; lrate = nlrate[id]; Momentum = nMomentum[id]; Config::instance()->setLrate(lrate); Config::instance()->setMomentum(Momentum); double start, end; start = clock(); cuApplyRandom(batch, clock(), ImgSize); for(int i = 0; i < que.size(); i++){ if(que[i]->m_type == std::string("FC")){ FullConnect* layer = (FullConnect*)Layers::instance()->get(que[i]->m_name); layer->drop(); } } x.shuffle(5000, y); hipStream_t stream1; checkCudaErrors(hipStreamCreate(&stream1)); getBatchImageWithStreams(x, batchImg[0], 0, stream1); int batchImgId = 1; for (int k = 0; k < (x.size() + batch - 1) / batch; k ++) { hipStreamSynchronize(stream1); int start = k * batch; printf("train %2d%%", 100 * start / ((x.size() + batch - 1))); if(start + batch <= x.size() - batch) getBatchImageWithStreams(x, batchImg[batchImgId], start + batch, stream1); else{ int tstart = x.size() - batch; getBatchImageWithStreams(x, batchImg[batchImgId], tstart, stream1); } if(start + batch > x.size()){ start = x.size() - batch; } 
batchImgId = 1 - batchImgId; cuApplyCropRandom(batchImg[batchImgId].m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize); if(fabs(Config::instance()->getDistortion()) >= 0.1 || Config::instance()->getScale() >= 1 || Config::instance()->getRotation() >= 1) cuApplyDistortion(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize); if (Config::instance()->getHorizontal()) { cuApplyHorizontal(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, RANDOM_HORIZONTAL); } cuApplyWhiteNoise(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, Config::instance()->getWhiteNoise()); if (Config::instance()->getImageShow()) { for (int ff = batch - 1; ff >= 0; ff--) { showImg(batchImg[batchImgId][ff], 5); showImg(cu_distortion_vector->m_vec[ff], 5); cv::waitKey(0); } } getNetworkCost(cu_distortion_vector->m_devPoint, y->getDev() + start, batch, ImgSize, nclasses, handle); updataWB(lrate, Momentum, batch); printf("\b\b\b\b\b\b\b\b\b"); } checkCudaErrors(hipStreamDestroy(stream1)); double cost = 0.0; for(int i = 0; i < que.size(); i++){ LayerBase* layer = (LayerBase*)Layers::instance()->get(que[i]->m_name); layer->calCost(); layer->printCost(); cost += layer->getCost(); } char str[512]; end = clock(); sprintf(str, "epoch=%d time=%.03lfs cost=%lf Momentum=%.06lf lrate=%.08lf", epo, (double) (end - start) / CLOCKS_PER_SEC, cost, Config::instance()->getMomentum(), Config::instance()->getLrate()); printf("%s\n", str); LOG(str, "Result/log.txt"); if (epo && epo % epoCount[id] == 0) { for(int i = 0; i < que.size(); i++){ LayerBase* layer = (LayerBase*)Layers::instance()->get(que[i]->m_name); layer->clearMomentum(); } id++; } printf("===================weight value================\n"); for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->printParameter(); } printf("===================test Result================\n"); predictTestDate(x, y, testX, testY, batch, ImgSize, nclasses, false, handle); sprintf(str, "test %.2lf%%/%.2lf%%", 100.0 * cuCorrect->get(0, 0, 0) / testX.size(), 100.0 * cuCurCorrect / testX.size()); printf("%s\n",str); LOG(str, "Result/log.txt"); if(epo && epo % Config::instance()->getTestEpoch() == 0){ predictTestDate(x, y, testX, testY, batch, ImgSize, nclasses, true, handle); sprintf(str, "test voting correct %.2lf%%/%.2lf%%", 100.0 * cuCorrect->get(0, 0, 0) / testX.size(), 100.0 * cuCurCorrect / testX.size()); printf("%s\n",str); LOG(str, "Result/log.txt"); } if(epo == 0){ MemoryMonitor::instance()->printCpuMemory(); MemoryMonitor::instance()->printGpuMemory(); } } } /* */ void __global__ g_getVoteAdd(int* voting, int* predict, int* y, int* correct, int len, int nclasses) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int idx = i + blockDim.x * blockIdx.x + threadIdx.x; if(idx < len) { int* pvoting = voting + idx * nclasses; int* ppredict= predict+ idx * nclasses; int _max = pvoting[0] + ppredict[0]; int rid = 0; for(int j = 0; j < nclasses; j++) { pvoting[j] += ppredict[j]; if(pvoting[j] > _max) { _max = pvoting[j]; rid = j; } } if(rid == y[idx]) { atomicAdd(correct, 1); } } } } int cuVoteAdd(cuMatrix<int>*& voteSum, cuMatrix<int>*& predict, cuMatrix<int>*& testY, cuMatrix<int>*& correct, int nclasses) { hipLaunchKernelGGL(( g_getVoteAdd), dim3(dim3((testY->getLen() + 256 - 1) / 256)), dim3(dim3(256)), 0, 0, voteSum->getDev(), predict->getDev(), testY->getDev(), correct->getDev(), testY->getLen(), nclasses); hipDeviceSynchronize(); 
getLastCudaError("g_getVoteAdd"); correct->toCpu(); return correct->get(0, 0, 0); }
1d2146baabbe34f99c94a6fcd213da8822703144.cu
#include "net.cuh" #include "opencv2/opencv.hpp" #include "common/cuMatrix.h" #include <cuda_runtime.h> #include "common/util.h" #include <time.h> #include "dataAugmentation/cuTrasformation.cuh" #include "common/Config.h" #include "common/cuMatrixVector.h" #include <helper_functions.h> #include <helper_cuda.h> #include "common/MemoryMonitor.h" #include "layers/Pooling.h" #include "common/cuBase.h" #include "layers/ConvCFM.h" #include "layers/FullConnect.h" #include "layers/SoftMax.h" #include "layers/LayerBase.h" #include "layers/LocalConnect.h" #include "layers/LRN.h" #include "layers/NIN.h" #include <queue> cuMatrixVector<double>* cu_distortion_vector; int cuCurCorrect; cuMatrix<int>*cuCorrect = NULL; cuMatrix<int>*cuVote = NULL; std::vector<ConfigBase*>que; /*batch size images*/ cuMatrixVector<double>batchImg[2]; void getBatchImageWithStreams(cuMatrixVector<double>&x, cuMatrixVector<double>&batchImg, int start, cudaStream_t stream1); void outputMatrix(cuMatrix<double>* m); void cuSaveConvNet() { FILE *pOut = fopen("Result/checkPoint.txt", "w"); for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->save(pOut); } fclose(pOut); }; void cuFreeConvNet() { } void cuReadConvNet( int imgDim, char* path, int nclasses) { FILE *pIn = fopen(path, "r"); for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->initFromCheckpoint(pIn); } fclose(pIn); }; void cuInitCNNMemory( int batch, cuMatrixVector<double>& trainX, cuMatrixVector<double>& testX, int ImgSize, int nclasses) { /*Transformation*/ cu_distortion_vector = new cuMatrixVector<double>(); for(int i = 0; i < batch; i++){ cu_distortion_vector->push_back(new cuMatrix<double>(ImgSize, ImgSize, Config::instance()->getChannels())); } cu_distortion_vector->toGpu(); Layers::instance()->setInputs(cu_distortion_vector); /*BFS*/ std::queue<ConfigBase*>qqq; for(int i = 0; i < Config::instance()->getFirstLayers().size(); i++){ qqq.push(Config::instance()->getFirstLayers()[i]); } while(!qqq.empty()){ ConfigBase* top = qqq.front(); qqq.pop(); que.push_back(top); if(top->m_type == std::string("CONV")){ ConfigConv * conv = (ConfigConv*) top; new ConvCFM(conv->m_name); }else if(top->m_type == std::string("LOCAL")){ new LocalConnect(top->m_name); } else if(top->m_type == std::string("POOLING")){ new Pooling(top->m_name); }else if(top->m_type == std::string("FC")){ new FullConnect(top->m_name); }else if(top->m_type == std::string("SOFTMAX")){ new SoftMax(top->m_name); }else if(top->m_type == std::string("NIN")){ new NIN(top->m_name); } else if(std::string("LRN") == top->m_type){ new LRN(top->m_name); } for(int n = 0; n < top->m_next.size(); n++){ qqq.push(top->m_next[n]); } } /*correct and cuVote*/ if(cuCorrect == NULL) { cuCorrect = new cuMatrix<int>(1,1,1); cuVote = new cuMatrix<int>(testX.size(), Config::instance()->getClasses(), 1); } /*double buffer for batch images*/ int crop = Config::instance()->getCrop(); for(int i = 0; i < 2; i ++){ for(int j = 0; j < batch; j++){ batchImg[i].push_back(new cuMatrix<double>(ImgSize + crop, ImgSize + crop, Config::instance()->getChannels())); } batchImg[i].toGpu(); } } void cuFreeCNNMemory( int batch, cuMatrixVector<double>&trainX, cuMatrixVector<double>&testX) { delete cu_distortion_vector; } void outputPoints(cuMatrix<int>* p) { p->toCpu(); for(int c = 0; c < p->channels; c++){ for(int i = 0; i < p->rows; i++) { for(int j = 0; j < p->cols; j++) { printf("%d ", p->get(i,j, c)); }printf("\n"); } printf("\n"); } } void 
outputMatrix(cuMatrix<double>* m) { m->toCpu(); for(int c = 0; c < m->channels; c++){ for(int i = 0; i < m->rows; i++){ for(int j = 0; j < m->cols; j++){ printf("%.10lf ", m->get(i,j, c)); }printf("\n"); } printf("\n"); } } void updataWB( double lrate, double momentum, int batch) { /*updateWb*/ for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->updateWeight(); } cudaDeviceSynchronize(); getLastCudaError("updateWB"); } void getNetworkCost(double** x, int* y, int batch, int ImgSize, int nclasses, cublasHandle_t handle) { /*feedforward*/ SoftMax* sm = (SoftMax*)Layers::instance()->get("softmax1"); sm->setPredict(y); for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->feedforward(); } /*Cost*/ // for(int i = que.size() - 1; i >= 0; i--){ // LayerBase* layer = Layers::instance()->get(que[i]->m_name); // layer->getCost(cost, y); // } /*backpropagation*/ for(int i = que.size() - 1; i >=0; i--){ ConfigBase* top = que[i]; LayerBase* layer = Layers::instance()->get(top->m_name); layer->backpropagation(); layer->getGrad(); } } /* dim3(1),dim3(batch) */ __global__ void g_getCorrect(double* softMaxP, int cols, int start, int* vote) { int id = threadIdx.x; if(id < start)return; double* p = softMaxP + id * cols; int* votep= vote + id * cols; int r = 0; double maxele = log(p[0]); for(int i = 1; i < cols; i++) { double val = log(p[i]); if(maxele < val) { maxele = val; r = i; } } votep[r]++; } void resultProdict(double** testX, int*testY, int* vote, int batch, int ImgSize, int nclasses, int start, cublasHandle_t handle) { /*feedforward*/ for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->feedforward(); } g_getCorrect<<<dim3(1), batch>>>( Layers::instance()->get("softmax1")->getOutputs()->getDev(), Layers::instance()->get("softmax1")->getOutputs()->cols, start, vote); cudaDeviceSynchronize(); } void gradientChecking(double**x, int*y, int batch, int ImgSize, int nclasses, cublasHandle_t handle) { /*for(int hl = 0; hl < hLayers.size(); hl++) { dropDelta(hLayers[hl].dropW, Config::instance()->getFC()[hl]->m_dropoutRate); } std::cout<<"test network !!!!"<<std::endl; double epsilon = 1e-4; for(int a = 0; a < convNCFM.size(); a++) { for(int b = 0; b < CLayers[a].layer.size(); b++) { printf("====%d %d\n",a, b); getNetworkCost(x, y, CLayers, hLayers, smr, batch, ImgSize, nclasses, handle); CLayers[a].layer[b].Wgrad->toCpu(); cuMatrix<double>* grad = new cuMatrix<double>(CLayers[a].layer[b].Wgrad->getHost(), CLayers[a].layer[b].Wgrad->rows, CLayers[a].layer[b].Wgrad->cols, CLayers[a].layer[b].Wgrad->channels); for(int c = 0; c < CLayers[a].layer[b].W->channels; c++){ for(int i = 0; i < CLayers[a].layer[b].W->rows; i++){ for(int j = 0; j < CLayers[a].layer[b].W->cols; j++){ double memo = CLayers[a].layer[b].W->get(i, j, c); CLayers[a].layer[b].W->set(i, j, c, memo + epsilon); CLayers[a].layer[b].W->toGpu(); getNetworkCost(x, y, CLayers, hLayers, smr, batch, ImgSize, nclasses, handle); smr.cost->toCpu(); double value1 = smr.cost->get(0, 0 , 0); CLayers[a].layer[b].W->set(i, j, c, memo - epsilon); CLayers[a].layer[b].W->toGpu(); getNetworkCost(x, y, CLayers, hLayers, smr, batch, ImgSize, nclasses, handle); smr.cost->toCpu(); double value2 = smr.cost->get(0, 0, 0); double tp = (value1 - value2) / (2 * epsilon); if(fabs(tp - grad->get(i, j, c)) > 0.00001) std::cout<<i<<","<<j<<","<<c<<","<<tp<<", "<<grad->get(i,j,c)<<", " <<tp - grad->get(i,j,c)<<std::endl; 
CLayers[a].layer[b].W->set(i, j, c, memo); CLayers[a].layer[b].W->toGpu(); } } } delete grad; } }*/ } /* */ void __global__ g_getVotingResult(int* voting, int* y, int* correct, int len, int nclasses) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int idx = i + blockDim.x * blockIdx.x + threadIdx.x; if(idx < len) { int* pvoting = voting + idx * nclasses; int _max = pvoting[0]; int rid = 0; for(int j = 1; j < nclasses; j++) { if(pvoting[j] > _max) { _max = pvoting[j]; rid = j; } } if(rid == y[idx]) { atomicAdd(correct, 1); } } } } void predictTestDate(cuMatrixVector<double>&x, cuMatrix<int>*y , cuMatrixVector<double>&testX, cuMatrix<int>* testY, int batch, int ImgSize, int nclasses, bool vote, cublasHandle_t handle) { for(int i = 0; i < que.size(); i++){ if(que[i]->m_type == std::string("FC")){ FullConnect* layer = (FullConnect*)Layers::instance()->get(que[i]->m_name); layer->drop(0.0); } } cuVote->gpuClear(); int cropr[] = {Config::instance()->getCrop() / 2, 0, 0, Config::instance()->getCrop(), Config::instance()->getCrop()}; int cropc[] = {Config::instance()->getCrop() / 2, 0, Config::instance()->getCrop(), 0, Config::instance()->getCrop()}; double scalex[] = {0, -Config::instance()->getScale(), Config::instance()->getScale()}; double scaley[] = {0, -Config::instance()->getScale(), Config::instance()->getScale()}; double rotate[] = {0, -Config::instance()->getRotation(), Config::instance()->getRotation()}; // if(fabs(Config::instance()->getDistortion()) >= 0.1 || Config::instance()->getScale() >= 1 || Config::instance()->getRotation() >= 1) // cuApplyDistortion(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize); cudaStream_t stream1; checkCudaErrors(cudaStreamCreate(&stream1)); int hlen = Config::instance()->getHorizontal() == 1 ? 2 : 1; int clen = Config::instance()->getCrop() == 0 ? 1 : sizeof(cropc) / sizeof(int); int scaleLen = Config::instance()->getScale() == 0 ? 1 : sizeof(scalex) / sizeof(double); int rotateLen = Config::instance()->getRotation() == 0 ? 
1 : sizeof(rotate) / sizeof(double); if(!vote) hlen = clen = scaleLen = rotateLen = 1; for(int sidx = 0; sidx < scaleLen; sidx++){ for(int sidy = 0; sidy < scaleLen; sidy++){ for(int rid = 0; rid < rotateLen; rid++){ cuApplyScaleAndRotate(batch, ImgSize, scalex[sidx], scaley[sidy], rotate[rid]); for (int h = 0; h < hlen; h++) { for (int c = 0; c < clen; c++) { int batchImgId = 1; getBatchImageWithStreams(testX, batchImg[0], 0, stream1); for (int p = 0; p < (testX.size() + batch - 1) / batch; p++) { cudaStreamSynchronize(stream1); printf("test %2d%%", 100 * p / ((testX.size() + batch - 1) / batch)); int tstart = p * batch; if(tstart + batch <= testX.size() - batch) getBatchImageWithStreams(testX, batchImg[batchImgId], tstart + batch, stream1); else { int start = testX.size() - batch; getBatchImageWithStreams(testX, batchImg[batchImgId], start, stream1); } if(tstart + batch > testX.size()){ tstart = testX.size() - batch; } //printf("start = %d\n", tstart); batchImgId = 1 - batchImgId; cuApplyCrop(batchImg[batchImgId].m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, cropr[c], cropc[c]); cuApplyDistortion(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize); if (h == 1) cuApplyHorizontal(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, HORIZONTAL); resultProdict(cu_distortion_vector->m_devPoint, testY->getDev() + tstart, cuVote->getDev() + tstart * nclasses, batch, ImgSize, nclasses, p * batch - tstart, handle); printf("\b\b\b\b\b\b\b\b\b"); } } } } } } checkCudaErrors(cudaStreamDestroy(stream1)); cuCorrect->gpuClear(); g_getVotingResult<<<dim3((testX.size() + batch - 1) / batch), dim3(batch)>>>( cuVote->getDev(), testY->getDev(), cuCorrect->getDev(), testX.size(), nclasses); cudaDeviceSynchronize(); getLastCudaError("g_getVotingResult"); cuCorrect->toCpu(); if (cuCorrect->get(0, 0, 0) > cuCurCorrect) { cuCurCorrect = cuCorrect->get(0, 0, 0); cuSaveConvNet(); } } int voteTestDate( cuMatrixVector<double>&testX, cuMatrix<int>* testY, cuMatrix<int>*& vote, int batch, int ImgSize, int nclasses, cublasHandle_t handle) { for(int i = 0; i < que.size(); i++){ if(que[i]->m_type == std::string("FC")){ FullConnect* layer = (FullConnect*)Layers::instance()->get(que[i]->m_name); layer->drop(0.0); } } cuVote->gpuClear(); int cropr[] = {Config::instance()->getCrop() / 2, 0, 0, Config::instance()->getCrop(), Config::instance()->getCrop()}; int cropc[] = {Config::instance()->getCrop() / 2, 0, Config::instance()->getCrop(), 0, Config::instance()->getCrop()}; cudaStream_t stream1; checkCudaErrors(cudaStreamCreate(&stream1)); for (int h = 0; h < (Config::instance()->getHorizontal() == 1 ? 2 : 1); h++) { for (int c = 0; c < (Config::instance()->getCrop() == 0 ? 
1 : sizeof(cropc) / sizeof(int)); c++) { int batchImgId = 1; getBatchImageWithStreams(testX, batchImg[0], 0, stream1); for (int p = 0; p < (testX.size() + batch - 1) / batch; p++) { cudaStreamSynchronize(stream1); printf("test %2d%%", 100 * p / ((testX.size() + batch - 1) / batch)); int tstart = p * batch; if(tstart + batch <= testX.size() - batch) getBatchImageWithStreams(testX, batchImg[batchImgId], tstart + batch, stream1); else { int start = testX.size() - batch; getBatchImageWithStreams(testX, batchImg[batchImgId], start, stream1); } if(tstart + batch > testX.size()){ tstart = testX.size() - batch; } batchImgId = 1 - batchImgId; cuApplyCrop(batchImg[batchImgId].m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, cropr[c], cropc[c]); if (h == 1) cuApplyHorizontal(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, HORIZONTAL); resultProdict(cu_distortion_vector->m_devPoint, testY->getDev() + tstart, cuVote->getDev() + tstart * nclasses, batch, ImgSize, nclasses, p * batch - tstart, handle); printf("\b\b\b\b\b\b\b\b\b"); } } } cuCorrect->gpuClear(); g_getVotingResult<<<dim3((testX.size() + batch - 1) / batch), dim3(batch)>>>( vote->getDev(), testY->getDev(), cuCorrect->getDev(), testX.size(), nclasses); cudaDeviceSynchronize(); getLastCudaError("g_getVotingResult"); cuCorrect->toCpu(); return cuCorrect->get(0,0,0); } void getBatchImageWithStreams(cuMatrixVector<double>&x, cuMatrixVector<double>&batchImg, int start, cudaStream_t stream1){ for(int i = 0; i < batchImg.size(); i++){ memcpy(batchImg[i]->getHost(), x[i + start]->getHost(), sizeof(double) * batchImg[i]->getLen()); batchImg[i]->toGpu(stream1); } } void cuTrainNetwork(cuMatrixVector<double>&x, cuMatrix<int>*y, cuMatrixVector<double>&testX, cuMatrix<int>* testY, int batch, int ImgSize, int nclasses, std::vector<double>&nlrate, std::vector<double>&nMomentum, std::vector<int>&epoCount, cublasHandle_t handle) { if(nlrate.size() != nMomentum.size() || nMomentum.size() != epoCount.size() || nlrate.size() != epoCount.size()) { printf("nlrate, nMomentum, epoCount size not equal\n"); exit(0); } if(Config::instance()->getIsGradientChecking()) gradientChecking(x.m_devPoint, y->getDev(), batch, ImgSize, nclasses, handle); predictTestDate(x, y, testX, testY, batch, ImgSize, nclasses, 0, handle); printf("correct is %d\n", cuCorrect->get(0,0,0)); int epochs = 10000; double lrate = 0.05; double Momentum = 0.9; int id = 0; for (int epo = 0; epo < epochs; epo++) { if (id >= nlrate.size()) break; lrate = nlrate[id]; Momentum = nMomentum[id]; Config::instance()->setLrate(lrate); Config::instance()->setMomentum(Momentum); double start, end; start = clock(); cuApplyRandom(batch, clock(), ImgSize); for(int i = 0; i < que.size(); i++){ if(que[i]->m_type == std::string("FC")){ FullConnect* layer = (FullConnect*)Layers::instance()->get(que[i]->m_name); layer->drop(); } } x.shuffle(5000, y); cudaStream_t stream1; checkCudaErrors(cudaStreamCreate(&stream1)); getBatchImageWithStreams(x, batchImg[0], 0, stream1); int batchImgId = 1; for (int k = 0; k < (x.size() + batch - 1) / batch; k ++) { cudaStreamSynchronize(stream1); int start = k * batch; printf("train %2d%%", 100 * start / ((x.size() + batch - 1))); if(start + batch <= x.size() - batch) getBatchImageWithStreams(x, batchImg[batchImgId], start + batch, stream1); else{ int tstart = x.size() - batch; getBatchImageWithStreams(x, batchImg[batchImgId], tstart, stream1); } if(start + batch > x.size()){ start = x.size() - batch; } batchImgId = 1 - batchImgId; 
cuApplyCropRandom(batchImg[batchImgId].m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize); if(fabs(Config::instance()->getDistortion()) >= 0.1 || Config::instance()->getScale() >= 1 || Config::instance()->getRotation() >= 1) cuApplyDistortion(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize); if (Config::instance()->getHorizontal()) { cuApplyHorizontal(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, RANDOM_HORIZONTAL); } cuApplyWhiteNoise(cu_distortion_vector->m_devPoint, cu_distortion_vector->m_devPoint, batch, ImgSize, Config::instance()->getWhiteNoise()); if (Config::instance()->getImageShow()) { for (int ff = batch - 1; ff >= 0; ff--) { showImg(batchImg[batchImgId][ff], 5); showImg(cu_distortion_vector->m_vec[ff], 5); cv::waitKey(0); } } getNetworkCost(cu_distortion_vector->m_devPoint, y->getDev() + start, batch, ImgSize, nclasses, handle); updataWB(lrate, Momentum, batch); printf("\b\b\b\b\b\b\b\b\b"); } checkCudaErrors(cudaStreamDestroy(stream1)); double cost = 0.0; for(int i = 0; i < que.size(); i++){ LayerBase* layer = (LayerBase*)Layers::instance()->get(que[i]->m_name); layer->calCost(); layer->printCost(); cost += layer->getCost(); } char str[512]; end = clock(); sprintf(str, "epoch=%d time=%.03lfs cost=%lf Momentum=%.06lf lrate=%.08lf", epo, (double) (end - start) / CLOCKS_PER_SEC, cost, Config::instance()->getMomentum(), Config::instance()->getLrate()); printf("%s\n", str); LOG(str, "Result/log.txt"); if (epo && epo % epoCount[id] == 0) { for(int i = 0; i < que.size(); i++){ LayerBase* layer = (LayerBase*)Layers::instance()->get(que[i]->m_name); layer->clearMomentum(); } id++; } printf("===================weight value================\n"); for(int i = 0; i < que.size(); i++){ LayerBase* layer = Layers::instance()->get(que[i]->m_name); layer->printParameter(); } printf("===================test Result================\n"); predictTestDate(x, y, testX, testY, batch, ImgSize, nclasses, false, handle); sprintf(str, "test %.2lf%%/%.2lf%%", 100.0 * cuCorrect->get(0, 0, 0) / testX.size(), 100.0 * cuCurCorrect / testX.size()); printf("%s\n",str); LOG(str, "Result/log.txt"); if(epo && epo % Config::instance()->getTestEpoch() == 0){ predictTestDate(x, y, testX, testY, batch, ImgSize, nclasses, true, handle); sprintf(str, "test voting correct %.2lf%%/%.2lf%%", 100.0 * cuCorrect->get(0, 0, 0) / testX.size(), 100.0 * cuCurCorrect / testX.size()); printf("%s\n",str); LOG(str, "Result/log.txt"); } if(epo == 0){ MemoryMonitor::instance()->printCpuMemory(); MemoryMonitor::instance()->printGpuMemory(); } } } /* */ void __global__ g_getVoteAdd(int* voting, int* predict, int* y, int* correct, int len, int nclasses) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int idx = i + blockDim.x * blockIdx.x + threadIdx.x; if(idx < len) { int* pvoting = voting + idx * nclasses; int* ppredict= predict+ idx * nclasses; int _max = pvoting[0] + ppredict[0]; int rid = 0; for(int j = 0; j < nclasses; j++) { pvoting[j] += ppredict[j]; if(pvoting[j] > _max) { _max = pvoting[j]; rid = j; } } if(rid == y[idx]) { atomicAdd(correct, 1); } } } } int cuVoteAdd(cuMatrix<int>*& voteSum, cuMatrix<int>*& predict, cuMatrix<int>*& testY, cuMatrix<int>*& correct, int nclasses) { g_getVoteAdd<<<dim3((testY->getLen() + 256 - 1) / 256), dim3(256)>>>( voteSum->getDev(), predict->getDev(), testY->getDev(), correct->getDev(), testY->getLen(), nclasses); cudaDeviceSynchronize(); getLastCudaError("g_getVoteAdd"); correct->toCpu(); return 
correct->get(0, 0, 0); }
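predictTestDate and cuTrainNetwork above overlap data movement with compute by ping-ponging between batchImg[0] and batchImg[1]: while batch k is consumed, batch k+1 is uploaded on stream1, and cudaStreamSynchronize(stream1) at the top of the loop guarantees the upload has landed before the buffer is read. A stripped-down sketch of that double-buffering pattern with plain arrays (process, runBatches and the two streams are illustrative stand-ins for the cuMatrixVector machinery; real copy/compute overlap additionally needs pinned host buffers):

#include <cuda_runtime.h>

__global__ void process(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 2.0f * in[i];          // stand-in for the real forward/backward pass
}

// Double-buffered batch pipeline: upload batch k+1 while batch k is processed.
void runBatches(const float* hostIn, float* hostOut, int numBatches, int batchLen) {
    float *dBuf[2], *dOut;
    cudaMalloc(&dBuf[0], batchLen * sizeof(float));
    cudaMalloc(&dBuf[1], batchLen * sizeof(float));
    cudaMalloc(&dOut,    batchLen * sizeof(float));
    cudaStream_t copy, compute;
    cudaStreamCreate(&copy);
    cudaStreamCreate(&compute);

    int cur = 0;                                // buffer holding the batch being processed
    cudaMemcpyAsync(dBuf[cur], hostIn, batchLen * sizeof(float),
                    cudaMemcpyHostToDevice, copy);       // prime the pipeline with batch 0
    for (int k = 0; k < numBatches; ++k) {
        cudaStreamSynchronize(copy);            // batch k has landed in dBuf[cur]
        if (k + 1 < numBatches)                 // start uploading batch k+1 into the other buffer
            cudaMemcpyAsync(dBuf[1 - cur], hostIn + (size_t)(k + 1) * batchLen,
                            batchLen * sizeof(float), cudaMemcpyHostToDevice, copy);
        process<<<(batchLen + 255) / 256, 256, 0, compute>>>(dBuf[cur], dOut, batchLen);
        cudaMemcpyAsync(hostOut + (size_t)k * batchLen, dOut,
                        batchLen * sizeof(float), cudaMemcpyDeviceToHost, compute);
        cudaStreamSynchronize(compute);         // dBuf[cur] may safely be refilled next iteration
        cur = 1 - cur;                          // same role swap as batchImgId = 1 - batchImgId
    }
    cudaStreamDestroy(copy); cudaStreamDestroy(compute);
    cudaFree(dBuf[0]); cudaFree(dBuf[1]); cudaFree(dOut);
}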
6c2c1346f6f60ce37eb07b8ba3bc719ed394d2e8.hip
// !!! This is a file automatically generated by hipify!!! #if __linux__ && defined(__INTEL_COMPILER) #define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend) #endif #include <string> #include <cstring> #include <cctype> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <bitset> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include "tbb/concurrent_hash_map.h" #include "tbb/blocked_range.h" #include "tbb/parallel_for.h" #include "tbb/tick_count.h" #include "tbb/task_scheduler_init.h" #include "tbb/concurrent_vector.h" // #include "tbb/tbb_allocator.hz" #include "utility.h" #include "csv.hpp" #include "timer.h" using namespace tbb; using namespace std; std::vector<string> timestamp; std::vector<std::string> split_string_2(std::string str, char del) { int first = 0; int last = str.find_first_of(del); std::vector<std::string> result; while (first < str.size()) { std::string subStr(str, first, last - first); result.push_back(subStr); first = last + 1; last = str.find_first_of(del, first); if (last == std::string::npos) { last = str.size(); } } return result; } int main( int argc, char* argv[] ) { int counter = 0; int N = atoi(argv[2]); int ngpus = 4; int RATIO = 1000000; const size_t iBytes = N * sizeof(float); unsigned int t, travdirtime; struct in_addr inaddr; char *some_addr; try { tbb::tick_count mainStartTime = tbb::tick_count::now(); srand(2); utility::thread_number_range threads(tbb::task_scheduler_init::default_num_threads,0); // Data = new MyString[N]; float **d_A = (float **)malloc(sizeof(float *) * ngpus); float **d_B = (float **)malloc(sizeof(float *) * ngpus); float **d_C = (float **)malloc(sizeof(float *) * ngpus); float **h_A = (float **)malloc(sizeof(float *) * ngpus); float **h_B = (float **)malloc(sizeof(float *) * ngpus); hipStream_t *stream = (hipStream_t *)malloc(sizeof(hipStream_t) * ngpus); for (int i = 0; i < ngpus; i++) { hipSetDevice(i); hipMalloc((void **) &d_A[i], iBytes); hipMalloc((void **) &d_B[i], iBytes); hipMalloc((void **) &d_C[i], iBytes); hipHostMalloc((void **) &h_A[i], iBytes); hipHostMalloc((void **) &h_B[i], iBytes); hipStreamCreate(&stream[i]); } const string csv_file = std::string(argv[1]); vector<vector<string>> data; start_timer(&t); try { Csv objCsv(csv_file); if (!objCsv.getCsv(data)) { cout << "read ERROR" << endl; return 1; } for (unsigned int row = 0; row < data.size(); row++) { vector<string> rec = data[row]; std::string tms = rec[0]; for(size_t c = tms.find_first_of("\""); c != string::npos; c = c = tms.find_first_of("\"")){ tms.erase(c,1); } for(size_t c = tms.find_first_of("/"); c != string::npos; c = c = tms.find_first_of("/")){ tms.erase(c,1); } for(size_t c = tms.find_first_of("."); c != string::npos; c = c = tms.find_first_of(".")){ tms.erase(c,1); } for(size_t c = tms.find_first_of(" "); c != string::npos; c = c = tms.find_first_of(" ")){ tms.erase(c,1); } for(size_t c = tms.find_first_of(":"); c != string::npos; c = c = tms.find_first_of(":")){ tms.erase(c,1); } h_A[0][row] = stof(tms); if(row % RATIO == 0) { cout << "stored " << row / RATIO << "..." << endl; } // timestamp.push_back(tms); } } catch (...) { cout << "EXCEPTION!" 
<< endl; return 1; } travdirtime = stop_timer(&t); print_timer(travdirtime); hipSetDevice(0); hipDeviceEnablePeerAccess(0, 1); start_timer(&t); hipMemcpy(d_A[0], h_A[0], iBytes, hipMemcpyHostToDevice); travdirtime = stop_timer(&t); print_timer(travdirtime); start_timer(&t); hipMemcpy(d_A[1], d_A[0], iBytes, hipMemcpyDeviceToDevice); travdirtime = stop_timer(&t); print_timer(travdirtime); /* std::remove("writethrough-timestamp"); ofstream outputfile("writethrough-timestamp"); for(auto itr = timestamp.begin(); itr != timestamp.end(); ++itr) { outputfile << *itr << std::endl; } outputfile.close(); */ utility::report_elapsed_time((tbb::tick_count::now() - mainStartTime).seconds()); return 0; } catch(std::exception& e) { std::cerr<<"error occurred. error text is :\"" <<e.what()<<"\"\n"; } }
6c2c1346f6f60ce37eb07b8ba3bc719ed394d2e8.cu
#if __linux__ && defined(__INTEL_COMPILER) #define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend) #endif #include <string> #include <cstring> #include <cctype> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <bitset> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include "tbb/concurrent_hash_map.h" #include "tbb/blocked_range.h" #include "tbb/parallel_for.h" #include "tbb/tick_count.h" #include "tbb/task_scheduler_init.h" #include "tbb/concurrent_vector.h" // #include "tbb/tbb_allocator.hz" #include "utility.h" #include "csv.hpp" #include "timer.h" using namespace tbb; using namespace std; std::vector<string> timestamp; std::vector<std::string> split_string_2(std::string str, char del) { int first = 0; int last = str.find_first_of(del); std::vector<std::string> result; while (first < str.size()) { std::string subStr(str, first, last - first); result.push_back(subStr); first = last + 1; last = str.find_first_of(del, first); if (last == std::string::npos) { last = str.size(); } } return result; } int main( int argc, char* argv[] ) { int counter = 0; int N = atoi(argv[2]); int ngpus = 4; int RATIO = 1000000; const size_t iBytes = N * sizeof(float); unsigned int t, travdirtime; struct in_addr inaddr; char *some_addr; try { tbb::tick_count mainStartTime = tbb::tick_count::now(); srand(2); utility::thread_number_range threads(tbb::task_scheduler_init::default_num_threads,0); // Data = new MyString[N]; float **d_A = (float **)malloc(sizeof(float *) * ngpus); float **d_B = (float **)malloc(sizeof(float *) * ngpus); float **d_C = (float **)malloc(sizeof(float *) * ngpus); float **h_A = (float **)malloc(sizeof(float *) * ngpus); float **h_B = (float **)malloc(sizeof(float *) * ngpus); cudaStream_t *stream = (cudaStream_t *)malloc(sizeof(cudaStream_t) * ngpus); for (int i = 0; i < ngpus; i++) { cudaSetDevice(i); cudaMalloc((void **) &d_A[i], iBytes); cudaMalloc((void **) &d_B[i], iBytes); cudaMalloc((void **) &d_C[i], iBytes); cudaMallocHost((void **) &h_A[i], iBytes); cudaMallocHost((void **) &h_B[i], iBytes); cudaStreamCreate(&stream[i]); } const string csv_file = std::string(argv[1]); vector<vector<string>> data; start_timer(&t); try { Csv objCsv(csv_file); if (!objCsv.getCsv(data)) { cout << "read ERROR" << endl; return 1; } for (unsigned int row = 0; row < data.size(); row++) { vector<string> rec = data[row]; std::string tms = rec[0]; for(size_t c = tms.find_first_of("\""); c != string::npos; c = c = tms.find_first_of("\"")){ tms.erase(c,1); } for(size_t c = tms.find_first_of("/"); c != string::npos; c = c = tms.find_first_of("/")){ tms.erase(c,1); } for(size_t c = tms.find_first_of("."); c != string::npos; c = c = tms.find_first_of(".")){ tms.erase(c,1); } for(size_t c = tms.find_first_of(" "); c != string::npos; c = c = tms.find_first_of(" ")){ tms.erase(c,1); } for(size_t c = tms.find_first_of(":"); c != string::npos; c = c = tms.find_first_of(":")){ tms.erase(c,1); } h_A[0][row] = stof(tms); if(row % RATIO == 0) { cout << "stored " << row / RATIO << "..." << endl; } // timestamp.push_back(tms); } } catch (...) { cout << "EXCEPTION!" 
<< endl; return 1; } travdirtime = stop_timer(&t); print_timer(travdirtime); cudaSetDevice(0); cudaDeviceEnablePeerAccess(0, 1); start_timer(&t); cudaMemcpy(d_A[0], h_A[0], iBytes, cudaMemcpyHostToDevice); travdirtime = stop_timer(&t); print_timer(travdirtime); start_timer(&t); cudaMemcpy(d_A[1], d_A[0], iBytes, cudaMemcpyDeviceToDevice); travdirtime = stop_timer(&t); print_timer(travdirtime); /* std::remove("writethrough-timestamp"); ofstream outputfile("writethrough-timestamp"); for(auto itr = timestamp.begin(); itr != timestamp.end(); ++itr) { outputfile << *itr << std::endl; } outputfile.close(); */ utility::report_elapsed_time((tbb::tick_count::now() - mainStartTime).seconds()); return 0; } catch(std::exception& e) { std::cerr<<"error occurred. error text is :\"" <<e.what()<<"\"\n"; } }
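The program above calls cudaDeviceEnablePeerAccess(0, 1) before a device-to-device cudaMemcpy from d_A[0] (allocated on GPU 0) to d_A[1] (allocated on GPU 1). That API actually takes (peerDevice, flags) with flags required to be 0, and here it names the current device as its own peer, so the call returns an error that the code never checks. A sketch of a guarded cross-GPU copy (copyAcrossGpus is an illustrative helper, assuming at least two visible GPUs; it is not part of the original program):

#include <cuda_runtime.h>

// Copy `bytes` from a buffer that lives on GPU 0 to a buffer that lives on GPU 1.
void copyAcrossGpus(float* dstOnGpu1, const float* srcOnGpu0, size_t bytes) {
    int canAccess = 0;
    cudaDeviceCanAccessPeer(&canAccess, 0, 1);   // can device 0 reach device 1 directly?
    cudaSetDevice(0);
    if (canAccess)
        cudaDeviceEnablePeerAccess(1, 0);        // peerDevice = 1, flags must be 0
    // cudaMemcpyPeer takes explicit device ids; it uses the direct P2P path when
    // available, otherwise the runtime stages the copy through the host.
    cudaMemcpyPeer(dstOnGpu1, 1, srcOnGpu0, 0, bytes);
    cudaDeviceSynchronize();
}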
a56e71e0a11a6ef1d2b673c70912419d35514dac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N (33 * 1024) __global__ void add( int *a, int *b, int *c ) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = a[tid] + b[tid]; tid += blockDim.x * gridDim.x; } } int main( void ) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // allocate the memory on the GPU hipMalloc( (void**)&dev_a, N * sizeof(int) ); hipMalloc( (void**)&dev_b, N * sizeof(int) ); hipMalloc( (void**)&dev_c, N * sizeof(int) ); // fill the arrays a and b on the CPU for (int i=0; i<N; i++) { a[i] = i; b[i] = i * i; } // copy the arrays 'a' and 'b' to the GPU hipMemcpy( dev_a,a,N * sizeof(int),hipMemcpyHostToDevice ); hipMemcpy( dev_b,b,N * sizeof(int),hipMemcpyHostToDevice ); hipLaunchKernelGGL(( add), dim3(128),dim3(128), 0, 0, dev_a, dev_b, dev_c ); // copy the array 'c' back from the GPU to the CPU hipMemcpy( c,dev_c,N * sizeof(int),hipMemcpyDeviceToHost ); // verify that the GPU did the work we requested bool success = true; for (int i=0; i<N; i++) { if ((a[i] + b[i]) != c[i]) { printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] ); success = false; } } if (success) printf( "We did it!\n" ); // free the memory allocated on the GPU hipFree( dev_a ); hipFree( dev_b ); hipFree( dev_c ); return 0; }
a56e71e0a11a6ef1d2b673c70912419d35514dac.cu
#include <stdio.h> #define N (33 * 1024) __global__ void add( int *a, int *b, int *c ) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = a[tid] + b[tid]; tid += blockDim.x * gridDim.x; } } int main( void ) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // allocate the memory on the GPU cudaMalloc( (void**)&dev_a, N * sizeof(int) ); cudaMalloc( (void**)&dev_b, N * sizeof(int) ); cudaMalloc( (void**)&dev_c, N * sizeof(int) ); // fill the arrays ‘a’ and ‘b’ on the CPU for (int i=0; i<N; i++) { a[i] = i; b[i] = i * i; } // copy the arrays 'a' and 'b' to the GPU cudaMemcpy( dev_a,a,N * sizeof(int),cudaMemcpyHostToDevice ); cudaMemcpy( dev_b,b,N * sizeof(int),cudaMemcpyHostToDevice ); add<<<128,128>>>( dev_a, dev_b, dev_c ); // copy the array 'c' back from the GPU to the CPU cudaMemcpy( c,dev_c,N * sizeof(int),cudaMemcpyDeviceToHost ); // verify that the GPU did the work we requested bool success = true; for (int i=0; i<N; i++) { if ((a[i] + b[i]) != c[i]) { printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] ); success = false; } } if (success) printf( "We did it!\n" ); // free the memory allocated on the GPU cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_c ); return 0; }
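The add kernel in this pair uses a grid-stride loop, which is what lets the fixed <<<128,128>>> launch (16,384 threads) cover N = 33 * 1024 = 33,792 elements: each thread walks indices spaced blockDim.x * gridDim.x apart and so handles two or three elements. For contrast, a sketch of the other common convention, sizing the grid to the data and guarding with a bounds check (add_one_per_thread is an illustrative name, not in the original):

__global__ void add_one_per_thread(const int* a, const int* b, int* c, int n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < n)                    // each thread owns exactly one element
        c[tid] = a[tid] + b[tid];
}

// Launch with enough blocks to cover all n elements:
//   int threads = 128;
//   int blocks  = (n + threads - 1) / threads;   // 264 blocks for n = 33 * 1024
//   add_one_per_thread<<<blocks, threads>>>(dev_a, dev_b, dev_c, n);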
e87aef4cb0fdfef38abdc012a090ad8eb7e7d13e.hip
// !!! This is a file automatically generated by hipify!!!
/************************************************************************************************/
/************ Arturo Gonzalez Bencomo 172906, Arturo Torre Gonzalez 90226 ***********************/
/************************************************************************************************/
/********************************** kernel.cu ***************************************************/
/************************************************************************************************/
/** This file defines the function that converts an image from RGB to grayscale, ****************/
/** i.e. the code executed on the GPU ***********************************************************/
/* Import libraries */
#include <math.h>
#include <iostream>
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <stdlib.h>
using namespace std;
/* This kernel runs the grayscale conversion in parallel, one thread per pixel of the matrix */
__global__ void grayscale(float *RED, float *GREEN, float *BLUE, float *GRAY){
    int ROW = blockIdx.y*blockDim.y+threadIdx.y;
    int COL = blockIdx.x*blockDim.x+threadIdx.x;
    if (ROW < N && COL < N) {
        // each thread converts one pixel of the N x N image
        int idx = ROW * N + COL;
        // Weighted sum of the RGB channels (BT.601 luma coefficients) gives the gray value
        GRAY[idx] = truncf(0.2989f*RED[idx] + 0.587f*GREEN[idx] + 0.114f*BLUE[idx]);
    }
}
e87aef4cb0fdfef38abdc012a090ad8eb7e7d13e.cu
/************************************************************************************************/
/************ Arturo Gonzalez Bencomo 172906, Arturo Torre Gonzalez 90226 ***********************/
/************************************************************************************************/
/********************************** kernel.cu ***************************************************/
/************************************************************************************************/
/** This file defines the function that converts an image from RGB to grayscale, ****************/
/** i.e. the code executed on the GPU ***********************************************************/
/* Import libraries */
#include <math.h>
#include <iostream>
#include "cuda_runtime.h"
#include "kernel.h"
#include <stdlib.h>
using namespace std;
/* This kernel runs the grayscale conversion in parallel, one thread per pixel of the matrix */
__global__ void grayscale(float *RED, float *GREEN, float *BLUE, float *GRAY){
    int ROW = blockIdx.y*blockDim.y+threadIdx.y;
    int COL = blockIdx.x*blockDim.x+threadIdx.x;
    if (ROW < N && COL < N) {
        // each thread converts one pixel of the N x N image
        int idx = ROW * N + COL;
        // Weighted sum of the RGB channels (BT.601 luma coefficients) gives the gray value
        GRAY[idx] = truncf(0.2989f*RED[idx] + 0.587f*GREEN[idx] + 0.114f*BLUE[idx]);
    }
}
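Since the grayscale kernel indexes the image with a two-dimensional thread grid, the host has to launch a grid that tiles the N x N image in both x and y. The host code is not part of this pair, so the launcher below is a hypothetical sketch (launchGrayscale and BLOCK are invented names; N is assumed to come from kernel.h, as the kernel itself assumes):

// Hypothetical host-side launcher for the grayscale kernel above.
void launchGrayscale(float* dRed, float* dGreen, float* dBlue, float* dGray) {
    const int BLOCK = 16;                                // 16 x 16 = 256 threads per block
    dim3 threads(BLOCK, BLOCK);
    dim3 blocks((N + BLOCK - 1) / BLOCK,                 // ceil(N / BLOCK) tiles in x
                (N + BLOCK - 1) / BLOCK);                // ceil(N / BLOCK) tiles in y
    grayscale<<<blocks, threads>>>(dRed, dGreen, dBlue, dGray);
    cudaDeviceSynchronize();
}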
c8f995aa92e7323463d9cfbee4f72e2dcdf629b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Parallel bitonic sort using CUDA. * Compile with * nvcc -arch=sm_11 bitonic_sort.cu * Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm * License: BSD 3 */ #include <stdlib.h> #include <stdio.h> #include <time.h> /* Every thread gets exactly one value in the unsorted array. */ #define THREADS 512 // 2^9 #define BLOCKS 32768 // 2^15 #define NUM_VALS THREADS*BLOCKS void print_elapsed(clock_t start, clock_t stop) { double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC; printf("Elapsed time: %.3fs\n", elapsed); } float random_float() { return (float)rand()/(float)RAND_MAX; } void array_print(float *arr, int length) { int i; for (i = 0; i < length; ++i) { printf("%1.3f ", arr[i]); } printf("\n"); } void array_fill(float *arr, int length) { srand(time(NULL)); int i; for (i = 0; i < length; ++i) { arr[i] = random_float(); } } __global__ void bitonic_sort_step(float *dev_values, int j, int k) { unsigned int i, ixj; /* Sorting partners: i and ixj */ i = threadIdx.x + blockDim.x * blockIdx.x; ixj = i^j; /* The threads with the lowest ids sort the array. */ if ((ixj)>i) { if ((i&k)==0) { /* Sort ascending */ if (dev_values[i]>dev_values[ixj]) { /* exchange(i,ixj); */ float temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } if ((i&k)!=0) { /* Sort descending */ if (dev_values[i]<dev_values[ixj]) { /* exchange(i,ixj); */ float temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } } } /** * Inplace bitonic sort using CUDA. */ void bitonic_sort(float *values) { float *dev_values; size_t size = NUM_VALS * sizeof(float); hipMalloc((void**) &dev_values, size); hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice); dim3 blocks(BLOCKS,1); /* Number of blocks */ dim3 threads(THREADS,1); /* Number of threads */ int j, k; /* Major step */ for (k = 2; k <= NUM_VALS; k <<= 1) { /* Minor step */ for (j=k>>1; j>0; j=j>>1) { hipLaunchKernelGGL(( bitonic_sort_step), dim3(blocks), dim3(threads), 0, 0, dev_values, j, k); } } hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost); hipFree(dev_values); } int main(void) { clock_t start, stop; float *values = (float*) malloc( NUM_VALS * sizeof(float)); array_fill(values, NUM_VALS); start = clock(); bitonic_sort(values); /* Inplace */ stop = clock(); print_elapsed(start, stop); }
c8f995aa92e7323463d9cfbee4f72e2dcdf629b8.cu
/* * Parallel bitonic sort using CUDA. * Compile with * nvcc -arch=sm_11 bitonic_sort.cu * Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm * License: BSD 3 */ #include <stdlib.h> #include <stdio.h> #include <time.h> /* Every thread gets exactly one value in the unsorted array. */ #define THREADS 512 // 2^9 #define BLOCKS 32768 // 2^15 #define NUM_VALS THREADS*BLOCKS void print_elapsed(clock_t start, clock_t stop) { double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC; printf("Elapsed time: %.3fs\n", elapsed); } float random_float() { return (float)rand()/(float)RAND_MAX; } void array_print(float *arr, int length) { int i; for (i = 0; i < length; ++i) { printf("%1.3f ", arr[i]); } printf("\n"); } void array_fill(float *arr, int length) { srand(time(NULL)); int i; for (i = 0; i < length; ++i) { arr[i] = random_float(); } } __global__ void bitonic_sort_step(float *dev_values, int j, int k) { unsigned int i, ixj; /* Sorting partners: i and ixj */ i = threadIdx.x + blockDim.x * blockIdx.x; ixj = i^j; /* The threads with the lowest ids sort the array. */ if ((ixj)>i) { if ((i&k)==0) { /* Sort ascending */ if (dev_values[i]>dev_values[ixj]) { /* exchange(i,ixj); */ float temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } if ((i&k)!=0) { /* Sort descending */ if (dev_values[i]<dev_values[ixj]) { /* exchange(i,ixj); */ float temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } } } /** * Inplace bitonic sort using CUDA. */ void bitonic_sort(float *values) { float *dev_values; size_t size = NUM_VALS * sizeof(float); cudaMalloc((void**) &dev_values, size); cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice); dim3 blocks(BLOCKS,1); /* Number of blocks */ dim3 threads(THREADS,1); /* Number of threads */ int j, k; /* Major step */ for (k = 2; k <= NUM_VALS; k <<= 1) { /* Minor step */ for (j=k>>1; j>0; j=j>>1) { bitonic_sort_step<<<blocks, threads>>>(dev_values, j, k); } } cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost); cudaFree(dev_values); } int main(void) { clock_t start, stop; float *values = (float*) malloc( NUM_VALS * sizeof(float)); array_fill(values, NUM_VALS); start = clock(); bitonic_sort(values); /* Inplace */ stop = clock(); print_elapsed(start, stop); }
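Bitonic sort as written requires the element count to be a power of two, which holds here (NUM_VALS = 512 * 32768 = 2^24), and main() never checks that the result is actually ordered. A small host-side check that could be called after bitonic_sort(values) (a sketch; array_is_sorted is not part of the original):

#include <stdio.h>

// Returns 1 if arr[0..length-1] is sorted in ascending order, 0 otherwise.
int array_is_sorted(const float *arr, int length) {
    for (int i = 1; i < length; ++i) {
        if (arr[i - 1] > arr[i]) {
            printf("order violated at index %d: %f > %f\n", i, arr[i - 1], arr[i]);
            return 0;
        }
    }
    return 1;
}

// Usage inside main(), after the sort:
//   if (array_is_sorted(values, NUM_VALS)) printf("sorted\n");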
c1884fed2dcb9f18bd6b2c7e62b47da7eb1807b9.hip
// !!! This is a file automatically generated by hipify!!! #include "imageProcessing.h" #include "globalVars.h" #include "routines.h" #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> /* * blockMatchingFunction */ __host__ __device__ double computeMatch(unsigned char *im, int im_step, unsigned char *bl, int bl_step, int bl_cols, int bl_rows, int oi, int oj, int stride){ if (!im || !bl) return 0.0; double nb = bl_cols*bl_rows; double x = 0; int maxI = bl_rows-stride+1; int maxJ = bl_cols-stride+1; for(int i = 0;i < maxI;i+= stride){ for(int j = 0;j < maxJ;j+= stride){ unsigned char v1 = im[INDXs(im_step,oi+i,oj+j)]; unsigned char v2 = bl[INDXs(bl_step,i,j)]; x += (v2-v1)*(v2-v1); //im[INDXs(im_step,oi+i,oj+j)] = ABS(v2-v1); } } x = x / nb; return x; } /* My kernel */ __global__ void myKernel(double *x, int im_step, int bl_step, unsigned char *im, unsigned char *bl, int bl_cols, int bl_rows, int stride){ int idx =blockIdx.x * blockDim.x +threadIdx.x; int idy =blockIdx.y * blockDim.y +threadIdx.y; x[im_step*idy+idx] = computeMatch(im,im_step, bl,bl_step,bl_cols,bl_rows, idy,idx,stride); } double blockMatching(cv::Mat *image, cv::Mat *block, int stride, unsigned char *res, int samplenum){ if (!image || !block) return DBL_MAX; unsigned char *bl = (unsigned char*)(block->data); int bl_step = block->step; int bl_cols = block->cols; int bl_rows = block->rows; unsigned char *im = (unsigned char*)(image->data); int im_step = image->step; int im_cols = image->cols; int im_rows = image->rows; int coord_i_min = 0; int coord_j_min = 0; double minVal = DBL_MAX; int istart = 0; int iend = im_rows - bl_rows; int jstart = 0; int jend = im_cols - bl_cols; for(int i = istart;i < iend -stride+1;i+=stride){ for(int j = jstart;j < jend-stride+1;j+=stride){ double x = computeMatch(im,im_step, bl,bl_step,bl_cols,bl_rows, i,j,stride); if(x < minVal){ minVal = x; coord_i_min = i; coord_j_min = j; } } } if (Verbose) fprintf(stderr,"sample cols: %d\n",bl_cols); if (Verbose) fprintf(stderr,"sample rows: %d\n",bl_rows); if (Verbose) fprintf(stderr,"sample step: %d\n",bl_step); if (Verbose) fprintf(stderr,"image cols: %d\n",im_cols); if (Verbose) fprintf(stderr,"image rows: %d\n",im_rows); if (Verbose) fprintf(stderr,"image step: %d\n",im_step); memcpy(&(res[0]),&coord_i_min,sizeof(int)); memcpy(&(res[4]),&coord_j_min,sizeof(int)); memcpy(&(res[8]),&minVal,sizeof(double)); memcpy(&(res[16]),&samplenum,sizeof(int)); if (Verbose) fprintf(stderr,"%d sample x=%d, y=%d --> %f \n",samplenum, coord_j_min,coord_i_min,minVal); return minVal; } double blockMatchingWithScalingAndRotation(cv::Mat *image, cv::Mat *blocki, int stride, unsigned char *res, int samplenum){ hipSetDevice(0); if (!image || !blocki) return DBL_MAX; //this code has memory leaks... 
unsigned char *im = (unsigned char*)(image->data); int im_step = image->step; int im_cols = image->cols; int im_rows = image->rows; int coord_i_min = 0; int coord_j_min = 0; double minVal = DBL_MAX; float bestScale = 0; int bestRotation = 0; unsigned char *cim, *cbl; double *ctabX = NULL; hipMalloc((void **)&cim, sizeof(unsigned char)*(im_rows*im_cols)); hipMalloc((void **)&ctabX,sizeof(double)*(im_rows*im_cols)); for (int r = -10; r < 10; r = r+8){ printf("Trying rotation %d\n",r); cv::Mat *rot = rotateImage(blocki,r); for (float s = 1.0; s > 0.; s = s-0.6){ printf("Trying scaling %f min val : %d\n",s,minVal); cv::Mat *block = scaleImage(rot,s); //showOneImage(*block); unsigned char *bl = (unsigned char*)(block->data); int bl_step = block->step; int bl_cols = block->cols; int bl_rows = block->rows; int istart = 0; int iend = im_rows - bl_rows; int jstart = 0; int jend = im_cols - bl_cols; //unsigned char *cim,*cbl ; double res[(im_rows*im_cols)]; //double *ctabX = NULL ; // for(int i = istart;i < iend -stride+1;i+=stride){ // for(int j = jstart;j < jend-stride+1;j+=stride){ // double x = computeMatch(im,im_step, // bl,bl_step,bl_cols,bl_rows, // i,j,stride); dim3 blocks(jend); dim3 threads(1,iend); // hipMalloc((void **)&cim, sizeof(unsigned char)*(im_rows*im_cols)); hipMalloc((void **)&cbl, sizeof(unsigned char)*(bl_rows*bl_cols)); // hipMalloc((void **)&ctabX,sizeof(double)*(im_rows*im_cols)); hipMemcpy(cim,im,sizeof(unsigned char)*(im_rows*im_cols), hipMemcpyHostToDevice ); hipMemcpy(cbl,bl,sizeof(unsigned char)*(bl_rows*bl_cols), hipMemcpyHostToDevice ); hipMemcpy(ctabX, res,sizeof(double)*(im_rows*im_cols), hipMemcpyHostToDevice ); hipLaunchKernelGGL(( myKernel), dim3(blocks), dim3(threads) , 0, 0, ctabX, im_step, bl_step, cim, cbl, bl_cols, bl_rows, stride); hipMemcpy(im,cim, sizeof(unsigned char)*(im_cols*im_rows), hipMemcpyDeviceToHost ); hipMemcpy(bl,cbl, sizeof(unsigned char)*(bl_cols*bl_rows), hipMemcpyDeviceToHost ); hipMemcpy(res,ctabX, sizeof(double)*(im_rows*im_cols), hipMemcpyDeviceToHost ); for(int i = istart;i < iend -stride+1;i+=stride){ for(int j = jstart;j < jend-stride+1;j+=stride){ double tabActuel = res[j+im_step*i]; if (tabActuel<minVal){ minVal = tabActuel; coord_i_min = i; coord_j_min = j; bestScale = s; bestRotation = r; } } } if (Verbose) fprintf(stderr,"sample cols: %d\n",bl_cols); if (Verbose) fprintf(stderr,"sample rows: %d\n",bl_rows); if (Verbose) fprintf(stderr,"sample step: %d\n",bl_step); if (Verbose) fprintf(stderr,"image cols: %d\n",im_cols); if (Verbose) fprintf(stderr,"image rows: %d\n",im_rows); if (Verbose) fprintf(stderr,"image step: %d\n",im_step); if (Verbose) fprintf(stderr,"Current score: %f\n",minVal); delete block; } delete rot; } memcpy(&(res[0]),&coord_i_min,sizeof(int)); memcpy(&(res[4]),&coord_j_min,sizeof(int)); memcpy(&(res[8]),&minVal,sizeof(double)); memcpy(&(res[16]),&samplenum,sizeof(int)); hipFree(cim); hipFree(cbl); hipFree(ctabX); if (Verbose) fprintf(stderr,"%d sample x=%d, y=%d --> %f (scale %f, rot %d) \n", samplenum, coord_j_min,coord_i_min,minVal,bestScale,bestRotation); return minVal; }
c1884fed2dcb9f18bd6b2c7e62b47da7eb1807b9.cu
#include "imageProcessing.h" #include "globalVars.h" #include "routines.h" #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime_api.h> /* * blockMatchingFunction */ __host__ __device__ double computeMatch(unsigned char *im, int im_step, unsigned char *bl, int bl_step, int bl_cols, int bl_rows, int oi, int oj, int stride){ if (!im || !bl) return 0.0; double nb = bl_cols*bl_rows; double x = 0; int maxI = bl_rows-stride+1; int maxJ = bl_cols-stride+1; for(int i = 0;i < maxI;i+= stride){ for(int j = 0;j < maxJ;j+= stride){ unsigned char v1 = im[INDXs(im_step,oi+i,oj+j)]; unsigned char v2 = bl[INDXs(bl_step,i,j)]; x += (v2-v1)*(v2-v1); //im[INDXs(im_step,oi+i,oj+j)] = ABS(v2-v1); } } x = x / nb; return x; } /* My kernel */ __global__ void myKernel(double *x, int im_step, int bl_step, unsigned char *im, unsigned char *bl, int bl_cols, int bl_rows, int stride){ int idx =blockIdx.x * blockDim.x +threadIdx.x; int idy =blockIdx.y * blockDim.y +threadIdx.y; x[im_step*idy+idx] = computeMatch(im,im_step, bl,bl_step,bl_cols,bl_rows, idy,idx,stride); } double blockMatching(cv::Mat *image, cv::Mat *block, int stride, unsigned char *res, int samplenum){ if (!image || !block) return DBL_MAX; unsigned char *bl = (unsigned char*)(block->data); int bl_step = block->step; int bl_cols = block->cols; int bl_rows = block->rows; unsigned char *im = (unsigned char*)(image->data); int im_step = image->step; int im_cols = image->cols; int im_rows = image->rows; int coord_i_min = 0; int coord_j_min = 0; double minVal = DBL_MAX; int istart = 0; int iend = im_rows - bl_rows; int jstart = 0; int jend = im_cols - bl_cols; for(int i = istart;i < iend -stride+1;i+=stride){ for(int j = jstart;j < jend-stride+1;j+=stride){ double x = computeMatch(im,im_step, bl,bl_step,bl_cols,bl_rows, i,j,stride); if(x < minVal){ minVal = x; coord_i_min = i; coord_j_min = j; } } } if (Verbose) fprintf(stderr,"sample cols: %d\n",bl_cols); if (Verbose) fprintf(stderr,"sample rows: %d\n",bl_rows); if (Verbose) fprintf(stderr,"sample step: %d\n",bl_step); if (Verbose) fprintf(stderr,"image cols: %d\n",im_cols); if (Verbose) fprintf(stderr,"image rows: %d\n",im_rows); if (Verbose) fprintf(stderr,"image step: %d\n",im_step); memcpy(&(res[0]),&coord_i_min,sizeof(int)); memcpy(&(res[4]),&coord_j_min,sizeof(int)); memcpy(&(res[8]),&minVal,sizeof(double)); memcpy(&(res[16]),&samplenum,sizeof(int)); if (Verbose) fprintf(stderr,"%d sample x=%d, y=%d --> %f \n",samplenum, coord_j_min,coord_i_min,minVal); return minVal; } double blockMatchingWithScalingAndRotation(cv::Mat *image, cv::Mat *blocki, int stride, unsigned char *res, int samplenum){ cudaSetDevice(0); if (!image || !blocki) return DBL_MAX; //this code has memory leaks... 
unsigned char *im = (unsigned char*)(image->data); int im_step = image->step; int im_cols = image->cols; int im_rows = image->rows; int coord_i_min = 0; int coord_j_min = 0; double minVal = DBL_MAX; float bestScale = 0; int bestRotation = 0; unsigned char *cim, *cbl; double *ctabX = NULL; cudaMalloc((void **)&cim, sizeof(unsigned char)*(im_rows*im_cols)); cudaMalloc((void **)&ctabX,sizeof(double)*(im_rows*im_cols)); for (int r = -10; r < 10; r = r+8){ printf("Trying rotation %d\n",r); cv::Mat *rot = rotateImage(blocki,r); for (float s = 1.0; s > 0.; s = s-0.6){ printf("Trying scaling %f min val : %d\n",s,minVal); cv::Mat *block = scaleImage(rot,s); //showOneImage(*block); unsigned char *bl = (unsigned char*)(block->data); int bl_step = block->step; int bl_cols = block->cols; int bl_rows = block->rows; int istart = 0; int iend = im_rows - bl_rows; int jstart = 0; int jend = im_cols - bl_cols; //unsigned char *cim,*cbl ; double res[(im_rows*im_cols)]; //double *ctabX = NULL ; // for(int i = istart;i < iend -stride+1;i+=stride){ // for(int j = jstart;j < jend-stride+1;j+=stride){ // double x = computeMatch(im,im_step, // bl,bl_step,bl_cols,bl_rows, // i,j,stride); dim3 blocks(jend); dim3 threads(1,iend); // cudaMalloc((void **)&cim, sizeof(unsigned char)*(im_rows*im_cols)); cudaMalloc((void **)&cbl, sizeof(unsigned char)*(bl_rows*bl_cols)); // cudaMalloc((void **)&ctabX,sizeof(double)*(im_rows*im_cols)); cudaMemcpy(cim,im,sizeof(unsigned char)*(im_rows*im_cols), cudaMemcpyHostToDevice ); cudaMemcpy(cbl,bl,sizeof(unsigned char)*(bl_rows*bl_cols), cudaMemcpyHostToDevice ); cudaMemcpy(ctabX, res,sizeof(double)*(im_rows*im_cols), cudaMemcpyHostToDevice ); myKernel<<< blocks, threads >>>(ctabX, im_step, bl_step, cim, cbl, bl_cols, bl_rows, stride); cudaMemcpy(im,cim, sizeof(unsigned char)*(im_cols*im_rows), cudaMemcpyDeviceToHost ); cudaMemcpy(bl,cbl, sizeof(unsigned char)*(bl_cols*bl_rows), cudaMemcpyDeviceToHost ); cudaMemcpy(res,ctabX, sizeof(double)*(im_rows*im_cols), cudaMemcpyDeviceToHost ); for(int i = istart;i < iend -stride+1;i+=stride){ for(int j = jstart;j < jend-stride+1;j+=stride){ double tabActuel = res[j+im_step*i]; if (tabActuel<minVal){ minVal = tabActuel; coord_i_min = i; coord_j_min = j; bestScale = s; bestRotation = r; } } } if (Verbose) fprintf(stderr,"sample cols: %d\n",bl_cols); if (Verbose) fprintf(stderr,"sample rows: %d\n",bl_rows); if (Verbose) fprintf(stderr,"sample step: %d\n",bl_step); if (Verbose) fprintf(stderr,"image cols: %d\n",im_cols); if (Verbose) fprintf(stderr,"image rows: %d\n",im_rows); if (Verbose) fprintf(stderr,"image step: %d\n",im_step); if (Verbose) fprintf(stderr,"Current score: %f\n",minVal); delete block; } delete rot; } memcpy(&(res[0]),&coord_i_min,sizeof(int)); memcpy(&(res[4]),&coord_j_min,sizeof(int)); memcpy(&(res[8]),&minVal,sizeof(double)); memcpy(&(res[16]),&samplenum,sizeof(int)); cudaFree(cim); cudaFree(cbl); cudaFree(ctabX); if (Verbose) fprintf(stderr,"%d sample x=%d, y=%d --> %f (scale %f, rot %d) \n", samplenum, coord_j_min,coord_i_min,minVal,bestScale,bestRotation); return minVal; }
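A fragile spot in blockMatchingWithScalingAndRotation above is the launch configuration dim3 blocks(jend); dim3 threads(1, iend);: all iend candidate rows become threads of a single block dimension, so the launch fails as soon as im_rows - bl_rows exceeds the 1024-threads-per-block limit (separately, the scaling printf feeds a double to %d). A sketch of a tiled alternative that reuses computeMatch from this file (matchKernelGuarded and the extra iend/jend parameters are illustrative, not part of the original):

// Bounds-guarded variant of myKernel: the grid may be padded past the search
// window, so each thread checks its (row, col) before writing.
__global__ void matchKernelGuarded(double* x, int im_step, int bl_step,
                                   unsigned char* im, unsigned char* bl,
                                   int bl_cols, int bl_rows, int stride,
                                   int iend, int jend) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= jend || row >= iend) return;
    x[im_step * row + col] = computeMatch(im, im_step, bl, bl_step,
                                          bl_cols, bl_rows, row, col, stride);
}

// Launch with fixed 16 x 16 tiles instead of one iend-tall block:
//   dim3 threads(16, 16);
//   dim3 blocks((jend + 15) / 16, (iend + 15) / 16);
//   matchKernelGuarded<<<blocks, threads>>>(ctabX, im_step, bl_step, cim, cbl,
//                                           bl_cols, bl_rows, stride, iend, jend);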
d8e9bded1de324c3b9e85c3366cf44e618148643.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, c, batch_idx; if (!channel_last) { /*NCHW*/ pw = index % output_width; ph = (index / output_width) % output_height; c = (index / output_width / output_height) % channels; batch_idx = index / output_width / output_height / channels; } else { /*NHWC*/ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; batch_idx = index / channels / output_width / output_height; } int hstart, hend; int wstart, wend; if (adaptive) { hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { hstart = ph * stride_height - padding_height; hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); wstart = pw * stride_width - padding_width; wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); } if (!channel_last) { input_data += (batch_idx * channels + c) * input_height * input_width; } else { input_data += batch_idx * input_height * input_width * channels; } T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { auto input_idx = channel_last ? (h * input_width + w) * channels + c : h * input_width + w; pool_process.compute(input_data[input_idx], &ele); } } int pool_size = (exclusive || adaptive) ? 
(hend - hstart) * (wend - wstart) : ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset, h_offset, offsetC, batch_idx; if (!channel_last) { /* NCHW */ w_offset = index % input_width + padding_width; h_offset = (index / input_width) % input_height + padding_height; offsetC = (index / input_width / input_height) % channels; batch_idx = index / input_width / input_height / channels; } else { /* NHWC */ offsetC = index % channels; w_offset = (index / channels) % input_width + padding_width; h_offset = (index / channels / input_width) % input_height + padding_height; batch_idx = index / channels / input_width / input_height; } int phstart, phend; int pwstart, pwend; if (adaptive) { phstart = AdaptStartIndex(h_offset, output_height, input_height); phend = AdaptEndIndex(h_offset, output_height, input_height); pwstart = AdaptStartIndex(w_offset, output_width, input_width); pwend = AdaptEndIndex(w_offset, output_width, input_width); } else { phstart = (h_offset < ksize_height) ? 0 : (h_offset - ksize_height) / stride_height + 1; pwstart = (w_offset < ksize_width) ? 0 : (w_offset - ksize_width) / stride_width + 1; phend = min(h_offset / stride_height + 1, output_height); pwend = min(w_offset / stride_width + 1, output_width); } T gradient = static_cast<T>(0.0); T input = input_data[index]; int output_stride; if (!channel_last) { output_stride = (batch_idx * channels + offsetC) * output_height * output_width; } else { output_stride = batch_idx * output_height * output_width * channels; } output_data += output_stride; output_grad += output_stride; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int pool_size; if (adaptive) { pool_size = static_cast<int>(ceil(static_cast<double>(input_height) / ksize_height)) * static_cast<int>( ceil(static_cast<double>(input_width) / ksize_width)); } else { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; } int output_sub_idx = channel_last ? 
(ph * output_width + pw) * channels + offsetC : ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, c, batch_idx; if (!channel_last) { /* NCHW */ pw = index % output_width; ph = (index / output_width) % output_height; c = (index / output_width / output_height) % channels; batch_idx = index / output_width / output_height / channels; } else { /* NHWC */ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; batch_idx = index / channels / output_width / output_height; } int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); int input_stride; if (!channel_last) { input_stride = (batch_idx * channels + c) * input_height * input_width; } else { input_stride = batch_idx * input_height * input_width * channels; } input_data += input_stride; input_grad += input_stride; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { int input_data_idx = channel_last ? 
(h * input_width + w) * channels + c : h * input_width + w; if (ele == input_data[input_data_idx]) { maxIndex = input_data_idx; stop = true; } } } if (maxIndex != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); } } } template <typename PoolProcess, typename T> void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()( const T* input, const std::vector<int>& input_shape, const std::vector<int>& output_shape, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_compute, bool exclusive, bool adaptive, T* output, hipStream_t stream) { const int batch_size = input_shape[0]; const int input_channels = input_shape[1]; const int input_height = input_shape[2]; const int input_width = input_shape[3]; const int output_channels = output_shape[1]; const int output_height = output_shape[2]; const int output_width = output_shape[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, stream, nthreads, input, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_compute, exclusive, adaptive, output); } /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, output_data); } void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? output->dims()[3] : output->dims()[1]; const int output_height = channel_last ? output->dims()[1] : output->dims()[2]; const int output_width = channel_last ? output->dims()[2] : output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, output_data, channel_last); } }; /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? output.dims()[3] : output.dims()[1]; const int output_height = channel_last ? output.dims()[1] : output.dims()[2]; const int output_width = channel_last ? 
output.dims()[2] : output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data, channel_last); } }; /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. */ template <typename T> class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, framework::Tensor* input_grad) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? 
input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? output.dims()[3] : output.dims()[1]; const int output_height = channel_last ? output.dims()[1] : output.dims()[2]; const int output_width = channel_last ? output.dims()[2] : output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data, channel_last); } }; template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>, float>; template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, paddle::platform::float16>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template class Pool2dFunctor< platform::CUDADeviceContext, paddle::operators::math::MaxPool<paddle::platform::float16>, paddle::platform::float16>; template class Pool2dFunctor< platform::CUDADeviceContext, paddle::operators::math::AvgPool<paddle::platform::float16>, paddle::platform::float16>; template class Pool2dGradFunctor< platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<paddle::platform::float16>, paddle::platform::float16>; template class Pool2dGradFunctor< platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<paddle::platform::float16>, paddle::platform::float16>; template <typename PoolProcess, typename T> __global__ void KernelPool3D( const int nthreads, const T* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int 
padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, pd, c, batch_idx; if (!channel_last) { pw = index % output_width; ph = (index / output_width) % output_height; pd = (index / output_width / output_height) % output_depth; c = (index / output_width / output_height / output_depth) % channels; batch_idx = index / output_width / output_height / output_depth / channels; } else { c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; pd = (index / channels / output_width / output_height) % output_depth; batch_idx = index / channels / output_width / output_height / output_depth; } int dstart, dend; int hstart, hend; int wstart, wend; if (adaptive) { dstart = AdaptStartIndex(pd, input_depth, output_depth); dend = AdaptEndIndex(pd, input_depth, output_depth); hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { dstart = pd * stride_depth - padding_depth; hstart = ph * stride_height - padding_height; wstart = pw * stride_width - padding_width; dend = min(dstart + ksize_depth, input_depth); hend = min(hstart + ksize_height, input_height); wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); } int input_data_stride; if (!channel_last) { /* NCDHW */ input_data_stride = (batch_idx * channels + c) * input_depth * input_height * input_width; } else { /* NDHWC */ input_data_stride = batch_idx * input_depth * input_height * input_width * channels; } input_data += input_data_stride; T ele = pool_process.initial(); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { auto input_data_idx = channel_last ? ((d * input_height + h) * input_width + w) * channels + c : (d * input_height + h) * input_width + w; pool_process.compute(input_data[input_data_idx], &ele); } } } int pool_size = (exclusive || adaptive) ? 
(dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset, h_offset, d_offset, offsetC, batch_idx; if (!channel_last) { /* "NCDHW" */ w_offset = index % input_width + padding_width; h_offset = (index / input_width) % input_height + padding_height; d_offset = (index / input_width / input_height) % input_depth + padding_depth; offsetC = (index / input_width / input_height / input_depth) % channels; batch_idx = index / input_width / input_height / input_depth / channels; } else { /* "NDHWC" */ offsetC = index % channels; w_offset = (index / channels) % input_width + padding_width; h_offset = (index / channels / input_width) % input_height + padding_height; d_offset = (index / channels / input_width / input_height) % input_depth + padding_depth; batch_idx = index / channels / input_width / input_height / input_depth; } int pdstart, pdend; int phstart, phend; int pwstart, pwend; if (adaptive) { pdstart = AdaptStartIndex(d_offset, output_depth, input_depth); pdend = AdaptEndIndex(d_offset, output_depth, input_depth); phstart = AdaptStartIndex(h_offset, output_height, input_height); phend = AdaptEndIndex(h_offset, output_height, input_height); pwstart = AdaptStartIndex(w_offset, output_width, input_width); pwend = AdaptEndIndex(w_offset, output_width, input_width); } else { pdstart = (d_offset < ksize_depth) ? 0 : (d_offset - ksize_depth) / stride_depth + 1; phstart = (h_offset < ksize_height) ? 0 : (h_offset - ksize_height) / stride_height + 1; pwstart = (w_offset < ksize_width) ? 
0 : (w_offset - ksize_width) / stride_width + 1; pdend = min((d_offset) / stride_depth + 1, output_depth); phend = min((h_offset) / stride_height + 1, output_height); pwend = min((w_offset) / stride_width + 1, output_width); } T gradient = static_cast<T>(0.0); T input = input_data[index]; int output_stride; if (!channel_last) { output_stride = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; } else { output_stride = batch_idx * output_depth * output_height * output_width * channels; } output_data += output_stride; output_grad += output_stride; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int pool_size; if (adaptive) { pool_size = static_cast<int>( ceil(static_cast<double>(input_depth) / ksize_depth)) * static_cast<int>( ceil(static_cast<double>(input_height) / ksize_height)) * static_cast<int>( ceil(static_cast<double>(input_width) / ksize_width)); } else { int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); pool_size = exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; } int output_sub_idx = channel_last ? ((pd * output_height + ph) * output_width + pw) * channels + offsetC : (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, pd, c, batch_idx; if (!channel_last) { /*NCDHW*/ pw = index % output_width; ph = (index / output_width) % output_height; pd = (index / output_width / output_height) % output_depth; c = (index / output_width / output_height / output_depth) % channels; batch_idx = index / output_width / output_height / output_depth / channels; } else { /*NDHWC*/ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; pd = (index / channels / output_width / output_height) % output_depth; batch_idx = index / channels / output_width / output_height / output_depth; } int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool 
stop = false; int maxIdx = -1; int input_stride; if (!channel_last) { input_stride = (batch_idx * channels + c) * input_depth * input_height * input_width; } else { input_stride = batch_idx * input_depth * input_height * input_width * channels; } input_data += input_stride; input_grad += input_stride; for (int d = dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { int input_data_idx = channel_last ? ((d * input_height + h) * input_width + w) * channels + c : (d * input_height + h) * input_width + w; if (ele == input_data[input_data_idx]) { stop = true; maxIdx = input_data_idx; } } } } if (maxIdx != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); } } } /* * Tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. */ template <typename PoolProcess, class T> class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, output_data); } void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? 
input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output->dims()[4] : output->dims()[1]; const int output_depth = channel_last ? output->dims()[1] : output->dims()[2]; const int output_height = channel_last ? output->dims()[2] : output->dims()[3]; const int output_width = channel_last ? output->dims()[3] : output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, output_data, channel_last); } }; /* * Tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. */ template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, 
ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output.dims()[4] : output.dims()[1]; const int output_depth = channel_last ? output.dims()[1] : output.dims()[2]; const int output_height = channel_last ? output.dims()[2] : output.dims()[3]; const int output_width = channel_last ? output.dims()[3] : output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data, channel_last); // add channel_last } }; /* * tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. 
*/ template <class T> class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, framework::Tensor* input_grad) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output.dims()[4] : output.dims()[1]; const int output_depth = channel_last ? output.dims()[1] : output.dims()[2]; const int output_height = channel_last ? output.dims()[2] : output.dims()[3]; const int output_width = channel_last ? 
output.dims()[3] : output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data, channel_last); // add channel_last } }; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, paddle::platform::float16>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template class Pool3dFunctor< platform::CUDADeviceContext, paddle::operators::math::MaxPool<paddle::platform::float16>, paddle::platform::float16>; template class Pool3dFunctor< platform::CUDADeviceContext, paddle::operators::math::AvgPool<paddle::platform::float16>, paddle::platform::float16>; template class Pool3dGradFunctor< platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<paddle::platform::float16>, paddle::platform::float16>; template class Pool3dGradFunctor< platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<paddle::platform::float16>, paddle::platform::float16>; template <typename T1, typename T2> __global__ void KernelMaxPool2dWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, bool adaptive, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = 
(index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart, hend; int wstart, wend; if (adaptive) { hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { hstart = ph * stride_height - padding_height; hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); wstart = pw * stride_width - padding_width; wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); } input_data += (batch_idx * channels + c) * input_height * input_width; T1 ele = -FLT_MAX; int max_index = -1; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * input_width + w; if (ele < input_data[input_index]) { max_index = input_index; ele = input_data[input_index]; } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool2DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, bool adaptive, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart, phend; int pwstart, pwend; if (adaptive) { phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 1) * output_width / input_width + 1, output_width); } else { phstart = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; pwstart = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; phend = min((h_offset + padding_height) / stride_height + 1, output_height); pwend = min((w_offset + padding_width) / stride_width + 1, output_width); } T1 gradient = 0; int input_current_featuremap_idx = h_offset * input_width + w_offset; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; mask_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_data[ph * output_width + pw] == input_current_featuremap_idx) gradient += output_grad[ph * output_width + pw]; } } input_grad[index] = gradient; } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2dWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, adaptive, output_data, mask_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_height = input_grad->dims()[2]; const int input_width = input_grad->dims()[3]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T2* mask_data = mask.data<T2>(); const T1* output_grad_data = output_grad.data<T1>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, adaptive, input_grad_data); } }; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class 
MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, bool adaptive, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart, dend; int hstart, hend; int wstart, wend; if (adaptive) { dstart = AdaptStartIndex(pd, input_depth, output_depth); dend = AdaptEndIndex(pd, input_depth, output_depth); hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { dstart = pd * stride_depth - padding_depth; hstart = ph * stride_height - padding_height; wstart = pw * stride_width - padding_width; dend = min(dstart + ksize_depth, input_depth); hend = min(hstart + ksize_height, input_height); wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); } T1 ele = -FLT_MAX; int max_index = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input_data[(d * input_height + h) * input_width + w]) { max_index = (d * input_height + h) * input_width + w; ele = input_data[max_index]; } } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, bool adaptive, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int d_offset = (index / input_width / input_height) % input_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart, pdend; int phstart, phend; int pwstart, pwend; if (adaptive) { pdstart = d_offset * output_depth / input_depth; pdend = min((d_offset + 1) * output_depth / input_depth + 1, output_depth); phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); 
pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 1) * output_width / input_width + 1, output_width); } else { pdstart = (d_offset + padding_depth < ksize_depth) ? 0 : (d_offset + padding_depth - ksize_depth) / stride_depth + 1; phstart = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; pwstart = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; pdend = min((d_offset + padding_depth) / stride_depth + 1, output_depth); phend = min((h_offset + padding_height) / stride_height + 1, output_height); pwend = min((w_offset + padding_width) / stride_width + 1, output_width); } T1 gradient = 0; int input_current_feature_map_idx = (d_offset * input_height + h_offset) * input_width + w_offset; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; mask += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask[(pd * output_height + ph) * output_width + pw] == input_current_feature_map_idx) gradient += output_grad[(pd * output_height + ph) * output_width + pw]; } } } input_grad[index] = gradient; } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. */ template <typename T1, typename T2> class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, adaptive, output_data, mask_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_depth = input_grad->dims()[2]; const int input_height = input_grad->dims()[3]; const int input_width = input_grad->dims()[4]; const int output_depth = output_grad.dims()[2]; const int output_height = output_grad.dims()[3]; const int output_width = output_grad.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* output_grad_data = output_grad.data<T1>(); const T2* mask_data = mask.data<T2>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, mask_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, adaptive, input_grad_data); } }; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; } // namespace math } // namespace operators } // namespace paddle
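All of the pooling kernels in this file share the same window arithmetic: in the non-adaptive path the window is [p * stride - padding, p * stride - padding + ksize) clamped to the input extent, and pool_size is either the clamped window area (when exclusive or adaptive is set) or the full ksize product; in the adaptive path the bounds come from AdaptStartIndex/AdaptEndIndex, which are declared in paddle/fluid/operators/math/pooling.h and are not shown here. The snippet below is a minimal host-side sketch of that arithmetic for the 2D average-pool case, intended only as an illustrative reference: AdaptStart, AdaptEnd and AvgPoolAt are local names invented for the sketch, and they assume AdaptStartIndex(p, in, out) == floor(p * in / out) and AdaptEndIndex(p, in, out) == ceil((p + 1) * in / out), which may differ in detail from the real Paddle helpers.

// Illustrative CPU reference for the average-pooling index math used by
// KernelPool2D above. Hedged assumptions: AdaptStart/AdaptEnd mimic the
// presumed floor/ceil behaviour of AdaptStartIndex/AdaptEndIndex; none of
// the names below exist in the Paddle sources.
#include <algorithm>
#include <cstdio>
#include <vector>

static int AdaptStart(int p, int in, int out) { return (p * in) / out; }
static int AdaptEnd(int p, int in, int out) { return ((p + 1) * in + out - 1) / out; }

// One output element of average pooling over a single HxW channel plane.
static float AvgPoolAt(const std::vector<float>& plane, int H, int W, int ph,
                       int pw, int ksize, int stride, int padding,
                       bool exclusive, bool adaptive, int out_h, int out_w) {
  int hstart, hend, wstart, wend;
  if (adaptive) {
    hstart = AdaptStart(ph, H, out_h); hend = AdaptEnd(ph, H, out_h);
    wstart = AdaptStart(pw, W, out_w); wend = AdaptEnd(pw, W, out_w);
  } else {
    hstart = std::max(ph * stride - padding, 0);
    hend   = std::min(ph * stride - padding + ksize, H);
    wstart = std::max(pw * stride - padding, 0);
    wend   = std::min(pw * stride - padding + ksize, W);
  }
  float sum = 0.f;
  for (int h = hstart; h < hend; ++h)
    for (int w = wstart; w < wend; ++w) sum += plane[h * W + w];
  // exclusive/adaptive: divide by the clamped window area; otherwise by k*k.
  int pool_size = (exclusive || adaptive) ? (hend - hstart) * (wend - wstart)
                                          : ksize * ksize;
  return sum / pool_size;
}

int main() {
  const int H = 4, W = 4;
  std::vector<float> plane(H * W);
  for (int i = 0; i < H * W; ++i) plane[i] = static_cast<float>(i);
  // 2x2 adaptive output: each output cell averages one 2x2 block of the input.
  for (int ph = 0; ph < 2; ++ph)
    for (int pw = 0; pw < 2; ++pw)
      std::printf("adaptive[%d][%d] = %.1f\n", ph, pw,
                  AvgPoolAt(plane, H, W, ph, pw, /*ksize=*/0, /*stride=*/0,
                            /*padding=*/0, /*exclusive=*/false,
                            /*adaptive=*/true, /*out_h=*/2, /*out_w=*/2));
  return 0;
}

The 3D kernels in this file follow the same pattern with an additional depth dimension, and the *Grad kernels invert it by iterating over all output windows that cover a given input element.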
d8e9bded1de324c3b9e85c3366cf44e618148643.cu
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, c, batch_idx; if (!channel_last) { /*NCHW*/ pw = index % output_width; ph = (index / output_width) % output_height; c = (index / output_width / output_height) % channels; batch_idx = index / output_width / output_height / channels; } else { /*NHWC*/ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; batch_idx = index / channels / output_width / output_height; } int hstart, hend; int wstart, wend; if (adaptive) { hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { hstart = ph * stride_height - padding_height; hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); wstart = pw * stride_width - padding_width; wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); } if (!channel_last) { input_data += (batch_idx * channels + c) * input_height * input_width; } else { input_data += batch_idx * input_height * input_width * channels; } T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { auto input_idx = channel_last ? (h * input_width + w) * channels + c : h * input_width + w; pool_process.compute(input_data[input_idx], &ele); } } int pool_size = (exclusive || adaptive) ? 
(hend - hstart) * (wend - wstart) : ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset, h_offset, offsetC, batch_idx; if (!channel_last) { /* NCHW */ w_offset = index % input_width + padding_width; h_offset = (index / input_width) % input_height + padding_height; offsetC = (index / input_width / input_height) % channels; batch_idx = index / input_width / input_height / channels; } else { /* NHWC */ offsetC = index % channels; w_offset = (index / channels) % input_width + padding_width; h_offset = (index / channels / input_width) % input_height + padding_height; batch_idx = index / channels / input_width / input_height; } int phstart, phend; int pwstart, pwend; if (adaptive) { phstart = AdaptStartIndex(h_offset, output_height, input_height); phend = AdaptEndIndex(h_offset, output_height, input_height); pwstart = AdaptStartIndex(w_offset, output_width, input_width); pwend = AdaptEndIndex(w_offset, output_width, input_width); } else { phstart = (h_offset < ksize_height) ? 0 : (h_offset - ksize_height) / stride_height + 1; pwstart = (w_offset < ksize_width) ? 0 : (w_offset - ksize_width) / stride_width + 1; phend = min(h_offset / stride_height + 1, output_height); pwend = min(w_offset / stride_width + 1, output_width); } T gradient = static_cast<T>(0.0); T input = input_data[index]; int output_stride; if (!channel_last) { output_stride = (batch_idx * channels + offsetC) * output_height * output_width; } else { output_stride = batch_idx * output_height * output_width * channels; } output_data += output_stride; output_grad += output_stride; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int pool_size; if (adaptive) { pool_size = static_cast<int>(ceil(static_cast<double>(input_height) / ksize_height)) * static_cast<int>( ceil(static_cast<double>(input_width) / ksize_width)); } else { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; } int output_sub_idx = channel_last ? 
(ph * output_width + pw) * channels + offsetC : ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, c, batch_idx; if (!channel_last) { /* NCHW */ pw = index % output_width; ph = (index / output_width) % output_height; c = (index / output_width / output_height) % channels; batch_idx = index / output_width / output_height / channels; } else { /* NHWC */ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; batch_idx = index / channels / output_width / output_height; } int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); int input_stride; if (!channel_last) { input_stride = (batch_idx * channels + c) * input_height * input_width; } else { input_stride = batch_idx * input_height * input_width * channels; } input_data += input_stride; input_grad += input_stride; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { int input_data_idx = channel_last ? 
(h * input_width + w) * channels + c : h * input_width + w; if (ele == input_data[input_data_idx]) { maxIndex = input_data_idx; stop = true; } } } if (maxIndex != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); } } } template <typename PoolProcess, typename T> void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()( const T* input, const std::vector<int>& input_shape, const std::vector<int>& output_shape, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_compute, bool exclusive, bool adaptive, T* output, cudaStream_t stream) { const int batch_size = input_shape[0]; const int input_channels = input_shape[1]; const int input_height = input_shape[2]; const int input_width = input_shape[3]; const int output_channels = output_shape[1]; const int output_height = output_shape[2]; const int output_width = output_shape[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2D<PoolProcess, T><<<grid, threads, 0, stream>>>( nthreads, input, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_compute, exclusive, adaptive, output); } /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, output_data); } void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? output->dims()[3] : output->dims()[1]; const int output_height = channel_last ? output->dims()[1] : output->dims()[2]; const int output_width = channel_last ? output->dims()[2] : output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, output_data, channel_last); } }; /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? output.dims()[3] : output.dims()[1]; const int output_height = channel_last ? output.dims()[1] : output.dims()[2]; const int output_width = channel_last ? 
output.dims()[2] : output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data, channel_last); } }; /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. */ template <typename T> class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, framework::Tensor* input_grad) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? 
output.dims()[3] : output.dims()[1]; const int output_height = channel_last ? output.dims()[1] : output.dims()[2]; const int output_width = channel_last ? output.dims()[2] : output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data, channel_last); } }; template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>, float>; template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, paddle::platform::float16>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template class Pool2dFunctor< platform::CUDADeviceContext, paddle::operators::math::MaxPool<paddle::platform::float16>, paddle::platform::float16>; template class Pool2dFunctor< platform::CUDADeviceContext, paddle::operators::math::AvgPool<paddle::platform::float16>, paddle::platform::float16>; template class Pool2dGradFunctor< platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<paddle::platform::float16>, paddle::platform::float16>; template class Pool2dGradFunctor< platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<paddle::platform::float16>, paddle::platform::float16>; template <typename PoolProcess, typename T> __global__ void KernelPool3D( const int nthreads, const T* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, 
bool adaptive, T* output_data, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, pd, c, batch_idx; if (!channel_last) { pw = index % output_width; ph = (index / output_width) % output_height; pd = (index / output_width / output_height) % output_depth; c = (index / output_width / output_height / output_depth) % channels; batch_idx = index / output_width / output_height / output_depth / channels; } else { c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; pd = (index / channels / output_width / output_height) % output_depth; batch_idx = index / channels / output_width / output_height / output_depth; } int dstart, dend; int hstart, hend; int wstart, wend; if (adaptive) { dstart = AdaptStartIndex(pd, input_depth, output_depth); dend = AdaptEndIndex(pd, input_depth, output_depth); hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { dstart = pd * stride_depth - padding_depth; hstart = ph * stride_height - padding_height; wstart = pw * stride_width - padding_width; dend = min(dstart + ksize_depth, input_depth); hend = min(hstart + ksize_height, input_height); wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); } int input_data_stride; if (!channel_last) { /* NCDHW */ input_data_stride = (batch_idx * channels + c) * input_depth * input_height * input_width; } else { /* NDHWC */ input_data_stride = batch_idx * input_depth * input_height * input_width * channels; } input_data += input_data_stride; T ele = pool_process.initial(); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { auto input_data_idx = channel_last ? ((d * input_height + h) * input_width + w) * channels + c : (d * input_height + h) * input_width + w; pool_process.compute(input_data[input_data_idx], &ele); } } } int pool_size = (exclusive || adaptive) ? 
(dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset, h_offset, d_offset, offsetC, batch_idx; if (!channel_last) { /* "NCDHW" */ w_offset = index % input_width + padding_width; h_offset = (index / input_width) % input_height + padding_height; d_offset = (index / input_width / input_height) % input_depth + padding_depth; offsetC = (index / input_width / input_height / input_depth) % channels; batch_idx = index / input_width / input_height / input_depth / channels; } else { /* "NDHWC" */ offsetC = index % channels; w_offset = (index / channels) % input_width + padding_width; h_offset = (index / channels / input_width) % input_height + padding_height; d_offset = (index / channels / input_width / input_height) % input_depth + padding_depth; batch_idx = index / channels / input_width / input_height / input_depth; } int pdstart, pdend; int phstart, phend; int pwstart, pwend; if (adaptive) { pdstart = AdaptStartIndex(d_offset, output_depth, input_depth); pdend = AdaptEndIndex(d_offset, output_depth, input_depth); phstart = AdaptStartIndex(h_offset, output_height, input_height); phend = AdaptEndIndex(h_offset, output_height, input_height); pwstart = AdaptStartIndex(w_offset, output_width, input_width); pwend = AdaptEndIndex(w_offset, output_width, input_width); } else { pdstart = (d_offset < ksize_depth) ? 0 : (d_offset - ksize_depth) / stride_depth + 1; phstart = (h_offset < ksize_height) ? 0 : (h_offset - ksize_height) / stride_height + 1; pwstart = (w_offset < ksize_width) ? 
0 : (w_offset - ksize_width) / stride_width + 1; pdend = min((d_offset) / stride_depth + 1, output_depth); phend = min((h_offset) / stride_height + 1, output_height); pwend = min((w_offset) / stride_width + 1, output_width); } T gradient = static_cast<T>(0.0); T input = input_data[index]; int output_stride; if (!channel_last) { output_stride = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; } else { output_stride = batch_idx * output_depth * output_height * output_width * channels; } output_data += output_stride; output_grad += output_stride; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int pool_size; if (adaptive) { pool_size = static_cast<int>( ceil(static_cast<double>(input_depth) / ksize_depth)) * static_cast<int>( ceil(static_cast<double>(input_height) / ksize_height)) * static_cast<int>( ceil(static_cast<double>(input_width) / ksize_width)); } else { int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); pool_size = exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; } int output_sub_idx = channel_last ? ((pd * output_height + ph) * output_width + pw) * channels + offsetC : (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, pd, c, batch_idx; if (!channel_last) { /*NCDHW*/ pw = index % output_width; ph = (index / output_width) % output_height; pd = (index / output_width / output_height) % output_depth; c = (index / output_width / output_height / output_depth) % channels; batch_idx = index / output_width / output_height / output_depth / channels; } else { /*NDHWC*/ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; pd = (index / channels / output_width / output_height) % output_depth; batch_idx = index / channels / output_width / output_height / output_depth; } int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool 
stop = false; int maxIdx = -1; int input_stride; if (!channel_last) { input_stride = (batch_idx * channels + c) * input_depth * input_height * input_width; } else { input_stride = batch_idx * input_depth * input_height * input_width * channels; } input_data += input_stride; input_grad += input_stride; for (int d = dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { int input_data_idx = channel_last ? ((d * input_height + h) * input_width + w) * channels + c : (d * input_height + h) * input_width + w; if (ele == input_data[input_data_idx]) { stop = true; maxIdx = input_data_idx; } } } } if (maxIdx != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); } } } /* * Tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. */ template <typename PoolProcess, class T> class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, output_data); } void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? 
input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output->dims()[4] : output->dims()[1]; const int output_depth = channel_last ? output->dims()[1] : output->dims()[2]; const int output_height = channel_last ? output->dims()[2] : output->dims()[3]; const int output_width = channel_last ? output->dims()[3] : output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, output_data, channel_last); } }; /* * Tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. */ template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, 
padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output.dims()[4] : output.dims()[1]; const int output_depth = channel_last ? output.dims()[1] : output.dims()[2]; const int output_height = channel_last ? output.dims()[2] : output.dims()[3]; const int output_width = channel_last ? output.dims()[3] : output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data, channel_last); // add channel_last } }; /* * tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. 
*/ template <class T> class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, framework::Tensor* input_grad) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output.dims()[4] : output.dims()[1]; const int output_depth = channel_last ? output.dims()[1] : output.dims()[2]; const int output_height = channel_last ? output.dims()[2] : output.dims()[3]; const int output_width = channel_last ? 
output.dims()[3] : output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data, channel_last); // add channel_last } }; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, paddle::platform::float16>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template class Pool3dFunctor< platform::CUDADeviceContext, paddle::operators::math::MaxPool<paddle::platform::float16>, paddle::platform::float16>; template class Pool3dFunctor< platform::CUDADeviceContext, paddle::operators::math::AvgPool<paddle::platform::float16>, paddle::platform::float16>; template class Pool3dGradFunctor< platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<paddle::platform::float16>, paddle::platform::float16>; template class Pool3dGradFunctor< platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<paddle::platform::float16>, paddle::platform::float16>; template <typename T1, typename T2> __global__ void KernelMaxPool2dWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, bool adaptive, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / 
output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart, hend; int wstart, wend; if (adaptive) { hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { hstart = ph * stride_height - padding_height; hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); wstart = pw * stride_width - padding_width; wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); } input_data += (batch_idx * channels + c) * input_height * input_width; T1 ele = -FLT_MAX; int max_index = -1; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * input_width + w; if (ele < input_data[input_index]) { max_index = input_index; ele = input_data[input_index]; } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool2DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, bool adaptive, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart, phend; int pwstart, pwend; if (adaptive) { phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 1) * output_width / input_width + 1, output_width); } else { phstart = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; pwstart = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; phend = min((h_offset + padding_height) / stride_height + 1, output_height); pwend = min((w_offset + padding_width) / stride_width + 1, output_width); } T1 gradient = 0; int input_current_featuremap_idx = h_offset * input_width + w_offset; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; mask_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_data[ph * output_width + pw] == input_current_featuremap_idx) gradient += output_grad[ph * output_width + pw]; } } input_grad[index] = gradient; } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2dWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, adaptive, output_data, mask_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_height = input_grad->dims()[2]; const int input_width = input_grad->dims()[3]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T2* mask_data = mask.data<T2>(); const T1* output_grad_data = output_grad.data<T1>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, adaptive, input_grad_data); } }; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; 
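// Worked example of the adaptive-window index math used throughout this file
// (a sketch; it assumes the AdaptStartIndex / AdaptEndIndex helpers pulled in
// via pooling.h follow the usual floor/ceil formulation):
//
//   AdaptStartIndex(p, in, out) = floor(p * in / out)
//   AdaptEndIndex(p, in, out)   = ceil((p + 1) * in / out)
//
// For input_height = 5 and output_height = 3 the three output rows pool over
// input rows [0, 2), [1, 4) and [3, 5): every input row is covered and
// adjacent windows may overlap by one row. Because the window size varies per
// output element, the forward kernels compute pool_size as
// (hend - hstart) * (wend - wstart) whenever adaptive is true, instead of
// using ksize_height * ksize_width.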
template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, bool adaptive, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart, dend; int hstart, hend; int wstart, wend; if (adaptive) { dstart = AdaptStartIndex(pd, input_depth, output_depth); dend = AdaptEndIndex(pd, input_depth, output_depth); hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { dstart = pd * stride_depth - padding_depth; hstart = ph * stride_height - padding_height; wstart = pw * stride_width - padding_width; dend = min(dstart + ksize_depth, input_depth); hend = min(hstart + ksize_height, input_height); wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); } T1 ele = -FLT_MAX; int max_index = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input_data[(d * input_height + h) * input_width + w]) { max_index = (d * input_height + h) * input_width + w; ele = input_data[max_index]; } } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, bool adaptive, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int d_offset = (index / input_width / input_height) % input_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart, pdend; int phstart, phend; int pwstart, pwend; if (adaptive) { pdstart = d_offset * output_depth / input_depth; pdend = min((d_offset + 1) * output_depth / input_depth + 1, output_depth); phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 
1) * output_width / input_width + 1, output_width); } else { pdstart = (d_offset + padding_depth < ksize_depth) ? 0 : (d_offset + padding_depth - ksize_depth) / stride_depth + 1; phstart = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; pwstart = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; pdend = min((d_offset + padding_depth) / stride_depth + 1, output_depth); phend = min((h_offset + padding_height) / stride_height + 1, output_height); pwend = min((w_offset + padding_width) / stride_width + 1, output_width); } T1 gradient = 0; int input_current_feature_map_idx = (d_offset * input_height + h_offset) * input_width + w_offset; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; mask += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask[(pd * output_height + ph) * output_width + pw] == input_current_feature_map_idx) gradient += output_grad[(pd * output_height + ph) * output_width + pw]; } } } input_grad[index] = gradient; } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. */ template <typename T1, typename T2> class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, adaptive, output_data, mask_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_depth = input_grad->dims()[2]; const int input_height = input_grad->dims()[3]; const int input_width = input_grad->dims()[4]; const int output_depth = output_grad.dims()[2]; const int output_height = output_grad.dims()[3]; const int output_width = output_grad.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* output_grad_data = output_grad.data<T1>(); const T2* mask_data = mask.data<T2>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, mask_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, adaptive, input_grad_data); } }; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; } // namespace math } // namespace operators } // namespace paddle
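Note on the pooling kernels above: each thread owns one output element and recovers its NCDHW coordinates from the flat index by repeated division and modulo. The stand-alone helper below is only an illustrative restatement of that decomposition; the function name is not part of the Paddle source.

// Sketch only: decompose a flat NCDHW index the same way KernelMaxPool3DWithIdx does.
__host__ __device__ inline void decompose_ncdhw(int index, int channels, int depth,
                                                int height, int width,
                                                int* n, int* c, int* d, int* h, int* w) {
    *w = index % width;                                // fastest-varying dimension
    *h = (index / width) % height;
    *d = (index / width / height) % depth;
    *c = (index / width / height / depth) % channels;
    *n = index / width / height / depth / channels;    // batch index
}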
b0c8a40506d0857bf222e6d5af4a5f8e28e6ae52.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fstream> #include <sstream> #include <cstdlib> #include <algorithm> #include <ctime> #include <thrust/device_vector.h> #include "features_calculate.hpp" #include "matrix_mul.cuh" const int _c_fea = 2; features_calculate::features_calculate(std::string& u_file, int u_size, std::string& i_file, int i_size, std::string& in_f_list, int f_size) : features_size(f_size), items_size(i_size) { std::cerr<< "Read user's matrix: " << std::endl; users.assign(u_size * features_size, 0); read_matrix(u_file, users, u_id2pos, u_pos2id); std::cerr<< "Read items's matrix: " << std::endl; items.assign(i_size * features_size, 0); read_matrix(i_file, items, i_id2pos, i_pos2id); std::cerr<< "Read cluster matrix: " << std::endl; read_data_template(in_f_list); std::cerr<< "Normalize items: " << std::endl; clock_t time = clock(); normalize_items(); hipDeviceSynchronize(); time = clock() - time; std::cerr<< "Normalize time: " << (float)time / CLOCKS_PER_SEC << std::endl; } void features_calculate::read_matrix(const std::string& file_name, std::vector<float>& matrix, std::vector<int>& ids, std::vector<int>& pos) { std::ifstream m_stream(file_name.c_str()); std::string line; char const tab_delim = '\t'; size_t i=0; while ( getline(m_stream, line) && ((i + 1) * features_size - 1 < matrix.size())) { std::istringstream line_stream(line); std::string value; getline(line_stream, value, tab_delim); ids.push_back(atoi(value.c_str())); for (int j = 0; j < features_size; j++) { getline(line_stream, value, tab_delim); matrix[i * features_size + j] = (float)atof(value.c_str()); } if( i % 10000 == 0) std::cerr << i << "\r"; i++; } m_stream.close(); int max_id = *std::max_element(ids.begin(), ids.end()); //pos.assign(ids.size(), 0); pos.assign(max_id, 0); for( i=0; i < ids.size(); i++) { pos[ids[i]] = i; } } void features_calculate::read_data_template(const std::string& file_name ) { std::ifstream m_stream(file_name.c_str()); std::string line; char const tab_delim = '\t'; size_t i=0; while ( getline(m_stream, line) && (i < items_size)) { std::istringstream line_stream(line); std::string value; getline(line_stream, value, tab_delim); int user = atoi(value.c_str()); getline(line_stream, value, tab_delim); int rgroup = atoi(value.c_str()); getline(line_stream, value, tab_delim); int group = atoi(value.c_str()); __key k = std::make_pair(user, rgroup); __value::iterator it = data[k].find(group); if( it == data[k].end() ) { data[k][group].assign(_c_fea, 0); } if( i % 10000 == 0) std::cerr << i << "\r"; i++; } } void features_calculate::compute_features( ) { std::map<__key, __value >::iterator it = data.begin(); for( ; it != data.end() ; ++it) { int user = (*it).first.first; int rgroup = (*it).first.second; __value& m = (*it).second; __value::iterator git = m.begin(); for( ; git != m.end() ; ++git) { int group = (*git).first; (*git).second[0] = calculate_p(user, group); (*git).second[1] = calculate_sim(rgroup, group); } } } float features_calculate::calculate_p( int u_idx, int group_idx) { float sum = 0; for (int j = 0; j < features_size; j++) { if ((u_idx * features_size + j < users.size()) && (group_idx * features_size + j < items.size())) sum += users[ u_idx * features_size + j] * items[group_idx * features_size + j]; } return sum; } float features_calculate::calculate_sim( int rgroup_idx, int group_idx) { float sum = 0; for (int j = 0; j < features_size; j++) { if ((rgroup_idx * features_size + j < norm_items.size()) && (group_idx * 
features_size + j < norm_items.size())) sum += norm_items[ rgroup_idx * features_size + j] * norm_items[group_idx * features_size + j]; } return sum; } /// void features_calculate::normalize_items( ) { hipSetDevice(0); int part1_size = items_size / 2; int part2_size = items_size - part1_size; norm_items = items; thrust::device_vector<float> c_device_part1(norm_items.begin(), norm_items.begin() + part1_size * features_size); dim3 block1(NORM_BLOCK_SIZE, 1); dim3 grid1(1 + part1_size / NORM_BLOCK_SIZE, 1); hipLaunchKernelGGL(( euclidian_normalize), dim3(grid1), dim3(block1), 0, 0, thrust::raw_pointer_cast(&c_device_part1[0]), part1_size, features_size); hipSetDevice(1); thrust::device_vector<float> c_device_part2(norm_items.begin() + part1_size * features_size, norm_items.end()); dim3 block2(NORM_BLOCK_SIZE, 1); dim3 grid2(1 + part2_size / NORM_BLOCK_SIZE, 1); hipLaunchKernelGGL(( euclidian_normalize), dim3(grid2), dim3(block2), 0, 0, thrust::raw_pointer_cast(&c_device_part2[0]), part2_size, features_size); hipDeviceSynchronize(); if ( hipSuccess != hipGetLastError() ) std::cerr << "features_calculate::normalize_items:: !WARN - Cuda error : " << hipGetLastError() << std::endl; thrust::copy( c_device_part2.begin(), c_device_part2.end(), norm_items.begin() + part1_size * features_size ); hipSetDevice(0); hipDeviceSynchronize(); if ( hipSuccess != hipGetLastError() ) std::cerr << "features_calculate::normalize_items:: !WARN - Cuda error : " << hipGetLastError() << std::endl; thrust::copy( c_device_part1.begin(), c_device_part1.end(), norm_items.begin() ); /*thrust::device_vector<float> c_device(norm_items); dim3 block(NORM_BLOCK_SIZE, 1); dim3 grid(1 + i_id2pos.size() / NORM_BLOCK_SIZE, 1); euclidian_normalize<<<grid, block>>>(thrust::raw_pointer_cast(&c_device[0]), i_id2pos.size(), features_size); hipDeviceSynchronize(); if ( hipSuccess != hipGetLastError() ) std::cerr << "features_calculate::normalize_items:: !WARN - Cuda error : " << hipGetLastError() << std::endl; thrust::copy( c_device.begin(),c_device.end(), norm_items.begin() ); for (int i = 0; i < 5; i++) { std::cerr << norm_items[i] << " "; } for (int i = items_size - 1; i > items_size - 6; i--) { std::cerr << norm_items[i] << " "; } std::cerr << std::endl; */ } void features_calculate::serialize( std::ostream& out) { std::map<__key, __value >::iterator it = data.begin(); for( ; it != data.end() ; ++it) { int user = (*it).first.first; int rgroup = (*it).first.second; __value& m = (*it).second; __value::iterator git = m.begin(); for( ; git != m.end() ; ++git) { int group = (*git).first; out << user << "\t" << rgroup << "\t" << group << "\t" << (*git).second[0] << "\t" << (*git).second[1] << std::endl; } } }
b0c8a40506d0857bf222e6d5af4a5f8e28e6ae52.cu
#include <fstream> #include <sstream> #include <cstdlib> #include <algorithm> #include <ctime> #include <thrust/device_vector.h> #include "features_calculate.hpp" #include "matrix_mul.cuh" const int _c_fea = 2; features_calculate::features_calculate(std::string& u_file, int u_size, std::string& i_file, int i_size, std::string& in_f_list, int f_size) : features_size(f_size), items_size(i_size) { std::cerr<< "Read user's matrix: " << std::endl; users.assign(u_size * features_size, 0); read_matrix(u_file, users, u_id2pos, u_pos2id); std::cerr<< "Read items's matrix: " << std::endl; items.assign(i_size * features_size, 0); read_matrix(i_file, items, i_id2pos, i_pos2id); std::cerr<< "Read cluster matrix: " << std::endl; read_data_template(in_f_list); std::cerr<< "Normalize items: " << std::endl; clock_t time = clock(); normalize_items(); cudaDeviceSynchronize(); time = clock() - time; std::cerr<< "Normalize time: " << (float)time / CLOCKS_PER_SEC << std::endl; } void features_calculate::read_matrix(const std::string& file_name, std::vector<float>& matrix, std::vector<int>& ids, std::vector<int>& pos) { std::ifstream m_stream(file_name.c_str()); std::string line; char const tab_delim = '\t'; size_t i=0; while ( getline(m_stream, line) && ((i + 1) * features_size - 1 < matrix.size())) { std::istringstream line_stream(line); std::string value; getline(line_stream, value, tab_delim); ids.push_back(atoi(value.c_str())); for (int j = 0; j < features_size; j++) { getline(line_stream, value, tab_delim); matrix[i * features_size + j] = (float)atof(value.c_str()); } if( i % 10000 == 0) std::cerr << i << "\r"; i++; } m_stream.close(); int max_id = *std::max_element(ids.begin(), ids.end()); //pos.assign(ids.size(), 0); pos.assign(max_id, 0); for( i=0; i < ids.size(); i++) { pos[ids[i]] = i; } } void features_calculate::read_data_template(const std::string& file_name ) { std::ifstream m_stream(file_name.c_str()); std::string line; char const tab_delim = '\t'; size_t i=0; while ( getline(m_stream, line) && (i < items_size)) { std::istringstream line_stream(line); std::string value; getline(line_stream, value, tab_delim); int user = atoi(value.c_str()); getline(line_stream, value, tab_delim); int rgroup = atoi(value.c_str()); getline(line_stream, value, tab_delim); int group = atoi(value.c_str()); __key k = std::make_pair(user, rgroup); __value::iterator it = data[k].find(group); if( it == data[k].end() ) { data[k][group].assign(_c_fea, 0); } if( i % 10000 == 0) std::cerr << i << "\r"; i++; } } void features_calculate::compute_features( ) { std::map<__key, __value >::iterator it = data.begin(); for( ; it != data.end() ; ++it) { int user = (*it).first.first; int rgroup = (*it).first.second; __value& m = (*it).second; __value::iterator git = m.begin(); for( ; git != m.end() ; ++git) { int group = (*git).first; (*git).second[0] = calculate_p(user, group); (*git).second[1] = calculate_sim(rgroup, group); } } } float features_calculate::calculate_p( int u_idx, int group_idx) { float sum = 0; for (int j = 0; j < features_size; j++) { if ((u_idx * features_size + j < users.size()) && (group_idx * features_size + j < items.size())) sum += users[ u_idx * features_size + j] * items[group_idx * features_size + j]; } return sum; } float features_calculate::calculate_sim( int rgroup_idx, int group_idx) { float sum = 0; for (int j = 0; j < features_size; j++) { if ((rgroup_idx * features_size + j < norm_items.size()) && (group_idx * features_size + j < norm_items.size())) sum += norm_items[ rgroup_idx * features_size + j] 
* norm_items[group_idx * features_size + j]; } return sum; } /// void features_calculate::normalize_items( ) { cudaSetDevice(0); int part1_size = items_size / 2; int part2_size = items_size - part1_size; norm_items = items; thrust::device_vector<float> c_device_part1(norm_items.begin(), norm_items.begin() + part1_size * features_size); dim3 block1(NORM_BLOCK_SIZE, 1); dim3 grid1(1 + part1_size / NORM_BLOCK_SIZE, 1); euclidian_normalize<<<grid1, block1>>>(thrust::raw_pointer_cast(&c_device_part1[0]), part1_size, features_size); cudaSetDevice(1); thrust::device_vector<float> c_device_part2(norm_items.begin() + part1_size * features_size, norm_items.end()); dim3 block2(NORM_BLOCK_SIZE, 1); dim3 grid2(1 + part2_size / NORM_BLOCK_SIZE, 1); euclidian_normalize<<<grid2, block2>>>(thrust::raw_pointer_cast(&c_device_part2[0]), part2_size, features_size); cudaDeviceSynchronize(); if ( cudaSuccess != cudaGetLastError() ) std::cerr << "features_calculate::normalize_items:: !WARN - Cuda error : " << cudaGetLastError() << std::endl; thrust::copy( c_device_part2.begin(), c_device_part2.end(), norm_items.begin() + part1_size * features_size ); cudaSetDevice(0); cudaDeviceSynchronize(); if ( cudaSuccess != cudaGetLastError() ) std::cerr << "features_calculate::normalize_items:: !WARN - Cuda error : " << cudaGetLastError() << std::endl; thrust::copy( c_device_part1.begin(), c_device_part1.end(), norm_items.begin() ); /*thrust::device_vector<float> c_device(norm_items); dim3 block(NORM_BLOCK_SIZE, 1); dim3 grid(1 + i_id2pos.size() / NORM_BLOCK_SIZE, 1); euclidian_normalize<<<grid, block>>>(thrust::raw_pointer_cast(&c_device[0]), i_id2pos.size(), features_size); cudaDeviceSynchronize(); if ( cudaSuccess != cudaGetLastError() ) std::cerr << "features_calculate::normalize_items:: !WARN - Cuda error : " << cudaGetLastError() << std::endl; thrust::copy( c_device.begin(),c_device.end(), norm_items.begin() ); for (int i = 0; i < 5; i++) { std::cerr << norm_items[i] << " "; } for (int i = items_size - 1; i > items_size - 6; i--) { std::cerr << norm_items[i] << " "; } std::cerr << std::endl; */ } void features_calculate::serialize( std::ostream& out) { std::map<__key, __value >::iterator it = data.begin(); for( ; it != data.end() ; ++it) { int user = (*it).first.first; int rgroup = (*it).first.second; __value& m = (*it).second; __value::iterator git = m.begin(); for( ; git != m.end() ; ++git) { int group = (*git).first; out << user << "\t" << rgroup << "\t" << group << "\t" << (*git).second[0] << "\t" << (*git).second[1] << std::endl; } } }
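Note on normalize_items in both files above: hipGetLastError()/cudaGetLastError() is called a second time inside the stream-insertion expression, but the first call already cleared the sticky error state, so the printed code is always the success value rather than the real error. A minimal sketch of the usual pattern, capturing the error once (the helper name is illustrative, not from this source):

#include <cstdio>
#include <cuda_runtime.h>

// Sketch: read the sticky error exactly once so the printed code matches the test.
inline void check_last_error(const char* where) {
    cudaError_t err = cudaGetLastError();   // reads and clears the sticky error state
    if (err != cudaSuccess)
        std::fprintf(stderr, "%s: CUDA error %d (%s)\n",
                     where, static_cast<int>(err), cudaGetErrorString(err));
}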
73426e41e6e2178343e03c6773ab5c9966198ad0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "header.h" #include "util.h" global_const_t h_const; global_const_t *d_const_ptr; __constant__ global_const_t d_const; __constant__ kernel_const_t kc; int main(int argc, char *argv[]){ //! //! Variable Declaration //! char *dest; FILE *fin, *fout; int i, l; double dt, total_time; double ****U, ****Unew, ****Q, ****D, ****F; double *d_U, *d_Unew, *d_Q, *d_D, *d_F; hipGetSymbolAddress((void **) &d_const_ptr, d_const); hipGetSymbolAddress((void **) &(h_const.kc), kc); //! //! Prepare Global Constants //! read_configurations(h_const, argc, argv); copy_configurations(h_const, d_const_ptr); //! //! Allocation //! allocate_variables(U, Unew, Q, D, F, d_U, d_Unew, d_Q, d_D, d_F); //! //! Advance //! fin = fopen(h_const.input_file_name, "r"); FOR(l, 0, h_const.nc) read_3D(fin, U, h_const.dim_g, l); fclose(fin); // init_data(h_const, U); total_time = -get_time(); gpu_copy_from_host_4D(d_U, U, h_const.pitch_g, h_const.nc); FOR(i, 0, h_const.nsteps) gpu_advance(h_const, d_const_ptr, d_U, d_Unew, d_Q, d_D, d_F, dt); gpu_copy_to_host_4D(U, d_U, h_const.pitch_g, h_const.nc); total_time += get_time(); printf("Total time: %lf\n", total_time); fout = fopen("output", "w"); fprintf(fout, "%d\n", h_const.nc); fprintf(fout, "%d %d %d\n", h_const.dim_g[0], h_const.dim_g[1], h_const.dim_g[2]); print_4D(fout, U, h_const.dim_g, h_const.nc); fclose(fout); //! //! Free Allocations //! free_variables(U, Unew, Q, D, F, d_U, d_Unew, d_Q, d_D, d_F); return 0; }
73426e41e6e2178343e03c6773ab5c9966198ad0.cu
#include <stdio.h> #include <cuda.h> #include "header.h" #include "util.h" global_const_t h_const; global_const_t *d_const_ptr; __constant__ global_const_t d_const; __constant__ kernel_const_t kc; int main(int argc, char *argv[]){ //! //! Variable Declaration //! char *dest; FILE *fin, *fout; int i, l; double dt, total_time; double ****U, ****Unew, ****Q, ****D, ****F; double *d_U, *d_Unew, *d_Q, *d_D, *d_F; cudaGetSymbolAddress((void **) &d_const_ptr, d_const); cudaGetSymbolAddress((void **) &(h_const.kc), kc); //! //! Prepare Global Constants //! read_configurations(h_const, argc, argv); copy_configurations(h_const, d_const_ptr); //! //! Allocation //! allocate_variables(U, Unew, Q, D, F, d_U, d_Unew, d_Q, d_D, d_F); //! //! Advance //! fin = fopen(h_const.input_file_name, "r"); FOR(l, 0, h_const.nc) read_3D(fin, U, h_const.dim_g, l); fclose(fin); // init_data(h_const, U); total_time = -get_time(); gpu_copy_from_host_4D(d_U, U, h_const.pitch_g, h_const.nc); FOR(i, 0, h_const.nsteps) gpu_advance(h_const, d_const_ptr, d_U, d_Unew, d_Q, d_D, d_F, dt); gpu_copy_to_host_4D(U, d_U, h_const.pitch_g, h_const.nc); total_time += get_time(); printf("Total time: %lf\n", total_time); fout = fopen("output", "w"); fprintf(fout, "%d\n", h_const.nc); fprintf(fout, "%d %d %d\n", h_const.dim_g[0], h_const.dim_g[1], h_const.dim_g[2]); print_4D(fout, U, h_const.dim_g, h_const.nc); fclose(fout); //! //! Free Allocations //! free_variables(U, Unew, Q, D, F, d_U, d_Unew, d_Q, d_D, d_F); return 0; }
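Note on this pair: the host obtains device pointers to __constant__ objects with cudaGetSymbolAddress/hipGetSymbolAddress so they can be stored in ordinary host-side structures and handed to kernels. A self-contained sketch of that call with illustrative names (the real global_const_t/kernel_const_t types live in header.h and are not shown here):

#include <cuda_runtime.h>

struct params_t { int nsteps; double dt; };   // illustrative stand-in type
__constant__ params_t g_params;               // lives in constant memory

int main() {
    params_t h = {10, 1e-3};
    cudaMemcpyToSymbol(g_params, &h, sizeof(h));   // upload by symbol
    void* dev_ptr = nullptr;
    cudaGetSymbolAddress(&dev_ptr, g_params);      // device address of the constant symbol
    // dev_ptr can now be passed to kernels or stored in host-side config structs.
    return 0;
}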
124b06c94f1baf2b4857c6eafab29766e8016c75.hip
// !!! This is a file automatically generated by hipify!!! /* Collatz code for CS 4380 / CS 5351 Copyright (c) 2021 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source or binary form, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher Bryan Valenzuela Connor Steed */ #include <cstdio> #include <hip/hip_runtime.h> #include <algorithm> #include <sys/time.h> static const int ThreadsPerBlock = 512; static __global__ void collatz(const long start, const long bound, const long step, int* const maxlen) { const long i = threadIdx.x + blockIdx.x * (long)blockDim.x + start; if( (i - start) % step == 0) if (i < bound){ long val = i; int len = 1; while (val != 1) { len++; if ((val % 2) == 0) { val /= 2; // even } else { val = 3 * val + 1; // odd } } atomicMax(maxlen, len); } } static void CheckCuda() { hipError_t e; hipDeviceSynchronize(); if (hipSuccess != (e = hipGetLastError())) { fprintf(stderr, "CUDA error %d: %s\n", e, hipGetErrorString(e)); exit(-1); } } int main(int argc, char *argv[]) { printf("Collatz v1.5\n"); // check command line if (argc != 4) {fprintf(stderr, "USAGE: %s start bound step\n", argv[0]); exit(-1);} const long start = atol(argv[1]); const long bound = atol(argv[2]); const long step = atol(argv[3]); if (start < 1) {fprintf(stderr, "ERROR: start value must be at least 1\n"); exit(-1);} if (bound <= start) {fprintf(stderr, "ERROR: bound must be larger than start\n"); exit(-1);} if (step < 1) {fprintf(stderr, "ERROR: step size must be at least 1\n"); exit(-1);} printf("start value: %ld\n", start); printf("upper bound: %ld\n", bound); printf("step size: %ld\n", step); int maxlen = 0; int size = sizeof(int); int* d_maxlen; if (hipSuccess != hipMalloc((void **)&d_maxlen, size)) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);} if (hipSuccess != hipMemcpy(d_maxlen, &maxlen, sizeof(int), hipMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);} // start time timeval beg, end; gettimeofday(&beg, NULL); // execute timed code hipLaunchKernelGGL(( collatz), dim3((bound + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, start, bound, step, d_maxlen); hipDeviceSynchronize(); // end time gettimeofday(&end, NULL); const double runtime = end.tv_sec - beg.tv_sec + (end.tv_usec - beg.tv_usec) / 1000000.0; printf("compute time: %.6f s\n", runtime); CheckCuda(); if (hipSuccess != hipMemcpy(&maxlen, d_maxlen, sizeof(int), hipMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n"); exit(-1);} // print result printf("maximum sequence length: %d elements\n", maxlen); hipFree(d_maxlen); 
return 0; }
124b06c94f1baf2b4857c6eafab29766e8016c75.cu
/* Collatz code for CS 4380 / CS 5351 Copyright (c) 2021 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source or binary form, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher Bryan Valenzuela Connor Steed */ #include <cstdio> #include <cuda.h> #include <algorithm> #include <sys/time.h> static const int ThreadsPerBlock = 512; static __global__ void collatz(const long start, const long bound, const long step, int* const maxlen) { const long i = threadIdx.x + blockIdx.x * (long)blockDim.x + start; if( (i - start) % step == 0) if (i < bound){ long val = i; int len = 1; while (val != 1) { len++; if ((val % 2) == 0) { val /= 2; // even } else { val = 3 * val + 1; // odd } } atomicMax(maxlen, len); } } static void CheckCuda() { cudaError_t e; cudaDeviceSynchronize(); if (cudaSuccess != (e = cudaGetLastError())) { fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e)); exit(-1); } } int main(int argc, char *argv[]) { printf("Collatz v1.5\n"); // check command line if (argc != 4) {fprintf(stderr, "USAGE: %s start bound step\n", argv[0]); exit(-1);} const long start = atol(argv[1]); const long bound = atol(argv[2]); const long step = atol(argv[3]); if (start < 1) {fprintf(stderr, "ERROR: start value must be at least 1\n"); exit(-1);} if (bound <= start) {fprintf(stderr, "ERROR: bound must be larger than start\n"); exit(-1);} if (step < 1) {fprintf(stderr, "ERROR: step size must be at least 1\n"); exit(-1);} printf("start value: %ld\n", start); printf("upper bound: %ld\n", bound); printf("step size: %ld\n", step); int maxlen = 0; int size = sizeof(int); int* d_maxlen; if (cudaSuccess != cudaMalloc((void **)&d_maxlen, size)) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);} if (cudaSuccess != cudaMemcpy(d_maxlen, &maxlen, sizeof(int), cudaMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);} // start time timeval beg, end; gettimeofday(&beg, NULL); // execute timed code collatz<<<(bound + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(start, bound, step, d_maxlen); cudaDeviceSynchronize(); // end time gettimeofday(&end, NULL); const double runtime = end.tv_sec - beg.tv_sec + (end.tv_usec - beg.tv_usec) / 1000000.0; printf("compute time: %.6f s\n", runtime); CheckCuda(); if (cudaSuccess != cudaMemcpy(&maxlen, d_maxlen, sizeof(int), cudaMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n"); exit(-1);} // print result printf("maximum sequence length: %d elements\n", maxlen); cudaFree(d_maxlen); return 0; }
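Note on the Collatz pair: the launch covers every integer below bound and each thread then tests (i - start) % step == 0, so for step > 1 most threads exit immediately, and the grid is sized from bound rather than from the number of candidates. A hedged alternative sketch, not the course-provided solution, indexes the candidates directly:

static const int ThreadsPerBlock = 512;

// Sketch: one thread per candidate start + k*step instead of one thread per integer.
static __global__ void collatz_strided(long start, long bound, long step, int* maxlen) {
    const long tid = threadIdx.x + blockIdx.x * (long)blockDim.x;
    const long i = start + tid * step;          // only valid candidates are enumerated
    if (i >= bound) return;
    long val = i;
    int len = 1;
    while (val != 1) { len++; val = (val % 2 == 0) ? val / 2 : 3 * val + 1; }
    atomicMax(maxlen, len);
}

// launch sized by the candidate count, e.g.:
//   const long candidates = (bound - start + step - 1) / step;
//   collatz_strided<<<(candidates + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(start, bound, step, d_maxlen);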
a968fe7846cff8eb7ba17e2d1f6f9cd8c20528b5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 1.6.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2015

       @generated from zlacpy_cnjg.cu normal z -> c, Fri Jan 30 19:00:08 2015

*/
#include "common_magma.h"

#define BLOCK_SIZE 64

/*********************************************************
 *
 * SWAP BLAS: permute to set of N elements
 *
 ********************************************************/
/*
 *  First version: line per line
 */
typedef struct {
    magmaFloatComplex *A1;
    magmaFloatComplex *A2;
    int n, lda1, lda2;
} magmagpu_clacpy_cnjg_params_t;

__global__ void magmagpu_clacpy_cnjg( magmagpu_clacpy_cnjg_params_t params )
{
    unsigned int x = threadIdx.x + blockDim.x*blockIdx.x;
    unsigned int offset1 = x*params.lda1;
    unsigned int offset2 = x*params.lda2;
    if( x < params.n )
    {
        magmaFloatComplex *A1 = params.A1 + offset1;
        magmaFloatComplex *A2 = params.A2 + offset2;
        *A2 = MAGMA_C_CNJG(*A1);
    }
}

extern "C" void
magmablas_clacpy_cnjg_q(
    magma_int_t n, magmaFloatComplex *dA1, magma_int_t lda1,
    magmaFloatComplex *dA2, magma_int_t lda2,
    magma_queue_t queue )
{
    int blocksize = 64;
    dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
    magmagpu_clacpy_cnjg_params_t params = { dA1, dA2, n, lda1, lda2 };
    hipLaunchKernelGGL(( magmagpu_clacpy_cnjg), dim3(blocks), dim3(blocksize), 0, queue , params );
}

extern "C" void
magmablas_clacpy_cnjg(
    magma_int_t n, magmaFloatComplex *dA1, magma_int_t lda1,
    magmaFloatComplex *dA2, magma_int_t lda2)
{
    magmablas_clacpy_cnjg_q( n, dA1, lda1, dA2, lda2, magma_stream );
}
a968fe7846cff8eb7ba17e2d1f6f9cd8c20528b5.cu
/*
    -- MAGMA (version 1.6.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2015

       @generated from zlacpy_cnjg.cu normal z -> c, Fri Jan 30 19:00:08 2015

*/
#include "common_magma.h"

#define BLOCK_SIZE 64

/*********************************************************
 *
 * SWAP BLAS: permute to set of N elements
 *
 ********************************************************/
/*
 *  First version: line per line
 */
typedef struct {
    magmaFloatComplex *A1;
    magmaFloatComplex *A2;
    int n, lda1, lda2;
} magmagpu_clacpy_cnjg_params_t;

__global__ void magmagpu_clacpy_cnjg( magmagpu_clacpy_cnjg_params_t params )
{
    unsigned int x = threadIdx.x + blockDim.x*blockIdx.x;
    unsigned int offset1 = x*params.lda1;
    unsigned int offset2 = x*params.lda2;
    if( x < params.n )
    {
        magmaFloatComplex *A1 = params.A1 + offset1;
        magmaFloatComplex *A2 = params.A2 + offset2;
        *A2 = MAGMA_C_CNJG(*A1);
    }
}

extern "C" void
magmablas_clacpy_cnjg_q(
    magma_int_t n, magmaFloatComplex *dA1, magma_int_t lda1,
    magmaFloatComplex *dA2, magma_int_t lda2,
    magma_queue_t queue )
{
    int blocksize = 64;
    dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
    magmagpu_clacpy_cnjg_params_t params = { dA1, dA2, n, lda1, lda2 };
    magmagpu_clacpy_cnjg<<< blocks, blocksize, 0, queue >>>( params );
}

extern "C" void
magmablas_clacpy_cnjg(
    magma_int_t n, magmaFloatComplex *dA1, magma_int_t lda1,
    magmaFloatComplex *dA2, magma_int_t lda2)
{
    magmablas_clacpy_cnjg_q( n, dA1, lda1, dA2, lda2, magma_stream );
}
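Note on the MAGMA pair: the kernel takes all of its arguments packed into a small struct passed by value, which the runtime copies into kernel parameter space at launch, so no device-side allocation for the parameters is needed. A generic stand-alone illustration of the same idiom (the names are illustrative, not MAGMA's):

#include <cuda_runtime.h>

struct scale_params_t { float* data; int n; float alpha; };

__global__ void scale_kernel(scale_params_t p) {        // struct arrives by value
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    if (x < p.n) p.data[x] *= p.alpha;
}

// Usage sketch (d_data assumed to be a valid device pointer):
//   scale_params_t params = { d_data, n, 2.0f };
//   scale_kernel<<<blocks, threads>>>(params);
// hipify rewrites such a launch into the form seen in the .hip file above:
//   hipLaunchKernelGGL(( scale_kernel), dim3(blocks), dim3(threads), 0, 0, params);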
af45bb0a1952f0bdcaf5e82328e50c6844e3689f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #define GRID_SIZE 4 #define BLOCK_SIZE 224 #define MAX_STATIC_SIZE 4096 #define MAX_PROBLEM_NUM 1024 #define CELL_NUM 81 #define ROW_NUM 9 #define NEXT_NUMBER_NUM (1 << ROW_NUM) static int load(char *in_file_path, int *out_number); static int find_valid_number(const int *in_static_number, int in_depth, int in_max_count, int* out_result, int *, int *inout_initial); __constant__ int static_count; __constant__ int next_number[NEXT_NUMBER_NUM]; __global__ void solve_sudoku(int *in_static_number, int *out_result, int *out_count); int main(int argc, char** argv){ int host_static_number[MAX_PROBLEM_NUM * CELL_NUM]; int result[MAX_PROBLEM_NUM * CELL_NUM]; int valid_index; int initial[CELL_NUM]; int answer_num[MAX_PROBLEM_NUM]; int host_next_number[512]; int problem_num; int *device_result; int *device_count; int count; int *device_static_number; int *valid_number; int i, j, k; if (argc < 2 || argc >= 3) { printf("Usage: sudoku_cpu file_path"); return 1; } char *file_path = argv[1]; problem_num = load(file_path, host_static_number); if (problem_num <= 0) { printf("Can't load file %s.", file_path); return 1; } hipMalloc((void**)&device_result, sizeof(int) * CELL_NUM); hipMalloc((void**)&device_count, sizeof(int)); valid_number = (int*)malloc(sizeof(int) * CELL_NUM * MAX_STATIC_SIZE); hipMalloc((void**)&device_static_number, sizeof(int) * CELL_NUM * MAX_STATIC_SIZE); hipError_t error; hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { printf("failed to craete start event"); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { printf("failed to crete stop event"); exit(EXIT_FAILURE); } error = hipEventRecord(start, NULL); if (error != hipSuccess) { printf("failed to record start event"); exit(EXIT_FAILURE); } for (i = 0; i < NEXT_NUMBER_NUM; i++) { for (j = 0; j < ROW_NUM + 1; j++) { if (((1 << j) & i) == 0) { host_next_number[i] = j; break; } } } valid_index = -1; for (i = 0; i < problem_num; i++) { answer_num[i] = 0; do { count = find_valid_number(&host_static_number[i * CELL_NUM], 16, MAX_STATIC_SIZE, valid_number, &valid_index, initial); hipMemcpy(device_static_number, valid_number, sizeof(int)* CELL_NUM * count, hipMemcpyHostToDevice); hipMemcpyToSymbol(static_count, &count, sizeof(int)); hipMemcpyToSymbol(next_number, host_next_number, sizeof(host_next_number)); hipMemset(device_result, 0, sizeof(int)* CELL_NUM); hipMemset(device_count, 0, sizeof(int)); dim3 block(BLOCK_SIZE, 1); dim3 grid(GRID_SIZE, 1); solve_sudoku << <grid, block >> >(device_static_number, device_result, device_count); hipDeviceSynchronize(); hipMemcpy(&count, device_count, sizeof(int), hipMemcpyDeviceToHost); if (answer_num[i] == 0 && count > 0) { hipMemcpy(&result[i * CELL_NUM], device_result, sizeof(int)* CELL_NUM, hipMemcpyDeviceToHost); } answer_num[i] += count; } while (valid_index >= 0); } error = hipEventRecord(stop, NULL); if (error != hipSuccess) { printf("failed to record stop event"); exit(EXIT_FAILURE); } error = hipEventSynchronize(stop); if (error != hipSuccess) { printf("failed to synchronize"); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { printf("failed to get elapsed time"); exit(EXIT_FAILURE); } printf("Processing time: %f (msec)\n", msecTotal); for (i = 0; i < problem_num; i++) { printf("%d found\n", 
answer_num[i]); for (j = 0; j < ROW_NUM; j++) { for (k = 0; k < ROW_NUM; k++) { printf("%d ", result[i * CELL_NUM + j * ROW_NUM + k]); } printf("\n"); } } hipFree(device_result); hipFree(device_count); hipFree(device_static_number); free(valid_number); hipDeviceReset(); } static int load(char *in_file_path, int *out_number) { char buf[1024]; errno_t error; FILE *fp; int size; int i, n; error = fopen_s(&fp, in_file_path, "r"); if (error != 0) { return 0; } size = fread(buf, 1, sizeof(buf), fp); fclose(fp); for (i = 0, n = 0; i < size && n < MAX_PROBLEM_NUM * CELL_NUM; i++) { if (buf[i] >= '1' && buf[i] <= '9') { out_number[n] = buf[i] - '0'; n++; } else if (buf[i] == '-') { out_number[n] = 0; n++; } } return n / CELL_NUM; } static int find_valid_number(const int *in_static_number, int in_depth, int in_max_count, int* out_result, int *inout_index, int *inout_initial) { const int row[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; const int col[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8 }; const int box[] = { 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 3, 3, 3, 4, 4, 4, 5, 5, 5, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 6, 6, 6, 7, 7, 7, 8, 8, 8, 6, 6, 6, 7, 7, 7, 8, 8, 8 }; int row_flag[ROW_NUM] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; int col_flag[ROW_NUM] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; int box_flag[ROW_NUM] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; int empty[CELL_NUM]; int number[CELL_NUM]; int found = 0; int pos; int flag; int index; int empty_num; int i; int *result = out_result; empty_num = 0; for (pos = 0; pos < CELL_NUM; pos++) { if (*inout_index >= 0) { number[pos] = inout_initial[pos]; } else { number[pos] = in_static_number[pos]; } if (number[pos] > 0) { flag = 1 << number[pos]; row_flag[row[pos]] |= flag; col_flag[col[pos]] |= flag; box_flag[box[pos]] |= flag; } if (in_static_number[pos] == 0) { empty[empty_num] = pos; empty_num++; } } if (*inout_index >= 0) { index = *inout_index; pos = empty[index]; flag = 1 << number[pos]; row_flag[row[pos]] ^= flag; col_flag[col[pos]] ^= flag; box_flag[box[pos]] ^= flag; } else { index = 0; } while (1) { pos = empty[index]; for (number[pos]++; number[pos] < ROW_NUM + 1; number[pos]++) { flag = 1 << number[pos]; if ((row_flag[row[pos]] & flag) != 0 || (col_flag[col[pos]] & flag) != 0 || (box_flag[box[pos]] & flag) != 0) { continue; } if (index >= in_depth - 1 || index >= empty_num - 1) { for (i = 0; i < CELL_NUM; i++) { result[i] = number[i]; } found++; result += CELL_NUM; if (found >= in_max_count) { break; } } else { break; } } if (found >= in_max_count) { break; } if (number[pos] < ROW_NUM + 1) { flag = 1 << number[pos]; index++; } else { index--; if (index < 0) { break; } number[pos] = 0; pos = empty[index]; flag = 1 << number[pos]; } row_flag[row[pos]] ^= flag; col_flag[col[pos]] ^= flag; box_flag[box[pos]] ^= flag; } if (index >= 0) { for (pos = 0; pos < CELL_NUM; pos++) { inout_initial[pos] = number[pos]; } } *inout_index = index; return found; } __constant__ int row[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; __constant__ int col[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8 }; __constant__ int box[] = { 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 3, 3, 3, 4, 4, 4, 5, 5, 5, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 6, 6, 6, 7, 7, 7, 8, 8, 8, 6, 6, 6, 7, 7, 7, 8, 8, 8 }; __global__ void solve_sudoku(int *in_static_number, int *out_result, int *out_count) { __shared__ int found; __shared__ short int row_flag[ROW_NUM][BLOCK_SIZE]; __shared__ short int col_flag[ROW_NUM][BLOCK_SIZE]; __shared__ short int box_flag[ROW_NUM][BLOCK_SIZE]; __shared__ char number[CELL_NUM][BLOCK_SIZE]; __shared__ char empty[CELL_NUM][BLOCK_SIZE]; __shared__ int static_index; __shared__ int static_index_end; int pos; int index; int flag; int i; int offset = -1; if (threadIdx.x == 0) { found = 0; static_index = (static_count * blockIdx.x) / gridDim.x; static_index_end = (static_count * (blockIdx.x + 1)) / gridDim.x; } __syncthreads(); index = -1; while (1) { if (index < 0) { i = atomicAdd(&static_index, 1); if (i >= static_index_end) { break; } #pragma unroll for (pos = 0; pos < ROW_NUM; pos++) { row_flag[pos][threadIdx.x] = 0; col_flag[pos][threadIdx.x] = 0; box_flag[pos][threadIdx.x] = 0; } offset = i * CELL_NUM; i = 0; #pragma unroll for (pos = 0; pos < CELL_NUM; pos++) { number[pos][threadIdx.x] = in_static_number[offset + pos]; if (number[pos][threadIdx.x] > 0) { flag = 1 << number[pos][threadIdx.x]; row_flag[row[pos]][threadIdx.x] |= flag; col_flag[col[pos]][threadIdx.x] |= flag; box_flag[box[pos]][threadIdx.x] |= flag; } else { empty[i][threadIdx.x] = pos; i++; } } empty[i][threadIdx.x] = -1; index = 0; } pos = empty[index][threadIdx.x]; if (pos < 0) { int old_found = atomicAdd(&found, 1); if (old_found == 0) { #pragma unroll for (int i = 0; i < CELL_NUM; i++) { out_result[i] = number[i][threadIdx.x]; } } index--; pos = empty[index][threadIdx.x]; flag = 1 << number[pos][threadIdx.x]; row_flag[row[pos]][threadIdx.x] ^= flag; col_flag[col[pos]][threadIdx.x] ^= flag; box_flag[box[pos]][threadIdx.x] ^= flag; continue; } number[pos][threadIdx.x] += next_number[(row_flag[row[pos]][threadIdx.x] | col_flag[col[pos]][threadIdx.x] | box_flag[box[pos]][threadIdx.x]) >> (number[pos][threadIdx.x] + 1)] + 1; if (number[pos][threadIdx.x] >= ROW_NUM + 1) { number[pos][threadIdx.x] = 0; index--; if (index < 0) { continue; } pos = empty[index][threadIdx.x]; flag = 1 << number[pos][threadIdx.x]; } else { flag = 1 << number[pos][threadIdx.x]; index++; } row_flag[row[pos]][threadIdx.x] ^= flag; col_flag[col[pos]][threadIdx.x] ^= flag; box_flag[box[pos]][threadIdx.x] ^= flag; } __syncthreads(); if (threadIdx.x == 0 && found > 0) { atomicAdd(out_count, found); } }
af45bb0a1952f0bdcaf5e82328e50c6844e3689f.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> #define GRID_SIZE 4 #define BLOCK_SIZE 224 #define MAX_STATIC_SIZE 4096 #define MAX_PROBLEM_NUM 1024 #define CELL_NUM 81 #define ROW_NUM 9 #define NEXT_NUMBER_NUM (1 << ROW_NUM) static int load(char *in_file_path, int *out_number); static int find_valid_number(const int *in_static_number, int in_depth, int in_max_count, int* out_result, int *, int *inout_initial); __constant__ int static_count; __constant__ int next_number[NEXT_NUMBER_NUM]; __global__ void solve_sudoku(int *in_static_number, int *out_result, int *out_count); int main(int argc, char** argv){ int host_static_number[MAX_PROBLEM_NUM * CELL_NUM]; int result[MAX_PROBLEM_NUM * CELL_NUM]; int valid_index; int initial[CELL_NUM]; int answer_num[MAX_PROBLEM_NUM]; int host_next_number[512]; int problem_num; int *device_result; int *device_count; int count; int *device_static_number; int *valid_number; int i, j, k; if (argc < 2 || argc >= 3) { printf("Usage: sudoku_cpu file_path"); return 1; } char *file_path = argv[1]; problem_num = load(file_path, host_static_number); if (problem_num <= 0) { printf("Can't load file %s.", file_path); return 1; } cudaMalloc((void**)&device_result, sizeof(int) * CELL_NUM); cudaMalloc((void**)&device_count, sizeof(int)); valid_number = (int*)malloc(sizeof(int) * CELL_NUM * MAX_STATIC_SIZE); cudaMalloc((void**)&device_static_number, sizeof(int) * CELL_NUM * MAX_STATIC_SIZE); cudaError_t error; cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { printf("failed to craete start event"); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { printf("failed to crete stop event"); exit(EXIT_FAILURE); } error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { printf("failed to record start event"); exit(EXIT_FAILURE); } for (i = 0; i < NEXT_NUMBER_NUM; i++) { for (j = 0; j < ROW_NUM + 1; j++) { if (((1 << j) & i) == 0) { host_next_number[i] = j; break; } } } valid_index = -1; for (i = 0; i < problem_num; i++) { answer_num[i] = 0; do { count = find_valid_number(&host_static_number[i * CELL_NUM], 16, MAX_STATIC_SIZE, valid_number, &valid_index, initial); cudaMemcpy(device_static_number, valid_number, sizeof(int)* CELL_NUM * count, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(static_count, &count, sizeof(int)); cudaMemcpyToSymbol(next_number, host_next_number, sizeof(host_next_number)); cudaMemset(device_result, 0, sizeof(int)* CELL_NUM); cudaMemset(device_count, 0, sizeof(int)); dim3 block(BLOCK_SIZE, 1); dim3 grid(GRID_SIZE, 1); solve_sudoku << <grid, block >> >(device_static_number, device_result, device_count); cudaThreadSynchronize(); cudaMemcpy(&count, device_count, sizeof(int), cudaMemcpyDeviceToHost); if (answer_num[i] == 0 && count > 0) { cudaMemcpy(&result[i * CELL_NUM], device_result, sizeof(int)* CELL_NUM, cudaMemcpyDeviceToHost); } answer_num[i] += count; } while (valid_index >= 0); } error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { printf("failed to record stop event"); exit(EXIT_FAILURE); } error = cudaEventSynchronize(stop); if (error != cudaSuccess) { printf("failed to synchronize"); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { printf("failed to get elapsed time"); exit(EXIT_FAILURE); } printf("Processing time: %f (msec)\n", msecTotal); for (i = 0; i < problem_num; i++) { printf("%d found\n", answer_num[i]); for (j = 0; j < ROW_NUM; j++) { 
for (k = 0; k < ROW_NUM; k++) { printf("%d ", result[i * CELL_NUM + j * ROW_NUM + k]); } printf("\n"); } } cudaFree(device_result); cudaFree(device_count); cudaFree(device_static_number); free(valid_number); cudaThreadExit(); } static int load(char *in_file_path, int *out_number) { char buf[1024]; errno_t error; FILE *fp; int size; int i, n; error = fopen_s(&fp, in_file_path, "r"); if (error != 0) { return 0; } size = fread(buf, 1, sizeof(buf), fp); fclose(fp); for (i = 0, n = 0; i < size && n < MAX_PROBLEM_NUM * CELL_NUM; i++) { if (buf[i] >= '1' && buf[i] <= '9') { out_number[n] = buf[i] - '0'; n++; } else if (buf[i] == '-') { out_number[n] = 0; n++; } } return n / CELL_NUM; } static int find_valid_number(const int *in_static_number, int in_depth, int in_max_count, int* out_result, int *inout_index, int *inout_initial) { const int row[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; const int col[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8 }; const int box[] = { 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 3, 3, 3, 4, 4, 4, 5, 5, 5, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 6, 6, 6, 7, 7, 7, 8, 8, 8, 6, 6, 6, 7, 7, 7, 8, 8, 8 }; int row_flag[ROW_NUM] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; int col_flag[ROW_NUM] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; int box_flag[ROW_NUM] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; int empty[CELL_NUM]; int number[CELL_NUM]; int found = 0; int pos; int flag; int index; int empty_num; int i; int *result = out_result; empty_num = 0; for (pos = 0; pos < CELL_NUM; pos++) { if (*inout_index >= 0) { number[pos] = inout_initial[pos]; } else { number[pos] = in_static_number[pos]; } if (number[pos] > 0) { flag = 1 << number[pos]; row_flag[row[pos]] |= flag; col_flag[col[pos]] |= flag; box_flag[box[pos]] |= flag; } if (in_static_number[pos] == 0) { empty[empty_num] = pos; empty_num++; } } if (*inout_index >= 0) { index = *inout_index; pos = empty[index]; flag = 1 << number[pos]; row_flag[row[pos]] ^= flag; col_flag[col[pos]] ^= flag; box_flag[box[pos]] ^= flag; } else { index = 0; } while (1) { pos = empty[index]; for (number[pos]++; number[pos] < ROW_NUM + 1; number[pos]++) { flag = 1 << number[pos]; if ((row_flag[row[pos]] & flag) != 0 || (col_flag[col[pos]] & flag) != 0 || (box_flag[box[pos]] & flag) != 0) { continue; } if (index >= in_depth - 1 || index >= empty_num - 1) { for (i = 0; i < CELL_NUM; i++) { result[i] = number[i]; } found++; result += CELL_NUM; if (found >= in_max_count) { break; } } else { break; } } if (found >= in_max_count) { break; } if (number[pos] < ROW_NUM + 1) { flag = 1 << number[pos]; index++; } else { index--; if (index < 0) { break; } number[pos] = 0; pos = empty[index]; flag = 1 << number[pos]; } row_flag[row[pos]] ^= flag; col_flag[col[pos]] ^= flag; box_flag[box[pos]] ^= flag; } if (index >= 0) { for (pos = 0; pos < CELL_NUM; pos++) { inout_initial[pos] = number[pos]; } } *inout_index = index; return found; } __constant__ int row[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; __constant__ int col[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8 }; __constant__ int box[] = { 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 3, 3, 3, 4, 4, 4, 5, 5, 5, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 6, 6, 6, 7, 7, 7, 8, 8, 8, 6, 6, 6, 7, 7, 7, 8, 8, 8 }; __global__ void solve_sudoku(int *in_static_number, int *out_result, int *out_count) { __shared__ int found; __shared__ short int row_flag[ROW_NUM][BLOCK_SIZE]; __shared__ short int col_flag[ROW_NUM][BLOCK_SIZE]; __shared__ short int box_flag[ROW_NUM][BLOCK_SIZE]; __shared__ char number[CELL_NUM][BLOCK_SIZE]; __shared__ char empty[CELL_NUM][BLOCK_SIZE]; __shared__ int static_index; __shared__ int static_index_end; int pos; int index; int flag; int i; int offset = -1; if (threadIdx.x == 0) { found = 0; static_index = (static_count * blockIdx.x) / gridDim.x; static_index_end = (static_count * (blockIdx.x + 1)) / gridDim.x; } __syncthreads(); index = -1; while (1) { if (index < 0) { i = atomicAdd(&static_index, 1); if (i >= static_index_end) { break; } #pragma unroll for (pos = 0; pos < ROW_NUM; pos++) { row_flag[pos][threadIdx.x] = 0; col_flag[pos][threadIdx.x] = 0; box_flag[pos][threadIdx.x] = 0; } offset = i * CELL_NUM; i = 0; #pragma unroll for (pos = 0; pos < CELL_NUM; pos++) { number[pos][threadIdx.x] = in_static_number[offset + pos]; if (number[pos][threadIdx.x] > 0) { flag = 1 << number[pos][threadIdx.x]; row_flag[row[pos]][threadIdx.x] |= flag; col_flag[col[pos]][threadIdx.x] |= flag; box_flag[box[pos]][threadIdx.x] |= flag; } else { empty[i][threadIdx.x] = pos; i++; } } empty[i][threadIdx.x] = -1; index = 0; } pos = empty[index][threadIdx.x]; if (pos < 0) { int old_found = atomicAdd(&found, 1); if (old_found == 0) { #pragma unroll for (int i = 0; i < CELL_NUM; i++) { out_result[i] = number[i][threadIdx.x]; } } index--; pos = empty[index][threadIdx.x]; flag = 1 << number[pos][threadIdx.x]; row_flag[row[pos]][threadIdx.x] ^= flag; col_flag[col[pos]][threadIdx.x] ^= flag; box_flag[box[pos]][threadIdx.x] ^= flag; continue; } number[pos][threadIdx.x] += next_number[(row_flag[row[pos]][threadIdx.x] | col_flag[col[pos]][threadIdx.x] | box_flag[box[pos]][threadIdx.x]) >> (number[pos][threadIdx.x] + 1)] + 1; if (number[pos][threadIdx.x] >= ROW_NUM + 1) { number[pos][threadIdx.x] = 0; index--; if (index < 0) { continue; } pos = empty[index][threadIdx.x]; flag = 1 << number[pos][threadIdx.x]; } else { flag = 1 << number[pos][threadIdx.x]; index++; } row_flag[row[pos]][threadIdx.x] ^= flag; col_flag[col[pos]][threadIdx.x] ^= flag; box_flag[box[pos]][threadIdx.x] ^= flag; } __syncthreads(); if (threadIdx.x == 0 && found > 0) { atomicAdd(out_count, found); } }
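Note on the sudoku pair: host_next_number is a 512-entry table, kept in __constant__ memory, that maps a 9-bit "digits already used" mask to the lowest free digit. The same value can be derived from the mask with the __ffs intrinsic; the sketch below only states that equivalence and makes no claim about relative speed of table lookup versus computation:

// next_number[mask], as built on the host above, equals the index of the lowest zero bit of mask.
__device__ inline int lowest_free_digit(int mask) {
    return __ffs(~mask) - 1;   // __ffs returns the 1-based position of the lowest set bit
}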
9460592d81eeb6aa341c55b52674afccc09a7eb2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #define TILE_SIZE 32 __global__ void sobel( int width_d, int height_d, int threshold_d, unsigned int *pic_d , int *final_res) { int row_1 = blockIdx.y * blockDim.y + threadIdx.y; int col_1 = blockIdx.x * blockDim.x + threadIdx.x; int tx = threadIdx.y; int ty = threadIdx.x; int width_Tile = TILE_SIZE; int id, id1; __shared__ int sharedTile[TILE_SIZE * TILE_SIZE]; int magnitude, sum1, sum2; // Shared Tile Initialization sharedTile[tx * width_Tile + ty] = 0; __syncthreads(); // Copying Data from Global to Shared Memory sharedTile[tx * width_Tile + ty] = pic_d[row_1 * (width_d) + col_1]; __syncthreads(); // Output if ((row_1 < height_d) && (col_1 < width_d)) { final_res[row_1 * width_d + col_1] = 0; } __syncthreads(); if (row_1 > 0 && col_1 > 0 && row_1 < height_d - 1 && col_1 < width_d - 1) { // Applying Sobel Filter on the Tile Stored in the Shared Memory if ((tx > 0) && (tx < width_Tile - 1) && (ty > 0) && (ty < width_Tile - 1)) { id = row_1 * width_d + col_1; sum1 = sharedTile[ width_Tile * (tx-1) + ty+1] - sharedTile[ width_Tile * (tx-1) + ty-1 ] + 2 * sharedTile[ width_Tile * (tx) + ty+1 ] - 2 * sharedTile[ width_Tile*(tx) + ty-1 ] + sharedTile[ width_Tile * (tx+1) + ty+1] - sharedTile[ width_Tile*(tx+1) + ty-1 ]; sum2 = sharedTile[ width_Tile * (tx-1) + ty-1 ] + 2 * sharedTile[ width_Tile * (tx-1) + ty ] + sharedTile[ width_Tile * (tx-1) + ty+1] - sharedTile[width_Tile * (tx+1) + ty-1 ] - 2 * sharedTile[ width_Tile * (tx+1) + ty ] - sharedTile[ width_Tile * (tx+1) + ty+1]; magnitude = sum1 * sum1 + sum2 * sum2; if (magnitude > threshold_d) { final_res[id] = 255; } else { final_res[id] = 0; } } __syncthreads(); // For the Pixels at the Boundaries of the Block using Global Memory if ((row_1 == blockIdx.y * blockDim.y + blockDim.y - 1) || (col_1 == blockIdx.x * blockDim.x + blockDim.x - 1) || (row_1 == blockIdx.y * blockDim.y) || (col_1 == blockIdx.x * blockDim.x)) { id1 = row_1 * width_d + col_1; sum1 = pic_d[ width_d * (row_1-1) + col_1+1] - pic_d[ width_d * (row_1-1) + col_1-1 ] + 2 * pic_d[ width_d * (row_1) + col_1+1 ] - 2 * pic_d[ width_d*(row_1) + col_1-1 ] + pic_d[ width_d * (row_1+1) + col_1+1] - pic_d[ width_d*(row_1+1) + col_1-1 ]; sum2 = pic_d[ width_d * (row_1-1) + col_1-1 ] + 2 * pic_d[ width_d * (row_1-1) + col_1 ] + pic_d[ width_d * (row_1-1) + col_1+1] - pic_d[width_d * (row_1+1) + col_1-1 ] - 2 * pic_d[ width_d * (row_1+1) + col_1 ] - pic_d[ width_d * (row_1+1) + col_1+1]; magnitude = sum1*sum1 + sum2*sum2; if (magnitude > threshold_d) { final_res[id1] = 255; } else { final_res[id1] = 0; } } __syncthreads(); } }
9460592d81eeb6aa341c55b52674afccc09a7eb2.cu
#include <stdio.h> #include <stdlib.h> #include <fcntl.h> #define TILE_SIZE 32 __global__ void sobel( int width_d, int height_d, int threshold_d, unsigned int *pic_d , int *final_res) { int row_1 = blockIdx.y * blockDim.y + threadIdx.y; int col_1 = blockIdx.x * blockDim.x + threadIdx.x; int tx = threadIdx.y; int ty = threadIdx.x; int width_Tile = TILE_SIZE; int id, id1; __shared__ int sharedTile[TILE_SIZE * TILE_SIZE]; int magnitude, sum1, sum2; // Shared Tile Initialization sharedTile[tx * width_Tile + ty] = 0; __syncthreads(); // Copying Data from Global to Shared Memory sharedTile[tx * width_Tile + ty] = pic_d[row_1 * (width_d) + col_1]; __syncthreads(); // Output if ((row_1 < height_d) && (col_1 < width_d)) { final_res[row_1 * width_d + col_1] = 0; } __syncthreads(); if (row_1 > 0 && col_1 > 0 && row_1 < height_d - 1 && col_1 < width_d - 1) { // Applying Sobel Filter on the Tile Stored in the Shared Memory if ((tx > 0) && (tx < width_Tile - 1) && (ty > 0) && (ty < width_Tile - 1)) { id = row_1 * width_d + col_1; sum1 = sharedTile[ width_Tile * (tx-1) + ty+1] - sharedTile[ width_Tile * (tx-1) + ty-1 ] + 2 * sharedTile[ width_Tile * (tx) + ty+1 ] - 2 * sharedTile[ width_Tile*(tx) + ty-1 ] + sharedTile[ width_Tile * (tx+1) + ty+1] - sharedTile[ width_Tile*(tx+1) + ty-1 ]; sum2 = sharedTile[ width_Tile * (tx-1) + ty-1 ] + 2 * sharedTile[ width_Tile * (tx-1) + ty ] + sharedTile[ width_Tile * (tx-1) + ty+1] - sharedTile[width_Tile * (tx+1) + ty-1 ] - 2 * sharedTile[ width_Tile * (tx+1) + ty ] - sharedTile[ width_Tile * (tx+1) + ty+1]; magnitude = sum1 * sum1 + sum2 * sum2; if (magnitude > threshold_d) { final_res[id] = 255; } else { final_res[id] = 0; } } __syncthreads(); // For the Pixels at the Boundaries of the Block using Global Memory if ((row_1 == blockIdx.y * blockDim.y + blockDim.y - 1) || (col_1 == blockIdx.x * blockDim.x + blockDim.x - 1) || (row_1 == blockIdx.y * blockDim.y) || (col_1 == blockIdx.x * blockDim.x)) { id1 = row_1 * width_d + col_1; sum1 = pic_d[ width_d * (row_1-1) + col_1+1] - pic_d[ width_d * (row_1-1) + col_1-1 ] + 2 * pic_d[ width_d * (row_1) + col_1+1 ] - 2 * pic_d[ width_d*(row_1) + col_1-1 ] + pic_d[ width_d * (row_1+1) + col_1+1] - pic_d[ width_d*(row_1+1) + col_1-1 ]; sum2 = pic_d[ width_d * (row_1-1) + col_1-1 ] + 2 * pic_d[ width_d * (row_1-1) + col_1 ] + pic_d[ width_d * (row_1-1) + col_1+1] - pic_d[width_d * (row_1+1) + col_1-1 ] - 2 * pic_d[ width_d * (row_1+1) + col_1 ] - pic_d[ width_d * (row_1+1) + col_1+1]; magnitude = sum1*sum1 + sum2*sum2; if (magnitude > threshold_d) { final_res[id1] = 255; } else { final_res[id1] = 0; } } __syncthreads(); } }
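Note on the Sobel pair: the shared tile is loaded without a halo, so threads on the border of each block fall back to global-memory reads for their neighbours. A common alternative, sketched below under assumed names and not taken from the assignment code, stages a (TILE+2) x (TILE+2) tile including a one-pixel halo so every interior pixel is filtered from shared memory:

#define TILE 30   // interior pixels computed per block dimension; block is (TILE+2) x (TILE+2)

__global__ void sobel_halo(const unsigned int* in, int* out,
                           int width, int height, int threshold) {
    __shared__ int tile[TILE + 2][TILE + 2];
    int gx = blockIdx.x * TILE + threadIdx.x - 1;   // -1: include the left/top halo column
    int gy = blockIdx.y * TILE + threadIdx.y - 1;
    // clamp-to-edge load: every thread of the (TILE+2)^2 block loads one pixel
    int cx = min(max(gx, 0), width  - 1);
    int cy = min(max(gy, 0), height - 1);
    tile[threadIdx.y][threadIdx.x] = in[cy * width + cx];
    __syncthreads();
    // only tile-interior threads that also map to image-interior pixels write a result;
    // image-border pixels are left untouched in this sketch
    if (threadIdx.x >= 1 && threadIdx.x <= TILE && threadIdx.y >= 1 && threadIdx.y <= TILE
        && gx > 0 && gy > 0 && gx < width - 1 && gy < height - 1) {
        int tx = threadIdx.x, ty = threadIdx.y;
        int sum1 = tile[ty-1][tx+1] - tile[ty-1][tx-1] + 2*tile[ty][tx+1] - 2*tile[ty][tx-1]
                 + tile[ty+1][tx+1] - tile[ty+1][tx-1];
        int sum2 = tile[ty-1][tx-1] + 2*tile[ty-1][tx] + tile[ty-1][tx+1]
                 - tile[ty+1][tx-1] - 2*tile[ty+1][tx] - tile[ty+1][tx+1];
        out[gy * width + gx] = (sum1 * sum1 + sum2 * sum2 > threshold) ? 255 : 0;
    }
}
// launched with dim3 block(TILE + 2, TILE + 2) and a grid of ceil(width/TILE) x ceil(height/TILE) blocks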
9b0186d5a1be8bf9e87e58262c5a1c37df165feb.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2020 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // CUDA sample demonstrating a __nv_bfloat16 (E8M7) GEMM computation using the Warp Matrix Multiply // and Accumulate API introduced in CUDA 11.0. // In this program, the compute_gemm kernel computes the result of a matrix multiplication // and addition: D = alpha * A * B + beta * C. The dimensions of both C and D matrices // are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x K_GLOBAL (row-major), the B matrix // is K_GLOBAL x N_GLOBAL (column-major). // In that kernel, each CTA computes one 128 x 128 tile of the resulting matrix // per iteration. When the tile is computed, the CTA stores it to the global memory // and begins a new iteration, selecting a new 128 x 128 tile to compute. // Each CTA consists of eight warps. For the 128 x 128 tile, each warp computes eight // 16 x 16 subtiles, organized in a 2 x 4 two-dimensional array. // Warps compute the 16 x 16 subtiles using nvcuda::wmma::mma_sync operations by // moving through the K_GLOBAL dimension of the A and B matrices and accumulating // the intermediate result in the local thread state. // There are a number of simple optimizations used in the algorithm: // - The CTA copies the 128 x 128 tile of the C matrix from the global memory to // shared memory. After that is done, each warp loads the C matrix fragments from // shared memory, thus avoiding a random global memory access. // - On each internal iteration, the CTA copies a portion of the A and B matrices from // global memory to shared memory. After that, all warps in the CTA reuse the A and B // data from shared memory, thus reducing the number of data copies from global memory. // - The portions of the A and B matrices are stored in shared memory with an additional // padding (skew) to reduce the number of shared memory access bank conflicts. // (See a detailed explanation near the SKEW_BF16 macro definition.) // - When the CTA finishes computing the tiles of the resulting matrix, each warp stores // its subtiles to shared memory. The CTA then copies the shared memory contents to // global memory, again avoiding redundant random global memory accesses. // - Note that the CTA tile size is chosen to maximize the GPU register utilization, // but carefully enough to avoid local memory use. #include <assert.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <cuda_bf16.h> #include <mma.h> #include <cuda_pipeline.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> // Externally configurable parameters. // Switch for choosing cpp interface for cuda pipeline // vs primitives interface. #define USE_CPP_API 0 #ifndef CPU_DEBUG // Set this to 1 to verify the correctness of the GPU-computed matrix. #define CPU_DEBUG 0 #endif #ifndef SHARED_MEMORY_LIMIT_64K // Set this to 0 to use more than 64 Kb of shared memory to cache data, to // improve the performance of the computations on GPU. // Note that you need a GPU that can have more than 64 Kb of shared memory // per multiprocessor. #define SHARED_MEMORY_LIMIT_64K 0 #endif // GPU configuration. 
#define WARP_SIZE 32 // MMA matrix tile dimensions. #define M 16 #define N 16 #define K 16 // GEMM configuration. #define M_TILES 512 #define N_TILES 512 #define K_TILES 512 #define M_GLOBAL (M * M_TILES) #define N_GLOBAL (N * N_TILES) #define K_GLOBAL (K * K_TILES) #define C_LAYOUT wmma::mem_row_major // Implementation constants. #define WARPS_PER_BLOCK 8 #define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK) #if SHARED_MEMORY_LIMIT_64K // With only 64 Kb shared memory available, we can fit two 8-tile chunks of // the A and B matrix data, that is (M = 16) * (K = 16) * 8 * (CHUNK_K = 8) // * sizeof(__nv_bfloat16) = 32 Kb each. // (i.e. two 8x8 arrays of tiles of 16x16 __nv_bfloat16-typed elements per CTA). // But we cannot account the 8 Kb total skew overhead, without which the performance // would be severely impacted. So we choose to reduce the chunk size in half, // i.e. the amount of A and B matrix data we cache in shared memory. // Accordingly, this doubles the number of outer iterations across the global K // dimension, which only slightly impacts the performance. #define CHUNK_K 4 #else #define CHUNK_K 8 #endif #define CHUNK_LINE_BYTES (CHUNK_K * K * sizeof(__nv_bfloat16)) #define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4)) #define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES) #define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP) #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS) #define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS) #define GLOBAL_MEM_STRIDE N_GLOBAL #define SHMEM_STRIDE (N * BLOCK_ROW_TILES) #define SHMEM_OFFSET (N * WARP_ROW_TILES) // The macro below is used to shift rows of the A matrix and columns of the B matrix // in shared memory to minimize possible bank conflicts. // Before performing the nvcuda::wmma::mma_sync operation, the warp must load the matrix // data using the nvcuda::wmma::load_matrix_sync operation. Although the memory access pattern // is not specified for that function, each lane in the warp can read one or multiple matrix // elements from different matrix rows or columns. // For shared memory, such access can result in bank conflicts if different rows / columns // of the matrix map to the same bank. By shifting each row and column by a few bytes, we // make sure that they map to different banks, thus reducing the number of possible bank // conflicts. // The number of 16 two-byte "__nv_bfloat16" elements is chosen as the minimum possible shift because // we must keep each row and column 256-bit aligned, as required by nvcuda::wmma::load_matrix_sync. #define SKEW_BF16 16 #define checkKernelErrors(expr) do { \ expr; \ \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, # expr, hipGetErrorString(__err)); \ abort(); \ } \ } while(0) enum kernels { bf16mma_shmem_gemm_async_copy = 0, // __nv_bfloat16 MMA shmem using kernel with async_copy bf16mma_shmem_gemm = 1, // __nv_bfloat16 MMA shmem using kernel normal copy (without async_copy). simple_bf16mma_gemm = 2 // __nv_bfloat16 MMA non-shmem using simple kernel. 
}; const char* kernelNames[] = {"compute_bf16gemm_async_copy", "compute_bf16gemm", "simple_wmma_bf16gemm"}; using namespace nvcuda; namespace nvcuda_namespace = nvcuda::experimental; __host__ void init_host_matrices(__nv_bfloat16 *a, __nv_bfloat16 *b, float *c) { for (int i = 0; i < M_GLOBAL; i++) { for (int j = 0; j < K_GLOBAL; j++) { a[i*K_GLOBAL+j] = (__nv_bfloat16)(rand() % 3); } } for (int i = 0; i < N_GLOBAL; i++) { for (int j = 0; j < K_GLOBAL; j++) { b[i*K_GLOBAL+j] = (__nv_bfloat16)(rand() % 3); } } for (int t = 0; t < M_GLOBAL * N_GLOBAL; t++) { c[t] = (float)(rand() % 3); } } __global__ void compute_bf16gemm(const __nv_bfloat16 *A, const __nv_bfloat16 *B, const float *C, float *D, float alpha, float beta) { extern __shared__ __nv_bfloat16 shmem[][CHUNK_K * K + SKEW_BF16]; // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // This pointer is used to access the C and D matrix tiles this warp computes. float *shmem_warp_tile_ptr = (float*)&shmem[0][0] + (warpId / BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET; // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory. float *shmem_warp_stream_ptr = (float*)&shmem[0][0] + warpId * SHMEM_STRIDE * N; // Adjust the beta scaler, as it'll be multiplied by alpha at the end of // each tile computation. Technically this is not generally correct (may result // in a loss of precision). Zero still needs to be specially handled though. beta /= alpha; // Each CTA slides along the 128 x 128 tiles from the top left corner of the matrix to the // right and down, and selects the next tile to compute. Once there's no such tile, // all warps in this CTA exit. for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES); const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_TILES) { break; } // This warp's pointer to the C matrix data to copy memory from to shared memory. const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N; const float *src_gmem_warp_stream_ptr = &C[gmem_idx]; // Stream multiple C tiles to shared memory. #pragma unroll for (int i = 0; i < N; i++) { *((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) = *((int4*)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId); } __syncthreads(); // These fragments will accumulate the result of A and B matrix fragment multiplications // along the K_GLOBAL dimension. wmma::fragment<wmma::accumulator, M, N, K, float> c[WARP_COL_TILES][WARP_ROW_TILES]; // Load the C matrix tiles into fragments from shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { const float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N; wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Scale the C matrix. 
#pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll for (int t = 0; t < c[i][j].num_elements; t++) { c[i][j].x[t] *= beta; } } } // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const __nv_bfloat16 *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) : (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix. size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) : (N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off); // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. const __nv_bfloat16 *lane_ptr = (warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL); // Shift the second half of the warp to the next row / column in the shared memory. shmem_idx += laneId / CHUNK_COPY_LINE_LANES; #pragma unroll for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP) * 2; i++) { // Copy 16 bytes at once in each lane. *((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) = *((int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES)); // Advance the global memory pointer and the shared memory index. lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP; shmem_idx += CHUNK_COPY_LINES_PER_WARP; } __syncthreads(); // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, __nv_bfloat16, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, __nv_bfloat16, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId/BLOCK_ROW_WARPS) * M * BLOCK_ROW_WARPS + (i * M); const __nv_bfloat16 *tile_ptr = &shmem[shmem_idx_a][k_step * K]; wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_BF16); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be reused // against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N); const __nv_bfloat16 *tile_ptr = &shmem[shmem_idx_b][k_step * K]; wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_BF16); } wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll // Uniform, point-wise transformations of ALL fragment elements by ALL threads in the // warp are well-defined even though element indices within fragment storage are not defined. for (int t = 0; t < c[i][j].num_elements; t++) c[i][j].x[t] *= alpha; float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Now that shared memory contains all the D tiles, stream them to global memory. 
float *dst_gmem_warp_stream_ptr = &D[gmem_idx]; #pragma unroll for (int i = 0; i < N; i++) { *((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) = *((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId); } __syncthreads(); } } __global__ void compute_bf16gemm_async_copy(const __nv_bfloat16 *A, const __nv_bfloat16 *B, const float *C, float *D, float alpha, float beta) { extern __shared__ __nv_bfloat16 shmem[][CHUNK_K * K + SKEW_BF16]; // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // This pointer is used to access the C and D matrix tiles this warp computes. float *shmem_warp_tile_ptr = (float*)&shmem[0][0] + (warpId / BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET; // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory. float *shmem_warp_stream_ptr = (float*)&shmem[0][0] + warpId * SHMEM_STRIDE * N; // Adjust the beta scaler, as it'll be multiplied by alpha at the end of // each tile computation. Technically this is not generally correct (may result // in a loss of precision). Zero still needs to be specially handled though. beta /= alpha; #if USE_CPP_API nvcuda_namespace::pipeline pipe; #endif // Each CTA slides along the 128 x 128 tiles from the top left corner of the matrix to the // right and down, and selects the next tile to compute. Once there's no such tile, // all warps in this CTA exit. for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES); const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_TILES) { break; } // This warp's pointer to the C matrix data to copy memory from to shared memory. const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N; const float *src_gmem_warp_stream_ptr = &C[gmem_idx]; // Stream multiple C tiles to shared memory. #pragma unroll for (int i = 0; i < N; i++) { #if USE_CPP_API nvcuda_namespace::memcpy_async(*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId), *((int4*)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId), pipe); pipe.commit(); #else __pipeline_memcpy_async((reinterpret_cast<int4*>(&shmem_warp_stream_ptr[(SHMEM_STRIDE * i)])) + laneId, (reinterpret_cast<const int4*>(&src_gmem_warp_stream_ptr[(GLOBAL_MEM_STRIDE * i)])) + laneId, sizeof(int4)); __pipeline_commit(); #endif } #if USE_CPP_API pipe.wait_prior<0>(); #else __pipeline_wait_prior(0); #endif __syncthreads(); // These fragments will accumulate the result of A and B matrix fragment multiplications // along the K_GLOBAL dimension. wmma::fragment<wmma::accumulator, M, N, K, float> c[WARP_COL_TILES][WARP_ROW_TILES]; // Load the C matrix tiles into fragments from shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { const float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N; wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Scale the C matrix. 
#pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll for (int t = 0; t < c[i][j].num_elements; t++) { c[i][j].x[t] *= beta; } } } // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const __nv_bfloat16 *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) : (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix. size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) : (N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off); // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. const __nv_bfloat16 *lane_ptr = (warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL); // Shift the second half of the warp to the next row / column in the shared memory. shmem_idx += laneId / CHUNK_COPY_LINE_LANES; #pragma unroll for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP) * 2; i++) { // Copy 16 bytes at once in each lane. #if USE_CPP_API nvcuda_namespace::memcpy_async(*((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)), *((int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES)), pipe); pipe.commit(); #else __pipeline_memcpy_async((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES), (int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES), sizeof(int4)); __pipeline_commit(); #endif // Advance the global memory pointer and the shared memory index. lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP; shmem_idx += CHUNK_COPY_LINES_PER_WARP; } #if USE_CPP_API pipe.wait_prior<0>(); #else __pipeline_wait_prior(0); #endif __syncthreads(); // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, __nv_bfloat16, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, __nv_bfloat16, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / BLOCK_ROW_WARPS) * M * BLOCK_ROW_WARPS + (i * M); const __nv_bfloat16 *tile_ptr = &shmem[shmem_idx_a][k_step * K]; wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_BF16); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be reused // against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N); const __nv_bfloat16 *tile_ptr = &shmem[shmem_idx_b][k_step * K]; wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_BF16); } wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll // Uniform, point-wise transformations of ALL fragment elements by ALL threads in the // warp are well-defined even though element indices within fragment storage are not defined. 
for (int t = 0; t < c[i][j].num_elements; t++) c[i][j].x[t] *= alpha; float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Now that shared memory contains all the D tiles, stream them to global memory. float *dst_gmem_warp_stream_ptr = &D[gmem_idx]; #pragma unroll for (int i = 0; i < N; i++) { *((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) = *((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId); } __syncthreads(); } } // Performs an MxNxK bf16 GEMM (C=alpha*A*B + beta*C) assuming: // 1) Matrices are packed in memory. // 2) M, N and K are multiples of 16, 16 and 16 respectively. // 3) A is row major, B is column major matrix. // Note: This is a less performant version of the compute_bf16gemm kernel. It is designed for // demonstration purposes only to show the CUDA WMMA API use without relying on // availability of the shared memory. __global__ void simple_wmma_bf16gemm(__nv_bfloat16 *a, __nv_bfloat16 *b, float *c, float *d, int m_ld, int n_ld, int k_ld, float alpha, float beta) { // Leading dimensions. Packed with no transpositions. int lda = k_ld; int ldb = k_ld; int ldc = n_ld; // Tile using a 2D grid int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize; int warpN = (blockIdx.y * blockDim.y + threadIdx.y); // Declare the fragments wmma::fragment<wmma::matrix_a, M, N, K, __nv_bfloat16, wmma::row_major> a_frag; wmma::fragment<wmma::matrix_b, M, N, K, __nv_bfloat16, wmma::col_major> b_frag; wmma::fragment<wmma::accumulator, M, N, K, float> acc_frag; wmma::fragment<wmma::accumulator, M, N, K, float> c_frag; wmma::fill_fragment(acc_frag, 0.0f); // Loop over k for (int i = 0; i < k_ld; i += K) { int aCol = i; int aRow = warpM * M; int bCol = i; int bRow = warpN * N; // Bounds checking if (aRow < m_ld && aCol < k_ld && bRow < k_ld && bCol < n_ld) { // Load the inputs wmma::load_matrix_sync(a_frag, a + aCol + aRow * lda, lda); wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb); // Perform the matrix multiplication wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag); } } // Load in the current value of c, scale it by beta, and add this our result scaled by alpha int cCol = warpN * N; int cRow = warpM * M; if (cRow < m_ld && cCol < n_ld) { wmma::load_matrix_sync(c_frag, c + cCol + cRow * ldc, ldc, wmma::mem_row_major); for(int i=0; i < c_frag.num_elements; i++) { c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i]; } // Store the output wmma::store_matrix_sync(d + cCol + cRow * ldc, c_frag, ldc, wmma::mem_row_major); } } __host__ void matMultiplyOnHost(__nv_bfloat16 *A, __nv_bfloat16 *B, float *C, float alpha, float beta, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { for (int i = 0; i < numCRows; i++) { for (int j = 0; j < numCColumns; j++) { float temp = 0.0; for (int k = 0; k < numAColumns; k++) { temp += (float)A[i * numAColumns + k] * (float)B[j * numBRows + k]; } C[i*numCColumns + j] = temp * alpha + beta * C[i * numCColumns + j]; } } } int main(int argc, char **argv) { printf("Initializing...\n"); int dev = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); // Tensor cores require a GPU of Volta (SM8X) architecture or higher. if (deviceProp.major < 8) { printf("bf16TensorCoreGemm requires requires SM 8.0 or higher to use Tensor Cores. 
Exiting...\n"); exit(EXIT_WAIVED); } printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES); printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES); printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES); __nv_bfloat16 *A_h = NULL; __nv_bfloat16 *B_h = NULL; float *C_h = NULL; #if CPU_DEBUG float *result_hD = NULL; float *result_host = NULL; #endif A_h = (__nv_bfloat16*) malloc(sizeof(__nv_bfloat16) * M_GLOBAL * K_GLOBAL); B_h = (__nv_bfloat16*) malloc(sizeof(__nv_bfloat16) * K_GLOBAL * N_GLOBAL); C_h = (float*) malloc(sizeof(float) * M_GLOBAL * N_GLOBAL); #if CPU_DEBUG result_hD = (float*) malloc(sizeof(float) * M_GLOBAL * N_GLOBAL); result_host = (float*) malloc(sizeof(float) * M_GLOBAL * N_GLOBAL); #endif __nv_bfloat16 *A = NULL; __nv_bfloat16 *B = NULL; float *C = NULL; float *D = NULL; checkCudaErrors(hipMalloc((void**)&A, sizeof(__nv_bfloat16) * M_GLOBAL * K_GLOBAL)); checkCudaErrors(hipMalloc((void**)&B, sizeof(__nv_bfloat16) * N_GLOBAL * K_GLOBAL)); checkCudaErrors(hipMalloc((void**)&C, sizeof(float) * M_GLOBAL * N_GLOBAL)); checkCudaErrors(hipMalloc((void**)&D, sizeof(float) * M_GLOBAL * N_GLOBAL)); assert(((unsigned long long)A) % 128 == 0); assert(((unsigned long long)B) % 128 == 0); assert(((unsigned long long)C) % 128 == 0); assert(((unsigned long long)D) % 128 == 0); init_host_matrices(A_h, B_h, C_h); printf("Preparing data for GPU...\n"); checkCudaErrors(hipMemcpy(A, A_h, sizeof(__nv_bfloat16) * M_GLOBAL * K_GLOBAL, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(B, B_h, sizeof(__nv_bfloat16) * N_GLOBAL * K_GLOBAL, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(C, C_h, sizeof(float) * M_GLOBAL * N_GLOBAL, hipMemcpyHostToDevice)); checkCudaErrors(hipMemset(D, 0, sizeof(float) * M_GLOBAL * N_GLOBAL)); enum { // Compute the right amount of shared memory to request. // We need shared memory to hold per-CTA C and D matrix tiles, and to cache per-CTA chunks // of the A and B matrices. Therefore, the right amount to request is the maximum of those // two numbers. 
    SHMEM_SZ = MAX(sizeof(__nv_bfloat16) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_BF16) * 2,
                   M * (BLOCK_ROW_WARPS * WARP_ROW_TILES) * N * (BLOCK_COL_WARPS * WARP_COL_TILES) * sizeof(float))
  };

  printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL);

  const float alpha = 1.1f;
  const float beta = 1.2f;

  hipEvent_t start, stop;

  checkCudaErrors(hipEventCreate(&start));
  checkCudaErrors(hipEventCreate(&stop));
  checkCudaErrors(hipEventRecord(start));

  // kernel to run - default (bf16mma_shmem_gemm_async_copy == 0)
  kernels selected_kernel = bf16mma_shmem_gemm_async_copy;

  if (checkCmdLineFlag(argc, (const char **)argv, "kernel")) {
    int kernel_number = getCmdLineArgumentInt(argc, (const char **)argv, "kernel");
    if (kernel_number < 3) {
      selected_kernel = (kernels)kernel_number;
    } else {
      printf("Error: kernel number should be between 0 to 2, you have entered %d\n", kernel_number);
      exit(EXIT_FAILURE);
    }
  }

  // If enough shared memory is available on the GPU, use the high-performance kernel.
  if ((deviceProp.sharedMemPerMultiprocessor >= SHMEM_SZ) && (selected_kernel != simple_bf16mma_gemm)) {
    printf("Computing using high performance kernel = %d - %s\n", selected_kernel, kernelNames[selected_kernel]);

    switch (selected_kernel) {
      case bf16mma_shmem_gemm_async_copy:
      default:
        checkCudaErrors(hipFuncSetAttribute(compute_bf16gemm_async_copy,
                                            hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
        checkKernelErrors((hipLaunchKernelGGL(compute_bf16gemm_async_copy,
                                              dim3(deviceProp.multiProcessorCount * 2), dim3(THREADS_PER_BLOCK),
                                              SHMEM_SZ, 0, A, B, C, D, alpha, beta)));
        break;
      case bf16mma_shmem_gemm:
        checkCudaErrors(hipFuncSetAttribute(compute_bf16gemm,
                                            hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
        checkKernelErrors((hipLaunchKernelGGL(compute_bf16gemm,
                                              dim3(deviceProp.multiProcessorCount * 2), dim3(THREADS_PER_BLOCK),
                                              SHMEM_SZ, 0, A, B, C, D, alpha, beta)));
        break;
    }
#if CPU_DEBUG
    checkCudaErrors(hipMemcpy(result_hD, D, sizeof(float) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost));
#endif
  } else {
    dim3 gridDim;
    dim3 blockDim;

    // blockDim.x must be a multiple of warpSize
    // 128x4 means we have 16 warps and a block computes a 64x64 output tile
    blockDim.x = 128;
    blockDim.y = 4;

    gridDim.x = (M_GLOBAL + (M * blockDim.x / 32 - 1)) / (M * blockDim.x / 32);
    gridDim.y = (N_GLOBAL + N * blockDim.y - 1) / (N * blockDim.y);

    printf("Computing... using simple_wmma_gemm kernel\n");
    hipLaunchKernelGGL(simple_wmma_bf16gemm, dim3(gridDim), dim3(blockDim), 0, 0,
                       A, B, C, D, M_GLOBAL, N_GLOBAL, K_GLOBAL, alpha, beta);
#if CPU_DEBUG
    checkCudaErrors(hipMemcpy(result_hD, D, sizeof(float) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost));
#endif
  }

  checkCudaErrors(hipEventRecord(stop));
  checkCudaErrors(hipEventSynchronize(stop));

#if CPU_DEBUG
  printf("Verifying correctness of the computations...\n");

  memcpy(result_host, C_h, sizeof(float) * M_GLOBAL * N_GLOBAL);

  matMultiplyOnHost(A_h, B_h, result_host, alpha, beta,
                    M_GLOBAL, K_GLOBAL, K_GLOBAL, N_GLOBAL, M_GLOBAL, N_GLOBAL);

  for (int i = 0; i < N_GLOBAL * M_GLOBAL; i++) {
    if (fabs(result_hD[i] - result_host[i]) > 0.1f) {
      printf("mismatch i=%d result_hD=%f result_host=%f\n", i, result_hD[i], result_host[i]);
    }
  }
  free(result_hD);
  free(result_host);
#endif

  float milliseconds = 0;

  checkCudaErrors(hipEventElapsedTime(&milliseconds, start, stop));

  printf("Time: %f ms\n", milliseconds);
  printf("TFLOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2) / (milliseconds / 1000.)) / 1e12);

  free(A_h);
  free(B_h);
  free(C_h);
  checkCudaErrors(hipFree((void *)A));
  checkCudaErrors(hipFree((void *)B));
  checkCudaErrors(hipFree((void *)C));
  checkCudaErrors(hipFree((void *)D));

  return 0;
}
9b0186d5a1be8bf9e87e58262c5a1c37df165feb.cu
/* * Copyright 1993-2020 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // CUDA sample demonstrating a __nv_bfloat16 (E8M7) GEMM computation using the Warp Matrix Multiply // and Accumulate API introduced in CUDA 11.0. // In this program, the compute_gemm kernel computes the result of a matrix multiplication // and addition: D = alpha * A * B + beta * C. The dimensions of both C and D matrices // are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x K_GLOBAL (row-major), the B matrix // is K_GLOBAL x N_GLOBAL (column-major). // In that kernel, each CTA computes one 128 x 128 tile of the resulting matrix // per iteration. When the tile is computed, the CTA stores it to the global memory // and begins a new iteration, selecting a new 128 x 128 tile to compute. // Each CTA consists of eight warps. For the 128 x 128 tile, each warp computes eight // 16 x 16 subtiles, organized in a 2 x 4 two-dimensional array. // Warps compute the 16 x 16 subtiles using nvcuda::wmma::mma_sync operations by // moving through the K_GLOBAL dimension of the A and B matrices and accumulating // the intermediate result in the local thread state. // There are a number of simple optimizations used in the algorithm: // - The CTA copies the 128 x 128 tile of the C matrix from the global memory to // shared memory. After that is done, each warp loads the C matrix fragments from // shared memory, thus avoiding a random global memory access. // - On each internal iteration, the CTA copies a portion of the A and B matrices from // global memory to shared memory. After that, all warps in the CTA reuse the A and B // data from shared memory, thus reducing the number of data copies from global memory. // - The portions of the A and B matrices are stored in shared memory with an additional // padding (skew) to reduce the number of shared memory access bank conflicts. // (See a detailed explanation near the SKEW_BF16 macro definition.) // - When the CTA finishes computing the tiles of the resulting matrix, each warp stores // its subtiles to shared memory. The CTA then copies the shared memory contents to // global memory, again avoiding redundant random global memory accesses. // - Note that the CTA tile size is chosen to maximize the GPU register utilization, // but carefully enough to avoid local memory use. #include <assert.h> #include <stdio.h> #include <cuda.h> #include <cuda_bf16.h> #include <mma.h> #include <cuda_pipeline.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> // Externally configurable parameters. // Switch for choosing cpp interface for cuda pipeline // vs primitives interface. #define USE_CPP_API 0 #ifndef CPU_DEBUG // Set this to 1 to verify the correctness of the GPU-computed matrix. #define CPU_DEBUG 0 #endif #ifndef SHARED_MEMORY_LIMIT_64K // Set this to 0 to use more than 64 Kb of shared memory to cache data, to // improve the performance of the computations on GPU. // Note that you need a GPU that can have more than 64 Kb of shared memory // per multiprocessor. #define SHARED_MEMORY_LIMIT_64K 0 #endif // GPU configuration. #define WARP_SIZE 32 // MMA matrix tile dimensions. 
#define M 16 #define N 16 #define K 16 // GEMM configuration. #define M_TILES 512 #define N_TILES 512 #define K_TILES 512 #define M_GLOBAL (M * M_TILES) #define N_GLOBAL (N * N_TILES) #define K_GLOBAL (K * K_TILES) #define C_LAYOUT wmma::mem_row_major // Implementation constants. #define WARPS_PER_BLOCK 8 #define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK) #if SHARED_MEMORY_LIMIT_64K // With only 64 Kb shared memory available, we can fit two 8-tile chunks of // the A and B matrix data, that is (M = 16) * (K = 16) * 8 * (CHUNK_K = 8) // * sizeof(__nv_bfloat16) = 32 Kb each. // (i.e. two 8x8 arrays of tiles of 16x16 __nv_bfloat16-typed elements per CTA). // But we cannot account the 8 Kb total skew overhead, without which the performance // would be severely impacted. So we choose to reduce the chunk size in half, // i.e. the amount of A and B matrix data we cache in shared memory. // Accordingly, this doubles the number of outer iterations across the global K // dimension, which only slightly impacts the performance. #define CHUNK_K 4 #else #define CHUNK_K 8 #endif #define CHUNK_LINE_BYTES (CHUNK_K * K * sizeof(__nv_bfloat16)) #define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4)) #define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES) #define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP) #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS) #define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS) #define GLOBAL_MEM_STRIDE N_GLOBAL #define SHMEM_STRIDE (N * BLOCK_ROW_TILES) #define SHMEM_OFFSET (N * WARP_ROW_TILES) // The macro below is used to shift rows of the A matrix and columns of the B matrix // in shared memory to minimize possible bank conflicts. // Before performing the nvcuda::wmma::mma_sync operation, the warp must load the matrix // data using the nvcuda::wmma::load_matrix_sync operation. Although the memory access pattern // is not specified for that function, each lane in the warp can read one or multiple matrix // elements from different matrix rows or columns. // For shared memory, such access can result in bank conflicts if different rows / columns // of the matrix map to the same bank. By shifting each row and column by a few bytes, we // make sure that they map to different banks, thus reducing the number of possible bank // conflicts. // The number of 16 two-byte "__nv_bfloat16" elements is chosen as the minimum possible shift because // we must keep each row and column 256-bit aligned, as required by nvcuda::wmma::load_matrix_sync. #define SKEW_BF16 16 #define checkKernelErrors(expr) do { \ expr; \ \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, # expr, cudaGetErrorString(__err)); \ abort(); \ } \ } while(0) enum kernels { bf16mma_shmem_gemm_async_copy = 0, // __nv_bfloat16 MMA shmem using kernel with async_copy bf16mma_shmem_gemm = 1, // __nv_bfloat16 MMA shmem using kernel normal copy (without async_copy). simple_bf16mma_gemm = 2 // __nv_bfloat16 MMA non-shmem using simple kernel. 
}; const char* kernelNames[] = {"compute_bf16gemm_async_copy", "compute_bf16gemm", "simple_wmma_bf16gemm"}; using namespace nvcuda; namespace nvcuda_namespace = nvcuda::experimental; __host__ void init_host_matrices(__nv_bfloat16 *a, __nv_bfloat16 *b, float *c) { for (int i = 0; i < M_GLOBAL; i++) { for (int j = 0; j < K_GLOBAL; j++) { a[i*K_GLOBAL+j] = (__nv_bfloat16)(rand() % 3); } } for (int i = 0; i < N_GLOBAL; i++) { for (int j = 0; j < K_GLOBAL; j++) { b[i*K_GLOBAL+j] = (__nv_bfloat16)(rand() % 3); } } for (int t = 0; t < M_GLOBAL * N_GLOBAL; t++) { c[t] = (float)(rand() % 3); } } __global__ void compute_bf16gemm(const __nv_bfloat16 *A, const __nv_bfloat16 *B, const float *C, float *D, float alpha, float beta) { extern __shared__ __nv_bfloat16 shmem[][CHUNK_K * K + SKEW_BF16]; // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // This pointer is used to access the C and D matrix tiles this warp computes. float *shmem_warp_tile_ptr = (float*)&shmem[0][0] + (warpId / BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET; // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory. float *shmem_warp_stream_ptr = (float*)&shmem[0][0] + warpId * SHMEM_STRIDE * N; // Adjust the beta scaler, as it'll be multiplied by alpha at the end of // each tile computation. Technically this is not generally correct (may result // in a loss of precision). Zero still needs to be specially handled though. beta /= alpha; // Each CTA slides along the 128 x 128 tiles from the top left corner of the matrix to the // right and down, and selects the next tile to compute. Once there's no such tile, // all warps in this CTA exit. for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES); const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_TILES) { break; } // This warp's pointer to the C matrix data to copy memory from to shared memory. const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N; const float *src_gmem_warp_stream_ptr = &C[gmem_idx]; // Stream multiple C tiles to shared memory. #pragma unroll for (int i = 0; i < N; i++) { *((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) = *((int4*)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId); } __syncthreads(); // These fragments will accumulate the result of A and B matrix fragment multiplications // along the K_GLOBAL dimension. wmma::fragment<wmma::accumulator, M, N, K, float> c[WARP_COL_TILES][WARP_ROW_TILES]; // Load the C matrix tiles into fragments from shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { const float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N; wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Scale the C matrix. 
#pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll for (int t = 0; t < c[i][j].num_elements; t++) { c[i][j].x[t] *= beta; } } } // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const __nv_bfloat16 *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) : (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix. size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) : (N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off); // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. const __nv_bfloat16 *lane_ptr = (warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL); // Shift the second half of the warp to the next row / column in the shared memory. shmem_idx += laneId / CHUNK_COPY_LINE_LANES; #pragma unroll for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP) * 2; i++) { // Copy 16 bytes at once in each lane. *((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) = *((int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES)); // Advance the global memory pointer and the shared memory index. lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP; shmem_idx += CHUNK_COPY_LINES_PER_WARP; } __syncthreads(); // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, __nv_bfloat16, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, __nv_bfloat16, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId/BLOCK_ROW_WARPS) * M * BLOCK_ROW_WARPS + (i * M); const __nv_bfloat16 *tile_ptr = &shmem[shmem_idx_a][k_step * K]; wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_BF16); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be reused // against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N); const __nv_bfloat16 *tile_ptr = &shmem[shmem_idx_b][k_step * K]; wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_BF16); } wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll // Uniform, point-wise transformations of ALL fragment elements by ALL threads in the // warp are well-defined even though element indices within fragment storage are not defined. for (int t = 0; t < c[i][j].num_elements; t++) c[i][j].x[t] *= alpha; float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Now that shared memory contains all the D tiles, stream them to global memory. 
float *dst_gmem_warp_stream_ptr = &D[gmem_idx]; #pragma unroll for (int i = 0; i < N; i++) { *((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) = *((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId); } __syncthreads(); } } __global__ void compute_bf16gemm_async_copy(const __nv_bfloat16 *A, const __nv_bfloat16 *B, const float *C, float *D, float alpha, float beta) { extern __shared__ __nv_bfloat16 shmem[][CHUNK_K * K + SKEW_BF16]; // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // This pointer is used to access the C and D matrix tiles this warp computes. float *shmem_warp_tile_ptr = (float*)&shmem[0][0] + (warpId / BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET; // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory. float *shmem_warp_stream_ptr = (float*)&shmem[0][0] + warpId * SHMEM_STRIDE * N; // Adjust the beta scaler, as it'll be multiplied by alpha at the end of // each tile computation. Technically this is not generally correct (may result // in a loss of precision). Zero still needs to be specially handled though. beta /= alpha; #if USE_CPP_API nvcuda_namespace::pipeline pipe; #endif // Each CTA slides along the 128 x 128 tiles from the top left corner of the matrix to the // right and down, and selects the next tile to compute. Once there's no such tile, // all warps in this CTA exit. for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES); const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_TILES) { break; } // This warp's pointer to the C matrix data to copy memory from to shared memory. const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N; const float *src_gmem_warp_stream_ptr = &C[gmem_idx]; // Stream multiple C tiles to shared memory. #pragma unroll for (int i = 0; i < N; i++) { #if USE_CPP_API nvcuda_namespace::memcpy_async(*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId), *((int4*)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId), pipe); pipe.commit(); #else __pipeline_memcpy_async((reinterpret_cast<int4*>(&shmem_warp_stream_ptr[(SHMEM_STRIDE * i)])) + laneId, (reinterpret_cast<const int4*>(&src_gmem_warp_stream_ptr[(GLOBAL_MEM_STRIDE * i)])) + laneId, sizeof(int4)); __pipeline_commit(); #endif } #if USE_CPP_API pipe.wait_prior<0>(); #else __pipeline_wait_prior(0); #endif __syncthreads(); // These fragments will accumulate the result of A and B matrix fragment multiplications // along the K_GLOBAL dimension. wmma::fragment<wmma::accumulator, M, N, K, float> c[WARP_COL_TILES][WARP_ROW_TILES]; // Load the C matrix tiles into fragments from shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { const float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N; wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Scale the C matrix. 
#pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll for (int t = 0; t < c[i][j].num_elements; t++) { c[i][j].x[t] *= beta; } } } // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const __nv_bfloat16 *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) : (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix. size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) : (N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off); // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. const __nv_bfloat16 *lane_ptr = (warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL); // Shift the second half of the warp to the next row / column in the shared memory. shmem_idx += laneId / CHUNK_COPY_LINE_LANES; #pragma unroll for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP) * 2; i++) { // Copy 16 bytes at once in each lane. #if USE_CPP_API nvcuda_namespace::memcpy_async(*((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)), *((int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES)), pipe); pipe.commit(); #else __pipeline_memcpy_async((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES), (int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES), sizeof(int4)); __pipeline_commit(); #endif // Advance the global memory pointer and the shared memory index. lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP; shmem_idx += CHUNK_COPY_LINES_PER_WARP; } #if USE_CPP_API pipe.wait_prior<0>(); #else __pipeline_wait_prior(0); #endif __syncthreads(); // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, __nv_bfloat16, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, __nv_bfloat16, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / BLOCK_ROW_WARPS) * M * BLOCK_ROW_WARPS + (i * M); const __nv_bfloat16 *tile_ptr = &shmem[shmem_idx_a][k_step * K]; wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_BF16); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be reused // against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N); const __nv_bfloat16 *tile_ptr = &shmem[shmem_idx_b][k_step * K]; wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_BF16); } wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll // Uniform, point-wise transformations of ALL fragment elements by ALL threads in the // warp are well-defined even though element indices within fragment storage are not defined. 
for (int t = 0; t < c[i][j].num_elements; t++) c[i][j].x[t] *= alpha; float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Now that shared memory contains all the D tiles, stream them to global memory. float *dst_gmem_warp_stream_ptr = &D[gmem_idx]; #pragma unroll for (int i = 0; i < N; i++) { *((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) = *((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId); } __syncthreads(); } } // Performs an MxNxK bf16 GEMM (C=alpha*A*B + beta*C) assuming: // 1) Matrices are packed in memory. // 2) M, N and K are multiples of 16, 16 and 16 respectively. // 3) A is row major, B is column major matrix. // Note: This is a less performant version of the compute_bf16gemm kernel. It is designed for // demonstration purposes only to show the CUDA WMMA API use without relying on // availability of the shared memory. __global__ void simple_wmma_bf16gemm(__nv_bfloat16 *a, __nv_bfloat16 *b, float *c, float *d, int m_ld, int n_ld, int k_ld, float alpha, float beta) { // Leading dimensions. Packed with no transpositions. int lda = k_ld; int ldb = k_ld; int ldc = n_ld; // Tile using a 2D grid int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize; int warpN = (blockIdx.y * blockDim.y + threadIdx.y); // Declare the fragments wmma::fragment<wmma::matrix_a, M, N, K, __nv_bfloat16, wmma::row_major> a_frag; wmma::fragment<wmma::matrix_b, M, N, K, __nv_bfloat16, wmma::col_major> b_frag; wmma::fragment<wmma::accumulator, M, N, K, float> acc_frag; wmma::fragment<wmma::accumulator, M, N, K, float> c_frag; wmma::fill_fragment(acc_frag, 0.0f); // Loop over k for (int i = 0; i < k_ld; i += K) { int aCol = i; int aRow = warpM * M; int bCol = i; int bRow = warpN * N; // Bounds checking if (aRow < m_ld && aCol < k_ld && bRow < k_ld && bCol < n_ld) { // Load the inputs wmma::load_matrix_sync(a_frag, a + aCol + aRow * lda, lda); wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb); // Perform the matrix multiplication wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag); } } // Load in the current value of c, scale it by beta, and add this our result scaled by alpha int cCol = warpN * N; int cRow = warpM * M; if (cRow < m_ld && cCol < n_ld) { wmma::load_matrix_sync(c_frag, c + cCol + cRow * ldc, ldc, wmma::mem_row_major); for(int i=0; i < c_frag.num_elements; i++) { c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i]; } // Store the output wmma::store_matrix_sync(d + cCol + cRow * ldc, c_frag, ldc, wmma::mem_row_major); } } __host__ void matMultiplyOnHost(__nv_bfloat16 *A, __nv_bfloat16 *B, float *C, float alpha, float beta, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { for (int i = 0; i < numCRows; i++) { for (int j = 0; j < numCColumns; j++) { float temp = 0.0; for (int k = 0; k < numAColumns; k++) { temp += (float)A[i * numAColumns + k] * (float)B[j * numBRows + k]; } C[i*numCColumns + j] = temp * alpha + beta * C[i * numCColumns + j]; } } } int main(int argc, char **argv) { printf("Initializing...\n"); int dev = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); // Tensor cores require a GPU of Volta (SM8X) architecture or higher. if (deviceProp.major < 8) { printf("bf16TensorCoreGemm requires requires SM 8.0 or higher to use Tensor Cores. 
Exiting...\n"); exit(EXIT_WAIVED); } printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES); printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES); printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES); __nv_bfloat16 *A_h = NULL; __nv_bfloat16 *B_h = NULL; float *C_h = NULL; #if CPU_DEBUG float *result_hD = NULL; float *result_host = NULL; #endif A_h = (__nv_bfloat16*) malloc(sizeof(__nv_bfloat16) * M_GLOBAL * K_GLOBAL); B_h = (__nv_bfloat16*) malloc(sizeof(__nv_bfloat16) * K_GLOBAL * N_GLOBAL); C_h = (float*) malloc(sizeof(float) * M_GLOBAL * N_GLOBAL); #if CPU_DEBUG result_hD = (float*) malloc(sizeof(float) * M_GLOBAL * N_GLOBAL); result_host = (float*) malloc(sizeof(float) * M_GLOBAL * N_GLOBAL); #endif __nv_bfloat16 *A = NULL; __nv_bfloat16 *B = NULL; float *C = NULL; float *D = NULL; checkCudaErrors(cudaMalloc((void**)&A, sizeof(__nv_bfloat16) * M_GLOBAL * K_GLOBAL)); checkCudaErrors(cudaMalloc((void**)&B, sizeof(__nv_bfloat16) * N_GLOBAL * K_GLOBAL)); checkCudaErrors(cudaMalloc((void**)&C, sizeof(float) * M_GLOBAL * N_GLOBAL)); checkCudaErrors(cudaMalloc((void**)&D, sizeof(float) * M_GLOBAL * N_GLOBAL)); assert(((unsigned long long)A) % 128 == 0); assert(((unsigned long long)B) % 128 == 0); assert(((unsigned long long)C) % 128 == 0); assert(((unsigned long long)D) % 128 == 0); init_host_matrices(A_h, B_h, C_h); printf("Preparing data for GPU...\n"); checkCudaErrors(cudaMemcpy(A, A_h, sizeof(__nv_bfloat16) * M_GLOBAL * K_GLOBAL, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(B, B_h, sizeof(__nv_bfloat16) * N_GLOBAL * K_GLOBAL, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(C, C_h, sizeof(float) * M_GLOBAL * N_GLOBAL, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemset(D, 0, sizeof(float) * M_GLOBAL * N_GLOBAL)); enum { // Compute the right amount of shared memory to request. // We need shared memory to hold per-CTA C and D matrix tiles, and to cache per-CTA chunks // of the A and B matrices. Therefore, the right amount to request is the maximum of those // two numbers. 
SHMEM_SZ = MAX(sizeof(__nv_bfloat16) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_BF16) * 2, M * (BLOCK_ROW_WARPS * WARP_ROW_TILES) * N * (BLOCK_COL_WARPS * WARP_COL_TILES) * sizeof(float)) }; printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL); const float alpha = 1.1f; const float beta = 1.2f; cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaEventRecord(start)); // kernel to run - default (b16mma_shmem_gemm_async_copy == 0) kernels selected_kernel = bf16mma_shmem_gemm_async_copy; if (checkCmdLineFlag(argc, (const char **)argv, "kernel")) { int kernel_number = getCmdLineArgumentInt(argc, (const char **)argv, "kernel"); if (kernel_number < 3) { selected_kernel = (kernels)kernel_number; } else { printf("Error: kernel number should be between 0 to 2, you have entered %d\n", kernel_number); exit(EXIT_FAILURE); } } // If enough shared memory available on the GPU use high performant kernel if ((deviceProp.sharedMemPerMultiprocessor >= SHMEM_SZ) && (selected_kernel != simple_bf16mma_gemm)) { printf("Computing using high performance kernel = %d - %s\n", selected_kernel, kernelNames[selected_kernel]); switch (selected_kernel) { case bf16mma_shmem_gemm_async_copy : default: checkCudaErrors(cudaFuncSetAttribute(compute_bf16gemm_async_copy, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); checkKernelErrors((compute_bf16gemm_async_copy<<<deviceProp.multiProcessorCount*2, THREADS_PER_BLOCK, SHMEM_SZ>>>(A, B, C, D, alpha, beta))); break; case bf16mma_shmem_gemm : checkCudaErrors(cudaFuncSetAttribute(compute_bf16gemm, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); checkKernelErrors((compute_bf16gemm<<<deviceProp.multiProcessorCount*2, THREADS_PER_BLOCK, SHMEM_SZ>>>(A, B, C, D, alpha, beta))); break; } #if CPU_DEBUG checkCudaErrors(cudaMemcpy(result_hD, D, sizeof(float)*M_GLOBAL*N_GLOBAL, cudaMemcpyDeviceToHost)); #endif } else { dim3 gridDim; dim3 blockDim; // blockDim.x must be a multple of warpSize // 128x4 means we have 16 warps and a block computes a 64x64 output tile blockDim.x = 128; blockDim.y = 4; gridDim.x = (M_GLOBAL + (M * blockDim.x / 32 - 1)) / (M * blockDim.x / 32); gridDim.y = (N_GLOBAL + N * blockDim.y - 1) / (N * blockDim.y); printf("Computing... using simple_wmma_gemm kernel\n"); simple_wmma_bf16gemm<<<gridDim, blockDim>>>(A, B, C, D, M_GLOBAL, N_GLOBAL, K_GLOBAL, alpha, beta); #if CPU_DEBUG checkCudaErrors(cudaMemcpy(result_hD, D, sizeof(float) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost)); #endif } checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); #if CPU_DEBUG printf("Verifying correctness of the computations...\n"); memcpy(result_host, C_h, sizeof(float) * M_GLOBAL * N_GLOBAL); matMultiplyOnHost(A_h, B_h, result_host, alpha, beta, M_GLOBAL, K_GLOBAL, K_GLOBAL, N_GLOBAL, M_GLOBAL, N_GLOBAL); for (int i = 0; i < N_GLOBAL * M_GLOBAL; i++) { if (fabs(result_hD[i] - result_host[i]) > 0.1f) { printf("mismatch i=%d result_hD=%f result_host=%f\n", i, result_hD[i], result_host[i]); } } free(result_hD); free(result_host); #endif float milliseconds = 0; checkCudaErrors(cudaEventElapsedTime(&milliseconds, start, stop)); printf("Time: %f ms\n", milliseconds); printf("TFLOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2)/(milliseconds/1000.)) / 1e12); free(A_h); free(B_h); free(C_h); checkCudaErrors(cudaFree((void*)A)); checkCudaErrors(cudaFree((void*)B)); checkCudaErrors(cudaFree((void*)C)); checkCudaErrors(cudaFree((void*)D)); return 0; }
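The comment block at the top of the GEMM pair above describes the tiling scheme (each CTA computes a 128 x 128 output tile, eight warps each covering a 2 x 4 grid of 16 x 16 subtiles) and the shared-memory sizing behind SHMEM_SZ. The standalone snippet below restates the relevant macros from the sample (the CHUNK_K = 8 path) and checks the derived sizes at compile time; it is only a sketch of that arithmetic, not part of either file.

#include <cstdio>

// Restated from the sample above (SHARED_MEMORY_LIMIT_64K == 0, so CHUNK_K == 8).
#define M 16
#define N 16
#define K 16
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)  // 8 tiles per CTA row
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)  // 8 tiles per CTA column
#define CHUNK_K 8
#define SKEW_BF16 16

// Each CTA output tile is (BLOCK_COL_TILES * M) x (BLOCK_ROW_TILES * N) = 128 x 128.
static_assert(BLOCK_COL_TILES * M == 128, "CTA tile height");
static_assert(BLOCK_ROW_TILES * N == 128, "CTA tile width");

// SHMEM_SZ in the sample is the larger of the A/B staging area and the C/D tile
// (sizeof(__nv_bfloat16) == 2 bytes).
constexpr unsigned long shmemAB = 2UL * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_BF16) * 2;
constexpr unsigned long shmemCD = sizeof(float) * (BLOCK_COL_TILES * M) * (BLOCK_ROW_TILES * N);
static_assert(shmemAB == 72 * 1024, "A/B staging area is 72 KiB");
static_assert(shmemCD == 64 * 1024, "C/D tile is 64 KiB");

int main() {
    // The 72 KiB maximum exceeds the default dynamic shared-memory limit, which is why
    // the sample raises cudaFuncAttributeMaxDynamicSharedMemorySize before launching.
    const unsigned long shmemSz = shmemAB > shmemCD ? shmemAB : shmemCD;
    printf("SHMEM_SZ = %lu bytes (%lu Kb)\n", shmemSz, shmemSz / 1024UL);
    return 0;
}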
c58e91c3e1971c4cf2c2c84477d21ad4d45a3658.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> inline void checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { printf("Error: %s : %d" , __FILE__, __LINE__); printf("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result)); exit(1); } #endif } void initialData(int *ip, const int size) { int i; srand(time(NULL)); for (i = 0; i < size; i++) { ip[i] = (int)(rand() % 25); } return; } void sumMatrixOnHost(int *A, int *B, int *C, const int nx, const int ny) { int *ia = A; int *ib = B; int *ic = C; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { ic[ix] = ia[ix] + ib[ix]; } ia += nx; ib += nx; ic += nx; } return; } void checkResult(int *hostRef, int *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("host %f gpu %f\n", hostRef[i], gpuRef[i]); break; } } if (match) printf("Arrays match.\n\n"); else printf("Arrays do not match.\n\n"); } // grid 2D block 2D __global__ void sumMatrixOnGPU2D(int *MatA, int *MatB, int *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) MatC[idx] = MatA[idx] + MatB[idx]; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; hipDeviceProp_t deviceProp; checkCuda(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); checkCuda(hipSetDevice(dev)); // set up data size of matrix int nx = 1 << 14; int ny = 1 << 14; int nxy = nx * ny; int nBytes = nxy * sizeof(int); printf("Matrix size: nx %d ny %d\n", nx, ny); // malloc host memory int *h_A, *h_B, *hostRef, *gpuRef; h_A = (int *)malloc(nBytes); h_B = (int *)malloc(nBytes); hostRef = (int *)malloc(nBytes); gpuRef = (int *)malloc(nBytes); // initialize data at host side initialData(h_A, nxy); initialData(h_B, nxy); // add matrix at host side for result checks sumMatrixOnHost(h_A, h_B, hostRef, nx, ny); // malloc device global memory int *d_MatA, *d_MatB, *d_MatC; checkCuda(hipMalloc((void **)&d_MatA, nBytes)); checkCuda(hipMalloc((void **)&d_MatB, nBytes)); checkCuda(hipMalloc((void **)&d_MatC, nBytes)); // transfer data from host to device checkCuda(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice)); checkCuda(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice)); // invoke kernel at host side int dimx = 32; int dimy = 16; dim3 block(dimx, dimy); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); sumMatrixOnGPU2D << <grid, block >> >(d_MatA, d_MatB, d_MatC, nx, ny); checkCuda(hipDeviceSynchronize()); printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>>\n", grid.x, grid.y, block.x, block.y); // check kernel error checkCuda(hipGetLastError()); // copy kernel result back to host side checkCuda(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost)); // check device results checkResult(hostRef, gpuRef, nxy); // free device global memory checkCuda(hipFree(d_MatA)); checkCuda(hipFree(d_MatB)); checkCuda(hipFree(d_MatC)); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device checkCuda(hipDeviceReset()); return (0); }
c58e91c3e1971c4cf2c2c84477d21ad4d45a3658.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> inline void checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { printf("Error: %s : %d" , __FILE__, __LINE__); printf("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result)); exit(1); } #endif } void initialData(int *ip, const int size) { int i; srand(time(NULL)); for (i = 0; i < size; i++) { ip[i] = (int)(rand() % 25); } return; } void sumMatrixOnHost(int *A, int *B, int *C, const int nx, const int ny) { int *ia = A; int *ib = B; int *ic = C; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { ic[ix] = ia[ix] + ib[ix]; } ia += nx; ib += nx; ic += nx; } return; } void checkResult(int *hostRef, int *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("host %f gpu %f\n", hostRef[i], gpuRef[i]); break; } } if (match) printf("Arrays match.\n\n"); else printf("Arrays do not match.\n\n"); } // grid 2D block 2D __global__ void sumMatrixOnGPU2D(int *MatA, int *MatB, int *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) MatC[idx] = MatA[idx] + MatB[idx]; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; cudaDeviceProp deviceProp; checkCuda(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); checkCuda(cudaSetDevice(dev)); // set up data size of matrix int nx = 1 << 14; int ny = 1 << 14; int nxy = nx * ny; int nBytes = nxy * sizeof(int); printf("Matrix size: nx %d ny %d\n", nx, ny); // malloc host memory int *h_A, *h_B, *hostRef, *gpuRef; h_A = (int *)malloc(nBytes); h_B = (int *)malloc(nBytes); hostRef = (int *)malloc(nBytes); gpuRef = (int *)malloc(nBytes); // initialize data at host side initialData(h_A, nxy); initialData(h_B, nxy); // add matrix at host side for result checks sumMatrixOnHost(h_A, h_B, hostRef, nx, ny); // malloc device global memory int *d_MatA, *d_MatB, *d_MatC; checkCuda(cudaMalloc((void **)&d_MatA, nBytes)); checkCuda(cudaMalloc((void **)&d_MatB, nBytes)); checkCuda(cudaMalloc((void **)&d_MatC, nBytes)); // transfer data from host to device checkCuda(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice)); // invoke kernel at host side int dimx = 32; int dimy = 16; dim3 block(dimx, dimy); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); sumMatrixOnGPU2D << <grid, block >> >(d_MatA, d_MatB, d_MatC, nx, ny); checkCuda(cudaDeviceSynchronize()); printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>>\n", grid.x, grid.y, block.x, block.y); // check kernel error checkCuda(cudaGetLastError()); // copy kernel result back to host side checkCuda(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost)); // check device results checkResult(hostRef, gpuRef, nxy); // free device global memory checkCuda(cudaFree(d_MatA)); checkCuda(cudaFree(d_MatB)); checkCuda(cudaFree(d_MatC)); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device checkCuda(cudaDeviceReset()); return (0); }
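// ---------------------------------------------------------------------------
// Illustrative sketch (not taken from the file above): the launch above uses
// the ceiling-division idiom grid = (n + block - 1) / block. Worked out for
// this sample, nx = ny = 1 << 14 = 16384 with a (32, 16) block gives a
// (512, 1024) grid. The helper below captures the idiom, and checkResultInt is
// a hypothetical variant of the result check that uses %d format specifiers,
// since the arrays being compared here hold ints rather than floats.
#include <cstdio>

static unsigned int ceilDiv(unsigned int n, unsigned int block) {
    return (n + block - 1) / block;          // smallest grid that covers n elements
}

static void checkResultInt(const int *hostRef, const int *gpuRef, int n) {
    for (int i = 0; i < n; ++i) {
        if (hostRef[i] != gpuRef[i]) {       // exact comparison is fine for integers
            printf("mismatch at %d: host %d gpu %d\n", i, hostRef[i], gpuRef[i]);
            return;
        }
    }
    printf("Arrays match.\n");
}

int main() {
    printf("grid = (%u, %u)\n", ceilDiv(16384, 32), ceilDiv(16384, 16));
    int a[2] = {1, 2}, b[2] = {1, 2};
    checkResultInt(a, b, 2);
    return 0;
}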
e3cb80c635d08656d66aaa802e2d159178bfbdbd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/FunctionOfAMatrixUtils.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> namespace at { namespace native { namespace { template <int n_threads, int n_elems_per_thread, typename func_t> C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread) __global__ void _elemwise_kernel(int total_n_elems, func_t f) { constexpr int total_work_block = n_threads * n_elems_per_thread; int idx = total_work_block * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < n_elems_per_thread; ++i) { if (idx < total_n_elems) { f(idx); idx += n_threads; } } } template <int n_threads, int n_elems_per_thread, typename func_t> void _lauch_kernel(int total_n_elems, const func_t& f) { TORCH_INTERNAL_ASSERT( total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max() ); dim3 block(n_threads); constexpr int total_work_block = n_threads * n_elems_per_thread; dim3 grid((total_n_elems + total_work_block - 1) / total_work_block); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( _elemwise_kernel<n_threads, n_elems_per_thread, func_t>) , dim3(grid), dim3(block), 0, stream, total_n_elems, f); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename scalar_t> void _compute_linear_combination_internal_kernel( TensorIterator& iter, int32_t in_stride, int32_t coeff_stride, int32_t num_summations ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _compute_linear_combination_internal_kernel<scalar_t>( sub_iter, in_stride, coeff_stride, num_summations ); } return; } auto offset_calc = make_offset_calculator<3>(iter); char* __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ in_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); char* __restrict__ coeff_ptr = reinterpret_cast<char*>(iter.data_ptr(2)); auto loop = [=]C10_DEVICE(int idx) { auto offsets = offset_calc.get(idx); auto* __restrict__ out_data = reinterpret_cast<scalar_t*>( out_ptr + offsets[0] ); auto* __restrict__ in_data = reinterpret_cast<scalar_t*>( in_ptr + offsets[1] ); using primitive_t = typename scalar_value_type<scalar_t>::type; auto* __restrict__ coeff_data = reinterpret_cast<primitive_t*>( coeff_ptr + offsets[2] ); // perform summation for (int32_t i = 0; i < num_summations; ++i) { *out_data += in_data[i * in_stride] * coeff_data[i * coeff_stride]; } }; _lauch_kernel<num_threads, thread_work_size>(iter.numel(), loop); } void _compute_linear_combination_cuda_kernel( TensorIterator& iter, int64_t in_stride, int64_t coeff_stride, int64_t num_summations ) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "_compute_linear_combination_cuda", [&] () { _compute_linear_combination_internal_kernel<scalar_t>( iter, in_stride, coeff_stride, num_summations ); } ); } } REGISTER_DISPATCH(_compute_linear_combination_stub, &_compute_linear_combination_cuda_kernel); }} // namespace at::native
e3cb80c635d08656d66aaa802e2d159178bfbdbd.cu
#include <ATen/native/FunctionOfAMatrixUtils.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> namespace at { namespace native { namespace { template <int n_threads, int n_elems_per_thread, typename func_t> C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread) __global__ void _elemwise_kernel(int total_n_elems, func_t f) { constexpr int total_work_block = n_threads * n_elems_per_thread; int idx = total_work_block * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < n_elems_per_thread; ++i) { if (idx < total_n_elems) { f(idx); idx += n_threads; } } } template <int n_threads, int n_elems_per_thread, typename func_t> void _lauch_kernel(int total_n_elems, const func_t& f) { TORCH_INTERNAL_ASSERT( total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max() ); dim3 block(n_threads); constexpr int total_work_block = n_threads * n_elems_per_thread; dim3 grid((total_n_elems + total_work_block - 1) / total_work_block); auto stream = at::cuda::getCurrentCUDAStream(); _elemwise_kernel<n_threads, n_elems_per_thread, func_t> <<<grid, block, 0, stream>>>(total_n_elems, f); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename scalar_t> void _compute_linear_combination_internal_kernel( TensorIterator& iter, int32_t in_stride, int32_t coeff_stride, int32_t num_summations ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _compute_linear_combination_internal_kernel<scalar_t>( sub_iter, in_stride, coeff_stride, num_summations ); } return; } auto offset_calc = make_offset_calculator<3>(iter); char* __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ in_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); char* __restrict__ coeff_ptr = reinterpret_cast<char*>(iter.data_ptr(2)); auto loop = [=]C10_DEVICE(int idx) { auto offsets = offset_calc.get(idx); auto* __restrict__ out_data = reinterpret_cast<scalar_t*>( out_ptr + offsets[0] ); auto* __restrict__ in_data = reinterpret_cast<scalar_t*>( in_ptr + offsets[1] ); using primitive_t = typename scalar_value_type<scalar_t>::type; auto* __restrict__ coeff_data = reinterpret_cast<primitive_t*>( coeff_ptr + offsets[2] ); // perform summation for (int32_t i = 0; i < num_summations; ++i) { *out_data += in_data[i * in_stride] * coeff_data[i * coeff_stride]; } }; _lauch_kernel<num_threads, thread_work_size>(iter.numel(), loop); } void _compute_linear_combination_cuda_kernel( TensorIterator& iter, int64_t in_stride, int64_t coeff_stride, int64_t num_summations ) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "_compute_linear_combination_cuda", [&] () { _compute_linear_combination_internal_kernel<scalar_t>( iter, in_stride, coeff_stride, num_summations ); } ); } } REGISTER_DISPATCH(_compute_linear_combination_stub, &_compute_linear_combination_cuda_kernel); }} // namespace at::native
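// ---------------------------------------------------------------------------
// Illustrative sketch (not taken from the file above): _elemwise_kernel gives
// each block n_threads * n_elems_per_thread logical indices and strides by
// n_threads inside the unrolled loop, so neighbouring threads keep touching
// neighbouring elements. A self-contained version of the same launch pattern
// outside of ATen might look like this; the lambda, sizes, and names are
// placeholders, and compiling a __device__ lambda as a kernel argument needs
// nvcc --extended-lambda (--expt-extended-lambda on older toolkits).
#include <cstdio>
#include <cuda_runtime.h>

constexpr int kThreads = 128;
constexpr int kElemsPerThread = 4;

template <typename Func>
__global__ void elemwiseKernel(int totalN, Func f) {
    constexpr int workPerBlock = kThreads * kElemsPerThread;
    int idx = workPerBlock * blockIdx.x + threadIdx.x;
#pragma unroll
    for (int i = 0; i < kElemsPerThread; ++i) {
        if (idx < totalN) {
            f(idx);                          // coalesced: thread t handles idx, idx+kThreads, ...
            idx += kThreads;
        }
    }
}

int main() {
    const int n = 1 << 20;
    float *d_x;
    cudaMalloc(&d_x, n * sizeof(float));

    auto scale = [d_x] __device__ (int i) { d_x[i] = 2.0f * i; };

    const int workPerBlock = kThreads * kElemsPerThread;
    const int blocks = (n + workPerBlock - 1) / workPerBlock;
    elemwiseKernel<<<blocks, kThreads>>>(n, scale);
    cudaDeviceSynchronize();
    printf("launched %d blocks of %d threads: %s\n",
           blocks, kThreads, cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_x);
    return 0;
}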
7a34d968fc27ce06e89e74f46853d49c9af82017.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * SigmoidCalculater.cu
 *
 *  Created on: 28/05/2013
 *      Author: Zeyi Wen
 *  Copyright @DBGroup University of Melbourne
 **/

#include "kernelCalculater.h"
#include "kernelCalGPUHelper.h"
#include "../my_assert.h"

/*
 * @brief: compute a certain # of rows of the Hessian Matrix by the Sigmoid function
 * @param: pfDevSamples: a device pointer to the whole samples. These samples indicate which rows are computed in this round
 * @param: pfDevTransSamples: a device pointer to the whole samples with transposition
 * @param: pfDevHessianRows: a device pointer to a certain # of Hessian Matrix rows to be computed
 * @param: nNumofSamples: indicates the length of pfDevTransSamples
 * @param: nNumofRows: indicates the length of pfDevSamples
 */
bool CSigmoidKernel::ComputeHessianRows(float_point *pfDevSamples, float_point *pfDevTransSamples, float_point *pfDevHessianRows,
                                        const int &nNumofSamples, const int &nNumofDim,
                                        const int &nNumofRows, const int &nStartRow)
{
    bool bReturn = true;

    int nBlockSize = 0;
    dim3 dimGrid;
    GetGPUSpec(dimGrid, nBlockSize, nNumofSamples, nNumofRows);
    assert(nBlockSize >= 0);

    hipLaunchKernelGGL((SigmoidKernel), dim3(dimGrid), dim3(nBlockSize), nBlockSize * sizeof(float_point), 0,
                       pfDevSamples, pfDevTransSamples, pfDevHessianRows,
                       nNumofSamples, nNumofDim, nStartRow, m_fCoef);
    hipDeviceSynchronize();

    assert(hipGetLastError() == hipSuccess);

    return bReturn;
}
7a34d968fc27ce06e89e74f46853d49c9af82017.cu
/*
 * SigmoidCalculater.cu
 *
 *  Created on: 28/05/2013
 *      Author: Zeyi Wen
 *  Copyright @DBGroup University of Melbourne
 **/

#include "kernelCalculater.h"
#include "kernelCalGPUHelper.h"
#include "../my_assert.h"

/*
 * @brief: compute a certain # of rows of the Hessian Matrix by the Sigmoid function
 * @param: pfDevSamples: a device pointer to the whole samples. These samples indicate which rows are computed in this round
 * @param: pfDevTransSamples: a device pointer to the whole samples with transposition
 * @param: pfDevHessianRows: a device pointer to a certain # of Hessian Matrix rows to be computed
 * @param: nNumofSamples: indicates the length of pfDevTransSamples
 * @param: nNumofRows: indicates the length of pfDevSamples
 */
bool CSigmoidKernel::ComputeHessianRows(float_point *pfDevSamples, float_point *pfDevTransSamples, float_point *pfDevHessianRows,
                                        const int &nNumofSamples, const int &nNumofDim,
                                        const int &nNumofRows, const int &nStartRow)
{
    bool bReturn = true;

    int nBlockSize = 0;
    dim3 dimGrid;
    GetGPUSpec(dimGrid, nBlockSize, nNumofSamples, nNumofRows);
    assert(nBlockSize >= 0);

    SigmoidKernel<<<dimGrid, nBlockSize, nBlockSize * sizeof(float_point)>>>(pfDevSamples, pfDevTransSamples, pfDevHessianRows,
                                                                             nNumofSamples, nNumofDim, nStartRow, m_fCoef);
    cudaDeviceSynchronize();

    assert(cudaGetLastError() == cudaSuccess);

    return bReturn;
}
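// ---------------------------------------------------------------------------
// Illustrative sketch (not taken from the files above): this .hip/.cu pair is
// a compact example of the launch rewrite hipify performs. On a toy kernel
// (all names below are placeholders, not from the project):
//
//   CUDA:  myKernel<<<grid, block, smemBytes, stream>>>(args...);
//   HIP:   hipLaunchKernelGGL(myKernel, grid, block, smemBytes, stream, args...);
//
// with cudaDeviceSynchronize/cudaGetLastError mapped to hipDeviceSynchronize/
// hipGetLastError. A compilable CUDA-side toy with a dynamic shared-memory
// third launch argument, as in the file above:
#include <cassert>
#include <cuda_runtime.h>

__global__ void scaleKernel(float *data, int n, float coef) {
    extern __shared__ float tile[];                  // sized by the launch configuration
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        tile[threadIdx.x] = data[i] * coef;
        data[i] = tile[threadIdx.x];
    }
}

int main() {
    const int n = 1024, block = 256;
    float *d;
    cudaMalloc(&d, n * sizeof(float));
    // Third launch parameter = dynamic shared memory in bytes.
    scaleKernel<<<(n + block - 1) / block, block, block * sizeof(float)>>>(d, n, 0.5f);
    cudaDeviceSynchronize();
    assert(cudaGetLastError() == cudaSuccess);
    cudaFree(d);
    return 0;
}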
5d444ae675d6997f021aa2cca07227f5f58667a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ATen/ATen.h" #include "ATen/hip/HIPContext.h" #include "ATen/hip/HIPApplyUtils.cuh" namespace at { namespace cuda { #define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100 #define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000 #define FOR_KERNEL_LOOP(i, lim) \ for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ i += gridDim.x * blockDim.x) /* Memory types used for the 3 histogram implementations. See `CUDA_tensor_histogram` below. */ enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL }; /* Kernel for computing the histogram of the input. */ template < typename output_t, typename input_t, typename IndexType, int ADims, int PDims, int BDims, CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK, typename Op> __global__ void kernelHistogram1D( detail::TensorInfo<output_t, IndexType> a, /* output */ detail::TensorInfo<output_t, IndexType> p, /* partial output */ detail::TensorInfo<input_t, IndexType> b, /* input */ int binsize, IndexType totalElements, Op getOp) { extern __shared__ unsigned char my_smem[]; output_t* smem = nullptr; if (MemoryType == CUDAHistogramMemoryType::SHARED) { ////////////////////////// Shared memory ////////////////////////// // atomically add to block specific shared memory // then atomically add to the global output tensor smem = reinterpret_cast<output_t*>(my_smem); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { smem[i] = 0; } __syncthreads(); FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); // Use value at `b` as an offset of `smem` const IndexType pOffset = b.data[bOffset] / binsize; atomicAdd(&smem[pOffset], getOp(linearIndex)); } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], smem[i]); } } else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) { ////////////////////////// Multi Block memory ////////////////////////// // atomically add to block specific global tensor // then atomically add to the global output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; // Use value at `b` as an offset of `p` const IndexType pIdx = p.strides[0] * blockIdx.x + bVal / binsize; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); atomicAdd(&p.data[pOffset], getOp(linearIndex)); } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. 
const IndexType pIdx = p.strides[0] * blockIdx.x; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], p.data[pOffset + i]); } } else { ////////////////////////// Global memory ////////////////////////// // atomically add to the output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; // Use value at `b` as an offset of `a` const IndexType aIdx = bVal / binsize; const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(aIdx, a); atomicAdd(&a.data[aOffset], getOp(linearIndex)); } } } #define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP) \ hipLaunchKernelGGL(( kernelHistogram1D<output_t, input_t, IndexType, 1, 2, 1, MEMORY_TYPE>) \ , dim3(grid), \ block, \ (MEMORY_TYPE == CUDAHistogramMemoryType::SHARED) ? sharedMem : 0, \ getCurrentHIPStreamMasqueradingAsCUDA(), \ aInfo, pInfo, bInfo, binsize, totalElements, WEIGHTS_OP); \ AT_ASSERTM(hipGetLastError() == hipSuccess, "kernelHistogram1D failed"); #define HANDLE_SWITCH_CASE(mType, getOp) \ switch (mType) { \ case CUDAHistogramMemoryType::SHARED: \ HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp); \ break; \ case CUDAHistogramMemoryType::MULTI_BLOCK: \ HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp); \ break; \ default: \ HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp); \ } inline int64_t getFreeGlobalMemory() { // no need to use `hipSetDevice` size_t free_mem, total_mem; hipMemGetInfo(&free_mem, &total_mem); AT_ASSERTM( hipGetLastError() == hipSuccess, "CUDA_tensor_histogram failed to get free global memory"); return static_cast<int64_t>(free_mem); } /* Calculate the frequency of the input values. `a` contains the final output or the histogram. Input `b` is assumed to be 1-D non-negative int array. `c` optionally contains the weight vector. See `help torch.bincount` for details on the math. 3 implementations based of input size and memory usage: case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem SHARED: Each block atomically adds to it's own **shared** hist copy, then atomically updates the global tensor. case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem MULTI_BLOCK: Each block atomically adds to it's own **global** hist copy, then atomically updates the global tensor. case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins GLOBAL: all threads atomically update to a single **global** hist copy. 
*/ template <typename output_t, typename input_t, bool HasWeights> bool CUDA_tensor_histogram( at::Tensor a, /* output */ at::Tensor b, /* input */ at::Tensor c, /* weights(optional) */ int64_t nbins, int binsize, TensorArgType aType = TensorArgType::ReadWrite, TensorArgType bType = TensorArgType::ReadOnly, TensorArgType cType = TensorArgType::ReadOnly) { checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); if (HasWeights) { checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); } auto totalElements = b.size(0); const dim3 block = getApplyBlock(); dim3 grid; int64_t curDevice = current_device(); if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { return false; } CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes auto maxGlobalMem = getFreeGlobalMemory(); auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes // determine memory type to use in the kernel if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM && sharedMem < maxSharedMem) { memType = CUDAHistogramMemoryType::SHARED; } else if ( nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM && multiBlockMem < (maxGlobalMem / 2)) { // check against half of free mem to be extra safe // due to cached allocator, we may anyway have slightly more free mem memType = CUDAHistogramMemoryType::MULTI_BLOCK; } // alloc memory for MULTI_BLOCK using IndexType = int64_t; auto aInfo = detail::getTensorInfo<output_t, IndexType>(a); auto bInfo = detail::getTensorInfo<input_t, IndexType>(b); detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {}); Tensor partial_output; if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) { partial_output = native::zeros({grid.x, nbins}, a.options()); pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output); } if (HasWeights) { auto cInfo = detail::getTensorInfo<output_t, IndexType>(c); const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { const IndexType cOffset = detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo); return cInfo.data[cOffset]; }; HANDLE_SWITCH_CASE(memType, getWeightsOp) } else { static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; HANDLE_SWITCH_CASE(memType, getDummyOp) } return true; } #undef HANDLE_CASE #undef HANDLE_SWITCH_CASE #undef FOR_KERNEL_LOOP #undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM #undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM } // namespace cuda namespace { ///////////////// bincount ///////////////// template <typename input_t, typename weights_t> Tensor _bincount_cuda_template( const Tensor& self, const Tensor& weights, int64_t minlength) { if (minlength < 0) { AT_ERROR("minlength should be >= 0"); } if (self.dim() == 1 && self.numel() == 0) { return native::zeros({minlength}, device(kCUDA).dtype(kLong)); } if (self.dim() != 1 || (!std::is_same<input_t, uint8_t>::value && *self.min().cpu().data<input_t>() < 0)) { AT_ERROR("bincount only supports 1-d non-negative integral inputs."); } bool has_weights = weights.defined(); if (has_weights && weights.size(0) != self.size(0)) { AT_ERROR("input and weights should have the same length"); } auto nbins = self.max().item<int64_t>() + 1L; nbins = ::max(nbins, minlength); // alloc output counter on GPU Tensor output; if (has_weights) { output = native::zeros({nbins}, weights.options()); auto ret = cuda::CUDA_tensor_histogram<weights_t, input_t, true>( output, self, weights, nbins, 1); } else { output 
= native::zeros({nbins}, device(DeviceType::CUDA).dtype(kLong)); auto ret = cuda::CUDA_tensor_histogram<int64_t, input_t, false>( output, self, weights, nbins, 1); } return output; } } // namespace namespace native { Tensor _bincount_cuda( const Tensor& self, const Tensor& weights, int64_t minlength) { return AT_DISPATCH_INTEGRAL_TYPES(self.type(), "bincount", [&] { const auto scalar = weights.type().scalarType(); if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) return _bincount_cuda_template<scalar_t, float>(self, weights, minlength); return _bincount_cuda_template<scalar_t, double>( self, weights.toType(CUDA(kDouble)), minlength); }); } } // namespace native } // namespace at
5d444ae675d6997f021aa2cca07227f5f58667a5.cu
#include "ATen/ATen.h" #include "ATen/cuda/CUDAContext.h" #include "ATen/cuda/CUDAApplyUtils.cuh" namespace at { namespace cuda { #define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100 #define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000 #define FOR_KERNEL_LOOP(i, lim) \ for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ i += gridDim.x * blockDim.x) /* Memory types used for the 3 histogram implementations. See `CUDA_tensor_histogram` below. */ enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL }; /* Kernel for computing the histogram of the input. */ template < typename output_t, typename input_t, typename IndexType, int ADims, int PDims, int BDims, CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK, typename Op> __global__ void kernelHistogram1D( detail::TensorInfo<output_t, IndexType> a, /* output */ detail::TensorInfo<output_t, IndexType> p, /* partial output */ detail::TensorInfo<input_t, IndexType> b, /* input */ int binsize, IndexType totalElements, Op getOp) { extern __shared__ unsigned char my_smem[]; output_t* smem = nullptr; if (MemoryType == CUDAHistogramMemoryType::SHARED) { ////////////////////////// Shared memory ////////////////////////// // atomically add to block specific shared memory // then atomically add to the global output tensor smem = reinterpret_cast<output_t*>(my_smem); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { smem[i] = 0; } __syncthreads(); FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); // Use value at `b` as an offset of `smem` const IndexType pOffset = b.data[bOffset] / binsize; atomicAdd(&smem[pOffset], getOp(linearIndex)); } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], smem[i]); } } else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) { ////////////////////////// Multi Block memory ////////////////////////// // atomically add to block specific global tensor // then atomically add to the global output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; // Use value at `b` as an offset of `p` const IndexType pIdx = p.strides[0] * blockIdx.x + bVal / binsize; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); atomicAdd(&p.data[pOffset], getOp(linearIndex)); } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. 
const IndexType pIdx = p.strides[0] * blockIdx.x; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], p.data[pOffset + i]); } } else { ////////////////////////// Global memory ////////////////////////// // atomically add to the output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; // Use value at `b` as an offset of `a` const IndexType aIdx = bVal / binsize; const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(aIdx, a); atomicAdd(&a.data[aOffset], getOp(linearIndex)); } } } #define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP) \ kernelHistogram1D<output_t, input_t, IndexType, 1, 2, 1, MEMORY_TYPE> \ <<<grid, \ block, \ (MEMORY_TYPE == CUDAHistogramMemoryType::SHARED) ? sharedMem : 0, \ getCurrentCUDAStream()>>>( \ aInfo, pInfo, bInfo, binsize, totalElements, WEIGHTS_OP); \ AT_ASSERTM(cudaGetLastError() == cudaSuccess, "kernelHistogram1D failed"); #define HANDLE_SWITCH_CASE(mType, getOp) \ switch (mType) { \ case CUDAHistogramMemoryType::SHARED: \ HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp); \ break; \ case CUDAHistogramMemoryType::MULTI_BLOCK: \ HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp); \ break; \ default: \ HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp); \ } inline int64_t getFreeGlobalMemory() { // no need to use `cudaSetDevice` size_t free_mem, total_mem; cudaMemGetInfo(&free_mem, &total_mem); AT_ASSERTM( cudaGetLastError() == cudaSuccess, "CUDA_tensor_histogram failed to get free global memory"); return static_cast<int64_t>(free_mem); } /* Calculate the frequency of the input values. `a` contains the final output or the histogram. Input `b` is assumed to be 1-D non-negative int array. `c` optionally contains the weight vector. See `help torch.bincount` for details on the math. 3 implementations based of input size and memory usage: case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem SHARED: Each block atomically adds to it's own **shared** hist copy, then atomically updates the global tensor. case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem MULTI_BLOCK: Each block atomically adds to it's own **global** hist copy, then atomically updates the global tensor. case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins GLOBAL: all threads atomically update to a single **global** hist copy. 
*/ template <typename output_t, typename input_t, bool HasWeights> bool CUDA_tensor_histogram( at::Tensor a, /* output */ at::Tensor b, /* input */ at::Tensor c, /* weights(optional) */ int64_t nbins, int binsize, TensorArgType aType = TensorArgType::ReadWrite, TensorArgType bType = TensorArgType::ReadOnly, TensorArgType cType = TensorArgType::ReadOnly) { checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); if (HasWeights) { checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); } auto totalElements = b.size(0); const dim3 block = getApplyBlock(); dim3 grid; int64_t curDevice = current_device(); if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { return false; } CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes auto maxGlobalMem = getFreeGlobalMemory(); auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes // determine memory type to use in the kernel if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM && sharedMem < maxSharedMem) { memType = CUDAHistogramMemoryType::SHARED; } else if ( nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM && multiBlockMem < (maxGlobalMem / 2)) { // check against half of free mem to be extra safe // due to cached allocator, we may anyway have slightly more free mem memType = CUDAHistogramMemoryType::MULTI_BLOCK; } // alloc memory for MULTI_BLOCK using IndexType = int64_t; auto aInfo = detail::getTensorInfo<output_t, IndexType>(a); auto bInfo = detail::getTensorInfo<input_t, IndexType>(b); detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {}); Tensor partial_output; if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) { partial_output = native::zeros({grid.x, nbins}, a.options()); pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output); } if (HasWeights) { auto cInfo = detail::getTensorInfo<output_t, IndexType>(c); const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { const IndexType cOffset = detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo); return cInfo.data[cOffset]; }; HANDLE_SWITCH_CASE(memType, getWeightsOp) } else { static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; HANDLE_SWITCH_CASE(memType, getDummyOp) } return true; } #undef HANDLE_CASE #undef HANDLE_SWITCH_CASE #undef FOR_KERNEL_LOOP #undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM #undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM } // namespace cuda namespace { ///////////////// bincount ///////////////// template <typename input_t, typename weights_t> Tensor _bincount_cuda_template( const Tensor& self, const Tensor& weights, int64_t minlength) { if (minlength < 0) { AT_ERROR("minlength should be >= 0"); } if (self.dim() == 1 && self.numel() == 0) { return native::zeros({minlength}, device(kCUDA).dtype(kLong)); } if (self.dim() != 1 || (!std::is_same<input_t, uint8_t>::value && *self.min().cpu().data<input_t>() < 0)) { AT_ERROR("bincount only supports 1-d non-negative integral inputs."); } bool has_weights = weights.defined(); if (has_weights && weights.size(0) != self.size(0)) { AT_ERROR("input and weights should have the same length"); } auto nbins = self.max().item<int64_t>() + 1L; nbins = std::max(nbins, minlength); // alloc output counter on GPU Tensor output; if (has_weights) { output = native::zeros({nbins}, weights.options()); auto ret = cuda::CUDA_tensor_histogram<weights_t, input_t, true>( output, self, weights, nbins, 1); } else { 
output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(kLong)); auto ret = cuda::CUDA_tensor_histogram<int64_t, input_t, false>( output, self, weights, nbins, 1); } return output; } } // namespace namespace native { Tensor _bincount_cuda( const Tensor& self, const Tensor& weights, int64_t minlength) { return AT_DISPATCH_INTEGRAL_TYPES(self.type(), "bincount", [&] { const auto scalar = weights.type().scalarType(); if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) return _bincount_cuda_template<scalar_t, float>(self, weights, minlength); return _bincount_cuda_template<scalar_t, double>( self, weights.toType(CUDA(kDouble)), minlength); }); } } // namespace native } // namespace at
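// ---------------------------------------------------------------------------
// Illustrative sketch (not taken from the file above): the SHARED strategy in
// kernelHistogram1D keeps one histogram copy per block in shared memory and
// folds it into the global histogram once per block, which keeps most atomics
// cheap. A minimal standalone version of that idea, with a fixed bin count
// chosen only for illustration:
#include <cstdio>
#include <cuda_runtime.h>

constexpr int kBins = 64;

__global__ void histogramShared(const int *input, int n, unsigned int *globalHist) {
    __shared__ unsigned int localHist[kBins];
    for (int b = threadIdx.x; b < kBins; b += blockDim.x)
        localHist[b] = 0;                              // zero the block-local copy
    __syncthreads();

    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x)
        atomicAdd(&localHist[input[i] % kBins], 1u);   // shared-memory atomics
    __syncthreads();

    for (int b = threadIdx.x; b < kBins; b += blockDim.x)
        atomicAdd(&globalHist[b], localHist[b]);       // one global update per bin per block
}

int main() {
    const int n = 1 << 20;
    int *d_in;
    unsigned int *d_hist;
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_hist, kBins * sizeof(unsigned int));
    cudaMemset(d_in, 0, n * sizeof(int));
    cudaMemset(d_hist, 0, kBins * sizeof(unsigned int));

    histogramShared<<<64, 256>>>(d_in, n, d_hist);
    cudaDeviceSynchronize();
    printf("status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_in);
    cudaFree(d_hist);
    return 0;
}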
110c3c91aa4a7ea2e5887d69ba337fa8b5c290f1.hip
// !!! This is a file automatically generated by hipify!!!
#include "cuda_uniform_sampler.h"

namespace cpt {

CUDA_DEVICE float CudaUniformSampler::sample_1d(float min_value, float max_value) {
    const int idx = get_cuda_flat_thread_index();
    const float num = hiprand_uniform(&_curand_state);
    return num * (max_value - min_value) + min_value;
}

CUDA_DEVICE void CudaUniformSampler::sample_2d(
    CudaVector<float,2>& output,
    const CudaVector<float,2>& min_value,
    const CudaVector<float,2>& max_value) {
    output[0] = sample_1d(min_value[0], max_value[0]);
    output[1] = sample_1d(min_value[1], max_value[1]);
}

}
110c3c91aa4a7ea2e5887d69ba337fa8b5c290f1.cu
#include "cuda_uniform_sampler.h" namespace cpt { CUDA_DEVICE float CudaUniformSampler::sample_1d(float min_value, float max_value) { const int idx = get_cuda_flat_thread_index(); const float num = curand_uniform(&_curand_state); return num * (max_value - min_value) + min_value; } CUDA_DEVICE void CudaUniformSampler::sample_2d( CudaVector<float,2>& output, const CudaVector<float,2>& min_value, const CudaVector<float,2>& max_value) { output[0] = sample_1d(min_value[0], max_value[0]); output[1] = sample_1d(min_value[1], max_value[1]); } }
61e4566467ba263b8a11b6bb801f5085e57844c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "compute_scores.cuh" #include <catboost/cuda/methods/kernel/score_calcers.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/fill.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <contrib/libs/cub/hipcub/hipcub.hpp> #include <cmath> #include <exception> #include <cfloat> namespace NKernel { // histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex; template <int BlockSize, class TScoreCalcer> __global__ void ComputeOptimalSplits(const TCBinFeature* bf, ui32 binFeatureCount, const float* histograms, const double* partStats, int statCount, const ui32* partIds, int pCount, bool multiclassOptimization, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = -1; int tid = threadIdx.x; result += blockIdx.x + blockIdx.y * gridDim.x; partIds += blockIdx.y * pCount; for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) { const int binFeatureId = offset + tid; if (binFeatureId >= binFeatureCount) { break; } calcer.NextFeature(bf[binFeatureId]); for (int i = 0; i < pCount; i++) { const int leafId = __ldg(partIds + i); const float weightLeft = max(__ldg(histograms + leafId * statCount * binFeatureCount + binFeatureId), 0.0f); const float weightRight = max(__ldg(partStats + leafId * statCount) - weightLeft, 0.0f); double totalSumLeft = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { float sumLeft = __ldg(histograms + leafId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId); double partStat = __ldg(partStats + leafId * statCount + statId); totalSumPart += partStat; float sumRight = static_cast<float>(partStat - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); totalSumLeft += sumLeft; } if (multiclassOptimization) { double totalSumRight = totalSumPart - totalSumLeft; calcer.AddLeaf(-totalSumLeft, weightLeft); calcer.AddLeaf(-totalSumRight, weightRight); } } const float score = calcer.GetScore(); if (score < bestScore) { bestScore = score; bestIndex = binFeatureId; } } __shared__ float scores[BlockSize]; scores[tid] = bestScore; __shared__ int indices[BlockSize]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BlockSize >> 1; s > 0; s >>= 1) { if (tid < s) { if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { const int index = indices[0]; if (index != -1 && index < binFeatureCount) { result->FeatureId = bf[index].FeatureId; result->BinId = bf[index].BinId; result->Score = scores[0]; } else { result->FeatureId = -1; result->BinId = -1; result->Score = FLT_MAX; } } } void ComputeOptimalSplits(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount, const float* histograms, const double* partStats, int statCount, ui32* partIds, int partBlockSize, int partBlockCount, TBestSplitProperties* result, ui32 argmaxBlockCount, EScoreFunction scoreFunction, bool multiclassOptimization, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; dim3 numBlocks; numBlocks.x = argmaxBlockCount; numBlocks.y = 
partBlockCount; numBlocks.z = 1; #define RUN() \ ComputeOptimalSplits<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, partBlockSize, multiclassOptimization, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Correlation: case EScoreFunction::NewtonCorrelation: { using TScoreCalcer = TCorrelationScoreCalcer; TCorrelationScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } template <int BlockSize> __global__ void ComputeTargetVarianceImpl(const float* stats, ui32 size, ui32 statCount, ui64 statLineSize, bool isMulticlass, double* aggregatedStats) { ui32 i = BlockSize * blockIdx.x + threadIdx.x; float weightedSum = 0; float weightedSum2 = 0; float totalWeight = 0; while (i < size) { const float w = stats[i]; if (w > 1e-15f) { float statSum = 0; for (ui32 statId = 1; statId < statCount; ++statId) { const float wt = stats[i + statLineSize * statId]; weightedSum += wt; weightedSum2 += wt * wt / w; //cause we need sum w * t * t statSum += wt; } if (isMulticlass) { weightedSum += -statSum; weightedSum2 += (-statSum * statSum) / w; } totalWeight += w; } i += gridDim.x * BlockSize; } using BlockReduce = typename hipcub::BlockReduce<double, BlockSize>; __shared__ typename BlockReduce::TempStorage tempStorage; double blockWeightedSum = weightedSum; blockWeightedSum = BlockReduce(tempStorage).Sum(blockWeightedSum); double blockWeightedSum2 = weightedSum2; blockWeightedSum2 = BlockReduce(tempStorage).Sum(blockWeightedSum2); double blockTotalWeight = totalWeight; blockTotalWeight = BlockReduce(tempStorage).Sum(blockTotalWeight); if (threadIdx.x == 0) { TAtomicAdd<double>::Add(aggregatedStats, blockWeightedSum); TAtomicAdd<double>::Add(aggregatedStats + 1, blockWeightedSum2); TAtomicAdd<double>::Add(aggregatedStats + 2, blockTotalWeight); } } void ComputeTargetVariance(const float* stats, ui32 size, ui32 statCount, ui64 statLineSize, bool isMulticlass, double* aggregatedStats, TCudaStream stream) { const ui32 blockSize = 512; const ui32 numBlocks = min(4 * TArchProps::SMCount(), CeilDivide(size, blockSize)); FillBuffer(aggregatedStats, 0.0, 3, stream); if (numBlocks) { hipLaunchKernelGGL(( ComputeTargetVarianceImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, stats, size, statCount, statLineSize, isMulticlass, aggregatedStats); } } }
61e4566467ba263b8a11b6bb801f5085e57844c5.cu
#include "compute_scores.cuh" #include <catboost/cuda/methods/kernel/score_calcers.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/fill.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <contrib/libs/cub/cub/block/block_reduce.cuh> #include <cmath> #include <exception> #include <cfloat> namespace NKernel { // histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex; template <int BlockSize, class TScoreCalcer> __global__ void ComputeOptimalSplits(const TCBinFeature* bf, ui32 binFeatureCount, const float* histograms, const double* partStats, int statCount, const ui32* partIds, int pCount, bool multiclassOptimization, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = -1; int tid = threadIdx.x; result += blockIdx.x + blockIdx.y * gridDim.x; partIds += blockIdx.y * pCount; for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) { const int binFeatureId = offset + tid; if (binFeatureId >= binFeatureCount) { break; } calcer.NextFeature(bf[binFeatureId]); for (int i = 0; i < pCount; i++) { const int leafId = __ldg(partIds + i); const float weightLeft = max(__ldg(histograms + leafId * statCount * binFeatureCount + binFeatureId), 0.0f); const float weightRight = max(__ldg(partStats + leafId * statCount) - weightLeft, 0.0f); double totalSumLeft = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { float sumLeft = __ldg(histograms + leafId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId); double partStat = __ldg(partStats + leafId * statCount + statId); totalSumPart += partStat; float sumRight = static_cast<float>(partStat - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); totalSumLeft += sumLeft; } if (multiclassOptimization) { double totalSumRight = totalSumPart - totalSumLeft; calcer.AddLeaf(-totalSumLeft, weightLeft); calcer.AddLeaf(-totalSumRight, weightRight); } } const float score = calcer.GetScore(); if (score < bestScore) { bestScore = score; bestIndex = binFeatureId; } } __shared__ float scores[BlockSize]; scores[tid] = bestScore; __shared__ int indices[BlockSize]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BlockSize >> 1; s > 0; s >>= 1) { if (tid < s) { if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { const int index = indices[0]; if (index != -1 && index < binFeatureCount) { result->FeatureId = bf[index].FeatureId; result->BinId = bf[index].BinId; result->Score = scores[0]; } else { result->FeatureId = -1; result->BinId = -1; result->Score = FLT_MAX; } } } void ComputeOptimalSplits(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount, const float* histograms, const double* partStats, int statCount, ui32* partIds, int partBlockSize, int partBlockCount, TBestSplitProperties* result, ui32 argmaxBlockCount, EScoreFunction scoreFunction, bool multiclassOptimization, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; dim3 numBlocks; numBlocks.x = argmaxBlockCount; numBlocks.y = partBlockCount; numBlocks.z = 1; #define RUN() \ ComputeOptimalSplits<blockSize, 
TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, partBlockSize, multiclassOptimization, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Correlation: case EScoreFunction::NewtonCorrelation: { using TScoreCalcer = TCorrelationScoreCalcer; TCorrelationScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } template <int BlockSize> __global__ void ComputeTargetVarianceImpl(const float* stats, ui32 size, ui32 statCount, ui64 statLineSize, bool isMulticlass, double* aggregatedStats) { ui32 i = BlockSize * blockIdx.x + threadIdx.x; float weightedSum = 0; float weightedSum2 = 0; float totalWeight = 0; while (i < size) { const float w = stats[i]; if (w > 1e-15f) { float statSum = 0; for (ui32 statId = 1; statId < statCount; ++statId) { const float wt = stats[i + statLineSize * statId]; weightedSum += wt; weightedSum2 += wt * wt / w; //cause we need sum w * t * t statSum += wt; } if (isMulticlass) { weightedSum += -statSum; weightedSum2 += (-statSum * statSum) / w; } totalWeight += w; } i += gridDim.x * BlockSize; } using BlockReduce = typename cub::BlockReduce<double, BlockSize>; __shared__ typename BlockReduce::TempStorage tempStorage; double blockWeightedSum = weightedSum; blockWeightedSum = BlockReduce(tempStorage).Sum(blockWeightedSum); double blockWeightedSum2 = weightedSum2; blockWeightedSum2 = BlockReduce(tempStorage).Sum(blockWeightedSum2); double blockTotalWeight = totalWeight; blockTotalWeight = BlockReduce(tempStorage).Sum(blockTotalWeight); if (threadIdx.x == 0) { TAtomicAdd<double>::Add(aggregatedStats, blockWeightedSum); TAtomicAdd<double>::Add(aggregatedStats + 1, blockWeightedSum2); TAtomicAdd<double>::Add(aggregatedStats + 2, blockTotalWeight); } } void ComputeTargetVariance(const float* stats, ui32 size, ui32 statCount, ui64 statLineSize, bool isMulticlass, double* aggregatedStats, TCudaStream stream) { const ui32 blockSize = 512; const ui32 numBlocks = min(4 * TArchProps::SMCount(), CeilDivide(size, blockSize)); FillBuffer(aggregatedStats, 0.0, 3, stream); if (numBlocks) { ComputeTargetVarianceImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(stats, size, statCount, statLineSize, isMulticlass, aggregatedStats); } } }
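// ---------------------------------------------------------------------------
// Illustrative sketch (not taken from the file above): ComputeTargetVarianceImpl
// accumulates three per-thread partial sums, reduces each within the block via
// cub::BlockReduce, and folds the block totals into global memory with atomics.
// The core BlockReduce pattern in isolation (block size and data are
// placeholders; atomicAdd on double needs an sm_60+ target):
#include <cstdio>
#include <cuda_runtime.h>
#include <cub/block/block_reduce.cuh>

constexpr int kBlockSize = 256;

__global__ void blockSum(const float *in, int n, double *out) {
    using BlockReduce = cub::BlockReduce<double, kBlockSize>;
    __shared__ typename BlockReduce::TempStorage tempStorage;

    double acc = 0.0;                                // grid-stride partial sum per thread
    for (int i = blockIdx.x * kBlockSize + threadIdx.x; i < n;
         i += gridDim.x * kBlockSize)
        acc += in[i];

    double blockTotal = BlockReduce(tempStorage).Sum(acc);
    if (threadIdx.x == 0)
        atomicAdd(out, blockTotal);                  // one global update per block
}

int main() {
    const int n = 1 << 20;
    float *d_in;
    double *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(double));
    cudaMemset(d_in, 0, n * sizeof(float));
    cudaMemset(d_out, 0, sizeof(double));

    blockSum<<<64, kBlockSize>>>(d_in, n, d_out);
    cudaDeviceSynchronize();
    printf("status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}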
c4d82c823c3a4d1d16a76edb9488e2f0c2f42096.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Program : To perform matrix multiplication using shared memory * Author : Anant Shah * Roll Number : EE16B105 * Date : 7-9-2018 **/ #include<stdio.h> #define ERROR_HANDLER(error_msg,line) error_handler(error_msg,line) #define SIZE 32 #define NUM_THREADS_X 16 #define NUM_THREADS_Y 16 void error_handler(hipError_t error_msg,int line){ /* Error handler function */ if( error_msg!=hipSuccess ){ printf("%s in %s at %d",hipGetErrorString(error_msg),__FILE__,line); exit(EXIT_FAILURE); } } void fill_matrix(double *mat, unsigned numRows, unsigned numCols){ /*Program to fill the elements of a matrix with the given number of rows and columns */ for(unsigned i=0;i<numRows;i++){ for(unsigned j=0;j<numCols;j++){ mat[i*numCols+j] = i*2.1f+j*3.2f; } } } void print_matrix_to_file(double *mat, unsigned numRows, unsigned numCols){ /* Function to print the matrix elements into a file */ const char *fname = "assignment2_out"; FILE *f = fopen(fname,"a"); for(unsigned i=0; i<numRows; i++){ for(unsigned j=0;j<numCols;j++){ fprintf(f,"%4.4f ",mat[i*numCols+j]); } fprintf(f,"\n"); } fclose(f); } __global__ void matrixMul(double *M,double *N,double *P,int width){ /* Kernel to perform the matrix multiplication output for each cell * Shared Memory on the SM will be utilized for each thread * Parameters : M - Matrix multiplicand * N - matrix Multiplicand * P - Output of the product M*N * width - Dimensions of the square matrices */ int tx = threadIdx.x; /* x-ID of a thread in a block */ int ty = threadIdx.y; /* y-ID of a thread in a block */ int Row = blockIdx.y*blockDim.y+ty; /* Row of the cell in the output matrix */ int Col = blockIdx.x*blockDim.x+tx; /* Row of the cell in the input matrix */ /* Declaring array variables in the shared memory to be used by the threads to perform the matrix multiplication */ __shared__ double Ms[NUM_THREADS_Y][SIZE]; /* A 2-D array to store the variables required by a block from matrix M */ __shared__ double Ns[SIZE][NUM_THREADS_X]; /* A 2-D array to store the variables required by a block from matrix N */ /* Loading the matrices from the global memory to the shared memory by collaborative loading */ int num_col_cells = (width+blockDim.x-1)/blockDim.x; /* Number of cells to be stored from matrix M in global memory for a given cell in the outpt matrix*/ int num_row_cells = (width+blockDim.y-1)/blockDim.y; /* Number of cells to be stored from matrix N in global memory for a given cell in the output matrix */ /*Since for this case, we are taking the square matrix to be a block, num_col_cells = num_row_cells and hence both the loading operations can be completed in the same probelm */ for(int i=0;i<num_col_cells;i++){ Ms[ty][tx*num_col_cells+i] = M[Row*width+tx*num_col_cells+i]; Ns[ty*num_row_cells+i][tx] = N[(ty*num_row_cells+i)*width+Col]; } __syncthreads(); /*All threads in the block need to finish loading before we can proceed with the multiplication */ /*The partial matrices have been stored in the shared memory, we now need to perform matrix multiplication */ double pSum=0.0; /*Partial sum to store the multiplication */ for(int i=0;i<width;i++){ pSum += Ms[ty][i]*Ns[i][tx]; } P[Row*width+Col] = pSum; } int main(int argc,char **argv){ if(argc!=1){ printf("Error : Invalid number of arguments"); exit(EXIT_FAILURE); } /******************************** Variable Declarations ***************************************************/ double *h_M; /* Matrix multiplicand M on the host */ double *h_N; /* 
Matrix multiplicand N on the host */ double *h_P; /* Matrix product(M*N) on the host */ double *d_M; /* Matrix multiplicand M on the device */ double *d_N; /* Matrix multiplicand N on the device */ double *d_P; /* Matrix product(M*N) on the device */ size_t size; /* The size in bytes of each matrix */ hipEvent_t start,stop; /* Cuda Events to measure the run-time of the kernel */ hipEventCreate(&start); hipEventCreate(&stop); size = sizeof(double)*SIZE*SIZE; /* These are square matrices of dimensions SIZE*SIZE */ /******************************** Allocate Memory on the host *********************************************/ h_M = (double *)malloc(size); h_N = (double *)malloc(size); h_P = (double *)malloc(size); /******************************** Initialize the matrices ************************************************/ fill_matrix(h_M,SIZE,SIZE); fill_matrix(h_N,SIZE,SIZE); /******************************** Allocate Memory on the device ******************************************/ ERROR_HANDLER(hipMalloc((void **)&d_M,size),__LINE__); ERROR_HANDLER(hipMalloc((void **)&d_N,size),__LINE__); ERROR_HANDLER(hipMalloc((void **)&d_P,size),__LINE__); /******************************* Copy matrices from host to device **************************************/ ERROR_HANDLER(hipMemcpy(d_M,h_M,size,hipMemcpyHostToDevice),__LINE__); ERROR_HANDLER(hipMemcpy(d_N,h_N,size,hipMemcpyHostToDevice),__LINE__); /****************************** Kernel Invocation *******************************************************/ dim3 threads(NUM_THREADS_X,NUM_THREADS_Y); dim3 blocks((SIZE+NUM_THREADS_X-1)/NUM_THREADS_X,(SIZE+NUM_THREADS_Y-1)/NUM_THREADS_Y); hipEventRecord(start); hipLaunchKernelGGL(( matrixMul), dim3(blocks),dim3(threads), 0, 0, d_M,d_N,d_P,SIZE); hipEventRecord(stop); ERROR_HANDLER(hipMemcpy(h_P,d_P,size,hipMemcpyDeviceToHost),__LINE__); hipEventSynchronize(stop); float milliseconds = 0.0; hipEventElapsedTime(&milliseconds, start, stop); printf("Run-Time(milli-seconds) : %.10f \n",milliseconds); print_matrix_to_file(h_P,SIZE,SIZE); /***************************** Free the memory that was allocated **************************************/ hipFree(d_M); hipFree(d_N); hipFree(d_P); free(h_M); free(h_N); free(h_P); }
c4d82c823c3a4d1d16a76edb9488e2f0c2f42096.cu
/* Program : To perform matrix multiplication using shared memory
 * Author : Anant Shah
 * Roll Number : EE16B105
 * Date : 7-9-2018
 **/

#include <stdio.h>

#define ERROR_HANDLER(error_msg,line) error_handler(error_msg,line)
#define SIZE 32
#define NUM_THREADS_X 16
#define NUM_THREADS_Y 16

void error_handler(cudaError_t error_msg, int line){
    /* Error handler function */
    if( error_msg != cudaSuccess ){
        printf("%s in %s at %d\n", cudaGetErrorString(error_msg), __FILE__, line);
        exit(EXIT_FAILURE);
    }
}

void fill_matrix(double *mat, unsigned numRows, unsigned numCols){
    /* Function to fill the elements of a matrix with the given number of rows and columns */
    for(unsigned i=0; i<numRows; i++){
        for(unsigned j=0; j<numCols; j++){
            mat[i*numCols+j] = i*2.1f + j*3.2f;
        }
    }
}

void print_matrix_to_file(double *mat, unsigned numRows, unsigned numCols){
    /* Function to print the matrix elements into a file */
    const char *fname = "assignment2_out";
    FILE *f = fopen(fname, "a");
    for(unsigned i=0; i<numRows; i++){
        for(unsigned j=0; j<numCols; j++){
            fprintf(f, "%4.4f ", mat[i*numCols+j]);
        }
        fprintf(f, "\n");
    }
    fclose(f);
}

__global__ void matrixMul(double *M, double *N, double *P, int width){
    /* Kernel to compute the matrix multiplication output for each cell.
     * Shared memory on the SM is used by the threads of a block.
     * Parameters : M     - Matrix multiplicand
     *              N     - Matrix multiplicand
     *              P     - Output of the product M*N
     *              width - Dimension of the square matrices
     */
    int tx  = threadIdx.x;                 /* x-ID of a thread in a block */
    int ty  = threadIdx.y;                 /* y-ID of a thread in a block */
    int Row = blockIdx.y*blockDim.y + ty;  /* Row of the cell in the output matrix */
    int Col = blockIdx.x*blockDim.x + tx;  /* Column of the cell in the output matrix */

    /* Declaring arrays in shared memory to be used by the threads of the block */
    __shared__ double Ms[NUM_THREADS_Y][SIZE]; /* 2-D array holding the values required by the block from matrix M */
    __shared__ double Ns[SIZE][NUM_THREADS_X]; /* 2-D array holding the values required by the block from matrix N */

    /* Loading the matrices from global memory to shared memory by collaborative loading */
    int num_col_cells = (width+blockDim.x-1)/blockDim.x; /* Number of cells to be loaded from matrix M per thread for a given cell in the output matrix */
    int num_row_cells = (width+blockDim.y-1)/blockDim.y; /* Number of cells to be loaded from matrix N per thread for a given cell in the output matrix */

    /* Since in this case the whole square matrix fits in one block, num_col_cells = num_row_cells,
     * and hence both loading operations can be completed in the same loop */
    for(int i=0; i<num_col_cells; i++){
        Ms[ty][tx*num_col_cells+i] = M[Row*width + tx*num_col_cells + i];
        Ns[ty*num_row_cells+i][tx] = N[(ty*num_row_cells+i)*width + Col];
    }
    __syncthreads(); /* All threads in the block need to finish loading before we can proceed with the multiplication */

    /* The partial matrices are now in shared memory; perform the multiplication */
    double pSum = 0.0; /* Partial sum accumulating the product */
    for(int i=0; i<width; i++){
        pSum += Ms[ty][i]*Ns[i][tx];
    }
    P[Row*width+Col] = pSum;
}

int main(int argc, char **argv){
    if(argc != 1){
        printf("Error : Invalid number of arguments");
        exit(EXIT_FAILURE);
    }

    /******************************** Variable Declarations ***************************************************/
    double *h_M;  /* Matrix multiplicand M on the host */
    double *h_N;  /* Matrix multiplicand N on the host */
    double *h_P;  /* Matrix product (M*N) on the host */
    double *d_M;  /* Matrix multiplicand M on the device */
    double *d_N;  /* Matrix multiplicand N on the device */
    double *d_P;  /* Matrix product (M*N) on the device */
    size_t size;  /* The size in bytes of each matrix */

    cudaEvent_t start, stop; /* CUDA events to measure the run-time of the kernel */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    size = sizeof(double)*SIZE*SIZE; /* These are square matrices of dimensions SIZE*SIZE */

    /******************************** Allocate Memory on the host *********************************************/
    h_M = (double *)malloc(size);
    h_N = (double *)malloc(size);
    h_P = (double *)malloc(size);

    /******************************** Initialize the matrices ************************************************/
    fill_matrix(h_M, SIZE, SIZE);
    fill_matrix(h_N, SIZE, SIZE);

    /******************************** Allocate Memory on the device ******************************************/
    ERROR_HANDLER(cudaMalloc((void **)&d_M, size), __LINE__);
    ERROR_HANDLER(cudaMalloc((void **)&d_N, size), __LINE__);
    ERROR_HANDLER(cudaMalloc((void **)&d_P, size), __LINE__);

    /******************************* Copy matrices from host to device **************************************/
    ERROR_HANDLER(cudaMemcpy(d_M, h_M, size, cudaMemcpyHostToDevice), __LINE__);
    ERROR_HANDLER(cudaMemcpy(d_N, h_N, size, cudaMemcpyHostToDevice), __LINE__);

    /****************************** Kernel Invocation *******************************************************/
    dim3 threads(NUM_THREADS_X, NUM_THREADS_Y);
    dim3 blocks((SIZE+NUM_THREADS_X-1)/NUM_THREADS_X, (SIZE+NUM_THREADS_Y-1)/NUM_THREADS_Y);
    cudaEventRecord(start);
    matrixMul<<<blocks,threads>>>(d_M, d_N, d_P, SIZE);
    cudaEventRecord(stop);

    ERROR_HANDLER(cudaMemcpy(h_P, d_P, size, cudaMemcpyDeviceToHost), __LINE__);
    cudaEventSynchronize(stop);

    float milliseconds = 0.0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Run-Time(milli-seconds) : %.10f \n", milliseconds);

    print_matrix_to_file(h_P, SIZE, SIZE);

    /***************************** Free the memory that was allocated **************************************/
    cudaFree(d_M);
    cudaFree(d_N);
    cudaFree(d_P);
    free(h_M);
    free(h_N);
    free(h_P);
}
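Elsewhere in this listing, each .cu entry has a hipified .hip counterpart in which triple-chevron launches become hipLaunchKernelGGL calls and cuda* API names become hip* names. Purely as a hedged sketch (reusing the blocks/threads configuration from the program above, not taken from an actual hipified copy of this file), the kernel-invocation section would translate roughly as:

    #include <hip/hip_runtime.h>

    /* Sketch only: the extra 0, 0 arguments are the dynamic shared-memory size
     * and the stream, both left at their defaults as in the CUDA version. */
    dim3 threads(NUM_THREADS_X, NUM_THREADS_Y);
    dim3 blocks((SIZE + NUM_THREADS_X - 1) / NUM_THREADS_X,
                (SIZE + NUM_THREADS_Y - 1) / NUM_THREADS_Y);
    hipEventRecord(start);
    hipLaunchKernelGGL(matrixMul, blocks, threads, 0, 0, d_M, d_N, d_P, SIZE);
    hipEventRecord(stop);
    /* Allocation and copy calls map one-to-one: cudaMalloc -> hipMalloc,
     * cudaMemcpy -> hipMemcpy, cudaEventCreate -> hipEventCreate, and so on. */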
443912a884572a59a77c7f963110584117640027.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "header.h" #include "util.h" #include "mapper.cuh" #include "reducer.cuh" #include "wtime.h" #include "barrier.cuh" #include "gpu_graph.cuh" #include "meta_data.cuh" #include "mapper_enactor.cuh" #include "reducer_enactor.cuh" #include "cpu_bp.hpp" /*user defined vertex behavior function*/ __inline__ __host__ __device__ feature_t user_mapper_push ( vertex_t src, vertex_t dest, feature_t level, index_t* beg_pos, weight_t edge_weight, feature_t* vert_status, feature_t* vert_status_prev) { // if(feature_end==INFTY) // return feature_src+1; // else return feature_end; // return vert_status_prev[src]; } /*user defined vertex behavior function*/ __inline__ __host__ __device__ bool vertex_selector_push ( vertex_t vert_id, feature_t level, vertex_t *adj_list, index_t *beg_pos, feature_t *vert_status, feature_t *vert_status_prev) { //if(vert_status[vert_id]==level) return true; //else return false; // return true; } /*user defined vertex behavior function*/ __inline__ __host__ __device__ feature_t user_mapper_pull ( vertex_t src, vertex_t dest, feature_t level, index_t* beg_pos, weight_t edge_weight, feature_t* vert_status, feature_t* vert_status_prev) { //index_t degree=beg_pos[active_edge_src+1]-beg_pos[active_edge_src]; return edge_weight * vert_status_prev[src]; //return edge_weight * 1; //return (feature_src==level ? feature_src+1:feature_end); } /*user defined vertex behavior function*/ __inline__ __host__ __device__ bool vertex_selector_pull ( vertex_t vert_id, feature_t level, vertex_t *adj_list, index_t *beg_pos, feature_t *vert_status, feature_t *vert_status_prev) { //if(vert_status[vert_id]==INFTY) return true; //else return false; return beg_pos[vert_id + 1] != beg_pos[vert_id]; } __device__ cb_reducer vert_selector_push_d = vertex_selector_push; __device__ cb_reducer vert_selector_pull_d = vertex_selector_pull; __device__ cb_mapper vert_behave_push_d = user_mapper_push; __device__ cb_mapper vert_behave_pull_d = user_mapper_pull; __global__ void init(meta_data mdata, gpu_graph ggraph) { index_t tid = threadIdx.x+blockIdx.x*blockDim.x; while(tid < ggraph.vert_count) { mdata.vert_status_prev[tid] = 1; mdata.vert_status[tid] = 1; tid += blockDim.x*gridDim.x; } } int main(int args, char **argv) { std::cout<<"Input: /path/to/exe /path/to/beg_pos /path/to/adj_list /path/weight_list iter_limit blk_size\n"; if(args<5){std::cout<<"Wrong input\n";exit(-1);} for(int i = 0; i < args; i++) std::cout<<argv[i]<<" "; std::cout<<"\n"; double tm_map,tm_red,tm_scan; char *file_beg_pos = argv[1]; char *file_adj_list = argv[2]; char *file_weight_list = argv[3]; int iter_limit = atoi(argv[4]); int blk_size = atoi(argv[5]); H_ERR(hipSetDevice(0)); //Read graph to CPU graph<long, long, long,vertex_t, index_t, weight_t> *ginst=new graph<long, long, long,vertex_t, index_t, weight_t> (file_beg_pos, file_adj_list, file_weight_list); cb_reducer vert_selector_push_h; cb_reducer vert_selector_pull_h; H_ERR(hipMemcpyFromSymbol(&vert_selector_push_h,vert_selector_push_d,sizeof(cb_reducer))); H_ERR(hipMemcpyFromSymbol(&vert_selector_pull_h,vert_selector_pull_d,sizeof(cb_reducer))); cb_mapper vert_behave_push_h; cb_mapper vert_behave_pull_h; H_ERR(hipMemcpyFromSymbol(&vert_behave_push_h,vert_behave_push_d,sizeof(cb_reducer))); H_ERR(hipMemcpyFromSymbol(&vert_behave_pull_h,vert_behave_pull_d,sizeof(cb_reducer))); gpu_graph ggraph(ginst); meta_data mdata(ginst->vert_count, ginst->edge_count); Barrier global_barrier(BLKS_NUM); 
mapper compute_mapper(ggraph, mdata, vert_behave_push_h, vert_behave_pull_h); reducer worklist_gather(ggraph, mdata, vert_selector_push_h, vert_selector_pull_h); H_ERR(hipDeviceSynchronize()); H_ERR(hipMemset((vertex_t *)mdata.worklist_sz_sml, 0, sizeof(vertex_t))); H_ERR(hipMemset((vertex_t *)mdata.worklist_sz_mid, 0, sizeof(vertex_t))); H_ERR(hipMemset((vertex_t *)mdata.worklist_sz_lrg, 0, sizeof(vertex_t))); H_ERR(hipDeviceSynchronize()); vertex_t *sml, *mid, *lrg; hipHostMalloc((void **)&sml, sizeof(vertex_t)); hipHostMalloc((void **)&mid, sizeof(vertex_t)); hipHostMalloc((void **)&lrg, sizeof(vertex_t)); /*reducer*/ // tm_red=wtime(); // reducer_pull(0, ggraph, mdata, worklist_gather); // tm_red=wtime()-tm_red; feature_t *level, *level_h; hipMalloc((void **)&level, sizeof(feature_t)); hipHostMalloc((void **)&level_h, sizeof(feature_t)); hipLaunchKernelGGL(( init), dim3(256), dim3(256), 0, 0, mdata, ggraph); H_ERR(hipDeviceSynchronize()); double time=wtime(); mapper_merge_pull (blk_size, iter_limit, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); std::cout<<"Total time: "<<wtime()-time<<" second(s).\n"; feature_t *gpu_dist = new feature_t[ginst->vert_count]; hipMemcpy(gpu_dist, mdata.vert_status, sizeof(feature_t) * ginst->vert_count, hipMemcpyDeviceToHost); feature_t gpu_rank_sum = 0; for(index_t i = 0; i < ginst->vert_count; i ++) gpu_rank_sum += gpu_dist[i]; std::cout<<"Total iteration: "<<iter_limit<<"\n"; std::cout<<"GPU rank sum: "<<gpu_rank_sum<<"\n"; feature_t *cpu_dist; feature_t cpu_rank_sum = 0; cpu_bp<index_t, vertex_t, feature_t> (cpu_rank_sum, cpu_dist, iter_limit, ginst->vert_count, ginst->edge_count, ginst->beg_pos, ginst->adj_list, ginst->weight); if (abs(gpu_rank_sum - cpu_rank_sum)/cpu_rank_sum < 0.00001) printf("Result correct\n"); else printf("Result wrong!"); //for(int levels=0;;levels++) //{ // //H_ERR(hipMemcpy(mdata.sa_chk, mdata.vert_status_prev, // // sizeof(feature_t)*ggraph.vert_count, hipMemcpyDeviceToHost)); // //for(int i = 0; i < 10; i ++) // // std::cout<<mdata.sa_chk[i] * (ginst->beg_pos[i+1] - ginst->beg_pos[i])<<" "; // //std::cout<<"\n"; // // /* mapper */ // tm_map=wtime(); // mapper_pull(level, ggraph, mdata, compute_mapper); // tm_map=wtime()-tm_map; // // feature_t *tmp = compute_mapper.vert_status; // compute_mapper.vert_status = compute_mapper.vert_status_prev; // compute_mapper.vert_status_prev = tmp; // // //H_ERR(hipMemcpy(sml, mdata.worklist_sz_sml, sizeof(vertex_t), hipMemcpyDeviceToHost)); // //H_ERR(hipMemcpy(mid, mdata.worklist_sz_mid, sizeof(vertex_t), hipMemcpyDeviceToHost)); // //H_ERR(hipMemcpy(lrg, mdata.worklist_sz_lrg, sizeof(vertex_t), hipMemcpyDeviceToHost)); // // // //printf("level-%d: %d\n", levels, sml[0]+mid[0]+lrg[0]); // // // /*monitoring*/ // std::cout<<"Level: "<<(int)levels<<" " // <<"Time (map, reduce): "<<tm_map<<" "<<tm_red<<"\n"; // if(levels == 10)break; //} //dumper(ggraph,mdata); }
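A small correctness note on this pair of files before the original CUDA source below: user_mapper_push and vertex_selector_push are declared to return feature_t and bool, but every statement in their bodies is commented out, so control falls off the end without returning a value (undefined behavior if the push path were ever exercised). A hedged sketch of explicit returns that follow the commented-out intent, in case the push path is re-enabled:

    /* Sketch only: return values match the commented-out lines in the bodies above. */
    __inline__ __host__ __device__ feature_t user_mapper_push(
        vertex_t src, vertex_t dest, feature_t level, index_t *beg_pos,
        weight_t edge_weight, feature_t *vert_status, feature_t *vert_status_prev)
    {
        return vert_status_prev[src]; /* push the source's previous rank */
    }

    __inline__ __host__ __device__ bool vertex_selector_push(
        vertex_t vert_id, feature_t level, vertex_t *adj_list, index_t *beg_pos,
        feature_t *vert_status, feature_t *vert_status_prev)
    {
        return true; /* treat every vertex as active for the push phase */
    }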
443912a884572a59a77c7f963110584117640027.cu
#include "header.h" #include "util.h" #include "mapper.cuh" #include "reducer.cuh" #include "wtime.h" #include "barrier.cuh" #include "gpu_graph.cuh" #include "meta_data.cuh" #include "mapper_enactor.cuh" #include "reducer_enactor.cuh" #include "cpu_bp.hpp" /*user defined vertex behavior function*/ __inline__ __host__ __device__ feature_t user_mapper_push ( vertex_t src, vertex_t dest, feature_t level, index_t* beg_pos, weight_t edge_weight, feature_t* vert_status, feature_t* vert_status_prev) { // if(feature_end==INFTY) // return feature_src+1; // else return feature_end; // return vert_status_prev[src]; } /*user defined vertex behavior function*/ __inline__ __host__ __device__ bool vertex_selector_push ( vertex_t vert_id, feature_t level, vertex_t *adj_list, index_t *beg_pos, feature_t *vert_status, feature_t *vert_status_prev) { //if(vert_status[vert_id]==level) return true; //else return false; // return true; } /*user defined vertex behavior function*/ __inline__ __host__ __device__ feature_t user_mapper_pull ( vertex_t src, vertex_t dest, feature_t level, index_t* beg_pos, weight_t edge_weight, feature_t* vert_status, feature_t* vert_status_prev) { //index_t degree=beg_pos[active_edge_src+1]-beg_pos[active_edge_src]; return edge_weight * vert_status_prev[src]; //return edge_weight * 1; //return (feature_src==level ? feature_src+1:feature_end); } /*user defined vertex behavior function*/ __inline__ __host__ __device__ bool vertex_selector_pull ( vertex_t vert_id, feature_t level, vertex_t *adj_list, index_t *beg_pos, feature_t *vert_status, feature_t *vert_status_prev) { //if(vert_status[vert_id]==INFTY) return true; //else return false; return beg_pos[vert_id + 1] != beg_pos[vert_id]; } __device__ cb_reducer vert_selector_push_d = vertex_selector_push; __device__ cb_reducer vert_selector_pull_d = vertex_selector_pull; __device__ cb_mapper vert_behave_push_d = user_mapper_push; __device__ cb_mapper vert_behave_pull_d = user_mapper_pull; __global__ void init(meta_data mdata, gpu_graph ggraph) { index_t tid = threadIdx.x+blockIdx.x*blockDim.x; while(tid < ggraph.vert_count) { mdata.vert_status_prev[tid] = 1; mdata.vert_status[tid] = 1; tid += blockDim.x*gridDim.x; } } int main(int args, char **argv) { std::cout<<"Input: /path/to/exe /path/to/beg_pos /path/to/adj_list /path/weight_list iter_limit blk_size\n"; if(args<5){std::cout<<"Wrong input\n";exit(-1);} for(int i = 0; i < args; i++) std::cout<<argv[i]<<" "; std::cout<<"\n"; double tm_map,tm_red,tm_scan; char *file_beg_pos = argv[1]; char *file_adj_list = argv[2]; char *file_weight_list = argv[3]; int iter_limit = atoi(argv[4]); int blk_size = atoi(argv[5]); H_ERR(cudaSetDevice(0)); //Read graph to CPU graph<long, long, long,vertex_t, index_t, weight_t> *ginst=new graph<long, long, long,vertex_t, index_t, weight_t> (file_beg_pos, file_adj_list, file_weight_list); cb_reducer vert_selector_push_h; cb_reducer vert_selector_pull_h; H_ERR(cudaMemcpyFromSymbol(&vert_selector_push_h,vert_selector_push_d,sizeof(cb_reducer))); H_ERR(cudaMemcpyFromSymbol(&vert_selector_pull_h,vert_selector_pull_d,sizeof(cb_reducer))); cb_mapper vert_behave_push_h; cb_mapper vert_behave_pull_h; H_ERR(cudaMemcpyFromSymbol(&vert_behave_push_h,vert_behave_push_d,sizeof(cb_reducer))); H_ERR(cudaMemcpyFromSymbol(&vert_behave_pull_h,vert_behave_pull_d,sizeof(cb_reducer))); gpu_graph ggraph(ginst); meta_data mdata(ginst->vert_count, ginst->edge_count); Barrier global_barrier(BLKS_NUM); mapper compute_mapper(ggraph, mdata, vert_behave_push_h, vert_behave_pull_h); 
reducer worklist_gather(ggraph, mdata, vert_selector_push_h, vert_selector_pull_h); H_ERR(cudaThreadSynchronize()); H_ERR(cudaMemset((vertex_t *)mdata.worklist_sz_sml, 0, sizeof(vertex_t))); H_ERR(cudaMemset((vertex_t *)mdata.worklist_sz_mid, 0, sizeof(vertex_t))); H_ERR(cudaMemset((vertex_t *)mdata.worklist_sz_lrg, 0, sizeof(vertex_t))); H_ERR(cudaThreadSynchronize()); vertex_t *sml, *mid, *lrg; cudaMallocHost((void **)&sml, sizeof(vertex_t)); cudaMallocHost((void **)&mid, sizeof(vertex_t)); cudaMallocHost((void **)&lrg, sizeof(vertex_t)); /*reducer*/ // tm_red=wtime(); // reducer_pull(0, ggraph, mdata, worklist_gather); // tm_red=wtime()-tm_red; feature_t *level, *level_h; cudaMalloc((void **)&level, sizeof(feature_t)); cudaMallocHost((void **)&level_h, sizeof(feature_t)); init<<<256, 256>>>(mdata, ggraph); H_ERR(cudaDeviceSynchronize()); double time=wtime(); mapper_merge_pull (blk_size, iter_limit, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); std::cout<<"Total time: "<<wtime()-time<<" second(s).\n"; feature_t *gpu_dist = new feature_t[ginst->vert_count]; cudaMemcpy(gpu_dist, mdata.vert_status, sizeof(feature_t) * ginst->vert_count, cudaMemcpyDeviceToHost); feature_t gpu_rank_sum = 0; for(index_t i = 0; i < ginst->vert_count; i ++) gpu_rank_sum += gpu_dist[i]; std::cout<<"Total iteration: "<<iter_limit<<"\n"; std::cout<<"GPU rank sum: "<<gpu_rank_sum<<"\n"; feature_t *cpu_dist; feature_t cpu_rank_sum = 0; cpu_bp<index_t, vertex_t, feature_t> (cpu_rank_sum, cpu_dist, iter_limit, ginst->vert_count, ginst->edge_count, ginst->beg_pos, ginst->adj_list, ginst->weight); if (abs(gpu_rank_sum - cpu_rank_sum)/cpu_rank_sum < 0.00001) printf("Result correct\n"); else printf("Result wrong!"); //for(int levels=0;;levels++) //{ // //H_ERR(cudaMemcpy(mdata.sa_chk, mdata.vert_status_prev, // // sizeof(feature_t)*ggraph.vert_count, cudaMemcpyDeviceToHost)); // //for(int i = 0; i < 10; i ++) // // std::cout<<mdata.sa_chk[i] * (ginst->beg_pos[i+1] - ginst->beg_pos[i])<<" "; // //std::cout<<"\n"; // // /* mapper */ // tm_map=wtime(); // mapper_pull(level, ggraph, mdata, compute_mapper); // tm_map=wtime()-tm_map; // // feature_t *tmp = compute_mapper.vert_status; // compute_mapper.vert_status = compute_mapper.vert_status_prev; // compute_mapper.vert_status_prev = tmp; // // //H_ERR(cudaMemcpy(sml, mdata.worklist_sz_sml, sizeof(vertex_t), cudaMemcpyDeviceToHost)); // //H_ERR(cudaMemcpy(mid, mdata.worklist_sz_mid, sizeof(vertex_t), cudaMemcpyDeviceToHost)); // //H_ERR(cudaMemcpy(lrg, mdata.worklist_sz_lrg, sizeof(vertex_t), cudaMemcpyDeviceToHost)); // // // //printf("level-%d: %d\n", levels, sml[0]+mid[0]+lrg[0]); // // // /*monitoring*/ // std::cout<<"Level: "<<(int)levels<<" " // <<"Time (map, reduce): "<<tm_map<<" "<<tm_red<<"\n"; // if(levels == 10)break; //} //dumper(ggraph,mdata); }
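One visible difference between this .cu source and its hipified twin above is the synchronization call: cudaThreadSynchronize() is a long-deprecated alias for device-wide synchronization, and hipify emits hipDeviceSynchronize() in its place (likewise cudaMallocHost becomes hipHostMalloc). A hedged sketch of the same sequence in current CUDA spelling, reusing this project's H_ERR error-check macro:

    /* Sketch only: same behavior as the calls in this file, without the
     * deprecated cudaThreadSynchronize() spelling. */
    H_ERR(cudaDeviceSynchronize());
    H_ERR(cudaMemset((vertex_t *)mdata.worklist_sz_sml, 0, sizeof(vertex_t)));
    H_ERR(cudaMemset((vertex_t *)mdata.worklist_sz_mid, 0, sizeof(vertex_t)));
    H_ERR(cudaMemset((vertex_t *)mdata.worklist_sz_lrg, 0, sizeof(vertex_t)));
    H_ERR(cudaDeviceSynchronize());

    vertex_t *sml, *mid, *lrg;
    H_ERR(cudaMallocHost((void **)&sml, sizeof(vertex_t))); /* pinned host memory */
    H_ERR(cudaMallocHost((void **)&mid, sizeof(vertex_t)));
    H_ERR(cudaMallocHost((void **)&lrg, sizeof(vertex_t)));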
e9c93a72d43b996a1ef935c9f3c769e86ae688b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zgeqr2x_gpu-v4.cu normal z -> d, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #include "commonblas_d.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /** Purpose ------- DGEQR2 computes a QR factorization of a real m by n matrix A: A = Q * R. This expert routine requires two more arguments than the standard dgeqr2, namely, dT and ddA, explained below. The storage for A is also not as in the LAPACK's dgeqr2 routine (see below). The first is used to output the triangular n x n factor T of the block reflector used in the factorization. The second holds the diagonal nxn blocks of A, i.e., the diagonal submatrices of R. This routine implements the left looking QR. This version adds internal blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA DOUBLE_PRECISION array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the unitary matrix Q as a product of elementary reflectors (see Further Details). \n the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the unitary matrix Q as a product of elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] dtau DOUBLE_PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] dT DOUBLE_PRECISION array, dimension N x N. Stores the triangular N x N factor T of the block reflector used in the factorization. The lower triangular part is 0. @param[out] ddA DOUBLE_PRECISION array, dimension N x N. Stores the elements of the upper N x N diagonal block of A. LAPACK stores this array in A. There are 0s below the diagonal. @param dwork (workspace) DOUBLE_PRECISION array, dimension (3 N) @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_dgeqrf_comp ********************************************************************/ extern "C" magma_int_t magma_dgeqr2x4_gpu( magma_int_t m, magma_int_t n, double *dA, magma_int_t ldda, double *dtau, double *dT, double *ddA, double *dwork, magma_int_t *info, magma_queue_t stream) { #define dA(i_,j_) (dA + (j_)*(ldda) + (i_)) #define dT(i_,j_) (dT + (j_)*(k) + (i_)) #define BS 32 magma_int_t i, k; double *dnorm = (double *)dwork; double *work = (double *)(dwork+2*n); magma_queue_t cstream; magmablasGetKernelStream(&cstream); magmablasSetKernelStream(stream); *info = 0; if (m < 0) { *info = -1; } else if (n < 0) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } /* Compute the norms of the trailing columns */ k = min(m,n); magmablas_dnrm2_cols(m, k, dA(0,0), ldda, dnorm); for (magma_int_t b=0; b < k; b += BS) { for (i = b; i < min(k, b+BS); ++i) { /* Apply H' to A(:,i) from the left */ if (i-b > 0) { /* Compute the (i-1)th column of T */ if ( i-1 > 0 ) { hipLaunchKernelGGL(( magma_dgemv_kernel3), dim3(i-1), dim3(BLOCK_SIZE), 0, magma_stream , m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); hipLaunchKernelGGL(( magma_dtrmv_kernel2), dim3(i-1), dim3(i-1), 0, magma_stream , dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* dwork = V' c */ hipLaunchKernelGGL(( magma_dgemv_kernel1), dim3(i-b), dim3(BLOCK_SIZE), 0, magma_stream , m-b, dA(b, b), ldda, dA(b,i), work); /* dwork = T' work */ hipLaunchKernelGGL(( magma_dtrmv_tkernel), dim3(i-b), dim3(i-b), 0, magma_stream , dT(b,b), k, work, work+i-b); /* c = c - V work */ if ( m-b > 0 ) { dim3 blocks3( (m-b + BLOCK_SIZE-1) / BLOCK_SIZE ); dim3 threads3( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_dgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , m-b, i-b, dA(b,b), ldda, work+i-b, dA(b, i)); } } /* Adjust the dnorm[i] to hold the norm of A(i:m,i) */ if ( i > 0 ) { hipLaunchKernelGGL(( magma_dnrm2_adjust_kernel), dim3(1), dim3(i), 0, magma_stream , dnorm+i, dA(0, i)); } /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) 1. 1 is not yet put on the diagonal of A 2. Elements above the diagonal are copied in ddA and the ones in A are set to zero 3. update T */ magma_dlarfgx_gpu(m-i, dA(i, i), dA(min(i+1,m),i), dtau+i, dnorm+i, ddA + i + i*n, i); if (i == 0) { double tt = MAGMA_D_ONE; magmablas_dlacpy(MagmaUpperLower, 1, 1, dtau, 1, dT(0,0), 1); magma_dsetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, magma_stream); } } if ( i-1 > 0 ) { hipLaunchKernelGGL(( magma_dgemv_kernel3), dim3(i-1), dim3(BLOCK_SIZE), 0, magma_stream , m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); hipLaunchKernelGGL(( magma_dtrmv_kernel2), dim3(i-1), dim3(i-1), 0, magma_stream , dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* Apply the transformations to the trailing matrix. */ //magma_dlarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise, magma_dlarfb2_gpu( m-b, k-i, BS, dA(b, b), ldda, dT+b+b*k, k, dA(b, i), ldda, work, k-i); } magmablasSetKernelStream(cstream); return *info; } /* magma_dgeqr2 */
e9c93a72d43b996a1ef935c9f3c769e86ae688b7.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zgeqr2x_gpu-v4.cu normal z -> d, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #include "commonblas_d.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /** Purpose ------- DGEQR2 computes a QR factorization of a real m by n matrix A: A = Q * R. This expert routine requires two more arguments than the standard dgeqr2, namely, dT and ddA, explained below. The storage for A is also not as in the LAPACK's dgeqr2 routine (see below). The first is used to output the triangular n x n factor T of the block reflector used in the factorization. The second holds the diagonal nxn blocks of A, i.e., the diagonal submatrices of R. This routine implements the left looking QR. This version adds internal blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA DOUBLE_PRECISION array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the unitary matrix Q as a product of elementary reflectors (see Further Details). \n the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the unitary matrix Q as a product of elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] dtau DOUBLE_PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] dT DOUBLE_PRECISION array, dimension N x N. Stores the triangular N x N factor T of the block reflector used in the factorization. The lower triangular part is 0. @param[out] ddA DOUBLE_PRECISION array, dimension N x N. Stores the elements of the upper N x N diagonal block of A. LAPACK stores this array in A. There are 0s below the diagonal. @param dwork (workspace) DOUBLE_PRECISION array, dimension (3 N) @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_dgeqrf_comp ********************************************************************/ extern "C" magma_int_t magma_dgeqr2x4_gpu( magma_int_t m, magma_int_t n, double *dA, magma_int_t ldda, double *dtau, double *dT, double *ddA, double *dwork, magma_int_t *info, magma_queue_t stream) { #define dA(i_,j_) (dA + (j_)*(ldda) + (i_)) #define dT(i_,j_) (dT + (j_)*(k) + (i_)) #define BS 32 magma_int_t i, k; double *dnorm = (double *)dwork; double *work = (double *)(dwork+2*n); magma_queue_t cstream; magmablasGetKernelStream(&cstream); magmablasSetKernelStream(stream); *info = 0; if (m < 0) { *info = -1; } else if (n < 0) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } /* Compute the norms of the trailing columns */ k = min(m,n); magmablas_dnrm2_cols(m, k, dA(0,0), ldda, dnorm); for (magma_int_t b=0; b < k; b += BS) { for (i = b; i < min(k, b+BS); ++i) { /* Apply H' to A(:,i) from the left */ if (i-b > 0) { /* Compute the (i-1)th column of T */ if ( i-1 > 0 ) { magma_dgemv_kernel3<<< i-1, BLOCK_SIZE, 0, magma_stream >>> ( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); magma_dtrmv_kernel2<<< i-1, i-1, 0, magma_stream >>> ( dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* dwork = V' c */ magma_dgemv_kernel1<<< i-b, BLOCK_SIZE, 0, magma_stream >>> (m-b, dA(b, b), ldda, dA(b,i), work); /* dwork = T' work */ magma_dtrmv_tkernel<<< i-b, i-b, 0, magma_stream >>> (dT(b,b), k, work, work+i-b); /* c = c - V work */ if ( m-b > 0 ) { dim3 blocks3( (m-b + BLOCK_SIZE-1) / BLOCK_SIZE ); dim3 threads3( BLOCK_SIZE ); magma_dgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>> (m-b, i-b, dA(b,b), ldda, work+i-b, dA(b, i)); } } /* Adjust the dnorm[i] to hold the norm of A(i:m,i) */ if ( i > 0 ) { magma_dnrm2_adjust_kernel<<< 1, i, 0, magma_stream >>>(dnorm+i, dA(0, i)); } /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) 1. 1 is not yet put on the diagonal of A 2. Elements above the diagonal are copied in ddA and the ones in A are set to zero 3. update T */ magma_dlarfgx_gpu(m-i, dA(i, i), dA(min(i+1,m),i), dtau+i, dnorm+i, ddA + i + i*n, i); if (i == 0) { double tt = MAGMA_D_ONE; magmablas_dlacpy(MagmaUpperLower, 1, 1, dtau, 1, dT(0,0), 1); magma_dsetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, magma_stream); } } if ( i-1 > 0 ) { magma_dgemv_kernel3<<< i-1, BLOCK_SIZE, 0, magma_stream >>> ( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); magma_dtrmv_kernel2<<< i-1, i-1, 0, magma_stream >>> ( dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* Apply the transformations to the trailing matrix. */ //magma_dlarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise, magma_dlarfb2_gpu( m-b, k-i, BS, dA(b, b), ldda, dT+b+b*k, k, dA(b, i), ldda, work, k-i); } magmablasSetKernelStream(cstream); return *info; } /* magma_dgeqr2 */
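The launches in this routine pass an explicit dynamic shared-memory size and stream, which is where the two dialects differ most visibly: the hipified entry above spells each <<< >>> launch as a hipLaunchKernelGGL call with the execution configuration moved into leading arguments. A minimal sketch of the general pattern (someKernel and its arguments are placeholders, not symbols from this file):

    /* CUDA form: execution configuration inside the chevrons. */
    someKernel<<< gridDim, blockDim, sharedBytes, stream >>>(arg0, arg1);

    /* HIP form emitted by hipify: configuration becomes the leading arguments. */
    hipLaunchKernelGGL(someKernel, dim3(gridDim), dim3(blockDim),
                       sharedBytes, stream, arg0, arg1);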
391ad023da33a7dca11ecb0f722df328db362419.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/TensorUtils.h> #include <ATen/core/TensorAccessor.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <THH/THHAtomics.cuh> #include <mutex> #include "cub/device/device_partition.cuh" #include "hipcub/hipcub.hpp" #include "tt_cuda_utils.cuh" #include "hashtbl_cuda_utils.cuh" using namespace at; namespace { constexpr int32_t MAX_PROBES = 3; enum { OPTIM_SGD = 0, OPTIM_ADAGRAD = 1, OPTIM_DENSE = 2, }; } // namespace inline void cuda_gemm_batched_fp32_fp32( hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, float* alpha, void** a_array, int lda, void** b_array, int ldb, float* beta, void** c_array, int ldc, int batch_count) { hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipblasSetStream(handle, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()); hipblasGemmBatchedEx( handle, transa, transb, m, n, k, alpha, a_array, HIP_R_32F, lda, b_array, HIP_R_32F, ldb, beta, c_array, HIP_R_32F, ldc, batch_count, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); } __global__ void init_batch_gemm_backward_2T_kernel( int32_t N, const int64_t* __restrict__ colidx, const int64_t* __restrict__ rowidx, const int64_t* __restrict__ L, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_output, int32_t* __restrict__ tt_idx, float** __restrict__ a0_ptr, float** __restrict__ b0_ptr, float** __restrict__ c0_ptr, float** __restrict__ a1_ptr, float** __restrict__ b1_ptr, float** __restrict__ c1_ptr) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto ridx = __ldg(&rowidx[n]); int32_t tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; int32_t tt_idx_1 = cidx / L[1]; tt_idx[0 * N + n] = tt_idx_0; tt_idx[1 * N + n] = tt_idx_1; float* d_output_ptr = (float*)&(d_output[ridx][0]); a0_ptr[0 * N + n] = (float*)&(tt_cores_0[tt_idx_0][0]); b0_ptr[0 * N + n] = d_output_ptr; c0_ptr[0 * N + n] = (float*)&(tr_tt_cores_1[n][0]); a1_ptr[0 * N + n] = d_output_ptr; b1_ptr[0 * N + n] = (float*)&(tt_cores_1[tt_idx_1][0]); c1_ptr[0 * N + n] = (float*)&(tr_tt_cores_0[n][0]); } } __global__ void init_batch_gemm_backward_3T_kernel( int32_t N, const int64_t* __restrict__ colidx, const int64_t* __restrict__ rowidx, const int64_t* __restrict__ L, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_output, int32_t* __restrict__ tt_idx, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** 
__restrict__ c_ptr, float** __restrict__ a0_ptr, float** __restrict__ b0_ptr, float** __restrict__ c0_ptr, float** __restrict__ a1_ptr, float** __restrict__ b1_ptr, float** __restrict__ c1_ptr) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto ridx = __ldg(&rowidx[n]); int32_t tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; int32_t tt_idx_1 = cidx / L[1]; cidx = cidx % L[1]; int32_t tt_idx_2 = cidx / L[2]; tt_idx[0 * N + n] = tt_idx_0; tt_idx[1 * N + n] = tt_idx_1; tt_idx[2 * N + n] = tt_idx_2; float* tr_0_ptr = (float*)&(tr_0[n][0]); float* d_output_ptr = (float*)&(d_output[ridx][0]); float* tt_cores_0_ptr = (float*)&(tt_cores_0[tt_idx_0][0]); float* tt_cores_1_ptr = (float*)&(tt_cores_1[tt_idx_1][0]); a_ptr[0 * N + n] = tt_cores_1_ptr; b_ptr[0 * N + n] = tt_cores_0_ptr; c_ptr[0 * N + n] = tr_0_ptr; a0_ptr[0 * N + n] = tt_cores_0_ptr; b0_ptr[0 * N + n] = tr_0_ptr; c0_ptr[0 * N + n] = (float*)&(tr_tt_cores_1[n][0]); a1_ptr[0 * N + n] = tr_0_ptr; b1_ptr[0 * N + n] = tt_cores_1_ptr; c1_ptr[0 * N + n] = (float*)&(tr_tt_cores_0[n][0]); a0_ptr[1 * N + n] = tr_0_ptr; b0_ptr[1 * N + n] = d_output_ptr; c0_ptr[1 * N + n] = (float*)&(tr_tt_cores_2[n][0]); a1_ptr[1 * N + n] = d_output_ptr; b1_ptr[1 * N + n] = (float*)&(tt_cores_2[tt_idx_2][0]); c1_ptr[1 * N + n] = tr_0_ptr; } } __global__ void init_batch_gemm_backward_4T_kernel( int32_t N, const int64_t* __restrict__ colidx, const int64_t* __restrict__ rowidx, const int64_t* __restrict__ L, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_3, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_3, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_output, int32_t* __restrict__ tt_idx, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr, float** __restrict__ a0_ptr, float** __restrict__ b0_ptr, float** __restrict__ c0_ptr, float** __restrict__ a1_ptr, float** __restrict__ b1_ptr, float** __restrict__ c1_ptr) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto ridx = __ldg(&rowidx[n]); int32_t tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; int32_t tt_idx_1 = cidx / L[1]; cidx = cidx % L[1]; int32_t tt_idx_2 = cidx / L[2]; cidx = cidx % L[2]; int32_t tt_idx_3 = cidx / L[3]; tt_idx[0 * N + n] = tt_idx_0; tt_idx[1 * N + n] = tt_idx_1; tt_idx[2 * N + n] = tt_idx_2; tt_idx[3 * N + n] = tt_idx_3; float* tr_0_ptr = (float*)&(tr_0[n][0]); float* tr_1_ptr = (float*)&(tr_1[n][0]); float* d_output_ptr = (float*)&(d_output[ridx][0]); float* tt_cores_0_ptr = (float*)&(tt_cores_0[tt_idx_0][0]); float* tt_cores_1_ptr = (float*)&(tt_cores_1[tt_idx_1][0]); float* tt_cores_2_ptr = (float*)&(tt_cores_2[tt_idx_2][0]); a_ptr[0 * N + n] = tt_cores_1_ptr; b_ptr[0 * N + n] = tt_cores_0_ptr; c_ptr[0 * N + n] = tr_0_ptr; a_ptr[1 * N + n] = tt_cores_2_ptr; b_ptr[1 * N + n] = tr_0_ptr; c_ptr[1 * N + n] = tr_1_ptr; a0_ptr[0 * N + n] = tt_cores_0_ptr; b0_ptr[0 * N + n] = tr_0_ptr; c0_ptr[0 * N + n] = (float*)&(tr_tt_cores_1[n][0]); a1_ptr[0 * 
N + n] = b0_ptr[0 * N + n]; b1_ptr[0 * N + n] = tt_cores_1_ptr; c1_ptr[0 * N + n] = (float*)&(tr_tt_cores_0[n][0]); a0_ptr[1 * N + n] = tr_0_ptr; b0_ptr[1 * N + n] = tr_1_ptr; c0_ptr[1 * N + n] = (float*)&(tr_tt_cores_2[n][0]); a1_ptr[1 * N + n] = b0_ptr[1 * N + n]; b1_ptr[1 * N + n] = tt_cores_2_ptr; c1_ptr[1 * N + n] = tr_0_ptr; a0_ptr[2 * N + n] = tr_1_ptr; b0_ptr[2 * N + n] = d_output_ptr; c0_ptr[2 * N + n] = (float*)&(tr_tt_cores_3[n][0]); a1_ptr[2 * N + n] = d_output_ptr; b1_ptr[2 * N + n] = (float*)&(tt_cores_3[tt_idx[3 * N + n]][0]); c1_ptr[2 * N + n] = tr_1_ptr; } } void init_batch_gemm_backward_cuda( int32_t T, int32_t N, const int64_t* __restrict__ colidx, const int64_t* __restrict__ rowidx, const int64_t* __restrict__ L, const std::vector<Tensor>& tt_cores, const std::vector<Tensor>& tr_tt_cores, const std::vector<Tensor>& tr, Tensor d_output, int32_t* __restrict__ tt_idx, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr, float** __restrict__ a0_ptr, float** __restrict__ b0_ptr, float** __restrict__ c0_ptr, float** __restrict__ a1_ptr, float** __restrict__ b1_ptr, float** __restrict__ c1_ptr) { int32_t threads = (N > 256 ? 256 : 32); int32_t num_blocks = (N + threads - 1) / threads; if (T == 2) { hipLaunchKernelGGL(( init_batch_gemm_backward_2T_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, colidx, rowidx, L, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), d_output.packed_accessor32<float, 2, RestrictPtrTraits>(), tt_idx, a0_ptr, b0_ptr, c0_ptr, a1_ptr, b1_ptr, c1_ptr); } else if (T == 3) { hipLaunchKernelGGL(( init_batch_gemm_backward_3T_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, colidx, rowidx, L, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(), d_output.packed_accessor32<float, 2, RestrictPtrTraits>(), tt_idx, a_ptr, b_ptr, c_ptr, a0_ptr, b0_ptr, c0_ptr, a1_ptr, b1_ptr, c1_ptr); } else if (T == 4) { hipLaunchKernelGGL(( init_batch_gemm_backward_4T_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, colidx, rowidx, L, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[3].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[3].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[1].packed_accessor32<float, 2, RestrictPtrTraits>(), d_output.packed_accessor32<float, 2, RestrictPtrTraits>(), tt_idx, a_ptr, b_ptr, c_ptr, a0_ptr, b0_ptr, c0_ptr, a1_ptr, b1_ptr, c1_ptr); } } __global__ void update_d_tt_cores_kernel( 
int32_t N, int32_t D, const int32_t* __restrict__ tt_idx, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_tt_cores) { int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n < N) { auto idx = __ldg(&tt_idx[n]); for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { atomicAdd(&(d_tt_cores[idx][d]), tr_tt_cores[n][d]); } } } __global__ void update_tt_cores_sgd_kernel( int32_t B, int32_t D, float learning_rate, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_tt_cores, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores) { int32_t b = blockIdx.x * blockDim.y + threadIdx.y; if (b >= B) { return; } for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { tt_cores[b][d] -= learning_rate * d_tt_cores[b][d]; } } __global__ void update_tt_cores_adagrad_kernel( int32_t B, int32_t D, float learning_rate, float eps, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_tt_cores, PackedTensorAccessor32<float, 2, RestrictPtrTraits> optimizer_state, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores) { int32_t b = blockIdx.x * blockDim.y + threadIdx.y; if (b >= B) { return; } for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { optimizer_state[b][d] += d_tt_cores[b][d] * d_tt_cores[b][d]; tt_cores[b][d] -= learning_rate * d_tt_cores[b][d] / (sqrt(optimizer_state[b][d]) + eps); } } std::vector<Tensor> tt_embeddings_backward_cuda( int32_t optim, int32_t batch_count, int32_t D, float learning_rate, float eps, const std::vector<int32_t>& tt_p_shapes, const std::vector<int32_t>& tt_q_shapes, const std::vector<int32_t>& tt_ranks, Tensor L, int32_t nnz, Tensor colidx, Tensor rowidx, Tensor d_output, c10::optional<std::vector<Tensor>> optimizer_state, std::vector<Tensor>& tt_cores) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(d_output.get_device()); int32_t T = tt_p_shapes.size(); std::vector<Tensor> d_tt_cores; std::vector<Tensor> tr_tt_cores; for (int32_t t = 0; t < T; ++t) { d_tt_cores.push_back(at::zeros_like(tt_cores[t])); tr_tt_cores.push_back( at::empty({batch_count, tt_cores[t].size(1)}, tt_cores[t].options())); } if (nnz == 0) { return d_tt_cores; } // batch gemm parameters std::vector<int32_t> m(T - 1); std::vector<int32_t> n(T - 1); std::vector<int32_t> k(T - 1); float alpha = 1.0; float beta = 0.0; int32_t m_ = tt_q_shapes[0]; for (int32_t t = 0; t < T - 1; ++t) { m[t] = m_; k[t] = tt_ranks[t + 1]; n[t] = tt_q_shapes[t + 1] * tt_ranks[t + 2]; m_ = m_ * tt_q_shapes[t + 1]; } // allocate the immediate buffers std::vector<Tensor> tr; int64_t tr_size = tt_q_shapes[0] * tt_ranks[1]; for (int32_t t = 0; t < T - 2; ++t) { tr_size = tr_size * tt_q_shapes[t + 1] * tt_ranks[t + 2] / tt_ranks[t + 1]; tr.push_back(at::empty({batch_count, tr_size}, tt_cores[0].options())); } auto tt_idx = at::empty({T * batch_count}, tt_cores[0].options().dtype(at::kInt)); auto a_ptr_tensor = at::empty( {(T - 2) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto b_ptr_tensor = at::empty( {(T - 2) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto c_ptr_tensor = at::empty( {(T - 2) * batch_count}, tt_cores[0].options().dtype(at::kLong)); float** a_ptr = (float**)a_ptr_tensor.data_ptr<int64_t>(); float** b_ptr = (float**)b_ptr_tensor.data_ptr<int64_t>(); float** c_ptr = (float**)c_ptr_tensor.data_ptr<int64_t>(); auto a0_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto b0_ptr_tensor = at::empty( {(T - 1) * batch_count}, 
tt_cores[0].options().dtype(at::kLong)); auto c0_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); float** a0_ptr = (float**)a0_ptr_tensor.data_ptr<int64_t>(); float** b0_ptr = (float**)b0_ptr_tensor.data_ptr<int64_t>(); float** c0_ptr = (float**)c0_ptr_tensor.data_ptr<int64_t>(); auto a1_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto b1_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto c1_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); float** a1_ptr = (float**)a1_ptr_tensor.data_ptr<int64_t>(); float** b1_ptr = (float**)b1_ptr_tensor.data_ptr<int64_t>(); float** c1_ptr = (float**)c1_ptr_tensor.data_ptr<int64_t>(); for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) { int32_t end_idx = start_idx + batch_count < nnz ? start_idx + batch_count : nnz; int32_t N = end_idx - start_idx; init_batch_gemm_backward_cuda( T, N, &(colidx.data_ptr<int64_t>()[start_idx]), &(rowidx.data_ptr<int64_t>()[start_idx]), L.data_ptr<int64_t>(), tt_cores, tr_tt_cores, tr, d_output, tt_idx.data_ptr<int32_t>(), a_ptr, b_ptr, c_ptr, a0_ptr, b0_ptr, c0_ptr, a1_ptr, b1_ptr, c1_ptr); // recompute forward for (int32_t t = 0; t < T - 2; ++t) { cuda_gemm_batched_fp32_fp32( HIPBLAS_OP_N, HIPBLAS_OP_N, n[t], m[t], k[t], &alpha, (void**)&(a_ptr[t * N]), n[t], (void**)&(b_ptr[t * N]), k[t], &beta, (void**)&(c_ptr[t * N]), n[t], N); } // for (int32_t t = 0; t < T - 2; ++t) // backward propagation for (int32_t t = T - 2; t >= 0; --t) { cuda_gemm_batched_fp32_fp32( HIPBLAS_OP_N, HIPBLAS_OP_T, n[t], k[t], m[t], &alpha, (void**)&(b0_ptr[t * N]), n[t], (void**)&(a0_ptr[t * N]), k[t], &beta, (void**)&(c0_ptr[t * N]), n[t], N); int32_t D_0 = tt_cores[t + 1].size(1); int32_t tx_0 = ::min(1024, D_0); int32_t ty_0 = 1024 / tx_0; hipLaunchKernelGGL(( update_d_tt_cores_kernel), dim3(div_round_up(N, ty_0)), dim3(dim3(tx_0, ty_0)), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, D_0, &(tt_idx.data_ptr<int32_t>()[(t + 1) * N]), tr_tt_cores[t + 1].packed_accessor32<float, 2, RestrictPtrTraits>(), d_tt_cores[t + 1].packed_accessor32<float, 2, RestrictPtrTraits>()); cuda_gemm_batched_fp32_fp32( HIPBLAS_OP_T, HIPBLAS_OP_N, k[t], m[t], n[t], &alpha, (void**)&(b1_ptr[t * N]), n[t], (void**)&(a1_ptr[t * N]), n[t], &beta, (void**)&(c1_ptr[t * N]), k[t], N); if (t == 0) { int32_t D_1 = tt_cores[0].size(1); int32_t tx_1 = ::min(1024, D_1); int32_t ty_1 = 1024 / tx_1; hipLaunchKernelGGL(( update_d_tt_cores_kernel), dim3(div_round_up(N, ty_1)), dim3(dim3(tx_1, ty_1)), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, D_1, &(tt_idx.data_ptr<int32_t>()[t * N]), tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), d_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>()); } } // for (int32_t t = T - 2; t >=0 ; --t) } // for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) if (optim == OPTIM_ADAGRAD) { for (int32_t t = 0; t < T; ++t) { int32_t y_size = tt_cores[t].size(0); int32_t x_size = tt_cores[t].size(1); int32_t tx = ::min(1024, y_size); int32_t ty = 1024 / tx; hipLaunchKernelGGL(( update_tt_cores_adagrad_kernel), dim3(div_round_up(x_size, ty)), dim3(dim3(tx, ty)), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), y_size, x_size, learning_rate, eps, d_tt_cores[t].packed_accessor32<float, 2, RestrictPtrTraits>(), (*optimizer_state)[t] .packed_accessor32<float, 2, RestrictPtrTraits>(), 
tt_cores[t].packed_accessor32<float, 2, RestrictPtrTraits>()); } } else if (optim == OPTIM_SGD) { for (int32_t t = 0; t < T; ++t) { int32_t y_size = tt_cores[t].size(0); int32_t x_size = tt_cores[t].size(1); int32_t tx = ::min(1024, y_size); int32_t ty = 1024 / tx; hipLaunchKernelGGL(( update_tt_cores_sgd_kernel), dim3(div_round_up(x_size, ty)), dim3(dim3(tx, ty)), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), y_size, x_size, learning_rate, d_tt_cores[t].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[t].packed_accessor32<float, 2, RestrictPtrTraits>()); } } return d_tt_cores; } std::vector<Tensor> tt_embeddings_backward_dense_cuda( int32_t batch_count, int32_t D, const std::vector<int32_t>& tt_p_shapes, const std::vector<int32_t>& tt_q_shapes, const std::vector<int32_t>& tt_ranks, Tensor L, int32_t nnz, Tensor colidx, Tensor rowidx, Tensor d_output, std::vector<Tensor>& tt_cores) { return tt_embeddings_backward_cuda( OPTIM_DENSE, batch_count, D, 0.0, 0.0, tt_p_shapes, tt_q_shapes, tt_ranks, L, nnz, colidx, rowidx, d_output, c10::nullopt, tt_cores); } void tt_embeddings_backward_sgd_cuda( int32_t batch_count, int32_t D, float learning_rate, const std::vector<int32_t>& tt_p_shapes, const std::vector<int32_t>& tt_q_shapes, const std::vector<int32_t>& tt_ranks, Tensor L, int32_t nnz, Tensor colidx, Tensor rowidx, Tensor d_output, std::vector<Tensor>& tt_cores) { tt_embeddings_backward_cuda( OPTIM_SGD, batch_count, D, learning_rate, 0.0, tt_p_shapes, tt_q_shapes, tt_ranks, L, nnz, colidx, rowidx, d_output, c10::nullopt, tt_cores); } void tt_embeddings_backward_adagrad_cuda( int32_t batch_count, int32_t D, float learning_rate, float eps, const std::vector<int32_t>& tt_p_shapes, const std::vector<int32_t>& tt_q_shapes, const std::vector<int32_t>& tt_ranks, Tensor L, int32_t nnz, Tensor colidx, Tensor rowidx, Tensor d_output, std::vector<Tensor>& optimizer_state, std::vector<Tensor>& tt_cores) { tt_embeddings_backward_cuda( OPTIM_ADAGRAD, batch_count, D, learning_rate, eps, tt_p_shapes, tt_q_shapes, tt_ranks, L, nnz, colidx, rowidx, d_output, optimizer_state, tt_cores); } __global__ void init_batch_gemm_forward_2T_kernel( int N, const int64_t* __restrict__ L, const int64_t* __restrict__ colidx, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; auto tt_idx_1 = cidx / L[1]; a_ptr[0 * N + n] = (float*)&(tt_cores_1[tt_idx_1][0]); b_ptr[0 * N + n] = (float*)&(tt_cores_0[tt_idx_0][0]); c_ptr[0 * N + n] = (float*)&(tr_0[n][0]); } } __global__ void init_batch_gemm_forward_3T_kernel( int N, const int64_t* __restrict__ L, const int64_t* __restrict__ colidx, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_1, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; auto tt_idx_1 = cidx / L[1]; cidx = cidx % 
L[1]; auto tt_idx_2 = cidx / L[2]; float* tr_0_ptr = (float*)&(tr_0[n][0]); a_ptr[0 * N + n] = (float*)&(tt_cores_1[tt_idx_1][0]); b_ptr[0 * N + n] = (float*)&(tt_cores_0[tt_idx_0][0]); c_ptr[0 * N + n] = tr_0_ptr; a_ptr[1 * N + n] = (float*)&(tt_cores_2[tt_idx_2][0]); b_ptr[1 * N + n] = tr_0_ptr; c_ptr[1 * N + n] = (float*)&(tr_1[n][0]); } } __global__ void init_batch_gemm_forward_4T_kernel( int N, const int64_t* __restrict__ L, const int64_t* __restrict__ colidx, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_3, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_2, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; auto tt_idx_1 = cidx / L[1]; cidx = cidx % L[1]; auto tt_idx_2 = cidx / L[2]; cidx = cidx % L[2]; auto tt_idx_3 = cidx / L[3]; float* tr_0_ptr = (float*)&(tr_0[n][0]); float* tr_1_ptr = (float*)&(tr_1[n][0]); a_ptr[0 * N + n] = (float*)&(tt_cores_1[tt_idx_1][0]); b_ptr[0 * N + n] = (float*)&(tt_cores_0[tt_idx_0][0]); c_ptr[0 * N + n] = tr_0_ptr; a_ptr[1 * N + n] = (float*)&(tt_cores_2[tt_idx_2][0]); b_ptr[1 * N + n] = tr_0_ptr; c_ptr[1 * N + n] = tr_1_ptr; a_ptr[2 * N + n] = (float*)&(tt_cores_3[tt_idx_3][0]); b_ptr[2 * N + n] = tr_1_ptr; c_ptr[2 * N + n] = (float*)&(tr_2[n][0]); } } void init_batch_gemm_forward_cuda( int32_t T, int32_t N, const int64_t* __restrict__ L, const int64_t* __restrict__ colidx, const std::vector<Tensor>& tt_cores, const std::vector<Tensor>& tr, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr) { int32_t threads = (N > 256 ? 
256 : 32); int32_t num_blocks = (N + threads - 1) / threads; if (T == 2) { hipLaunchKernelGGL(( init_batch_gemm_forward_2T_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, L, colidx, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(), a_ptr, b_ptr, c_ptr); } else if (T == 3) { hipLaunchKernelGGL(( init_batch_gemm_forward_3T_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, L, colidx, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[1].packed_accessor32<float, 2, RestrictPtrTraits>(), a_ptr, b_ptr, c_ptr); } else if (T == 4) { hipLaunchKernelGGL(( init_batch_gemm_forward_4T_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, L, colidx, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[3].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[2].packed_accessor32<float, 2, RestrictPtrTraits>(), a_ptr, b_ptr, c_ptr); } } __global__ void reduce_output_kernel( int32_t N, int32_t D, const int64_t* __restrict__ rowidx, const float* __restrict__ tr_last, float* __restrict__ output) { int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y; if (indice_id >= N) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } // check if this warp is responsible for this whole segment. bool segment_start = (indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } int64_t row_index = rowidx[indice_id]; // now, find the end of the segment (and thus the segment length `SL`). 
int32_t SL = 1; while (indice_id + SL < N && rowidx[indice_id + SL] == row_index) { SL += 1; } for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<float> sum(&output[row_index * D + d * 4]); for (int32_t sl = 0; sl < SL; ++sl) { Vec4T<float> tr(&tr_last[(indice_id + sl) * D + d * 4]); sum.acc.x += tr.acc.x; sum.acc.y += tr.acc.y; sum.acc.z += tr.acc.z; sum.acc.w += tr.acc.w; } sum.store(&output[row_index * D + d * 4]); } } Tensor tt_embeddings_forward_cuda( int32_t batch_count, int32_t B, int32_t D, const std::vector<int>& tt_p_shapes, const std::vector<int>& tt_q_shapes, const std::vector<int>& tt_ranks, Tensor L, int32_t nnz, Tensor colidx, Tensor rowidx, const std::vector<Tensor>& tt_cores) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(rowidx.get_device()); int32_t T = tt_p_shapes.size(); auto output = at::zeros({B, D}, tt_cores[0].options().dtype(at::kFloat)); if (nnz == 0) { return output; } TORCH_CHECK(batch_count > 0); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); TORCH_CHECK(T > 0); // batch gemm parameters std::vector<int32_t> m(T - 1); std::vector<int32_t> n(T - 1); std::vector<int32_t> k(T - 1); float alpha = 1.0; float beta = 0.0; int32_t m_ = tt_q_shapes[0]; for (int32_t t = 0; t < T - 1; ++t) { m[t] = m_; k[t] = tt_ranks[t + 1]; n[t] = tt_q_shapes[t + 1] * tt_ranks[t + 2]; m_ = m_ * tt_q_shapes[t + 1]; } // allocate the immediate buffers std::vector<Tensor> tr; int32_t tr_size = tt_q_shapes[0] * tt_ranks[1]; for (int32_t t = 0; t < T - 1; ++t) { tr_size = tr_size * tt_q_shapes[t + 1] * tt_ranks[t + 2] / tt_ranks[t + 1]; tr.push_back(at::empty( {batch_count, tr_size}, tt_cores[0].options().dtype(at::kFloat))); } auto a_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto b_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto c_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); float** a_ptr = (float**)a_ptr_tensor.data_ptr<int64_t>(); float** b_ptr = (float**)b_ptr_tensor.data_ptr<int64_t>(); float** c_ptr = (float**)c_ptr_tensor.data_ptr<int64_t>(); for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) { int32_t end_idx = start_idx + batch_count < nnz ? 
start_idx + batch_count : nnz; int32_t N = end_idx - start_idx; init_batch_gemm_forward_cuda( T, N, L.data_ptr<int64_t>(), &(colidx.data_ptr<int64_t>()[start_idx]), tt_cores, tr, a_ptr, b_ptr, c_ptr); // batched GEMM for (int32_t t = 0; t < T - 1; ++t) { cuda_gemm_batched_fp32_fp32( HIPBLAS_OP_N, HIPBLAS_OP_N, n[t], m[t], k[t], &alpha, (void**)&(a_ptr[t * N]), n[t], (void**)&(b_ptr[t * N]), k[t], &beta, (void**)&(c_ptr[t * N]), n[t], N); } int32_t tx = kWarpSize; int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = (N + ty - 1) / ty; hipLaunchKernelGGL(( reduce_output_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, D, &(rowidx.data_ptr<int64_t>()[start_idx]), tr[T - 2].data_ptr<float>(), output.data_ptr<float>()); } // for (int start_idx = 0; start_idx < nnz; start_idx += batch_count) return output; } __global__ void update_cache_state_kernel( int N, const int64_t* __restrict__ colidx, int32_t hashtbl_size, int64_t* __restrict__ hashtbl, int64_t* __restrict__ cache_freq) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { int64_t cidx = __ldg(&colidx[n]); hashtbl_insert<int64_t, int64_t, true>( cidx, 1, hashtbl_size, MAX_PROBES, hashtbl, cache_freq); } } void update_cache_state_cuda(Tensor colidx, Tensor hashtbl, Tensor cache_freq) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(colidx.get_device()); int32_t nnz = colidx.numel(); if (nnz == 0) { return; } TORCH_CHECK(hashtbl.numel() > 0); TORCH_CHECK(hashtbl.numel() == cache_freq.numel()); int32_t threads = (nnz > 256 ? 256 : 32); int32_t num_blocks = (nnz + threads - 1) / threads; hipLaunchKernelGGL(( update_cache_state_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), nnz, colidx.data_ptr<int64_t>(), hashtbl.numel(), hashtbl.data_ptr<int64_t>(), cache_freq.data_ptr<int64_t>()); } __global__ void mark_popular_colidx_kernel( int32_t hashtbl_size, int32_t cache_size, int64_t* __restrict__ cache_freq_sorted_hashtbl, int64_t* __restrict__ hashtbl, int64_t* __restrict__ cache_freq, int32_t* __restrict__ cache_state) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n >= hashtbl_size) { return; } if (cache_freq_sorted_hashtbl[n] != -1) { int32_t hashtbl_idx = hashtbl_find( cache_freq_sorted_hashtbl[n], hashtbl_size, MAX_PROBES, hashtbl); if (n < cache_size) { cache_state[hashtbl_idx] = n; } else { hashtbl[hashtbl_idx] = -1; cache_freq[hashtbl_idx] = 0; } } else if (n < cache_size) { // a hack to use batch gemm cache_freq_sorted_hashtbl[n] = 0; } } __global__ void copy_output_kernel( int32_t N, int32_t D, int32_t start_idx, const float* __restrict__ tr_last, float* __restrict__ output) { int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n < N) { for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<float> tr(&tr_last[n * D + d * 4]); tr.store(&output[(start_idx + n) * D + d * 4]); } } } void prefetch_cached_weights_cuda( int32_t batch_count, const std::vector<int>& tt_p_shapes, const std::vector<int>& tt_q_shapes, const std::vector<int>& tt_ranks, const std::vector<Tensor>& tt_cores, Tensor L, Tensor cache_freq_sorted_hashtbl, Tensor cache_weight) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(cache_weight.get_device()); int32_t nnz = cache_weight.size(0); if (nnz == 0) { return; } int32_t T = tt_p_shapes.size(); int32_t D = cache_weight.size(1); TORCH_CHECK(batch_count > 0); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); 
TORCH_CHECK(T > 0); // batch gemm parameters std::vector<int32_t> m(T - 1); std::vector<int32_t> n(T - 1); std::vector<int32_t> k(T - 1); float alpha = 1.0; float beta = 0.0; int32_t m_ = tt_q_shapes[0]; for (int32_t t = 0; t < T - 1; ++t) { m[t] = m_; k[t] = tt_ranks[t + 1]; n[t] = tt_q_shapes[t + 1] * tt_ranks[t + 2]; m_ = m_ * tt_q_shapes[t + 1]; } // allocate the immediate buffers std::vector<Tensor> tr; int32_t tr_size = tt_q_shapes[0] * tt_ranks[1]; for (int32_t t = 0; t < T - 1; ++t) { tr_size = tr_size * tt_q_shapes[t + 1] * tt_ranks[t + 2] / tt_ranks[t + 1]; tr.push_back(at::empty( {batch_count, tr_size}, tt_cores[0].options().dtype(at::kFloat))); } auto a_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto b_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto c_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); float** a_ptr = (float**)a_ptr_tensor.data_ptr<int64_t>(); float** b_ptr = (float**)b_ptr_tensor.data_ptr<int64_t>(); float** c_ptr = (float**)c_ptr_tensor.data_ptr<int64_t>(); for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) { int32_t end_idx = start_idx + batch_count < nnz ? start_idx + batch_count : nnz; int32_t N = end_idx - start_idx; init_batch_gemm_forward_cuda( T, N, L.data_ptr<int64_t>(), &(cache_freq_sorted_hashtbl.data_ptr<int64_t>()[start_idx]), tt_cores, tr, a_ptr, b_ptr, c_ptr); // batched GEMM for (int32_t t = 0; t < T - 1; ++t) { cuda_gemm_batched_fp32_fp32( HIPBLAS_OP_N, HIPBLAS_OP_N, n[t], m[t], k[t], &alpha, (void**)&(a_ptr[t * N]), n[t], (void**)&(b_ptr[t * N]), k[t], &beta, (void**)&(c_ptr[t * N]), n[t], N); } int32_t tx = ::min(1024, D / 4); int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = (N + ty - 1) / ty; hipLaunchKernelGGL(( copy_output_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, D, start_idx, tr[T - 2].data_ptr<float>(), cache_weight.data_ptr<float>()); } // for (int start_idx = 0; start_idx < nnz; start_idx += batch_count) } void cache_populate_cuda( int64_t num_embeddings, const std::vector<int>& tt_p_shapes, const std::vector<int>& tt_q_shapes, const std::vector<int>& tt_ranks, const std::vector<Tensor>& tt_cores, Tensor L, Tensor hashtbl, Tensor cache_freq, Tensor cache_state, Tensor cache_weight) { TORCH_CHECK(hashtbl.numel() > 0); TORCH_CHECK(hashtbl.numel() == cache_freq.numel()); TORCH_CHECK(cache_freq.numel() < std::numeric_limits<int32_t>::max()); TORCH_CHECK(hashtbl.numel() >= cache_weight.size(0)); auto cache_freq_sorted_hashtbl = empty_like(hashtbl); // Sort hash_table by cache_freq { auto sorted_cache_freq = empty_like(cache_freq); size_t temp_storage_bytes = 0; AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairsDescending( nullptr, temp_storage_bytes, cache_freq.data_ptr<int64_t>(), sorted_cache_freq.data_ptr<int64_t>(), hashtbl.data_ptr<int64_t>(), cache_freq_sorted_hashtbl.data_ptr<int64_t>(), cache_freq.numel(), 0, sizeof(int64_t) * 8, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, hashtbl.options().dtype(kByte)); AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairsDescending( temp_storage.data_ptr(), temp_storage_bytes, cache_freq.data_ptr<int64_t>(), sorted_cache_freq.data_ptr<int64_t>(), hashtbl.data_ptr<int64_t>(), cache_freq_sorted_hashtbl.data_ptr<int64_t>(), cache_freq.numel(), 0, sizeof(int64_t) * 8, 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); } // Mark popular colidx int32_t hashtbl_size = hashtbl.numel(); int32_t threads = 256; int32_t num_blocks = (hashtbl_size + threads - 1) / threads; hipLaunchKernelGGL(( mark_popular_colidx_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), hashtbl_size, cache_weight.size(0), cache_freq_sorted_hashtbl.data_ptr<int64_t>(), hashtbl.data_ptr<int64_t>(), cache_freq.data_ptr<int64_t>(), cache_state.data_ptr<int32_t>()); int32_t batch_count = 200; prefetch_cached_weights_cuda( batch_count, tt_p_shapes, tt_q_shapes, tt_ranks, tt_cores, L, cache_freq_sorted_hashtbl, cache_weight); } __global__ void compute_rowidx_kernel( int32_t B, const int64_t* __restrict__ offsets, int64_t* __restrict__ rowidx) { int32_t b = blockIdx.x * blockDim.y + threadIdx.y; if (b < B) { int64_t colidx_start = offsets[b]; int64_t colidx_end = offsets[b + 1]; int32_t L = colidx_end - colidx_start; for (int32_t l = threadIdx.x; l < L; l += blockDim.x) { rowidx[l + colidx_start] = b; } } } __global__ void cache_lookup_kernel( int32_t N, const int64_t* __restrict__ colidx, int32_t hashtbl_size, const int64_t* __restrict__ hashtbl, const int32_t* __restrict__ cache_state, bool* __restrict__ is_tt, int32_t* __restrict__ cache_location) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { int32_t hashtbl_idx = hashtbl_find(colidx[n], hashtbl_size, MAX_PROBES, hashtbl); if (hashtbl_idx != -1 && cache_state[hashtbl_idx] != -1) { is_tt[n] = false; cache_location[n] = cache_state[hashtbl_idx]; } else { is_tt[n] = true; } } } std::tuple<Tensor, Tensor, int32_t, c10::optional<Tensor>> preprocess_indices_sync_cuda( Tensor colidx, Tensor offsets, bool warmup, Tensor hashtbl, Tensor cache_state) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(colidx.get_device()); auto rowidx = empty_like(colidx); if (rowidx.numel() == 0) { return {colidx, rowidx, rowidx.numel(), c10::nullopt}; } int32_t B = offsets.numel() - 1; int32_t N = colidx.numel(); int32_t tx = 8; int32_t ty = 32; hipLaunchKernelGGL(( compute_rowidx_kernel), dim3(div_round_up(B, ty)), dim3(dim3(tx, ty)), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), B, offsets.data_ptr<int64_t>(), rowidx.data_ptr<int64_t>()); if (warmup) { return {colidx, rowidx, rowidx.numel(), c10::nullopt}; } else { auto partitioned_colidx = empty_like(colidx); auto partitioned_rowidx = empty_like(rowidx); auto num_tt_indices = zeros({1}, rowidx.options().dtype(kInt)); auto cache_locations = empty_like(rowidx, rowidx.options().dtype(kInt)); auto partitioned_cache_locations = empty_like(rowidx, rowidx.options().dtype(kInt)); { auto is_tt = empty_like(rowidx, rowidx.options().dtype(kBool)); int32_t threads = 256; int32_t num_blocks = div_round_up(N, threads); hipLaunchKernelGGL(( cache_lookup_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), N, colidx.data_ptr<int64_t>(), hashtbl.numel(), hashtbl.data_ptr<int64_t>(), cache_state.data_ptr<int32_t>(), is_tt.data_ptr<bool>(), cache_locations.data_ptr<int32_t>()); size_t temp_storage_bytes = 0; AT_CUDA_CHECK(cub::DevicePartition::Flagged( nullptr, temp_storage_bytes, rowidx.data_ptr<int64_t>(), is_tt.data_ptr<bool>(), partitioned_rowidx.data_ptr<int64_t>(), num_tt_indices.data_ptr<int32_t>(), rowidx.numel(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, 
hashtbl.options().dtype(kByte)); AT_CUDA_CHECK(cub::DevicePartition::Flagged( temp_storage.data_ptr(), temp_storage_bytes, rowidx.data_ptr<int64_t>(), is_tt.data_ptr<bool>(), partitioned_rowidx.data_ptr<int64_t>(), num_tt_indices.data_ptr<int32_t>(), rowidx.numel(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); AT_CUDA_CHECK(cub::DevicePartition::Flagged( temp_storage.data_ptr(), temp_storage_bytes, colidx.data_ptr<int64_t>(), is_tt.data_ptr<bool>(), partitioned_colidx.data_ptr<int64_t>(), num_tt_indices.data_ptr<int32_t>(), colidx.numel(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); AT_CUDA_CHECK(cub::DevicePartition::Flagged( temp_storage.data_ptr(), temp_storage_bytes, cache_locations.data_ptr<int32_t>(), is_tt.data_ptr<bool>(), partitioned_cache_locations.data_ptr<int32_t>(), num_tt_indices.data_ptr<int32_t>(), cache_locations.numel(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); } int32_t N_tt_indices; hipMemcpyAsync( &N_tt_indices, num_tt_indices.data_ptr<int32_t>(), sizeof(int32_t), hipMemcpyDeviceToHost, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); hipStreamSynchronize(at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); return {partitioned_colidx, partitioned_rowidx, N_tt_indices, partitioned_cache_locations}; } } __global__ void cache_forward_kernel( int32_t nnz, int32_t D, const int64_t* __restrict__ rowidx, const int32_t* __restrict__ cache_locations, const float* __restrict__ cache_weight, float* __restrict__ output) { int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y; if (indice_id >= nnz) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } // check if this warp is responsible for this whole segment. bool segment_start = (indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } int64_t row_index = rowidx[indice_id]; // now, find the end of the segment (and thus the segment length `SL`). 
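// Linear scan over `rowidx` to find the segment length SL, i.e. how many
// consecutive nnz entries map to the same output row; the owning warp then
// sums the SL cached rows into `output[row_index]` using Vec4T vector loads.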
int32_t SL = 1; while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) { SL += 1; } for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<float> sum(&output[row_index * D + d * 4]); for (int32_t sl = 0; sl < SL; ++sl) { int32_t idx = __ldg(&cache_locations[indice_id + sl]); Vec4T<float> weight(&cache_weight[idx * D + d * 4]); sum.acc.x += weight.acc.x; sum.acc.y += weight.acc.y; sum.acc.z += weight.acc.z; sum.acc.w += weight.acc.w; } sum.store(&output[row_index * D + d * 4]); } } void cache_forward_cuda( int32_t B, int32_t nnz, Tensor cache_locations, Tensor rowidx, Tensor cache_weight, Tensor output) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(rowidx.get_device()); TORCH_CHECK(B > 0); int32_t D = cache_weight.size(1); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); if (nnz == 0) { return; } int32_t tx = kWarpSize; int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = (nnz + ty - 1) / ty; hipLaunchKernelGGL(( cache_forward_kernel), dim3(num_blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), nnz, D, rowidx.data_ptr<int64_t>(), cache_locations.data_ptr<int32_t>(), cache_weight.data_ptr<float>(), output.data_ptr<float>()); } __global__ void cache_backward_sgd_kernel( int32_t nnz, int32_t D, const float* __restrict__ grad_output, const int32_t* __restrict__ cache_locations, const int64_t* __restrict__ rowidx, float learning_rate, float* __restrict__ cache_weight) { int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y; if (indice_id >= nnz) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } // check if this warp is responsible for this whole segment. bool segment_start = (indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } int64_t row_index = rowidx[indice_id]; // now, find the end of the segment (and thus the segment length `SL`). 
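// Same segment scan as in the forward pass; for every cache location touched
// by this segment the warp applies the SGD step
//   cache_weight[idx] -= learning_rate * grad_output[row_index]
// via gpuAtomicAdd, since several segments may hit the same cache slot.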
int32_t SL = 1; while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) { SL += 1; } for (int32_t sl = 0; sl < SL; ++sl) { int32_t idx = __ldg(&cache_locations[indice_id + sl]); for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<acc_type<float, true>> grad_out_vec( &grad_output[row_index * D + d * 4]); gpuAtomicAdd( &cache_weight[idx * D + d * 4 + 0], -grad_out_vec.acc.x * learning_rate); gpuAtomicAdd( &cache_weight[idx * D + d * 4 + 1], -grad_out_vec.acc.y * learning_rate); gpuAtomicAdd( &cache_weight[idx * D + d * 4 + 2], -grad_out_vec.acc.z * learning_rate); gpuAtomicAdd( &cache_weight[idx * D + d * 4 + 3], -grad_out_vec.acc.w * learning_rate); } } } void cache_backward_sgd_cuda( int32_t nnz, Tensor grad_output, Tensor cache_locations, Tensor rowidx, float learning_rate, Tensor cache_weight) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(cache_weight.get_device()); if (nnz == 0) { return; } const auto D = cache_weight.size(1); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); int32_t tx = kWarpSize; int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = div_round_up(nnz, ty); hipLaunchKernelGGL(( cache_backward_sgd_kernel), dim3(num_blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), nnz, D, grad_output.data_ptr<float>(), cache_locations.data_ptr<int32_t>(), rowidx.data_ptr<int64_t>(), learning_rate, cache_weight.data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return; } __global__ void cache_backward_dense_kernel( int32_t nnz, int32_t D, const float* __restrict__ grad_output, const int32_t* __restrict__ cache_locations, const int64_t* __restrict__ rowidx, float* __restrict__ grad_cache_weight) { int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y; if (indice_id >= nnz) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } // check if this warp is responsible for this whole segment. bool segment_start = (indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } int64_t row_index = rowidx[indice_id]; // now, find the end of the segment (and thus the segment length `SL`). 
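// Dense variant: instead of updating the weights in place, the row gradient
// is atomically accumulated into `grad_cache_weight` for every cache location
// in the segment, leaving the actual optimizer step to the caller.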
int32_t SL = 1; while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) { SL += 1; } for (int32_t sl = 0; sl < SL; ++sl) { int32_t idx = __ldg(&cache_locations[indice_id + sl]); for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<acc_type<float, true>> grad_out_vec( &grad_output[row_index * D + d * 4]); gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 0], grad_out_vec.acc.x); gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 1], grad_out_vec.acc.y); gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 2], grad_out_vec.acc.z); gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 3], grad_out_vec.acc.w); } } } Tensor cache_backward_dense_cuda( int32_t nnz, Tensor grad_output, Tensor cache_locations, Tensor rowidx, float learning_rate, Tensor cache_weight) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(cache_weight.get_device()); auto grad_cache_weight = zeros_like(cache_weight); if (nnz == 0) { return grad_cache_weight; } const auto D = cache_weight.size(1); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); int32_t tx = kWarpSize; int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = div_round_up(nnz, ty); hipLaunchKernelGGL(( cache_backward_dense_kernel), dim3(num_blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), nnz, D, grad_output.data_ptr<float>(), cache_locations.data_ptr<int32_t>(), rowidx.data_ptr<int64_t>(), grad_cache_weight.data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return grad_cache_weight; } __global__ void cache_backward_rowwise_adagrad_approx_kernel( int32_t nnz, int32_t D, const float* __restrict__ grad_output, const int32_t* __restrict__ cache_locations, const int64_t* __restrict__ rowidx, float learning_rate, float eps, float* __restrict__ cache_optimizer_state, float* __restrict__ cache_weight) { int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y; if (indice_id >= nnz) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } // check if this warp is responsible for this whole segment. bool segment_start = (indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } int64_t row_index = rowidx[indice_id]; // now, find the end of the segment (and thus the segment length `SL`). 
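// Approximate row-wise Adagrad: one average squared gradient is computed per
// output row (warp-reduced below) and added to the optimizer state of every
// cache slot referenced by the segment; lane 0 performs the state update and
// broadcasts the resulting multiplier to the rest of the warp via __shfl_sync.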
int32_t SL = 1; while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) { SL += 1; } float g_local_sum_square = 0.0; for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<float> grad_out_vec(&grad_output[row_index * D + d * 4]); g_local_sum_square += grad_out_vec.acc.x * grad_out_vec.acc.x + grad_out_vec.acc.y * grad_out_vec.acc.y + grad_out_vec.acc.z * grad_out_vec.acc.z + grad_out_vec.acc.w * grad_out_vec.acc.w; } float g_avg_square = warpReduceAllSum<float>(g_local_sum_square) / D; for (int32_t sl = 0; sl < SL; ++sl) { auto idx = __ldg(&cache_locations[indice_id + sl]); float multiplier; if (threadIdx.x == 0) { float old_sum_square_grads = gpuAtomicAdd(&cache_optimizer_state[idx], g_avg_square); multiplier = learning_rate * (1.0 / (sqrt(old_sum_square_grads + g_avg_square) + eps)); } multiplier = __shfl_sync(0xFFFFFFFF, multiplier, 0); for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<float> grad_out_vec(&grad_output[row_index * D + d * 4]); Vec4T<float> weight_new(&cache_weight[idx * D + d * 4]); weight_new.acc.x -= grad_out_vec.acc.x * multiplier; weight_new.acc.y -= grad_out_vec.acc.y * multiplier; weight_new.acc.z -= grad_out_vec.acc.z * multiplier; weight_new.acc.w -= grad_out_vec.acc.w * multiplier; weight_new.store(&cache_weight[idx * D + d * 4]); } } } void cache_backward_rowwise_adagrad_approx_cuda( int32_t nnz, Tensor grad_output, Tensor cache_locations, Tensor rowidx, float learning_rate, float eps, Tensor cache_optimizer_state, Tensor cache_weight) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(cache_weight.get_device()); if (nnz == 0) { return; } const auto D = cache_weight.size(1); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); int32_t tx = kWarpSize; int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = div_round_up(nnz, ty); hipLaunchKernelGGL(( cache_backward_rowwise_adagrad_approx_kernel), dim3(num_blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), nnz, D, grad_output.data_ptr<float>(), cache_locations.data_ptr<int32_t>(), rowidx.data_ptr<int64_t>(), learning_rate, eps, cache_optimizer_state.data_ptr<float>(), cache_weight.data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); }
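// Illustrative only (not part of the original sources): a minimal sketch of
// the row-wise Adagrad update that cache_backward_rowwise_adagrad_approx_kernel
// performs for a single cached row of D floats. The names `state`, `weight`,
// `grad`, `lr` and `eps` are hypothetical stand-ins for the per-slot optimizer
// state, the cached row, the row gradient and the hyper-parameters.
//
//   float g_avg_square = 0.f;
//   for (int d = 0; d < D; ++d) g_avg_square += grad[d] * grad[d];
//   g_avg_square /= D;
//   state += g_avg_square;                      // cache_optimizer_state[idx]
//   float multiplier = lr / (sqrtf(state) + eps);
//   for (int d = 0; d < D; ++d) weight[d] -= multiplier * grad[d];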
// 391ad023da33a7dca11ecb0f722df328db362419.cu
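// The remainder of this file appears to be the original CUDA source that the
// HIP code above was translated from; kernels and host wrappers mirror each
// other one-to-one, with CUDA/cuBLAS/cub calls in place of their HIP/hipBLAS
// equivalents.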
/* Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/TensorUtils.h> #include <ATen/core/TensorAccessor.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <cuda.h> #include <cuda_runtime.h> #include <curand_kernel.h> #include <THC/THCAtomics.cuh> #include <mutex> #include "cub/device/device_partition.cuh" #include "cub/device/device_radix_sort.cuh" #include "tt_cuda_utils.cuh" #include "hashtbl_cuda_utils.cuh" using namespace at; namespace { constexpr int32_t MAX_PROBES = 3; enum { OPTIM_SGD = 0, OPTIM_ADAGRAD = 1, OPTIM_DENSE = 2, }; } // namespace inline void cuda_gemm_batched_fp32_fp32( cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, float* alpha, void** a_array, int lda, void** b_array, int ldb, float* beta, void** c_array, int ldc, int batch_count) { cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cublasSetStream(handle, c10::cuda::getCurrentCUDAStream()); cublasGemmBatchedEx( handle, transa, transb, m, n, k, alpha, a_array, CUDA_R_32F, lda, b_array, CUDA_R_32F, ldb, beta, c_array, CUDA_R_32F, ldc, batch_count, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); } __global__ void init_batch_gemm_backward_2T_kernel( int32_t N, const int64_t* __restrict__ colidx, const int64_t* __restrict__ rowidx, const int64_t* __restrict__ L, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_output, int32_t* __restrict__ tt_idx, float** __restrict__ a0_ptr, float** __restrict__ b0_ptr, float** __restrict__ c0_ptr, float** __restrict__ a1_ptr, float** __restrict__ b1_ptr, float** __restrict__ c1_ptr) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto ridx = __ldg(&rowidx[n]); int32_t tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; int32_t tt_idx_1 = cidx / L[1]; tt_idx[0 * N + n] = tt_idx_0; tt_idx[1 * N + n] = tt_idx_1; float* d_output_ptr = (float*)&(d_output[ridx][0]); a0_ptr[0 * N + n] = (float*)&(tt_cores_0[tt_idx_0][0]); b0_ptr[0 * N + n] = d_output_ptr; c0_ptr[0 * N + n] = (float*)&(tr_tt_cores_1[n][0]); a1_ptr[0 * N + n] = d_output_ptr; b1_ptr[0 * N + n] = (float*)&(tt_cores_1[tt_idx_1][0]); c1_ptr[0 * N + n] = (float*)&(tr_tt_cores_0[n][0]); } } __global__ void init_batch_gemm_backward_3T_kernel( int32_t N, const int64_t* __restrict__ colidx, const int64_t* __restrict__ rowidx, const int64_t* __restrict__ L, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_output, int32_t* __restrict__ tt_idx, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr, float** __restrict__ a0_ptr, float** __restrict__ b0_ptr, float** __restrict__ c0_ptr, float** 
__restrict__ a1_ptr, float** __restrict__ b1_ptr, float** __restrict__ c1_ptr) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto ridx = __ldg(&rowidx[n]); int32_t tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; int32_t tt_idx_1 = cidx / L[1]; cidx = cidx % L[1]; int32_t tt_idx_2 = cidx / L[2]; tt_idx[0 * N + n] = tt_idx_0; tt_idx[1 * N + n] = tt_idx_1; tt_idx[2 * N + n] = tt_idx_2; float* tr_0_ptr = (float*)&(tr_0[n][0]); float* d_output_ptr = (float*)&(d_output[ridx][0]); float* tt_cores_0_ptr = (float*)&(tt_cores_0[tt_idx_0][0]); float* tt_cores_1_ptr = (float*)&(tt_cores_1[tt_idx_1][0]); a_ptr[0 * N + n] = tt_cores_1_ptr; b_ptr[0 * N + n] = tt_cores_0_ptr; c_ptr[0 * N + n] = tr_0_ptr; a0_ptr[0 * N + n] = tt_cores_0_ptr; b0_ptr[0 * N + n] = tr_0_ptr; c0_ptr[0 * N + n] = (float*)&(tr_tt_cores_1[n][0]); a1_ptr[0 * N + n] = tr_0_ptr; b1_ptr[0 * N + n] = tt_cores_1_ptr; c1_ptr[0 * N + n] = (float*)&(tr_tt_cores_0[n][0]); a0_ptr[1 * N + n] = tr_0_ptr; b0_ptr[1 * N + n] = d_output_ptr; c0_ptr[1 * N + n] = (float*)&(tr_tt_cores_2[n][0]); a1_ptr[1 * N + n] = d_output_ptr; b1_ptr[1 * N + n] = (float*)&(tt_cores_2[tt_idx_2][0]); c1_ptr[1 * N + n] = tr_0_ptr; } } __global__ void init_batch_gemm_backward_4T_kernel( int32_t N, const int64_t* __restrict__ colidx, const int64_t* __restrict__ rowidx, const int64_t* __restrict__ L, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_3, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_3, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_output, int32_t* __restrict__ tt_idx, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr, float** __restrict__ a0_ptr, float** __restrict__ b0_ptr, float** __restrict__ c0_ptr, float** __restrict__ a1_ptr, float** __restrict__ b1_ptr, float** __restrict__ c1_ptr) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto ridx = __ldg(&rowidx[n]); int32_t tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; int32_t tt_idx_1 = cidx / L[1]; cidx = cidx % L[1]; int32_t tt_idx_2 = cidx / L[2]; cidx = cidx % L[2]; int32_t tt_idx_3 = cidx / L[3]; tt_idx[0 * N + n] = tt_idx_0; tt_idx[1 * N + n] = tt_idx_1; tt_idx[2 * N + n] = tt_idx_2; tt_idx[3 * N + n] = tt_idx_3; float* tr_0_ptr = (float*)&(tr_0[n][0]); float* tr_1_ptr = (float*)&(tr_1[n][0]); float* d_output_ptr = (float*)&(d_output[ridx][0]); float* tt_cores_0_ptr = (float*)&(tt_cores_0[tt_idx_0][0]); float* tt_cores_1_ptr = (float*)&(tt_cores_1[tt_idx_1][0]); float* tt_cores_2_ptr = (float*)&(tt_cores_2[tt_idx_2][0]); a_ptr[0 * N + n] = tt_cores_1_ptr; b_ptr[0 * N + n] = tt_cores_0_ptr; c_ptr[0 * N + n] = tr_0_ptr; a_ptr[1 * N + n] = tt_cores_2_ptr; b_ptr[1 * N + n] = tr_0_ptr; c_ptr[1 * N + n] = tr_1_ptr; a0_ptr[0 * N + n] = tt_cores_0_ptr; b0_ptr[0 * N + n] = tr_0_ptr; c0_ptr[0 * N + n] = (float*)&(tr_tt_cores_1[n][0]); a1_ptr[0 * N + n] = b0_ptr[0 * N + n]; b1_ptr[0 * N + n] = tt_cores_1_ptr; c1_ptr[0 * N + n] = 
(float*)&(tr_tt_cores_0[n][0]); a0_ptr[1 * N + n] = tr_0_ptr; b0_ptr[1 * N + n] = tr_1_ptr; c0_ptr[1 * N + n] = (float*)&(tr_tt_cores_2[n][0]); a1_ptr[1 * N + n] = b0_ptr[1 * N + n]; b1_ptr[1 * N + n] = tt_cores_2_ptr; c1_ptr[1 * N + n] = tr_0_ptr; a0_ptr[2 * N + n] = tr_1_ptr; b0_ptr[2 * N + n] = d_output_ptr; c0_ptr[2 * N + n] = (float*)&(tr_tt_cores_3[n][0]); a1_ptr[2 * N + n] = d_output_ptr; b1_ptr[2 * N + n] = (float*)&(tt_cores_3[tt_idx[3 * N + n]][0]); c1_ptr[2 * N + n] = tr_1_ptr; } } void init_batch_gemm_backward_cuda( int32_t T, int32_t N, const int64_t* __restrict__ colidx, const int64_t* __restrict__ rowidx, const int64_t* __restrict__ L, const std::vector<Tensor>& tt_cores, const std::vector<Tensor>& tr_tt_cores, const std::vector<Tensor>& tr, Tensor d_output, int32_t* __restrict__ tt_idx, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr, float** __restrict__ a0_ptr, float** __restrict__ b0_ptr, float** __restrict__ c0_ptr, float** __restrict__ a1_ptr, float** __restrict__ b1_ptr, float** __restrict__ c1_ptr) { int32_t threads = (N > 256 ? 256 : 32); int32_t num_blocks = (N + threads - 1) / threads; if (T == 2) { init_batch_gemm_backward_2T_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( N, colidx, rowidx, L, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), d_output.packed_accessor32<float, 2, RestrictPtrTraits>(), tt_idx, a0_ptr, b0_ptr, c0_ptr, a1_ptr, b1_ptr, c1_ptr); } else if (T == 3) { init_batch_gemm_backward_3T_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( N, colidx, rowidx, L, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(), d_output.packed_accessor32<float, 2, RestrictPtrTraits>(), tt_idx, a_ptr, b_ptr, c_ptr, a0_ptr, b0_ptr, c0_ptr, a1_ptr, b1_ptr, c1_ptr); } else if (T == 4) { init_batch_gemm_backward_4T_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( N, colidx, rowidx, L, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[3].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tr_tt_cores[3].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[1].packed_accessor32<float, 2, RestrictPtrTraits>(), d_output.packed_accessor32<float, 2, RestrictPtrTraits>(), tt_idx, a_ptr, b_ptr, c_ptr, a0_ptr, b0_ptr, c0_ptr, a1_ptr, b1_ptr, c1_ptr); } } __global__ void update_d_tt_cores_kernel( int32_t N, int32_t D, const int32_t* __restrict__ tt_idx, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_tt_cores) { int32_t n = blockIdx.x * 
blockDim.y + threadIdx.y; if (n < N) { auto idx = __ldg(&tt_idx[n]); for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { atomicAdd(&(d_tt_cores[idx][d]), tr_tt_cores[n][d]); } } } __global__ void update_tt_cores_sgd_kernel( int32_t B, int32_t D, float learning_rate, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_tt_cores, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores) { int32_t b = blockIdx.x * blockDim.y + threadIdx.y; if (b >= B) { return; } for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { tt_cores[b][d] -= learning_rate * d_tt_cores[b][d]; } } __global__ void update_tt_cores_adagrad_kernel( int32_t B, int32_t D, float learning_rate, float eps, PackedTensorAccessor32<float, 2, RestrictPtrTraits> d_tt_cores, PackedTensorAccessor32<float, 2, RestrictPtrTraits> optimizer_state, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores) { int32_t b = blockIdx.x * blockDim.y + threadIdx.y; if (b >= B) { return; } for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { optimizer_state[b][d] += d_tt_cores[b][d] * d_tt_cores[b][d]; tt_cores[b][d] -= learning_rate * d_tt_cores[b][d] / (sqrt(optimizer_state[b][d]) + eps); } } std::vector<Tensor> tt_embeddings_backward_cuda( int32_t optim, int32_t batch_count, int32_t D, float learning_rate, float eps, const std::vector<int32_t>& tt_p_shapes, const std::vector<int32_t>& tt_q_shapes, const std::vector<int32_t>& tt_ranks, Tensor L, int32_t nnz, Tensor colidx, Tensor rowidx, Tensor d_output, c10::optional<std::vector<Tensor>> optimizer_state, std::vector<Tensor>& tt_cores) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(d_output.get_device()); int32_t T = tt_p_shapes.size(); std::vector<Tensor> d_tt_cores; std::vector<Tensor> tr_tt_cores; for (int32_t t = 0; t < T; ++t) { d_tt_cores.push_back(at::zeros_like(tt_cores[t])); tr_tt_cores.push_back( at::empty({batch_count, tt_cores[t].size(1)}, tt_cores[t].options())); } if (nnz == 0) { return d_tt_cores; } // batch gemm parameters std::vector<int32_t> m(T - 1); std::vector<int32_t> n(T - 1); std::vector<int32_t> k(T - 1); float alpha = 1.0; float beta = 0.0; int32_t m_ = tt_q_shapes[0]; for (int32_t t = 0; t < T - 1; ++t) { m[t] = m_; k[t] = tt_ranks[t + 1]; n[t] = tt_q_shapes[t + 1] * tt_ranks[t + 2]; m_ = m_ * tt_q_shapes[t + 1]; } // allocate the immediate buffers std::vector<Tensor> tr; int64_t tr_size = tt_q_shapes[0] * tt_ranks[1]; for (int32_t t = 0; t < T - 2; ++t) { tr_size = tr_size * tt_q_shapes[t + 1] * tt_ranks[t + 2] / tt_ranks[t + 1]; tr.push_back(at::empty({batch_count, tr_size}, tt_cores[0].options())); } auto tt_idx = at::empty({T * batch_count}, tt_cores[0].options().dtype(at::kInt)); auto a_ptr_tensor = at::empty( {(T - 2) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto b_ptr_tensor = at::empty( {(T - 2) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto c_ptr_tensor = at::empty( {(T - 2) * batch_count}, tt_cores[0].options().dtype(at::kLong)); float** a_ptr = (float**)a_ptr_tensor.data_ptr<int64_t>(); float** b_ptr = (float**)b_ptr_tensor.data_ptr<int64_t>(); float** c_ptr = (float**)c_ptr_tensor.data_ptr<int64_t>(); auto a0_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto b0_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto c0_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); float** a0_ptr = (float**)a0_ptr_tensor.data_ptr<int64_t>(); float** b0_ptr = 
(float**)b0_ptr_tensor.data_ptr<int64_t>(); float** c0_ptr = (float**)c0_ptr_tensor.data_ptr<int64_t>(); auto a1_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto b1_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto c1_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); float** a1_ptr = (float**)a1_ptr_tensor.data_ptr<int64_t>(); float** b1_ptr = (float**)b1_ptr_tensor.data_ptr<int64_t>(); float** c1_ptr = (float**)c1_ptr_tensor.data_ptr<int64_t>(); for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) { int32_t end_idx = start_idx + batch_count < nnz ? start_idx + batch_count : nnz; int32_t N = end_idx - start_idx; init_batch_gemm_backward_cuda( T, N, &(colidx.data_ptr<int64_t>()[start_idx]), &(rowidx.data_ptr<int64_t>()[start_idx]), L.data_ptr<int64_t>(), tt_cores, tr_tt_cores, tr, d_output, tt_idx.data_ptr<int32_t>(), a_ptr, b_ptr, c_ptr, a0_ptr, b0_ptr, c0_ptr, a1_ptr, b1_ptr, c1_ptr); // recompute forward for (int32_t t = 0; t < T - 2; ++t) { cuda_gemm_batched_fp32_fp32( CUBLAS_OP_N, CUBLAS_OP_N, n[t], m[t], k[t], &alpha, (void**)&(a_ptr[t * N]), n[t], (void**)&(b_ptr[t * N]), k[t], &beta, (void**)&(c_ptr[t * N]), n[t], N); } // for (int32_t t = 0; t < T - 2; ++t) // backward propagation for (int32_t t = T - 2; t >= 0; --t) { cuda_gemm_batched_fp32_fp32( CUBLAS_OP_N, CUBLAS_OP_T, n[t], k[t], m[t], &alpha, (void**)&(b0_ptr[t * N]), n[t], (void**)&(a0_ptr[t * N]), k[t], &beta, (void**)&(c0_ptr[t * N]), n[t], N); int32_t D_0 = tt_cores[t + 1].size(1); int32_t tx_0 = std::min(1024, D_0); int32_t ty_0 = 1024 / tx_0; update_d_tt_cores_kernel<<< div_round_up(N, ty_0), dim3(tx_0, ty_0), 0, c10::cuda::getCurrentCUDAStream()>>>( N, D_0, &(tt_idx.data_ptr<int32_t>()[(t + 1) * N]), tr_tt_cores[t + 1].packed_accessor32<float, 2, RestrictPtrTraits>(), d_tt_cores[t + 1].packed_accessor32<float, 2, RestrictPtrTraits>()); cuda_gemm_batched_fp32_fp32( CUBLAS_OP_T, CUBLAS_OP_N, k[t], m[t], n[t], &alpha, (void**)&(b1_ptr[t * N]), n[t], (void**)&(a1_ptr[t * N]), n[t], &beta, (void**)&(c1_ptr[t * N]), k[t], N); if (t == 0) { int32_t D_1 = tt_cores[0].size(1); int32_t tx_1 = std::min(1024, D_1); int32_t ty_1 = 1024 / tx_1; update_d_tt_cores_kernel<<< div_round_up(N, ty_1), dim3(tx_1, ty_1), 0, c10::cuda::getCurrentCUDAStream()>>>( N, D_1, &(tt_idx.data_ptr<int32_t>()[t * N]), tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), d_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>()); } } // for (int32_t t = T - 2; t >=0 ; --t) } // for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) if (optim == OPTIM_ADAGRAD) { for (int32_t t = 0; t < T; ++t) { int32_t y_size = tt_cores[t].size(0); int32_t x_size = tt_cores[t].size(1); int32_t tx = std::min(1024, y_size); int32_t ty = 1024 / tx; update_tt_cores_adagrad_kernel<<< div_round_up(x_size, ty), dim3(tx, ty), 0, c10::cuda::getCurrentCUDAStream()>>>( y_size, x_size, learning_rate, eps, d_tt_cores[t].packed_accessor32<float, 2, RestrictPtrTraits>(), (*optimizer_state)[t] .packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[t].packed_accessor32<float, 2, RestrictPtrTraits>()); } } else if (optim == OPTIM_SGD) { for (int32_t t = 0; t < T; ++t) { int32_t y_size = tt_cores[t].size(0); int32_t x_size = tt_cores[t].size(1); int32_t tx = std::min(1024, y_size); int32_t ty = 1024 / tx; update_tt_cores_sgd_kernel<<< div_round_up(x_size, ty), dim3(tx, ty), 0, 
c10::cuda::getCurrentCUDAStream()>>>( y_size, x_size, learning_rate, d_tt_cores[t].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[t].packed_accessor32<float, 2, RestrictPtrTraits>()); } } return d_tt_cores; } std::vector<Tensor> tt_embeddings_backward_dense_cuda( int32_t batch_count, int32_t D, const std::vector<int32_t>& tt_p_shapes, const std::vector<int32_t>& tt_q_shapes, const std::vector<int32_t>& tt_ranks, Tensor L, int32_t nnz, Tensor colidx, Tensor rowidx, Tensor d_output, std::vector<Tensor>& tt_cores) { return tt_embeddings_backward_cuda( OPTIM_DENSE, batch_count, D, 0.0, 0.0, tt_p_shapes, tt_q_shapes, tt_ranks, L, nnz, colidx, rowidx, d_output, c10::nullopt, tt_cores); } void tt_embeddings_backward_sgd_cuda( int32_t batch_count, int32_t D, float learning_rate, const std::vector<int32_t>& tt_p_shapes, const std::vector<int32_t>& tt_q_shapes, const std::vector<int32_t>& tt_ranks, Tensor L, int32_t nnz, Tensor colidx, Tensor rowidx, Tensor d_output, std::vector<Tensor>& tt_cores) { tt_embeddings_backward_cuda( OPTIM_SGD, batch_count, D, learning_rate, 0.0, tt_p_shapes, tt_q_shapes, tt_ranks, L, nnz, colidx, rowidx, d_output, c10::nullopt, tt_cores); } void tt_embeddings_backward_adagrad_cuda( int32_t batch_count, int32_t D, float learning_rate, float eps, const std::vector<int32_t>& tt_p_shapes, const std::vector<int32_t>& tt_q_shapes, const std::vector<int32_t>& tt_ranks, Tensor L, int32_t nnz, Tensor colidx, Tensor rowidx, Tensor d_output, std::vector<Tensor>& optimizer_state, std::vector<Tensor>& tt_cores) { tt_embeddings_backward_cuda( OPTIM_ADAGRAD, batch_count, D, learning_rate, eps, tt_p_shapes, tt_q_shapes, tt_ranks, L, nnz, colidx, rowidx, d_output, optimizer_state, tt_cores); } __global__ void init_batch_gemm_forward_2T_kernel( int N, const int64_t* __restrict__ L, const int64_t* __restrict__ colidx, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; auto tt_idx_1 = cidx / L[1]; a_ptr[0 * N + n] = (float*)&(tt_cores_1[tt_idx_1][0]); b_ptr[0 * N + n] = (float*)&(tt_cores_0[tt_idx_0][0]); c_ptr[0 * N + n] = (float*)&(tr_0[n][0]); } } __global__ void init_batch_gemm_forward_3T_kernel( int N, const int64_t* __restrict__ L, const int64_t* __restrict__ colidx, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_1, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; auto tt_idx_1 = cidx / L[1]; cidx = cidx % L[1]; auto tt_idx_2 = cidx / L[2]; float* tr_0_ptr = (float*)&(tr_0[n][0]); a_ptr[0 * N + n] = (float*)&(tt_cores_1[tt_idx_1][0]); b_ptr[0 * N + n] = (float*)&(tt_cores_0[tt_idx_0][0]); c_ptr[0 * N + n] = tr_0_ptr; a_ptr[1 * N + n] = (float*)&(tt_cores_2[tt_idx_2][0]); b_ptr[1 * N + n] = tr_0_ptr; c_ptr[1 * N + n] = (float*)&(tr_1[n][0]); } } __global__ void 
init_batch_gemm_forward_4T_kernel( int N, const int64_t* __restrict__ L, const int64_t* __restrict__ colidx, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_2, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tt_cores_3, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_1, PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_2, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { auto cidx = __ldg(&colidx[n]); auto tt_idx_0 = cidx / L[0]; cidx = cidx % L[0]; auto tt_idx_1 = cidx / L[1]; cidx = cidx % L[1]; auto tt_idx_2 = cidx / L[2]; cidx = cidx % L[2]; auto tt_idx_3 = cidx / L[3]; float* tr_0_ptr = (float*)&(tr_0[n][0]); float* tr_1_ptr = (float*)&(tr_1[n][0]); a_ptr[0 * N + n] = (float*)&(tt_cores_1[tt_idx_1][0]); b_ptr[0 * N + n] = (float*)&(tt_cores_0[tt_idx_0][0]); c_ptr[0 * N + n] = tr_0_ptr; a_ptr[1 * N + n] = (float*)&(tt_cores_2[tt_idx_2][0]); b_ptr[1 * N + n] = tr_0_ptr; c_ptr[1 * N + n] = tr_1_ptr; a_ptr[2 * N + n] = (float*)&(tt_cores_3[tt_idx_3][0]); b_ptr[2 * N + n] = tr_1_ptr; c_ptr[2 * N + n] = (float*)&(tr_2[n][0]); } } void init_batch_gemm_forward_cuda( int32_t T, int32_t N, const int64_t* __restrict__ L, const int64_t* __restrict__ colidx, const std::vector<Tensor>& tt_cores, const std::vector<Tensor>& tr, float** __restrict__ a_ptr, float** __restrict__ b_ptr, float** __restrict__ c_ptr) { int32_t threads = (N > 256 ? 256 : 32); int32_t num_blocks = (N + threads - 1) / threads; if (T == 2) { init_batch_gemm_forward_2T_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( N, L, colidx, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(), a_ptr, b_ptr, c_ptr); } else if (T == 3) { init_batch_gemm_forward_3T_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( N, L, colidx, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[1].packed_accessor32<float, 2, RestrictPtrTraits>(), a_ptr, b_ptr, c_ptr); } else if (T == 4) { init_batch_gemm_forward_4T_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( N, L, colidx, tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(), tt_cores[3].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[1].packed_accessor32<float, 2, RestrictPtrTraits>(), tr[2].packed_accessor32<float, 2, RestrictPtrTraits>(), a_ptr, b_ptr, c_ptr); } } __global__ void reduce_output_kernel( int32_t N, int32_t D, const int64_t* __restrict__ rowidx, const float* __restrict__ tr_last, float* __restrict__ output) { int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y; if (indice_id >= N) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } // check if this warp is responsible for this whole segment. 
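// only the warp that lands on the first nnz entry of an output row keeps
// going; it reduces the entire segment below, so warps assigned to the
// remaining entries of the same row can return immediately.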
bool segment_start = (indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } int64_t row_index = rowidx[indice_id]; // now, find the end of the segment (and thus the segment length `SL`). int32_t SL = 1; while (indice_id + SL < N && rowidx[indice_id + SL] == row_index) { SL += 1; } for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<float> sum(&output[row_index * D + d * 4]); for (int32_t sl = 0; sl < SL; ++sl) { Vec4T<float> tr(&tr_last[(indice_id + sl) * D + d * 4]); sum.acc.x += tr.acc.x; sum.acc.y += tr.acc.y; sum.acc.z += tr.acc.z; sum.acc.w += tr.acc.w; } sum.store(&output[row_index * D + d * 4]); } } Tensor tt_embeddings_forward_cuda( int32_t batch_count, int32_t B, int32_t D, const std::vector<int>& tt_p_shapes, const std::vector<int>& tt_q_shapes, const std::vector<int>& tt_ranks, Tensor L, int32_t nnz, Tensor colidx, Tensor rowidx, const std::vector<Tensor>& tt_cores) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(rowidx.get_device()); int32_t T = tt_p_shapes.size(); auto output = at::zeros({B, D}, tt_cores[0].options().dtype(at::kFloat)); if (nnz == 0) { return output; } TORCH_CHECK(batch_count > 0); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); TORCH_CHECK(T > 0); // batch gemm parameters std::vector<int32_t> m(T - 1); std::vector<int32_t> n(T - 1); std::vector<int32_t> k(T - 1); float alpha = 1.0; float beta = 0.0; int32_t m_ = tt_q_shapes[0]; for (int32_t t = 0; t < T - 1; ++t) { m[t] = m_; k[t] = tt_ranks[t + 1]; n[t] = tt_q_shapes[t + 1] * tt_ranks[t + 2]; m_ = m_ * tt_q_shapes[t + 1]; } // allocate the immediate buffers std::vector<Tensor> tr; int32_t tr_size = tt_q_shapes[0] * tt_ranks[1]; for (int32_t t = 0; t < T - 1; ++t) { tr_size = tr_size * tt_q_shapes[t + 1] * tt_ranks[t + 2] / tt_ranks[t + 1]; tr.push_back(at::empty( {batch_count, tr_size}, tt_cores[0].options().dtype(at::kFloat))); } auto a_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto b_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto c_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); float** a_ptr = (float**)a_ptr_tensor.data_ptr<int64_t>(); float** b_ptr = (float**)b_ptr_tensor.data_ptr<int64_t>(); float** c_ptr = (float**)c_ptr_tensor.data_ptr<int64_t>(); for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) { int32_t end_idx = start_idx + batch_count < nnz ? 
start_idx + batch_count : nnz; int32_t N = end_idx - start_idx; init_batch_gemm_forward_cuda( T, N, L.data_ptr<int64_t>(), &(colidx.data_ptr<int64_t>()[start_idx]), tt_cores, tr, a_ptr, b_ptr, c_ptr); // batched GEMM for (int32_t t = 0; t < T - 1; ++t) { cuda_gemm_batched_fp32_fp32( CUBLAS_OP_N, CUBLAS_OP_N, n[t], m[t], k[t], &alpha, (void**)&(a_ptr[t * N]), n[t], (void**)&(b_ptr[t * N]), k[t], &beta, (void**)&(c_ptr[t * N]), n[t], N); } int32_t tx = kWarpSize; int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = (N + ty - 1) / ty; reduce_output_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( N, D, &(rowidx.data_ptr<int64_t>()[start_idx]), tr[T - 2].data_ptr<float>(), output.data_ptr<float>()); } // for (int start_idx = 0; start_idx < nnz; start_idx += batch_count) return output; } __global__ void update_cache_state_kernel( int N, const int64_t* __restrict__ colidx, int32_t hashtbl_size, int64_t* __restrict__ hashtbl, int64_t* __restrict__ cache_freq) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { int64_t cidx = __ldg(&colidx[n]); hashtbl_insert<int64_t, int64_t, true>( cidx, 1, hashtbl_size, MAX_PROBES, hashtbl, cache_freq); } } void update_cache_state_cuda(Tensor colidx, Tensor hashtbl, Tensor cache_freq) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(colidx.get_device()); int32_t nnz = colidx.numel(); if (nnz == 0) { return; } TORCH_CHECK(hashtbl.numel() > 0); TORCH_CHECK(hashtbl.numel() == cache_freq.numel()); int32_t threads = (nnz > 256 ? 256 : 32); int32_t num_blocks = (nnz + threads - 1) / threads; update_cache_state_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( nnz, colidx.data_ptr<int64_t>(), hashtbl.numel(), hashtbl.data_ptr<int64_t>(), cache_freq.data_ptr<int64_t>()); } __global__ void mark_popular_colidx_kernel( int32_t hashtbl_size, int32_t cache_size, int64_t* __restrict__ cache_freq_sorted_hashtbl, int64_t* __restrict__ hashtbl, int64_t* __restrict__ cache_freq, int32_t* __restrict__ cache_state) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n >= hashtbl_size) { return; } if (cache_freq_sorted_hashtbl[n] != -1) { int32_t hashtbl_idx = hashtbl_find( cache_freq_sorted_hashtbl[n], hashtbl_size, MAX_PROBES, hashtbl); if (n < cache_size) { cache_state[hashtbl_idx] = n; } else { hashtbl[hashtbl_idx] = -1; cache_freq[hashtbl_idx] = 0; } } else if (n < cache_size) { // a hack to use batch gemm cache_freq_sorted_hashtbl[n] = 0; } } __global__ void copy_output_kernel( int32_t N, int32_t D, int32_t start_idx, const float* __restrict__ tr_last, float* __restrict__ output) { int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n < N) { for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<float> tr(&tr_last[n * D + d * 4]); tr.store(&output[(start_idx + n) * D + d * 4]); } } } void prefetch_cached_weights_cuda( int32_t batch_count, const std::vector<int>& tt_p_shapes, const std::vector<int>& tt_q_shapes, const std::vector<int>& tt_ranks, const std::vector<Tensor>& tt_cores, Tensor L, Tensor cache_freq_sorted_hashtbl, Tensor cache_weight) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(cache_weight.get_device()); int32_t nnz = cache_weight.size(0); if (nnz == 0) { return; } int32_t T = tt_p_shapes.size(); int32_t D = cache_weight.size(1); TORCH_CHECK(batch_count > 0); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); TORCH_CHECK(T > 0); // batch gemm parameters std::vector<int32_t> m(T - 1); std::vector<int32_t> n(T - 1); std::vector<int32_t> k(T - 
1); float alpha = 1.0; float beta = 0.0; int32_t m_ = tt_q_shapes[0]; for (int32_t t = 0; t < T - 1; ++t) { m[t] = m_; k[t] = tt_ranks[t + 1]; n[t] = tt_q_shapes[t + 1] * tt_ranks[t + 2]; m_ = m_ * tt_q_shapes[t + 1]; } // allocate the immediate buffers std::vector<Tensor> tr; int32_t tr_size = tt_q_shapes[0] * tt_ranks[1]; for (int32_t t = 0; t < T - 1; ++t) { tr_size = tr_size * tt_q_shapes[t + 1] * tt_ranks[t + 2] / tt_ranks[t + 1]; tr.push_back(at::empty( {batch_count, tr_size}, tt_cores[0].options().dtype(at::kFloat))); } auto a_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto b_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); auto c_ptr_tensor = at::empty( {(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong)); float** a_ptr = (float**)a_ptr_tensor.data_ptr<int64_t>(); float** b_ptr = (float**)b_ptr_tensor.data_ptr<int64_t>(); float** c_ptr = (float**)c_ptr_tensor.data_ptr<int64_t>(); for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) { int32_t end_idx = start_idx + batch_count < nnz ? start_idx + batch_count : nnz; int32_t N = end_idx - start_idx; init_batch_gemm_forward_cuda( T, N, L.data_ptr<int64_t>(), &(cache_freq_sorted_hashtbl.data_ptr<int64_t>()[start_idx]), tt_cores, tr, a_ptr, b_ptr, c_ptr); // batched GEMM for (int32_t t = 0; t < T - 1; ++t) { cuda_gemm_batched_fp32_fp32( CUBLAS_OP_N, CUBLAS_OP_N, n[t], m[t], k[t], &alpha, (void**)&(a_ptr[t * N]), n[t], (void**)&(b_ptr[t * N]), k[t], &beta, (void**)&(c_ptr[t * N]), n[t], N); } int32_t tx = std::min(1024, D / 4); int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = (N + ty - 1) / ty; copy_output_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( N, D, start_idx, tr[T - 2].data_ptr<float>(), cache_weight.data_ptr<float>()); } // for (int start_idx = 0; start_idx < nnz; start_idx += batch_count) } void cache_populate_cuda( int64_t num_embeddings, const std::vector<int>& tt_p_shapes, const std::vector<int>& tt_q_shapes, const std::vector<int>& tt_ranks, const std::vector<Tensor>& tt_cores, Tensor L, Tensor hashtbl, Tensor cache_freq, Tensor cache_state, Tensor cache_weight) { TORCH_CHECK(hashtbl.numel() > 0); TORCH_CHECK(hashtbl.numel() == cache_freq.numel()); TORCH_CHECK(cache_freq.numel() < std::numeric_limits<int32_t>::max()); TORCH_CHECK(hashtbl.numel() >= cache_weight.size(0)); auto cache_freq_sorted_hashtbl = empty_like(hashtbl); // Sort hash_table by cache_freq { auto sorted_cache_freq = empty_like(cache_freq); size_t temp_storage_bytes = 0; AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairsDescending( nullptr, temp_storage_bytes, cache_freq.data_ptr<int64_t>(), sorted_cache_freq.data_ptr<int64_t>(), hashtbl.data_ptr<int64_t>(), cache_freq_sorted_hashtbl.data_ptr<int64_t>(), cache_freq.numel(), 0, sizeof(int64_t) * 8, at::cuda::getCurrentCUDAStream(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, hashtbl.options().dtype(kByte)); AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairsDescending( temp_storage.data_ptr(), temp_storage_bytes, cache_freq.data_ptr<int64_t>(), sorted_cache_freq.data_ptr<int64_t>(), hashtbl.data_ptr<int64_t>(), cache_freq_sorted_hashtbl.data_ptr<int64_t>(), cache_freq.numel(), 0, sizeof(int64_t) * 8, at::cuda::getCurrentCUDAStream(), false)); } // Mark popular colidx int32_t hashtbl_size = hashtbl.numel(); int32_t threads = 256; int32_t num_blocks = (hashtbl_size + threads - 1) / threads; mark_popular_colidx_kernel<<< 
num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( hashtbl_size, cache_weight.size(0), cache_freq_sorted_hashtbl.data_ptr<int64_t>(), hashtbl.data_ptr<int64_t>(), cache_freq.data_ptr<int64_t>(), cache_state.data_ptr<int32_t>()); int32_t batch_count = 200; prefetch_cached_weights_cuda( batch_count, tt_p_shapes, tt_q_shapes, tt_ranks, tt_cores, L, cache_freq_sorted_hashtbl, cache_weight); } __global__ void compute_rowidx_kernel( int32_t B, const int64_t* __restrict__ offsets, int64_t* __restrict__ rowidx) { int32_t b = blockIdx.x * blockDim.y + threadIdx.y; if (b < B) { int64_t colidx_start = offsets[b]; int64_t colidx_end = offsets[b + 1]; int32_t L = colidx_end - colidx_start; for (int32_t l = threadIdx.x; l < L; l += blockDim.x) { rowidx[l + colidx_start] = b; } } } __global__ void cache_lookup_kernel( int32_t N, const int64_t* __restrict__ colidx, int32_t hashtbl_size, const int64_t* __restrict__ hashtbl, const int32_t* __restrict__ cache_state, bool* __restrict__ is_tt, int32_t* __restrict__ cache_location) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { int32_t hashtbl_idx = hashtbl_find(colidx[n], hashtbl_size, MAX_PROBES, hashtbl); if (hashtbl_idx != -1 && cache_state[hashtbl_idx] != -1) { is_tt[n] = false; cache_location[n] = cache_state[hashtbl_idx]; } else { is_tt[n] = true; } } } std::tuple<Tensor, Tensor, int32_t, c10::optional<Tensor>> preprocess_indices_sync_cuda( Tensor colidx, Tensor offsets, bool warmup, Tensor hashtbl, Tensor cache_state) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(colidx.get_device()); auto rowidx = empty_like(colidx); if (rowidx.numel() == 0) { return {colidx, rowidx, rowidx.numel(), c10::nullopt}; } int32_t B = offsets.numel() - 1; int32_t N = colidx.numel(); int32_t tx = 8; int32_t ty = 32; compute_rowidx_kernel<<< div_round_up(B, ty), dim3(tx, ty), 0, c10::cuda::getCurrentCUDAStream()>>>( B, offsets.data_ptr<int64_t>(), rowidx.data_ptr<int64_t>()); if (warmup) { return {colidx, rowidx, rowidx.numel(), c10::nullopt}; } else { auto partitioned_colidx = empty_like(colidx); auto partitioned_rowidx = empty_like(rowidx); auto num_tt_indices = zeros({1}, rowidx.options().dtype(kInt)); auto cache_locations = empty_like(rowidx, rowidx.options().dtype(kInt)); auto partitioned_cache_locations = empty_like(rowidx, rowidx.options().dtype(kInt)); { auto is_tt = empty_like(rowidx, rowidx.options().dtype(kBool)); int32_t threads = 256; int32_t num_blocks = div_round_up(N, threads); cache_lookup_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( N, colidx.data_ptr<int64_t>(), hashtbl.numel(), hashtbl.data_ptr<int64_t>(), cache_state.data_ptr<int32_t>(), is_tt.data_ptr<bool>(), cache_locations.data_ptr<int32_t>()); size_t temp_storage_bytes = 0; AT_CUDA_CHECK(cub::DevicePartition::Flagged( nullptr, temp_storage_bytes, rowidx.data_ptr<int64_t>(), is_tt.data_ptr<bool>(), partitioned_rowidx.data_ptr<int64_t>(), num_tt_indices.data_ptr<int32_t>(), rowidx.numel(), at::cuda::getCurrentCUDAStream(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, hashtbl.options().dtype(kByte)); AT_CUDA_CHECK(cub::DevicePartition::Flagged( temp_storage.data_ptr(), temp_storage_bytes, rowidx.data_ptr<int64_t>(), is_tt.data_ptr<bool>(), partitioned_rowidx.data_ptr<int64_t>(), num_tt_indices.data_ptr<int32_t>(), rowidx.numel(), at::cuda::getCurrentCUDAStream(), false)); AT_CUDA_CHECK(cub::DevicePartition::Flagged( temp_storage.data_ptr(), temp_storage_bytes, colidx.data_ptr<int64_t>(), 
is_tt.data_ptr<bool>(), partitioned_colidx.data_ptr<int64_t>(), num_tt_indices.data_ptr<int32_t>(), colidx.numel(), at::cuda::getCurrentCUDAStream(), false)); AT_CUDA_CHECK(cub::DevicePartition::Flagged( temp_storage.data_ptr(), temp_storage_bytes, cache_locations.data_ptr<int32_t>(), is_tt.data_ptr<bool>(), partitioned_cache_locations.data_ptr<int32_t>(), num_tt_indices.data_ptr<int32_t>(), cache_locations.numel(), at::cuda::getCurrentCUDAStream(), false)); } int32_t N_tt_indices; cudaMemcpyAsync( &N_tt_indices, num_tt_indices.data_ptr<int32_t>(), sizeof(int32_t), cudaMemcpyDeviceToHost, at::cuda::getCurrentCUDAStream()); cudaStreamSynchronize(at::cuda::getCurrentCUDAStream()); return {partitioned_colidx, partitioned_rowidx, N_tt_indices, partitioned_cache_locations}; } } __global__ void cache_forward_kernel( int32_t nnz, int32_t D, const int64_t* __restrict__ rowidx, const int32_t* __restrict__ cache_locations, const float* __restrict__ cache_weight, float* __restrict__ output) { int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y; if (indice_id >= nnz) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } // check if this warp is responsible for this whole segment. bool segment_start = (indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } int64_t row_index = rowidx[indice_id]; // now, find the end of the segment (and thus the segment length `SL`). int32_t SL = 1; while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) { SL += 1; } for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<float> sum(&output[row_index * D + d * 4]); for (int32_t sl = 0; sl < SL; ++sl) { int32_t idx = __ldg(&cache_locations[indice_id + sl]); Vec4T<float> weight(&cache_weight[idx * D + d * 4]); sum.acc.x += weight.acc.x; sum.acc.y += weight.acc.y; sum.acc.z += weight.acc.z; sum.acc.w += weight.acc.w; } sum.store(&output[row_index * D + d * 4]); } } void cache_forward_cuda( int32_t B, int32_t nnz, Tensor cache_locations, Tensor rowidx, Tensor cache_weight, Tensor output) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(rowidx.get_device()); TORCH_CHECK(B > 0); int32_t D = cache_weight.size(1); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); if (nnz == 0) { return; } int32_t tx = kWarpSize; int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = (nnz + ty - 1) / ty; cache_forward_kernel<<< num_blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( nnz, D, rowidx.data_ptr<int64_t>(), cache_locations.data_ptr<int32_t>(), cache_weight.data_ptr<float>(), output.data_ptr<float>()); } __global__ void cache_backward_sgd_kernel( int32_t nnz, int32_t D, const float* __restrict__ grad_output, const int32_t* __restrict__ cache_locations, const int64_t* __restrict__ rowidx, float learning_rate, float* __restrict__ cache_weight) { int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y; if (indice_id >= nnz) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } // check if this warp is responsible for this whole segment. bool segment_start = (indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. 
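// note: this segment-ownership scheme relies on entries of `rowidx` with the
// same row id being stored contiguously, which holds because rowidx is built
// from the per-sample offsets (and the cub partition keeps equal ids adjacent).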
return; } int64_t row_index = rowidx[indice_id]; // now, find the end of the segment (and thus the segment length `SL`). int32_t SL = 1; while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) { SL += 1; } for (int32_t sl = 0; sl < SL; ++sl) { int32_t idx = __ldg(&cache_locations[indice_id + sl]); for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<acc_type<float, true>> grad_out_vec( &grad_output[row_index * D + d * 4]); gpuAtomicAdd( &cache_weight[idx * D + d * 4 + 0], -grad_out_vec.acc.x * learning_rate); gpuAtomicAdd( &cache_weight[idx * D + d * 4 + 1], -grad_out_vec.acc.y * learning_rate); gpuAtomicAdd( &cache_weight[idx * D + d * 4 + 2], -grad_out_vec.acc.z * learning_rate); gpuAtomicAdd( &cache_weight[idx * D + d * 4 + 3], -grad_out_vec.acc.w * learning_rate); } } } void cache_backward_sgd_cuda( int32_t nnz, Tensor grad_output, Tensor cache_locations, Tensor rowidx, float learning_rate, Tensor cache_weight) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(cache_weight.get_device()); if (nnz == 0) { return; } const auto D = cache_weight.size(1); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); int32_t tx = kWarpSize; int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = div_round_up(nnz, ty); cache_backward_sgd_kernel<<< num_blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( nnz, D, grad_output.data_ptr<float>(), cache_locations.data_ptr<int32_t>(), rowidx.data_ptr<int64_t>(), learning_rate, cache_weight.data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); return; } __global__ void cache_backward_dense_kernel( int32_t nnz, int32_t D, const float* __restrict__ grad_output, const int32_t* __restrict__ cache_locations, const int64_t* __restrict__ rowidx, float* __restrict__ grad_cache_weight) { int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y; if (indice_id >= nnz) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } // check if this warp is responsible for this whole segment. bool segment_start = (indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } int64_t row_index = rowidx[indice_id]; // now, find the end of the segment (and thus the segment length `SL`). 
int32_t SL = 1; while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) { SL += 1; } for (int32_t sl = 0; sl < SL; ++sl) { int32_t idx = __ldg(&cache_locations[indice_id + sl]); for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<acc_type<float, true>> grad_out_vec( &grad_output[row_index * D + d * 4]); gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 0], grad_out_vec.acc.x); gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 1], grad_out_vec.acc.y); gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 2], grad_out_vec.acc.z); gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 3], grad_out_vec.acc.w); } } } Tensor cache_backward_dense_cuda( int32_t nnz, Tensor grad_output, Tensor cache_locations, Tensor rowidx, float learning_rate, Tensor cache_weight) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(cache_weight.get_device()); auto grad_cache_weight = zeros_like(cache_weight); if (nnz == 0) { return grad_cache_weight; } const auto D = cache_weight.size(1); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); int32_t tx = kWarpSize; int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = div_round_up(nnz, ty); cache_backward_dense_kernel<<< num_blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( nnz, D, grad_output.data_ptr<float>(), cache_locations.data_ptr<int32_t>(), rowidx.data_ptr<int64_t>(), grad_cache_weight.data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); return grad_cache_weight; } __global__ void cache_backward_rowwise_adagrad_approx_kernel( int32_t nnz, int32_t D, const float* __restrict__ grad_output, const int32_t* __restrict__ cache_locations, const int64_t* __restrict__ rowidx, float learning_rate, float eps, float* __restrict__ cache_optimizer_state, float* __restrict__ cache_weight) { int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y; if (indice_id >= nnz) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } // check if this warp is responsible for this whole segment. bool segment_start = (indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } int64_t row_index = rowidx[indice_id]; // now, find the end of the segment (and thus the segment length `SL`). 
int32_t SL = 1; while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) { SL += 1; } float g_local_sum_square = 0.0; for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<float> grad_out_vec(&grad_output[row_index * D + d * 4]); g_local_sum_square += grad_out_vec.acc.x * grad_out_vec.acc.x + grad_out_vec.acc.y * grad_out_vec.acc.y + grad_out_vec.acc.z * grad_out_vec.acc.z + grad_out_vec.acc.w * grad_out_vec.acc.w; } float g_avg_square = warpReduceAllSum<float>(g_local_sum_square) / D; for (int32_t sl = 0; sl < SL; ++sl) { auto idx = __ldg(&cache_locations[indice_id + sl]); float multiplier; if (threadIdx.x == 0) { float old_sum_square_grads = gpuAtomicAdd(&cache_optimizer_state[idx], g_avg_square); multiplier = learning_rate * (1.0 / (sqrt(old_sum_square_grads + g_avg_square) + eps)); } multiplier = __shfl_sync(0xFFFFFFFF, multiplier, 0); for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<float> grad_out_vec(&grad_output[row_index * D + d * 4]); Vec4T<float> weight_new(&cache_weight[idx * D + d * 4]); weight_new.acc.x -= grad_out_vec.acc.x * multiplier; weight_new.acc.y -= grad_out_vec.acc.y * multiplier; weight_new.acc.z -= grad_out_vec.acc.z * multiplier; weight_new.acc.w -= grad_out_vec.acc.w * multiplier; weight_new.store(&cache_weight[idx * D + d * 4]); } } } void cache_backward_rowwise_adagrad_approx_cuda( int32_t nnz, Tensor grad_output, Tensor cache_locations, Tensor rowidx, float learning_rate, float eps, Tensor cache_optimizer_state, Tensor cache_weight) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(cache_weight.get_device()); if (nnz == 0) { return; } const auto D = cache_weight.size(1); TORCH_CHECK(D > 0); TORCH_CHECK(D % 4 == 0); int32_t tx = kWarpSize; int32_t ty = 1024 / tx; dim3 threads(tx, ty); int32_t num_blocks = div_round_up(nnz, ty); cache_backward_rowwise_adagrad_approx_kernel<<< num_blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( nnz, D, grad_output.data_ptr<float>(), cache_locations.data_ptr<int32_t>(), rowidx.data_ptr<int64_t>(), learning_rate, eps, cache_optimizer_state.data_ptr<float>(), cache_weight.data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); }
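// Reference-only sketch, not part of the file above: the warp-level pattern that
// cache_backward_rowwise_adagrad_approx_kernel relies on. Each lane accumulates a
// partial sum of squared gradients, the sum is all-reduced across the warp, and the
// multiplier computed by lane 0 is broadcast back with __shfl_sync. warpReduceAllSum
// in the original is assumed to be the usual shuffle-based butterfly reduction shown
// here; names ending in _sketch are hypothetical.
__device__ __forceinline__ float warp_all_sum_sketch(float val) {
  // butterfly all-reduce: after the loop every lane holds the full 32-lane sum
  for (int offset = 16; offset > 0; offset >>= 1) {
    val += __shfl_xor_sync(0xFFFFFFFF, val, offset);
  }
  return val;
}

__device__ __forceinline__ float adagrad_multiplier_sketch(float g_avg_square,
                                                           float* optimizer_state,
                                                           float learning_rate,
                                                           float eps) {
  float multiplier = 0.f;
  if ((threadIdx.x & 31) == 0) {
    // atomicAdd returns the *old* value, so the updated running sum is old + g_avg_square
    float old_sum = atomicAdd(optimizer_state, g_avg_square);
    multiplier = learning_rate / (sqrtf(old_sum + g_avg_square) + eps);
  }
  // only lane 0 computed a value; every other lane reads it from lane 0
  return __shfl_sync(0xFFFFFFFF, multiplier, 0);
}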
0ad4bf7a9155f6e517d43cd8d3102df50d984948.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if defined(PARTICLES) && defined(PARTICLES_GPU) #include <unistd.h> #include<stdio.h> #include<stdlib.h> #include<math.h> #include"gpu.hpp" #include <iostream> #include"../io.h" #include"../global.h" #include"../global_cuda.h" #include "particles_3D.h" #include "../grid3D.h" #include "particles_boundaries_gpu.h" #define SCAN_SHARED_SIZE 2*TPB_PARTICLES __global__ void Set_Particles_Boundary_Kernel( int side, part_int_t n_local, Real *pos_dev, Real d_min, Real d_max, Real d_length ){ part_int_t tid = blockIdx.x * blockDim.x + threadIdx.x ; if ( tid >= n_local) return; Real pos; pos = pos_dev[tid]; if ( side == 0 ){ if ( pos < d_min ) pos += d_length; } if ( side == 1 ){ if ( pos >= d_max ) pos -= d_length; } pos_dev[tid] = pos; } void Grid3D::Set_Particles_Boundary_GPU( int dir, int side ){ Real d_min, d_max, L; Real *pos_dev; if ( dir == 0 ){ d_min = Particles.G.zMin; d_max = Particles.G.zMax; pos_dev = Particles.pos_x_dev; } if ( dir == 1 ){ d_min = Particles.G.yMin; d_max = Particles.G.yMax; pos_dev = Particles.pos_y_dev; } if ( dir == 2 ){ d_min = Particles.G.zMin; d_max = Particles.G.zMax; pos_dev = Particles.pos_z_dev; } L = d_max - d_min; // set values for GPU kernels int grid_size = (Particles.n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(grid_size, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); hipLaunchKernelGGL(Set_Particles_Boundary_Kernel, dim1dGrid, dim1dBlock, 0, 0, side, Particles.n_local, pos_dev, d_min, d_max, L ); CudaCheckError(); } // #ifdef MPI_CHOLLA __global__ void Get_Transfer_Flags_Kernel( part_int_t n_total, int side, Real d_min, Real d_max, Real *pos_d, bool *transfer_flags_d ){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_total ) return; bool transfer = 0; Real pos = pos_d[tid]; // if ( tid < 1 ) printf( "%f\n", pos); if ( side == 0 ){ if ( pos < d_min ) transfer = 1; } if ( side == 1 ){ if ( pos >= d_max ) transfer = 1; } // if ( transfer ) printf( "##Thread particles transfer\n"); transfer_flags_d[tid] = transfer; } __global__ void Scan_Kernel( part_int_t n_total, bool *transfer_flags_d, int *prefix_sum_d, int *prefix_sum_block_d ){ __shared__ int data_sh[SCAN_SHARED_SIZE]; int tid_block, block_start; // tid = threadIdx.x + blockIdx.x * blockDim.x; tid_block = threadIdx.x; block_start = 2*blockIdx.x*blockDim.x; data_sh[2*tid_block] = block_start + 2*tid_block < n_total ? (int) transfer_flags_d[block_start + 2*tid_block] : 0; data_sh[2*tid_block+1] = block_start + 2*tid_block+1 < n_total ? 
(int) transfer_flags_d[block_start + 2*tid_block+1] : 0; __syncthreads(); int offset = 1; int n = blockDim.x*2; int ai, bi; int t; for (int d = n/2; d>0; d/=2){ __syncthreads(); if ( tid_block < d ){ ai = offset*(2*tid_block+1)-1; bi = offset*(2*tid_block+2)-1; data_sh[bi] += data_sh[ai]; } offset *= 2; } // Clear the last element if (tid_block == 0) data_sh[n - 1] = 0; // Traverse down tree & build scan for (int d = 1; d < n; d *= 2){ __syncthreads(); offset /=2; if (tid_block < d){ ai = offset*(2*tid_block+1)-1; bi = offset*(2*tid_block+2)-1; t = data_sh[ai]; data_sh[ai] = data_sh[bi]; data_sh[bi] += t; } } __syncthreads(); // Write results to device memory if ( block_start + 2*tid_block < n_total ) prefix_sum_d[block_start + 2*tid_block] = data_sh[2*tid_block]; if ( block_start + 2*tid_block+1 < n_total) prefix_sum_d[block_start + 2*tid_block+1] = data_sh[2*tid_block+1]; // Write the block sum int last_flag_block = (int) transfer_flags_d[block_start + 2*(blockDim.x-1)+1]; if (tid_block == 0) prefix_sum_block_d[blockIdx.x] = data_sh[2*(blockDim.x-1)+1] + last_flag_block; } __global__ void Prefix_Sum_Blocks_Kernel( int n_partial, int *prefix_sum_block_d ){ int tid_block, val, start_index, n_threads; tid_block = threadIdx.x; n_threads = blockDim.x; __shared__ int data_sh[TPB_PARTICLES]; int sum = 0; int n = 0; start_index = n * n_threads; while( start_index < n_partial ){ data_sh[tid_block] = start_index+tid_block < n_partial ? prefix_sum_block_d[start_index+tid_block] : 0; __syncthreads(); if (tid_block == 0){ for ( int i=0; i<n_threads; i++ ){ val = data_sh[i]; data_sh[i] = sum; sum += val; } } __syncthreads(); if (start_index + tid_block < n_partial) prefix_sum_block_d[start_index+tid_block] = data_sh[tid_block]; n += 1; start_index = n * n_threads; } } __global__ void Sum_Blocks_Kernel( part_int_t n_total, int *prefix_sum_d, int *prefix_sum_block_d ){ int tid, tid_block, block_id, data_id; tid = threadIdx.x + blockIdx.x * blockDim.x; tid_block = threadIdx.x; block_id = blockIdx.x; data_id = block_id/2; __shared__ int block_sum_sh[1]; if ( tid_block == 0 ){ block_sum_sh[0] = prefix_sum_block_d[data_id]; // printf( "%d %d\n", block_id/2, prefix_sum_block[data_id] ); } __syncthreads(); if (tid < n_total) prefix_sum_d[tid] += block_sum_sh[0]; } __global__ void Get_N_Transfer_Particles_Kernel( part_int_t n_total, int *n_transfer_d, bool *transfer_flags_d, int *prefix_sum_d ){ n_transfer_d[0] = prefix_sum_d[n_total-1] + (int)transfer_flags_d[n_total-1]; // if ( n_transfer_d[0] > 0 ) printf( "##Thread transfer: %d\n", n_transfer_d[0]); } __global__ void Get_Transfer_Indices_Kernel( part_int_t n_total, bool *transfer_flags_d, int *prefix_sum_d, int *transfer_indices_d ){ int tid, transfer_index; tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_total ) return; transfer_index = prefix_sum_d[tid]; if ( transfer_flags_d[tid] ) transfer_indices_d[transfer_index] = tid; } __global__ void Select_Indices_to_Replace_Tranfered_Kernel( part_int_t n_total, int n_transfer, bool *transfer_flags_d, int *prefix_sum_d, int *replace_indices_d ){ int tid, tid_inv; tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_total ) return; tid_inv = n_total - tid - 1; bool transfer_flag = transfer_flags_d[tid]; if ( transfer_flag ) return; int prefix_sum_inv, replace_id; prefix_sum_inv = n_transfer - prefix_sum_d[tid]; replace_id = tid_inv - prefix_sum_inv; replace_indices_d[replace_id] = tid; } __global__ void Replace_Transfered_Particles_Kernel( int n_transfer, Real *field_d, int *transfer_indices_d, 
int *replace_indices_d, bool print_replace ){ int tid; tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_transfer ) return; int dst_id, src_id; dst_id = transfer_indices_d[tid]; src_id = replace_indices_d[tid]; if ( dst_id < src_id ){ if (print_replace) printf("Replacing: %f \n", field_d[dst_id] ); field_d[dst_id] = field_d[src_id]; } } void Replace_Transfered_Particles_GPU_function( int n_transfer, Real *field_d, int *transfer_indices_d, int *replace_indices_d, bool print_replace ){ int grid_size; grid_size = (n_transfer - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(grid_size, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); hipLaunchKernelGGL( Replace_Transfered_Particles_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_transfer, field_d, transfer_indices_d, replace_indices_d, print_replace ); CudaCheckError(); } part_int_t Select_Particles_to_Transfer_GPU_function( part_int_t n_local, int side, Real domainMin, Real domainMax, Real *pos_d, int *n_transfer_d, int *n_transfer_h, bool *transfer_flags_d, int *transfer_indices_d, int *replace_indices_d, int *transfer_prefix_sum_d, int *transfer_prefix_sum_blocks_d ){ // set values for GPU kernels int grid_size, grid_size_half; grid_size = (n_local - 1) / TPB_PARTICLES + 1; grid_size_half = ( (n_local-1)/2 ) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(grid_size, 1, 1); dim3 dim1dGrid_half(grid_size_half, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // Initialize the number of tranfer particles n_transfer_h[0] = 0; hipLaunchKernelGGL( Get_Transfer_Flags_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, side, domainMin, domainMax, pos_d, transfer_flags_d); CudaCheckError(); hipLaunchKernelGGL( Scan_Kernel, dim1dGrid_half, dim1dBlock, 0, 0, n_local, transfer_flags_d, transfer_prefix_sum_d, transfer_prefix_sum_blocks_d ); CudaCheckError(); hipLaunchKernelGGL( Prefix_Sum_Blocks_Kernel, 1, dim1dBlock , 0, 0, grid_size_half, transfer_prefix_sum_blocks_d ); CudaCheckError(); hipLaunchKernelGGL( Sum_Blocks_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, transfer_prefix_sum_d, transfer_prefix_sum_blocks_d ); CudaCheckError(); hipLaunchKernelGGL( Get_N_Transfer_Particles_Kernel, 1, 1, 0, 0, n_local, n_transfer_d, transfer_flags_d, transfer_prefix_sum_d ); CudaCheckError(); CudaSafeCall( hipMemcpy( n_transfer_h, n_transfer_d, sizeof(int), hipMemcpyDeviceToHost) ); CudaCheckError(); hipLaunchKernelGGL( Get_Transfer_Indices_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local , transfer_flags_d, transfer_prefix_sum_d, transfer_indices_d ); CudaCheckError(); hipLaunchKernelGGL( Select_Indices_to_Replace_Tranfered_Kernel, dim1dGrid, dim1dBlock , 0, 0, n_local, n_transfer_h[0], transfer_flags_d, transfer_prefix_sum_d, replace_indices_d ); CudaCheckError(); // if ( n_transfer_h[0] > 0 )printf( "N transfer: %d\n", n_transfer_h[0]); return n_transfer_h[0]; } __global__ void Load_Transfered_Particles_to_Buffer_Kernel( int n_transfer, int field_id, int n_fields_to_transfer, Real *field_d, int *transfer_indices_d, Real *send_buffer_d, Real domainMin, Real domainMax, int boundary_type ){ int tid; tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_transfer ) return; int src_id, dst_id; Real field_val; src_id = transfer_indices_d[tid]; dst_id = tid * n_fields_to_transfer + field_id; field_val = field_d[src_id]; // Set global periodic boundary conditions if ( boundary_type == 1 && field_val < domainMin ) field_val += ( domainMax - domainMin ); if ( boundary_type 
== 1 && field_val >= domainMax ) field_val -= ( domainMax - domainMin ); send_buffer_d[dst_id] = field_val; } void Load_Particles_to_Transfer_GPU_function( int n_transfer, int field_id, int n_fields_to_transfer, Real *field_d, int *transfer_indices_d, Real *send_buffer_d, Real domainMin, Real domainMax, int boundary_type ){ // set values for GPU kernels int grid_size; grid_size = (n_transfer - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(grid_size, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); hipLaunchKernelGGL( Load_Transfered_Particles_to_Buffer_Kernel, dim1dGrid, dim1dBlock , 0, 0, n_transfer, field_id, n_fields_to_transfer, field_d, transfer_indices_d, send_buffer_d, domainMin, domainMax, boundary_type ); CudaCheckError(); } void Copy_Particles_GPU_Buffer_to_Host_Buffer( int n_transfer, Real *buffer_h, Real *buffer_d ){ int transfer_size; transfer_size = n_transfer * N_DATA_PER_PARTICLE_TRANSFER; CudaSafeCall( hipMemcpy( buffer_h, buffer_d, transfer_size*sizeof(Real), hipMemcpyDeviceToHost) ); CudaCheckError(); } void Copy_Particles_Host_Buffer_to_GPU_Buffer( int n_transfer, Real *buffer_h, Real *buffer_d ){ int transfer_size; transfer_size = n_transfer * N_DATA_PER_PARTICLE_TRANSFER; CudaSafeCall( hipMemcpy( buffer_d, buffer_h, transfer_size*sizeof(Real), hipMemcpyHostToDevice) ); CudaCheckError(); } __global__ void Unload_Transfered_Particles_from_Buffer_Kernel( int n_local, int n_transfer, int field_id, int n_fields_to_transfer, Real *field_d, Real *recv_buffer_d ){ int tid; tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_transfer ) return; int src_id, dst_id; src_id = tid * n_fields_to_transfer + field_id; dst_id = n_local + tid; field_d[dst_id] = recv_buffer_d[src_id]; } void Unload_Particles_to_Transfer_GPU_function( int n_local, int n_transfer, int field_id, int n_fields_to_transfer, Real *field_d, Real *recv_buffer_d ){ // set values for GPU kernels int grid_size; grid_size = (n_transfer - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(grid_size, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); hipLaunchKernelGGL( Unload_Transfered_Particles_from_Buffer_Kernel, dim1dGrid, dim1dBlock , 0, 0, n_local, n_transfer, field_id, n_fields_to_transfer, field_d, recv_buffer_d ); CudaCheckError(); } // #endif//MPI_CHOLLA #endif //PARTICLES
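// Reference-only sketch, not part of the Cholla sources above: a host-side exclusive
// prefix sum over the 0/1 transfer flags, i.e. the quantity that the
// Scan_Kernel / Prefix_Sum_Blocks_Kernel / Sum_Blocks_Kernel chain produces on the GPU.
// For every flagged particle, the scan value is its destination slot in the compacted
// transfer_indices array; the final running total matches
// prefix_sum_d[n-1] + flags[n-1] in Get_N_Transfer_Particles_Kernel.
#include <vector>

std::vector<int> exclusive_scan_reference(const std::vector<bool>& transfer_flags) {
  std::vector<int> prefix(transfer_flags.size(), 0);
  int running = 0;
  for (size_t i = 0; i < transfer_flags.size(); ++i) {
    prefix[i] = running;              // exclusive: count of flags strictly before i
    running += transfer_flags[i] ? 1 : 0;
  }
  return prefix;                      // running == total particles to transfer
}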
0ad4bf7a9155f6e517d43cd8d3102df50d984948.cu
#if defined(PARTICLES) && defined(PARTICLES_GPU) #include <unistd.h> #include<stdio.h> #include<stdlib.h> #include<math.h> #include"gpu.hpp" #include <iostream> #include"../io.h" #include"../global.h" #include"../global_cuda.h" #include "particles_3D.h" #include "../grid3D.h" #include "particles_boundaries_gpu.h" #define SCAN_SHARED_SIZE 2*TPB_PARTICLES __global__ void Set_Particles_Boundary_Kernel( int side, part_int_t n_local, Real *pos_dev, Real d_min, Real d_max, Real d_length ){ part_int_t tid = blockIdx.x * blockDim.x + threadIdx.x ; if ( tid >= n_local) return; Real pos; pos = pos_dev[tid]; if ( side == 0 ){ if ( pos < d_min ) pos += d_length; } if ( side == 1 ){ if ( pos >= d_max ) pos -= d_length; } pos_dev[tid] = pos; } void Grid3D::Set_Particles_Boundary_GPU( int dir, int side ){ Real d_min, d_max, L; Real *pos_dev; if ( dir == 0 ){ d_min = Particles.G.zMin; d_max = Particles.G.zMax; pos_dev = Particles.pos_x_dev; } if ( dir == 1 ){ d_min = Particles.G.yMin; d_max = Particles.G.yMax; pos_dev = Particles.pos_y_dev; } if ( dir == 2 ){ d_min = Particles.G.zMin; d_max = Particles.G.zMax; pos_dev = Particles.pos_z_dev; } L = d_max - d_min; // set values for GPU kernels int grid_size = (Particles.n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(grid_size, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); hipLaunchKernelGGL(Set_Particles_Boundary_Kernel, dim1dGrid, dim1dBlock, 0, 0, side, Particles.n_local, pos_dev, d_min, d_max, L ); CudaCheckError(); } // #ifdef MPI_CHOLLA __global__ void Get_Transfer_Flags_Kernel( part_int_t n_total, int side, Real d_min, Real d_max, Real *pos_d, bool *transfer_flags_d ){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_total ) return; bool transfer = 0; Real pos = pos_d[tid]; // if ( tid < 1 ) printf( "%f\n", pos); if ( side == 0 ){ if ( pos < d_min ) transfer = 1; } if ( side == 1 ){ if ( pos >= d_max ) transfer = 1; } // if ( transfer ) printf( "##Thread particles transfer\n"); transfer_flags_d[tid] = transfer; } __global__ void Scan_Kernel( part_int_t n_total, bool *transfer_flags_d, int *prefix_sum_d, int *prefix_sum_block_d ){ __shared__ int data_sh[SCAN_SHARED_SIZE]; int tid_block, block_start; // tid = threadIdx.x + blockIdx.x * blockDim.x; tid_block = threadIdx.x; block_start = 2*blockIdx.x*blockDim.x; data_sh[2*tid_block] = block_start + 2*tid_block < n_total ? (int) transfer_flags_d[block_start + 2*tid_block] : 0; data_sh[2*tid_block+1] = block_start + 2*tid_block+1 < n_total ? 
(int) transfer_flags_d[block_start + 2*tid_block+1] : 0; __syncthreads(); int offset = 1; int n = blockDim.x*2; int ai, bi; int t; for (int d = n/2; d>0; d/=2){ __syncthreads(); if ( tid_block < d ){ ai = offset*(2*tid_block+1)-1; bi = offset*(2*tid_block+2)-1; data_sh[bi] += data_sh[ai]; } offset *= 2; } // Clear the last element if (tid_block == 0) data_sh[n - 1] = 0; // Traverse down tree & build scan for (int d = 1; d < n; d *= 2){ __syncthreads(); offset /=2; if (tid_block < d){ ai = offset*(2*tid_block+1)-1; bi = offset*(2*tid_block+2)-1; t = data_sh[ai]; data_sh[ai] = data_sh[bi]; data_sh[bi] += t; } } __syncthreads(); // Write results to device memory if ( block_start + 2*tid_block < n_total ) prefix_sum_d[block_start + 2*tid_block] = data_sh[2*tid_block]; if ( block_start + 2*tid_block+1 < n_total) prefix_sum_d[block_start + 2*tid_block+1] = data_sh[2*tid_block+1]; // Write the block sum int last_flag_block = (int) transfer_flags_d[block_start + 2*(blockDim.x-1)+1]; if (tid_block == 0) prefix_sum_block_d[blockIdx.x] = data_sh[2*(blockDim.x-1)+1] + last_flag_block; } __global__ void Prefix_Sum_Blocks_Kernel( int n_partial, int *prefix_sum_block_d ){ int tid_block, val, start_index, n_threads; tid_block = threadIdx.x; n_threads = blockDim.x; __shared__ int data_sh[TPB_PARTICLES]; int sum = 0; int n = 0; start_index = n * n_threads; while( start_index < n_partial ){ data_sh[tid_block] = start_index+tid_block < n_partial ? prefix_sum_block_d[start_index+tid_block] : 0; __syncthreads(); if (tid_block == 0){ for ( int i=0; i<n_threads; i++ ){ val = data_sh[i]; data_sh[i] = sum; sum += val; } } __syncthreads(); if (start_index + tid_block < n_partial) prefix_sum_block_d[start_index+tid_block] = data_sh[tid_block]; n += 1; start_index = n * n_threads; } } __global__ void Sum_Blocks_Kernel( part_int_t n_total, int *prefix_sum_d, int *prefix_sum_block_d ){ int tid, tid_block, block_id, data_id; tid = threadIdx.x + blockIdx.x * blockDim.x; tid_block = threadIdx.x; block_id = blockIdx.x; data_id = block_id/2; __shared__ int block_sum_sh[1]; if ( tid_block == 0 ){ block_sum_sh[0] = prefix_sum_block_d[data_id]; // printf( "%d %d\n", block_id/2, prefix_sum_block[data_id] ); } __syncthreads(); if (tid < n_total) prefix_sum_d[tid] += block_sum_sh[0]; } __global__ void Get_N_Transfer_Particles_Kernel( part_int_t n_total, int *n_transfer_d, bool *transfer_flags_d, int *prefix_sum_d ){ n_transfer_d[0] = prefix_sum_d[n_total-1] + (int)transfer_flags_d[n_total-1]; // if ( n_transfer_d[0] > 0 ) printf( "##Thread transfer: %d\n", n_transfer_d[0]); } __global__ void Get_Transfer_Indices_Kernel( part_int_t n_total, bool *transfer_flags_d, int *prefix_sum_d, int *transfer_indices_d ){ int tid, transfer_index; tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_total ) return; transfer_index = prefix_sum_d[tid]; if ( transfer_flags_d[tid] ) transfer_indices_d[transfer_index] = tid; } __global__ void Select_Indices_to_Replace_Tranfered_Kernel( part_int_t n_total, int n_transfer, bool *transfer_flags_d, int *prefix_sum_d, int *replace_indices_d ){ int tid, tid_inv; tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_total ) return; tid_inv = n_total - tid - 1; bool transfer_flag = transfer_flags_d[tid]; if ( transfer_flag ) return; int prefix_sum_inv, replace_id; prefix_sum_inv = n_transfer - prefix_sum_d[tid]; replace_id = tid_inv - prefix_sum_inv; replace_indices_d[replace_id] = tid; } __global__ void Replace_Transfered_Particles_Kernel( int n_transfer, Real *field_d, int *transfer_indices_d, 
int *replace_indices_d, bool print_replace ){ int tid; tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_transfer ) return; int dst_id, src_id; dst_id = transfer_indices_d[tid]; src_id = replace_indices_d[tid]; if ( dst_id < src_id ){ if (print_replace) printf("Replacing: %f \n", field_d[dst_id] ); field_d[dst_id] = field_d[src_id]; } } void Replace_Transfered_Particles_GPU_function( int n_transfer, Real *field_d, int *transfer_indices_d, int *replace_indices_d, bool print_replace ){ int grid_size; grid_size = (n_transfer - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(grid_size, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); hipLaunchKernelGGL( Replace_Transfered_Particles_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_transfer, field_d, transfer_indices_d, replace_indices_d, print_replace ); CudaCheckError(); } part_int_t Select_Particles_to_Transfer_GPU_function( part_int_t n_local, int side, Real domainMin, Real domainMax, Real *pos_d, int *n_transfer_d, int *n_transfer_h, bool *transfer_flags_d, int *transfer_indices_d, int *replace_indices_d, int *transfer_prefix_sum_d, int *transfer_prefix_sum_blocks_d ){ // set values for GPU kernels int grid_size, grid_size_half; grid_size = (n_local - 1) / TPB_PARTICLES + 1; grid_size_half = ( (n_local-1)/2 ) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(grid_size, 1, 1); dim3 dim1dGrid_half(grid_size_half, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // Initialize the number of tranfer particles n_transfer_h[0] = 0; hipLaunchKernelGGL( Get_Transfer_Flags_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, side, domainMin, domainMax, pos_d, transfer_flags_d); CudaCheckError(); hipLaunchKernelGGL( Scan_Kernel, dim1dGrid_half, dim1dBlock, 0, 0, n_local, transfer_flags_d, transfer_prefix_sum_d, transfer_prefix_sum_blocks_d ); CudaCheckError(); hipLaunchKernelGGL( Prefix_Sum_Blocks_Kernel, 1, dim1dBlock , 0, 0, grid_size_half, transfer_prefix_sum_blocks_d ); CudaCheckError(); hipLaunchKernelGGL( Sum_Blocks_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, transfer_prefix_sum_d, transfer_prefix_sum_blocks_d ); CudaCheckError(); hipLaunchKernelGGL( Get_N_Transfer_Particles_Kernel, 1, 1, 0, 0, n_local, n_transfer_d, transfer_flags_d, transfer_prefix_sum_d ); CudaCheckError(); CudaSafeCall( cudaMemcpy( n_transfer_h, n_transfer_d, sizeof(int), cudaMemcpyDeviceToHost) ); CudaCheckError(); hipLaunchKernelGGL( Get_Transfer_Indices_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local , transfer_flags_d, transfer_prefix_sum_d, transfer_indices_d ); CudaCheckError(); hipLaunchKernelGGL( Select_Indices_to_Replace_Tranfered_Kernel, dim1dGrid, dim1dBlock , 0, 0, n_local, n_transfer_h[0], transfer_flags_d, transfer_prefix_sum_d, replace_indices_d ); CudaCheckError(); // if ( n_transfer_h[0] > 0 )printf( "N transfer: %d\n", n_transfer_h[0]); return n_transfer_h[0]; } __global__ void Load_Transfered_Particles_to_Buffer_Kernel( int n_transfer, int field_id, int n_fields_to_transfer, Real *field_d, int *transfer_indices_d, Real *send_buffer_d, Real domainMin, Real domainMax, int boundary_type ){ int tid; tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_transfer ) return; int src_id, dst_id; Real field_val; src_id = transfer_indices_d[tid]; dst_id = tid * n_fields_to_transfer + field_id; field_val = field_d[src_id]; // Set global periodic boundary conditions if ( boundary_type == 1 && field_val < domainMin ) field_val += ( domainMax - domainMin ); if ( boundary_type 
== 1 && field_val >= domainMax ) field_val -= ( domainMax - domainMin ); send_buffer_d[dst_id] = field_val; } void Load_Particles_to_Transfer_GPU_function( int n_transfer, int field_id, int n_fields_to_transfer, Real *field_d, int *transfer_indices_d, Real *send_buffer_d, Real domainMin, Real domainMax, int boundary_type ){ // set values for GPU kernels int grid_size; grid_size = (n_transfer - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(grid_size, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); hipLaunchKernelGGL( Load_Transfered_Particles_to_Buffer_Kernel, dim1dGrid, dim1dBlock , 0, 0, n_transfer, field_id, n_fields_to_transfer, field_d, transfer_indices_d, send_buffer_d, domainMin, domainMax, boundary_type ); CudaCheckError(); } void Copy_Particles_GPU_Buffer_to_Host_Buffer( int n_transfer, Real *buffer_h, Real *buffer_d ){ int transfer_size; transfer_size = n_transfer * N_DATA_PER_PARTICLE_TRANSFER; CudaSafeCall( cudaMemcpy( buffer_h, buffer_d, transfer_size*sizeof(Real), cudaMemcpyDeviceToHost) ); CudaCheckError(); } void Copy_Particles_Host_Buffer_to_GPU_Buffer( int n_transfer, Real *buffer_h, Real *buffer_d ){ int transfer_size; transfer_size = n_transfer * N_DATA_PER_PARTICLE_TRANSFER; CudaSafeCall( cudaMemcpy( buffer_d, buffer_h, transfer_size*sizeof(Real), cudaMemcpyHostToDevice) ); CudaCheckError(); } __global__ void Unload_Transfered_Particles_from_Buffer_Kernel( int n_local, int n_transfer, int field_id, int n_fields_to_transfer, Real *field_d, Real *recv_buffer_d ){ int tid; tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= n_transfer ) return; int src_id, dst_id; src_id = tid * n_fields_to_transfer + field_id; dst_id = n_local + tid; field_d[dst_id] = recv_buffer_d[src_id]; } void Unload_Particles_to_Transfer_GPU_function( int n_local, int n_transfer, int field_id, int n_fields_to_transfer, Real *field_d, Real *recv_buffer_d ){ // set values for GPU kernels int grid_size; grid_size = (n_transfer - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(grid_size, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); hipLaunchKernelGGL( Unload_Transfered_Particles_from_Buffer_Kernel, dim1dGrid, dim1dBlock , 0, 0, n_local, n_transfer, field_id, n_fields_to_transfer, field_d, recv_buffer_d ); CudaCheckError(); } // #endif//MPI_CHOLLA #endif //PARTICLES
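// Reference-only sketch, not part of the Cholla sources above: the index arithmetic in
// Select_Indices_to_Replace_Tranfered_Kernel, written out on the host. For a particle
// that stays, kept_behind = (n_total - tid - 1) - (n_transfer - prefix_sum[tid]) counts
// the staying particles strictly behind it, so replace_indices[0] is the last staying
// particle, replace_indices[1] the second-to-last, and so on. Only the first n_transfer
// entries are consumed by Replace_Transfered_Particles_Kernel to back-fill the holes
// left by transferred particles.
#include <vector>

std::vector<int> replace_indices_reference(const std::vector<bool>& transfer_flags,
                                           const std::vector<int>& prefix_sum,
                                           int n_transfer) {
  const int n_total = static_cast<int>(transfer_flags.size());
  std::vector<int> replace_indices(n_total - n_transfer, -1);
  for (int tid = 0; tid < n_total; ++tid) {
    if (transfer_flags[tid]) continue;   // transferred particles leave holes; they are not sources
    int kept_behind = (n_total - tid - 1) - (n_transfer - prefix_sum[tid]);
    replace_indices[kept_behind] = tid;  // staying particles listed back-to-front
  }
  return replace_indices;
}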
0b80719988de0b701c0a591cbdcb114fb75ffe40.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C" __global__ void invert(uchar4* data, int w, int h)
{
  int x = threadIdx.x+blockIdx.x*blockDim.x;
  int y = threadIdx.y+blockIdx.y*blockDim.y;
  if (x < w && y < h) {
    int index = y*w+x;
    uchar4 pixel = data[index];
    pixel.x = 255 - pixel.x;
    pixel.y = 255 - pixel.y;
    pixel.z = 255 - pixel.z;
    pixel.w = 255 - pixel.w;
    data[index] = pixel;
  }
}
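// Aside, not part of the dataset pair above: hipify rewrites CUDA triple-chevron
// launches into hipLaunchKernelGGL. For the invert kernel the two forms are equivalent
// (grid, block, shared-memory bytes and stream map one-to-one); the variable names here
// are placeholders.
//
//   CUDA:  invert<<<grid, block, 0, stream>>>(data, w, h);
//   HIP:   hipLaunchKernelGGL(invert, grid, block, 0, stream, data, w, h);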
0b80719988de0b701c0a591cbdcb114fb75ffe40.cu
#include "includes.h" extern "C" __global__ void invert(uchar4* data, int w, int h) { int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; if (x < w && y < h) { int index = y*w+x; uchar4 pixel = data[index]; pixel.x = 255 - pixel.x; pixel.y = 255 - pixel.y; pixel.z = 255 - pixel.z; pixel.w = 255 - pixel.w; data[index] = pixel; } }
b0479d1f432dd2713b2a2d55c97b5d087708c05e.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_runtime.h> #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/user_op_tensor.h" #include "oneflow/user/kernels/to_contiguous_kernel.h" #if TORCH_HIP_VERSION >= 11000 #include "oneflow/user/kernels/fft_kernel_util.h" #include "cufft_plan_cache.h" namespace oneflow { namespace { template<typename FFTTYPE> __global__ void fft_apply_normalization(FFTTYPE* dst, const double normalization_scale, size_t n, bool IsNormalized) { if (!IsNormalized) { return; } CUDA_1D_KERNEL_LOOP(i, n) { dst[i].x *= normalization_scale; dst[i].y *= normalization_scale; }; } struct FillConjSymmetricParams { int64_t last_dim; int64_t elem_count; int64_t ndim; oneflow::NdIndexStrideOffsetHelper<int64_t, SHAPE_MAX_AXIS_SIZE> helper; int64_t last_dim_size; int64_t last_dim_half; FillConjSymmetricParams() = default; FillConjSymmetricParams(const Shape& shape, const Stride& strides, int64_t last_dim_, int64_t elemcnt) : last_dim(last_dim_), elem_count(elemcnt), ndim(strides.size()), helper(strides.data(), ndim) { CHECK_OR_THROW(strides.size() == shape.size()); last_dim_size = shape[last_dim]; last_dim_half = last_dim_size / 2; } }; } // namespace template<typename T> __global__ void _conj_symmetry_cuda(T* data_out, FillConjSymmetricParams param) { CUDA_1D_KERNEL_LOOP_T(int64_t, offset, param.elem_count) { int64_t ndim = param.ndim; int64_t indices[SHAPE_MAX_AXIS_SIZE]; param.helper.OffsetToNdIndex(offset, indices, ndim); if (indices[param.last_dim] <= param.last_dim_half) { continue; } int64_t cur_last_dim_index = indices[param.last_dim]; // get symmetric indices[param.last_dim] = param.last_dim_size - cur_last_dim_index; int64_t symmetric_offset = param.helper.NdIndexToOffset(indices, ndim); // conj data_out[offset] = T{data_out[symmetric_offset].x, -data_out[symmetric_offset].y}; } } template<typename T> struct FillConjSymmetryUtil<DeviceType::kCUDA, T> { static void FillConjSymmetryForward(ep::Stream* stream, T* data_out, const Shape& shape, const Stride& strides, const int64_t last_dim, int64_t elem_count) { FillConjSymmetricParams param(shape, strides, last_dim, elem_count); hipLaunchKernelGGL(( _conj_symmetry_cuda<T>), dim3(BlocksNum4ThreadsNum(elem_count)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), data_out, param); } }; template<typename IN, typename OUT> __global__ void _convert_to_double_sized(const IN* in, OUT* dst, size_t len, size_t n) { size_t fact_len = 2 * len - 2; CUDA_1D_KERNEL_LOOP(i, n) { int index_x = i / fact_len; int index_y = i % fact_len; if (index_y == 0) { dst[i] = in[index_x * len]; } else if (index_y == len - 1) { dst[i] = in[(index_x + 1) * len - 1]; } else if (index_y < len - 1 && index_y > 0) { dst[i] = in[index_x * len + index_y]; } else { auto index = (index_x + 2) * len - index_y - 2; dst[i].x = in[index].x; dst[i].y = -in[index].y; } } } template<typename IN, typename OUT> 
__global__ void _convert_complex_to_real(const IN* in, OUT* out, size_t n) { CUDA_1D_KERNEL_LOOP(i, n) { out[2 * i] = in[i].x; out[2 * i + 1] = in[i].y; }; } template<typename real_type, typename complex_type> struct ComplexConvertUtil<DeviceType::kCUDA, real_type, complex_type> { static void ConvertToDoubleSized(ep::Stream* stream, const complex_type* in, complex_type* dst, size_t len, size_t n) { hipLaunchKernelGGL(( _convert_to_double_sized), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), in, dst, len, n); } static void ConvertComplexToReal(ep::Stream* stream, const complex_type* in, real_type* out, size_t n) { hipLaunchKernelGGL(( _convert_complex_to_real), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), in, out, n); } }; template<typename dtype_in, typename dtype_out> class StftGpuKernel final : public user_op::OpKernel { public: StftGpuKernel() = default; ~StftGpuKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* input = ctx->Tensor4ArgNameAndIndex("input", 0); user_op::Tensor* output = ctx->Tensor4ArgNameAndIndex("output", 0); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const bool normalized = ctx->Attr<bool>("normalized"); const bool onesided = ctx->Attr<bool>("onesided"); const bool return_complex = ctx->Attr<bool>("return_complex"); const ShapeView& input_shape = input->shape_view(); const ShapeView& output_shape = output->shape_view(); const Stride& input_stride = input->stride(); const int out_elem_cnt = return_complex ? output->shape_view().elem_cnt() : output->shape_view().elem_cnt() / 2; const dtype_in* data_in = input->dptr<dtype_in>(); dtype_in* data_out = output->mut_dptr<dtype_in>(); dtype_out* out_tmp_buffer = reinterpret_cast<dtype_out*>(tmp_buffer->mut_dptr<char>()); int64_t ndim = 1; int64_t batch = static_cast<int32_t>(input_shape.At(1)); int64_t fft_size = static_cast<int32_t>(input_shape.At(2)); int64_t rank[1] = {fft_size}; const Stride& in_stride = {input_stride.at(1), input_stride.at(2)}; const Shape& in_shape = {batch, fft_size}; const Shape& out_shape = {batch, fft_size / 2 + 1}; Stride out_stride = Stride(out_shape); CuFFTParams params(in_shape, out_shape, in_stride, out_stride, ndim, CUFFT_EXCUTETYPE::R2C, input->data_type()); CuFFTConfig config(params); auto& plan = config.plan(); OF_CUFFT_CHECK(hipfftSetStream(plan, ctx->stream()->As<ep::CudaStream>()->cuda_stream())); void* workspace{}; OF_CUDA_CHECK(hipMalloc(&workspace, config.workspace_size())); OF_CUFFT_CHECK(hipfftSetWorkArea(plan, workspace)); int64_t in_offset = input_stride.at(0); int64_t out_offset = std::accumulate(out_shape.begin(), out_shape.end(), 0, std::multiplies<int64_t>()); int64_t signal_groups_count = static_cast<int64_t>(input_shape.At(0)); for (int64_t i = 0; i < signal_groups_count; i++) { config.excute((void*)(data_in + i * in_offset), (void*)(out_tmp_buffer + i * out_offset), /*forward=*/true); } OF_CUDA_CHECK(hipFree(workspace)); if (!onesided) { size_t last_dim_length = fft_size / 2 + 1; dtype_out* doublesided_tmp_buffer = reinterpret_cast<dtype_out*>(tmp_buffer->mut_dptr<char>()) + out_elem_cnt; ComplexConvertUtil<DeviceType::kCUDA, dtype_in, dtype_out>::ConvertToDoubleSized( ctx->stream(), out_tmp_buffer, doublesided_tmp_buffer, last_dim_length, out_elem_cnt); out_tmp_buffer = doublesided_tmp_buffer; } const double 
normalization_scale = _fft_normalization_scale<double>(input_shape.back(), normalized); hipLaunchKernelGGL(( fft_apply_normalization), dim3(BlocksNum4ThreadsNum(out_elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream(), out_tmp_buffer, normalization_scale, out_elem_cnt, normalized); if (!return_complex) { ComplexConvertUtil<DeviceType::kCUDA, dtype_in, dtype_out>::ConvertComplexToReal( ctx->stream(), out_tmp_buffer, data_out, out_elem_cnt); } else { // TODO(yzm):support return_complex after oneflow supports complex numbers } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_STFT_GPU_KERNEL(intype, outtype) \ REGISTER_USER_KERNEL("stft") \ .SetCreateFn<StftGpuKernel<intype, outtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ && (user_op::HobDataType("input", 0) == GetDataType<intype>::value)) \ .SetInferTmpSizeFn([](user_op::InferContext* ctx) { \ const Shape& output_shape = ctx->InputShape("output", 0); \ const bool return_complex = ctx->Attr<bool>("return_complex"); \ const bool onesided = ctx->Attr<bool>("onesided"); \ int64_t output_elem_cnt = \ return_complex ? output_shape.elem_cnt() : output_shape.elem_cnt() / 2; \ const int64_t output_bytes = GetCudaAlignedSize(output_elem_cnt * sizeof(outtype)); \ return onesided ? output_bytes : 2 * output_bytes; \ }); REGISTER_STFT_GPU_KERNEL(float, hipfftComplex) REGISTER_STFT_GPU_KERNEL(double, hipfftDoubleComplex) template<typename T, typename FCT_TYPE> class FftC2CKernelUtil<DeviceType::kCUDA, T, FCT_TYPE> { static void FftC2CForward(ep::Stream* stream, const T* data_in, T* data_out, const Shape& input_shape, const Shape& output_shape, const Stride& input_stride, const Stride& output_stride, bool forward, const std::vector<int64_t>& dims, FCT_TYPE normalization, DataType real_type) { // NOTE: before calling `FftC2CKernelUtil<DeviceType::kCUDA, T, FCT_TYPE>`, input must be // batched out already CuFFTParams params(input_shape, output_shape, input_stride, output_stride, dims.size(), CUFFT_EXCUTETYPE::C2C, real_type); CuFFTConfig config(params); auto& plan = config.plan(); OF_CUFFT_CHECK(hipfftSetStream(plan, stream->As<ep::CudaStream>()->cuda_stream())); void* workspace{}; OF_CUDA_CHECK(hipMalloc(&workspace, config.workspace_size())); OF_CUFFT_CHECK(hipfftSetWorkArea(plan, workspace)); config.excute((void*)data_in, (void*)data_out, forward); OF_CUDA_CHECK(hipFree(workspace)); } }; template<typename IN, typename OUT> struct FftR2CKernelUtil<DeviceType::kCUDA, IN, OUT> { static void FftR2CForward(ep::Stream* stream, const IN* data_in, OUT* data_out, const Shape& input_shape, const Shape& output_shape, const Stride& input_stride, const Stride& output_stride, bool forward, const std::vector<int64_t>& dims, IN normalization, DataType real_type) { // NOTE: before calling `FftR2CKernelUtil<DeviceType::kCUDA, IN, OUT>`, input must be batched // out already CuFFTParams params(input_shape, output_shape, input_stride, output_stride, dims.size(), CUFFT_EXCUTETYPE::R2C, real_type); CuFFTConfig config(params); auto& plan = config.plan(); OF_CUFFT_CHECK(hipfftSetStream(plan, stream->As<ep::CudaStream>()->cuda_stream())); void* workspace{}; OF_CUDA_CHECK(hipMalloc(&workspace, config.workspace_size())); OF_CUFFT_CHECK(hipfftSetWorkArea(plan, workspace)); config.excute((void*)data_in, (void*)data_out, forward); OF_CUDA_CHECK(hipFree(workspace)); } }; template<typename IN, typename OUT> struct FftC2RKernelUtil<DeviceType::kCUDA, IN, OUT> { static 
void FftC2RForward(ep::Stream* stream, const IN* data_in, OUT* data_out, const Shape& input_shape, const Shape& output_shape, const Stride& input_stride, const Stride& output_stride, bool forward, int64_t last_dim_size, const std::vector<int64_t>& dims, OUT normalization, DataType real_type) { // NOTE: before calling `FftC2RKernelUtil<DeviceType::kCUDA, IN, OUT>`, input must be batched // out already CuFFTParams params(input_shape, output_shape, input_stride, output_stride, dims.size(), CUFFT_EXCUTETYPE::C2R, real_type); CuFFTConfig config(params); auto& plan = config.plan(); OF_CUFFT_CHECK(hipfftSetStream(plan, stream->As<ep::CudaStream>()->cuda_stream())); void* workspace{}; OF_CUDA_CHECK(hipMalloc(&workspace, config.workspace_size())); OF_CUFFT_CHECK(hipfftSetWorkArea(plan, workspace)); config.excute((void*)data_in, (void*)data_out, forward); OF_CUDA_CHECK(hipFree(workspace)); } }; template struct FillConjSymmetryUtil<DeviceType::kCUDA, hipComplex>; template struct FillConjSymmetryUtil<DeviceType::kCUDA, hipDoubleComplex>; template struct ComplexConvertUtil<DeviceType::kCUDA, float, hipComplex>; template struct ComplexConvertUtil<DeviceType::kCUDA, double, hipDoubleComplex>; template struct FftC2CKernelUtil<DeviceType::kCUDA, hipComplex, /*FCT_TYPE=*/float>; template struct FftC2CKernelUtil<DeviceType::kCUDA, hipDoubleComplex, /*FCT_TYPE=*/double>; template struct FftR2CKernelUtil<DeviceType::kCUDA, float, hipComplex>; template struct FftR2CKernelUtil<DeviceType::kCUDA, double, hipDoubleComplex>; template struct FftC2RKernelUtil<DeviceType::kCUDA, hipComplex, float>; template struct FftC2RKernelUtil<DeviceType::kCUDA, hipDoubleComplex, double>; } // namespace oneflow #endif // TORCH_HIP_VERSION >= 11000
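// Reference-only sketch, not part of the OneFlow sources above: what
// _convert_to_double_sized computes for one batch row. Given the onesided R2C output of
// length len = n/2 + 1 for an even-length real signal, the full spectrum of length
// 2*len - 2 is rebuilt from conjugate symmetry, X[k] = conj(X[n - k]).
#include <complex>
#include <vector>

std::vector<std::complex<float>> rebuild_full_spectrum_reference(
    const std::vector<std::complex<float>>& half) {
  const size_t len = half.size();       // n/2 + 1 onesided bins
  const size_t full_len = 2 * len - 2;  // n (even-length signal assumed)
  std::vector<std::complex<float>> full(full_len);
  for (size_t k = 0; k < full_len; ++k) {
    if (k < len) {
      full[k] = half[k];                        // DC .. Nyquist copied as-is
    } else {
      full[k] = std::conj(half[full_len - k]);  // mirrored and conjugated
    }
  }
  return full;
}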
b0479d1f432dd2713b2a2d55c97b5d087708c05e.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda.h> #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/user_op_tensor.h" #include "oneflow/user/kernels/to_contiguous_kernel.h" #if CUDA_VERSION >= 11000 #include "oneflow/user/kernels/fft_kernel_util.h" #include "cufft_plan_cache.h" namespace oneflow { namespace { template<typename FFTTYPE> __global__ void fft_apply_normalization(FFTTYPE* dst, const double normalization_scale, size_t n, bool IsNormalized) { if (!IsNormalized) { return; } CUDA_1D_KERNEL_LOOP(i, n) { dst[i].x *= normalization_scale; dst[i].y *= normalization_scale; }; } struct FillConjSymmetricParams { int64_t last_dim; int64_t elem_count; int64_t ndim; oneflow::NdIndexStrideOffsetHelper<int64_t, SHAPE_MAX_AXIS_SIZE> helper; int64_t last_dim_size; int64_t last_dim_half; FillConjSymmetricParams() = default; FillConjSymmetricParams(const Shape& shape, const Stride& strides, int64_t last_dim_, int64_t elemcnt) : last_dim(last_dim_), elem_count(elemcnt), ndim(strides.size()), helper(strides.data(), ndim) { CHECK_OR_THROW(strides.size() == shape.size()); last_dim_size = shape[last_dim]; last_dim_half = last_dim_size / 2; } }; } // namespace template<typename T> __global__ void _conj_symmetry_cuda(T* data_out, FillConjSymmetricParams param) { CUDA_1D_KERNEL_LOOP_T(int64_t, offset, param.elem_count) { int64_t ndim = param.ndim; int64_t indices[SHAPE_MAX_AXIS_SIZE]; param.helper.OffsetToNdIndex(offset, indices, ndim); if (indices[param.last_dim] <= param.last_dim_half) { continue; } int64_t cur_last_dim_index = indices[param.last_dim]; // get symmetric indices[param.last_dim] = param.last_dim_size - cur_last_dim_index; int64_t symmetric_offset = param.helper.NdIndexToOffset(indices, ndim); // conj data_out[offset] = T{data_out[symmetric_offset].x, -data_out[symmetric_offset].y}; } } template<typename T> struct FillConjSymmetryUtil<DeviceType::kCUDA, T> { static void FillConjSymmetryForward(ep::Stream* stream, T* data_out, const Shape& shape, const Stride& strides, const int64_t last_dim, int64_t elem_count) { FillConjSymmetricParams param(shape, strides, last_dim, elem_count); _conj_symmetry_cuda<T><<<BlocksNum4ThreadsNum(elem_count), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>(data_out, param); } }; template<typename IN, typename OUT> __global__ void _convert_to_double_sized(const IN* in, OUT* dst, size_t len, size_t n) { size_t fact_len = 2 * len - 2; CUDA_1D_KERNEL_LOOP(i, n) { int index_x = i / fact_len; int index_y = i % fact_len; if (index_y == 0) { dst[i] = in[index_x * len]; } else if (index_y == len - 1) { dst[i] = in[(index_x + 1) * len - 1]; } else if (index_y < len - 1 && index_y > 0) { dst[i] = in[index_x * len + index_y]; } else { auto index = (index_x + 2) * len - index_y - 2; dst[i].x = in[index].x; dst[i].y = -in[index].y; } } } template<typename IN, typename OUT> __global__ void _convert_complex_to_real(const IN* in, OUT* out, size_t n) { CUDA_1D_KERNEL_LOOP(i, n) { out[2 * 
i] = in[i].x; out[2 * i + 1] = in[i].y; }; } template<typename real_type, typename complex_type> struct ComplexConvertUtil<DeviceType::kCUDA, real_type, complex_type> { static void ConvertToDoubleSized(ep::Stream* stream, const complex_type* in, complex_type* dst, size_t len, size_t n) { _convert_to_double_sized<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>(in, dst, len, n); } static void ConvertComplexToReal(ep::Stream* stream, const complex_type* in, real_type* out, size_t n) { _convert_complex_to_real<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>(in, out, n); } }; template<typename dtype_in, typename dtype_out> class StftGpuKernel final : public user_op::OpKernel { public: StftGpuKernel() = default; ~StftGpuKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* input = ctx->Tensor4ArgNameAndIndex("input", 0); user_op::Tensor* output = ctx->Tensor4ArgNameAndIndex("output", 0); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const bool normalized = ctx->Attr<bool>("normalized"); const bool onesided = ctx->Attr<bool>("onesided"); const bool return_complex = ctx->Attr<bool>("return_complex"); const ShapeView& input_shape = input->shape_view(); const ShapeView& output_shape = output->shape_view(); const Stride& input_stride = input->stride(); const int out_elem_cnt = return_complex ? output->shape_view().elem_cnt() : output->shape_view().elem_cnt() / 2; const dtype_in* data_in = input->dptr<dtype_in>(); dtype_in* data_out = output->mut_dptr<dtype_in>(); dtype_out* out_tmp_buffer = reinterpret_cast<dtype_out*>(tmp_buffer->mut_dptr<char>()); int64_t ndim = 1; int64_t batch = static_cast<int32_t>(input_shape.At(1)); int64_t fft_size = static_cast<int32_t>(input_shape.At(2)); int64_t rank[1] = {fft_size}; const Stride& in_stride = {input_stride.at(1), input_stride.at(2)}; const Shape& in_shape = {batch, fft_size}; const Shape& out_shape = {batch, fft_size / 2 + 1}; Stride out_stride = Stride(out_shape); CuFFTParams params(in_shape, out_shape, in_stride, out_stride, ndim, CUFFT_EXCUTETYPE::R2C, input->data_type()); CuFFTConfig config(params); auto& plan = config.plan(); OF_CUFFT_CHECK(cufftSetStream(plan, ctx->stream()->As<ep::CudaStream>()->cuda_stream())); void* workspace{}; OF_CUDA_CHECK(cudaMalloc(&workspace, config.workspace_size())); OF_CUFFT_CHECK(cufftSetWorkArea(plan, workspace)); int64_t in_offset = input_stride.at(0); int64_t out_offset = std::accumulate(out_shape.begin(), out_shape.end(), 0, std::multiplies<int64_t>()); int64_t signal_groups_count = static_cast<int64_t>(input_shape.At(0)); for (int64_t i = 0; i < signal_groups_count; i++) { config.excute((void*)(data_in + i * in_offset), (void*)(out_tmp_buffer + i * out_offset), /*forward=*/true); } OF_CUDA_CHECK(cudaFree(workspace)); if (!onesided) { size_t last_dim_length = fft_size / 2 + 1; dtype_out* doublesided_tmp_buffer = reinterpret_cast<dtype_out*>(tmp_buffer->mut_dptr<char>()) + out_elem_cnt; ComplexConvertUtil<DeviceType::kCUDA, dtype_in, dtype_out>::ConvertToDoubleSized( ctx->stream(), out_tmp_buffer, doublesided_tmp_buffer, last_dim_length, out_elem_cnt); out_tmp_buffer = doublesided_tmp_buffer; } const double normalization_scale = _fft_normalization_scale<double>(input_shape.back(), normalized); fft_apply_normalization<<<BlocksNum4ThreadsNum(out_elem_cnt), kCudaThreadsNumPerBlock, 0, 
ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>( out_tmp_buffer, normalization_scale, out_elem_cnt, normalized); if (!return_complex) { ComplexConvertUtil<DeviceType::kCUDA, dtype_in, dtype_out>::ConvertComplexToReal( ctx->stream(), out_tmp_buffer, data_out, out_elem_cnt); } else { // TODO(yzm):support return_complex after oneflow supports complex numbers } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_STFT_GPU_KERNEL(intype, outtype) \ REGISTER_USER_KERNEL("stft") \ .SetCreateFn<StftGpuKernel<intype, outtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ && (user_op::HobDataType("input", 0) == GetDataType<intype>::value)) \ .SetInferTmpSizeFn([](user_op::InferContext* ctx) { \ const Shape& output_shape = ctx->InputShape("output", 0); \ const bool return_complex = ctx->Attr<bool>("return_complex"); \ const bool onesided = ctx->Attr<bool>("onesided"); \ int64_t output_elem_cnt = \ return_complex ? output_shape.elem_cnt() : output_shape.elem_cnt() / 2; \ const int64_t output_bytes = GetCudaAlignedSize(output_elem_cnt * sizeof(outtype)); \ return onesided ? output_bytes : 2 * output_bytes; \ }); REGISTER_STFT_GPU_KERNEL(float, cufftComplex) REGISTER_STFT_GPU_KERNEL(double, cufftDoubleComplex) template<typename T, typename FCT_TYPE> class FftC2CKernelUtil<DeviceType::kCUDA, T, FCT_TYPE> { static void FftC2CForward(ep::Stream* stream, const T* data_in, T* data_out, const Shape& input_shape, const Shape& output_shape, const Stride& input_stride, const Stride& output_stride, bool forward, const std::vector<int64_t>& dims, FCT_TYPE normalization, DataType real_type) { // NOTE: before calling `FftC2CKernelUtil<DeviceType::kCUDA, T, FCT_TYPE>`, input must be // batched out already CuFFTParams params(input_shape, output_shape, input_stride, output_stride, dims.size(), CUFFT_EXCUTETYPE::C2C, real_type); CuFFTConfig config(params); auto& plan = config.plan(); OF_CUFFT_CHECK(cufftSetStream(plan, stream->As<ep::CudaStream>()->cuda_stream())); void* workspace{}; OF_CUDA_CHECK(cudaMalloc(&workspace, config.workspace_size())); OF_CUFFT_CHECK(cufftSetWorkArea(plan, workspace)); config.excute((void*)data_in, (void*)data_out, forward); OF_CUDA_CHECK(cudaFree(workspace)); } }; template<typename IN, typename OUT> struct FftR2CKernelUtil<DeviceType::kCUDA, IN, OUT> { static void FftR2CForward(ep::Stream* stream, const IN* data_in, OUT* data_out, const Shape& input_shape, const Shape& output_shape, const Stride& input_stride, const Stride& output_stride, bool forward, const std::vector<int64_t>& dims, IN normalization, DataType real_type) { // NOTE: before calling `FftR2CKernelUtil<DeviceType::kCUDA, IN, OUT>`, input must be batched // out already CuFFTParams params(input_shape, output_shape, input_stride, output_stride, dims.size(), CUFFT_EXCUTETYPE::R2C, real_type); CuFFTConfig config(params); auto& plan = config.plan(); OF_CUFFT_CHECK(cufftSetStream(plan, stream->As<ep::CudaStream>()->cuda_stream())); void* workspace{}; OF_CUDA_CHECK(cudaMalloc(&workspace, config.workspace_size())); OF_CUFFT_CHECK(cufftSetWorkArea(plan, workspace)); config.excute((void*)data_in, (void*)data_out, forward); OF_CUDA_CHECK(cudaFree(workspace)); } }; template<typename IN, typename OUT> struct FftC2RKernelUtil<DeviceType::kCUDA, IN, OUT> { static void FftC2RForward(ep::Stream* stream, const IN* data_in, OUT* data_out, const Shape& input_shape, const Shape& output_shape, const Stride& input_stride, const Stride& output_stride, bool forward, int64_t 
last_dim_size, const std::vector<int64_t>& dims, OUT normalization, DataType real_type) { // NOTE: before calling `FftC2RKernelUtil<DeviceType::kCUDA, IN, OUT>`, input must be batched // out already CuFFTParams params(input_shape, output_shape, input_stride, output_stride, dims.size(), CUFFT_EXCUTETYPE::C2R, real_type); CuFFTConfig config(params); auto& plan = config.plan(); OF_CUFFT_CHECK(cufftSetStream(plan, stream->As<ep::CudaStream>()->cuda_stream())); void* workspace{}; OF_CUDA_CHECK(cudaMalloc(&workspace, config.workspace_size())); OF_CUFFT_CHECK(cufftSetWorkArea(plan, workspace)); config.excute((void*)data_in, (void*)data_out, forward); OF_CUDA_CHECK(cudaFree(workspace)); } }; template struct FillConjSymmetryUtil<DeviceType::kCUDA, cuComplex>; template struct FillConjSymmetryUtil<DeviceType::kCUDA, cuDoubleComplex>; template struct ComplexConvertUtil<DeviceType::kCUDA, float, cuComplex>; template struct ComplexConvertUtil<DeviceType::kCUDA, double, cuDoubleComplex>; template struct FftC2CKernelUtil<DeviceType::kCUDA, cuComplex, /*FCT_TYPE=*/float>; template struct FftC2CKernelUtil<DeviceType::kCUDA, cuDoubleComplex, /*FCT_TYPE=*/double>; template struct FftR2CKernelUtil<DeviceType::kCUDA, float, cuComplex>; template struct FftR2CKernelUtil<DeviceType::kCUDA, double, cuDoubleComplex>; template struct FftC2RKernelUtil<DeviceType::kCUDA, cuComplex, float>; template struct FftC2RKernelUtil<DeviceType::kCUDA, cuDoubleComplex, double>; } // namespace oneflow #endif // CUDA_VERSION >= 11000
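// Standalone sketch, not OneFlow code: the plain cuFFT R2C flow that the
// CuFFTParams/CuFFTConfig helpers above are assumed to wrap -- create a plan with
// auto-allocation disabled, attach a caller-owned work area, run the batched
// real-to-complex transform on a stream, then release everything.
#include <cstdio>
#include <cuda_runtime.h>
#include <cufft.h>

// tiny status check so the sketch stays short; real code would use the project's
// OF_CUFFT_CHECK / OF_CUDA_CHECK macros instead
#define CUFFT_CHECK_SKETCH(expr)                                                      \
  do {                                                                                \
    cufftResult r_ = (expr);                                                          \
    if (r_ != CUFFT_SUCCESS)                                                          \
      std::fprintf(stderr, "cuFFT error %d at %s:%d\n", (int)r_, __FILE__, __LINE__); \
  } while (0)

void batched_r2c_sketch(float* d_in, cufftComplex* d_out, int fft_size, int batch,
                        cudaStream_t stream) {
  cufftHandle plan;
  size_t work_size = 0;
  CUFFT_CHECK_SKETCH(cufftCreate(&plan));
  CUFFT_CHECK_SKETCH(cufftSetAutoAllocation(plan, 0));  // we manage the work area ourselves
  CUFFT_CHECK_SKETCH(cufftMakePlan1d(plan, fft_size, CUFFT_R2C, batch, &work_size));
  CUFFT_CHECK_SKETCH(cufftSetStream(plan, stream));

  void* workspace = nullptr;
  cudaMalloc(&workspace, work_size);
  CUFFT_CHECK_SKETCH(cufftSetWorkArea(plan, workspace));

  CUFFT_CHECK_SKETCH(cufftExecR2C(plan, d_in, d_out));  // out-of-place, batched transform

  CUFFT_CHECK_SKETCH(cufftDestroy(plan));
  cudaFree(workspace);
}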
d4d3758f7d293d561492a8616607bd9bdc067a44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp" #include "HugeCTR/include/utils.hpp" namespace HugeCTR { namespace { // reorder operation before all2all in backward propagation template <typename TypeEmbeddingComp> __global__ void backward_reorder_kernel(int batch_size_per_gpu, int slot_num, int embedding_vec_size, int gpu_num, const TypeEmbeddingComp *input, TypeEmbeddingComp *output) { // blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding // vector gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one // sample on each GPU Each thread needs to process slot_num slots int tid = threadIdx.x; int bid = blockIdx.x; int sample_id = bid; // sample_id on the current GPU if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) { int src_offset = sample_id * slot_num * embedding_vec_size; int src_stride = embedding_vec_size; for (int slot_id = 0; slot_id < slot_num; slot_id++) { int gpu_id = slot_id % gpu_num; int offset_pre = 0; // offset in previous gpus for (int id = 0; id < gpu_id; id++) { int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0); int stride = batch_size_per_gpu * slot_num_per_gpu; offset_pre += stride; } int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0); int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu int dst_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size; int src_addr = src_offset + src_stride * slot_id; output[dst_addr + tid] = input[src_addr + tid]; } } } // reorder operation before all2all in backward propagation __global__ void backward_reorder_align2_kernel(int batch_size_per_gpu, int slot_num, int embedding_vec_size, int gpu_num, const __half *input, __half *output) { // blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding // vector gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one // sample on each GPU Each thread needs to process slot_num slots int tid = threadIdx.x; int bid = blockIdx.x; int sample_id = bid; // sample_id on the current GPU if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) { const __half2 *input2 = reinterpret_cast<const __half2 *>(input); __half2 *output2 = reinterpret_cast<__half2 *>(output); int src_offset = sample_id * slot_num * embedding_vec_size; int src_stride = embedding_vec_size; for (int slot_id = 0; slot_id < slot_num; slot_id++) { int gpu_id = slot_id % gpu_num; int offset_pre = 0; // offset in previous gpus for (int id = 0; id < gpu_id; id++) { int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 
1 : 0); int stride = batch_size_per_gpu * slot_num_per_gpu; offset_pre += stride; } int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0); int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu int dst_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size; int src_addr = src_offset + src_stride * slot_id; output2[dst_addr + tid] = input2[src_addr + tid]; } } } template <typename TypeEmbeddingComp> void do_backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count, const TypeEmbeddingComp *input, TypeEmbeddingComp *output, hipStream_t stream) { const size_t grid_size = batch_size_per_gpu; const size_t block_size = embedding_vec_size; hipLaunchKernelGGL(( backward_reorder_kernel), dim3(grid_size), dim3(block_size), 0, stream, batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output); } void do_backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count, const __half *input, __half *output, hipStream_t stream) { const size_t grid_size = batch_size_per_gpu; if (embedding_vec_size % 2 == 0) { const size_t block_size = embedding_vec_size / 2; hipLaunchKernelGGL(( backward_reorder_align2_kernel), dim3(grid_size), dim3(block_size), 0, stream, batch_size_per_gpu, slot_num, embedding_vec_size / 2, total_gpu_count, input, output); } else { const size_t block_size = embedding_vec_size; hipLaunchKernelGGL(( backward_reorder_kernel), dim3(grid_size), dim3(block_size), 0, stream, batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output); } } } // namespace template <typename TypeEmbeddingComp> void SparseEmbeddingFunctors::backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, const Tensors2<TypeEmbeddingComp> &src_tensors, Tensors2<TypeEmbeddingComp> &dst_tensors, const ResourceManager &resource_manager) { size_t total_gpu_count = resource_manager.get_global_gpu_count(); backward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, src_tensors, dst_tensors, resource_manager); } template void SparseEmbeddingFunctors::backward_reorder<float>( size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors, const ResourceManager &resource_manager); template void SparseEmbeddingFunctors::backward_reorder<__half>( size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors, const ResourceManager &resource_manager); template <typename TypeEmbeddingComp> void SparseEmbeddingFunctors::backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count, const Tensors2<TypeEmbeddingComp> &src_tensors, Tensors2<TypeEmbeddingComp> &dst_tensors, const ResourceManager &resource_manager) { size_t local_gpu_count = resource_manager.get_local_gpu_count(); CudaDeviceContext context; for (size_t id = 0; id < local_gpu_count; id++) { const auto &local_gpu = resource_manager.get_local_gpu(id); context.set_device(local_gpu->get_device_id()); do_backward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, src_tensors[id].get_ptr(), dst_tensors[id].get_ptr(), local_gpu->get_stream()); } } template void SparseEmbeddingFunctors::backward_reorder<float>( size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t 
total_gpu_count, const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors, const ResourceManager &resource_manager); template void SparseEmbeddingFunctors::backward_reorder<__half>( size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count, const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors, const ResourceManager &resource_manager); } // namespace HugeCTR
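// ---------------------------------------------------------------------------
// An illustrative sketch (not HugeCTR code) of the launch-syntax
// correspondence visible in this hip/cu pair: hipify rewrites the CUDA
// triple-chevron launch into the hipLaunchKernelGGL macro with the same
// grid, block, dynamic-shared-memory and stream arguments. `scale_kernel`
// is a made-up example kernel.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

void launch_scale(float* d_data, float factor, int n, cudaStream_t stream) {
  const int block = 256;
  const int grid = (n + block - 1) / block;
  // CUDA form (as in the .cu file of this pair, below):
  scale_kernel<<<grid, block, 0, stream>>>(d_data, factor, n);
  // Equivalent HIP form emitted by hipify (as in the .hip file above):
  //   hipLaunchKernelGGL(scale_kernel, dim3(grid), dim3(block), 0, stream,
  //                      d_data, factor, n);
}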
d4d3758f7d293d561492a8616607bd9bdc067a44.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp" #include "HugeCTR/include/utils.hpp" namespace HugeCTR { namespace { // reorder operation before all2all in backward propagation template <typename TypeEmbeddingComp> __global__ void backward_reorder_kernel(int batch_size_per_gpu, int slot_num, int embedding_vec_size, int gpu_num, const TypeEmbeddingComp *input, TypeEmbeddingComp *output) { // blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding // vector gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one // sample on each GPU Each thread needs to process slot_num slots int tid = threadIdx.x; int bid = blockIdx.x; int sample_id = bid; // sample_id on the current GPU if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) { int src_offset = sample_id * slot_num * embedding_vec_size; int src_stride = embedding_vec_size; for (int slot_id = 0; slot_id < slot_num; slot_id++) { int gpu_id = slot_id % gpu_num; int offset_pre = 0; // offset in previous gpus for (int id = 0; id < gpu_id; id++) { int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0); int stride = batch_size_per_gpu * slot_num_per_gpu; offset_pre += stride; } int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0); int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu int dst_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size; int src_addr = src_offset + src_stride * slot_id; output[dst_addr + tid] = input[src_addr + tid]; } } } // reorder operation before all2all in backward propagation __global__ void backward_reorder_align2_kernel(int batch_size_per_gpu, int slot_num, int embedding_vec_size, int gpu_num, const __half *input, __half *output) { // blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding // vector gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one // sample on each GPU Each thread needs to process slot_num slots int tid = threadIdx.x; int bid = blockIdx.x; int sample_id = bid; // sample_id on the current GPU if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) { const __half2 *input2 = reinterpret_cast<const __half2 *>(input); __half2 *output2 = reinterpret_cast<__half2 *>(output); int src_offset = sample_id * slot_num * embedding_vec_size; int src_stride = embedding_vec_size; for (int slot_id = 0; slot_id < slot_num; slot_id++) { int gpu_id = slot_id % gpu_num; int offset_pre = 0; // offset in previous gpus for (int id = 0; id < gpu_id; id++) { int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0); int stride = batch_size_per_gpu * slot_num_per_gpu; offset_pre += stride; } int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 
1 : 0); int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu int dst_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size; int src_addr = src_offset + src_stride * slot_id; output2[dst_addr + tid] = input2[src_addr + tid]; } } } template <typename TypeEmbeddingComp> void do_backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count, const TypeEmbeddingComp *input, TypeEmbeddingComp *output, cudaStream_t stream) { const size_t grid_size = batch_size_per_gpu; const size_t block_size = embedding_vec_size; backward_reorder_kernel<<<grid_size, block_size, 0, stream>>>( batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output); } void do_backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count, const __half *input, __half *output, cudaStream_t stream) { const size_t grid_size = batch_size_per_gpu; if (embedding_vec_size % 2 == 0) { const size_t block_size = embedding_vec_size / 2; backward_reorder_align2_kernel<<<grid_size, block_size, 0, stream>>>( batch_size_per_gpu, slot_num, embedding_vec_size / 2, total_gpu_count, input, output); } else { const size_t block_size = embedding_vec_size; backward_reorder_kernel<<<grid_size, block_size, 0, stream>>>( batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output); } } } // namespace template <typename TypeEmbeddingComp> void SparseEmbeddingFunctors::backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, const Tensors2<TypeEmbeddingComp> &src_tensors, Tensors2<TypeEmbeddingComp> &dst_tensors, const ResourceManager &resource_manager) { size_t total_gpu_count = resource_manager.get_global_gpu_count(); backward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, src_tensors, dst_tensors, resource_manager); } template void SparseEmbeddingFunctors::backward_reorder<float>( size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors, const ResourceManager &resource_manager); template void SparseEmbeddingFunctors::backward_reorder<__half>( size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors, const ResourceManager &resource_manager); template <typename TypeEmbeddingComp> void SparseEmbeddingFunctors::backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count, const Tensors2<TypeEmbeddingComp> &src_tensors, Tensors2<TypeEmbeddingComp> &dst_tensors, const ResourceManager &resource_manager) { size_t local_gpu_count = resource_manager.get_local_gpu_count(); CudaDeviceContext context; for (size_t id = 0; id < local_gpu_count; id++) { const auto &local_gpu = resource_manager.get_local_gpu(id); context.set_device(local_gpu->get_device_id()); do_backward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, src_tensors[id].get_ptr(), dst_tensors[id].get_ptr(), local_gpu->get_stream()); } } template void SparseEmbeddingFunctors::backward_reorder<float>( size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count, const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors, const ResourceManager &resource_manager); template void SparseEmbeddingFunctors::backward_reorder<__half>( size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, 
size_t total_gpu_count, const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors, const ResourceManager &resource_manager); } // namespace HugeCTR
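// ---------------------------------------------------------------------------
// A host-side sketch (not HugeCTR code) of the index arithmetic used by
// backward_reorder_kernel above, handy for reasoning about or unit-testing
// the reorder layout: a (sample, slot) embedding vector moves from
// sample-major order into per-GPU blocks, where GPU g owns the slots with
// slot_id % gpu_num == g. Function and variable names are illustrative.
// ---------------------------------------------------------------------------
#include <cstdio>

// Destination offset in units of embedding vectors (the kernel multiplies
// this by embedding_vec_size to get an element offset).
static int reorder_dst_index(int sample_id, int slot_id,
                             int slot_num, int gpu_num, int batch_size_per_gpu) {
  int gpu_id = slot_id % gpu_num;
  int offset_pre = 0;  // vectors owned by GPUs before gpu_id
  for (int id = 0; id < gpu_id; ++id) {
    int slots_on_id = slot_num / gpu_num + ((id < slot_num % gpu_num) ? 1 : 0);
    offset_pre += batch_size_per_gpu * slots_on_id;
  }
  int slots_on_gpu = slot_num / gpu_num + ((gpu_id < slot_num % gpu_num) ? 1 : 0);
  int offset_cur = sample_id * slots_on_gpu;  // samples before this one on gpu_id
  return offset_cur + offset_pre + slot_id / gpu_num;
}

int main() {
  const int slot_num = 5, gpu_num = 2, batch_size_per_gpu = 3;
  for (int sample = 0; sample < batch_size_per_gpu; ++sample)
    for (int slot = 0; slot < slot_num; ++slot)
      std::printf("sample %d slot %d -> dst vector %d\n", sample, slot,
                  reorder_dst_index(sample, slot, slot_num, gpu_num,
                                    batch_size_per_gpu));
  return 0;
}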
67b828ce5fbb4532d6b046c94a9b788387fbddac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Thrust code needs to be compiled with nvcc #include <memory> #include "core/providers/cuda/shared_inc/cuda_utils.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "cudnn_common.h" namespace onnxruntime { namespace cuda { template <typename T, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void _Fill( T* output_data, T val, CUDA_LONG N) { CUDA_LONG id = NumElementsPerThread * blockDim.x * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output_data[id] = val; id += blockDim.x; } } } template <typename T> void Fill(hipStream_t stream, T* output, T value, int64_t count) { int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _Fill<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>) , dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, output, value, N); } template <typename T> class ConstantBufferImpl : public IConstantBuffer<T> { public: ConstantBufferImpl(T val) : val_(val), buffer_(nullptr), count_(0) { } ~ConstantBufferImpl() { if (buffer_) hipFree(buffer_); } virtual const T* GetBuffer(hipStream_t stream, size_t count) { if (count > count_) { if (buffer_) { hipFree(buffer_); buffer_ = nullptr; } CUDA_CALL_THROW(hipMalloc(&buffer_, count * sizeof(T))); count_ = count; Fill(stream, buffer_, val_, count); } return buffer_; } private: T* buffer_; size_t count_; T val_; }; template <typename T> std::unique_ptr<IConstantBuffer<T>> CreateConstantOnes() { return std::make_unique<ConstantBufferImpl<T>>(Consts<T>::One); } template std::unique_ptr<IConstantBuffer<float>> CreateConstantOnes<float>(); template std::unique_ptr<IConstantBuffer<double>> CreateConstantOnes<double>(); template std::unique_ptr<IConstantBuffer<half>> CreateConstantOnes<half>(); #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) template std::unique_ptr<IConstantBuffer<nv_bfloat16>> CreateConstantOnes<nv_bfloat16>(); #endif #define SPECIALIZED_FILL(T) \ template void Fill<T>(hipStream_t stream, T * output, T value, int64_t count); SPECIALIZED_FILL(int8_t) SPECIALIZED_FILL(int16_t) SPECIALIZED_FILL(int32_t) SPECIALIZED_FILL(int64_t) SPECIALIZED_FILL(float) SPECIALIZED_FILL(double) SPECIALIZED_FILL(__half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_FILL(nv_bfloat16) #endif } // namespace cuda } // namespace onnxruntime
67b828ce5fbb4532d6b046c94a9b788387fbddac.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Thrust code needs to be compiled with nvcc #include <memory> #include "core/providers/cuda/shared_inc/cuda_utils.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "cudnn_common.h" namespace onnxruntime { namespace cuda { template <typename T, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void _Fill( T* output_data, T val, CUDA_LONG N) { CUDA_LONG id = NumElementsPerThread * blockDim.x * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output_data[id] = val; id += blockDim.x; } } } template <typename T> void Fill(cudaStream_t stream, T* output, T value, int64_t count) { int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _Fill<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread> <<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(output, value, N); } template <typename T> class ConstantBufferImpl : public IConstantBuffer<T> { public: ConstantBufferImpl(T val) : val_(val), buffer_(nullptr), count_(0) { } ~ConstantBufferImpl() { if (buffer_) cudaFree(buffer_); } virtual const T* GetBuffer(cudaStream_t stream, size_t count) { if (count > count_) { if (buffer_) { cudaFree(buffer_); buffer_ = nullptr; } CUDA_CALL_THROW(cudaMalloc(&buffer_, count * sizeof(T))); count_ = count; Fill(stream, buffer_, val_, count); } return buffer_; } private: T* buffer_; size_t count_; T val_; }; template <typename T> std::unique_ptr<IConstantBuffer<T>> CreateConstantOnes() { return std::make_unique<ConstantBufferImpl<T>>(Consts<T>::One); } template std::unique_ptr<IConstantBuffer<float>> CreateConstantOnes<float>(); template std::unique_ptr<IConstantBuffer<double>> CreateConstantOnes<double>(); template std::unique_ptr<IConstantBuffer<half>> CreateConstantOnes<half>(); #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) template std::unique_ptr<IConstantBuffer<nv_bfloat16>> CreateConstantOnes<nv_bfloat16>(); #endif #define SPECIALIZED_FILL(T) \ template void Fill<T>(cudaStream_t stream, T * output, T value, int64_t count); SPECIALIZED_FILL(int8_t) SPECIALIZED_FILL(int16_t) SPECIALIZED_FILL(int32_t) SPECIALIZED_FILL(int64_t) SPECIALIZED_FILL(float) SPECIALIZED_FILL(double) SPECIALIZED_FILL(__half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_FILL(nv_bfloat16) #endif } // namespace cuda } // namespace onnxruntime
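// ---------------------------------------------------------------------------
// A standalone sketch (not onnxruntime code) of the fill pattern used by
// _Fill above: each block covers ElementsPerThread * blockDim.x consecutive
// elements, and each thread writes ElementsPerThread values strided by
// blockDim.x. The 256/4 configuration stands in for
// GridDim::maxThreadsPerBlock / maxElementsPerThread, which are
// onnxruntime-internal constants.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>

template <typename T, int ThreadsPerBlock, int ElementsPerThread>
__global__ void fill_kernel(T* out, T val, int n) {
  int id = ElementsPerThread * blockDim.x * blockIdx.x + threadIdx.x;
#pragma unroll
  for (int i = 0; i < ElementsPerThread; ++i) {
    if (id < n) {
      out[id] = val;
      id += blockDim.x;
    }
  }
}

int main() {
  const int n = 1000;
  const int threads = 256, per_thread = 4;
  const int blocks = (n + threads * per_thread - 1) / (threads * per_thread);

  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  fill_kernel<float, threads, per_thread><<<blocks, threads>>>(d, 3.5f, n);

  float h[n];
  cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("h[0]=%.1f h[%d]=%.1f\n", h[0], n - 1, h[n - 1]);
  cudaFree(d);
  return 0;
}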
e6cb128c3ee30973622960dfb42233fb91ca2f39.hip
// !!! This is a file automatically generated by hipify!!! // -*- C++ -*- // -*- coding: utf-8 -*- // // michael a.g. avzis <[email protected]> // parasim // (c) 1998-2019 all rights reserved // // configuration #include <portinfo> // standard library #include <cmath> #include <algorithm> #include <numeric> // cuda #include <hip/hip_runtime.h> #include <hip/hip_cooperative_groups.h> // support #include <pyre/journal.h> #include <pyre/grid.h> #include <pyre/timers.h> // grab the cpu sat #include <ampcor/correlators.h> // the data grid using grid_t = pyre::grid::simple_t<2, double, std::array<std::size_t,2>>; // my reduction __global__ static void correlate(grid_t::cell_type * ref, std::size_t rdim, grid_t::cell_type * tgt, std::size_t tdim, std::size_t margin, std::size_t rowOffset, std::size_t colOffset, grid_t::cell_type * correlation); // driver int main(int argc, char *argv[]) { // the plan shape int px = 120; int py = 40; // unpack command line arguments if (argc > 1) { px = std::atoi(argv[1]); } if (argc > 2) { py = std::atoi(argv[2]); } // make a timer pyre::timer_t timer("ampcor.cuda.tile"); // make a channel pyre::journal::info_t channel("ampcor.cuda.tile"); // the plan int P = px*py; // show me channel << pyre::journal::at(__HERE__) << "plan: " << px << "x" << py << " tiles, for a total of " << P << " pairings" << pyre::journal::endl; // the reference grid dimension grid_t::index_type::value_type rdim = 128; // the target grid shape includes a margin grid_t::index_type::value_type margin = 32; // so here is its dimension grid_t::index_type::value_type tdim = margin + rdim + margin; // the shapes grid_t::shape_type rshape {rdim, rdim}; grid_t::shape_type tshape {tdim, tdim}; grid_t::shape_type cshape { margin+1, margin+1 }; // declare the grids grid_t ref { rshape }; grid_t tgt { tshape }; grid_t cor { cshape }; // the sizes grid_t::size_type rsize = ref.layout().size(); grid_t::size_type tsize = tgt.layout().size(); grid_t::size_type csize = cor.layout().size(); // memory footprints grid_t::size_type rfootprint = rsize * sizeof(grid_t::cell_type); grid_t::size_type tfootprint = tsize * sizeof(grid_t::cell_type); grid_t::size_type cfootprint = csize * sizeof(grid_t::cell_type); // pick a value for the reference grid payload grid_t::cell_type value = 1; // and a value for the mask around the border grid_t::cell_type mask = 0; // fill the reference grid the easy way for (auto idx : ref.layout()) { // by setting all slots to {value} ref[idx] = value; } #if 0 // show me channel << pyre::journal::at(__HERE__); // go through the ref slots for (auto idx : ref.layout()) { // show me the contents channel << "ref[" << idx << "] = " << ref[idx] << pyre::journal::newline; } // flush channel << pyre::journal::endl; #endif // initialize the target grid for (auto idx : tgt.layout()) { // by setting all slots to {mask} tgt[idx] = mask; } // turn the margin into an index grid_t::index_type mindex { margin, margin }; // specify a region in the interior grid_t::index_type start = tgt.layout().low() + mindex; grid_t::index_type end = tgt.layout().high() - mindex; // fill the interior for (auto idx : tgt.layout().slice(start, end)) { // with {value} tgt[idx] = value; } #if 0 // show me channel << pyre::journal::at(__HERE__); // go through the ref slots for (auto idx : tgt.layout()) { // show me the contents channel << "tgt[" << idx << "] = " << tgt[idx] << pyre::journal::newline; } // flush channel << pyre::journal::endl; #endif // grab a device hipError_t status = hipSetDevice(0); // if anything went 
wrong if (status != hipSuccess) { // make an error channel pyre::journal::error_t error("ampcor.cuda.tile"); // show me error << pyre::journal::at(__HERE__) << "while grabbing a device: " << hipGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // find a spot for the reference grids grid_t::cell_type * dRef = nullptr; // allocate device memory for the grid payload status = hipMallocManaged(&dRef, P * rfootprint); // if something went wrong if (status != hipSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while allocating device memory for the reference payload: " << hipGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // show me channel << pyre::journal::at(__HERE__) << "allocated an arena of " << (P*rfootprint) << " bytes for the grid data at " << dRef << pyre::journal::endl; // find a spot for the target grids grid_t::cell_type * dTgt = nullptr; // allocate device memory for the grid payload status = hipMallocManaged(&dTgt, P * tfootprint); // if something went wrong if (status != hipSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while allocating device memory for the target payload: " << hipGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // show me channel << pyre::journal::at(__HERE__) << "allocated an arena of " << (P*tfootprint) << " bytes for the grid data at " << dTgt << pyre::journal::endl; // make room for the answer grid_t::cell_type * dCorrelation = nullptr; // allocate device memory for the grid payload status = hipMallocManaged(&dCorrelation, P * cfootprint); // if something went wrong if (status != hipSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while allocating device memory for the correlation matrix: " << hipGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // show me channel << pyre::journal::at(__HERE__) << "allocated an arena of " << P * cfootprint << " bytes for the correlation matrix at " << dCorrelation << pyre::journal::endl; // start the clock timer.reset().start(); // move the data to the device for (auto pid = 0; pid < P; ++pid) { // first the reference grid status = hipMemcpy(dRef + pid*rsize, ref.data(), rfootprint, hipMemcpyHostToDevice); // check if (status != hipSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while copying data from reference grid #" << pid << " to the device: " << hipGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // then the target grid status = hipMemcpy(dTgt + pid*tsize, tgt.data(), tfootprint, hipMemcpyHostToDevice); // check if (status != hipSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while copying data from target grid #" << pid << " to the device: " << hipGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } } // stop the clock timer.stop(); // show me channel << pyre::journal::at(__HERE__) << "moving reference and target data to the device: " << 1e6 * timer.read() << " s" << pyre::journal::endl; // figure out the job layout // each 
thread block takes care of one grid pair std::size_t B = P; // the number of threads is equal to the number of rows/cols in the reference grid std::size_t T = rdim; // each thread stores its partial sum in shared memory, so we need as much shared memory // per block as there are columns in the {ref} grid std::size_t S = T * sizeof(grid_t::cell_type); // show me channel << pyre::journal::at(__HERE__) << "launching " << B << " blocks of " << T << " threads each" << pyre::journal::endl; // start the clock timer.reset().start(); // go through all possible row offsets for (auto rowOffset = 0; rowOffset < margin + 1; ++rowOffset) { // and all possible column offsets for (auto colOffset = 0; colOffset < margin + 1; ++colOffset) { // launch a kernel // N.B.: kernel launch is an implicit barrier, so no need for extra synchronization hipLaunchKernelGGL(( correlate) , dim3(B), dim3(T), S, 0, dRef, rdim, dTgt, tdim, margin, rowOffset, colOffset, dCorrelation); } } // wait for the device to finish status = hipDeviceSynchronize(); // stop the clock timer.stop(); // show me channel << pyre::journal::at(__HERE__) << "kernel: " << 1e6 * timer.read() << " s" << pyre::journal::endl; // check if (status != hipSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while waiting for a kernel to finish: " << hipGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // harvest the results timer.reset().start(); // make room on the host for the correlation grids grid_t::cell_type * correlation = new grid_t::cell_type[P * csize]; // move the correlation results status = hipMemcpy(correlation, dCorrelation, P * cfootprint, hipMemcpyDeviceToHost); // stop the clock timer.stop(); // show me channel << pyre::journal::at(__HERE__) << "harvesting the results from the device: " << 1e6 * timer.read() << " s" << pyre::journal::endl; // check if (status != hipSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while harvesting the sum from the device: " << hipGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // verify timer.reset().start(); // go through all the pairs for (auto pid = 0; pid < P; ++pid) { // the starting point of this result grid std::size_t start = pid * (margin+1)*(margin+1); // go through all row offsets for (auto rowOffset = 0; rowOffset < margin+1; ++rowOffset) { // and all column offsets for (auto colOffset = 0; colOffset < margin+1; ++colOffset) { // compute the number of times {value} shows up in this sub-grid auto live = (rdim-std::abs(margin-rowOffset))*(rdim-std::abs(margin-colOffset)); // therefore, the expected result is auto expected = live * value; // get the actual auto actual = correlation[start + rowOffset*(margin+1) + colOffset]; // if they don't match if (expected != actual) { // make a channel pyre::journal::error_t error("ampcor.cuda.tile"); // complain error << pyre::journal::at(__HERE__) << "cor[" << pid << "," << rowOffset << "," << colOffset << "] = " << actual << " != " << expected << pyre::journal::endl; // and bail return 1; } } } } // stop the clock timer.stop(); // show me channel << pyre::journal::at(__HERE__) << "verifying the results: " << 1e6 * timer.read() << " s" << pyre::journal::endl; // clean up hipFree(dRef); hipFree(dTgt); hipFree(dCorrelation); delete [] correlation; // all done return 0; } // the kernel __global__ static 
void correlate(grid_t::cell_type * ref, std::size_t rdim, // ref grid data and shape grid_t::cell_type * tgt, std::size_t tdim, // tgt grid data and shape std::size_t margin, std::size_t rowOffset, std::size_t colOffset, grid_t::cell_type * correlation) { // build the workload descriptors // global // std::size_t B = gridDim.x; // number of blocks // std::size_t T = blockDim.x; // number of threads per block // std::size_t W = B*T; // total number of workers // local std::size_t b = blockIdx.x; // my block id std::size_t t = threadIdx.x; // my thread id within my block // std::size_t w = b*T + t; // my worker id // get access to my shared memory extern __shared__ grid_t::cell_type scratch[]; // get a handle to this thread block group cooperative_groups::thread_block cta = cooperative_groups::this_thread_block(); // initialize my partial sum grid_t::cell_type num = 0; // my ref starting point is column t of grid b std::size_t rstart = b*rdim*rdim + t; // my tgt starting point is column t of grid b at (rowOffset, colOffset) std::size_t tstart = (b*tdim*tdim) + (rowOffset*tdim + colOffset) + t; // run down the two columns for (std::size_t idx=0; idx < rdim; ++idx) { // fetch the ref value grid_t::cell_type r = ref[rstart + idx*rdim]; // fetch the tgt value grid_t::cell_type t = tgt[tstart + idx*tdim]; // update the numerator num += r * t; } // save my partial result scratch[t] = num; // barrier: make sure everybody is done updating shared memory with their partial sum cta.sync(); // now, do the reduction in shared memory // N.B.: we assume the warp size is 32; this will need updates if the warp size changes if (t < 64) { // pull a neighbor's value num += scratch[t + 64]; // and update my slot scratch[t] = num; } // barrier: make sure everybody is done updating shared memory with their partial sum cta.sync(); // we are now within a warp if (t < 32) { // get a handle to the active thread group cooperative_groups::coalesced_group active = cooperative_groups::coalesced_threads(); // pull the partial result from the second warp num += scratch[t + 32]; // the power-of-2 threads for (int offset = 16; offset > 0; offset >>= 1) { // reduce using {shuffle} num += active.shfl_down(num, offset); } } // the master thread of each block if (t == 0) { // computes the slot where this result goes std::size_t slot = b*(margin+1)*(margin+1) + rowOffset*(margin+1) + colOffset; // writes the sum to the result vector correlation[slot] = num; } // all done return; } // end of file
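// ---------------------------------------------------------------------------
// A small helper sketch (not part of ampcor) that factors out the repeated
// "check status, print the error name, bail" blocks seen above into a single
// macro, and also catches kernel launch errors via cudaGetLastError before
// synchronizing. Shown with CUDA runtime names; the hipified file above uses
// the hip* equivalents (hipGetLastError, hipGetErrorName,
// hipDeviceSynchronize). The macro assumes it expands inside a function that
// returns int, as in the main() above.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>

#define CHECK_CUDA(call)                                                      \
  do {                                                                        \
    cudaError_t err_ = (call);                                                \
    if (err_ != cudaSuccess) {                                                \
      std::fprintf(stderr, "%s:%d: %s failed: %s (%d)\n", __FILE__, __LINE__, \
                   #call, cudaGetErrorName(err_), static_cast<int>(err_));    \
      return 1;                                                               \
    }                                                                         \
  } while (0)

// Typical use around a kernel launch:
//   kernel<<<grid, block, shmem, stream>>>(...);
//   CHECK_CUDA(cudaGetLastError());        // launch-time errors
//   CHECK_CUDA(cudaDeviceSynchronize());   // execution-time errors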
e6cb128c3ee30973622960dfb42233fb91ca2f39.cu
// -*- C++ -*- // -*- coding: utf-8 -*- // // michael a.g. aïvázis <[email protected]> // parasim // (c) 1998-2019 all rights reserved // // configuration #include <portinfo> // standard library #include <cmath> #include <algorithm> #include <numeric> // cuda #include <cuda_runtime.h> #include <cooperative_groups.h> // support #include <pyre/journal.h> #include <pyre/grid.h> #include <pyre/timers.h> // grab the cpu sat #include <ampcor/correlators.h> // the data grid using grid_t = pyre::grid::simple_t<2, double, std::array<std::size_t,2>>; // my reduction __global__ static void correlate(grid_t::cell_type * ref, std::size_t rdim, grid_t::cell_type * tgt, std::size_t tdim, std::size_t margin, std::size_t rowOffset, std::size_t colOffset, grid_t::cell_type * correlation); // driver int main(int argc, char *argv[]) { // the plan shape int px = 120; int py = 40; // unpack command line arguments if (argc > 1) { px = std::atoi(argv[1]); } if (argc > 2) { py = std::atoi(argv[2]); } // make a timer pyre::timer_t timer("ampcor.cuda.tile"); // make a channel pyre::journal::info_t channel("ampcor.cuda.tile"); // the plan int P = px*py; // show me channel << pyre::journal::at(__HERE__) << "plan: " << px << "x" << py << " tiles, for a total of " << P << " pairings" << pyre::journal::endl; // the reference grid dimension grid_t::index_type::value_type rdim = 128; // the target grid shape includes a margin grid_t::index_type::value_type margin = 32; // so here is its dimension grid_t::index_type::value_type tdim = margin + rdim + margin; // the shapes grid_t::shape_type rshape {rdim, rdim}; grid_t::shape_type tshape {tdim, tdim}; grid_t::shape_type cshape { margin+1, margin+1 }; // declare the grids grid_t ref { rshape }; grid_t tgt { tshape }; grid_t cor { cshape }; // the sizes grid_t::size_type rsize = ref.layout().size(); grid_t::size_type tsize = tgt.layout().size(); grid_t::size_type csize = cor.layout().size(); // memory footprints grid_t::size_type rfootprint = rsize * sizeof(grid_t::cell_type); grid_t::size_type tfootprint = tsize * sizeof(grid_t::cell_type); grid_t::size_type cfootprint = csize * sizeof(grid_t::cell_type); // pick a value for the reference grid payload grid_t::cell_type value = 1; // and a value for the mask around the border grid_t::cell_type mask = 0; // fill the reference grid the easy way for (auto idx : ref.layout()) { // by setting all slots to {value} ref[idx] = value; } #if 0 // show me channel << pyre::journal::at(__HERE__); // go through the ref slots for (auto idx : ref.layout()) { // show me the contents channel << "ref[" << idx << "] = " << ref[idx] << pyre::journal::newline; } // flush channel << pyre::journal::endl; #endif // initialize the target grid for (auto idx : tgt.layout()) { // by setting all slots to {mask} tgt[idx] = mask; } // turn the margin into an index grid_t::index_type mindex { margin, margin }; // specify a region in the interior grid_t::index_type start = tgt.layout().low() + mindex; grid_t::index_type end = tgt.layout().high() - mindex; // fill the interior for (auto idx : tgt.layout().slice(start, end)) { // with {value} tgt[idx] = value; } #if 0 // show me channel << pyre::journal::at(__HERE__); // go through the ref slots for (auto idx : tgt.layout()) { // show me the contents channel << "tgt[" << idx << "] = " << tgt[idx] << pyre::journal::newline; } // flush channel << pyre::journal::endl; #endif // grab a device cudaError_t status = cudaSetDevice(0); // if anything went wrong if (status != cudaSuccess) { // make an error channel 
pyre::journal::error_t error("ampcor.cuda.tile"); // show me error << pyre::journal::at(__HERE__) << "while grabbing a device: " << cudaGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // find a spot for the reference grids grid_t::cell_type * dRef = nullptr; // allocate device memory for the grid payload status = cudaMallocManaged(&dRef, P * rfootprint); // if something went wrong if (status != cudaSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while allocating device memory for the reference payload: " << cudaGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // show me channel << pyre::journal::at(__HERE__) << "allocated an arena of " << (P*rfootprint) << " bytes for the grid data at " << dRef << pyre::journal::endl; // find a spot for the target grids grid_t::cell_type * dTgt = nullptr; // allocate device memory for the grid payload status = cudaMallocManaged(&dTgt, P * tfootprint); // if something went wrong if (status != cudaSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while allocating device memory for the target payload: " << cudaGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // show me channel << pyre::journal::at(__HERE__) << "allocated an arena of " << (P*tfootprint) << " bytes for the grid data at " << dTgt << pyre::journal::endl; // make room for the answer grid_t::cell_type * dCorrelation = nullptr; // allocate device memory for the grid payload status = cudaMallocManaged(&dCorrelation, P * cfootprint); // if something went wrong if (status != cudaSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while allocating device memory for the correlation matrix: " << cudaGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // show me channel << pyre::journal::at(__HERE__) << "allocated an arena of " << P * cfootprint << " bytes for the correlation matrix at " << dCorrelation << pyre::journal::endl; // start the clock timer.reset().start(); // move the data to the device for (auto pid = 0; pid < P; ++pid) { // first the reference grid status = cudaMemcpy(dRef + pid*rsize, ref.data(), rfootprint, cudaMemcpyHostToDevice); // check if (status != cudaSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while copying data from reference grid #" << pid << " to the device: " << cudaGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // then the target grid status = cudaMemcpy(dTgt + pid*tsize, tgt.data(), tfootprint, cudaMemcpyHostToDevice); // check if (status != cudaSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while copying data from target grid #" << pid << " to the device: " << cudaGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } } // stop the clock timer.stop(); // show me channel << pyre::journal::at(__HERE__) << "moving reference and target data to the device: " << 1e6 * timer.read() << " μs" << pyre::journal::endl; // figure out the job layout // each thread block takes care of one grid pair 
std::size_t B = P; // the number of threads is equal to the number of rows/cols in the reference grid std::size_t T = rdim; // each thread stores its partial sum in shared memory, so we need as much shared memory // per block as there are columns in the {ref} grid std::size_t S = T * sizeof(grid_t::cell_type); // show me channel << pyre::journal::at(__HERE__) << "launching " << B << " blocks of " << T << " threads each" << pyre::journal::endl; // start the clock timer.reset().start(); // go through all possible row offsets for (auto rowOffset = 0; rowOffset < margin + 1; ++rowOffset) { // and all possible column offsets for (auto colOffset = 0; colOffset < margin + 1; ++colOffset) { // launch a kernel // N.B.: kernel launch is an implicit barrier, so no need for extra synchronization correlate <<<B, T, S>>> (dRef, rdim, dTgt, tdim, margin, rowOffset, colOffset, dCorrelation); } } // wait for the device to finish status = cudaDeviceSynchronize(); // stop the clock timer.stop(); // show me channel << pyre::journal::at(__HERE__) << "kernel: " << 1e6 * timer.read() << " μs" << pyre::journal::endl; // check if (status != cudaSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while waiting for a kernel to finish: " << cudaGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // harvest the results timer.reset().start(); // make room on the host for the correlation grids grid_t::cell_type * correlation = new grid_t::cell_type[P * csize]; // move the correlation results status = cudaMemcpy(correlation, dCorrelation, P * cfootprint, cudaMemcpyDeviceToHost); // stop the clock timer.stop(); // show me channel << pyre::journal::at(__HERE__) << "harvesting the results from the device: " << 1e6 * timer.read() << " μs" << pyre::journal::endl; // check if (status != cudaSuccess) { // make a channel pyre::journal::error_t channel("ampcor.cuda.tile"); // complain channel << pyre::journal::at(__HERE__) << "while harvesting the sum from the device: " << cudaGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail return 1; } // verify timer.reset().start(); // go through all the pairs for (auto pid = 0; pid < P; ++pid) { // the starting point of this result grid std::size_t start = pid * (margin+1)*(margin+1); // go through all row offsets for (auto rowOffset = 0; rowOffset < margin+1; ++rowOffset) { // and all column offsets for (auto colOffset = 0; colOffset < margin+1; ++colOffset) { // compute the number of times {value} shows up in this sub-grid auto live = (rdim-std::abs(margin-rowOffset))*(rdim-std::abs(margin-colOffset)); // therefore, the expected result is auto expected = live * value; // get the actual auto actual = correlation[start + rowOffset*(margin+1) + colOffset]; // if they don't match if (expected != actual) { // make a channel pyre::journal::error_t error("ampcor.cuda.tile"); // complain error << pyre::journal::at(__HERE__) << "cor[" << pid << "," << rowOffset << "," << colOffset << "] = " << actual << " != " << expected << pyre::journal::endl; // and bail return 1; } } } } // stop the clock timer.stop(); // show me channel << pyre::journal::at(__HERE__) << "verifying the results: " << 1e6 * timer.read() << " μs" << pyre::journal::endl; // clean up cudaFree(dRef); cudaFree(dTgt); cudaFree(dCorrelation); delete [] correlation; // all done return 0; } // the kernel __global__ static void correlate(grid_t::cell_type * ref, std::size_t rdim, // 
ref grid data and shape grid_t::cell_type * tgt, std::size_t tdim, // tgt grid data and shape std::size_t margin, std::size_t rowOffset, std::size_t colOffset, grid_t::cell_type * correlation) { // build the workload descriptors // global // std::size_t B = gridDim.x; // number of blocks // std::size_t T = blockDim.x; // number of threads per block // std::size_t W = B*T; // total number of workers // local std::size_t b = blockIdx.x; // my block id std::size_t t = threadIdx.x; // my thread id within my block // std::size_t w = b*T + t; // my worker id // get access to my shared memory extern __shared__ grid_t::cell_type scratch[]; // get a handle to this thread block group cooperative_groups::thread_block cta = cooperative_groups::this_thread_block(); // initialize my partial sum grid_t::cell_type num = 0; // my ref starting point is column t of grid b std::size_t rstart = b*rdim*rdim + t; // my tgt starting point is column t of grid b at (rowOffset, colOffset) std::size_t tstart = (b*tdim*tdim) + (rowOffset*tdim + colOffset) + t; // run down the two columns for (std::size_t idx=0; idx < rdim; ++idx) { // fetch the ref value grid_t::cell_type r = ref[rstart + idx*rdim]; // fetch the tgt value grid_t::cell_type t = tgt[tstart + idx*tdim]; // update the numerator num += r * t; } // save my partial result scratch[t] = num; // barrier: make sure everybody is done updating shared memory with their partial sum cta.sync(); // now, do the reduction in shared memory // N.B.: we assume the warp size is 32; this will need updates if the warp size changes if (t < 64) { // pull a neighbor's value num += scratch[t + 64]; // and update my slot scratch[t] = num; } // barrier: make sure everybody is done updating shared memory with their partial sum cta.sync(); // we are now within a warp if (t < 32) { // get a handle to the active thread group cooperative_groups::coalesced_group active = cooperative_groups::coalesced_threads(); // pull the partial result from the second warp num += scratch[t + 32]; // the power-of-2 threads for (int offset = 16; offset > 0; offset >>= 1) { // reduce using {shuffle} num += active.shfl_down(num, offset); } } // the master thread of each block if (t == 0) { // computes the slot where this result goes std::size_t slot = b*(margin+1)*(margin+1) + rowOffset*(margin+1) + colOffset; // writes the sum to the result vector correlation[slot] = num; } // all done return; } // end of file
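// ---------------------------------------------------------------------------
// A self-contained sketch (not ampcor code) of the block-reduction pattern
// used at the end of correlate(): 128 threads write partial sums to shared
// memory, fold 128 -> 64 -> 32 in shared memory, then finish within a single
// warp using cooperative-groups shuffles. Kernel and buffer names are
// illustrative; like the kernel above, it assumes blockDim.x == 128 and a
// warp size of 32.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cooperative_groups.h>
#include <cstdio>

namespace cg = cooperative_groups;

__global__ void block_sum_128(const double* in, double* out) {
  __shared__ double scratch[128];
  cg::thread_block cta = cg::this_thread_block();

  unsigned t = threadIdx.x;
  double v = in[blockIdx.x * 128 + t];  // one partial value per thread
  scratch[t] = v;
  cta.sync();

  if (t < 64) { v += scratch[t + 64]; scratch[t] = v; }  // 128 -> 64
  cta.sync();

  if (t < 32) {                                          // 64 -> 1 in a warp
    cg::coalesced_group active = cg::coalesced_threads();
    v += scratch[t + 32];
    for (int offset = 16; offset > 0; offset >>= 1)
      v += active.shfl_down(v, offset);
  }
  if (t == 0) out[blockIdx.x] = v;
}

int main() {
  const int n = 128;
  double h[n];
  for (int i = 0; i < n; ++i) h[i] = 1.0;  // expected block sum: 128

  double *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(double));
  cudaMalloc(&d_out, sizeof(double));
  cudaMemcpy(d_in, h, n * sizeof(double), cudaMemcpyHostToDevice);

  block_sum_128<<<1, 128>>>(d_in, d_out);
  double sum = 0.0;
  cudaMemcpy(&sum, d_out, sizeof(double), cudaMemcpyDeviceToHost);
  std::printf("block sum = %.1f\n", sum);

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}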
420be7146bbf4a15cd8d264962fcfd3a6dd7a795.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "fxaa.h" #include "bsp.h" #include "cudabsp.h" #include "cudarad.h" #include "cudautils.h" static __device__ inline float luma_from_rgb(float3 rgb) { return sqrt(dot(rgb / 255.0, make_float3(0.299, 0.587, 0.114))); } static __device__ inline float clamp(float x, float lower, float upper) { return fmaxf(lower, fminf(upper, x)); } static __device__ float3 subsample( float3* samples, size_t width, size_t height, float s, float t ) { //float3 zero = make_float3(0.0, 0.0, 0.0); int s0 = static_cast<int>(floorf(s)); int t0 = static_cast<int>(floorf(t)); int s1 = s0 + 1; int t1 = t0 + 1; if (s0 < 0) { s0 = 0; } if (t0 < 0) { t0 = 0; } if (s1 >= width) { s1 = width - 1; } if (t1 >= height) { t1 = height - 1; } float rWeight = s - floorf(s); float lWeight = 1.0 - rWeight; float dWeight = t - floorf(t); float uWeight = 1.0 - dWeight; float3 sampleUL = samples[t0 * width + s0]; float3 sampleUR = samples[t0 * width + s1]; float3 sampleDL = samples[t1 * width + s0]; float3 sampleDR = samples[t1 * width + s1]; float3 sampleU = lWeight * sampleUL + rWeight * sampleUR; float3 sampleD = lWeight * sampleDL + rWeight * sampleDR; return uWeight * sampleU + dWeight * sampleD; } static __device__ const float EDGE_THRESHOLD = 0.125; // 1/8 static __device__ const float EDGE_THRESHOLD_MIN = 0.03125; // 1/32 static __device__ const size_t MAX_ITERATIONS = 12; static __device__ const float SUBPIXEL_QUALITY = 0.75; /** * CUDA FXAA implementation based on shader code at: * http://blog.simonrodriguez.fr/articles/30-07-2016_implementing_fxaa.html * and also: * http://developer.download.nvidia.com/assets/gamedev/files/sdk/11/FXAA_WhitePaper.pdf */ __global__ void map_samples_fxaa( float3* samplesIn, /* output */ float3* samplesOut, size_t width, size_t height ) { size_t s = blockIdx.x * blockDim.x + threadIdx.x; size_t t = blockIdx.y * blockDim.y + threadIdx.y; if (s >= width || t >= height) { return; } float3 sample = samplesIn[t * width + s]; float lumaCenter = luma_from_rgb(sample); /* Grab the lumas of our four direct neighbors. */ float lumaUp = luma_from_rgb( samplesIn[((t > 0) ? (t - 1) : t) * width + s] ); float lumaDown = luma_from_rgb( samplesIn[((t < height - 1) ? (t + 1) : t) * width + s] ); float lumaLeft = luma_from_rgb( samplesIn[t * width + ((s > 0) ? (s - 1) : s)] ); float lumaRight = luma_from_rgb( samplesIn[t * width + ((s < width - 1) ? (s + 1) : s)] ); /* Determine the color contrast between ourselves and our neighbors. */ float lumaMin = fminf( lumaCenter, fminf( fminf(lumaUp, lumaDown), fminf(lumaLeft, lumaRight) ) ); float lumaMax = fmaxf( lumaCenter, fmaxf( fmaxf(lumaUp, lumaDown), fmaxf(lumaLeft, lumaRight) ) ); float lumaRange = lumaMax - lumaMin; /* * Luma contrast too low (or this is a really dark spot). * Don't perform AA. */ if (lumaRange < fmaxf(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD)) { samplesOut[t * width + s] = sample; return; } //else { // samplesOut[t * width + s] = make_float3(255.0, 0.0, 0.0); // return; //} /* Grab the lumas of our remaining corner neighbors. */ float lumaUL = luma_from_rgb( (t > 0 && s > 0) ? samplesIn[(t - 1) * width + s - 1] : samplesIn[t * width + s] ); float lumaUR = luma_from_rgb( (t > 0 && s < width - 1) ? samplesIn[(t - 1) * width + s + 1] : samplesIn[t * width + s] ); float lumaDL = luma_from_rgb( (t < height - 1 && s > 0) ? samplesIn[(t + 1) * width + s - 1] : samplesIn[t * width + s] ); float lumaDR = luma_from_rgb( (t < height - 1 && s < width - 1) ? 
samplesIn[(t + 1) * width + s + 1] : samplesIn[t * width + s] ); /* Combine the edge lumas. */ float lumaUD = lumaUp + lumaDown; float lumaLR = lumaLeft + lumaRight; /* Combine the corner lumas. */ float lumaULUR = lumaUL + lumaUR; float lumaDLDR = lumaDL + lumaDR; float lumaULDL = lumaUL + lumaDL; float lumaURDR = lumaUR + lumaDR; /* Estimate horizontal and vertical gradients. */ float gradientHoriz = ( fabsf(-2.0 * lumaLeft + lumaULDL) + fabsf(-2.0 * lumaCenter + lumaUD) * 2.0 + fabsf(-2.0 * lumaRight + lumaURDR) ); float gradientVerti = ( fabsf(-2.0 * lumaUp + lumaULUR) + fabsf(-2.0 * lumaCenter + lumaLR) * 2.0 + fabsf(-2.0 * lumaDown + lumaDLDR) ); /* Are we at a horizontal or vertical edge? */ bool isHoriz = (gradientHoriz >= gradientVerti); //if (isHoriz) { // samplesOut[t * width + s] = make_float3(255.0, 0.0, 0.0); //} //else { // samplesOut[t * width + s] = make_float3(0.0, 255.0, 0.0); //} //return; /* Choose two lumas in the direction opposite of the edge. */ float luma1 = isHoriz ? lumaUp : lumaLeft; float luma2 = isHoriz ? lumaDown : lumaRight; /* Compute their gradients. */ float gradient1 = luma1 - lumaCenter; float gradient2 = luma2 - lumaCenter; /* Choose the steeper gradient. */ bool grad1Steeper = fabsf(gradient1) >= fabsf(gradient2); /* Normalize the gradients. */ float gradientNorm = 0.25 * fmaxf(fabsf(gradient1), fabsf(gradient2)); /* Determine directional luma average. */ float lumaLocalAvg; if (grad1Steeper) { lumaLocalAvg = 0.5 * (luma1 + lumaCenter); } else { lumaLocalAvg = 0.5 * (luma2 + lumaCenter); } /* Subsample locations for each iteration. */ float iteration1S = static_cast<float>(s); float iteration1T = static_cast<float>(t); float iteration2S = iteration1S; float iteration2T = iteration1T; /* Offset our sample locations toward the edge by half a pixel. */ if (isHoriz) { iteration1T += grad1Steeper ? -0.5 : 0.5; iteration2T += grad1Steeper ? -0.5 : 0.5; } else { iteration1S += grad1Steeper ? -0.5 : 0.5; iteration2S += grad1Steeper ? -0.5 : 0.5; } /* Determine iteration offsets. */ size_t offsetS = isHoriz ? 1 : 0; size_t offsetT = isHoriz ? 0 : 1; iteration1S -= offsetS; iteration1T -= offsetT; iteration2S += offsetS; iteration2T += offsetT; /* Iterate! */ float lumaEnd1; float lumaEnd2; bool reached1 = false; bool reached2 = false; for (size_t i=0; i<MAX_ITERATIONS; i++) { /* Sample lumas in both directions along the edge. */ if (!reached1) { lumaEnd1 = luma_from_rgb( subsample( samplesIn, width, height, iteration1S, iteration1T ) ); lumaEnd1 -= lumaLocalAvg; } if (!reached2) { lumaEnd2 = luma_from_rgb( subsample( samplesIn, width, height, iteration2S, iteration2T ) ); lumaEnd2 -= lumaLocalAvg; } /* Did we reach the end of the edge? */ reached1 = (fabsf(lumaEnd1) >= gradientNorm); reached2 = (fabsf(lumaEnd2) >= gradientNorm); /* If we've reached the end, stop iteration. */ if (reached1 && reached2) { break; } /* But if we HAVEN'T reached the end, continue... */ if (!reached1) { iteration1S -= offsetS; iteration1T -= offsetT; } if (!reached2) { iteration2S += offsetS; iteration2T += offsetT; } } /* Determine how far we've traveled along the edge. */ float dist1 = isHoriz ? (s - iteration1S) : (t - iteration1T); float dist2 = isHoriz ? (iteration2S - s) : (iteration2T - t); /* Which way is closer? */ bool dir1Closer = dist1 < dist2; float closerDist = fminf(dist1, dist2); /* Total length of the edge. */ float edgeLen = dist1 + dist2; /* * The pixel offset where we should subsample, in the direction of the * closer edge endpoint. 
*/ float pixelOffset; if ((lumaCenter < lumaLocalAvg) != ((dir1Closer ? lumaEnd1 : lumaEnd2) < 0.0)) { pixelOffset = 0.0; } else { pixelOffset = -closerDist / edgeLen + 0.5; } //printf( // "(%u, %u) %s distance: %f / %f (%f) Offset: %f\n", // static_cast<unsigned int>(s), static_cast<unsigned int>(t), // isHoriz ? "horizontal" : "vertical", // closerDist, edgeLen, closerDist / edgeLen, // pixelOffset //); /* * Subpixel antialiasing */ /* Weighted average of all the lumas in our local 3x3 grid. */ float lumaAvg = ( (1.0 / 12.0) * (2.0 * (lumaUD + lumaLR) + lumaULDL + lumaURDR) ); float subpixelOffset1 = clamp( fabsf(lumaAvg - lumaCenter) / lumaRange, 0.0, 1.0 ); float subpixelOffset2 = ( (-2.0 * subpixelOffset1 + 3.0) * subpixelOffset1 * subpixelOffset1 ); float subpixelOffset = ( subpixelOffset2 * subpixelOffset2 * SUBPIXEL_QUALITY ); float finalOffset = fmaxf(subpixelOffset, pixelOffset); if (grad1Steeper) { finalOffset = -finalOffset; } /* Determine the final subsample coordinates. */ float finalS = static_cast<float>(s); float finalT = static_cast<float>(t); if (isHoriz) { finalT += finalOffset; } else { finalS += finalOffset; } /* Final subsample... */ float3 color = subsample(samplesIn, width, height, finalS, finalT); //{ // int s0 = static_cast<int>(floorf(s)); // int t0 = static_cast<int>(floorf(t)); // int s1 = s0 + 1; // int t1 = t0 + 1; // if (s0 < 0) { // s0 = 0; // } // if (t0 < 0) { // t0 = 0; // } // if (s1 >= width) { // s1 = width - 1; // } // if (t1 >= height) { // t1 = height - 1; // } // float3 sampleUL = samplesIn[t0 * width + s0]; // float3 sampleUR = samplesIn[t0 * width + s1]; // float3 sampleDL = samplesIn[t1 * width + s0]; // float3 sampleDR = samplesIn[t1 * width + s1]; // printf( // "(%u, %u) sampled at (%f, %f)\n" // "\tUL(%f, %f, %f) UR(%f, %f, %f)\n" // "\tDL(%f, %f, %f) DR(%f, %f, %f)\n" // "\tyields (%f, %f, %f)\n", // static_cast<unsigned int>(s), static_cast<unsigned int>(t), // finalS, finalT, // sampleUL.x, sampleUL.y, sampleUL.z, // sampleUR.x, sampleUR.y, sampleUR.z, // sampleDL.x, sampleDL.y, sampleDL.z, // sampleDR.x, sampleDR.y, sampleDR.z, // color.x, color.y, color.z // ); //} //color = isHoriz ? // make_float3(color.x * 10.0, color.y, color.z) : // make_float3(color.x, color.y * 10.0, color.z); /* ... and we're done! */ samplesOut[t * width + s] = color; } __global__ void map_samples_edgeblur( CUDABSP::CUDABSP* pCudaBSP, float3* samplesIn, /* output */ float3* samplesOut, size_t width, size_t height ) { size_t s = blockIdx.x * blockDim.x + threadIdx.x; size_t t = blockIdx.y * blockDim.y + threadIdx.y; if (s >= width || t >= height) { return; } float3 sample = samplesIn[t * width + s]; float lumaCenter = luma_from_rgb(sample); /* Grab the lumas of our four direct neighbors. */ float lumaUp = luma_from_rgb( samplesIn[((t > 0) ? (t - 1) : t) * width + s] ); float lumaDown = luma_from_rgb( samplesIn[((t < height - 1) ? (t + 1) : t) * width + s] ); float lumaLeft = luma_from_rgb( samplesIn[t * width + ((s > 0) ? (s - 1) : s)] ); float lumaRight = luma_from_rgb( samplesIn[t * width + ((s < width - 1) ? (s + 1) : s)] ); /* Determine the color contrast between ourselves and our neighbors. */ float lumaMin = fminf( lumaCenter, fminf( fminf(lumaUp, lumaDown), fminf(lumaLeft, lumaRight) ) ); float lumaMax = fmaxf( lumaCenter, fmaxf( fmaxf(lumaUp, lumaDown), fmaxf(lumaLeft, lumaRight) ) ); float lumaRange = lumaMax - lumaMin; /* * Luma contrast too low (or this is a really dark spot). * Don't perform AA. 
*/ if (lumaRange < fmaxf(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD)) { samplesOut[t * width + s] = sample; return; } } __global__ void map_faces(CUDABSP::CUDABSP* pCudaBSP) { size_t faceIndex = blockIdx.x * blockDim.x + threadIdx.x; if (faceIndex >= pCudaBSP->numFaces) { return; } BSP::DFace& face = pCudaBSP->faces[faceIndex]; size_t width = face.lightmapTextureSizeInLuxels[0] + 1; size_t height = face.lightmapTextureSizeInLuxels[1] + 1; size_t startIndex = face.lightOffset / sizeof(BSP::RGBExp32); float3* lightSamples = pCudaBSP->lightSamples + startIndex; float3* results = new float3[width * height]; const size_t BLOCK_WIDTH = 16; const size_t BLOCK_HEIGHT = 16; dim3 gridDim( div_ceil(width, BLOCK_WIDTH), div_ceil(height, BLOCK_HEIGHT) ); dim3 blockDim(BLOCK_WIDTH, BLOCK_HEIGHT); KERNEL_LAUNCH_DEVICE( map_samples_fxaa, gridDim, blockDim, lightSamples, results, width, height ); CUDA_CHECK_ERROR_DEVICE(hipDeviceSynchronize()); /* Transfer the AA'd results back into the light sample buffer. */ memcpy(lightSamples, results, sizeof(float3) * width * height); delete[] results; } namespace CUDAFXAA { void antialias_lightsamples(BSP::BSP& bsp, CUDABSP::CUDABSP* pCudaBSP) { CUDABSP::CUDABSP cudaBSP; CUDA_CHECK_ERROR( hipMemcpy( &cudaBSP, pCudaBSP, sizeof(CUDABSP::CUDABSP), hipMemcpyDeviceToHost ) ); for (const BSP::Face& face : bsp.get_faces()) { size_t width = face.get_lightmap_width(); size_t height = face.get_lightmap_height(); size_t numSamples = width * height; size_t startIndex = face.get_data().lightOffset / sizeof(BSP::RGBExp32); float3* samples = cudaBSP.lightSamples + startIndex; float3* results; CUDA_CHECK_ERROR( hipMalloc(&results, sizeof(float3) * numSamples) ); const size_t BLOCK_WIDTH = 16; const size_t BLOCK_HEIGHT = 16; dim3 gridDim( div_ceil(width, BLOCK_WIDTH), div_ceil(height, BLOCK_HEIGHT) ); dim3 blockDim(BLOCK_WIDTH, BLOCK_HEIGHT); KERNEL_LAUNCH( map_samples_fxaa, gridDim, blockDim, samples, results, width, height ); CUDA_CHECK_ERROR(hipDeviceSynchronize()); CUDA_CHECK_ERROR( hipMemcpy( samples, results, sizeof(float3) * numSamples, hipMemcpyDeviceToDevice ) ); CUDA_CHECK_ERROR(hipFree(results)); } //size_t numFaces; //CUDA_CHECK_ERROR( // hipMemcpy( // &numFaces, &pCudaBSP->numFaces, sizeof(size_t), // hipMemcpyDeviceToHost // ) //); //const size_t BLOCK_WIDTH = 32; //size_t numBlocks = div_ceil(numFaces, BLOCK_WIDTH); //KERNEL_LAUNCH( // map_faces, // numBlocks, BLOCK_WIDTH, // pCudaBSP //); //CUDA_CHECK_ERROR(hipDeviceSynchronize()); } }
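// Illustrative sketch (assumed standalone host program, not part of the original
// .hip/.cu pair above): the FXAA kernels only do work when the local luma contrast
// exceeds fmaxf(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD). This restates
// luma_from_rgb() and that edge test with hypothetical pixel values, purely as a
// reference for the thresholding step.
#include <algorithm>
#include <cmath>
#include <cstdio>

static float lumaFromRgb(float r, float g, float b) {
    // Same weighting as luma_from_rgb(): sqrt(dot(rgb / 255, (0.299, 0.587, 0.114))).
    return std::sqrt((r * 0.299f + g * 0.587f + b * 0.114f) / 255.0f);
}

int main() {
    const float EDGE_THRESHOLD     = 0.125f;    // 1/8
    const float EDGE_THRESHOLD_MIN = 0.03125f;  // 1/32

    // Hypothetical neighborhood: a bright center with one dark neighbor.
    float lumaCenter = lumaFromRgb(200.0f, 200.0f, 200.0f);
    float lumaUp     = lumaFromRgb(30.0f, 30.0f, 30.0f);
    float lumaDown   = lumaCenter, lumaLeft = lumaCenter, lumaRight = lumaCenter;

    float lumaMin   = std::min({lumaCenter, lumaUp, lumaDown, lumaLeft, lumaRight});
    float lumaMax   = std::max({lumaCenter, lumaUp, lumaDown, lumaLeft, lumaRight});
    float lumaRange = lumaMax - lumaMin;

    bool skipAA = lumaRange < std::max(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD);
    std::printf("range = %f, threshold = %f -> %s\n",
                lumaRange, std::max(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD),
                skipAA ? "skip AA" : "run FXAA");
    return 0;
}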
420be7146bbf4a15cd8d264962fcfd3a6dd7a795.cu
#include "fxaa.h" #include "bsp.h" #include "cudabsp.h" #include "cudarad.h" #include "cudautils.h" static __device__ inline float luma_from_rgb(float3 rgb) { return sqrt(dot(rgb / 255.0, make_float3(0.299, 0.587, 0.114))); } static __device__ inline float clamp(float x, float lower, float upper) { return fmaxf(lower, fminf(upper, x)); } static __device__ float3 subsample( float3* samples, size_t width, size_t height, float s, float t ) { //float3 zero = make_float3(0.0, 0.0, 0.0); int s0 = static_cast<int>(floorf(s)); int t0 = static_cast<int>(floorf(t)); int s1 = s0 + 1; int t1 = t0 + 1; if (s0 < 0) { s0 = 0; } if (t0 < 0) { t0 = 0; } if (s1 >= width) { s1 = width - 1; } if (t1 >= height) { t1 = height - 1; } float rWeight = s - floorf(s); float lWeight = 1.0 - rWeight; float dWeight = t - floorf(t); float uWeight = 1.0 - dWeight; float3 sampleUL = samples[t0 * width + s0]; float3 sampleUR = samples[t0 * width + s1]; float3 sampleDL = samples[t1 * width + s0]; float3 sampleDR = samples[t1 * width + s1]; float3 sampleU = lWeight * sampleUL + rWeight * sampleUR; float3 sampleD = lWeight * sampleDL + rWeight * sampleDR; return uWeight * sampleU + dWeight * sampleD; } static __device__ const float EDGE_THRESHOLD = 0.125; // 1/8 static __device__ const float EDGE_THRESHOLD_MIN = 0.03125; // 1/32 static __device__ const size_t MAX_ITERATIONS = 12; static __device__ const float SUBPIXEL_QUALITY = 0.75; /** * CUDA FXAA implementation based on shader code at: * http://blog.simonrodriguez.fr/articles/30-07-2016_implementing_fxaa.html * and also: * http://developer.download.nvidia.com/assets/gamedev/files/sdk/11/FXAA_WhitePaper.pdf */ __global__ void map_samples_fxaa( float3* samplesIn, /* output */ float3* samplesOut, size_t width, size_t height ) { size_t s = blockIdx.x * blockDim.x + threadIdx.x; size_t t = blockIdx.y * blockDim.y + threadIdx.y; if (s >= width || t >= height) { return; } float3 sample = samplesIn[t * width + s]; float lumaCenter = luma_from_rgb(sample); /* Grab the lumas of our four direct neighbors. */ float lumaUp = luma_from_rgb( samplesIn[((t > 0) ? (t - 1) : t) * width + s] ); float lumaDown = luma_from_rgb( samplesIn[((t < height - 1) ? (t + 1) : t) * width + s] ); float lumaLeft = luma_from_rgb( samplesIn[t * width + ((s > 0) ? (s - 1) : s)] ); float lumaRight = luma_from_rgb( samplesIn[t * width + ((s < width - 1) ? (s + 1) : s)] ); /* Determine the color contrast between ourselves and our neighbors. */ float lumaMin = fminf( lumaCenter, fminf( fminf(lumaUp, lumaDown), fminf(lumaLeft, lumaRight) ) ); float lumaMax = fmaxf( lumaCenter, fmaxf( fmaxf(lumaUp, lumaDown), fmaxf(lumaLeft, lumaRight) ) ); float lumaRange = lumaMax - lumaMin; /* * Luma contrast too low (or this is a really dark spot). * Don't perform AA. */ if (lumaRange < fmaxf(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD)) { samplesOut[t * width + s] = sample; return; } //else { // samplesOut[t * width + s] = make_float3(255.0, 0.0, 0.0); // return; //} /* Grab the lumas of our remaining corner neighbors. */ float lumaUL = luma_from_rgb( (t > 0 && s > 0) ? samplesIn[(t - 1) * width + s - 1] : samplesIn[t * width + s] ); float lumaUR = luma_from_rgb( (t > 0 && s < width - 1) ? samplesIn[(t - 1) * width + s + 1] : samplesIn[t * width + s] ); float lumaDL = luma_from_rgb( (t < height - 1 && s > 0) ? samplesIn[(t + 1) * width + s - 1] : samplesIn[t * width + s] ); float lumaDR = luma_from_rgb( (t < height - 1 && s < width - 1) ? 
samplesIn[(t + 1) * width + s + 1] : samplesIn[t * width + s] ); /* Combine the edge lumas. */ float lumaUD = lumaUp + lumaDown; float lumaLR = lumaLeft + lumaRight; /* Combine the corner lumas. */ float lumaULUR = lumaUL + lumaUR; float lumaDLDR = lumaDL + lumaDR; float lumaULDL = lumaUL + lumaDL; float lumaURDR = lumaUR + lumaDR; /* Estimate horizontal and vertical gradients. */ float gradientHoriz = ( fabsf(-2.0 * lumaLeft + lumaULDL) + fabsf(-2.0 * lumaCenter + lumaUD) * 2.0 + fabsf(-2.0 * lumaRight + lumaURDR) ); float gradientVerti = ( fabsf(-2.0 * lumaUp + lumaULUR) + fabsf(-2.0 * lumaCenter + lumaLR) * 2.0 + fabsf(-2.0 * lumaDown + lumaDLDR) ); /* Are we at a horizontal or vertical edge? */ bool isHoriz = (gradientHoriz >= gradientVerti); //if (isHoriz) { // samplesOut[t * width + s] = make_float3(255.0, 0.0, 0.0); //} //else { // samplesOut[t * width + s] = make_float3(0.0, 255.0, 0.0); //} //return; /* Choose two lumas in the direction opposite of the edge. */ float luma1 = isHoriz ? lumaUp : lumaLeft; float luma2 = isHoriz ? lumaDown : lumaRight; /* Compute their gradients. */ float gradient1 = luma1 - lumaCenter; float gradient2 = luma2 - lumaCenter; /* Choose the steeper gradient. */ bool grad1Steeper = fabsf(gradient1) >= fabsf(gradient2); /* Normalize the gradients. */ float gradientNorm = 0.25 * fmaxf(fabsf(gradient1), fabsf(gradient2)); /* Determine directional luma average. */ float lumaLocalAvg; if (grad1Steeper) { lumaLocalAvg = 0.5 * (luma1 + lumaCenter); } else { lumaLocalAvg = 0.5 * (luma2 + lumaCenter); } /* Subsample locations for each iteration. */ float iteration1S = static_cast<float>(s); float iteration1T = static_cast<float>(t); float iteration2S = iteration1S; float iteration2T = iteration1T; /* Offset our sample locations toward the edge by half a pixel. */ if (isHoriz) { iteration1T += grad1Steeper ? -0.5 : 0.5; iteration2T += grad1Steeper ? -0.5 : 0.5; } else { iteration1S += grad1Steeper ? -0.5 : 0.5; iteration2S += grad1Steeper ? -0.5 : 0.5; } /* Determine iteration offsets. */ size_t offsetS = isHoriz ? 1 : 0; size_t offsetT = isHoriz ? 0 : 1; iteration1S -= offsetS; iteration1T -= offsetT; iteration2S += offsetS; iteration2T += offsetT; /* Iterate! */ float lumaEnd1; float lumaEnd2; bool reached1 = false; bool reached2 = false; for (size_t i=0; i<MAX_ITERATIONS; i++) { /* Sample lumas in both directions along the edge. */ if (!reached1) { lumaEnd1 = luma_from_rgb( subsample( samplesIn, width, height, iteration1S, iteration1T ) ); lumaEnd1 -= lumaLocalAvg; } if (!reached2) { lumaEnd2 = luma_from_rgb( subsample( samplesIn, width, height, iteration2S, iteration2T ) ); lumaEnd2 -= lumaLocalAvg; } /* Did we reach the end of the edge? */ reached1 = (fabsf(lumaEnd1) >= gradientNorm); reached2 = (fabsf(lumaEnd2) >= gradientNorm); /* If we've reached the end, stop iteration. */ if (reached1 && reached2) { break; } /* But if we HAVEN'T reached the end, continue... */ if (!reached1) { iteration1S -= offsetS; iteration1T -= offsetT; } if (!reached2) { iteration2S += offsetS; iteration2T += offsetT; } } /* Determine how far we've traveled along the edge. */ float dist1 = isHoriz ? (s - iteration1S) : (t - iteration1T); float dist2 = isHoriz ? (iteration2S - s) : (iteration2T - t); /* Which way is closer? */ bool dir1Closer = dist1 < dist2; float closerDist = fminf(dist1, dist2); /* Total length of the edge. */ float edgeLen = dist1 + dist2; /* * The pixel offset where we should subsample, in the direction of the * closer edge endpoint. 
*/ float pixelOffset; if ((lumaCenter < lumaLocalAvg) != ((dir1Closer ? lumaEnd1 : lumaEnd2) < 0.0)) { pixelOffset = 0.0; } else { pixelOffset = -closerDist / edgeLen + 0.5; } //printf( // "(%u, %u) %s distance: %f / %f (%f) Offset: %f\n", // static_cast<unsigned int>(s), static_cast<unsigned int>(t), // isHoriz ? "horizontal" : "vertical", // closerDist, edgeLen, closerDist / edgeLen, // pixelOffset //); /* * Subpixel antialiasing */ /* Weighted average of all the lumas in our local 3x3 grid. */ float lumaAvg = ( (1.0 / 12.0) * (2.0 * (lumaUD + lumaLR) + lumaULDL + lumaURDR) ); float subpixelOffset1 = clamp( fabsf(lumaAvg - lumaCenter) / lumaRange, 0.0, 1.0 ); float subpixelOffset2 = ( (-2.0 * subpixelOffset1 + 3.0) * subpixelOffset1 * subpixelOffset1 ); float subpixelOffset = ( subpixelOffset2 * subpixelOffset2 * SUBPIXEL_QUALITY ); float finalOffset = fmaxf(subpixelOffset, pixelOffset); if (grad1Steeper) { finalOffset = -finalOffset; } /* Determine the final subsample coordinates. */ float finalS = static_cast<float>(s); float finalT = static_cast<float>(t); if (isHoriz) { finalT += finalOffset; } else { finalS += finalOffset; } /* Final subsample... */ float3 color = subsample(samplesIn, width, height, finalS, finalT); //{ // int s0 = static_cast<int>(floorf(s)); // int t0 = static_cast<int>(floorf(t)); // int s1 = s0 + 1; // int t1 = t0 + 1; // if (s0 < 0) { // s0 = 0; // } // if (t0 < 0) { // t0 = 0; // } // if (s1 >= width) { // s1 = width - 1; // } // if (t1 >= height) { // t1 = height - 1; // } // float3 sampleUL = samplesIn[t0 * width + s0]; // float3 sampleUR = samplesIn[t0 * width + s1]; // float3 sampleDL = samplesIn[t1 * width + s0]; // float3 sampleDR = samplesIn[t1 * width + s1]; // printf( // "(%u, %u) sampled at (%f, %f)\n" // "\tUL(%f, %f, %f) UR(%f, %f, %f)\n" // "\tDL(%f, %f, %f) DR(%f, %f, %f)\n" // "\tyields (%f, %f, %f)\n", // static_cast<unsigned int>(s), static_cast<unsigned int>(t), // finalS, finalT, // sampleUL.x, sampleUL.y, sampleUL.z, // sampleUR.x, sampleUR.y, sampleUR.z, // sampleDL.x, sampleDL.y, sampleDL.z, // sampleDR.x, sampleDR.y, sampleDR.z, // color.x, color.y, color.z // ); //} //color = isHoriz ? // make_float3(color.x * 10.0, color.y, color.z) : // make_float3(color.x, color.y * 10.0, color.z); /* ... and we're done! */ samplesOut[t * width + s] = color; } __global__ void map_samples_edgeblur( CUDABSP::CUDABSP* pCudaBSP, float3* samplesIn, /* output */ float3* samplesOut, size_t width, size_t height ) { size_t s = blockIdx.x * blockDim.x + threadIdx.x; size_t t = blockIdx.y * blockDim.y + threadIdx.y; if (s >= width || t >= height) { return; } float3 sample = samplesIn[t * width + s]; float lumaCenter = luma_from_rgb(sample); /* Grab the lumas of our four direct neighbors. */ float lumaUp = luma_from_rgb( samplesIn[((t > 0) ? (t - 1) : t) * width + s] ); float lumaDown = luma_from_rgb( samplesIn[((t < height - 1) ? (t + 1) : t) * width + s] ); float lumaLeft = luma_from_rgb( samplesIn[t * width + ((s > 0) ? (s - 1) : s)] ); float lumaRight = luma_from_rgb( samplesIn[t * width + ((s < width - 1) ? (s + 1) : s)] ); /* Determine the color contrast between ourselves and our neighbors. */ float lumaMin = fminf( lumaCenter, fminf( fminf(lumaUp, lumaDown), fminf(lumaLeft, lumaRight) ) ); float lumaMax = fmaxf( lumaCenter, fmaxf( fmaxf(lumaUp, lumaDown), fmaxf(lumaLeft, lumaRight) ) ); float lumaRange = lumaMax - lumaMin; /* * Luma contrast too low (or this is a really dark spot). * Don't perform AA. 
*/ if (lumaRange < fmaxf(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD)) { samplesOut[t * width + s] = sample; return; } } __global__ void map_faces(CUDABSP::CUDABSP* pCudaBSP) { size_t faceIndex = blockIdx.x * blockDim.x + threadIdx.x; if (faceIndex >= pCudaBSP->numFaces) { return; } BSP::DFace& face = pCudaBSP->faces[faceIndex]; size_t width = face.lightmapTextureSizeInLuxels[0] + 1; size_t height = face.lightmapTextureSizeInLuxels[1] + 1; size_t startIndex = face.lightOffset / sizeof(BSP::RGBExp32); float3* lightSamples = pCudaBSP->lightSamples + startIndex; float3* results = new float3[width * height]; const size_t BLOCK_WIDTH = 16; const size_t BLOCK_HEIGHT = 16; dim3 gridDim( div_ceil(width, BLOCK_WIDTH), div_ceil(height, BLOCK_HEIGHT) ); dim3 blockDim(BLOCK_WIDTH, BLOCK_HEIGHT); KERNEL_LAUNCH_DEVICE( map_samples_fxaa, gridDim, blockDim, lightSamples, results, width, height ); CUDA_CHECK_ERROR_DEVICE(cudaDeviceSynchronize()); /* Transfer the AA'd results back into the light sample buffer. */ memcpy(lightSamples, results, sizeof(float3) * width * height); delete[] results; } namespace CUDAFXAA { void antialias_lightsamples(BSP::BSP& bsp, CUDABSP::CUDABSP* pCudaBSP) { CUDABSP::CUDABSP cudaBSP; CUDA_CHECK_ERROR( cudaMemcpy( &cudaBSP, pCudaBSP, sizeof(CUDABSP::CUDABSP), cudaMemcpyDeviceToHost ) ); for (const BSP::Face& face : bsp.get_faces()) { size_t width = face.get_lightmap_width(); size_t height = face.get_lightmap_height(); size_t numSamples = width * height; size_t startIndex = face.get_data().lightOffset / sizeof(BSP::RGBExp32); float3* samples = cudaBSP.lightSamples + startIndex; float3* results; CUDA_CHECK_ERROR( cudaMalloc(&results, sizeof(float3) * numSamples) ); const size_t BLOCK_WIDTH = 16; const size_t BLOCK_HEIGHT = 16; dim3 gridDim( div_ceil(width, BLOCK_WIDTH), div_ceil(height, BLOCK_HEIGHT) ); dim3 blockDim(BLOCK_WIDTH, BLOCK_HEIGHT); KERNEL_LAUNCH( map_samples_fxaa, gridDim, blockDim, samples, results, width, height ); CUDA_CHECK_ERROR(cudaDeviceSynchronize()); CUDA_CHECK_ERROR( cudaMemcpy( samples, results, sizeof(float3) * numSamples, cudaMemcpyDeviceToDevice ) ); CUDA_CHECK_ERROR(cudaFree(results)); } //size_t numFaces; //CUDA_CHECK_ERROR( // cudaMemcpy( // &numFaces, &pCudaBSP->numFaces, sizeof(size_t), // cudaMemcpyDeviceToHost // ) //); //const size_t BLOCK_WIDTH = 32; //size_t numBlocks = div_ceil(numFaces, BLOCK_WIDTH); //KERNEL_LAUNCH( // map_faces, // numBlocks, BLOCK_WIDTH, // pCudaBSP //); //CUDA_CHECK_ERROR(cudaDeviceSynchronize()); } }
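// Illustrative sketch (assumed standalone host program, not part of the original
// .hip/.cu pair above): a single-channel restatement of the clamped bilinear
// filter that subsample() applies to float3 lightmap texels, using the same
// floor/clamp index handling and fractional weights.
#include <cmath>
#include <cstdio>
#include <vector>

static float subsampleRef(const std::vector<float>& img, int width, int height,
                          float s, float t) {
    int s0 = static_cast<int>(std::floor(s));
    int t0 = static_cast<int>(std::floor(t));
    int s1 = s0 + 1;
    int t1 = t0 + 1;
    if (s0 < 0) { s0 = 0; }
    if (t0 < 0) { t0 = 0; }
    if (s1 >= width)  { s1 = width - 1; }
    if (t1 >= height) { t1 = height - 1; }

    float rWeight = s - std::floor(s);   // weight toward the right column
    float lWeight = 1.0f - rWeight;
    float dWeight = t - std::floor(t);   // weight toward the lower row
    float uWeight = 1.0f - dWeight;

    float sampleU = lWeight * img[t0 * width + s0] + rWeight * img[t0 * width + s1];
    float sampleD = lWeight * img[t1 * width + s0] + rWeight * img[t1 * width + s1];
    return uWeight * sampleU + dWeight * sampleD;
}

int main() {
    // 2x2 grid; sampling at (0.5, 0.5) blends all four texels equally.
    std::vector<float> img = {0.0f, 1.0f, 2.0f, 3.0f};
    std::printf("subsampleRef(0.5, 0.5) = %f (expected 1.5)\n",
                subsampleRef(img, 2, 2, 0.5f, 0.5f));
    return 0;
}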
6c901159f113bd85e0296175077f5bbc6f282140.hip
// !!! This is a file automatically generated by hipify!!! // #include <cstdio> // #include <hip/hip_runtime.h> // #include <cmath> // #include <thrust/execution_policy.h> // #include <thrust/random.h> // #include <thrust/remove.h> // #include "sceneStructs.h" // #include "scene.h" // #include "glm/glm.hpp" // #include "glm/gtx/norm.hpp" // #include "utilities.h" // #include "pathtrace.h" // #include "intersections.h" // #include "interactions.h" // #define ERRORCHECK 1 // #define ANTI_ALIASING 0 // #define CACHE_BOUNCE 0 // #define SORT_MATERIALS 0 // #define DEPTH_OF_FIELD 0 // #define DIRECT_LIGHTING 1 // #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) // #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) // void checkCUDAErrorFn(const char *msg, const char *file, int line) { // #if ERRORCHECK // hipDeviceSynchronize(); // hipError_t err = hipGetLastError(); // if (hipSuccess == err) { // return; // } // fprintf(stderr, "CUDA error"); // if (file) { // fprintf(stderr, " (%s:%d)", file, line); // } // fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); // # ifdef _WIN32 // getchar(); // # endif // exit(EXIT_FAILURE); // #endif // } // __host__ __device__ // thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { // int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); // return thrust::default_random_engine(h); // } // //Kernel that writes the image to the OpenGL PBO directly. // __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, // int iter, glm::vec3* image) { // int x = (blockIdx.x * blockDim.x) + threadIdx.x; // int y = (blockIdx.y * blockDim.y) + threadIdx.y; // if (x < resolution.x && y < resolution.y) { // int index = x + (y * resolution.x); // glm::vec3 pix = image[index]; // glm::ivec3 color; // color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); // color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); // color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // // Each thread writes one pixel location in the texture (textel) // pbo[index].w = 0; // pbo[index].x = color.x; // pbo[index].y = color.y; // pbo[index].z = color.z; // } // // checkCUDAError("sendImageToPBO"); // } // static Scene * hst_scene = NULL; // static glm::vec3 * dev_image = NULL; // static Geom * dev_geoms = NULL; // static Material * dev_materials = NULL; // static PathSegment * dev_paths = NULL; // static ShadeableIntersection * dev_intersections = NULL; // static ShadeableIntersection* dev_intersection_first_bounce = NULL; // #if DIRECT_LIGHTING // static Geom* dev_lights = NULL; // #endif // // TODO: static variables for device memory, any extra info you need, etc // // ... 
// void pathtraceInit(Scene *scene) { // hst_scene = scene; // const Camera &cam = hst_scene->state.camera; // const int pixelcount = cam.resolution.x * cam.resolution.y; // hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); // hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); // hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); // hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); // hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); // hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); // hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); // hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); // hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // // TODO: initialize any extra device memeory you need // #if CACHE_BOUNCE || SORT_MATERIALS // hipMalloc(&dev_first_bounce, pixelcount * sizeof(ShadeableIntersection)); // hipMemset(dev_first_bounce, 0, pixelcount * sizeof(ShadeableIntersection)); // #endif // #if DIRECT_LIGHTING // hipMalloc(&dev_lights, scene->lights.size() * sizeof(Geom)); // hipMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), hipMemcpyHostToDevice); // #endif // checkCUDAError("pathtraceInit"); // } // void pathtraceFree() { // hipFree(dev_image); // no-op if dev_image is null // hipFree(dev_paths); // hipFree(dev_geoms); // hipFree(dev_materials); // hipFree(dev_intersections); // // TODO: clean up any extra device memory you created // checkCUDAError("pathtraceFree"); // } // __host__ __device__ // glm::vec3 pointOnPlane(Geom light, thrust::default_random_engine& rng) { // thrust::uniform_real_distribution<float> u01(0, 1); // glm::vec2 pt(u01(rng), u01(rng)); // glm::vec3 planePt = glm::vec3((pt - glm::vec2(0.5f)), 0.f); // return glm::vec3(light.transform * glm::vec4(planePt, 1.f)); // } // /** // * Generate PathSegments with rays from the camera through the screen into the // * scene, which is the first bounce of rays. // * // * Antialiasing - add rays for sub-pixel sampling // * motion blur - jitter rays "in time" // * lens effect - jitter ray origin positions based on a lens // */ // __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) // { // int x = (blockIdx.x * blockDim.x) + threadIdx.x; // int y = (blockIdx.y * blockDim.y) + threadIdx.y; // if (x < cam.resolution.x && y < cam.resolution.y) { // int index = x + (y * cam.resolution.x); // PathSegment & segment = pathSegments[index]; // segment.ray.origin = cam.position; // segment.color = glm::vec3(1.0f, 1.0f, 1.0f); // // TODO: implement antialiasing by jittering the ray // segment.ray.direction = glm::normalize(cam.view // - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) // - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) // ); // segment.pixelIndex = index; // segment.remainingBounces = traceDepth; // } // } // // TODO: // // computeIntersections handles generating ray intersections ONLY. // // Generating new rays is handled in your shader(s). // // Feel free to modify the code below. 
// __global__ void computeIntersections(int depth, int num_paths, PathSegment * pathSegments, // Geom * geoms, int geoms_size, ShadeableIntersection * intersections) // { // int path_index = blockIdx.x * blockDim.x + threadIdx.x; // if (path_index < num_paths) // { // PathSegment pathSegment = pathSegments[path_index]; // float t; // glm::vec3 intersect_point; // glm::vec3 normal; // float t_min = FLT_MAX; // int hit_geom_index = -1; // bool outside = true; // glm::vec3 tmp_intersect; // glm::vec3 tmp_normal; // // naive parse through global geoms // for (int i = 0; i < geoms_size; i++) // { // Geom & geom = geoms[i]; // if (geom.type == CUBE) // { // t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); // } // else if (geom.type == SPHERE) // { // t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); // } // // TODO: add more intersection tests here... triangle? metaball? CSG? // else if (geom.type == TRIANGLE){ // t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); // } // // Compute the minimum t from the intersection tests to determine what // // scene geometry object was hit first. // if (t > 0.0f && t_min > t) // { // t_min = t; // hit_geom_index = i; // intersect_point = tmp_intersect; // normal = tmp_normal; // } // } // if (hit_geom_index == -1) // { // intersections[path_index].t = -1.0f; // } // else // { // //The ray hits something // intersections[path_index].t = t_min; // intersections[path_index].materialId = geoms[hit_geom_index].materialid; // intersections[path_index].surfaceNormal = normal; // } // } // } // // LOOK: "fake" shader demonstrating what you might do with the info in // // a ShadeableIntersection, as well as how to use thrust's random number // // generator. Observe that since the thrust random number generator basically // // adds "noise" to the iteration, the image should start off noisy and get // // cleaner as more iterations are computed. // // // // Note that this shader does NOT do a BSDF evaluation! // // Your shaders should handle that - this can allow techniques such as // // bump mapping. // __global__ void shadeFakeMaterial (int iter, int num_paths, ShadeableIntersection * shadeableIntersections, PathSegment * pathSegments, Material * materials) // { // int idx = blockIdx.x * blockDim.x + threadIdx.x; // if (idx < num_paths) // { // ShadeableIntersection intersection = shadeableIntersections[idx]; // if (intersection.t > 0.0f) { // if the intersection exists... // // Set up the RNG // // LOOK: this is how you use thrust's RNG! Please look at // // makeSeededRandomEngine as well. // thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); // // thrust::uniform_real_distribution<float> u01(0, 1); // Material material = materials[intersection.materialId]; // glm::vec3 materialColor = material.color; // // If the material indicates that the object was a light, "light" the ray // if (material.emittance > 0.0f) { // pathSegments[idx].color *= (materialColor * material.emittance); // pathSegments[idx].remainingBounces = 0; // } // else if (pathSegments[idx].remainingBounces == 1) { // pathSegments[idx].remainingBounces -= 1; // pathSegments[idx].color = glm::vec3(0.0f); // } // // Otherwise, do some pseudo-lighting computation. This is actually more // // like what you would expect from shading in a rasterizer like OpenGL. // // TODO: replace this! 
you should be able to start with basically a one-liner // else { // scatterRay(pathSegments[idx], pathSegments[idx].ray.origin + pathSegments[idx].ray.direction * intersection.t, intersection.surfaceNormal, // material, rng); // // float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); // // pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; // // pathSegments[idx].color *= u01(rng); // apply some noise because why not // } // // If there was no intersection, color the ray black. // // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // // used for opacity, in which case they can indicate "no opacity". // // This can be useful for post-processing and image compositing. // } else { // pathSegments[idx].color = glm::vec3(0.0f); // pathSegments[idx].remainingBounces = 0; // } // } // } // // Add the current iteration's output to the overall image // __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) // { // int index = (blockIdx.x * blockDim.x) + threadIdx.x; // if (index < nPaths) // { // PathSegment iterationPath = iterationPaths[index]; // image[iterationPath.pixelIndex] += iterationPath.color; // } // } // struct end_condition { // __host__ __device__ // bool operator()(const PathSegment& pathSegment) { // return (pathSegment.remainingBounces >= 0); // } // }; // struct compare_materials { // __host__ __device__ // bool operator()(const ShadeableIntersection& m1, const ShadeableIntersection& m2) { // return (m1.materialId > m2.materialId); // } // }; // __global__ void shadeDirectLighting(int iter, int num_paths, ShadeableIntersection* shadeableIntersections, PathSegment* pathSegments, Material* materials, Geom* lights, int num // ){ // int idx = blockIdx.x * blockDim.x + threadIdx.x; // if (idx < num_paths) // { // if (pathSegments[idx].remainingBounces <= 0) { // return; // } // ShadeableIntersection intersection = shadeableIntersections[idx]; // thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces); // PathSegment path = pathSegments[idx]; // if (path.remainingBounces != 2 && path.remainingBounces > 0 && intersection.t > 0.f) { // thrust::uniform_real_distribution<float> u01(0, 1); // Material material = materials[intersection.materialId]; // glm::vec3 materialColor = material.color; // // If the material indicates that the object was a light, "light" the ray // if (material.emittance > 0.0f) { // pathSegments[idx].color *= (materialColor * material.emittance); // pathSegments[idx].remainingBounces = 0; // } // else if (pathSegments[idx].remainingBounces == 1) { // pathSegments[idx].remainingBounces -= 1; // pathSegments[idx].color = glm::vec3(0.0f); // } // else { // pathSegments[idx].remainingBounces -= 1; // scatterRay(pathSegments[idx], pathSegments[idx].ray.origin + pathSegments[idx].ray.direction * intersection.t, intersection.surfaceNormal, // material, rng); // } // } // else if (path.remainingBounces == 2 && intersection.t > 0.f) { // Material material = materials[intersection.materialId]; // glm::vec3 materialColor = material.color; // // If the material indicates that the object was a light, "light" the ray // if (material.emittance > 0.0f) { // pathSegments[idx].color *= (materialColor * material.emittance); // pathSegments[idx].remainingBounces = 0; // } // else { // scatterRay(path, path.ray.origin + path.ray.direction * intersection.t, intersection.surfaceNormal, 
material, rng); // thrust::uniform_real_distribution<float> u01(0, 1); // float r = u01(rng); // int lightIdx = 0; // if (num != 0) { // lightIdx = glm::min((int)glm::floor(r * num), num - 1); // } // glm::vec3 lightPt = pointOnPlane(lights[lightIdx], rng); // path.ray.direction = glm::normalize(lightPt - path.ray.origin); // path.remainingBounces--; // } // } // else { // pathSegments[idx].color = glm::vec3(0.0f); // pathSegments[idx].remainingBounces = 0; // } // } // } // /** // * Wrapper for the __global__ call that sets up the kernel calls and does a ton // * of memory management // */ // void pathtrace(uchar4 *pbo, int frame, int iter) { // const int traceDepth = hst_scene->state.traceDepth; // const Camera &cam = hst_scene->state.camera; // const int pixelcount = cam.resolution.x * cam.resolution.y; // // 2D block for generating ray from camera // const dim3 blockSize2d(8, 8); // const dim3 blocksPerGrid2d( // (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, // (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // // 1D block for path tracing // const int blockSize1d = 128; // /////////////////////////////////////////////////////////////////////////// // // Recap: // // * Initialize array of path rays (using rays that come out of the camera) // // * You can pass the Camera object to that kernel. // // * Each path ray must carry at minimum a (ray, color) pair, // // * where color starts as the multiplicative identity, white = (1, 1, 1). // // * This has already been done for you. // // * For each depth: // // * Compute an intersection in the scene for each path ray. // // A very naive version of this has been implemented for you, but feel // // free to add more primitives and/or a better algorithm. // // Currently, intersection distance is recorded as a parametric distance, // // t, or a "distance along the ray." t = -1.0 indicates no intersection. // // * Color is attenuated (multiplied) by reflections off of any object // // * TODO: Stream compact away all of the terminated paths. // // You may use either your implementation or `thrust::remove_if` or its // // cousins. // // * Note that you can't really use a 2D kernel launch any more - switch // // to 1D. // // * TODO: Shade the rays that intersected something or didn't bottom out. // // That is, color the ray by performing a color computation according // // to the shader, then generate a new ray to continue the ray path. // // We recommend just updating the ray's PathSegment in place. // // Note that this step may come before or after stream compaction, // // since some shaders you write may also cause a path to terminate. // // * Finally, add this iteration's results to the image. This has been done // // for you. 
// // TODO: perform one iteration of path tracing // generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths); // checkCUDAError("generate camera ray"); // int depth = 0; // PathSegment* dev_path_end = dev_paths + pixelcount; // int num_paths = dev_path_end - dev_paths; // // --- PathSegment Tracing Stage --- // // Shoot ray into scene, bounce between objects, push shading chunks // bool iterationComplete = false; // while (!iterationComplete) { // // clean shading chunks // hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // // tracing // dim3 numBlocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; // // use cached first intersection // if (CACHE_BOUNCE && !ANTI_ALIASING && depth == 0 && iter != 1) { // thrust::copy(thrust::device, dev_intersection_first_bounce, dev_intersection_first_bounce + num_paths, dev_intersections); // //sort by material // if (SORT_MATERIALS) { // thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_materials()); // } // }else { // // clean shading chunks // hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // computeIntersections <<<numBlocksPathSegmentTracing, blockSize1d>>> (depth, num_paths, dev_paths, dev_geoms, hst_scene->geoms.size(), dev_intersections); // checkCUDAError("trace one bounce"); // hipDeviceSynchronize(); // //cache first bounce // if (CACHE_BOUNCE && !ANTI_ALIASING && depth == 0 && iter == 1) { // thrust::copy(thrust::device, dev_intersections, dev_intersections + num_paths, dev_intersection_first_bounce); // } // //sort by material // if (SORT_MATERIALS) { // thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_materials()); // } // } // depth++; // #if DIRECT_LIGHTING // shadeDirectLighting<<<numBlocksPathSegmentTracing, blockSize1d>>>(iter, num_paths, dev_intersections, dev_paths, // dev_materials, dev_lights, hst_scene->lights.size()); // #else // shadeFakeMaterial<<<numBlocksPathSegmentTracing, blockSize1d>>> (iter, num_paths, dev_intersections, dev_paths, dev_materials); // #endif // dev_path_end = thrust::stable_partition(thrust::device, dev_paths, dev_path_end, end_condition()); // num_paths = dev_path_end - dev_paths; // if (num_paths == 0 || depth > traceDepth) { // iterationComplete = true; // } // // TODO: // // --- Shading Stage --- // // Shade path segments based on intersections and generate new rays by // // evaluating the BSDF. // // Start off with just a big kernel that handles all the different // // materials you have in the scenefile. // // TODO: compare between directly shading the path segments and shading // // path segments that have been reshuffled to be contiguous in memory. // // shade for direct lighting // // iterationComplete = true; // TODO: should be based off stream compaction results. 
// } // // Assemble this iteration and apply it to the image // dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; // finalGather<<<numBlocksPixels, blockSize1d>>>(num_paths, dev_image, dev_paths); // /////////////////////////////////////////////////////////////////////////// // // Send results to OpenGL buffer for rendering // hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image); // // Retrieve image from GPU // hipMemcpy(hst_scene->state.image.data(), dev_image, // pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); // checkCUDAError("pathtrace"); // } #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/device_vector.h> #include <thrust/partition.h> #include <thrust/copy.h> #include <thrust/sort.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define ANTI_ALIASING 1 #define CACHE_BOUNCE 0 #define SORT_MATERIALS 0 #define DEPTH_OF_FIELD 0 #define DIRECT_LIGHTING 0 #define LENS_RADIUS 0.07 #define FOCAL_DISTANCE 5 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc // ... 
static ShadeableIntersection* dev_first_bounce = NULL; #if DIRECT_LIGHTING static Geom* dev_lights = NULL; #endif void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need #if CACHE_BOUNCE || SORT_MATERIALS hipMalloc(&dev_first_bounce, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_first_bounce, 0, pixelcount * sizeof(ShadeableIntersection)); #endif #if DIRECT_LIGHTING hipMalloc(&dev_lights, scene->lights.size() * sizeof(Geom)); hipMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), hipMemcpyHostToDevice); #endif checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_paths); hipFree(dev_geoms); hipFree(dev_materials); hipFree(dev_intersections); // TODO: clean up any extra device memory you created #if CACHE_BOUNCE || SORT_MATERIALS hipFree(dev_first_bounce); #endif #if DIRECT_LIGHTING hipFree(dev_lights); #endif checkCUDAError("pathtraceFree"); } __host__ __device__ glm::vec3 pointOnPlane(Geom light, thrust::default_random_engine& rng) { thrust::uniform_real_distribution<float> u01(0, 1); glm::vec2 pt(u01(rng), u01(rng)); glm::vec3 planePt = glm::vec3((pt - glm::vec2(0.5f)), 0.f); return glm::vec3(light.transform * glm::vec4(planePt, 1.f)); } __host__ __device__ glm::vec3 convertDisk(const glm::vec2 &v) { float x = v.x; float y = v.y; float phi, r; float a = 2 * x - 1.f; float b = 2 * y - 1.f; if (a > -b) { if (a > b) { r = a; phi = (PI / 4) * (b / a); } else { r = b; phi = (PI / 4) * (2 - (a / b)); } } else { if (a < b) { r = -a; phi = (PI / 4) * (4 + (b / a)); } else { r = -b; if (b < 0 || b > 0) { phi = (PI / 4) * (6 - (a / b)); } else { phi = 0; } } } return glm::vec3(cosf(phi) * r, sinf(phi) * r, 0); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. 
* * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); //antialiasing by jittering the ray float x_aa = x; float y_aa = y; thrust::default_random_engine random = makeSeededRandomEngine(iter, index, traceDepth); #if ANTI_ALIASING thrust::uniform_real_distribution<float> u01(-1.0f, 1.0f); x_aa += u01(random); y_aa += u01(random); #endif segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x_aa - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y_aa - (float)cam.resolution.y * 0.5f) ); //for depth of field #if DEPTH_OF_FIELD thrust::uniform_real_distribution<float> u02(0, 1); glm::vec3 sample = convertDisk(glm::vec2(u02(random), u02(random))); glm::vec3 lens = (float)LENS_RADIUS * sample; glm::vec3 pt = segment.ray.origin + lens; glm::vec3 fp = segment.ray.origin + (float)FOCAL_DISTANCE * segment.ray.direction; segment.ray.origin = pt; segment.ray.direction = glm::normalize(fp - pt); #endif segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. __global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , int geoms_size , ShadeableIntersection * intersections ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == TRIANGLE) { t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. 
if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; } } } // shade for direct lighting __global__ void shadeDirectLighting( int iter , int num_paths , ShadeableIntersection* shadeableIntersections , PathSegment* pathSegments , Material* materials , Geom* lights , int num ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { if (pathSegments[idx].remainingBounces <= 0) { return; } ShadeableIntersection intersection = shadeableIntersections[idx]; thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces); PathSegment path = pathSegments[idx]; if (path.remainingBounces != 2 && path.remainingBounces > 0 && intersection.t > 0.f) { thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } else if (pathSegments[idx].remainingBounces == 1) { pathSegments[idx].remainingBounces -= 1; pathSegments[idx].color = glm::vec3(0.0f); } else { pathSegments[idx].remainingBounces -= 1; scatterRay(pathSegments[idx], pathSegments[idx].ray.origin + pathSegments[idx].ray.direction * intersection.t, intersection.surfaceNormal, material, rng); } } else if (path.remainingBounces == 2 && intersection.t > 0.f) { Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } else { scatterRay(path, path.ray.origin + path.ray.direction * intersection.t, intersection.surfaceNormal, material, rng); thrust::uniform_real_distribution<float> u01(0, 1); float r = u01(rng); int lightIdx = 0; if (num != 0) { lightIdx = glm::min((int)glm::floor(r * num), num - 1); } glm::vec3 lightPt = pointOnPlane(lights[lightIdx], rng); path.ray.direction = glm::normalize(lightPt - path.ray.origin); path.remainingBounces--; } } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } __global__ void shadeFakeMaterial ( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { if (pathSegments[idx].remainingBounces <= 0) { return; } ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. 
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } else if (pathSegments[idx].remainingBounces == 1) { pathSegments[idx].remainingBounces -= 1; pathSegments[idx].color = glm::vec3(0.0f); } else { pathSegments[idx].remainingBounces -= 1; scatterRay(pathSegments[idx], pathSegments[idx].ray.origin + pathSegments[idx].ray.direction * intersection.t, intersection.surfaceNormal, material, rng); } // If there was no intersection, color the ray black. } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } struct should_end { __host__ __device__ bool operator()(const PathSegment& pathSegment) { return (pathSegment.remainingBounces >= 0); } }; struct compare_materials { __host__ __device__ bool operator()(const ShadeableIntersection& m1, const ShadeableIntersection& m2) { return (m1.materialId > m2.materialId); } }; /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. 
// Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. // TODO: perform one iteration of path tracing hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; while (!iterationComplete) { // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; //cache first bounce if (CACHE_BOUNCE && !ANTI_ALIASING && depth == 0 && iter != 1) { thrust::copy(thrust::device, dev_first_bounce, dev_first_bounce + num_paths, dev_intersections); //sort by material if (SORT_MATERIALS) { thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_materials()); } } else { // clean shading chunks hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); hipLaunchKernelGGL(( computeIntersections), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, depth, num_paths, dev_paths, dev_geoms, hst_scene->geoms.size(), dev_intersections); checkCUDAError("trace one bounce"); hipDeviceSynchronize(); //cache first bounce if (CACHE_BOUNCE && !ANTI_ALIASING && depth == 0 && iter == 1) { thrust::copy(thrust::device, dev_intersections, dev_intersections + num_paths, dev_first_bounce); } //sort by material if (SORT_MATERIALS) { thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_materials()); } } depth++; #if DIRECT_LIGHTING hipLaunchKernelGGL(( shadeDirectLighting), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, iter, num_paths, dev_intersections, dev_paths, dev_materials, dev_lights, hst_scene->lights.size()); #else hipLaunchKernelGGL(( shadeFakeMaterial), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, iter, num_paths, dev_intersections, dev_paths, dev_materials); #endif dev_path_end = thrust::stable_partition(thrust::device, dev_paths, dev_path_end, should_end()); num_paths = dev_path_end - dev_paths; if (num_paths == 0 || depth > traceDepth) { iterationComplete = true; } } // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, pixelcount, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
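// Illustrative sketch (assumed standalone .cu program, not part of the original
// .hip/.cu pair above): the pathtrace loop prunes its path pool each bounce with
// thrust::stable_partition. The toy below shows the same pattern on a minimal
// struct; the names ToyPath and still_alive are hypothetical. Note the file above
// partitions on remainingBounces >= 0, while this toy uses a strict > 0 test
// simply so the compaction is visible.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/partition.h>
#include <thrust/execution_policy.h>

// Stand-in for PathSegment with only the field the predicate inspects.
struct ToyPath {
    int remainingBounces;
};

// Keep paths that still have bounces left; terminated ones land after newEnd.
struct still_alive {
    __host__ __device__
    bool operator()(const ToyPath& p) const {
        return p.remainingBounces > 0;
    }
};

int main() {
    thrust::host_vector<ToyPath> h(6);
    int bounces[6] = {3, 0, 2, 0, 1, 0};
    for (int i = 0; i < 6; ++i) {
        h[i].remainingBounces = bounces[i];
    }
    thrust::device_vector<ToyPath> paths = h;

    // Same pattern as the main loop: partition, then shrink the active range.
    thrust::device_vector<ToyPath>::iterator newEnd =
        thrust::stable_partition(thrust::device, paths.begin(), paths.end(),
                                 still_alive());
    int numActive = static_cast<int>(newEnd - paths.begin());
    std::printf("active paths after compaction: %d (expected 3)\n", numActive);
    return 0;
}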
6c901159f113bd85e0296175077f5bbc6f282140.cu
// #include <cstdio> // #include <cuda.h> // #include <cmath> // #include <thrust/execution_policy.h> // #include <thrust/random.h> // #include <thrust/remove.h> // #include "sceneStructs.h" // #include "scene.h" // #include "glm/glm.hpp" // #include "glm/gtx/norm.hpp" // #include "utilities.h" // #include "pathtrace.h" // #include "intersections.h" // #include "interactions.h" // #define ERRORCHECK 1 // #define ANTI_ALIASING 0 // #define CACHE_BOUNCE 0 // #define SORT_MATERIALS 0 // #define DEPTH_OF_FIELD 0 // #define DIRECT_LIGHTING 1 // #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) // #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) // void checkCUDAErrorFn(const char *msg, const char *file, int line) { // #if ERRORCHECK // cudaDeviceSynchronize(); // cudaError_t err = cudaGetLastError(); // if (cudaSuccess == err) { // return; // } // fprintf(stderr, "CUDA error"); // if (file) { // fprintf(stderr, " (%s:%d)", file, line); // } // fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); // # ifdef _WIN32 // getchar(); // # endif // exit(EXIT_FAILURE); // #endif // } // __host__ __device__ // thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { // int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); // return thrust::default_random_engine(h); // } // //Kernel that writes the image to the OpenGL PBO directly. // __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, // int iter, glm::vec3* image) { // int x = (blockIdx.x * blockDim.x) + threadIdx.x; // int y = (blockIdx.y * blockDim.y) + threadIdx.y; // if (x < resolution.x && y < resolution.y) { // int index = x + (y * resolution.x); // glm::vec3 pix = image[index]; // glm::ivec3 color; // color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); // color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); // color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // // Each thread writes one pixel location in the texture (textel) // pbo[index].w = 0; // pbo[index].x = color.x; // pbo[index].y = color.y; // pbo[index].z = color.z; // } // // checkCUDAError("sendImageToPBO"); // } // static Scene * hst_scene = NULL; // static glm::vec3 * dev_image = NULL; // static Geom * dev_geoms = NULL; // static Material * dev_materials = NULL; // static PathSegment * dev_paths = NULL; // static ShadeableIntersection * dev_intersections = NULL; // static ShadeableIntersection* dev_intersection_first_bounce = NULL; // #if DIRECT_LIGHTING // static Geom* dev_lights = NULL; // #endif // // TODO: static variables for device memory, any extra info you need, etc // // ... 
// void pathtraceInit(Scene *scene) { // hst_scene = scene; // const Camera &cam = hst_scene->state.camera; // const int pixelcount = cam.resolution.x * cam.resolution.y; // cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); // cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); // cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); // cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); // cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); // cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); // cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); // cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); // cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // // TODO: initialize any extra device memeory you need // #if CACHE_BOUNCE || SORT_MATERIALS // cudaMalloc(&dev_first_bounce, pixelcount * sizeof(ShadeableIntersection)); // cudaMemset(dev_first_bounce, 0, pixelcount * sizeof(ShadeableIntersection)); // #endif // #if DIRECT_LIGHTING // cudaMalloc(&dev_lights, scene->lights.size() * sizeof(Geom)); // cudaMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), cudaMemcpyHostToDevice); // #endif // checkCUDAError("pathtraceInit"); // } // void pathtraceFree() { // cudaFree(dev_image); // no-op if dev_image is null // cudaFree(dev_paths); // cudaFree(dev_geoms); // cudaFree(dev_materials); // cudaFree(dev_intersections); // // TODO: clean up any extra device memory you created // checkCUDAError("pathtraceFree"); // } // __host__ __device__ // glm::vec3 pointOnPlane(Geom light, thrust::default_random_engine& rng) { // thrust::uniform_real_distribution<float> u01(0, 1); // glm::vec2 pt(u01(rng), u01(rng)); // glm::vec3 planePt = glm::vec3((pt - glm::vec2(0.5f)), 0.f); // return glm::vec3(light.transform * glm::vec4(planePt, 1.f)); // } // /** // * Generate PathSegments with rays from the camera through the screen into the // * scene, which is the first bounce of rays. // * // * Antialiasing - add rays for sub-pixel sampling // * motion blur - jitter rays "in time" // * lens effect - jitter ray origin positions based on a lens // */ // __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) // { // int x = (blockIdx.x * blockDim.x) + threadIdx.x; // int y = (blockIdx.y * blockDim.y) + threadIdx.y; // if (x < cam.resolution.x && y < cam.resolution.y) { // int index = x + (y * cam.resolution.x); // PathSegment & segment = pathSegments[index]; // segment.ray.origin = cam.position; // segment.color = glm::vec3(1.0f, 1.0f, 1.0f); // // TODO: implement antialiasing by jittering the ray // segment.ray.direction = glm::normalize(cam.view // - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) // - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) // ); // segment.pixelIndex = index; // segment.remainingBounces = traceDepth; // } // } // // TODO: // // computeIntersections handles generating ray intersections ONLY. // // Generating new rays is handled in your shader(s). // // Feel free to modify the code below. 
// __global__ void computeIntersections(int depth, int num_paths, PathSegment * pathSegments, // Geom * geoms, int geoms_size, ShadeableIntersection * intersections) // { // int path_index = blockIdx.x * blockDim.x + threadIdx.x; // if (path_index < num_paths) // { // PathSegment pathSegment = pathSegments[path_index]; // float t; // glm::vec3 intersect_point; // glm::vec3 normal; // float t_min = FLT_MAX; // int hit_geom_index = -1; // bool outside = true; // glm::vec3 tmp_intersect; // glm::vec3 tmp_normal; // // naive parse through global geoms // for (int i = 0; i < geoms_size; i++) // { // Geom & geom = geoms[i]; // if (geom.type == CUBE) // { // t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); // } // else if (geom.type == SPHERE) // { // t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); // } // // TODO: add more intersection tests here... triangle? metaball? CSG? // else if (geom.type == TRIANGLE){ // t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); // } // // Compute the minimum t from the intersection tests to determine what // // scene geometry object was hit first. // if (t > 0.0f && t_min > t) // { // t_min = t; // hit_geom_index = i; // intersect_point = tmp_intersect; // normal = tmp_normal; // } // } // if (hit_geom_index == -1) // { // intersections[path_index].t = -1.0f; // } // else // { // //The ray hits something // intersections[path_index].t = t_min; // intersections[path_index].materialId = geoms[hit_geom_index].materialid; // intersections[path_index].surfaceNormal = normal; // } // } // } // // LOOK: "fake" shader demonstrating what you might do with the info in // // a ShadeableIntersection, as well as how to use thrust's random number // // generator. Observe that since the thrust random number generator basically // // adds "noise" to the iteration, the image should start off noisy and get // // cleaner as more iterations are computed. // // // // Note that this shader does NOT do a BSDF evaluation! // // Your shaders should handle that - this can allow techniques such as // // bump mapping. // __global__ void shadeFakeMaterial (int iter, int num_paths, ShadeableIntersection * shadeableIntersections, PathSegment * pathSegments, Material * materials) // { // int idx = blockIdx.x * blockDim.x + threadIdx.x; // if (idx < num_paths) // { // ShadeableIntersection intersection = shadeableIntersections[idx]; // if (intersection.t > 0.0f) { // if the intersection exists... // // Set up the RNG // // LOOK: this is how you use thrust's RNG! Please look at // // makeSeededRandomEngine as well. // thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); // // thrust::uniform_real_distribution<float> u01(0, 1); // Material material = materials[intersection.materialId]; // glm::vec3 materialColor = material.color; // // If the material indicates that the object was a light, "light" the ray // if (material.emittance > 0.0f) { // pathSegments[idx].color *= (materialColor * material.emittance); // pathSegments[idx].remainingBounces = 0; // } // else if (pathSegments[idx].remainingBounces == 1) { // pathSegments[idx].remainingBounces -= 1; // pathSegments[idx].color = glm::vec3(0.0f); // } // // Otherwise, do some pseudo-lighting computation. This is actually more // // like what you would expect from shading in a rasterizer like OpenGL. // // TODO: replace this! 
you should be able to start with basically a one-liner // else { // scatterRay(pathSegments[idx], pathSegments[idx].ray.origin + pathSegments[idx].ray.direction * intersection.t, intersection.surfaceNormal, // material, rng); // // float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); // // pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; // // pathSegments[idx].color *= u01(rng); // apply some noise because why not // } // // If there was no intersection, color the ray black. // // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // // used for opacity, in which case they can indicate "no opacity". // // This can be useful for post-processing and image compositing. // } else { // pathSegments[idx].color = glm::vec3(0.0f); // pathSegments[idx].remainingBounces = 0; // } // } // } // // Add the current iteration's output to the overall image // __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) // { // int index = (blockIdx.x * blockDim.x) + threadIdx.x; // if (index < nPaths) // { // PathSegment iterationPath = iterationPaths[index]; // image[iterationPath.pixelIndex] += iterationPath.color; // } // } // struct end_condition { // __host__ __device__ // bool operator()(const PathSegment& pathSegment) { // return (pathSegment.remainingBounces >= 0); // } // }; // struct compare_materials { // __host__ __device__ // bool operator()(const ShadeableIntersection& m1, const ShadeableIntersection& m2) { // return (m1.materialId > m2.materialId); // } // }; // __global__ void shadeDirectLighting(int iter, int num_paths, ShadeableIntersection* shadeableIntersections, PathSegment* pathSegments, Material* materials, Geom* lights, int num // ){ // int idx = blockIdx.x * blockDim.x + threadIdx.x; // if (idx < num_paths) // { // if (pathSegments[idx].remainingBounces <= 0) { // return; // } // ShadeableIntersection intersection = shadeableIntersections[idx]; // thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces); // PathSegment path = pathSegments[idx]; // if (path.remainingBounces != 2 && path.remainingBounces > 0 && intersection.t > 0.f) { // thrust::uniform_real_distribution<float> u01(0, 1); // Material material = materials[intersection.materialId]; // glm::vec3 materialColor = material.color; // // If the material indicates that the object was a light, "light" the ray // if (material.emittance > 0.0f) { // pathSegments[idx].color *= (materialColor * material.emittance); // pathSegments[idx].remainingBounces = 0; // } // else if (pathSegments[idx].remainingBounces == 1) { // pathSegments[idx].remainingBounces -= 1; // pathSegments[idx].color = glm::vec3(0.0f); // } // else { // pathSegments[idx].remainingBounces -= 1; // scatterRay(pathSegments[idx], pathSegments[idx].ray.origin + pathSegments[idx].ray.direction * intersection.t, intersection.surfaceNormal, // material, rng); // } // } // else if (path.remainingBounces == 2 && intersection.t > 0.f) { // Material material = materials[intersection.materialId]; // glm::vec3 materialColor = material.color; // // If the material indicates that the object was a light, "light" the ray // if (material.emittance > 0.0f) { // pathSegments[idx].color *= (materialColor * material.emittance); // pathSegments[idx].remainingBounces = 0; // } // else { // scatterRay(path, path.ray.origin + path.ray.direction * intersection.t, intersection.surfaceNormal, 
material, rng); // thrust::uniform_real_distribution<float> u01(0, 1); // float r = u01(rng); // int lightIdx = 0; // if (num != 0) { // lightIdx = glm::min((int)glm::floor(r * num), num - 1); // } // glm::vec3 lightPt = pointOnPlane(lights[lightIdx], rng); // path.ray.direction = glm::normalize(lightPt - path.ray.origin); // path.remainingBounces--; // } // } // else { // pathSegments[idx].color = glm::vec3(0.0f); // pathSegments[idx].remainingBounces = 0; // } // } // } // /** // * Wrapper for the __global__ call that sets up the kernel calls and does a ton // * of memory management // */ // void pathtrace(uchar4 *pbo, int frame, int iter) { // const int traceDepth = hst_scene->state.traceDepth; // const Camera &cam = hst_scene->state.camera; // const int pixelcount = cam.resolution.x * cam.resolution.y; // // 2D block for generating ray from camera // const dim3 blockSize2d(8, 8); // const dim3 blocksPerGrid2d( // (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, // (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // // 1D block for path tracing // const int blockSize1d = 128; // /////////////////////////////////////////////////////////////////////////// // // Recap: // // * Initialize array of path rays (using rays that come out of the camera) // // * You can pass the Camera object to that kernel. // // * Each path ray must carry at minimum a (ray, color) pair, // // * where color starts as the multiplicative identity, white = (1, 1, 1). // // * This has already been done for you. // // * For each depth: // // * Compute an intersection in the scene for each path ray. // // A very naive version of this has been implemented for you, but feel // // free to add more primitives and/or a better algorithm. // // Currently, intersection distance is recorded as a parametric distance, // // t, or a "distance along the ray." t = -1.0 indicates no intersection. // // * Color is attenuated (multiplied) by reflections off of any object // // * TODO: Stream compact away all of the terminated paths. // // You may use either your implementation or `thrust::remove_if` or its // // cousins. // // * Note that you can't really use a 2D kernel launch any more - switch // // to 1D. // // * TODO: Shade the rays that intersected something or didn't bottom out. // // That is, color the ray by performing a color computation according // // to the shader, then generate a new ray to continue the ray path. // // We recommend just updating the ray's PathSegment in place. // // Note that this step may come before or after stream compaction, // // since some shaders you write may also cause a path to terminate. // // * Finally, add this iteration's results to the image. This has been done // // for you. 
// // TODO: perform one iteration of path tracing // generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths); // checkCUDAError("generate camera ray"); // int depth = 0; // PathSegment* dev_path_end = dev_paths + pixelcount; // int num_paths = dev_path_end - dev_paths; // // --- PathSegment Tracing Stage --- // // Shoot ray into scene, bounce between objects, push shading chunks // bool iterationComplete = false; // while (!iterationComplete) { // // clean shading chunks // cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // // tracing // dim3 numBlocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; // // use cached first intersection // if (CACHE_BOUNCE && !ANTI_ALIASING && depth == 0 && iter != 1) { // thrust::copy(thrust::device, dev_intersection_first_bounce, dev_intersection_first_bounce + num_paths, dev_intersections); // //sort by material // if (SORT_MATERIALS) { // thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_materials()); // } // }else { // // clean shading chunks // cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // computeIntersections <<<numBlocksPathSegmentTracing, blockSize1d>>> (depth, num_paths, dev_paths, dev_geoms, hst_scene->geoms.size(), dev_intersections); // checkCUDAError("trace one bounce"); // cudaDeviceSynchronize(); // //cache first bounce // if (CACHE_BOUNCE && !ANTI_ALIASING && depth == 0 && iter == 1) { // thrust::copy(thrust::device, dev_intersections, dev_intersections + num_paths, dev_intersection_first_bounce); // } // //sort by material // if (SORT_MATERIALS) { // thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_materials()); // } // } // depth++; // #if DIRECT_LIGHTING // shadeDirectLighting<<<numBlocksPathSegmentTracing, blockSize1d>>>(iter, num_paths, dev_intersections, dev_paths, // dev_materials, dev_lights, hst_scene->lights.size()); // #else // shadeFakeMaterial<<<numBlocksPathSegmentTracing, blockSize1d>>> (iter, num_paths, dev_intersections, dev_paths, dev_materials); // #endif // dev_path_end = thrust::stable_partition(thrust::device, dev_paths, dev_path_end, end_condition()); // num_paths = dev_path_end - dev_paths; // if (num_paths == 0 || depth > traceDepth) { // iterationComplete = true; // } // // TODO: // // --- Shading Stage --- // // Shade path segments based on intersections and generate new rays by // // evaluating the BSDF. // // Start off with just a big kernel that handles all the different // // materials you have in the scenefile. // // TODO: compare between directly shading the path segments and shading // // path segments that have been reshuffled to be contiguous in memory. // // shade for direct lighting // // iterationComplete = true; // TODO: should be based off stream compaction results. 
// } // // Assemble this iteration and apply it to the image // dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; // finalGather<<<numBlocksPixels, blockSize1d>>>(num_paths, dev_image, dev_paths); // /////////////////////////////////////////////////////////////////////////// // // Send results to OpenGL buffer for rendering // sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image); // // Retrieve image from GPU // cudaMemcpy(hst_scene->state.image.data(), dev_image, // pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); // checkCUDAError("pathtrace"); // } #include <cstdio> #include <cuda.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/device_vector.h> #include <thrust/partition.h> #include <thrust/copy.h> #include <thrust/sort.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define ANTI_ALIASING 1 #define CACHE_BOUNCE 0 #define SORT_MATERIALS 0 #define DEPTH_OF_FIELD 0 #define DIRECT_LIGHTING 0 #define LENS_RADIUS 0.07 #define FOCAL_DISTANCE 5 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc // ... 
static ShadeableIntersection* dev_first_bounce = NULL; #if DIRECT_LIGHTING static Geom* dev_lights = NULL; #endif void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need #if CACHE_BOUNCE || SORT_MATERIALS cudaMalloc(&dev_first_bounce, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_first_bounce, 0, pixelcount * sizeof(ShadeableIntersection)); #endif #if DIRECT_LIGHTING cudaMalloc(&dev_lights, scene->lights.size() * sizeof(Geom)); cudaMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), cudaMemcpyHostToDevice); #endif checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_paths); cudaFree(dev_geoms); cudaFree(dev_materials); cudaFree(dev_intersections); // TODO: clean up any extra device memory you created #if CACHE_BOUNCE || SORT_MATERIALS cudaFree(dev_first_bounce); #endif #if DIRECT_LIGHTING cudaFree(dev_lights); #endif checkCUDAError("pathtraceFree"); } __host__ __device__ glm::vec3 pointOnPlane(Geom light, thrust::default_random_engine& rng) { thrust::uniform_real_distribution<float> u01(0, 1); glm::vec2 pt(u01(rng), u01(rng)); glm::vec3 planePt = glm::vec3((pt - glm::vec2(0.5f)), 0.f); return glm::vec3(light.transform * glm::vec4(planePt, 1.f)); } __host__ __device__ glm::vec3 convertDisk(const glm::vec2 &v) { float x = v.x; float y = v.y; float phi, r; float a = 2 * x - 1.f; float b = 2 * y - 1.f; if (a > -b) { if (a > b) { r = a; phi = (PI / 4) * (b / a); } else { r = b; phi = (PI / 4) * (2 - (a / b)); } } else { if (a < b) { r = -a; phi = (PI / 4) * (4 + (b / a)); } else { r = -b; if (b < 0 || b > 0) { phi = (PI / 4) * (6 - (a / b)); } else { phi = 0; } } } return glm::vec3(cosf(phi) * r, sinf(phi) * r, 0); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. 
* * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); //antialiasing by jittering the ray float x_aa = x; float y_aa = y; thrust::default_random_engine random = makeSeededRandomEngine(iter, index, traceDepth); #if ANTI_ALIASING thrust::uniform_real_distribution<float> u01(-1.0f, 1.0f); x_aa += u01(random); y_aa += u01(random); #endif segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x_aa - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y_aa - (float)cam.resolution.y * 0.5f) ); //for depth of field #if DEPTH_OF_FIELD thrust::uniform_real_distribution<float> u02(0, 1); glm::vec3 sample = convertDisk(glm::vec2(u02(random), u02(random))); glm::vec3 lens = (float)LENS_RADIUS * sample; glm::vec3 pt = segment.ray.origin + lens; glm::vec3 fp = segment.ray.origin + (float)FOCAL_DISTANCE * segment.ray.direction; segment.ray.origin = pt; segment.ray.direction = glm::normalize(fp - pt); #endif segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. __global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , int geoms_size , ShadeableIntersection * intersections ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == TRIANGLE) { t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. 
if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; } } } // shade for direct lighting __global__ void shadeDirectLighting( int iter , int num_paths , ShadeableIntersection* shadeableIntersections , PathSegment* pathSegments , Material* materials , Geom* lights , int num ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { if (pathSegments[idx].remainingBounces <= 0) { return; } ShadeableIntersection intersection = shadeableIntersections[idx]; thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces); PathSegment path = pathSegments[idx]; if (path.remainingBounces != 2 && path.remainingBounces > 0 && intersection.t > 0.f) { thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } else if (pathSegments[idx].remainingBounces == 1) { pathSegments[idx].remainingBounces -= 1; pathSegments[idx].color = glm::vec3(0.0f); } else { pathSegments[idx].remainingBounces -= 1; scatterRay(pathSegments[idx], pathSegments[idx].ray.origin + pathSegments[idx].ray.direction * intersection.t, intersection.surfaceNormal, material, rng); } } else if (path.remainingBounces == 2 && intersection.t > 0.f) { Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } else { scatterRay(path, path.ray.origin + path.ray.direction * intersection.t, intersection.surfaceNormal, material, rng); thrust::uniform_real_distribution<float> u01(0, 1); float r = u01(rng); int lightIdx = 0; if (num != 0) { lightIdx = glm::min((int)glm::floor(r * num), num - 1); } glm::vec3 lightPt = pointOnPlane(lights[lightIdx], rng); path.ray.direction = glm::normalize(lightPt - path.ray.origin); path.remainingBounces--; } } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } __global__ void shadeFakeMaterial ( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { if (pathSegments[idx].remainingBounces <= 0) { return; } ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. 
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } else if (pathSegments[idx].remainingBounces == 1) { pathSegments[idx].remainingBounces -= 1; pathSegments[idx].color = glm::vec3(0.0f); } else { pathSegments[idx].remainingBounces -= 1; scatterRay(pathSegments[idx], pathSegments[idx].ray.origin + pathSegments[idx].ray.direction * intersection.t, intersection.surfaceNormal, material, rng); } // If there was no intersection, color the ray black. } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } struct should_end { __host__ __device__ bool operator()(const PathSegment& pathSegment) { return (pathSegment.remainingBounces >= 0); } }; struct compare_materials { __host__ __device__ bool operator()(const ShadeableIntersection& m1, const ShadeableIntersection& m2) { return (m1.materialId > m2.materialId); } }; /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. 
// Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. // TODO: perform one iteration of path tracing generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; while (!iterationComplete) { // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; //cache first bounce if (CACHE_BOUNCE && !ANTI_ALIASING && depth == 0 && iter != 1) { thrust::copy(thrust::device, dev_first_bounce, dev_first_bounce + num_paths, dev_intersections); //sort by material if (SORT_MATERIALS) { thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_materials()); } } else { // clean shading chunks cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); computeIntersections<<<numblocksPathSegmentTracing, blockSize1d>>>(depth, num_paths, dev_paths, dev_geoms, hst_scene->geoms.size(), dev_intersections); checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); //cache first bounce if (CACHE_BOUNCE && !ANTI_ALIASING && depth == 0 && iter == 1) { thrust::copy(thrust::device, dev_intersections, dev_intersections + num_paths, dev_first_bounce); } //sort by material if (SORT_MATERIALS) { thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_materials()); } } depth++; #if DIRECT_LIGHTING shadeDirectLighting<<<numblocksPathSegmentTracing, blockSize1d>>>(iter, num_paths, dev_intersections, dev_paths, dev_materials, dev_lights, hst_scene->lights.size()); #else shadeFakeMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> (iter, num_paths, dev_intersections, dev_paths, dev_materials); #endif dev_path_end = thrust::stable_partition(thrust::device, dev_paths, dev_path_end, should_end()); num_paths = dev_path_end - dev_paths; if (num_paths == 0 || depth > traceDepth) { iterationComplete = true; } } // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather<<<numBlocksPixels, blockSize1d>>>(pixelcount, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
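The pathtrace() loop above compacts terminated paths after each bounce with thrust::stable_partition. The standalone sketch below illustrates that compaction step in isolation; it is a minimal illustration, not project code: SimplePath and keep_alive are simplified stand-ins for the file's PathSegment type and partition functor, and the predicate here keeps a path only while remainingBounces > 0 so that finished paths actually drop out of the active range.

#include <thrust/device_vector.h>
#include <thrust/partition.h>
#include <thrust/execution_policy.h>
#include <cstdio>

// Simplified stand-in for PathSegment (illustrative only).
struct SimplePath {
    int pixelIndex;
    int remainingBounces;
};

// Predicate: a path stays in the active range only while it has bounces left.
struct keep_alive {
    __host__ __device__
    bool operator()(const SimplePath& p) const {
        return p.remainingBounces > 0;
    }
};

int main() {
    const int n = 8;
    thrust::device_vector<SimplePath> paths(n);
    for (int i = 0; i < n; ++i) {
        // Every other path is already terminated (0 bounces remaining).
        paths[i] = SimplePath{ i, (i % 2 == 0) ? 3 : 0 };
    }

    // Stable partition keeps the relative order of surviving paths and
    // returns an iterator to the new logical end of the active range,
    // exactly how dev_path_end is recomputed in the loop above.
    auto new_end = thrust::stable_partition(thrust::device,
                                            paths.begin(), paths.end(),
                                            keep_alive());
    int num_active = static_cast<int>(new_end - paths.begin());
    printf("active paths after compaction: %d of %d\n", num_active, n);
    return 0;
}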
eb115e0d2d6b3a1682728d15df949b1e361471bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <set> #include <vector> #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { namespace math { template <typename T> struct SelectedRowsAdd<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, const framework::SelectedRows& input2, framework::SelectedRows* output) { auto in1_height = input1.height(); PADDLE_ENFORCE_EQ(in1_height, input2.height()); output->set_height(in1_height); framework::Vector<int64_t> in1_rows(input1.rows()); auto& in2_rows = input2.rows(); std::vector<int64_t> out_rows; out_rows.reserve(in1_rows.size() + in2_rows.size()); // concat rows out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end()); out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end()); output->set_rows(out_rows); auto* out_value = output->mutable_value(); auto& in1_value = input1.value(); auto& in2_value = input2.value(); auto in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size()); PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size()); auto* out_data = out_value->data<T>(); auto* in1_data = in1_value.data<T>(); auto in1_place = input1.place(); PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true); auto in2_place = input2.place(); PADDLE_ENFORCE_EQ(platform::is_gpu_place(in2_place), true); auto out_place = context.GetPlace(); PADDLE_ENFORCE_EQ(platform::is_gpu_place(out_place), true); memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, out_place), out_data, BOOST_GET_CONST(platform::CUDAPlace, in1_place), in1_data, in1_value.numel() * sizeof(T), context.stream()); auto* in2_data = in2_value.data<T>(); memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, out_place), out_data + in1_value.numel(), BOOST_GET_CONST(platform::CUDAPlace, in2_place), in2_data, in2_value.numel() * sizeof(T), context.stream()); } }; template struct SelectedRowsAdd<platform::CUDADeviceContext, float>; template struct SelectedRowsAdd<platform::CUDADeviceContext, double>; namespace { template <typename T, int block_size> __global__ void SelectedRowsAddTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; tensor_out += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we can not use // tensor_out[index] += selected_rows[index]; Instead, we have to use // AtomicAdd to avoid concurrent write error. 
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]); } } } // namespace template <typename T> struct SelectedRowsAddTensor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, const framework::Tensor& input2, framework::Tensor* output) { auto in1_height = input1.height(); auto in2_dims = input2.dims(); auto out_dims = output->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); PADDLE_ENFORCE_EQ(in1_height, out_dims[0]); auto& in1_value = input1.value(); auto& in1_rows = input1.rows(); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height); PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height); auto* in1_data = in1_value.data<T>(); auto* in2_data = input2.data<T>(); auto* out_data = output->data<T>(); SetConstant<platform::CUDADeviceContext, T> functor; functor(context, output, static_cast<T>(0)); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(in1_rows.size(), 1); hipLaunchKernelGGL(( SelectedRowsAddTensorKernel< T, block_size>), dim3(grid), dim3(threads), 0, context.stream(), in1_data, in1_rows.CUDAData(context.GetPlace()), out_data, in1_row_numel); auto out_eigen = framework::EigenVector<T>::Flatten(*output); auto in2_eigen = framework::EigenVector<T>::Flatten(input2); out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen; } }; template struct SelectedRowsAddTensor<platform::CUDADeviceContext, float>; template struct SelectedRowsAddTensor<platform::CUDADeviceContext, double>; template struct SelectedRowsAdd<platform::CUDADeviceContext, platform::float16>; template struct SelectedRowsAddTensor<platform::CUDADeviceContext, platform::float16>; template <typename T> struct SelectedRowsAddTo<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, const int64_t input2_offset, framework::SelectedRows* input2) { auto in1_height = input1.height(); PADDLE_ENFORCE_EQ(in1_height, input2->height()); auto& in1_rows = input1.rows(); auto& in2_rows = *(input2->mutable_rows()); auto& in1_value = input1.value(); auto* in2_value = input2->mutable_value(); // concat rows if (in1_rows.size()) { in2_rows.Extend(in1_rows.begin(), in1_rows.end()); } auto in1_place = input1.place(); PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true); auto in2_place = input2->place(); PADDLE_ENFORCE_EQ(platform::is_gpu_place(in2_place), true); auto* in1_data = in1_value.data<T>(); auto* in2_data = in2_value->data<T>(); memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, in2_place), in2_data + input2_offset, BOOST_GET_CONST(platform::CUDAPlace, in1_place), in1_data, in1_value.numel() * sizeof(T), context.stream()); } }; template struct SelectedRowsAddTo<platform::CUDADeviceContext, float>; template struct SelectedRowsAddTo<platform::CUDADeviceContext, double>; template struct SelectedRowsAddTo<platform::CUDADeviceContext, int>; template struct SelectedRowsAddTo<platform::CUDADeviceContext, int64_t>; template struct SelectedRowsAddTo<platform::CUDADeviceContext, platform::float16>; namespace { template <typename T, int block_size> __global__ void SelectedRowsAddToTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; tensor_out += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since 
index in rows of SelectedRows can be duplicate, we have to use // Atomic Operation to avoid concurrent write error. paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]); } } } // namespace template <typename T> struct SelectedRowsAddToTensor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, framework::Tensor* input2) { auto in1_height = input1.height(); auto in2_dims = input2->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); auto& in1_value = input1.value(); auto& in1_rows = input1.rows(); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height); auto* in1_data = in1_value.data<T>(); auto* in2_data = input2->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(in1_rows.size(), 1); hipLaunchKernelGGL(( SelectedRowsAddToTensorKernel< T, block_size>), dim3(grid), dim3(threads), 0, context.stream(), in1_data, in1_rows.CUDAData(context.GetPlace()), in2_data, in1_row_numel); } }; template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, float>; template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, double>; template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int>; template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int64_t>; template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, platform::float16>; namespace scatter { template <typename T, int block_size> __global__ void MergeAddKernel(const T* input, const int64_t* input_rows, T* out, const int64_t* out_rows, size_t out_rows_size, int64_t row_numel) { const int ty = blockIdx.x; int tid = threadIdx.x; __shared__ size_t out_idx; if (tid == 0) { for (size_t i = 0; i < out_rows_size; i++) { if (input_rows[ty] == out_rows[i]) { out_idx = i; } } } __syncthreads(); input += ty * row_numel; out += out_idx * row_numel; for (int index = tid; index < row_numel; index += block_size) { paddle::platform::CudaAtomicAdd(out + index, input[index]); } } template <typename T> struct MergeAdd<platform::CUDADeviceContext, T> { framework::SelectedRows operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input, const bool sorted_result = false) { framework::SelectedRows out; (*this)(context, input, &out); return out; } void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input, framework::SelectedRows* output, const bool sorted_result = false) { framework::Vector<int64_t> input_rows(input.rows()); if (input_rows.size() == 0) { return; } framework::SelectedRows& out = *output; std::set<int64_t> row_set(input_rows.begin(), input_rows.end()); std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end()); framework::Vector<int64_t> merge_rows(merge_rows_cpu); auto input_width = input.value().dims()[1]; out.set_rows(merge_rows); out.set_height(input.height()); out.mutable_value()->mutable_data<T>( framework::make_ddim( {static_cast<int64_t>(merge_rows.size()), input_width}), context.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> constant_functor; constant_functor(context, out.mutable_value(), static_cast<T>(0)); auto* out_data = out.mutable_value()->data<T>(); auto* input_data = input.value().data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid1(input_rows.size(), 1); hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(), input_data, 
input_rows.CUDAData(context.GetPlace()), out_data, out.mutable_rows()->CUDAMutableData(context.GetPlace()), out.rows().size(), input_width); } void operator()(const platform::CUDADeviceContext& context, const std::vector<const framework::SelectedRows*>& inputs, framework::SelectedRows* output, const bool sorted_result = false) { if (inputs.size() == 0) { VLOG(3) << "no input! return"; return; } const framework::SelectedRows* has_value_input = nullptr; for (auto* in : inputs) { if (in->rows().size() > 0) { has_value_input = in; break; } } if (has_value_input == nullptr) { VLOG(3) << "no input has value! just return" << std::endl; return; } auto input_width = has_value_input->value().dims()[1]; auto input_height = has_value_input->height(); framework::SelectedRows& out = *output; std::set<int64_t> merged_row_set; for (auto* input : inputs) { if (input->rows().size() == 0) { continue; } PADDLE_ENFORCE_EQ(input_width, input->value().dims()[1], "all input should have same " "dimension except for the first one"); PADDLE_ENFORCE_EQ(input_height, input->height(), "all input should have same height"); merged_row_set.insert(input->rows().begin(), input->rows().end()); } std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(), merged_row_set.end()); framework::Vector<int64_t> merge_rows(merge_rows_cpu); out.set_rows(merge_rows); out.set_height(input_height); out.mutable_value()->mutable_data<T>( framework::make_ddim( {static_cast<int64_t>(merge_rows.size()), input_width}), context.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> constant_functor; constant_functor(context, out.mutable_value(), static_cast<T>(0)); auto* out_data = out.mutable_value()->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); for (auto* input : inputs) { if (input->rows().size() == 0) { continue; } auto* input_data = input->value().data<T>(); auto& input_rows = input->rows(); dim3 grid1(input_rows.size(), 1); hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(), input_data, input_rows.CUDAData(context.GetPlace()), out_data, out.mutable_rows()->CUDAMutableData(context.GetPlace()), out.rows().size(), input_width); } } }; template struct MergeAdd<platform::CUDADeviceContext, float>; template struct MergeAdd<platform::CUDADeviceContext, double>; template struct MergeAdd<platform::CUDADeviceContext, int>; template struct MergeAdd<platform::CUDADeviceContext, int64_t>; template struct MergeAdd<platform::CUDADeviceContext, platform::float16>; template <typename T, int block_size> __global__ void UpdateToTensorKernel(const T* selected_rows, const int64_t* rows, const ScatterOps& op, T* tensor_out, int64_t row_numel) { const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; tensor_out += rows[ty] * row_numel; // FIXME(typhoonzero): use macro fix the below messy code. 
switch (op) { case ScatterOps::ASSIGN: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] = selected_rows[index]; } break; case ScatterOps::ADD: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] += selected_rows[index]; } break; case ScatterOps::SUB: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] -= selected_rows[index]; } break; case ScatterOps::SUBBY: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] = selected_rows[index] - tensor_out[index]; } break; case ScatterOps::MUL: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] *= selected_rows[index]; } break; case ScatterOps::DIV: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] /= selected_rows[index]; } break; case ScatterOps::DIVBY: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] = selected_rows[index] / tensor_out[index]; } break; } } template <typename T> struct UpdateToTensor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const ScatterOps& op, const framework::SelectedRows& input1, framework::Tensor* input2) { // NOTE: Use SelectedRowsAddToTensor for better performance // no additional MergeAdd called. MergeAdd<platform::CUDADeviceContext, T> merge_func; auto merged_in1 = merge_func(context, input1); auto in1_height = merged_in1.height(); auto in2_dims = input2->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); auto& in1_value = merged_in1.value(); auto& in1_rows = merged_in1.rows(); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height); auto* in1_data = in1_value.template data<T>(); auto* in2_data = input2->data<T>(); dim3 threads(platform::PADDLE_CUDA_NUM_THREADS, 1); dim3 grid(in1_rows.size(), 1); hipLaunchKernelGGL(( UpdateToTensorKernel<T, platform::PADDLE_CUDA_NUM_THREADS>), dim3(grid), dim3(threads), 0, context.stream(), in1_data, in1_rows.cuda_data(), op, in2_data, in1_row_numel); } }; } // namespace scatter } // namespace math } // namespace operators } // namespace paddle
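SelectedRowsAddToTensorKernel above scatters each selected row into a dense tensor with one block per row and an atomicAdd per element, because the same destination row index can occur more than once. The standalone sketch below reproduces only that access pattern under hypothetical names (scatterAddRows, plain cudaMalloc/cudaMemcpy host code in place of Paddle's tensor and memory::Copy machinery); it is an illustration of the technique, not Paddle code.

#include <cstdio>
#include <cuda_runtime.h>

// One block per selected row; threads stride across the row. atomicAdd is
// required because the same destination row id may appear more than once.
__global__ void scatterAddRows(const float* rows_data, const int64_t* row_ids,
                               float* dense_out, int64_t row_numel) {
    const int r = blockIdx.x;
    const float* src = rows_data + r * row_numel;
    float* dst = dense_out + row_ids[r] * row_numel;
    for (int i = threadIdx.x; i < row_numel; i += blockDim.x) {
        atomicAdd(dst + i, src[i]);
    }
}

int main() {
    const int64_t row_numel = 4, num_selected = 3, dense_rows = 2;
    // Destination row 0 is selected twice on purpose to show the duplicate case.
    const int64_t h_ids[num_selected] = {0, 1, 0};
    float h_rows[num_selected * row_numel];
    for (int i = 0; i < num_selected * row_numel; ++i) h_rows[i] = 1.0f;

    float* d_rows; int64_t* d_ids; float* d_out;
    cudaMalloc(&d_rows, sizeof(h_rows));
    cudaMalloc(&d_ids, sizeof(h_ids));
    cudaMalloc(&d_out, dense_rows * row_numel * sizeof(float));
    cudaMemcpy(d_rows, h_rows, sizeof(h_rows), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ids, h_ids, sizeof(h_ids), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, dense_rows * row_numel * sizeof(float));

    scatterAddRows<<<num_selected, 128>>>(d_rows, d_ids, d_out, row_numel);

    float h_out[dense_rows * row_numel];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    // Expect row 0 == 2.0 everywhere (two contributions) and row 1 == 1.0.
    for (int r = 0; r < dense_rows; ++r) {
        printf("row %d:", r);
        for (int c = 0; c < row_numel; ++c) printf(" %.1f", h_out[r * row_numel + c]);
        printf("\n");
    }
    cudaFree(d_rows); cudaFree(d_ids); cudaFree(d_out);
    return 0;
}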
eb115e0d2d6b3a1682728d15df949b1e361471bb.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <set> #include <vector> #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { namespace math { template <typename T> struct SelectedRowsAdd<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, const framework::SelectedRows& input2, framework::SelectedRows* output) { auto in1_height = input1.height(); PADDLE_ENFORCE_EQ(in1_height, input2.height()); output->set_height(in1_height); framework::Vector<int64_t> in1_rows(input1.rows()); auto& in2_rows = input2.rows(); std::vector<int64_t> out_rows; out_rows.reserve(in1_rows.size() + in2_rows.size()); // concat rows out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end()); out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end()); output->set_rows(out_rows); auto* out_value = output->mutable_value(); auto& in1_value = input1.value(); auto& in2_value = input2.value(); auto in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size()); PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size()); auto* out_data = out_value->data<T>(); auto* in1_data = in1_value.data<T>(); auto in1_place = input1.place(); PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true); auto in2_place = input2.place(); PADDLE_ENFORCE_EQ(platform::is_gpu_place(in2_place), true); auto out_place = context.GetPlace(); PADDLE_ENFORCE_EQ(platform::is_gpu_place(out_place), true); memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, out_place), out_data, BOOST_GET_CONST(platform::CUDAPlace, in1_place), in1_data, in1_value.numel() * sizeof(T), context.stream()); auto* in2_data = in2_value.data<T>(); memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, out_place), out_data + in1_value.numel(), BOOST_GET_CONST(platform::CUDAPlace, in2_place), in2_data, in2_value.numel() * sizeof(T), context.stream()); } }; template struct SelectedRowsAdd<platform::CUDADeviceContext, float>; template struct SelectedRowsAdd<platform::CUDADeviceContext, double>; namespace { template <typename T, int block_size> __global__ void SelectedRowsAddTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; tensor_out += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we can not use // tensor_out[index] += selected_rows[index]; Instead, we have to use // AtomicAdd to avoid concurrent write error. 
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]); } } } // namespace template <typename T> struct SelectedRowsAddTensor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, const framework::Tensor& input2, framework::Tensor* output) { auto in1_height = input1.height(); auto in2_dims = input2.dims(); auto out_dims = output->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); PADDLE_ENFORCE_EQ(in1_height, out_dims[0]); auto& in1_value = input1.value(); auto& in1_rows = input1.rows(); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height); PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height); auto* in1_data = in1_value.data<T>(); auto* in2_data = input2.data<T>(); auto* out_data = output->data<T>(); SetConstant<platform::CUDADeviceContext, T> functor; functor(context, output, static_cast<T>(0)); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(in1_rows.size(), 1); SelectedRowsAddTensorKernel< T, block_size><<<grid, threads, 0, context.stream()>>>( in1_data, in1_rows.CUDAData(context.GetPlace()), out_data, in1_row_numel); auto out_eigen = framework::EigenVector<T>::Flatten(*output); auto in2_eigen = framework::EigenVector<T>::Flatten(input2); out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen; } }; template struct SelectedRowsAddTensor<platform::CUDADeviceContext, float>; template struct SelectedRowsAddTensor<platform::CUDADeviceContext, double>; template struct SelectedRowsAdd<platform::CUDADeviceContext, platform::float16>; template struct SelectedRowsAddTensor<platform::CUDADeviceContext, platform::float16>; template <typename T> struct SelectedRowsAddTo<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, const int64_t input2_offset, framework::SelectedRows* input2) { auto in1_height = input1.height(); PADDLE_ENFORCE_EQ(in1_height, input2->height()); auto& in1_rows = input1.rows(); auto& in2_rows = *(input2->mutable_rows()); auto& in1_value = input1.value(); auto* in2_value = input2->mutable_value(); // concat rows if (in1_rows.size()) { in2_rows.Extend(in1_rows.begin(), in1_rows.end()); } auto in1_place = input1.place(); PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true); auto in2_place = input2->place(); PADDLE_ENFORCE_EQ(platform::is_gpu_place(in2_place), true); auto* in1_data = in1_value.data<T>(); auto* in2_data = in2_value->data<T>(); memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, in2_place), in2_data + input2_offset, BOOST_GET_CONST(platform::CUDAPlace, in1_place), in1_data, in1_value.numel() * sizeof(T), context.stream()); } }; template struct SelectedRowsAddTo<platform::CUDADeviceContext, float>; template struct SelectedRowsAddTo<platform::CUDADeviceContext, double>; template struct SelectedRowsAddTo<platform::CUDADeviceContext, int>; template struct SelectedRowsAddTo<platform::CUDADeviceContext, int64_t>; template struct SelectedRowsAddTo<platform::CUDADeviceContext, platform::float16>; namespace { template <typename T, int block_size> __global__ void SelectedRowsAddToTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; tensor_out += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows 
can be duplicate, we have to use // Atomic Operation to avoid concurrent write error. paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]); } } } // namespace template <typename T> struct SelectedRowsAddToTensor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, framework::Tensor* input2) { auto in1_height = input1.height(); auto in2_dims = input2->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); auto& in1_value = input1.value(); auto& in1_rows = input1.rows(); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height); auto* in1_data = in1_value.data<T>(); auto* in2_data = input2->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(in1_rows.size(), 1); SelectedRowsAddToTensorKernel< T, block_size><<<grid, threads, 0, context.stream()>>>( in1_data, in1_rows.CUDAData(context.GetPlace()), in2_data, in1_row_numel); } }; template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, float>; template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, double>; template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int>; template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int64_t>; template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, platform::float16>; namespace scatter { template <typename T, int block_size> __global__ void MergeAddKernel(const T* input, const int64_t* input_rows, T* out, const int64_t* out_rows, size_t out_rows_size, int64_t row_numel) { const int ty = blockIdx.x; int tid = threadIdx.x; __shared__ size_t out_idx; if (tid == 0) { for (size_t i = 0; i < out_rows_size; i++) { if (input_rows[ty] == out_rows[i]) { out_idx = i; } } } __syncthreads(); input += ty * row_numel; out += out_idx * row_numel; for (int index = tid; index < row_numel; index += block_size) { paddle::platform::CudaAtomicAdd(out + index, input[index]); } } template <typename T> struct MergeAdd<platform::CUDADeviceContext, T> { framework::SelectedRows operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input, const bool sorted_result = false) { framework::SelectedRows out; (*this)(context, input, &out); return out; } void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input, framework::SelectedRows* output, const bool sorted_result = false) { framework::Vector<int64_t> input_rows(input.rows()); if (input_rows.size() == 0) { return; } framework::SelectedRows& out = *output; std::set<int64_t> row_set(input_rows.begin(), input_rows.end()); std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end()); framework::Vector<int64_t> merge_rows(merge_rows_cpu); auto input_width = input.value().dims()[1]; out.set_rows(merge_rows); out.set_height(input.height()); out.mutable_value()->mutable_data<T>( framework::make_ddim( {static_cast<int64_t>(merge_rows.size()), input_width}), context.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> constant_functor; constant_functor(context, out.mutable_value(), static_cast<T>(0)); auto* out_data = out.mutable_value()->data<T>(); auto* input_data = input.value().data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid1(input_rows.size(), 1); MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>( input_data, input_rows.CUDAData(context.GetPlace()), out_data, 
out.mutable_rows()->CUDAMutableData(context.GetPlace()), out.rows().size(), input_width); } void operator()(const platform::CUDADeviceContext& context, const std::vector<const framework::SelectedRows*>& inputs, framework::SelectedRows* output, const bool sorted_result = false) { if (inputs.size() == 0) { VLOG(3) << "no input! return"; return; } const framework::SelectedRows* has_value_input = nullptr; for (auto* in : inputs) { if (in->rows().size() > 0) { has_value_input = in; break; } } if (has_value_input == nullptr) { VLOG(3) << "no input has value! just return" << std::endl; return; } auto input_width = has_value_input->value().dims()[1]; auto input_height = has_value_input->height(); framework::SelectedRows& out = *output; std::set<int64_t> merged_row_set; for (auto* input : inputs) { if (input->rows().size() == 0) { continue; } PADDLE_ENFORCE_EQ(input_width, input->value().dims()[1], "all input should have same " "dimension except for the first one"); PADDLE_ENFORCE_EQ(input_height, input->height(), "all input should have same height"); merged_row_set.insert(input->rows().begin(), input->rows().end()); } std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(), merged_row_set.end()); framework::Vector<int64_t> merge_rows(merge_rows_cpu); out.set_rows(merge_rows); out.set_height(input_height); out.mutable_value()->mutable_data<T>( framework::make_ddim( {static_cast<int64_t>(merge_rows.size()), input_width}), context.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> constant_functor; constant_functor(context, out.mutable_value(), static_cast<T>(0)); auto* out_data = out.mutable_value()->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); for (auto* input : inputs) { if (input->rows().size() == 0) { continue; } auto* input_data = input->value().data<T>(); auto& input_rows = input->rows(); dim3 grid1(input_rows.size(), 1); MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>( input_data, input_rows.CUDAData(context.GetPlace()), out_data, out.mutable_rows()->CUDAMutableData(context.GetPlace()), out.rows().size(), input_width); } } }; template struct MergeAdd<platform::CUDADeviceContext, float>; template struct MergeAdd<platform::CUDADeviceContext, double>; template struct MergeAdd<platform::CUDADeviceContext, int>; template struct MergeAdd<platform::CUDADeviceContext, int64_t>; template struct MergeAdd<platform::CUDADeviceContext, platform::float16>; template <typename T, int block_size> __global__ void UpdateToTensorKernel(const T* selected_rows, const int64_t* rows, const ScatterOps& op, T* tensor_out, int64_t row_numel) { const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; tensor_out += rows[ty] * row_numel; // FIXME(typhoonzero): use macro fix the below messy code. 
switch (op) { case ScatterOps::ASSIGN: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] = selected_rows[index]; } break; case ScatterOps::ADD: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] += selected_rows[index]; } break; case ScatterOps::SUB: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] -= selected_rows[index]; } break; case ScatterOps::SUBBY: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] = selected_rows[index] - tensor_out[index]; } break; case ScatterOps::MUL: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] *= selected_rows[index]; } break; case ScatterOps::DIV: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] /= selected_rows[index]; } break; case ScatterOps::DIVBY: for (int index = tid; index < row_numel; index += block_size) { tensor_out[index] = selected_rows[index] / tensor_out[index]; } break; } } template <typename T> struct UpdateToTensor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const ScatterOps& op, const framework::SelectedRows& input1, framework::Tensor* input2) { // NOTE: Use SelectedRowsAddToTensor for better performance // no additional MergeAdd called. MergeAdd<platform::CUDADeviceContext, T> merge_func; auto merged_in1 = merge_func(context, input1); auto in1_height = merged_in1.height(); auto in2_dims = input2->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); auto& in1_value = merged_in1.value(); auto& in1_rows = merged_in1.rows(); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height); auto* in1_data = in1_value.template data<T>(); auto* in2_data = input2->data<T>(); dim3 threads(platform::PADDLE_CUDA_NUM_THREADS, 1); dim3 grid(in1_rows.size(), 1); UpdateToTensorKernel<T, platform::PADDLE_CUDA_NUM_THREADS><<< grid, threads, 0, context.stream()>>>(in1_data, in1_rows.cuda_data(), op, in2_data, in1_row_numel); } }; } // namespace scatter } // namespace math } // namespace operators } // namespace paddle
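// The SelectedRowsAdd*/MergeAdd kernels above accumulate sparse rows into a dense
// buffer with atomicAdd because the same row index can appear more than once.
// Below is a stripped-down, framework-free sketch of that scatter-add pattern,
// assuming plain CUDA: one block per selected row, threads striding over the row.
// Kernel and buffer names (ScatterAddRows, row_ids, ...) are illustrative only,
// not Paddle API.
#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

__global__ void ScatterAddRows(const float* rows_data, const int64_t* row_ids,
                               float* dense_out, int64_t row_numel) {
  const int row = blockIdx.x;                          // one block per selected row
  const float* src = rows_data + row * row_numel;
  float* dst = dense_out + row_ids[row] * row_numel;   // row_ids may repeat
  for (int64_t i = threadIdx.x; i < row_numel; i += blockDim.x) {
    atomicAdd(dst + i, src[i]);                        // tolerate duplicate targets
  }
}

int main() {
  const int64_t row_numel = 4, n_selected = 3, height = 2;
  const float h_rows[n_selected * row_numel] = {1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3};
  const int64_t h_ids[n_selected] = {0, 1, 0};         // row 0 appears twice
  float *d_rows, *d_out;
  int64_t* d_ids;
  cudaMalloc(&d_rows, sizeof(h_rows));
  cudaMalloc(&d_ids, sizeof(h_ids));
  cudaMalloc(&d_out, height * row_numel * sizeof(float));
  cudaMemcpy(d_rows, h_rows, sizeof(h_rows), cudaMemcpyHostToDevice);
  cudaMemcpy(d_ids, h_ids, sizeof(h_ids), cudaMemcpyHostToDevice);
  cudaMemset(d_out, 0, height * row_numel * sizeof(float));
  ScatterAddRows<<<n_selected, 128>>>(d_rows, d_ids, d_out, row_numel);
  float h_out[height * row_numel];
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  for (int i = 0; i < height * row_numel; ++i) printf("%.0f ", h_out[i]);  // 4 4 4 4 2 2 2 2
  printf("\n");
  cudaFree(d_rows); cudaFree(d_ids); cudaFree(d_out);
  return 0;
}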
321735e327debf865d25d9930741c9bf12fcaeb8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]>, created on 25.01.2019 // #include <loops/special_kernels.h> namespace sd { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // kernel to swap two NDArrays vals as linear sequences // input - theSecondBuffer/Shape from input NDArray // output - theFirstBuffer/Shape from input NDArray template <typename T> static __global__ void swapUnsafeKernel(void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; __shared__ Nd4jLong resultLength; __shared__ T* input; __shared__ T* output; if (0 == threadIdx.x) { resultLength = shape::length(theFirstShape); input = reinterpret_cast<T*>(theSecondBuffer); output = reinterpret_cast<T*>(theFirstBuffer); } __syncthreads(); for (int i = tid; i < resultLength; i += totalThreads) { auto xEws = shape::order(theFirstShape) == 'c'? shape::elementWiseStride(theFirstShape) :1; auto yEws = shape::order(theSecondShape) == 'c'? shape::elementWiseStride(theSecondShape):1; auto xOffset = shape::getIndexOffset(i * xEws, theFirstShape); auto yOffset = shape::getIndexOffset(i * yEws, theSecondShape); sd::math::nd4j_swap(output[xOffset], input[yOffset]); } } BUILD_SINGLE_TEMPLATE(template __global__ void swapUnsafeKernel, (void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape), LIBND4J_TYPES); template <typename T> void templatedSwapUnsafe(void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape, hipStream_t* theStream) { hipLaunchKernelGGL(( swapUnsafeKernel<T>), dim3(256), dim3(512), 8192, *theStream, theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape); } BUILD_SINGLE_TEMPLATE(template void templatedSwapUnsafe, (void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape, hipStream_t* theStream), LIBND4J_TYPES); }
321735e327debf865d25d9930741c9bf12fcaeb8.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]>, created on 25.01.2019 // #include <loops/special_kernels.h> namespace sd { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // kernel to swap two NDArrays vals as linear sequences // input - theSecondBuffer/Shape from input NDArray // output - theFirstBuffer/Shape from input NDArray template <typename T> static __global__ void swapUnsafeKernel(void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; __shared__ Nd4jLong resultLength; __shared__ T* input; __shared__ T* output; if (0 == threadIdx.x) { resultLength = shape::length(theFirstShape); input = reinterpret_cast<T*>(theSecondBuffer); output = reinterpret_cast<T*>(theFirstBuffer); } __syncthreads(); for (int i = tid; i < resultLength; i += totalThreads) { auto xEws = shape::order(theFirstShape) == 'c'? shape::elementWiseStride(theFirstShape) :1; auto yEws = shape::order(theSecondShape) == 'c'? shape::elementWiseStride(theSecondShape):1; auto xOffset = shape::getIndexOffset(i * xEws, theFirstShape); auto yOffset = shape::getIndexOffset(i * yEws, theSecondShape); sd::math::nd4j_swap(output[xOffset], input[yOffset]); } } BUILD_SINGLE_TEMPLATE(template __global__ void swapUnsafeKernel, (void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape), LIBND4J_TYPES); template <typename T> void templatedSwapUnsafe(void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape, cudaStream_t* theStream) { swapUnsafeKernel<T><<<256, 512, 8192, *theStream>>>(theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape); } BUILD_SINGLE_TEMPLATE(template void templatedSwapUnsafe, (void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape, cudaStream_t* theStream), LIBND4J_TYPES); }
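// A reduced version of the grid-stride swap above, without the libnd4j shape
// machinery: every thread walks both buffers with stride gridDim.x * blockDim.x
// and exchanges elements in place. This sketch assumes both buffers are dense
// and contiguous; the 256x512 launch geometry mirrors the original but is otherwise
// arbitrary.
#include <cstdio>
#include <cuda_runtime.h>

template <typename T>
__global__ void swapLinear(T* a, T* b, size_t n) {
  size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  size_t stride = (size_t)gridDim.x * blockDim.x;
  for (size_t i = tid; i < n; i += stride) {
    T tmp = a[i];          // element-wise swap, no extra buffer needed
    a[i] = b[i];
    b[i] = tmp;
  }
}

int main() {
  const size_t n = 1 << 20;
  float *a, *b;
  cudaMallocManaged(&a, n * sizeof(float));
  cudaMallocManaged(&b, n * sizeof(float));
  for (size_t i = 0; i < n; ++i) { a[i] = 1.0f; b[i] = 2.0f; }
  swapLinear<<<256, 512>>>(a, b, n);
  cudaDeviceSynchronize();
  printf("a[0]=%.1f b[0]=%.1f\n", a[0], b[0]);  // expect a[0]=2.0, b[0]=1.0
  cudaFree(a); cudaFree(b);
  return 0;
}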
364f712dc046888820088cf48f40178b102c2418.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Reads a cell at (x+dx, y+dy) __device__ int read_cell(int* source_domain, int x, int y, int dx, int dy, unsigned int domain_x, unsigned int domain_y) { // Wrap around x = (unsigned int)(x + dx) % domain_x; y = (unsigned int)(y + dy) % domain_y; return source_domain[y * domain_x + x]; } // Compute kernel __global__ void life_kernel(int* source_domain, int* dest_domain, int domain_x, int domain_y) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx >= domain_x || ty >= domain_y) { return; } // Read cell int myself = read_cell(source_domain, tx, ty, 0, 0, domain_x, domain_y); // TODO: Read the 8 neighbors and count number of blue and red int redcells = 0; int bluecells = 0; int cell; for (int line = -1; line < 2; ++line) { for (int column = -1; column < 2; ++column) { //Do not read myself if (!(line == 0 && column == 0)) { cell = read_cell(source_domain, tx, ty, line, column, domain_x, domain_y); if (cell == 1) { redcells++; } else if (cell == 2) { bluecells++; } } } } // TODO: Compute new value int sum = redcells + bluecells; // By default, the cell dies (or stay empty) int newvalue = 0; if (myself == 0 && sum == 3) { // New cell newvalue = redcells > bluecells ? 1 : 2; } else if (sum == 2 || sum == 3) { // Survives newvalue = myself; } // TODO: Write it in dest_domain dest_domain[ty * domain_x + tx] = newvalue; } // Compute kernel __global__ void life_kernel_q5(int* source_domain, int* dest_domain, int domain_x, int domain_y) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx >= domain_x || ty >= domain_y) { return; } extern __shared__ int sharedData[]; int ligneDessous = ((int)blockIdx.y - 1 < 0) ? gridDim.y - 1 : blockIdx.y - 1; int ligneDessus = (blockIdx.y + 1 >= gridDim.y) ? 0 : blockIdx.y + 1; // Ligne de dessus memcpy(&sharedData[0 * domain_x], &source_domain[blockIdx.y * domain_x], domain_x); // Ligne courante memcpy(&sharedData[1 * domain_x], &source_domain[ligneDessus * domain_x], domain_x); // Ligne de dessous memcpy(&sharedData[2 * domain_x], &source_domain[ligneDessous * domain_x], domain_x); // Read cell int myself = read_cell(sharedData, tx, ty, 0, 0, domain_x, domain_y); // TODO: Read the 8 neighbors and count number of blue and red int redcells = 0; int bluecells = 0; int cell; for (int line = -1; line < 2; ++line) { for (int column = -1; column < 2; ++column) { //Do not read myself if (!(line == 0 && column == 0)) { cell = read_cell(sharedData, tx, ty, line, column, domain_x, domain_y); if (cell == 1) { redcells++; } else if (cell == 2) { bluecells++; } } } } // TODO: Compute new value int sum = redcells + bluecells; // By default, the cell dies (or stay empty) int newvalue = 0; if (myself == 0 && sum == 3) { // New cell newvalue = redcells > bluecells ? 1 : 2; } else if (sum == 2 || sum == 3) { // Survives newvalue = myself; } // TODO: Write it in dest_domain dest_domain[ty * domain_x + tx] = newvalue; }
364f712dc046888820088cf48f40178b102c2418.cu
// Reads a cell at (x+dx, y+dy) __device__ int read_cell(int* source_domain, int x, int y, int dx, int dy, unsigned int domain_x, unsigned int domain_y) { // Wrap around x = (unsigned int)(x + dx) % domain_x; y = (unsigned int)(y + dy) % domain_y; return source_domain[y * domain_x + x]; } // Compute kernel __global__ void life_kernel(int* source_domain, int* dest_domain, int domain_x, int domain_y) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx >= domain_x || ty >= domain_y) { return; } // Read cell int myself = read_cell(source_domain, tx, ty, 0, 0, domain_x, domain_y); // TODO: Read the 8 neighbors and count number of blue and red int redcells = 0; int bluecells = 0; int cell; for (int line = -1; line < 2; ++line) { for (int column = -1; column < 2; ++column) { //Do not read myself if (!(line == 0 && column == 0)) { cell = read_cell(source_domain, tx, ty, line, column, domain_x, domain_y); if (cell == 1) { redcells++; } else if (cell == 2) { bluecells++; } } } } // TODO: Compute new value int sum = redcells + bluecells; // By default, the cell dies (or stay empty) int newvalue = 0; if (myself == 0 && sum == 3) { // New cell newvalue = redcells > bluecells ? 1 : 2; } else if (sum == 2 || sum == 3) { // Survives newvalue = myself; } // TODO: Write it in dest_domain dest_domain[ty * domain_x + tx] = newvalue; } // Compute kernel __global__ void life_kernel_q5(int* source_domain, int* dest_domain, int domain_x, int domain_y) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx >= domain_x || ty >= domain_y) { return; } extern __shared__ int sharedData[]; int ligneDessous = ((int)blockIdx.y - 1 < 0) ? gridDim.y - 1 : blockIdx.y - 1; int ligneDessus = (blockIdx.y + 1 >= gridDim.y) ? 0 : blockIdx.y + 1; // Ligne de dessus memcpy(&sharedData[0 * domain_x], &source_domain[blockIdx.y * domain_x], domain_x); // Ligne courante memcpy(&sharedData[1 * domain_x], &source_domain[ligneDessus * domain_x], domain_x); // Ligne de dessous memcpy(&sharedData[2 * domain_x], &source_domain[ligneDessous * domain_x], domain_x); // Read cell int myself = read_cell(sharedData, tx, ty, 0, 0, domain_x, domain_y); // TODO: Read the 8 neighbors and count number of blue and red int redcells = 0; int bluecells = 0; int cell; for (int line = -1; line < 2; ++line) { for (int column = -1; column < 2; ++column) { //Do not read myself if (!(line == 0 && column == 0)) { cell = read_cell(sharedData, tx, ty, line, column, domain_x, domain_y); if (cell == 1) { redcells++; } else if (cell == 2) { bluecells++; } } } } // TODO: Compute new value int sum = redcells + bluecells; // By default, the cell dies (or stay empty) int newvalue = 0; if (myself == 0 && sum == 3) { // New cell newvalue = redcells > bluecells ? 1 : 2; } else if (sum == 2 || sum == 3) { // Survives newvalue = myself; } // TODO: Write it in dest_domain dest_domain[ty * domain_x + tx] = newvalue; }
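// Drivers of kernels like life_kernel above typically ping-pong between two
// domain buffers so that no cell reads a value already overwritten in the same
// generation. The sketch below shows that host-side double buffering together
// with the torus wrap-around read; the 2-colour encoding (0 empty, 1 red, 2 blue)
// follows the kernels above, while the grid size, step count and initial state
// are arbitrary choices for illustration.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void life_step(const int* src, int* dst, int nx, int ny) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= nx || y >= ny) return;
  int red = 0, blue = 0;
  for (int dy = -1; dy <= 1; ++dy)
    for (int dx = -1; dx <= 1; ++dx) {
      if (dx == 0 && dy == 0) continue;
      int c = src[((y + dy + ny) % ny) * nx + (x + dx + nx) % nx];  // torus wrap
      red += (c == 1);
      blue += (c == 2);
    }
  int self = src[y * nx + x], sum = red + blue, next = 0;
  if (self == 0 && sum == 3) next = red > blue ? 1 : 2;   // birth, majority colour
  else if (sum == 2 || sum == 3) next = self;             // survival
  dst[y * nx + x] = next;
}

int main() {
  const int nx = 64, ny = 64, steps = 10;
  int *a, *b;
  cudaMallocManaged(&a, nx * ny * sizeof(int));
  cudaMallocManaged(&b, nx * ny * sizeof(int));
  for (int i = 0; i < nx * ny; ++i) a[i] = i % 3;          // arbitrary initial state
  dim3 block(16, 16), grid((nx + 15) / 16, (ny + 15) / 16);
  for (int s = 0; s < steps; ++s) {
    life_step<<<grid, block>>>(a, b, nx, ny);
    int* t = a; a = b; b = t;                              // new generation becomes source
  }
  cudaDeviceSynchronize();
  printf("cell(0,0) after %d steps: %d\n", steps, a[0]);
  cudaFree(a); cudaFree(b);
  return 0;
}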
fefa6714374b27dd160ef77a9664b600a9559094.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:BUGLE_ERROR
//--gridDim=1 --blockDim=32 --no-inline

//This kernel is not-racy: memset is called with variable value.

#define memset(dst,val,len) __builtin_memset(dst,val,len)

__device__ int bar(void);

__global__ void kernel(uint4 *out) {
  uint4 vector;
  int val = bar();
  memset(&vector, val, 16);
  out[threadIdx.x] = vector;
}
fefa6714374b27dd160ef77a9664b600a9559094.cu
//xfail:BUGLE_ERROR
//--gridDim=1 --blockDim=32 --no-inline

//This kernel is not-racy: memset is called with variable value.

#define memset(dst,val,len) __builtin_memset(dst,val,len)

__device__ int bar(void);

__global__ void kernel(uint4 *out) {
  uint4 vector;
  int val = bar();
  memset(&vector, val, 16);
  out[threadIdx.x] = vector;
}
ac864e6a406e71ad472e87c1c7b8adf082871788.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2014 BVLC and contributors. #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #include "caffe/util/im2col.hpp" #include "caffe/filler.hpp" #include "caffe/util/math_functions.hpp" #include <iostream> #include <fstream> //#define DEBUG_LOCAL_FP namespace caffe { template <typename Dtype> Dtype LocallyConnectedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { //BUG FOUND! update memory before copy to slave memory!!! CUDA_CHECK(hipDeviceSynchronize()); //Caffe::switch_to_master_device(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = (*top)[0]->mutable_gpu_data(); Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* trans_data = trans_buffer_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); int weight_offset = M_ * K_; int col_offset = K_ * mem_group_size; int result_offset = M_ * mem_group_size; int top_offset = M_ * N_; //For the first time running, intialize the entry point buffer for batched gemm if (!entry_initialized_){ const Dtype** weight_entry_cpu_data = (const Dtype**)weight_entry_->mutable_cpu_data(); Dtype** col_entry_cpu_data = (Dtype**)col_entry_->mutable_cpu_data(); Dtype** result_entry_cpu_data = (Dtype**)result_entry_->mutable_cpu_data(); for (int i = 0; i < N_; i++){ weight_entry_cpu_data[i] = weight + weight_offset * i; col_entry_cpu_data[i] = col_data + col_offset * i; result_entry_cpu_data[i] = trans_data + result_offset * i; } entry_initialized_ = true; } // get gpu version of these entry points Dtype** weight_entry_gpu_data = (Dtype**)weight_entry_->mutable_gpu_data(); Dtype** col_entry_gpu_data = (Dtype**)col_entry_->mutable_gpu_data(); Dtype** result_entry_gpu_data = (Dtype**)result_entry_->mutable_gpu_data(); //add bias to top first if (bias_term_) { // distribute bias into outputs caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, N_*(num_output_), 1, (Dtype)1., reinterpret_cast<const Dtype*>(bias_multiplier_->gpu_data()), this->blobs_[1]->gpu_data(), (Dtype)0., top_data); } int mem_group_counter = 0; for (int n = 0; n < num_; n+=mem_group_size, mem_group_counter++){ // im2col, here we attach sames columns from images inside a mem group to a contiguous block. size_t this_mem_group_size = min(mem_group_size,num_-n); bu_im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_, width_, kernel_size_, pad_, stride_, col_data, this_mem_group_size, true); caffe_gpu_gemm_batched<Dtype>(CblasNoTrans, CblasNoTrans, this_mem_group_size, M_, K_, (Dtype)1., (const Dtype **)col_entry_gpu_data, (const Dtype **)weight_entry_gpu_data, (Dtype)0., result_entry_gpu_data, N_); // permute 3d and add bias cu_permute_3D_acc_gpu( trans_data, top_data + (*top)[0]->offset(n), M_, this_mem_group_size, N_, XYZtoZXY, Dtype(1.)); } return Dtype(0.); } template <typename Dtype> void LocallyConnectedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const bool propagate_down, vector<Blob<Dtype>*>* bottom) { //BUG FOUND! update memory before copy to slave memory!!! 
CUDA_CHECK(hipDeviceSynchronize()); //Caffe::switch_to_master_device(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* bottom_data = (*bottom)[0]->gpu_data(); Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* col_diff = col_buffer_.mutable_gpu_diff(); Dtype* trans_data = trans_buffer_.mutable_gpu_data(); Dtype* trans_diff = trans_buffer_.mutable_gpu_diff(); // bias gradient if necessary Dtype* bias_diff = NULL; int weight_offset = M_ * K_; int col_offset = K_ * mem_group_size; int result_offset = M_ * mem_group_size; int top_offset = M_ * N_; int bias_offset = num_output_; if (!bp_entry_initialized_) { Dtype **col_diff_entry_cpu_data = (Dtype **)col_diff_entry_->mutable_cpu_data(); Dtype **weight_diff_entry_cpu_data = (Dtype **)weight_diff_entry_->mutable_cpu_data(); for (int i = 0; i < N_; i++) { col_diff_entry_cpu_data[i] = col_diff + col_offset * i; weight_diff_entry_cpu_data[i] = weight_diff + weight_offset * i; } bp_entry_initialized_ = true; } Dtype** weight_entry_gpu_data = (Dtype**)weight_entry_->mutable_gpu_data(); Dtype** weight_diff_entry_gpu_data = (Dtype**)weight_diff_entry_->mutable_gpu_data(); Dtype** col_entry_gpu_data = (Dtype**)col_entry_->mutable_gpu_data(); Dtype** col_diff_entry_gpu_data = (Dtype**)col_diff_entry_->mutable_gpu_data(); Dtype** trans_entry_gpu_data = (Dtype**)result_entry_->mutable_gpu_data(); if (bias_term_) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_gemv<Dtype>(CblasTrans, num_, num_output_ * N_, 1., top_diff, reinterpret_cast<const Dtype*>(row_sumer_->gpu_data()), 0., bias_diff); } CUDA_CHECK(hipMemsetAsync(weight_diff, 0, sizeof(Dtype) * this->blobs_[0]->count(), Caffe::cu_stream())); int ct = this->blobs_[0]->count(); int mem_group_counter = 0; for (int n = 0; n < num_; n+= mem_group_size, mem_group_counter++) { size_t this_mem_group_size = min(mem_group_size,num_-n); bu_im2col_gpu(bottom_data + (*bottom)[0]->offset(n), channels_, height_, width_, kernel_size_, pad_, stride_, col_data, this_mem_group_size, true); cu_permute_3D_acc_gpu(top_diff + top[0]->offset(n), trans_data, N_, M_, this_mem_group_size, XYZtoZYX); caffe_gpu_gemm_batched<Dtype>(CblasTrans, CblasTrans, K_, M_, this_mem_group_size, (Dtype)1., (const Dtype **)col_entry_gpu_data, (const Dtype **)trans_entry_gpu_data, (Dtype)1., weight_diff_entry_gpu_data, N_); if (propagate_down) { caffe_gpu_gemm_batched<Dtype>(CblasNoTrans, CblasNoTrans, K_, this_mem_group_size, M_, (Dtype)1., (const Dtype **)weight_entry_gpu_data, (const Dtype **)trans_entry_gpu_data, (Dtype)0., col_diff_entry_gpu_data, N_); bu_col2im_gpu(col_diff, channels_, height_, width_, kernel_size_, pad_, stride_, bottom_diff + (*bottom)[0]->offset(n), this_mem_group_size, true); } } // Sync master device CUDA_CHECK(hipStreamSynchronize(Caffe::cu_stream())); } INSTANTIATE_CLASS(LocallyConnectedLayer); } // namespace caffe
ac864e6a406e71ad472e87c1c7b8adf082871788.cu
// Copyright 2014 BVLC and contributors. #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #include "caffe/util/im2col.hpp" #include "caffe/filler.hpp" #include "caffe/util/math_functions.hpp" #include <iostream> #include <fstream> //#define DEBUG_LOCAL_FP namespace caffe { template <typename Dtype> Dtype LocallyConnectedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { //BUG FOUND! update memory before copy to slave memory!!! CUDA_CHECK(cudaDeviceSynchronize()); //Caffe::switch_to_master_device(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = (*top)[0]->mutable_gpu_data(); Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* trans_data = trans_buffer_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); int weight_offset = M_ * K_; int col_offset = K_ * mem_group_size; int result_offset = M_ * mem_group_size; int top_offset = M_ * N_; //For the first time running, intialize the entry point buffer for batched gemm if (!entry_initialized_){ const Dtype** weight_entry_cpu_data = (const Dtype**)weight_entry_->mutable_cpu_data(); Dtype** col_entry_cpu_data = (Dtype**)col_entry_->mutable_cpu_data(); Dtype** result_entry_cpu_data = (Dtype**)result_entry_->mutable_cpu_data(); for (int i = 0; i < N_; i++){ weight_entry_cpu_data[i] = weight + weight_offset * i; col_entry_cpu_data[i] = col_data + col_offset * i; result_entry_cpu_data[i] = trans_data + result_offset * i; } entry_initialized_ = true; } // get gpu version of these entry points Dtype** weight_entry_gpu_data = (Dtype**)weight_entry_->mutable_gpu_data(); Dtype** col_entry_gpu_data = (Dtype**)col_entry_->mutable_gpu_data(); Dtype** result_entry_gpu_data = (Dtype**)result_entry_->mutable_gpu_data(); //add bias to top first if (bias_term_) { // distribute bias into outputs caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, N_*(num_output_), 1, (Dtype)1., reinterpret_cast<const Dtype*>(bias_multiplier_->gpu_data()), this->blobs_[1]->gpu_data(), (Dtype)0., top_data); } int mem_group_counter = 0; for (int n = 0; n < num_; n+=mem_group_size, mem_group_counter++){ // im2col, here we attach sames columns from images inside a mem group to a contiguous block. size_t this_mem_group_size = min(mem_group_size,num_-n); bu_im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_, width_, kernel_size_, pad_, stride_, col_data, this_mem_group_size, true); caffe_gpu_gemm_batched<Dtype>(CblasNoTrans, CblasNoTrans, this_mem_group_size, M_, K_, (Dtype)1., (const Dtype **)col_entry_gpu_data, (const Dtype **)weight_entry_gpu_data, (Dtype)0., result_entry_gpu_data, N_); // permute 3d and add bias cu_permute_3D_acc_gpu( trans_data, top_data + (*top)[0]->offset(n), M_, this_mem_group_size, N_, XYZtoZXY, Dtype(1.)); } return Dtype(0.); } template <typename Dtype> void LocallyConnectedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const bool propagate_down, vector<Blob<Dtype>*>* bottom) { //BUG FOUND! update memory before copy to slave memory!!! 
CUDA_CHECK(cudaDeviceSynchronize()); //Caffe::switch_to_master_device(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* bottom_data = (*bottom)[0]->gpu_data(); Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* col_diff = col_buffer_.mutable_gpu_diff(); Dtype* trans_data = trans_buffer_.mutable_gpu_data(); Dtype* trans_diff = trans_buffer_.mutable_gpu_diff(); // bias gradient if necessary Dtype* bias_diff = NULL; int weight_offset = M_ * K_; int col_offset = K_ * mem_group_size; int result_offset = M_ * mem_group_size; int top_offset = M_ * N_; int bias_offset = num_output_; if (!bp_entry_initialized_) { Dtype **col_diff_entry_cpu_data = (Dtype **)col_diff_entry_->mutable_cpu_data(); Dtype **weight_diff_entry_cpu_data = (Dtype **)weight_diff_entry_->mutable_cpu_data(); for (int i = 0; i < N_; i++) { col_diff_entry_cpu_data[i] = col_diff + col_offset * i; weight_diff_entry_cpu_data[i] = weight_diff + weight_offset * i; } bp_entry_initialized_ = true; } Dtype** weight_entry_gpu_data = (Dtype**)weight_entry_->mutable_gpu_data(); Dtype** weight_diff_entry_gpu_data = (Dtype**)weight_diff_entry_->mutable_gpu_data(); Dtype** col_entry_gpu_data = (Dtype**)col_entry_->mutable_gpu_data(); Dtype** col_diff_entry_gpu_data = (Dtype**)col_diff_entry_->mutable_gpu_data(); Dtype** trans_entry_gpu_data = (Dtype**)result_entry_->mutable_gpu_data(); if (bias_term_) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_gemv<Dtype>(CblasTrans, num_, num_output_ * N_, 1., top_diff, reinterpret_cast<const Dtype*>(row_sumer_->gpu_data()), 0., bias_diff); } CUDA_CHECK(cudaMemsetAsync(weight_diff, 0, sizeof(Dtype) * this->blobs_[0]->count(), Caffe::cu_stream())); int ct = this->blobs_[0]->count(); int mem_group_counter = 0; for (int n = 0; n < num_; n+= mem_group_size, mem_group_counter++) { size_t this_mem_group_size = min(mem_group_size,num_-n); bu_im2col_gpu(bottom_data + (*bottom)[0]->offset(n), channels_, height_, width_, kernel_size_, pad_, stride_, col_data, this_mem_group_size, true); cu_permute_3D_acc_gpu(top_diff + top[0]->offset(n), trans_data, N_, M_, this_mem_group_size, XYZtoZYX); caffe_gpu_gemm_batched<Dtype>(CblasTrans, CblasTrans, K_, M_, this_mem_group_size, (Dtype)1., (const Dtype **)col_entry_gpu_data, (const Dtype **)trans_entry_gpu_data, (Dtype)1., weight_diff_entry_gpu_data, N_); if (propagate_down) { caffe_gpu_gemm_batched<Dtype>(CblasNoTrans, CblasNoTrans, K_, this_mem_group_size, M_, (Dtype)1., (const Dtype **)weight_entry_gpu_data, (const Dtype **)trans_entry_gpu_data, (Dtype)0., col_diff_entry_gpu_data, N_); bu_col2im_gpu(col_diff, channels_, height_, width_, kernel_size_, pad_, stride_, bottom_diff + (*bottom)[0]->offset(n), this_mem_group_size, true); } } // Sync master device CUDA_CHECK(cudaStreamSynchronize(Caffe::cu_stream())); } INSTANTIATE_CLASS(LocallyConnectedLayer); } // namespace caffe
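// The locally connected layer above prepares "entry point" arrays once (one
// device pointer per small matrix) and reuses them for batched GEMM calls.
// The sketch below shows that pattern with cuBLAS's cublasSgemmBatched called
// directly, as a stand-in for Caffe's caffe_gpu_gemm_batched wrapper, which is
// not reproduced here. Matrix sizes and values are toy choices; cuBLAS expects
// column-major storage, which does not matter when every input element is 1.
#include <cstdio>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main() {
  const int batch = 4, m = 2, n = 2, k = 2;
  const int elems = m * k;                     // A, B, C all 2x2 here
  float *A, *B, *C;
  cudaMalloc(&A, batch * elems * sizeof(float));
  cudaMalloc(&B, batch * elems * sizeof(float));
  cudaMalloc(&C, batch * elems * sizeof(float));
  float ones[4 * 4];
  for (int i = 0; i < batch * elems; ++i) ones[i] = 1.0f;
  cudaMemcpy(A, ones, batch * elems * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(B, ones, batch * elems * sizeof(float), cudaMemcpyHostToDevice);

  // Build the per-matrix pointer tables on the host, copy them to the device once.
  const float* hA[batch]; const float* hB[batch]; float* hC[batch];
  for (int i = 0; i < batch; ++i) {
    hA[i] = A + i * elems; hB[i] = B + i * elems; hC[i] = C + i * elems;
  }
  const float **dA, **dB; float **dC;
  cudaMalloc((void**)&dA, batch * sizeof(float*));
  cudaMalloc((void**)&dB, batch * sizeof(float*));
  cudaMalloc((void**)&dC, batch * sizeof(float*));
  cudaMemcpy(dA, hA, batch * sizeof(float*), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB, batch * sizeof(float*), cudaMemcpyHostToDevice);
  cudaMemcpy(dC, hC, batch * sizeof(float*), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);
  const float alpha = 1.0f, beta = 0.0f;
  // One call runs `batch` independent small GEMMs: C[i] = A[i] * B[i].
  cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                     &alpha, dA, m, dB, k, &beta, dC, m, batch);
  float out[4];
  cudaMemcpy(out, C, 4 * sizeof(float), cudaMemcpyDeviceToHost);
  printf("C0 = %.0f %.0f %.0f %.0f\n", out[0], out[1], out[2], out[3]);  // all 2
  cublasDestroy(handle);
  cudaFree(A); cudaFree(B); cudaFree(C); cudaFree(dA); cudaFree(dB); cudaFree(dC);
  return 0;
}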
75c01dc3a3d7c2c853bedd892258eb57f04217a5.hip
// !!! This is a file automatically generated by hipify!!! #include "gpu_data.cuh" #include <algorithm> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> GPUData::GPUData(Info const & info) : chunk_size_(0), info_(info), data_( (info_.data_location_ == HOST) ? info_.max_chunk_size_*info_.n_points_ : 0), weights_( (info_.use_weights_ && info_.data_location_ == HOST) ? info_.n_points_ * info_.max_chunk_size_ : 0 ), parameters_( (info_.data_location_ == HOST) ? info_.max_chunk_size_*info_.n_parameters_ : 0 ), user_info_( (info_.data_location_ == HOST) ? info_.user_info_size_ : 0), prev_parameters_( info_.max_chunk_size_*info_.n_parameters_ ), parameters_to_fit_indices_( info_.n_parameters_to_fit_ ), chi_squares_( (info_.data_location_ == HOST) ? info_.max_chunk_size_ : 0), prev_chi_squares_( info_.max_chunk_size_ ), gradients_( info_.max_chunk_size_ * info_.n_parameters_to_fit_), hessians_( info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_parameters_to_fit_ ), deltas_(info_.max_chunk_size_ * info_.n_parameters_to_fit_), scaling_vectors_(info_.max_chunk_size_ * info_.n_parameters_to_fit_), subtotals_( (info_.n_blocks_per_fit_ > 1) ? ::max( info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_blocks_per_fit_, info_.max_chunk_size_ * info_.n_blocks_per_fit_) : 0), values_( info_.max_chunk_size_ * info_.n_points_ ), derivatives_( info_.max_chunk_size_ * info_.n_points_ * info_.n_parameters_ ), lambdas_( info_.max_chunk_size_ ), states_( (info_.data_location_ == HOST) ? info_.max_chunk_size_ : 0), finished_( info_.max_chunk_size_ ), iteration_failed_(info_.max_chunk_size_), all_finished_( 1 ), n_iterations_( (info_.data_location_ == HOST) ? info_.max_chunk_size_ : 0), solution_info_(info_.max_chunk_size_) #ifdef USE_CUBLAS , decomposed_hessians_(info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_parameters_to_fit_), pointer_decomposed_hessians_(info_.max_chunk_size_), pointer_deltas_(info_.max_chunk_size_), pivot_vectors_(info_.max_chunk_size_ * info_.n_parameters_to_fit_) #endif // USE_CUBLAS { #ifdef USE_CUBLAS hipblasCreate(&cublas_handle_); point_to_data_sets(); #endif // USE_CUBLAS } GPUData::~GPUData() { #ifdef USE_CUBLAS hipblasDestroy(cublas_handle_); #endif // USE_CUBLAS } void GPUData::init ( int const chunk_size, int const chunk_index, REAL const * const data, REAL const * const weights, REAL const * const initial_parameters, std::vector<int> const & parameters_to_fit_indices, int * states, REAL * chi_squares, int * n_iterations) { chunk_size_ = chunk_size; chunk_index_ = chunk_index; if (info_.data_location_ == HOST) { write( data_, data + chunk_index_*info_.max_chunk_size_*info_.n_points_, chunk_size_ * info_.n_points_); write( parameters_, initial_parameters + chunk_index_*info_.max_chunk_size_*info_.n_parameters_, chunk_size_ * info_.n_parameters_); if (info_.use_weights_) write( weights_, weights + chunk_index_*info_.max_chunk_size_*info_.n_points_, chunk_size_ * info_.n_points_); } else if (info_.data_location_ == DEVICE) { data_.assign( data + chunk_index_*info_.max_chunk_size_*info_.n_points_); parameters_.assign( initial_parameters + chunk_index_*info_.max_chunk_size_*info_.n_parameters_); if (info_.use_weights_) weights_.assign( weights + chunk_index_*info_.max_chunk_size_*info_.n_points_); states_.assign( states + chunk_index_ * info_.max_chunk_size_); chi_squares_.assign( chi_squares + chunk_index_ * info_.max_chunk_size_); n_iterations_.assign( n_iterations + chunk_index_ * info_.max_chunk_size_); } 
write(parameters_to_fit_indices_, parameters_to_fit_indices); set(prev_chi_squares_, 0., chunk_size_); set(finished_, 0, chunk_size_); set(scaling_vectors_, 0., chunk_size_ * info_.n_parameters_to_fit_); set(states_, 0, chunk_size_); set(lambdas_, 0.001f, chunk_size_); set(n_iterations_, 0, chunk_size_); } void GPUData::init_user_info(char const * const user_info) { if (info_.user_info_size_ > 0) { if (info_.data_location_ == HOST) { write(user_info_, user_info, info_.user_info_size_); } else if (info_.data_location_ == DEVICE) { user_info_.assign(user_info); } } } void GPUData::read(bool * dst, int const * src) { int int_dst = 0; CUDA_CHECK_STATUS(hipMemcpy(&int_dst, src, sizeof(int), hipMemcpyDeviceToHost)); * dst = (int_dst == 1) ? true : false; } void GPUData::write(REAL* dst, REAL const * src, int const count) { CUDA_CHECK_STATUS(hipMemcpy(dst, src, count * sizeof(REAL), hipMemcpyHostToDevice)); } void GPUData::write(int* dst, std::vector<int> const & src) { std::size_t const size = src.size() * sizeof(int); CUDA_CHECK_STATUS(hipMemcpy(dst, src.data(), size, hipMemcpyHostToDevice)); } void GPUData::write(char* dst, char const * src, std::size_t const count) { CUDA_CHECK_STATUS(hipMemcpy(dst, src, count * sizeof(char), hipMemcpyHostToDevice)); } void GPUData::copy(REAL * dst, REAL const * src, std::size_t const count) { CUDA_CHECK_STATUS(hipMemcpy(dst, src, count * sizeof(REAL), hipMemcpyDeviceToDevice)); } __global__ void set_kernel(int* dst, int const value, int const count) { int const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= count) return; dst[index] = value; } void GPUData::set(int* arr, int const value, int const count) { int const tx = 256; int const bx = (count / tx) + 1; dim3 threads(tx, 1, 1); dim3 blocks(bx, 1, 1); hipLaunchKernelGGL(( set_kernel), dim3(blocks), dim3(threads) , 0, 0, arr, value, count); CUDA_CHECK_STATUS(hipGetLastError()); } void GPUData::set(int* arr, int const value) { int const tx = 1; int const bx = 1; dim3 threads(tx, 1, 1); dim3 blocks(bx, 1, 1); hipLaunchKernelGGL(( set_kernel), dim3(blocks), dim3(threads) , 0, 0, arr, value, 1); CUDA_CHECK_STATUS(hipGetLastError()); } __global__ void set_kernel(REAL* dst, REAL const value, std::size_t const count) { std::size_t const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= count) return; dst[index] = value; } void GPUData::set(REAL* arr, REAL const value, int const count) { int const tx = 256; int const bx = (count / tx) + 1; dim3 threads(tx, 1, 1); dim3 blocks(bx, 1, 1); hipLaunchKernelGGL(( set_kernel), dim3(blocks), dim3(threads) , 0, 0, arr, value, count); CUDA_CHECK_STATUS(hipGetLastError()); } __global__ void cuda_point_to_data_sets( REAL ** pointer_to_pointers, REAL * pointer, std::size_t const n_pointers, std::size_t const size) { std::size_t const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n_pointers) return; int const begin = index * size; pointer_to_pointers[index] = pointer + begin; } #ifdef USE_CUBLAS void GPUData::point_to_data_sets() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); std::size_t max_threads = 256; threads.x = static_cast<unsigned int> (::min(info_.max_chunk_size_, max_threads)); blocks.x = static_cast<unsigned int> (::ceil(REAL(info_.max_chunk_size_) / REAL(threads.x))); hipLaunchKernelGGL(( cuda_point_to_data_sets) , dim3(blocks), dim3(threads) , 0, 0, pointer_decomposed_hessians_, decomposed_hessians_, info_.max_chunk_size_, info_.n_parameters_to_fit_*info_.n_parameters_to_fit_); hipLaunchKernelGGL(( cuda_point_to_data_sets) , 
dim3(blocks), dim3(threads) , 0, 0, pointer_deltas_, deltas_, info_.max_chunk_size_, info_.n_parameters_to_fit_); } #endif // USE_CUBLAS
75c01dc3a3d7c2c853bedd892258eb57f04217a5.cu
#include "gpu_data.cuh" #include <algorithm> #include <cuda_runtime.h> #include <device_launch_parameters.h> GPUData::GPUData(Info const & info) : chunk_size_(0), info_(info), data_( (info_.data_location_ == HOST) ? info_.max_chunk_size_*info_.n_points_ : 0), weights_( (info_.use_weights_ && info_.data_location_ == HOST) ? info_.n_points_ * info_.max_chunk_size_ : 0 ), parameters_( (info_.data_location_ == HOST) ? info_.max_chunk_size_*info_.n_parameters_ : 0 ), user_info_( (info_.data_location_ == HOST) ? info_.user_info_size_ : 0), prev_parameters_( info_.max_chunk_size_*info_.n_parameters_ ), parameters_to_fit_indices_( info_.n_parameters_to_fit_ ), chi_squares_( (info_.data_location_ == HOST) ? info_.max_chunk_size_ : 0), prev_chi_squares_( info_.max_chunk_size_ ), gradients_( info_.max_chunk_size_ * info_.n_parameters_to_fit_), hessians_( info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_parameters_to_fit_ ), deltas_(info_.max_chunk_size_ * info_.n_parameters_to_fit_), scaling_vectors_(info_.max_chunk_size_ * info_.n_parameters_to_fit_), subtotals_( (info_.n_blocks_per_fit_ > 1) ? std::max( info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_blocks_per_fit_, info_.max_chunk_size_ * info_.n_blocks_per_fit_) : 0), values_( info_.max_chunk_size_ * info_.n_points_ ), derivatives_( info_.max_chunk_size_ * info_.n_points_ * info_.n_parameters_ ), lambdas_( info_.max_chunk_size_ ), states_( (info_.data_location_ == HOST) ? info_.max_chunk_size_ : 0), finished_( info_.max_chunk_size_ ), iteration_failed_(info_.max_chunk_size_), all_finished_( 1 ), n_iterations_( (info_.data_location_ == HOST) ? info_.max_chunk_size_ : 0), solution_info_(info_.max_chunk_size_) #ifdef USE_CUBLAS , decomposed_hessians_(info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_parameters_to_fit_), pointer_decomposed_hessians_(info_.max_chunk_size_), pointer_deltas_(info_.max_chunk_size_), pivot_vectors_(info_.max_chunk_size_ * info_.n_parameters_to_fit_) #endif // USE_CUBLAS { #ifdef USE_CUBLAS cublasCreate(&cublas_handle_); point_to_data_sets(); #endif // USE_CUBLAS } GPUData::~GPUData() { #ifdef USE_CUBLAS cublasDestroy(cublas_handle_); #endif // USE_CUBLAS } void GPUData::init ( int const chunk_size, int const chunk_index, REAL const * const data, REAL const * const weights, REAL const * const initial_parameters, std::vector<int> const & parameters_to_fit_indices, int * states, REAL * chi_squares, int * n_iterations) { chunk_size_ = chunk_size; chunk_index_ = chunk_index; if (info_.data_location_ == HOST) { write( data_, data + chunk_index_*info_.max_chunk_size_*info_.n_points_, chunk_size_ * info_.n_points_); write( parameters_, initial_parameters + chunk_index_*info_.max_chunk_size_*info_.n_parameters_, chunk_size_ * info_.n_parameters_); if (info_.use_weights_) write( weights_, weights + chunk_index_*info_.max_chunk_size_*info_.n_points_, chunk_size_ * info_.n_points_); } else if (info_.data_location_ == DEVICE) { data_.assign( data + chunk_index_*info_.max_chunk_size_*info_.n_points_); parameters_.assign( initial_parameters + chunk_index_*info_.max_chunk_size_*info_.n_parameters_); if (info_.use_weights_) weights_.assign( weights + chunk_index_*info_.max_chunk_size_*info_.n_points_); states_.assign( states + chunk_index_ * info_.max_chunk_size_); chi_squares_.assign( chi_squares + chunk_index_ * info_.max_chunk_size_); n_iterations_.assign( n_iterations + chunk_index_ * info_.max_chunk_size_); } write(parameters_to_fit_indices_, parameters_to_fit_indices); set(prev_chi_squares_, 0., 
chunk_size_); set(finished_, 0, chunk_size_); set(scaling_vectors_, 0., chunk_size_ * info_.n_parameters_to_fit_); set(states_, 0, chunk_size_); set(lambdas_, 0.001f, chunk_size_); set(n_iterations_, 0, chunk_size_); } void GPUData::init_user_info(char const * const user_info) { if (info_.user_info_size_ > 0) { if (info_.data_location_ == HOST) { write(user_info_, user_info, info_.user_info_size_); } else if (info_.data_location_ == DEVICE) { user_info_.assign(user_info); } } } void GPUData::read(bool * dst, int const * src) { int int_dst = 0; CUDA_CHECK_STATUS(cudaMemcpy(&int_dst, src, sizeof(int), cudaMemcpyDeviceToHost)); * dst = (int_dst == 1) ? true : false; } void GPUData::write(REAL* dst, REAL const * src, int const count) { CUDA_CHECK_STATUS(cudaMemcpy(dst, src, count * sizeof(REAL), cudaMemcpyHostToDevice)); } void GPUData::write(int* dst, std::vector<int> const & src) { std::size_t const size = src.size() * sizeof(int); CUDA_CHECK_STATUS(cudaMemcpy(dst, src.data(), size, cudaMemcpyHostToDevice)); } void GPUData::write(char* dst, char const * src, std::size_t const count) { CUDA_CHECK_STATUS(cudaMemcpy(dst, src, count * sizeof(char), cudaMemcpyHostToDevice)); } void GPUData::copy(REAL * dst, REAL const * src, std::size_t const count) { CUDA_CHECK_STATUS(cudaMemcpy(dst, src, count * sizeof(REAL), cudaMemcpyDeviceToDevice)); } __global__ void set_kernel(int* dst, int const value, int const count) { int const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= count) return; dst[index] = value; } void GPUData::set(int* arr, int const value, int const count) { int const tx = 256; int const bx = (count / tx) + 1; dim3 threads(tx, 1, 1); dim3 blocks(bx, 1, 1); set_kernel<<< blocks, threads >>>(arr, value, count); CUDA_CHECK_STATUS(cudaGetLastError()); } void GPUData::set(int* arr, int const value) { int const tx = 1; int const bx = 1; dim3 threads(tx, 1, 1); dim3 blocks(bx, 1, 1); set_kernel<<< blocks, threads >>>(arr, value, 1); CUDA_CHECK_STATUS(cudaGetLastError()); } __global__ void set_kernel(REAL* dst, REAL const value, std::size_t const count) { std::size_t const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= count) return; dst[index] = value; } void GPUData::set(REAL* arr, REAL const value, int const count) { int const tx = 256; int const bx = (count / tx) + 1; dim3 threads(tx, 1, 1); dim3 blocks(bx, 1, 1); set_kernel<<< blocks, threads >>>(arr, value, count); CUDA_CHECK_STATUS(cudaGetLastError()); } __global__ void cuda_point_to_data_sets( REAL ** pointer_to_pointers, REAL * pointer, std::size_t const n_pointers, std::size_t const size) { std::size_t const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n_pointers) return; int const begin = index * size; pointer_to_pointers[index] = pointer + begin; } #ifdef USE_CUBLAS void GPUData::point_to_data_sets() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); std::size_t max_threads = 256; threads.x = static_cast<unsigned int> (std::min(info_.max_chunk_size_, max_threads)); blocks.x = static_cast<unsigned int> (std::ceil(REAL(info_.max_chunk_size_) / REAL(threads.x))); cuda_point_to_data_sets <<< blocks, threads >>>( pointer_decomposed_hessians_, decomposed_hessians_, info_.max_chunk_size_, info_.n_parameters_to_fit_*info_.n_parameters_to_fit_); cuda_point_to_data_sets <<< blocks, threads >>> ( pointer_deltas_, deltas_, info_.max_chunk_size_, info_.n_parameters_to_fit_); } #endif // USE_CUBLAS
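// A stripped-down version of the fill ("set") utility above: a one-value-per-thread
// kernel, ceil-style grid sizing ((count + threads - 1) / threads covers the same
// range as the original's count/tx + 1 thanks to the bounds check), and a simple
// error-check macro. REAL is fixed to float here, and the CHECK_CUDA name is
// illustrative, not the CUDA_CHECK_STATUS macro used by the original project.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                              \
  do {                                                                \
    cudaError_t err = (call);                                         \
    if (err != cudaSuccess) {                                         \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,              \
              cudaGetErrorString(err));                               \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

__global__ void fill_kernel(float* dst, float value, int count) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < count) dst[i] = value;              // one element per thread, guarded
}

void device_fill(float* dst, float value, int count) {
  const int threads = 256;
  const int blocks = (count + threads - 1) / threads;   // round up
  fill_kernel<<<blocks, threads>>>(dst, value, count);
  CHECK_CUDA(cudaGetLastError());             // catch launch configuration errors
}

int main() {
  const int n = 1000;
  float* d;
  CHECK_CUDA(cudaMalloc(&d, n * sizeof(float)));
  device_fill(d, 0.001f, n);                  // e.g. initialising lambdas to 0.001
  float h[2];
  CHECK_CUDA(cudaMemcpy(h, d, 2 * sizeof(float), cudaMemcpyDeviceToHost));
  printf("%f %f\n", h[0], h[1]);
  CHECK_CUDA(cudaFree(d));
  return 0;
}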
393b41a011a6cd8fa271561e608d474c25256cc2.hip
// !!! This is a file automatically generated by hipify!!! /** * Nathan Dunn * Project 3: Work Efficient Parallel Reduction and Work Efficient Parallel Prefix Sum * Professor Liu * CS-4370-90 * 11-18-19 */ #include <stdio.h> #include <hip/hip_runtime.h> #define N 2048 #define BLOCK_SIZE 1024 /** * Performs Prefix Sum on a Vector using the CPU * */ void hostPrefixSum(int *y, int *x, int length){ y[0] = x[0]; for (int i = 1; i < length; i++) y[i] = y [i-1] + x[i]; } /** * Performs Prefix Sum on a vector using GPU */ __global__ void work_efficient_scan_kernel(int *x, int *y, int *sum_arr, int InputSize){ __shared__ int scan_array[2 * BLOCK_SIZE]; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x * blockDim.x; scan_array[t] = y[start + t]; scan_array[blockDim.x + t] = y[start + blockDim.x + t]; __syncthreads(); // Perform reduction step int reduction_stride = 1; while(reduction_stride <= BLOCK_SIZE){ int index = (threadIdx.x + 1) * reduction_stride * 2 - 1; if(index < 2 * BLOCK_SIZE) scan_array[index] += scan_array[index-reduction_stride]; reduction_stride = reduction_stride * 2; __syncthreads(); } // Perform post scan step int post_stride = BLOCK_SIZE / 2; while(post_stride > 0){ int index = (threadIdx.x + 1) * post_stride * 2 - 1; if(index + post_stride < 2 * BLOCK_SIZE) scan_array[index + post_stride] += scan_array[index]; post_stride = post_stride / 2; __syncthreads(); } __syncthreads(); x[start + t] = scan_array[t]; x[start+ blockDim.x + t] = scan_array[blockDim.x + t]; sum_arr[blockIdx.x] = x[start + blockDim.x + t]; } /** * Compares two vectors a and b for equality */ int verify(int *a, int *b, int length){ for(int i = 0; i < length; i++){ if(a[i] != b[i]) return 0; } return 1; } /** * Print the given vector a */ void printVector(int *a, int length){ for(int i = 0; i < length; i++){ printf("|%d", a[i]); } printf("|\n"); } /** Performs prefix sum on the vector */ void doPrefixSum(int *vect, int *gpu_sum, int *sum_arr, int length){ hipEvent_t gpuStart,gpuStop; int sumArraySize = ceil((float)N / (2 * BLOCK_SIZE)); int *vect_dev, *gpu_sum_dev, *sum_arr_dev; // holds each time for computation / copy of each kernel call float copyTo, computationTime, copyFrom; // block and grid initialization for gpu dim3 dimBlock(BLOCK_SIZE, 1, 1); dim3 dimGrid(ceil(N / dimBlock.x), 1, 1); // allocate device memory hipMalloc((void **)(&vect_dev), N * sizeof(int)); hipMalloc((void **)(&gpu_sum_dev), N * sizeof(int)); hipMalloc((void **)(&sum_arr_dev), sumArraySize * sizeof(int)); // Begin measuring time for copying memory over to device hipEventCreate(&gpuStart); hipEventCreate(&gpuStop); hipEventRecord(gpuStart,0); // copy vector on host to gpu device hipMemcpy(vect_dev, vect, N * sizeof(int), hipMemcpyHostToDevice); hipDeviceSynchronize(); // Finish measuring time for copying memory over to device hipEventRecord(gpuStop,0); hipEventSynchronize(gpuStop); hipEventElapsedTime(&copyTo,gpuStart,gpuStop); hipEventDestroy(gpuStart); hipEventDestroy(gpuStop); // Begin measuring GPU computation time hipEventCreate(&gpuStart); hipEventCreate(&gpuStop); hipEventRecord(gpuStart,0); // Launch kernels for sum hipLaunchKernelGGL(( work_efficient_scan_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_sum_dev, vect_dev, sum_arr_dev, N); hipDeviceSynchronize(); // Finish measuring GPU computation time hipEventRecord(gpuStop,0); hipEventSynchronize(gpuStop); hipEventElapsedTime(&computationTime,gpuStart,gpuStop); hipEventDestroy(gpuStart); hipEventDestroy(gpuStop); // Begin measuring time for copying memory 
back to host hipEventCreate(&gpuStart); hipEventCreate(&gpuStop); hipEventRecord(gpuStart,0); // copy sum scan vector on device back to host hipMemcpy(gpu_sum, gpu_sum_dev, N * sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // copy block sum vector on device back to host hipMemcpy(sum_arr, sum_arr_dev, sumArraySize * sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Finish measuring time for copying memory back to host hipEventRecord(gpuStop,0); hipEventSynchronize(gpuStop); hipEventElapsedTime(&copyFrom,gpuStart,gpuStop); hipEventDestroy(gpuStart); hipEventDestroy(gpuStop); // free system and device memory hipFree(vect_dev); hipFree(gpu_sum_dev); hipFree(sum_arr_dev); printf("GPU Time: %f\n", computationTime); printf("Memory Transfer Time: %f\n", copyTo + copyFrom); } int main(void){ printf("\nVECTOR SIZE: %d\nBLOCK SIZE: %d\n\n", N, BLOCK_SIZE); int sumArraySize = ceil((float)N / (2 * BLOCK_SIZE)); int *vect, *cpu_sum, *gpu_sum, *sum_arr, *sum_arr_prefix; // initialize cpu vectors vect = (int*)malloc(sizeof(int) * N); // original vector cpu_sum = (int*)malloc(sizeof(int) * N); // stores cpu prefix sum gpu_sum = (int*)malloc(sizeof(int) * N); // stores copied gpu prefix sum sum_arr = (int*)malloc(sizeof(int) * sumArraySize); // stores block sums sum_arr_prefix = (int*)malloc(sizeof(int) * 2 * BLOCK_SIZE); // stores prefix sum of block sum // initialize vect int init = 1325; for (int i = 0; i < N; i++){ init = 3125 * init % 65521; vect[i] = (init - 32768) / 16384; } // perform initial sum on vector and then prefix sum on the sum array (if applicable) doPrefixSum(vect, gpu_sum, sum_arr, N); // variables used to measure cpu computation time clock_t cpuStart, cpuEnd; float cpuTimeTaken; // start measuring cpu computation time cpuStart = clock(); // perform prefix sum on cpu hostPrefixSum(cpu_sum, vect, N); // stop measuring cpu computation time cpuEnd = clock(); cpuTimeTaken = ((float)cpuEnd - cpuStart)/CLOCKS_PER_SEC; // in seconds printf("\nCPU Time: %f\n", cpuTimeTaken); if(verify(gpu_sum, cpu_sum, N)) printf("\nTEST PASSED!\n"); else printf("\nTEST FAILED!\n"); // free system memory free(vect); free(cpu_sum); free(gpu_sum); free(sum_arr); free(sum_arr_prefix); return 0; }
393b41a011a6cd8fa271561e608d474c25256cc2.cu
/** * Nathan Dunn * Project 3: Work Efficient Parallel Reduction and Work Efficient Parallel Prefix Sum * Professor Liu * CS-4370-90 * 11-18-19 */ #include <stdio.h> #include <cuda.h> #define N 2048 #define BLOCK_SIZE 1024 /** * Performs Prefix Sum on a Vector using the CPU * */ void hostPrefixSum(int *y, int *x, int length){ y[0] = x[0]; for (int i = 1; i < length; i++) y[i] = y [i-1] + x[i]; } /** * Performs Prefix Sum on a vector using GPU */ __global__ void work_efficient_scan_kernel(int *x, int *y, int *sum_arr, int InputSize){ __shared__ int scan_array[2 * BLOCK_SIZE]; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x * blockDim.x; scan_array[t] = y[start + t]; scan_array[blockDim.x + t] = y[start + blockDim.x + t]; __syncthreads(); // Perform reduction step int reduction_stride = 1; while(reduction_stride <= BLOCK_SIZE){ int index = (threadIdx.x + 1) * reduction_stride * 2 - 1; if(index < 2 * BLOCK_SIZE) scan_array[index] += scan_array[index-reduction_stride]; reduction_stride = reduction_stride * 2; __syncthreads(); } // Perform post scan step int post_stride = BLOCK_SIZE / 2; while(post_stride > 0){ int index = (threadIdx.x + 1) * post_stride * 2 - 1; if(index + post_stride < 2 * BLOCK_SIZE) scan_array[index + post_stride] += scan_array[index]; post_stride = post_stride / 2; __syncthreads(); } __syncthreads(); x[start + t] = scan_array[t]; x[start+ blockDim.x + t] = scan_array[blockDim.x + t]; sum_arr[blockIdx.x] = x[start + blockDim.x + t]; } /** * Compares two vectors a and b for equality */ int verify(int *a, int *b, int length){ for(int i = 0; i < length; i++){ if(a[i] != b[i]) return 0; } return 1; } /** * Print the given vector a */ void printVector(int *a, int length){ for(int i = 0; i < length; i++){ printf("|%d", a[i]); } printf("|\n"); } /** Performs prefix sum on the vector */ void doPrefixSum(int *vect, int *gpu_sum, int *sum_arr, int length){ cudaEvent_t gpuStart,gpuStop; int sumArraySize = ceil((float)N / (2 * BLOCK_SIZE)); int *vect_dev, *gpu_sum_dev, *sum_arr_dev; // holds each time for computation / copy of each kernel call float copyTo, computationTime, copyFrom; // block and grid initialization for gpu dim3 dimBlock(BLOCK_SIZE, 1, 1); dim3 dimGrid(ceil(N / dimBlock.x), 1, 1); // allocate device memory cudaMalloc((void **)(&vect_dev), N * sizeof(int)); cudaMalloc((void **)(&gpu_sum_dev), N * sizeof(int)); cudaMalloc((void **)(&sum_arr_dev), sumArraySize * sizeof(int)); // Begin measuring time for copying memory over to device cudaEventCreate(&gpuStart); cudaEventCreate(&gpuStop); cudaEventRecord(gpuStart,0); // copy vector on host to gpu device cudaMemcpy(vect_dev, vect, N * sizeof(int), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); // Finish measuring time for copying memory over to device cudaEventRecord(gpuStop,0); cudaEventSynchronize(gpuStop); cudaEventElapsedTime(&copyTo,gpuStart,gpuStop); cudaEventDestroy(gpuStart); cudaEventDestroy(gpuStop); // Begin measuring GPU computation time cudaEventCreate(&gpuStart); cudaEventCreate(&gpuStop); cudaEventRecord(gpuStart,0); // Launch kernels for sum work_efficient_scan_kernel<<<dimGrid, dimBlock>>>(gpu_sum_dev, vect_dev, sum_arr_dev, N); cudaDeviceSynchronize(); // Finish measuring GPU computation time cudaEventRecord(gpuStop,0); cudaEventSynchronize(gpuStop); cudaEventElapsedTime(&computationTime,gpuStart,gpuStop); cudaEventDestroy(gpuStart); cudaEventDestroy(gpuStop); // Begin measuring time for copying memory back to host cudaEventCreate(&gpuStart); cudaEventCreate(&gpuStop); 
cudaEventRecord(gpuStart,0); // copy sum scan vector on device back to host cudaMemcpy(gpu_sum, gpu_sum_dev, N * sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // copy block sum vector on device back to host cudaMemcpy(sum_arr, sum_arr_dev, sumArraySize * sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // Finish measuring time for copying memory back to host cudaEventRecord(gpuStop,0); cudaEventSynchronize(gpuStop); cudaEventElapsedTime(&copyFrom,gpuStart,gpuStop); cudaEventDestroy(gpuStart); cudaEventDestroy(gpuStop); // free system and device memory cudaFree(vect_dev); cudaFree(gpu_sum_dev); cudaFree(sum_arr_dev); printf("GPU Time: %f\n", computationTime); printf("Memory Transfer Time: %f\n", copyTo + copyFrom); } int main(void){ printf("\nVECTOR SIZE: %d\nBLOCK SIZE: %d\n\n", N, BLOCK_SIZE); int sumArraySize = ceil((float)N / (2 * BLOCK_SIZE)); int *vect, *cpu_sum, *gpu_sum, *sum_arr, *sum_arr_prefix; // initialize cpu vectors vect = (int*)malloc(sizeof(int) * N); // original vector cpu_sum = (int*)malloc(sizeof(int) * N); // stores cpu prefix sum gpu_sum = (int*)malloc(sizeof(int) * N); // stores copied gpu prefix sum sum_arr = (int*)malloc(sizeof(int) * sumArraySize); // stores block sums sum_arr_prefix = (int*)malloc(sizeof(int) * 2 * BLOCK_SIZE); // stores prefix sum of block sum // initialize vect int init = 1325; for (int i = 0; i < N; i++){ init = 3125 * init % 65521; vect[i] = (init - 32768) / 16384; } // perform initial sum on vector and then prefix sum on the sum array (if applicable) doPrefixSum(vect, gpu_sum, sum_arr, N); // variables used to measure cpu computation time clock_t cpuStart, cpuEnd; float cpuTimeTaken; // start measuring cpu computation time cpuStart = clock(); // perform prefix sum on cpu hostPrefixSum(cpu_sum, vect, N); // stop measuring cpu computation time cpuEnd = clock(); cpuTimeTaken = ((float)cpuEnd - cpuStart)/CLOCKS_PER_SEC; // in seconds printf("\nCPU Time: %f\n", cpuTimeTaken); if(verify(gpu_sum, cpu_sum, N)) printf("\nTEST PASSED!\n"); else printf("\nTEST FAILED!\n"); // free system memory free(vect); free(cpu_sum); free(gpu_sum); free(sum_arr); free(sum_arr_prefix); return 0; }
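// A minimal, standalone version of the cudaEvent timing pattern used repeatedly
// in doPrefixSum above: record a start and a stop event around the work, wait on
// the stop event, then read the elapsed milliseconds. The timed kernel here is a
// throwaway placeholder, not the scan kernel.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy_kernel(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 22;
  float* d;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);                     // enqueue start marker on stream 0
  busy_kernel<<<(n + 255) / 256, 256>>>(d, n);   // work being timed
  cudaEventRecord(stop, 0);                      // enqueue stop marker
  cudaEventSynchronize(stop);                    // block until the stop event completes

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);        // elapsed GPU time in milliseconds
  printf("kernel time: %f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}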
60b457b2edab6fca21c3964e46aac4b0f4b1d2ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < numRows && j < numCols) { int k = i * numCols + j; uchar4 rgba = rgbaImage[k]; float grey = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[k] = grey; } //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(32, 32, 1); const dim3 gridSize((numRows / blockSize.x) + 1, (numCols / blockSize.y) + 1, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
60b457b2edab6fca21c3964e46aac4b0f4b1d2ee.cu
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "utils.h"

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion

  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < numRows && j < numCols) {
    int k = i * numCols + j;
    uchar4 rgba = rgbaImage[k];
    float grey = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
    greyImage[k] = grey;
  }

  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage,
                            size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  const dim3 blockSize(32, 32, 1);
  const dim3 gridSize((numRows / blockSize.x) + 1, (numCols / blockSize.y) + 1, 1);
  //TODO
  rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);

  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
}
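// Editorial sketch (not part of the assignment file above): a small host-side
// reference that applies the same NTSC weights on the CPU, useful for
// spot-checking the GPU output. The function name referenceGreyscale is an
// assumption; uchar4 is assumed to come from the CUDA headers already pulled in
// by utils.h. The cast truncates, matching the kernel's implicit float-to-uchar
// conversion above.
void referenceGreyscale(const uchar4 *rgba, unsigned char *grey,
                        size_t numRows, size_t numCols)
{
  for (size_t r = 0; r < numRows; ++r) {
    for (size_t c = 0; c < numCols; ++c) {
      const uchar4 p = rgba[r * numCols + c];
      const float v = .299f * p.x + .587f * p.y + .114f * p.z;
      grey[r * numCols + c] = (unsigned char)v;  // truncate, same as the kernel
    }
  }
}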
dea9eacb9af05d39b64f69f29545cd4e31b39e08.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Three Dimensional Poisson solver using NVIDIA CUDA
//Author: Arkavo Hait, 2021

#include <stdio.h>
#include <iostream>
#include <cmath>
#include <time.h>
#include <chrono>
#include <string>
#include <limits.h>
#include <fstream>

using namespace std;

//Box struct
struct BOX
{
  int X;
  int Y;
  int Z;
};

//X double derivative
__global__ void DDX(double* R, double* C,int X,int Y,int Z,double dx)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2)))
  {
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=1;i<X-1;i++)
    {
      *(R+index+i) = (*(C+index+1+i) + *(C+index-1+i) - 2* *(C+index+i))/(dx*dx);
    }
  }
}

//Y double derivative
__global__ void DDY(double* R, double* C,int X,int Y,int Z, double dy)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2))){
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=1;i<Y-1;i++)
    {
      *(R+index+i*X) = (*(C+index+X*(i+1)) + *(C+index+X*(i-1)) - 2* *(C+index+i*X))/(dy*dy);
    }}
}

//Z double derivative
__global__ void DDZ(double* R, double* C,int X,int Y,int Z, double dz)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2))){
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=0;i<Z-1;i++)
    {
      *(R+index+i*X*Y) = (*(C+index+X*Y*(i+1)) + *(C+index+X*Y*(i-1)) - 2* *(C+index+i*X*Y))/(dz*dz);
    }}
}

//parallel function to update matrices
__global__ void ASSIGN(double* R, double* C,int X,int Y,int Z)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2))){
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=1;i<X-1;i++)
    {
      *(R+index+i) = *(C+index+i);
    }}
}

//parallel function to add two matrices
__global__ void ADD(double* R,double* C,double dt,int X,int Y,int Z)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2))){
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=0;i<X;i++)
    {
      *(R+index+i) += (*(C+index+i) * dt);
    }}
}

//parallel function to compare two matrices, outputting the maximum difference between elements
__global__ void COMPARE(double* R, double* C, double* OUT_H,int X,int Y,int Z)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2))){
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=1;i<X-1;i++)
    {
      if(abs(*(R+index+i)-*(C+index+i))>= *OUT_H)
      {
        *OUT_H = abs(*(R+index+i) - *(C+index+i));
      }
    }}
}

//reset function, use it to reset any pointer
__global__ void RESET_CTR(double* C)
{
  *C = 0;
}

//display function, use to display any array [deprecated for future use]
void display(double* DATA,int X,int Y,int Z)
{
  for(int k=0;k<Z;k++)
  {for(int j=0;j<Y;j++)
    {for(int i=0;i<X;i++)
      {printf("%.2lf ",*(DATA+i+X*j+X*Z*k));}
      printf("\n");}printf("\n");}
}

//main function; argument handling to be cleaned up later
int main(int argc, char* argv[])
{
  std::ofstream file;
  file.open("data.csv",ios::app);
  //file<<"Threads, X, Y, Z, Total, Time\n";

  //DECLARE YOUR VARIABLES HERE
  struct BOX grid;
  int threads;
  //std::cout<<argc<<"\n";
  if(argc==2)
  {
    threads = stoi(argv[1]);
    grid.X=10;
    grid.Y=10;
    grid.Z=10;
  }
  if(argc>2)
  {
    threads = stoi(argv[1]);
    grid.X = stoi(argv[2]);
    grid.Y = stoi(argv[3]);
    grid.Z = stoi(argv[4]);
  }
  std::cout<<"Threads: "<<threads<<"\n";

  const int X = grid.X;
  const int Y = grid.Y;
  const int Z = grid.Z;

  int blocks = 1 + (Y-2) * (Z-2) / threads;
  std::cout<<"Blocks: "<<blocks<<"\n";

  //step size for the explicit update (dt)
  double step = 0.001;
  //tolerance
  double tol = 0.00001;

  std::cout<<"\nX"<<X<<" Y"<<Y<<" Z"<<Z<<" Total Capacity"<<X*Y*Z<<endl;

  unsigned long long SIZE_0 = ((int)sizeof(double))*X*Y*Z;

  double* DATA_H;
  double* DATA_F;
  DATA_H = (double*)malloc(SIZE_0);
  DATA_F = (double*)malloc(SIZE_0);

  //Impose Boundary conditions here
  for(int i=0;i<X;i++)
  {for(int j=0;j<Y;j++)
    {for(int k=0;k<Z;k++)
      {
        if(i==0||i==(X-1)||j==0||j==(Y-1)||k==0||k==(Z-1))
          *(DATA_H+i+j*X+k*X*Y) = 5;
        else
          *(DATA_H+i+j*X+k*X*Y) = 0;
      }}}

  //error counters
  double* CC;
  double CCD = 0;
  //DATA pointer for device state 0
  double* DATA_ORIGINAL;
  //DATA pointer for device state 1
  double* DATA_NEXT;
  //Derivative results pointers
  double* DDX_D;
  double* DDY_D;
  double* DDZ_D;
  //array size for device

  //allocating space for arrays
  hipMalloc((void**)&DATA_ORIGINAL,SIZE_0);
  hipMalloc((void**)&DATA_NEXT,SIZE_0);
  hipMalloc((void**)&DDX_D,SIZE_0);
  hipMalloc((void**)&DDY_D,SIZE_0);
  hipMalloc((void**)&DDZ_D,SIZE_0);
  hipMalloc(&CC,(int)sizeof(double));

  //seed the maximum error above the tolerance so the loop runs at least once
  CCD = 10.;

  //copy data state0, state1
  hipMemcpy(DATA_ORIGINAL,DATA_H,SIZE_0,hipMemcpyHostToDevice);
  hipMemcpy(DATA_NEXT,DATA_H,SIZE_0,hipMemcpyHostToDevice);

  //counter
  int ct = 0;

  auto hst_st = std::chrono::high_resolution_clock::now();

  //iterate while the maximum difference exceeds the tolerance
  while(CCD>tol)
  {
    //reset difference every loop
    hipLaunchKernelGGL(( RESET_CTR) , dim3(1),dim3(1), 0, 0, CC);
    //run derivatives
    hipLaunchKernelGGL(( DDY) , dim3(blocks),dim3(threads), 0, 0, DDY_D,DATA_ORIGINAL,X,Y,Z,10.);
    hipLaunchKernelGGL(( DDZ) , dim3(blocks),dim3(threads), 0, 0, DDZ_D,DATA_ORIGINAL,X,Y,Z,10.);
    hipLaunchKernelGGL(( DDX) , dim3(blocks),dim3(threads), 0, 0, DDX_D,DATA_ORIGINAL,X,Y,Z,10.);
    //add into state 1
    hipLaunchKernelGGL(( ADD) , dim3(blocks),dim3(threads), 0, 0, DATA_NEXT,DDX_D,step,X,Y,Z);
    hipLaunchKernelGGL(( ADD) , dim3(blocks),dim3(threads), 0, 0, DATA_NEXT,DDY_D,step,X,Y,Z);
    hipLaunchKernelGGL(( ADD) , dim3(blocks),dim3(threads), 0, 0, DATA_NEXT,DDZ_D,step,X,Y,Z);
    //compare state 1 with state 0
    hipLaunchKernelGGL(( COMPARE), dim3(blocks),dim3(threads), 0, 0, DATA_ORIGINAL,DATA_NEXT,CC,X,Y,Z);
    //copy back max error
    hipMemcpy(&CCD,CC,sizeof(double),hipMemcpyDeviceToHost);
    //make state 1 the new state 0
    hipLaunchKernelGGL(( ASSIGN) , dim3(blocks),dim3(threads), 0, 0, DATA_ORIGINAL,DATA_NEXT,X,Y,Z);
    //update counter
    ct += 1;
    //print progress every 1000 loops to keep the output readable
    if(ct%1000==0)
    {
      hipMemcpy(&CCD,CC,sizeof(double),hipMemcpyDeviceToHost);
      printf("%d loops %0.6lf max error\r",ct,CCD);
    }
  }

  //copy back final array
  hipMemcpy(DATA_F,DATA_ORIGINAL,SIZE_0,hipMemcpyDeviceToHost);

  auto hst_en = std::chrono::high_resolution_clock::now();
  std::chrono::duration<float> duration = hst_en-hst_st;
  std::cout<<"\nDuration: "<<duration.count()<<"\n";

  //final print statement
  std::cout<<"\n\nConverged in "<<ct-1<<" loops\n\n";

  //file<<"Threads, X, Y, Z, Total, Time\n";
  file<<threads<<","<<X<<","<<Y<<","<<Z<<","<<X*Y*Z<<","<<duration.count()<<"\n";
  file.close();

  //display optional
  //display(DATA_F,X,Y,Z);

  //free pointers
  hipFree(DATA_ORIGINAL);
  hipFree(DATA_NEXT);
  hipFree(DDX_D);
  hipFree(DDY_D);
  hipFree(DDZ_D);

  return 0;
}
dea9eacb9af05d39b64f69f29545cd4e31b39e08.cu
//Three Dimensional Poisson solver using NVIDIA CUDA
//Author: Arkavo Hait, 2021

#include <stdio.h>
#include <iostream>
#include <cmath>
#include <time.h>
#include <chrono>
#include <string>
#include <limits.h>
#include <fstream>

using namespace std;

//Box struct
struct BOX
{
  int X;
  int Y;
  int Z;
};

//X double derivative
__global__ void DDX(double* R, double* C,int X,int Y,int Z,double dx)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2)))
  {
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=1;i<X-1;i++)
    {
      *(R+index+i) = (*(C+index+1+i) + *(C+index-1+i) - 2* *(C+index+i))/(dx*dx);
    }
  }
}

//Y double derivative
__global__ void DDY(double* R, double* C,int X,int Y,int Z, double dy)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2))){
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=1;i<Y-1;i++)
    {
      *(R+index+i*X) = (*(C+index+X*(i+1)) + *(C+index+X*(i-1)) - 2* *(C+index+i*X))/(dy*dy);
    }}
}

//Z double derivative
__global__ void DDZ(double* R, double* C,int X,int Y,int Z, double dz)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2))){
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=0;i<Z-1;i++)
    {
      *(R+index+i*X*Y) = (*(C+index+X*Y*(i+1)) + *(C+index+X*Y*(i-1)) - 2* *(C+index+i*X*Y))/(dz*dz);
    }}
}

//parallel function to update matrices
__global__ void ASSIGN(double* R, double* C,int X,int Y,int Z)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2))){
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=1;i<X-1;i++)
    {
      *(R+index+i) = *(C+index+i);
    }}
}

//parallel function to add two matrices
__global__ void ADD(double* R,double* C,double dt,int X,int Y,int Z)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2))){
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=0;i<X;i++)
    {
      *(R+index+i) += (*(C+index+i) * dt);
    }}
}

//parallel function to compare two matrices, outputting the maximum difference between elements
__global__ void COMPARE(double* R, double* C, double* OUT_H,int X,int Y,int Z)
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  if(idx<((Y-2)*(Z-2))){
    int idy = idx / (Y-2) ;
    int idz = idx % (Y-2) ;
    int index = idy*X + idz*X*Y + X+X*Y;
    for(int i=1;i<X-1;i++)
    {
      if(abs(*(R+index+i)-*(C+index+i))>= *OUT_H)
      {
        *OUT_H = abs(*(R+index+i) - *(C+index+i));
      }
    }}
}

//reset function, use it to reset any pointer
__global__ void RESET_CTR(double* C)
{
  *C = 0;
}

//display function, use to display any array [deprecated for future use]
void display(double* DATA,int X,int Y,int Z)
{
  for(int k=0;k<Z;k++)
  {for(int j=0;j<Y;j++)
    {for(int i=0;i<X;i++)
      {printf("%.2lf ",*(DATA+i+X*j+X*Z*k));}
      printf("\n");}printf("\n");}
}

//main function; argument handling to be cleaned up later
int main(int argc, char* argv[])
{
  std::ofstream file;
  file.open("data.csv",ios::app);
  //file<<"Threads, X, Y, Z, Total, Time\n";

  //DECLARE YOUR VARIABLES HERE
  struct BOX grid;
  int threads;
  //std::cout<<argc<<"\n";
  if(argc==2)
  {
    threads = stoi(argv[1]);
    grid.X=10;
    grid.Y=10;
    grid.Z=10;
  }
  if(argc>2)
  {
    threads = stoi(argv[1]);
    grid.X = stoi(argv[2]);
    grid.Y = stoi(argv[3]);
    grid.Z = stoi(argv[4]);
  }
  std::cout<<"Threads: "<<threads<<"\n";

  const int X = grid.X;
  const int Y = grid.Y;
  const int Z = grid.Z;

  int blocks = 1 + (Y-2) * (Z-2) / threads;
  std::cout<<"Blocks: "<<blocks<<"\n";

  //step size for the explicit update (dt)
  double step = 0.001;
  //tolerance
  double tol = 0.00001;

  std::cout<<"\nX"<<X<<" Y"<<Y<<" Z"<<Z<<" Total Capacity⇒"<<X*Y*Z<<endl;

  unsigned long long SIZE_0 = ((int)sizeof(double))*X*Y*Z;

  double* DATA_H;
  double* DATA_F;
  DATA_H = (double*)malloc(SIZE_0);
  DATA_F = (double*)malloc(SIZE_0);

  //Impose Boundary conditions here
  for(int i=0;i<X;i++)
  {for(int j=0;j<Y;j++)
    {for(int k=0;k<Z;k++)
      {
        if(i==0||i==(X-1)||j==0||j==(Y-1)||k==0||k==(Z-1))
          *(DATA_H+i+j*X+k*X*Y) = 5;
        else
          *(DATA_H+i+j*X+k*X*Y) = 0;
      }}}

  //error counters
  double* CC;
  double CCD = 0;
  //DATA pointer for device state 0
  double* DATA_ORIGINAL;
  //DATA pointer for device state 1
  double* DATA_NEXT;
  //Derivative results pointers
  double* DDX_D;
  double* DDY_D;
  double* DDZ_D;
  //array size for device

  //allocating space for arrays
  cudaMalloc((void**)&DATA_ORIGINAL,SIZE_0);
  cudaMalloc((void**)&DATA_NEXT,SIZE_0);
  cudaMalloc((void**)&DDX_D,SIZE_0);
  cudaMalloc((void**)&DDY_D,SIZE_0);
  cudaMalloc((void**)&DDZ_D,SIZE_0);
  cudaMalloc(&CC,(int)sizeof(double));

  //seed the maximum error above the tolerance so the loop runs at least once
  CCD = 10.;

  //copy data state0, state1
  cudaMemcpy(DATA_ORIGINAL,DATA_H,SIZE_0,cudaMemcpyHostToDevice);
  cudaMemcpy(DATA_NEXT,DATA_H,SIZE_0,cudaMemcpyHostToDevice);

  //counter
  int ct = 0;

  auto hst_st = std::chrono::high_resolution_clock::now();

  //iterate while the maximum difference exceeds the tolerance
  while(CCD>tol)
  {
    //reset difference every loop
    RESET_CTR <<<1,1>>> (CC);
    //run derivatives
    DDY <<<blocks,threads>>> (DDY_D,DATA_ORIGINAL,X,Y,Z,10.);
    DDZ <<<blocks,threads>>> (DDZ_D,DATA_ORIGINAL,X,Y,Z,10.);
    DDX <<<blocks,threads>>> (DDX_D,DATA_ORIGINAL,X,Y,Z,10.);
    //add into state 1
    ADD <<<blocks,threads>>> (DATA_NEXT,DDX_D,step,X,Y,Z);
    ADD <<<blocks,threads>>> (DATA_NEXT,DDY_D,step,X,Y,Z);
    ADD <<<blocks,threads>>> (DATA_NEXT,DDZ_D,step,X,Y,Z);
    //compare state 1 with state 0
    COMPARE<<<blocks,threads>>>(DATA_ORIGINAL,DATA_NEXT,CC,X,Y,Z);
    //copy back max error
    cudaMemcpy(&CCD,CC,sizeof(double),cudaMemcpyDeviceToHost);
    //make state 1 the new state 0
    ASSIGN <<<blocks,threads>>> (DATA_ORIGINAL,DATA_NEXT,X,Y,Z);
    //update counter
    ct += 1;
    //print progress every 1000 loops to keep the output readable
    if(ct%1000==0)
    {
      cudaMemcpy(&CCD,CC,sizeof(double),cudaMemcpyDeviceToHost);
      printf("%d loops %0.6lf max error\r",ct,CCD);
    }
  }

  //copy back final array
  cudaMemcpy(DATA_F,DATA_ORIGINAL,SIZE_0,cudaMemcpyDeviceToHost);

  auto hst_en = std::chrono::high_resolution_clock::now();
  std::chrono::duration<float> duration = hst_en-hst_st;
  std::cout<<"\nDuration: "<<duration.count()<<"\n";

  //final print statement
  std::cout<<"\n\nConverged in "<<ct-1<<" loops\n\n";

  //file<<"Threads, X, Y, Z, Total, Time\n";
  file<<threads<<","<<X<<","<<Y<<","<<Z<<","<<X*Y*Z<<","<<duration.count()<<"\n";
  file.close();

  //display optional
  //display(DATA_F,X,Y,Z);

  //free pointers
  cudaFree(DATA_ORIGINAL);
  cudaFree(DATA_NEXT);
  cudaFree(DDX_D);
  cudaFree(DDY_D);
  cudaFree(DDZ_D);

  return 0;
}
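// Editorial note (not part of the file above): the loop in main is a forward-Euler
// march of the heat equation toward the steady (Laplace) solution. For a 7-point
// Laplacian with equal spacing h in all three directions, the usual stability
// bound is dt <= h*h/6. With the values used above (h = 10 passed as dx/dy/dz and
// step = 0.001) that bound is met by a wide margin, so the iteration is stable but
// needs many sweeps. A minimal helper for choosing a larger, still-stable step is
// sketched below; the name maxStableStep and the suggested factor of 0.5 are
// assumptions, not part of the original code.
static double maxStableStep(double h)
{
  return h * h / 6.0;   // forward-Euler limit for the 3D Laplacian with spacing h
}
// e.g. step = 0.5 * maxStableStep(10.0); should stay stable and reach the
// tolerance in far fewer iterations than step = 0.001.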